-rw-r--r--.mailmap12
-rw-r--r--Documentation/admin-guide/README.rst2
-rw-r--r--Documentation/admin-guide/sysctl/kernel.rst11
-rw-r--r--Documentation/arch/powerpc/cxl.rst3
-rw-r--r--Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml7
-rw-r--r--Documentation/devicetree/bindings/display/mediatek/mediatek,dsc.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml70
-rw-r--r--Documentation/devicetree/bindings/display/msm/dsi-phy-common.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/msm/gmu.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml32
-rw-r--r--Documentation/devicetree/bindings/display/msm/qcom,sm8550-mdss.yaml14
-rw-r--r--Documentation/devicetree/bindings/display/msm/qcom,sm8650-mdss.yaml13
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml1
-rw-r--r--Documentation/filesystems/idmappings.rst4
-rw-r--r--Documentation/gpu/drivers.rst1
-rw-r--r--Documentation/gpu/nova/core/guidelines.rst24
-rw-r--r--Documentation/gpu/nova/core/todo.rst446
-rw-r--r--Documentation/gpu/nova/guidelines.rst69
-rw-r--r--Documentation/gpu/nova/index.rst30
-rw-r--r--Documentation/gpu/rfc/gpusvm.rst112
-rw-r--r--Documentation/gpu/rfc/index.rst4
-rw-r--r--Documentation/scheduler/sched-rt-group.rst3
-rw-r--r--Documentation/userspace-api/landlock.rst6
-rw-r--r--MAINTAINERS51
-rw-r--r--Makefile7
-rw-r--r--arch/arm/mm/fault-armv.c37
-rw-r--r--arch/arm64/include/asm/el2_setup.h31
-rw-r--r--arch/arm64/include/asm/hugetlb.h26
-rw-r--r--arch/arm64/include/asm/kvm_arm.h2
-rw-r--r--arch/arm64/include/asm/kvm_host.h2
-rw-r--r--arch/arm64/kernel/head.S22
-rw-r--r--arch/arm64/kvm/arm.c37
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-init.S10
-rw-r--r--arch/arm64/kvm/hyp/nvhe/psci-relay.c3
-rw-r--r--arch/arm64/kvm/vmid.c11
-rw-r--r--arch/arm64/mm/hugetlbpage.c61
-rw-r--r--arch/arm64/mm/init.c7
-rw-r--r--arch/loongarch/include/asm/hugetlb.h6
-rw-r--r--arch/loongarch/kernel/acpi.c12
-rw-r--r--arch/loongarch/kernel/machine_kexec.c4
-rw-r--r--arch/loongarch/kernel/setup.c3
-rw-r--r--arch/loongarch/kernel/smp.c47
-rw-r--r--arch/loongarch/kvm/exit.c6
-rw-r--r--arch/loongarch/kvm/main.c7
-rw-r--r--arch/loongarch/kvm/vcpu.c2
-rw-r--r--arch/loongarch/kvm/vm.c6
-rw-r--r--arch/loongarch/mm/mmap.c6
-rw-r--r--arch/m68k/include/asm/sun3_pgalloc.h6
-rw-r--r--arch/mips/boot/tools/relocs.c5
-rw-r--r--arch/mips/include/asm/hugetlb.h6
-rw-r--r--arch/parisc/include/asm/hugetlb.h2
-rw-r--r--arch/parisc/mm/hugetlbpage.c2
-rw-r--r--arch/powerpc/include/asm/hugetlb.h6
-rw-r--r--arch/riscv/include/asm/cmpxchg.h2
-rw-r--r--arch/riscv/include/asm/futex.h2
-rw-r--r--arch/riscv/include/asm/hugetlb.h3
-rw-r--r--arch/riscv/kernel/cacheinfo.c12
-rw-r--r--arch/riscv/kernel/cpufeature.c2
-rw-r--r--arch/riscv/kernel/setup.c2
-rw-r--r--arch/riscv/kernel/signal.c6
-rw-r--r--arch/riscv/kvm/aia_imsic.c1
-rw-r--r--arch/riscv/kvm/vcpu_sbi_hsm.c11
-rw-r--r--arch/riscv/kvm/vcpu_sbi_replace.c15
-rw-r--r--arch/riscv/kvm/vcpu_sbi_system.c3
-rw-r--r--arch/riscv/mm/hugetlbpage.c2
-rw-r--r--arch/s390/include/asm/hugetlb.h16
-rw-r--r--arch/s390/kernel/ftrace.c3
-rw-r--r--arch/s390/kernel/traps.c6
-rw-r--r--arch/s390/mm/hugetlbpage.c4
-rw-r--r--arch/sparc/include/asm/hugetlb.h2
-rw-r--r--arch/sparc/mm/hugetlbpage.c2
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/boot/compressed/pgtable_64.c2
-rw-r--r--arch/x86/coco/sev/core.c23
-rw-r--r--arch/x86/entry/common.c1
-rw-r--r--arch/x86/events/core.c2
-rw-r--r--arch/x86/events/intel/core.c85
-rw-r--r--arch/x86/events/rapl.c1
-rw-r--r--arch/x86/include/asm/kvm_host.h1
-rw-r--r--arch/x86/include/asm/nospec-branch.h32
-rw-r--r--arch/x86/include/asm/pgtable-2level_types.h8
-rw-r--r--arch/x86/include/asm/sev.h6
-rw-r--r--arch/x86/kernel/amd_nb.c9
-rw-r--r--arch/x86/kernel/cpu/cacheinfo.c2
-rw-r--r--arch/x86/kernel/cpu/cyrix.c4
-rw-r--r--arch/x86/kernel/cpu/intel.c52
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c272
-rw-r--r--arch/x86/kernel/cpu/microcode/amd_shas.c444
-rw-r--r--arch/x86/kernel/cpu/microcode/internal.h2
-rw-r--r--arch/x86/kernel/cpu/sgx/ioctl.c7
-rw-r--r--arch/x86/kernel/devicetree.c3
-rw-r--r--arch/x86/kernel/irq.c2
-rw-r--r--arch/x86/kvm/cpuid.c2
-rw-r--r--arch/x86/kvm/mmu/mmu.c12
-rw-r--r--arch/x86/kvm/svm/sev.c24
-rw-r--r--arch/x86/kvm/svm/svm.c49
-rw-r--r--arch/x86/kvm/svm/svm.h2
-rw-r--r--arch/x86/kvm/svm/vmenter.S10
-rw-r--r--arch/x86/kvm/vmx/nested.c11
-rw-r--r--arch/x86/kvm/vmx/vmx.c8
-rw-r--r--arch/x86/kvm/vmx/vmx.h2
-rw-r--r--arch/x86/kvm/x86.c4
-rw-r--r--block/bio.c2
-rw-r--r--block/blk-merge.c2
-rw-r--r--block/blk-settings.c14
-rw-r--r--block/blk-zoned.c76
-rw-r--r--block/blk.h9
-rw-r--r--block/partitions/efi.c2
-rw-r--r--drivers/acpi/platform_profile.c94
-rw-r--r--drivers/android/binderfs.c1
-rw-r--r--drivers/ata/ahci.h8
-rw-r--r--drivers/ata/libahci.c1
-rw-r--r--drivers/ata/libata-core.c4
-rw-r--r--drivers/base/component.c3
-rw-r--r--drivers/base/core.c1
-rw-r--r--drivers/base/devres.c12
-rw-r--r--drivers/block/ublk_drv.c7
-rw-r--r--drivers/bluetooth/btusb.c7
-rw-r--r--drivers/bus/mhi/host/pci_generic.c5
-rw-r--r--drivers/bus/simple-pm-bus.c22
-rw-r--r--drivers/cdx/cdx.c6
-rw-r--r--drivers/char/misc.c2
-rw-r--r--drivers/char/virtio_console.c4
-rw-r--r--drivers/dma/qcom/bam_dma.c24
-rw-r--r--drivers/dma/tegra210-adma.c5
-rw-r--r--drivers/firmware/cirrus/cs_dsp.c24
-rw-r--r--drivers/firmware/efi/cper-arm.c2
-rw-r--r--drivers/firmware/efi/cper-x86.c2
-rw-r--r--drivers/firmware/efi/mokvar-table.c57
-rw-r--r--drivers/gpio/gpio-aggregator.c20
-rw-r--r--drivers/gpio/gpio-rcar.c31
-rw-r--r--drivers/gpio/gpiolib.c20
-rw-r--r--drivers/gpu/Makefile1
-rw-r--r--drivers/gpu/drm/Kconfig9
-rw-r--r--drivers/gpu/drm/Makefile1
-rw-r--r--drivers/gpu/drm/drm_client_event.c41
-rw-r--r--drivers/gpu/drm/drm_damage_helper.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c20
-rw-r--r--drivers/gpu/drm/drm_fbdev_dma.c217
-rw-r--r--drivers/gpu/drm/drm_gpusvm.c2250
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c2
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.c18
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c126
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c74
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c30
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c39
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c1364
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h34
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_core.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c34
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_map.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reset.c73
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reset.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h55
-rw-r--r--drivers/gpu/drm/i915/display/intel_dkl_phy.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_dkl_phy.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c50
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_hdcp.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c158
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_test.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_tunnel.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c29
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.c45
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c270
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc_regs.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c340
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.h17
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c464
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.h16
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c94
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c55
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_verify.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.c35
-rw-r--r--drivers/gpu/drm/i915/display/intel_pfit.c170
-rw-r--r--drivers/gpu/drm/i915/display/intel_pfit.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_pfit_regs.h79
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c75
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_tdf.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c182
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.h1
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.c60
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.h7
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c53
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c56
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.h4
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c2
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.h6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.c38
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.h3
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c47
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c47
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset_types.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c19
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rps.c13
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_slpc.c17
-rw-r--r--drivers/gpu/drm/i915/gt/shmem_utils.c8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c11
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c65
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c12
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c3
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c16
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gtt_view_types.h59
-rw-r--r--drivers/gpu/drm/i915/i915_module.c2
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c120
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h11
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h74
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h52
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c1
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.c6
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.h4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c7
-rw-r--r--drivers/gpu/drm/imagination/Makefile2
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_meta.c6
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.c4
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.c18
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.h4
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm.c134
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm.h3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c323
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi_regs.h9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c10
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c104
-rw-r--r--drivers/gpu/drm/msm/Kconfig2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_catalog.c29
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c72
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c8
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c15
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h17
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h19
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c140
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c233
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c468
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c16
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c40
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c30
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h15
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c298
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h14
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c3
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c25
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c4
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c5
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c193
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c32
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h5
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c21
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c120
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h31
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_audio.c124
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c324
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c15
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c1
-rw-r--r--drivers/gpu/drm/msm/msm_dsc_helper.h11
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c9
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h8
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c54
-rw-r--r--drivers/gpu/drm/msm/msm_kms.c19
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h10
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h4
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml1
-rw-r--r--drivers/gpu/drm/msm/registers/display/hdmi.xml2
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c1
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler_trace.h4
-rw-r--r--drivers/gpu/drm/tiny/appletbdrm.c1
-rw-r--r--drivers/gpu/drm/tiny/bochs.c5
-rw-r--r--drivers/gpu/drm/vkms/vkms_composer.c2
-rw-r--r--drivers/gpu/drm/xe/Kconfig10
-rw-r--r--drivers/gpu/drm/xe/Makefile3
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_abi.h1
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h3
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h15
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h8
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_gtt_view_types.h7
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h13
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_vma_types.h74
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h2
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h7
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.c4
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c26
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.h1
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rps.c2
-rw-r--r--drivers/gpu/drm/xe/display/xe_fb_pin.c21
-rw-r--r--drivers/gpu/drm/xe/display/xe_tdf.c6
-rw-r--r--drivers/gpu/drm/xe/regs/xe_engine_regs.h1
-rw-r--r--drivers/gpu/drm/xe/regs/xe_eu_stall_regs.h29
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gt_regs.h7
-rw-r--r--drivers/gpu/drm/xe/regs/xe_regs.h4
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci.c26
-rw-r--r--drivers/gpu/drm/xe/tests/xe_rtp_test.c2
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c54
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h20
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c8
-rw-r--r--drivers/gpu/drm/xe/xe_device.c101
-rw-r--r--drivers/gpu/drm/xe/xe_device.h3
-rw-r--r--drivers/gpu/drm/xe/xe_device_sysfs.c6
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_eu_stall.c960
-rw-r--r--drivers/gpu/drm/xe/xe_eu_stall.h24
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c11
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gen_wa_oob.c6
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c13
-rw-r--r--drivers/gpu/drm/xe/xe_gt_clock.c57
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.c11
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c20
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c5
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.c9
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.c8
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c22
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.h13
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h15
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c13
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.c373
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.h19
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity_types.h92
-rw-r--r--drivers/gpu/drm/xe/xe_guc_fwif.h19
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c16
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c10
-rw-r--r--drivers/gpu/drm/xe/xe_guc_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.c39
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.h3
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.c188
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.h7
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c6
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_group.c1
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c175
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.h10
-rw-r--r--drivers/gpu/drm/xe/xe_mocs.c4
-rw-r--r--drivers/gpu/drm/xe/xe_module.c7
-rw-r--r--drivers/gpu/drm/xe/xe_module.h2
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c35
-rw-r--r--drivers/gpu/drm/xe/xe_observation.c14
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c245
-rw-r--r--drivers/gpu/drm/xe/xe_pci_sriov.c51
-rw-r--r--drivers/gpu/drm/xe/xe_pci_types.h15
-rw-r--r--drivers/gpu/drm/xe/xe_pmu.c175
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c493
-rw-r--r--drivers/gpu/drm/xe/xe_pt.h5
-rw-r--r--drivers/gpu/drm/xe/xe_pt_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_pt_walk.c3
-rw-r--r--drivers/gpu/drm/xe/xe_pt_walk.h4
-rw-r--r--drivers/gpu/drm/xe/xe_pxp.c87
-rw-r--r--drivers/gpu/drm/xe/xe_pxp.h2
-rw-r--r--drivers/gpu/drm/xe/xe_query.c50
-rw-r--r--drivers/gpu/drm/xe/xe_reg_whitelist.c4
-rw-r--r--drivers/gpu/drm/xe/xe_res_cursor.h123
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c32
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.c6
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.h2
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.c77
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.h5
-rw-r--r--drivers/gpu/drm/xe/xe_svm.c946
-rw-r--r--drivers/gpu/drm/xe/xe_svm.h150
-rw-r--r--drivers/gpu/drm/xe/xe_tile.c5
-rw-r--r--drivers/gpu/drm/xe/xe_trace.h30
-rw-r--r--drivers/gpu/drm/xe/xe_trace_guc.h49
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.c84
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.h3
-rw-r--r--drivers/gpu/drm/xe/xe_uc.c3
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c521
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h25
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h65
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c31
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules10
-rw-r--r--drivers/gpu/nova-core/Kconfig14
-rw-r--r--drivers/gpu/nova-core/Makefile3
-rw-r--r--drivers/gpu/nova-core/driver.rs47
-rw-r--r--drivers/gpu/nova-core/firmware.rs45
-rw-r--r--drivers/gpu/nova-core/gpu.rs199
-rw-r--r--drivers/gpu/nova-core/nova_core.rs20
-rw-r--r--drivers/gpu/nova-core/regs.rs55
-rw-r--r--drivers/gpu/nova-core/util.rs21
-rw-r--r--drivers/hid/hid-apple.c11
-rw-r--r--drivers/hid/hid-appleir.c2
-rw-r--r--drivers/hid/hid-corsair-void.c83
-rw-r--r--drivers/hid/hid-debug.c2
-rw-r--r--drivers/hid/hid-google-hammer.c2
-rw-r--r--drivers/hid/hid-nintendo.c14
-rw-r--r--drivers/hid/hid-steam.c2
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.c4
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c2
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c2
-rw-r--r--drivers/hwmon/ad7314.c10
-rw-r--r--drivers/hwmon/ntc_thermistor.c66
-rw-r--r--drivers/hwmon/peci/dimmtemp.c10
-rw-r--r--drivers/hwmon/pmbus/pmbus.c2
-rw-r--r--drivers/hwmon/xgene-hwmon.c2
-rw-r--r--drivers/hwtracing/intel_th/msu.c13
-rw-r--r--drivers/hwtracing/intel_th/pci.c15
-rw-r--r--drivers/i2c/busses/i2c-amd-asf-plat.c1
-rw-r--r--drivers/i2c/busses/i2c-ls2x.c16
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c7
-rw-r--r--drivers/idle/intel_idle.c4
-rw-r--r--drivers/iio/adc/ad7192.c2
-rw-r--r--drivers/iio/adc/ad7606.c2
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c68
-rw-r--r--drivers/iio/adc/pac1921.c2
-rw-r--r--drivers/iio/dac/ad3552r.c6
-rw-r--r--drivers/iio/filter/admv8818.c14
-rw-r--r--drivers/iio/light/apds9306.c4
-rw-r--r--drivers/iio/light/hid-sensor-prox.c7
-rw-r--r--drivers/iio/proximity/hx9023s.c3
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h1
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c22
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c64
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h2
-rw-r--r--drivers/infiniband/hw/mana/main.c2
-rw-r--r--drivers/infiniband/hw/mlx5/ah.c3
-rw-r--r--drivers/infiniband/hw/mlx5/counters.c8
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c16
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c1
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c10
-rw-r--r--drivers/infiniband/hw/mlx5/qp.h1
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c83
-rw-r--r--drivers/iommu/amd/iommu.c4
-rw-r--r--drivers/iommu/intel/dmar.c1
-rw-r--r--drivers/iommu/intel/iommu.c10
-rw-r--r--drivers/md/dm-integrity.c25
-rw-r--r--drivers/md/dm-vdo/dedupe.c1
-rw-r--r--drivers/misc/cardreader/rtsx_usb.c15
-rw-r--r--drivers/misc/eeprom/digsy_mtc_eeprom.c2
-rw-r--r--drivers/misc/mei/hw-me-regs.h2
-rw-r--r--drivers/misc/mei/pci-me.c2
-rw-r--r--drivers/misc/mei/vsc-tp.c2
-rw-r--r--drivers/misc/ntsync.c7
-rw-r--r--drivers/net/caif/caif_virtio.c2
-rw-r--r--drivers/net/dsa/mt7530.c8
-rw-r--r--drivers/net/dsa/realtek/Kconfig6
-rw-r--r--drivers/net/dsa/realtek/Makefile3
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb-leds.c177
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.c258
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.h107
-rw-r--r--drivers/net/ethernet/cadence/macb.h2
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c12
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c197
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c103
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_pf.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c7
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib_private.h1
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c20
-rw-r--r--drivers/net/ethernet/ti/Kconfig1
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c21
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.7.c18
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c21
-rw-r--r--drivers/net/loopback.c14
-rw-r--r--drivers/net/mctp/mctp-i3c.c3
-rw-r--r--drivers/net/netdevsim/ethtool.c2
-rw-r--r--drivers/net/phy/qcom/qca807x.c2
-rw-r--r--drivers/net/ppp/ppp_generic.c28
-rw-r--r--drivers/net/usb/gl620a.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c86
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c20
-rw-r--r--drivers/nvme/host/ioctl.c12
-rw-r--r--drivers/nvme/host/pci.c21
-rw-r--r--drivers/nvme/host/tcp.c45
-rw-r--r--drivers/nvme/target/nvmet.h1
-rw-r--r--drivers/nvme/target/tcp.c15
-rw-r--r--drivers/of/of_reserved_mem.c4
-rw-r--r--drivers/phy/freescale/phy-fsl-samsung-hdmi.c13
-rw-r--r--drivers/phy/rockchip/Kconfig1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-naneng-combphy.c5
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c25
-rw-r--r--drivers/phy/st/phy-stm32-combophy.c38
-rw-r--r--drivers/phy/tegra/xusb-tegra186.c11
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c15
-rw-r--r--drivers/platform/x86/amd/pmf/core.c2
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h5
-rw-r--r--drivers/platform/x86/amd/pmf/sps.c11
-rw-r--r--drivers/platform/x86/amd/pmf/tee-if.c52
-rw-r--r--drivers/platform/x86/intel/hid.c7
-rw-r--r--drivers/platform/x86/intel/vsec.c7
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c1
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c3
-rw-r--r--drivers/rapidio/rio-scan.c5
-rw-r--r--drivers/scsi/scsi_lib.c14
-rw-r--r--drivers/slimbus/messaging.c5
-rw-r--r--drivers/thermal/gov_power_allocator.c32
-rw-r--r--drivers/thermal/thermal_of.c50
-rw-r--r--drivers/ufs/core/ufs_bsg.c6
-rw-r--r--drivers/ufs/core/ufshcd.c38
-rw-r--r--drivers/usb/atm/cxacru.c13
-rw-r--r--drivers/usb/core/hub.c33
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/dwc3/core.c85
-rw-r--r--drivers/usb/dwc3/core.h2
-rw-r--r--drivers/usb/dwc3/drd.c4
-rw-r--r--drivers/usb/dwc3/gadget.c10
-rw-r--r--drivers/usb/gadget/composite.c17
-rw-r--r--drivers/usb/gadget/function/u_ether.c4
-rw-r--r--drivers/usb/host/xhci-hub.c8
-rw-r--r--drivers/usb/host/xhci-mem.c3
-rw-r--r--drivers/usb/host/xhci-pci.c10
-rw-r--r--drivers/usb/host/xhci.c6
-rw-r--r--drivers/usb/host/xhci.h2
-rw-r--r--drivers/usb/renesas_usbhs/common.c6
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c2
-rw-r--r--drivers/usb/typec/tcpm/tcpci_rt1711h.c11
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c25
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h2
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c21
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c1
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c1
-rw-r--r--drivers/usb/typec/ucsi/ucsi_stm32g0.c1
-rw-r--r--drivers/usb/typec/ucsi/ucsi_yoga_c630.c1
-rw-r--r--drivers/vhost/vhost.c2
-rw-r--r--drivers/video/Kconfig1
-rw-r--r--drivers/virt/acrn/hsm.c6
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c58
-rw-r--r--drivers/virt/vboxguest/Kconfig3
-rw-r--r--fs/affs/file.c9
-rw-r--r--fs/afs/server.c3
-rw-r--r--fs/afs/server_list.c4
-rw-r--r--fs/bcachefs/btree_cache.c9
-rw-r--r--fs/bcachefs/btree_io.c2
-rw-r--r--fs/bcachefs/btree_key_cache.c2
-rw-r--r--fs/bcachefs/btree_locking.c5
-rw-r--r--fs/bcachefs/btree_locking.h2
-rw-r--r--fs/bcachefs/data_update.c1
-rw-r--r--fs/bcachefs/dirent.h5
-rw-r--r--fs/bcachefs/extents.h2
-rw-r--r--fs/bcachefs/fs-common.c11
-rw-r--r--fs/bcachefs/fs-io.c1
-rw-r--r--fs/bcachefs/fsck.c21
-rw-r--r--fs/bcachefs/journal.c57
-rw-r--r--fs/bcachefs/movinggc.c25
-rw-r--r--fs/bcachefs/sb-downgrade.c5
-rw-r--r--fs/bcachefs/six.c5
-rw-r--r--fs/bcachefs/six.h7
-rw-r--r--fs/bcachefs/super-io.c24
-rw-r--r--fs/bcachefs/super-io.h11
-rw-r--r--fs/btrfs/extent_map.c83
-rw-r--r--fs/btrfs/file.c9
-rw-r--r--fs/btrfs/inode.c9
-rw-r--r--fs/btrfs/sysfs.c4
-rw-r--r--fs/btrfs/tests/delayed-refs-tests.c1
-rw-r--r--fs/btrfs/volumes.c7
-rw-r--r--fs/coredump.c15
-rw-r--r--fs/efivarfs/file.c10
-rw-r--r--fs/efivarfs/super.c3
-rw-r--r--fs/exfat/balloc.c10
-rw-r--r--fs/exfat/exfat_fs.h2
-rw-r--r--fs/exfat/fatent.c11
-rw-r--r--fs/exfat/file.c2
-rw-r--r--fs/exfat/namei.c7
-rw-r--r--fs/fuse/dev.c21
-rw-r--r--fs/fuse/dir.c2
-rw-r--r--fs/fuse/file.c13
-rw-r--r--fs/iomap/direct-io.c8
-rw-r--r--fs/namei.c24
-rw-r--r--fs/nfs/delegation.c37
-rw-r--r--fs/nfs/delegation.h1
-rw-r--r--fs/nfs/direct.c23
-rw-r--r--fs/nfs/file.c3
-rw-r--r--fs/nfs/nfs4proc.c10
-rw-r--r--fs/nsfs.c1
-rw-r--r--fs/overlayfs/copy_up.c2
-rw-r--r--fs/pidfs.c1
-rw-r--r--fs/pipe.c32
-rw-r--r--fs/smb/client/cifsacl.c34
-rw-r--r--fs/smb/client/cifssmb.c3
-rw-r--r--fs/smb/common/smbacl.h3
-rw-r--r--fs/smb/server/smb2pdu.c8
-rw-r--r--fs/smb/server/smbacl.c52
-rw-r--r--fs/smb/server/smbacl.h2
-rw-r--r--fs/smb/server/transport_ipc.c1
-rw-r--r--fs/splice.c20
-rw-r--r--fs/xfs/xfs_buf.c182
-rw-r--r--fs/xfs/xfs_buf.h7
-rw-r--r--fs/xfs/xfs_buf_mem.c2
-rw-r--r--fs/xfs/xfs_log_recover.c2
-rw-r--r--fs/xfs/xfs_mount.c7
-rw-r--r--fs/xfs/xfs_rtalloc.c2
-rw-r--r--fs/xfs/xfs_trace.h1
-rw-r--r--include/asm-generic/hugetlb.h2
-rw-r--r--include/asm-generic/vmlinux.lds.h2
-rw-r--r--include/drm/drm_client.h8
-rw-r--r--include/drm/drm_damage_helper.h2
-rw-r--r--include/drm/drm_fb_helper.h44
-rw-r--r--include/drm/drm_gpusvm.h509
-rw-r--r--include/drm/drm_gpuvm.h5
-rw-r--r--include/drm/drm_pagemap.h107
-rw-r--r--include/dt-bindings/clock/qcom,dsi-phy-28nm.h9
-rw-r--r--include/linux/blk-mq.h2
-rw-r--r--include/linux/blkdev.h8
-rw-r--r--include/linux/call_once.h47
-rw-r--r--include/linux/compaction.h5
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/cred.h10
-rw-r--r--include/linux/fs.h6
-rw-r--r--include/linux/hugetlb.h9
-rw-r--r--include/linux/log2.h2
-rw-r--r--include/linux/migrate.h1
-rw-r--r--include/linux/nfs4.h1
-rw-r--r--include/linux/pipe_fs_i.h88
-rw-r--r--include/linux/platform_profile.h3
-rw-r--r--include/linux/rcuref.h9
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/socket.h2
-rw-r--r--include/linux/sunrpc/sched.h3
-rw-r--r--include/net/sock.h1
-rw-r--r--include/sound/cs35l56.h31
-rw-r--r--include/trace/events/afs.h2
-rw-r--r--include/trace/events/sunrpc.h3
-rw-r--r--include/uapi/drm/xe_drm.h117
-rw-r--r--include/uapi/linux/landlock.h8
-rw-r--r--io_uring/net.c4
-rw-r--r--io_uring/rw.c7
-rw-r--r--kernel/events/core.c63
-rw-r--r--kernel/events/uprobes.c15
-rw-r--r--kernel/pid_namespace.c2
-rw-r--r--kernel/sched/core.c2
-rw-r--r--kernel/sched/deadline.c2
-rw-r--r--kernel/sched/ext.c11
-rw-r--r--kernel/sched/fair.c6
-rw-r--r--kernel/trace/ftrace.c27
-rw-r--r--kernel/trace/trace_events_hist.c30
-rw-r--r--kernel/trace/trace_fprobe.c20
-rw-r--r--kernel/trace/trace_probe.h5
-rw-r--r--kernel/vhost_task.c4
-rw-r--r--kernel/workqueue.c4
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/rcuref.c5
-rw-r--r--mm/compaction.c3
-rw-r--r--mm/filemap.c9
-rw-r--r--mm/hugetlb.c12
-rw-r--r--mm/internal.h5
-rw-r--r--mm/kmsan/hooks.c1
-rw-r--r--mm/memory-failure.c63
-rw-r--r--mm/memory.c34
-rw-r--r--mm/memory_hotplug.c26
-rw-r--r--mm/migrate_device.c116
-rw-r--r--mm/page_alloc.c4
-rw-r--r--mm/page_isolation.c10
-rw-r--r--mm/shmem.c39
-rw-r--r--mm/slab_common.c14
-rw-r--r--mm/swapfile.c12
-rw-r--r--mm/truncate.c2
-rw-r--r--mm/userfaultfd.c107
-rw-r--r--mm/vma.c12
-rw-r--r--mm/vmalloc.c4
-rw-r--r--mm/zswap.c2
-rw-r--r--net/8021q/vlan.c3
-rw-r--r--net/bluetooth/l2cap_core.c9
-rw-r--r--net/bluetooth/mgmt.c5
-rw-r--r--net/core/dev.c14
-rw-r--r--net/core/gro.c1
-rw-r--r--net/core/scm.c10
-rw-r--r--net/core/skbuff.c2
-rw-r--r--net/core/sock.c27
-rw-r--r--net/core/sysctl_net_core.c3
-rw-r--r--net/ethtool/cabletest.c8
-rw-r--r--net/ethtool/common.c16
-rw-r--r--net/ethtool/common.h6
-rw-r--r--net/ethtool/ioctl.c4
-rw-r--r--net/ethtool/linkstate.c2
-rw-r--r--net/ethtool/netlink.c6
-rw-r--r--net/ethtool/netlink.h5
-rw-r--r--net/ethtool/phy.c2
-rw-r--r--net/ethtool/plca.c6
-rw-r--r--net/ethtool/pse-pd.c4
-rw-r--r--net/ethtool/rings.c9
-rw-r--r--net/ethtool/stats.c2
-rw-r--r--net/ethtool/strset.c2
-rw-r--r--net/ipv4/tcp.c26
-rw-r--r--net/ipv4/tcp_minisocks.c10
-rw-r--r--net/ipv4/tcp_offload.c11
-rw-r--r--net/ipv4/udp_offload.c8
-rw-r--r--net/ipv6/ila/ila_lwt.c4
-rw-r--r--net/ipv6/rpl_iptunnel.c14
-rw-r--r--net/ipv6/seg6_iptunnel.c14
-rw-r--r--net/llc/llc_s_ac.c49
-rw-r--r--net/mac80211/driver-ops.c10
-rw-r--r--net/mac80211/iface.c11
-rw-r--r--net/mac80211/mlme.c1
-rw-r--r--net/mac80211/parse.c135
-rw-r--r--net/mac80211/util.c5
-rw-r--r--net/mptcp/pm_netlink.c23
-rw-r--r--net/mptcp/protocol.h2
-rw-r--r--net/mptcp/subflow.c20
-rw-r--r--net/netlink/af_netlink.c10
-rw-r--r--net/rds/tcp.c8
-rw-r--r--net/rxrpc/ar-internal.h1
-rw-r--r--net/rxrpc/input.c2
-rw-r--r--net/rxrpc/peer_event.c9
-rw-r--r--net/rxrpc/peer_object.c5
-rw-r--r--net/rxrpc/rxperf.c12
-rw-r--r--net/smc/af_smc.c5
-rw-r--r--net/sunrpc/cache.c10
-rw-r--r--net/sunrpc/sched.c2
-rw-r--r--net/sunrpc/svcsock.c5
-rw-r--r--net/sunrpc/xprtsock.c18
-rw-r--r--net/unix/af_unix.c1
-rw-r--r--net/wireless/nl80211.c7
-rw-r--r--net/wireless/reg.c3
-rw-r--r--rust/kernel/firmware.rs216
-rw-r--r--rust/macros/module.rs4
-rwxr-xr-xscripts/package/install-extmod-build2
-rw-r--r--security/integrity/evm/evm_crypto.c2
-rw-r--r--security/integrity/evm/evm_main.c2
-rw-r--r--security/integrity/ima/ima.h3
-rw-r--r--security/integrity/ima/ima_main.c13
-rw-r--r--security/landlock/net.c3
-rw-r--r--security/landlock/ruleset.c2
-rw-r--r--sound/core/seq/seq_clientmgr.c46
-rw-r--r--sound/pci/hda/Kconfig1
-rw-r--r--sound/pci/hda/cs35l56_hda_spi.c3
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/hda/patch_realtek.c130
-rw-r--r--sound/soc/codecs/cs35l56-shared.c80
-rw-r--r--sound/soc/codecs/cs35l56-spi.c3
-rw-r--r--sound/soc/codecs/es8328.c15
-rw-r--r--sound/soc/codecs/tas2764.c10
-rw-r--r--sound/soc/codecs/tas2764.h8
-rw-r--r--sound/soc/codecs/tas2770.c2
-rw-r--r--sound/soc/fsl/fsl_sai.c6
-rw-r--r--sound/soc/fsl/imx-audmix.c4
-rw-r--r--sound/soc/intel/boards/sof_sdw.c7
-rw-r--r--sound/soc/sof/intel/hda.c18
-rw-r--r--sound/usb/midi.c2
-rw-r--r--sound/usb/quirks.c1
-rw-r--r--sound/usb/usx2y/usbusx2y.c11
-rw-r--r--sound/usb/usx2y/usbusx2y.h26
-rw-r--r--sound/usb/usx2y/usbusx2yaudio.c27
-rw-r--r--tools/arch/arm64/tools/Makefile6
-rw-r--r--tools/bpf/Makefile6
-rw-r--r--tools/bpf/bpftool/Documentation/Makefile6
-rw-r--r--tools/bpf/bpftool/Makefile6
-rw-r--r--tools/bpf/resolve_btfids/Makefile2
-rw-r--r--tools/bpf/runqslower/Makefile5
-rw-r--r--tools/build/Makefile8
-rw-r--r--tools/lib/bpf/Makefile13
-rw-r--r--tools/lib/perf/Makefile13
-rw-r--r--tools/lib/thermal/Makefile13
-rw-r--r--tools/objtool/Makefile6
-rw-r--r--tools/objtool/check.c7
-rw-r--r--tools/objtool/include/objtool/special.h2
-rw-r--r--tools/objtool/noreturns.h2
-rw-r--r--tools/perf/Makefile.perf41
-rw-r--r--tools/scripts/Makefile.include30
-rwxr-xr-xtools/sound/dapm-graph2
-rw-r--r--tools/testing/selftests/bpf/Makefile.docs6
-rwxr-xr-xtools/testing/selftests/damon/damon_nr_regions.py2
-rwxr-xr-xtools/testing/selftests/damon/damos_quota.py9
-rwxr-xr-xtools/testing/selftests/damon/damos_quota_goal.py3
-rwxr-xr-xtools/testing/selftests/drivers/net/hds.py145
-rwxr-xr-xtools/testing/selftests/drivers/net/queues.py7
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc18
-rw-r--r--tools/testing/selftests/hid/Makefile2
-rw-r--r--tools/testing/selftests/kvm/mmu_stress_test.c21
-rw-r--r--tools/testing/selftests/kvm/x86/nested_exceptions_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86/sev_smoke_test.c3
-rw-r--r--tools/testing/selftests/landlock/.gitignore2
-rw-r--r--tools/testing/selftests/landlock/common.h1
-rw-r--r--tools/testing/selftests/landlock/config3
-rw-r--r--tools/testing/selftests/landlock/net_test.c124
-rw-r--r--tools/testing/selftests/mm/hugepage-mremap.c2
-rw-r--r--tools/testing/selftests/mm/ksm_functional_tests.c8
-rw-r--r--tools/testing/selftests/mm/memfd_secret.c14
-rw-r--r--tools/testing/selftests/mm/mkdirty.c8
-rw-r--r--tools/testing/selftests/mm/mlock2.h1
-rw-r--r--tools/testing/selftests/mm/protection_keys.c2
-rw-r--r--tools/testing/selftests/mm/uffd-common.c4
-rw-r--r--tools/testing/selftests/mm/uffd-stress.c15
-rw-r--r--tools/testing/selftests/mm/uffd-unit-tests.c14
-rw-r--r--tools/testing/selftests/net/lib/Makefile3
-rw-r--r--tools/testing/selftests/net/lib/xdp_dummy.bpf.c13
-rw-r--r--tools/testing/selftests/rseq/rseq-riscv-bits.h6
-rw-r--r--tools/testing/selftests/rseq/rseq-riscv.h2
-rw-r--r--tools/testing/selftests/vDSO/parse_vdso.c10
-rw-r--r--tools/thermal/lib/Makefile13
-rw-r--r--tools/tracing/latency/Makefile6
-rw-r--r--tools/tracing/rtla/Makefile6
-rw-r--r--tools/verification/rv/Makefile6
-rw-r--r--usr/include/Makefile2
904 files changed, 20053 insertions, 7450 deletions
diff --git a/.mailmap b/.mailmap
index a4072c213b20..b1b709230db8 100644
--- a/.mailmap
+++ b/.mailmap
@@ -88,7 +88,6 @@ Antonio Quartulli <antonio@mandelbit.com> <antonio@open-mesh.com>
Antonio Quartulli <antonio@mandelbit.com> <antonio.quartulli@open-mesh.com>
Antonio Quartulli <antonio@mandelbit.com> <ordex@autistici.org>
Antonio Quartulli <antonio@mandelbit.com> <ordex@ritirata.org>
-Antonio Quartulli <antonio@mandelbit.com> <antonio@openvpn.net>
Antonio Quartulli <antonio@mandelbit.com> <a@unstable.cc>
Anup Patel <anup@brainfault.org> <anup.patel@wdc.com>
Archit Taneja <archit@ti.com>
@@ -200,10 +199,11 @@ Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@mips.com>
<dev.kurt@vandijck-laurijssen.be> <kurt.van.dijck@eia.be>
Dikshita Agarwal <quic_dikshita@quicinc.com> <dikshita@codeaurora.org>
-Dmitry Baryshkov <dbaryshkov@gmail.com>
-Dmitry Baryshkov <dbaryshkov@gmail.com> <[dbaryshkov@gmail.com]>
-Dmitry Baryshkov <dbaryshkov@gmail.com> <dmitry_baryshkov@mentor.com>
-Dmitry Baryshkov <dbaryshkov@gmail.com> <dmitry_eremin@mentor.com>
+Dmitry Baryshkov <lumag@kernel.org> <dbaryshkov@gmail.com>
+Dmitry Baryshkov <lumag@kernel.org> <[dbaryshkov@gmail.com]>
+Dmitry Baryshkov <lumag@kernel.org> <dmitry_baryshkov@mentor.com>
+Dmitry Baryshkov <lumag@kernel.org> <dmitry_eremin@mentor.com>
+Dmitry Baryshkov <lumag@kernel.org> <dmitry.baryshkov@linaro.org>
Dmitry Safonov <0x7f454c46@gmail.com> <dima@arista.com>
Dmitry Safonov <0x7f454c46@gmail.com> <d.safonov@partner.samsung.com>
Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com>
@@ -523,6 +523,7 @@ Nadav Amit <nadav.amit@gmail.com> <namit@cs.technion.ac.il>
Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy.com>
Naoya Horiguchi <nao.horiguchi@gmail.com> <n-horiguchi@ah.jp.nec.com>
Naoya Horiguchi <nao.horiguchi@gmail.com> <naoya.horiguchi@nec.com>
+Natalie Vock <natalie.vock@gmx.de> <friedrich.vock@gmx.de>
Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
Naveen N Rao <naveen@kernel.org> <naveen.n.rao@linux.ibm.com>
Naveen N Rao <naveen@kernel.org> <naveen.n.rao@linux.vnet.ibm.com>
@@ -692,6 +693,7 @@ Subbaraman Narayanamurthy <quic_subbaram@quicinc.com> <subbaram@codeaurora.org>
Subhash Jadavani <subhashj@codeaurora.org>
Sudarshan Rajagopalan <quic_sudaraja@quicinc.com> <sudaraja@codeaurora.org>
Sudeep Holla <sudeep.holla@arm.com> Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+Sumit Garg <sumit.garg@kernel.org> <sumit.garg@linaro.org>
Sumit Semwal <sumit.semwal@ti.com>
Surabhi Vishnoi <quic_svishnoi@quicinc.com> <svishnoi@codeaurora.org>
Sven Eckelmann <sven@narfation.org> <seckelmann@datto.com>
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index eb9452668909..b557cf1c820d 100644
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -176,7 +176,7 @@ Configuring the kernel
values without prompting.
"make defconfig" Create a ./.config file by using the default
- symbol values from either arch/$ARCH/defconfig
+ symbol values from either arch/$ARCH/configs/defconfig
or arch/$ARCH/configs/${PLATFORM}_defconfig,
depending on the architecture.
diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
index a43b78b4b646..dd49a89a62d3 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
@@ -212,6 +212,17 @@ pid>/``).
This value defaults to 0.
+core_sort_vma
+=============
+
+The default coredump writes VMAs in address order. By setting
+``core_sort_vma`` to 1, VMAs will be written from smallest size
+to largest size. This is known to break at least elfutils, but
+can be handy when dealing with very large (and truncated)
+coredumps where the more useful debugging details are included
+in the smaller VMAs.
+
+
core_uses_pid
=============
diff --git a/Documentation/arch/powerpc/cxl.rst b/Documentation/arch/powerpc/cxl.rst
index d2d77057610e..778adda740d2 100644
--- a/Documentation/arch/powerpc/cxl.rst
+++ b/Documentation/arch/powerpc/cxl.rst
@@ -18,6 +18,7 @@ Introduction
both access system memory directly and with the same effective
addresses.
+ **This driver is deprecated and will be removed in a future release.**
Hardware overview
=================
@@ -453,7 +454,7 @@ Sysfs Class
A cxl sysfs class is added under /sys/class/cxl to facilitate
enumeration and tuning of the accelerators. Its layout is
- described in Documentation/ABI/testing/sysfs-class-cxl
+ described in Documentation/ABI/obsolete/sysfs-class-cxl
Udev rules
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml
index 0f1e556dc8ef..b659d79393a8 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml
@@ -27,6 +27,7 @@ properties:
- mediatek,mt8188-dp-intf
- mediatek,mt8192-dpi
- mediatek,mt8195-dp-intf
+ - mediatek,mt8195-dpi
- items:
- enum:
- mediatek,mt6795-dpi
@@ -35,6 +36,10 @@ properties:
- enum:
- mediatek,mt8365-dpi
- const: mediatek,mt8192-dpi
+ - items:
+ - enum:
+ - mediatek,mt8188-dpi
+ - const: mediatek,mt8195-dpi
reg:
maxItems: 1
@@ -116,11 +121,13 @@ examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/mt8173-clk.h>
+ #include <dt-bindings/power/mt8173-power.h>
dpi: dpi@1401d000 {
compatible = "mediatek,mt8173-dpi";
reg = <0x1401d000 0x1000>;
interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&spm MT8173_POWER_DOMAIN_MM>;
clocks = <&mmsys CLK_MM_DPI_PIXEL>,
<&mmsys CLK_MM_DPI_ENGINE>,
<&apmixedsys CLK_APMIXED_TVDPLL>;
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsc.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsc.yaml
index 846de6c17d93..a5b88eb97e3b 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsc.yaml
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsc.yaml
@@ -22,6 +22,9 @@ properties:
oneOf:
- enum:
- mediatek,mt8195-disp-dsc
+ - items:
+ - const: mediatek,mt8188-disp-dsc
+ - const: mediatek,mt8195-disp-dsc
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
index ffbd1dc9470e..2aab33cd0017 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
@@ -231,6 +231,7 @@ allOf:
then:
properties:
clocks:
+ minItems: 7
maxItems: 7
clock-names:
items:
@@ -248,29 +249,12 @@ allOf:
contains:
enum:
- qcom,msm8916-dsi-ctrl
- then:
- properties:
- clocks:
- maxItems: 6
- clock-names:
- items:
- - const: mdp_core
- - const: iface
- - const: bus
- - const: byte
- - const: pixel
- - const: core
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- qcom,msm8953-dsi-ctrl
- qcom,msm8976-dsi-ctrl
then:
properties:
clocks:
+ minItems: 6
maxItems: 6
clock-names:
items:
@@ -291,6 +275,7 @@ allOf:
then:
properties:
clocks:
+ minItems: 7
maxItems: 7
clock-names:
items:
@@ -311,6 +296,7 @@ allOf:
then:
properties:
clocks:
+ minItems: 7
maxItems: 7
clock-names:
items:
@@ -328,28 +314,13 @@ allOf:
contains:
enum:
- qcom,msm8998-dsi-ctrl
- - qcom,sm6125-dsi-ctrl
- - qcom,sm6350-dsi-ctrl
- then:
- properties:
- clocks:
- maxItems: 6
- clock-names:
- items:
- - const: byte
- - const: byte_intf
- - const: pixel
- - const: core
- - const: iface
- - const: bus
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- qcom,sc7180-dsi-ctrl
- qcom,sc7280-dsi-ctrl
+ - qcom,sdm845-dsi-ctrl
+ - qcom,sm6115-dsi-ctrl
+ - qcom,sm6125-dsi-ctrl
+ - qcom,sm6350-dsi-ctrl
+ - qcom,sm6375-dsi-ctrl
- qcom,sm6150-dsi-ctrl
- qcom,sm7150-dsi-ctrl
- qcom,sm8150-dsi-ctrl
@@ -361,6 +332,7 @@ allOf:
then:
properties:
clocks:
+ minItems: 6
maxItems: 6
clock-names:
items:
@@ -380,6 +352,7 @@ allOf:
then:
properties:
clocks:
+ minItems: 9
maxItems: 9
clock-names:
items:
@@ -393,27 +366,6 @@ allOf:
- const: pixel
- const: core
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,sdm845-dsi-ctrl
- - qcom,sm6115-dsi-ctrl
- - qcom,sm6375-dsi-ctrl
- then:
- properties:
- clocks:
- maxItems: 6
- clock-names:
- items:
- - const: byte
- - const: byte_intf
- - const: pixel
- - const: core
- - const: iface
- - const: bus
-
unevaluatedProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-common.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-common.yaml
index 6b57ce41c95f..d0ce85a08b6d 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi-phy-common.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dsi-phy-common.yaml
@@ -15,6 +15,8 @@ description:
properties:
"#clock-cells":
const: 1
+ description:
+ See include/dt-bindings/clock/qcom,dsi-phy-28nm.h for clock IDs.
"#phy-cells":
const: 0
diff --git a/Documentation/devicetree/bindings/display/msm/gmu.yaml b/Documentation/devicetree/bindings/display/msm/gmu.yaml
index ab884e236429..4392aa7a4ffe 100644
--- a/Documentation/devicetree/bindings/display/msm/gmu.yaml
+++ b/Documentation/devicetree/bindings/display/msm/gmu.yaml
@@ -123,6 +123,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,adreno-gmu-623.0
- qcom,adreno-gmu-635.0
- qcom,adreno-gmu-660.1
- qcom,adreno-gmu-663.0
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml
index a90a8b3f1a9e..5fac3e266703 100644
--- a/Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml
+++ b/Documentation/devicetree/bindings/display/msm/qcom,sa8775p-mdss.yaml
@@ -52,6 +52,13 @@ patternProperties:
items:
- const: qcom,sa8775p-dp
+ "^phy@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+ properties:
+ compatible:
+ const: qcom,sa8775p-edp-phy
+
required:
- compatible
@@ -61,6 +68,7 @@ examples:
- |
#include <dt-bindings/interconnect/qcom,icc.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/qcom,sa8775p-dispcc.h>
#include <dt-bindings/clock/qcom,sa8775p-gcc.h>
#include <dt-bindings/interconnect/qcom,sa8775p-rpmh.h>
#include <dt-bindings/power/qcom,rpmhpd.h>
@@ -158,6 +166,26 @@ examples:
};
};
+ mdss0_dp0_phy: phy@aec2a00 {
+ compatible = "qcom,sa8775p-edp-phy";
+
+ reg = <0x0aec2a00 0x200>,
+ <0x0aec2200 0xd0>,
+ <0x0aec2600 0xd0>,
+ <0x0aec2000 0x1c8>;
+
+ clocks = <&dispcc0 MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK>,
+ <&dispcc0 MDSS_DISP_CC_MDSS_AHB_CLK>;
+ clock-names = "aux",
+ "cfg_ahb";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ vdda-phy-supply = <&vreg_l1c>;
+ vdda-pll-supply = <&vreg_l4a>;
+ };
+
displayport-controller@af54000 {
compatible = "qcom,sa8775p-dp";
@@ -186,9 +214,9 @@ examples:
assigned-clocks = <&dispcc_mdss_dptx0_link_clk_src>,
<&dispcc_mdss_dptx0_pixel0_clk_src>;
- assigned-clock-parents = <&mdss0_edp_phy 0>, <&mdss0_edp_phy 1>;
+ assigned-clock-parents = <&mdss0_dp0_phy 0>, <&mdss0_dp0_phy 1>;
- phys = <&mdss0_edp_phy>;
+ phys = <&mdss0_dp0_phy>;
phy-names = "dp";
operating-points-v2 = <&dp_opp_table>;
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8550-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8550-mdss.yaml
index 1ea50a2c7c8e..59192c59ddb9 100644
--- a/Documentation/devicetree/bindings/display/msm/qcom,sm8550-mdss.yaml
+++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8550-mdss.yaml
@@ -30,10 +30,14 @@ properties:
maxItems: 1
interconnects:
- maxItems: 2
+ items:
+ - description: Interconnect path from mdp0 port to the data bus
+ - description: Interconnect path from CPU to the reg bus
interconnect-names:
- maxItems: 2
+ items:
+ - const: mdp0-mem
+ - const: cpu-cfg
patternProperties:
"^display-controller@[0-9a-f]+$":
@@ -91,9 +95,9 @@ examples:
reg = <0x0ae00000 0x1000>;
reg-names = "mdss";
- interconnects = <&mmss_noc MASTER_MDP 0 &gem_noc SLAVE_LLCC 0>,
- <&mc_virt MASTER_LLCC 0 &mc_virt SLAVE_EBI1 0>;
- interconnect-names = "mdp0-mem", "mdp1-mem";
+ interconnects = <&mmss_noc MASTER_MDP 0 &mc_virt SLAVE_EBI1 0>,
+ <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_DISPLAY_CFG 0>;
+ interconnect-names = "mdp0-mem", "cpu-cfg";
resets = <&dispcc DISP_CC_MDSS_CORE_BCR>;
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8650-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8650-mdss.yaml
index 24cece1e888b..a1c53e191033 100644
--- a/Documentation/devicetree/bindings/display/msm/qcom,sm8650-mdss.yaml
+++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8650-mdss.yaml
@@ -29,10 +29,14 @@ properties:
maxItems: 1
interconnects:
- maxItems: 2
+ items:
+ - description: Interconnect path from mdp0 port to the data bus
+ - description: Interconnect path from CPU to the reg bus
interconnect-names:
- maxItems: 2
+ items:
+ - const: mdp0-mem
+ - const: cpu-cfg
patternProperties:
"^display-controller@[0-9a-f]+$":
@@ -75,12 +79,17 @@ examples:
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/qcom,rpmhpd.h>
+ #include <dt-bindings/interconnect/qcom,sm8650-rpmh.h>
display-subsystem@ae00000 {
compatible = "qcom,sm8650-mdss";
reg = <0x0ae00000 0x1000>;
reg-names = "mdss";
+ interconnects = <&mmss_noc MASTER_MDP 0 &mc_virt SLAVE_EBI1 0>,
+ <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_DISPLAY_CFG 0>;
+ interconnect-names = "mdp0-mem", "cpu-cfg";
+
resets = <&dispcc_core_bcr>;
power-domains = <&dispcc_gdsc>;
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
index ab5881d0d017..52d3f1ce3367 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
@@ -146,6 +146,7 @@ properties:
maxItems: 2
pwm-names:
+ minItems: 1
items:
- const: convst1
- const: convst2
diff --git a/Documentation/filesystems/idmappings.rst b/Documentation/filesystems/idmappings.rst
index 77930c77fcfe..2a206129f828 100644
--- a/Documentation/filesystems/idmappings.rst
+++ b/Documentation/filesystems/idmappings.rst
@@ -63,8 +63,8 @@ what id ``k11000`` corresponds to in the second or third idmapping. The
straightforward algorithm to use is to apply the inverse of the first idmapping,
mapping ``k11000`` up to ``u1000``. Afterwards, we can map ``u1000`` down using
either the second idmapping mapping or third idmapping mapping. The second
-idmapping would map ``u1000`` down to ``21000``. The third idmapping would map
-``u1000`` down to ``u31000``.
+idmapping would map ``u1000`` down to ``k21000``. The third idmapping would map
+``u1000`` down to ``k31000``.
If we were given the same task for the following three idmappings::
diff --git a/Documentation/gpu/drivers.rst b/Documentation/gpu/drivers.rst
index 74fc2cbf1b6f..78b80be17f21 100644
--- a/Documentation/gpu/drivers.rst
+++ b/Documentation/gpu/drivers.rst
@@ -25,6 +25,7 @@ GPU Driver Documentation
panfrost
panthor
zynqmp
+ nova/index
.. only:: subproject and html
diff --git a/Documentation/gpu/nova/core/guidelines.rst b/Documentation/gpu/nova/core/guidelines.rst
new file mode 100644
index 000000000000..a389d65d7982
--- /dev/null
+++ b/Documentation/gpu/nova/core/guidelines.rst
@@ -0,0 +1,24 @@
+.. SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+==========
+Guidelines
+==========
+
+This document contains the guidelines for nova-core. Additionally, all common
+guidelines of the Nova project do apply.
+
+Driver API
+==========
+
+One main purpose of nova-core is to implement the abstraction around the
+firmware interface of GSP and provide a firmware (version) independent API for
+2nd level drivers, such as nova-drm or the vGPU manager VFIO driver.
+
+Therefore, it is not permitted to leak firmware (version) specifics through the
+driver API to 2nd level drivers.
+
+Acceptance Criteria
+===================
+
+- To the extent possible, patches submitted to nova-core must be tested for
+ regressions with all 2nd level drivers.
diff --git a/Documentation/gpu/nova/core/todo.rst b/Documentation/gpu/nova/core/todo.rst
new file mode 100644
index 000000000000..ca08377d3b73
--- /dev/null
+++ b/Documentation/gpu/nova/core/todo.rst
@@ -0,0 +1,446 @@
+.. SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+=========
+Task List
+=========
+
+Tasks may have the following fields:
+
+- ``Complexity``: Describes the required familiarity with Rust and/or the
+  corresponding kernel APIs or subsystems. There are four different complexities:
+ ``Beginner``, ``Intermediate``, ``Advanced`` and ``Expert``.
+- ``Reference``: References to other tasks.
+- ``Link``: Links to external resources.
+- ``Contact``: The person that can be contacted for further information about
+ the task.
+
+Enablement (Rust)
+=================
+
+Tasks that are not directly related to nova-core, but are preconditions in terms
+of required APIs.
+
+FromPrimitive API
+-----------------
+
+Sometimes the need arises to convert a number to a value of an enum or a
+structure.
+
+A good example from nova-core would be the ``Chipset`` enum type, which defines
+the value ``AD102``. When probing the GPU, the value ``0x192`` can be read from
+a certain register, indicating the chipset AD102. Hence, the enum value ``AD102``
+should be derived from the number ``0x192``. Currently, nova-core uses a custom
+implementation (``Chipset::from_u32``) for this.
+
+Instead, it would be desirable to have something like the ``FromPrimitive``
+trait [1] from the num crate.
+
+Having this generalization also helps with implementing a generic macro that
+automatically generates the corresponding mappings between a value and a number.
+
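+As a purely illustrative sketch (the trait below is not an existing kernel API,
+and the names are placeholders), such a conversion could look like this:
+
+.. code-block:: rust
+
+	/// Hypothetical trait, loosely modeled after the num crate's `FromPrimitive`.
+	trait FromPrimitive: Sized {
+	    fn from_u32(value: u32) -> Option<Self>;
+	}
+
+	enum Chipset {
+	    AD102 = 0x192,
+	}
+
+	// What a generic macro could expand to for `Chipset`.
+	impl FromPrimitive for Chipset {
+	    fn from_u32(value: u32) -> Option<Self> {
+	        match value {
+	            0x192 => Some(Chipset::AD102),
+	            _ => None,
+	        }
+	    }
+	}
+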
+| Complexity: Beginner
+| Link: https://docs.rs/num/latest/num/trait.FromPrimitive.html
+
+Generic register abstraction
+----------------------------
+
+Work out how register constants and structures can be automatically generated
+through generalized macros.
+
+Example:
+
+.. code-block:: rust
+
+ register!(BOOT0, 0x0, u32, pci::Bar<SIZE>, Fields [
+ MINOR_REVISION(3:0, RO),
+ MAJOR_REVISION(7:4, RO),
+ REVISION(7:0, RO), // Virtual register combining major and minor rev.
+ ])
+
+This could expand to something like:
+
+.. code-block:: rust
+
+ const BOOT0_OFFSET: usize = 0x00000000;
+ const BOOT0_MINOR_REVISION_SHIFT: u8 = 0;
+ const BOOT0_MINOR_REVISION_MASK: u32 = 0x0000000f;
+ const BOOT0_MAJOR_REVISION_SHIFT: u8 = 4;
+ const BOOT0_MAJOR_REVISION_MASK: u32 = 0x000000f0;
+ const BOOT0_REVISION_SHIFT: u8 = BOOT0_MINOR_REVISION_SHIFT;
+ const BOOT0_REVISION_MASK: u32 = BOOT0_MINOR_REVISION_MASK | BOOT0_MAJOR_REVISION_MASK;
+
+ struct Boot0(u32);
+
+ impl Boot0 {
+ #[inline]
+ fn read(bar: &RevocableGuard<'_, pci::Bar<SIZE>>) -> Self {
+ Self(bar.readl(BOOT0_OFFSET))
+ }
+
+ #[inline]
+ fn minor_revision(&self) -> u32 {
+ (self.0 & BOOT0_MINOR_REVISION_MASK) >> BOOT0_MINOR_REVISION_SHIFT
+ }
+
+ #[inline]
+ fn major_revision(&self) -> u32 {
+ (self.0 & BOOT0_MAJOR_REVISION_MASK) >> BOOT0_MAJOR_REVISION_SHIFT
+ }
+
+ #[inline]
+ fn revision(&self) -> u32 {
+ (self.0 & BOOT0_REVISION_MASK) >> BOOT0_REVISION_SHIFT
+ }
+ }
+
+Usage:
+
+.. code-block:: rust
+
+ let bar = bar.try_access().ok_or(ENXIO)?;
+
+ let boot0 = Boot0::read(&bar);
+ pr_info!("Revision: {}\n", boot0.revision());
+
+| Complexity: Advanced
+
+Delay / Sleep abstractions
+--------------------------
+
+Rust abstractions for the kernel's delay() and sleep() functions.
+
+FUJITA Tomonori plans to work on abstractions for read_poll_timeout_atomic()
+(and friends) [1].
+
+| Complexity: Beginner
+| Link: https://lore.kernel.org/netdev/20250228.080550.354359820929821928.fujita.tomonori@gmail.com/ [1]
+
+IRQ abstractions
+----------------
+
+Rust abstractions for IRQ handling.
+
+There is active ongoing work from Daniel Almeida [1] for the "core" abstractions
+to request IRQs.
+
+Besides optional review and testing work, the required ``pci::Device`` code
+around those core abstractions needs to be worked out.
+
+| Complexity: Intermediate
+| Link: https://lore.kernel.org/lkml/20250122163932.46697-1-daniel.almeida@collabora.com/ [1]
+| Contact: Daniel Almeida
+
+Page abstraction for foreign pages
+----------------------------------
+
+Rust abstractions for pages that were not created through the Rust page
+abstraction, i.e. pages the abstraction does not directly own.
+
+There is active ongoing work from Abdiel Janulgue [1] and Lina [2].
+
+| Complexity: Advanced
+| Link: https://lore.kernel.org/linux-mm/20241119112408.779243-1-abdiel.janulgue@gmail.com/ [1]
+| Link: https://lore.kernel.org/rust-for-linux/20250202-rust-page-v1-0-e3170d7fe55e@asahilina.net/ [2]
+
+Scatterlist / sg_table abstractions
+-----------------------------------
+
+Rust abstractions for scatterlist / sg_table.
+
+There is preceding work from Abdiel Janulgue, which hasn't made it to the
+mailing list yet.
+
+| Complexity: Intermediate
+| Contact: Abdiel Janulgue
+
+ELF utils
+---------
+
+Rust implementation of an ELF header representation to retrieve section header
+tables, names, and data from ELF-formatted images.
+
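+A minimal sketch, assuming a little-endian ELF64 image and plain slice helpers
+rather than the pending (unpublished) code, of how the section header table
+could be located:
+
+.. code-block:: rust
+
+	/// Return (offset, entry size, entry count) of the section header table.
+	fn section_header_table(elf: &[u8]) -> Option<(u64, u16, u16)> {
+	    // ELF magic: 0x7f 'E' 'L' 'F'
+	    if elf.get(0..4)? != b"\x7fELF" {
+	        return None;
+	    }
+
+	    // Fixed offsets into the ELF64 header.
+	    let e_shoff = u64::from_le_bytes(elf.get(0x28..0x30)?.try_into().ok()?);
+	    let e_shentsize = u16::from_le_bytes(elf.get(0x3a..0x3c)?.try_into().ok()?);
+	    let e_shnum = u16::from_le_bytes(elf.get(0x3c..0x3e)?.try_into().ok()?);
+
+	    Some((e_shoff, e_shentsize, e_shnum))
+	}
+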
+There is preceding work from Abdiel Janulgue, which hasn't made it to the
+mailing list yet.
+
+| Complexity: Beginner
+| Contact: Abdiel Janulgue
+
+PCI MISC APIs
+-------------
+
+Extend the existing PCI device / driver abstractions with SR-IOV, config space,
+capability and MSI API abstractions.
+
+| Complexity: Beginner
+
+Auxiliary bus abstractions
+--------------------------
+
+Rust abstraction for the auxiliary bus APIs.
+
+This is needed to connect nova-core to the nova-drm driver.
+
+| Complexity: Intermediate
+
+Debugfs abstractions
+--------------------
+
+Rust abstraction for debugfs APIs.
+
+| Reference: Export GSP log buffers
+| Complexity: Intermediate
+
+Vec extensions
+--------------
+
+Implement ``Vec::truncate`` and ``Vec::resize``.
+
+Currently this is used for some experimental code to parse the vBIOS.
+
+| Reference: vBIOS support
+| Complexity: Beginner
+
+GPU (general)
+=============
+
+Parse firmware headers
+----------------------
+
+Parse ELF headers from the firmware files loaded from the filesystem.
+
+| Reference: ELF utils
+| Complexity: Beginner
+| Contact: Abdiel Janulgue
+
+Build radix3 page table
+-----------------------
+
+Build the radix3 page table to map the firmware.
+
+| Complexity: Intermediate
+| Contact: Abdiel Janulgue
+
+vBIOS support
+-------------
+
+Parse the vBIOS and probe the structures required for driver initialization.
+
+| Contact: Dave Airlie
+| Reference: Vec extensions
+| Complexity: Intermediate
+
+Initial Devinit support
+-----------------------
+
+Implement BIOS Device Initialization, i.e. memory sizing, waiting, PLL
+configuration.
+
+| Contact: Dave Airlie
+| Complexity: Beginner
+
+Boot Falcon controller
+----------------------
+
+Infrastructure to load and execute falcon (sec2) firmware images; handle the
+GSP falcon processor and fwsec loading.
+
+| Complexity: Advanced
+| Contact: Dave Airlie
+
+GPU Timer support
+-----------------
+
+Support for the GPU's internal timer peripheral.
+
+| Complexity: Beginner
+| Contact: Dave Airlie
+
+MMU / PT management
+-------------------
+
+Work out the architecture for MMU / page table management.
+
+We need to consider that nova-drm will need rather fine-grained control,
+especially in terms of locking, in order to be able to implement asynchronous
+Vulkan queues.
+
+While sharing the corresponding code is generally desirable, it needs to be
+evaluated how (and whether at all) doing so is expedient.
+
+| Complexity: Expert
+
+VRAM memory allocator
+---------------------
+
+Investigate options for a VRAM memory allocator.
+
+Some possible options:
+ - Rust abstractions for
+ - RB tree (interval tree) / drm_mm
+ - maple_tree
+ - native Rust collections
+
+| Complexity: Advanced
+
+Instance Memory
+---------------
+
+Implement support for instmem (bar2) used to store page tables.
+
+| Complexity: Intermediate
+| Contact: Dave Airlie
+
+GPU System Processor (GSP)
+==========================
+
+Export GSP log buffers
+----------------------
+
+Recent patches from Timur Tabi [1] added support to expose GSP-RM log buffers
+(even after failure to probe the driver) through debugfs.
+
+This is also an interesting feature for nova-core, especially in the early days.
+
+| Link: https://lore.kernel.org/nouveau/20241030202952.694055-2-ttabi@nvidia.com/ [1]
+| Reference: Debugfs abstractions
+| Complexity: Intermediate
+
+GSP firmware abstraction
+------------------------
+
+The GSP-RM firmware API is unstable and may incompatibly change from version to
+version, in terms of data structures and semantics.
+
+This problem is one of the big motivations for using Rust for nova-core, since
+it turns out that Rust's procedural macro feature provides a rather elegant way
+to address this issue:
+
+1. generate Rust structures from the C headers in a separate namespace per version
+2. build abstraction structures (within a generic namespace) that implement the
+ firmware interfaces; annotate the differences in implementation with version
+ identifiers
+3. use a procedural macro to generate the actual per version implementation out
+ of this abstraction
+4. instantiate the correct version type at runtime (all versions are guaranteed
+   to expose the same interface because it is defined by a common trait)
+
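+A very rough sketch of the shape of this pattern (all type and trait names below
+are hypothetical placeholders, not the actual PoC code):
+
+.. code-block:: rust
+
+	/// Version-independent interface exposed to the rest of nova-core (step 2).
+	trait GspFirmware {
+	    fn boot(&self) -> Result<(), ()>;
+	}
+
+	/// Per-version types, wrapping structures generated from the respective
+	/// C headers (step 1); their impls would be generated by the procedural
+	/// macro (step 3).
+	struct GspV1;
+	struct GspV2;
+
+	impl GspFirmware for GspV1 {
+	    fn boot(&self) -> Result<(), ()> { Ok(()) }
+	}
+
+	impl GspFirmware for GspV2 {
+	    fn boot(&self) -> Result<(), ()> { Ok(()) }
+	}
+
+	/// Callers only rely on the common trait (step 4), regardless of which
+	/// concrete version was instantiated at runtime.
+	fn boot_gsp(fw: &dyn GspFirmware) -> Result<(), ()> {
+	    fw.boot()
+	}
+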
+There is a PoC implementation of this pattern, in the context of the nova-core
+PoC driver.
+
+This task aims at refining the feature and ideally generalizing it, so that it
+can be used by other drivers as well.
+
+| Complexity: Expert
+
+GSP message queue
+-----------------
+
+Implement low level GSP message queue (command, status) for communication
+between the kernel driver and GSP.
+
+| Complexity: Advanced
+| Contact: Dave Airlie
+
+Bootstrap GSP
+-------------
+
+Call the boot firmware to boot the GSP processor; execute initial control
+messages.
+
+| Complexity: Intermediate
+| Contact: Dave Airlie
+
+Client / Device APIs
+--------------------
+
+Implement the GSP message interface for client / device allocation and the
+corresponding client and device allocation APIs.
+
+| Complexity: Intermediate
+| Contact: Dave Airlie
+
+Bar PDE handling
+----------------
+
+Synchronize page table handling for BARs between the kernel driver and GSP.
+
+| Complexity: Beginner
+| Contact: Dave Airlie
+
+FIFO engine
+-----------
+
+Implement support for the FIFO engine, i.e. the corresponding GSP message
+interface and provide an API for chid allocation and channel handling.
+
+| Complexity: Advanced
+| Contact: Dave Airlie
+
+GR engine
+---------
+
+Implement support for the graphics engine, i.e. the corresponding GSP message
+interface and provide an API for (golden) context creation and promotion.
+
+| Complexity: Advanced
+| Contact: Dave Airlie
+
+CE engine
+---------
+
+Implement support for the copy engine, i.e. the corresponding GSP message
+interface.
+
+| Complexity: Intermediate
+| Contact: Dave Airlie
+
+VFN IRQ controller
+------------------
+
+Support for the VFN interrupt controller.
+
+| Complexity: Intermediate
+| Contact: Dave Airlie
+
+External APIs
+=============
+
+nova-core base API
+------------------
+
+Work out the common pieces of the API to connect 2nd level drivers, i.e. vGPU
+manager and nova-drm.
+
+| Complexity: Advanced
+
+vGPU manager API
+----------------
+
+Work out the API parts required by the vGPU manager, which are not covered by
+the base API.
+
+| Complexity: Advanced
+
+nova-core C API
+---------------
+
+Implement a C wrapper for the APIs required by the vGPU manager driver.
+
+| Complexity: Intermediate
+
+Testing
+=======
+
+CI pipeline
+-----------
+
+Investigate options for continuous integration testing.
+
+This can range from simply running KUnit tests, over running the (graphics) CTS,
+to booting up (multiple) guest VMs to test VFIO use-cases.
+
+It might also be worth considering the introduction of a new test suite sitting
+directly on top of the uAPI for more targeted testing and debugging. There may be
+options for collaboration / shared code with the Mesa project.
+
+| Complexity: Advanced
diff --git a/Documentation/gpu/nova/guidelines.rst b/Documentation/gpu/nova/guidelines.rst
new file mode 100644
index 000000000000..13ab13984a18
--- /dev/null
+++ b/Documentation/gpu/nova/guidelines.rst
@@ -0,0 +1,69 @@
+.. SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+==========
+Guidelines
+==========
+
+This document describes the general project guidelines that apply to nova-core
+and nova-drm.
+
+Language
+========
+
+The Nova project uses the Rust programming language. In this context, all rules
+of the Rust for Linux project as documented in
+:doc:`../../rust/general-information` apply. Additionally, the following rules
+apply.
+
+- Unless technically necessary otherwise (e.g. uAPI), all driver code is written
+ in Rust.
+
+- Unless technically necessary, unsafe Rust code must be avoided. In case of
+ technical necessity, unsafe code should be isolated in a separate component
+ providing a safe API for other driver code to use.
+
+Style
+-----
+
+All rules of the Rust for Linux project as documented in
+:doc:`../../rust/coding-guidelines` apply.
+
+For a submit checklist, please also see the `Rust for Linux Submit checklist
+addendum <https://rust-for-linux.com/contributing#submit-checklist-addendum>`_.
+
+Documentation
+=============
+
+The availability of proper documentation is essential in terms of scalability,
+accessibility for new contributors and maintainability of a project in general,
+but especially for a driver targeting hardware as complex as Nova does.
+
+Hence, adding documentation of any kind is very much encouraged by the project.
+
+Besides that, there are some minimum requirements.
+
+- Every non-private structure needs at least a brief doc comment explaining the
+  purpose of the structure, as well as potential locking and lifetime
+ requirements. It is encouraged to have the same minimum documentation for
+ non-trivial private structures.
+
+- uAPIs must be fully documented with kernel-doc comments; additionally, the
+  semantic behavior must be explained, including potential special or corner
+ cases.
+
+- The APIs connecting the 1st level driver (nova-core) with 2nd level drivers
+ must be fully documented. This includes doc comments, potential locking and
+ lifetime requirements, as well as example code if applicable.
+
+- Abbreviations must be explained when introduced; terminology must be uniquely
+ defined.
+
+- Register addresses, layouts, shift values and masks must be defined properly;
+  unless obvious, their semantics must be documented. This only applies if
+ the author is able to obtain the corresponding information.
+
+Acceptance Criteria
+===================
+
+- Patches must only be applied if reviewed by at least one other person on the
+ mailing list; this also applies for maintainers.
diff --git a/Documentation/gpu/nova/index.rst b/Documentation/gpu/nova/index.rst
new file mode 100644
index 000000000000..2701b3f4af35
--- /dev/null
+++ b/Documentation/gpu/nova/index.rst
@@ -0,0 +1,30 @@
+.. SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+=======================
+nova NVIDIA GPU drivers
+=======================
+
+The nova driver project consists of two separate drivers, nova-core and
+nova-drm, and intends to supersede the nouveau driver for NVIDIA GPUs based on
+the GPU System Processor (GSP).
+
+The following documents apply to both nova-core and nova-drm.
+
+.. toctree::
+ :titlesonly:
+
+ guidelines
+
+nova-core
+=========
+
+The nova-core driver is the core driver for NVIDIA GPUs based on GSP. nova-core,
+as the 1st level driver, provides an abstraction around the GPU's hardware and
+firmware interfaces, providing a common base for 2nd level drivers, such as the
+vGPU manager VFIO driver and the nova-drm driver.
+
+.. toctree::
+ :titlesonly:
+
+ core/guidelines
+ core/todo
diff --git a/Documentation/gpu/rfc/gpusvm.rst b/Documentation/gpu/rfc/gpusvm.rst
new file mode 100644
index 000000000000..bcf66a8137a6
--- /dev/null
+++ b/Documentation/gpu/rfc/gpusvm.rst
@@ -0,0 +1,112 @@
+.. SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+===============
+GPU SVM Section
+===============
+
+Agreed upon design principles
+=============================
+
+* migrate_to_ram path
+ * Rely only on core MM concepts (migration PTEs, page references, and
+ page locking).
+	* No driver specific locks other than locks for hardware interaction in
+	  this path. These are not required, and it is generally a bad idea to
+	  invent driver-defined locks to seal core MM races.
+ * An example of a driver-specific lock causing issues occurred before
+ fixing do_swap_page to lock the faulting page. A driver-exclusive lock
+ in migrate_to_ram produced a stable livelock if enough threads read
+ the faulting page.
+ * Partial migration is supported (i.e., a subset of pages attempting to
+ migrate can actually migrate, with only the faulting page guaranteed
+ to migrate).
+ * Driver handles mixed migrations via retry loops rather than locking.
+* Eviction
+ * Eviction is defined as migrating data from the GPU back to the
+ CPU without a virtual address to free up GPU memory.
+ * Only looking at physical memory data structures and locks as opposed to
+ looking at virtual memory data structures and locks.
+ * No looking at mm/vma structs or relying on those being locked.
+ * The rationale for the above two points is that CPU virtual addresses
+ can change at any moment, while the physical pages remain stable.
+ * GPU page table invalidation, which requires a GPU virtual address, is
+ handled via the notifier that has access to the GPU virtual address.
+* GPU fault side
+	* The mmap_read lock is only used around core MM functions which require
+	  it; drivers should strive to take the mmap_read lock only in the GPU
+	  SVM layer.
+ * Big retry loop to handle all races with the mmu notifier under the gpu
+ pagetable locks/mmu notifier range lock/whatever we end up calling
+ those.
+ * Races (especially against concurrent eviction or migrate_to_ram)
+ should not be handled on the fault side by trying to hold locks;
+ rather, they should be handled using retry loops. One possible
+ exception is holding a BO's dma-resv lock during the initial migration
+ to VRAM, as this is a well-defined lock that can be taken underneath
+ the mmap_read lock.
+ * One possible issue with the above approach is if a driver has a strict
+ migration policy requiring GPU access to occur in GPU memory.
+ Concurrent CPU access could cause a livelock due to endless retries.
+ While no current user (Xe) of GPU SVM has such a policy, it is likely
+ to be added in the future. Ideally, this should be resolved on the
+ core-MM side rather than through a driver-side lock.
+* Physical memory to virtual backpointer
+ * This does not work, as no pointers from physical memory to virtual
+ memory should exist. mremap() is an example of the core MM updating
+	  the virtual address without notifying the driver of the address
+	  change; rather, the driver only receives the invalidation notifier.
+ * The physical memory backpointer (page->zone_device_data) should remain
+ stable from allocation to page free. Safely updating this against a
+ concurrent user would be very difficult unless the page is free.
+* GPU pagetable locking
+ * Notifier lock only protects range tree, pages valid state for a range
+ (rather than seqno due to wider notifiers), pagetable entries, and
+	  mmu notifier seqno tracking; it is not a global lock to protect
+ against races.
+ * All races handled with big retry as mentioned above.
+
+Overview of baseline design
+===========================
+
+.. kernel-doc:: drivers/gpu/drm/drm_gpusvm.c
+ :doc: Overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_gpusvm.c
+ :doc: Locking
+
+.. kernel-doc:: drivers/gpu/drm/drm_gpusvm.c
+ :doc: Migration
+
+.. kernel-doc:: drivers/gpu/drm/drm_gpusvm.c
+ :doc: Partial Unmapping of Ranges
+
+.. kernel-doc:: drivers/gpu/drm/drm_gpusvm.c
+ :doc: Examples
+
+Possible future design features
+===============================
+
+* Concurrent GPU faults
+	* CPU faults are concurrent, so it makes sense to have concurrent GPU
+ faults.
+	* Should be possible with fine-grained locking in the driver GPU
+ fault handler.
+ * No expected GPU SVM changes required.
+* Ranges with mixed system and device pages
+ * Can be added if required to drm_gpusvm_get_pages fairly easily.
+* Multi-GPU support
+	* Work in progress, with patches expected after GPU SVM initially lands.
+ * Ideally can be done with little to no changes to GPU SVM.
+* Drop ranges in favor of radix tree
+ * May be desirable for faster notifiers.
+* Compound device pages
+	* Nvidia, AMD, and Intel have all agreed that expensive core MM functions
+	  in the migrate device layer are a performance bottleneck; having compound
+ device pages should help increase performance by reducing the number
+ of these expensive calls.
+* Higher order dma mapping for migration
+ * 4k dma mapping adversely affects migration performance on Intel
+	  hardware; higher order (2M) dma mapping should help here.
+* Build common userptr implementation on top of GPU SVM
+* Driver side madvise implementation and migration policies
+* Pull in pending dma-mapping API changes from Leon / Nvidia when these land
diff --git a/Documentation/gpu/rfc/index.rst b/Documentation/gpu/rfc/index.rst
index 476719771eef..396e535377fb 100644
--- a/Documentation/gpu/rfc/index.rst
+++ b/Documentation/gpu/rfc/index.rst
@@ -18,6 +18,10 @@ host such documentation:
.. toctree::
+ gpusvm.rst
+
+.. toctree::
+
i915_gem_lmem.rst
.. toctree::
diff --git a/Documentation/scheduler/sched-rt-group.rst b/Documentation/scheduler/sched-rt-group.rst
index 80b05a3009ea..ab464335d320 100644
--- a/Documentation/scheduler/sched-rt-group.rst
+++ b/Documentation/scheduler/sched-rt-group.rst
@@ -102,6 +102,9 @@ The system wide settings are configured under the /proc virtual file system:
* sched_rt_period_us takes values from 1 to INT_MAX.
* sched_rt_runtime_us takes values from -1 to sched_rt_period_us.
* A run time of -1 specifies runtime == period, ie. no limit.
+ * sched_rt_runtime_us/sched_rt_period_us > 0.05 in order to preserve
+   bandwidth for the fair dl_server. For an accurate value, check the average
+   of runtime/period in /sys/kernel/debug/sched/fair_server/cpuX/
2.2 Default behaviour
diff --git a/Documentation/userspace-api/landlock.rst b/Documentation/userspace-api/landlock.rst
index d639c61cb472..ad587f53fe41 100644
--- a/Documentation/userspace-api/landlock.rst
+++ b/Documentation/userspace-api/landlock.rst
@@ -8,7 +8,7 @@ Landlock: unprivileged access control
=====================================
:Author: Mickaël Salaün
-:Date: October 2024
+:Date: January 2025
The goal of Landlock is to enable restriction of ambient rights (e.g. global
filesystem or network access) for a set of processes. Because Landlock
@@ -329,11 +329,11 @@ non-sandboxed process, we can specify this restriction with
A sandboxed process can connect to a non-sandboxed process when its domain is
not scoped. If a process's domain is scoped, it can only connect to sockets
created by processes in the same scope.
-Moreover, If a process is scoped to send signal to a non-scoped process, it can
+Moreover, if a process is scoped to send signal to a non-scoped process, it can
only send signals to processes in the same scope.
A connected datagram socket behaves like a stream socket when its domain is
-scoped, meaning if the domain is scoped after the socket is connected , it can
+scoped, meaning if the domain is scoped after the socket is connected, it can
still :manpage:`send(2)` data just like a stream socket. However, in the same
scenario, a non-connected datagram socket cannot send data (with
:manpage:`sendto(2)`) outside its scope.
diff --git a/MAINTAINERS b/MAINTAINERS
index 5a742f1b8c2f..1554908c94cc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2878,7 +2878,7 @@ F: drivers/pinctrl/nxp/
ARM/NXP S32G/S32R DWMAC ETHERNET DRIVER
M: Jan Petrous <jan.petrous@oss.nxp.com>
-L: NXP S32 Linux Team <s32@nxp.com>
+R: s32@nxp.com
S: Maintained
F: Documentation/devicetree/bindings/net/nxp,s32-dwmac.yaml
F: drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
@@ -5775,6 +5775,7 @@ X: drivers/clk/clkdev.c
COMMON INTERNET FILE SYSTEM CLIENT (CIFS and SMB3)
M: Steve French <sfrench@samba.org>
+M: Steve French <smfrench@gmail.com>
R: Paulo Alcantara <pc@manguebit.com> (DFS, global name space)
R: Ronnie Sahlberg <ronniesahlberg@gmail.com> (directory leases, sparse files)
R: Shyam Prasad N <sprasad@microsoft.com> (multichannel)
@@ -5856,7 +5857,6 @@ F: Documentation/security/snp-tdx-threat-model.rst
CONFIGFS
M: Joel Becker <jlbec@evilplan.org>
-M: Christoph Hellwig <hch@lst.de>
S: Supported
T: git git://git.infradead.org/users/hch/configfs.git
F: fs/configfs/
@@ -5927,6 +5927,17 @@ F: tools/testing/selftests/cgroup/test_cpuset.c
F: tools/testing/selftests/cgroup/test_cpuset_prs.sh
F: tools/testing/selftests/cgroup/test_cpuset_v1_base.sh
+CONTROL GROUP - DEVICE MEMORY CONTROLLER (DMEM)
+M: Maarten Lankhorst <dev@lankhorst.se>
+M: Maxime Ripard <mripard@kernel.org>
+M: Natalie Vock <natalie.vock@gmx.de>
+L: cgroups@vger.kernel.org
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
+F: include/linux/cgroup_dmem.h
+F: kernel/cgroup/dmem.c
+
CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M: Johannes Weiner <hannes@cmpxchg.org>
M: Michal Hocko <mhocko@kernel.org>
@@ -6879,7 +6890,6 @@ F: kernel/dma/map_benchmark.c
F: tools/testing/selftests/dma/
DMA MAPPING HELPERS
-M: Christoph Hellwig <hch@lst.de>
M: Marek Szyprowski <m.szyprowski@samsung.com>
R: Robin Murphy <robin.murphy@arm.com>
L: iommu@lists.linux.dev
@@ -7389,7 +7399,7 @@ F: include/uapi/drm/msm_drm.h
DRM DRIVER for Qualcomm display hardware
M: Rob Clark <robdclark@gmail.com>
M: Abhinav Kumar <quic_abhinavk@quicinc.com>
-M: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+M: Dmitry Baryshkov <lumag@kernel.org>
R: Sean Paul <sean@poorly.run>
R: Marijn Suijten <marijn.suijten@somainline.org>
L: linux-arm-msm@vger.kernel.org
@@ -7401,6 +7411,7 @@ T: git https://gitlab.freedesktop.org/drm/msm.git
F: Documentation/devicetree/bindings/display/msm/
F: drivers/gpu/drm/ci/xfails/msm*
F: drivers/gpu/drm/msm/
+F: include/dt-bindings/clock/qcom,dsi-phy-28nm.h
F: include/uapi/drm/msm_drm.h
DRM DRIVER FOR NOVATEK NT35510 PANELS
@@ -7446,6 +7457,17 @@ T: git https://gitlab.freedesktop.org/drm/nouveau.git
F: drivers/gpu/drm/nouveau/
F: include/uapi/drm/nouveau_drm.h
+CORE DRIVER FOR NVIDIA GPUS [RUST]
+M: Danilo Krummrich <dakr@kernel.org>
+L: nouveau@lists.freedesktop.org
+S: Supported
+Q: https://patchwork.freedesktop.org/project/nouveau/
+B: https://gitlab.freedesktop.org/drm/nova/-/issues
+C: irc://irc.oftc.net/nouveau
+T: git https://gitlab.freedesktop.org/drm/nova.git nova-next
+F: Documentation/gpu/nova/
+F: drivers/gpu/nova-core/
+
DRM DRIVER FOR OLIMEX LCD-OLINUXINO PANELS
M: Stefan Mavrodiev <stefan@olimex.com>
S: Maintained
@@ -9458,14 +9480,11 @@ F: include/linux/fscrypt.h
F: include/uapi/linux/fscrypt.h
FSI SUBSYSTEM
-M: Jeremy Kerr <jk@ozlabs.org>
-M: Joel Stanley <joel@jms.id.au>
-R: Alistar Popple <alistair@popple.id.au>
-R: Eddie James <eajames@linux.ibm.com>
+M: Eddie James <eajames@linux.ibm.com>
+R: Ninad Palsule <ninad@linux.ibm.com>
L: linux-fsi@lists.ozlabs.org
S: Supported
Q: http://patchwork.ozlabs.org/project/linux-fsi/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/joel/fsi.git
F: drivers/fsi/
F: include/linux/fsi*.h
F: include/trace/events/fsi*.h
@@ -12671,7 +12690,9 @@ F: tools/testing/selftests/
KERNEL SMB3 SERVER (KSMBD)
M: Namjae Jeon <linkinjeon@kernel.org>
+M: Namjae Jeon <linkinjeon@samba.org>
M: Steve French <sfrench@samba.org>
+M: Steve French <smfrench@gmail.com>
R: Sergey Senozhatsky <senozhatsky@chromium.org>
R: Tom Talpey <tom@talpey.com>
L: linux-cifs@vger.kernel.org
@@ -12888,7 +12909,7 @@ F: include/keys/trusted_dcp.h
F: security/keys/trusted-keys/trusted_dcp.c
KEYS-TRUSTED-TEE
-M: Sumit Garg <sumit.garg@linaro.org>
+M: Sumit Garg <sumit.garg@kernel.org>
L: linux-integrity@vger.kernel.org
L: keyrings@vger.kernel.org
S: Supported
@@ -15707,7 +15728,7 @@ F: include/uapi/linux/cciss*.h
MICROSOFT MANA RDMA DRIVER
M: Long Li <longli@microsoft.com>
-M: Ajay Sharma <sharmaajay@microsoft.com>
+M: Konstantin Taranov <kotaranov@microsoft.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/mana/
@@ -17687,7 +17708,7 @@ F: Documentation/ABI/testing/sysfs-bus-optee-devices
F: drivers/tee/optee/
OP-TEE RANDOM NUMBER GENERATOR (RNG) DRIVER
-M: Sumit Garg <sumit.garg@linaro.org>
+M: Sumit Garg <sumit.garg@kernel.org>
L: op-tee@lists.trustedfirmware.org
S: Maintained
F: drivers/char/hw_random/optee-rng.c
@@ -20353,6 +20374,7 @@ RISC-V ARCHITECTURE
M: Paul Walmsley <paul.walmsley@sifive.com>
M: Palmer Dabbelt <palmer@dabbelt.com>
M: Albert Ou <aou@eecs.berkeley.edu>
+R: Alexandre Ghiti <alex@ghiti.fr>
L: linux-riscv@lists.infradead.org
S: Supported
Q: https://patchwork.kernel.org/project/linux-riscv/list/
@@ -21946,10 +21968,13 @@ F: sound/soc/uniphier/
SOCKET TIMESTAMPING
M: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+R: Jason Xing <kernelxing@tencent.com>
S: Maintained
F: Documentation/networking/timestamping.rst
F: include/linux/net_tstamp.h
F: include/uapi/linux/net_tstamp.h
+F: tools/testing/selftests/bpf/*/net_timestamping*
+F: tools/testing/selftests/net/*timestamp*
F: tools/testing/selftests/net/so_txtime.c
SOEKRIS NET48XX LED SUPPORT
@@ -23296,7 +23321,7 @@ F: include/media/i2c/tw9910.h
TEE SUBSYSTEM
M: Jens Wiklander <jens.wiklander@linaro.org>
-R: Sumit Garg <sumit.garg@linaro.org>
+R: Sumit Garg <sumit.garg@kernel.org>
L: op-tee@lists.trustedfirmware.org
S: Maintained
F: Documentation/ABI/testing/sysfs-class-tee
diff --git a/Makefile b/Makefile
index 30dab4c8b012..1d6a9ec8a2ac 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 14
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
NAME = Baby Opossum Posse
# *DOCUMENTATION*
@@ -1123,6 +1123,11 @@ endif
KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
+# userspace programs are linked via the compiler, use the correct linker
+ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_LD_IS_LLD),yy)
+KBUILD_USERLDFLAGS += --ld-path=$(LD)
+endif
+
# make the checker run with the right architecture
CHECKFLAGS += --arch=$(ARCH)
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 2bec87c3327d..39fd5df73317 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -62,7 +62,7 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
}
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
- unsigned long pfn, struct vm_fault *vmf)
+ unsigned long pfn, bool need_lock)
{
spinlock_t *ptl;
pgd_t *pgd;
@@ -99,12 +99,11 @@ again:
if (!pte)
return 0;
- /*
- * If we are using split PTE locks, then we need to take the page
- * lock here. Otherwise we are using shared mm->page_table_lock
- * which is already locked, thus cannot take it.
- */
- if (ptl != vmf->ptl) {
+ if (need_lock) {
+ /*
+ * Use nested version here to indicate that we are already
+ * holding one similar spinlock.
+ */
spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
pte_unmap_unlock(pte, ptl);
@@ -114,7 +113,7 @@ again:
ret = do_adjust_pte(vma, address, pfn, pte);
- if (ptl != vmf->ptl)
+ if (need_lock)
spin_unlock(ptl);
pte_unmap(pte);
@@ -123,9 +122,10 @@ again:
static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep, unsigned long pfn,
- struct vm_fault *vmf)
+ unsigned long addr, pte_t *ptep, unsigned long pfn)
{
+ const unsigned long pmd_start_addr = ALIGN_DOWN(addr, PMD_SIZE);
+ const unsigned long pmd_end_addr = pmd_start_addr + PMD_SIZE;
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *mpnt;
unsigned long offset;
@@ -142,6 +142,14 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
flush_dcache_mmap_lock(mapping);
vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
/*
+ * If we are using split PTE locks, then we need to take the pte
+ * lock. Otherwise we are using shared mm->page_table_lock which
+ * is already locked, thus cannot take it.
+ */
+ bool need_lock = IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS);
+ unsigned long mpnt_addr;
+
+ /*
* If this VMA is not in our MM, we can ignore it.
* Note that we intentionally mask out the VMA
* that we are fixing up.
@@ -151,7 +159,12 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
if (!(mpnt->vm_flags & VM_MAYSHARE))
continue;
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
- aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn, vmf);
+ mpnt_addr = mpnt->vm_start + offset;
+
+ /* Avoid deadlocks by not grabbing the same PTE lock again. */
+ if (mpnt_addr >= pmd_start_addr && mpnt_addr < pmd_end_addr)
+ need_lock = false;
+ aliases += adjust_pte(mpnt, mpnt_addr, pfn, need_lock);
}
flush_dcache_mmap_unlock(mapping);
if (aliases)
@@ -194,7 +207,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
__flush_dcache_folio(mapping, folio);
if (mapping) {
if (cache_is_vivt())
- make_coherent(mapping, vma, addr, ptep, pfn, vmf);
+ make_coherent(mapping, vma, addr, ptep, pfn);
else if (vma->vm_flags & VM_EXEC)
__flush_icache_all();
}
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 25e162651750..555c613fd232 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -16,6 +16,32 @@
#include <asm/sysreg.h>
#include <linux/irqchip/arm-gic-v3.h>
+.macro init_el2_hcr val
+ mov_q x0, \val
+
+ /*
+ * Compliant CPUs advertise their VHE-onlyness with
+ * ID_AA64MMFR4_EL1.E2H0 < 0. On such CPUs HCR_EL2.E2H is RES1, but it
+ * can reset into an UNKNOWN state and might not read as 1 until it has
+ * been initialized explicitly.
+ *
+ * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
+ * don't advertise it (they predate this relaxation).
+ *
+ * Initialize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H
+ * indicating whether the CPU is running in E2H mode.
+ */
+ mrs_s x1, SYS_ID_AA64MMFR4_EL1
+ sbfx x1, x1, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
+ cmp x1, #0
+ b.ge .LnVHE_\@
+
+ orr x0, x0, #HCR_E2H
+.LnVHE_\@:
+ msr hcr_el2, x0
+ isb
+.endm
+
.macro __init_el2_sctlr
mov_q x0, INIT_SCTLR_EL2_MMU_OFF
msr sctlr_el2, x0
@@ -244,11 +270,6 @@
.Lskip_gcs_\@:
.endm
-.macro __init_el2_nvhe_prepare_eret
- mov x0, #INIT_PSTATE_EL1
- msr spsr_el2, x0
-.endm
-
.macro __init_el2_mpam
/* Memory Partitioning And Monitoring: disable EL2 traps */
mrs x1, id_aa64pfr0_el1
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index c6dff3e69539..07fbf5bf85a7 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -42,8 +42,8 @@ extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty);
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
-extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep);
+extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
@@ -76,12 +76,22 @@ static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
{
unsigned long stride = huge_page_size(hstate_vma(vma));
- if (stride == PMD_SIZE)
- __flush_tlb_range(vma, start, end, stride, false, 2);
- else if (stride == PUD_SIZE)
- __flush_tlb_range(vma, start, end, stride, false, 1);
- else
- __flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
+ switch (stride) {
+#ifndef __PAGETABLE_PMD_FOLDED
+ case PUD_SIZE:
+ __flush_tlb_range(vma, start, end, PUD_SIZE, false, 1);
+ break;
+#endif
+ case CONT_PMD_SIZE:
+ case PMD_SIZE:
+ __flush_tlb_range(vma, start, end, PMD_SIZE, false, 2);
+ break;
+ case CONT_PTE_SIZE:
+ __flush_tlb_range(vma, start, end, PAGE_SIZE, false, 3);
+ break;
+ default:
+ __flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN);
+ }
}
#endif /* __ASM_HUGETLB_H */
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 8d94a6c0ed5c..c2417a424b98 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -119,7 +119,7 @@
#define TCR_EL2_IRGN0_MASK TCR_IRGN0_MASK
#define TCR_EL2_T0SZ_MASK 0x3f
#define TCR_EL2_MASK (TCR_EL2_TG0_MASK | TCR_EL2_SH0_MASK | \
- TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
+ TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK)
/* VTCR_EL2 Registers bits */
#define VTCR_EL2_DS TCR_EL2_DS
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 3a7ec98ef123..d919557af5e5 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -1259,7 +1259,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
extern unsigned int __ro_after_init kvm_arm_vmid_bits;
int __init kvm_arm_vmid_alloc_init(void);
void __init kvm_arm_vmid_alloc_free(void);
-bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
+void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 5ab1970ee543..2ce73525de2c 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -298,25 +298,8 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
msr sctlr_el2, x0
isb
0:
- mov_q x0, HCR_HOST_NVHE_FLAGS
-
- /*
- * Compliant CPUs advertise their VHE-onlyness with
- * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be
- * RES1 in that case. Publish the E2H bit early so that
- * it can be picked up by the init_el2_state macro.
- *
- * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
- * don't advertise it (they predate this relaxation).
- */
- mrs_s x1, SYS_ID_AA64MMFR4_EL1
- tbz x1, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f
-
- orr x0, x0, #HCR_E2H
-1:
- msr hcr_el2, x0
- isb
+ init_el2_hcr HCR_HOST_NVHE_FLAGS
init_el2_state
/* Hypervisor stub */
@@ -339,7 +322,8 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
msr sctlr_el1, x1
mov x2, xzr
3:
- __init_el2_nvhe_prepare_eret
+ mov x0, #INIT_PSTATE_EL1
+ msr spsr_el2, x0
mov w0, #BOOT_CPU_MODE_EL2
orr x0, x0, x2
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index b8e55a441282..0160b4924351 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -560,6 +560,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
/*
+ * Ensure a VMID is allocated for the MMU before programming VTTBR_EL2,
+ * which happens eagerly in VHE.
+ *
+ * Also, the VMID allocator only preserves VMIDs that are active at the
+ * time of rollover, so KVM might need to grab a new VMID for the MMU if
+ * this is called from kvm_sched_in().
+ */
+ kvm_arm_vmid_update(&mmu->vmid);
+
+ /*
* We guarantee that both TLBs and I-cache are private to each
* vcpu. If detecting that a vcpu from the same VM has
* previously run on the same physical CPU, call into the
@@ -1138,18 +1148,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
*/
preempt_disable();
- /*
- * The VMID allocator only tracks active VMIDs per
- * physical CPU, and therefore the VMID allocated may not be
- * preserved on VMID roll-over if the task was preempted,
- * making a thread's VMID inactive. So we need to call
- * kvm_arm_vmid_update() in non-premptible context.
- */
- if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
- has_vhe())
- __load_stage2(vcpu->arch.hw_mmu,
- vcpu->arch.hw_mmu->arch);
-
kvm_pmu_flush_hwstate(vcpu);
local_irq_disable();
@@ -1980,7 +1978,7 @@ static int kvm_init_vector_slots(void)
static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
{
struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
- unsigned long tcr, ips;
+ unsigned long tcr;
/*
* Calculate the raw per-cpu offset without a translation from the
@@ -1994,19 +1992,18 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
params->mair_el2 = read_sysreg(mair_el1);
tcr = read_sysreg(tcr_el1);
- ips = FIELD_GET(TCR_IPS_MASK, tcr);
if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
+ tcr &= ~(TCR_HD | TCR_HA | TCR_A1 | TCR_T0SZ_MASK);
tcr |= TCR_EPD1_MASK;
} else {
+ unsigned long ips = FIELD_GET(TCR_IPS_MASK, tcr);
+
tcr &= TCR_EL2_MASK;
- tcr |= TCR_EL2_RES1;
+ tcr |= TCR_EL2_RES1 | FIELD_PREP(TCR_EL2_PS_MASK, ips);
+ if (lpa2_is_enabled())
+ tcr |= TCR_EL2_DS;
}
- tcr &= ~TCR_T0SZ_MASK;
tcr |= TCR_T0SZ(hyp_va_bits);
- tcr &= ~TCR_EL2_PS_MASK;
- tcr |= FIELD_PREP(TCR_EL2_PS_MASK, ips);
- if (lpa2_is_enabled())
- tcr |= TCR_EL2_DS;
params->tcr_el2 = tcr;
params->pgd_pa = kvm_mmu_get_httbr();
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index fc1866226067..f8af11189572 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -73,8 +73,12 @@ __do_hyp_init:
eret
SYM_CODE_END(__kvm_hyp_init)
+/*
+ * Initialize EL2 CPU state to sane values.
+ *
+ * HCR_EL2.E2H must have been initialized already.
+ */
SYM_CODE_START_LOCAL(__kvm_init_el2_state)
- /* Initialize EL2 CPU state to sane values. */
init_el2_state // Clobbers x0..x2
finalise_el2_state
ret
@@ -206,9 +210,9 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
2: msr SPsel, #1 // We want to use SP_EL{1,2}
- bl __kvm_init_el2_state
+ init_el2_hcr 0
- __init_el2_nvhe_prepare_eret
+ bl __kvm_init_el2_state
/* Enable MMU, set vectors and stack. */
mov x0, x28
diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
index 9c2ce1e0e99a..c3e196fb8b18 100644
--- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
+++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
@@ -218,6 +218,9 @@ asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on)
if (is_cpu_on)
release_boot_args(boot_args);
+ write_sysreg_el1(INIT_SCTLR_EL1_MMU_OFF, SYS_SCTLR);
+ write_sysreg(INIT_PSTATE_EL1, SPSR_EL2);
+
__host_enter(host_ctxt);
}
diff --git a/arch/arm64/kvm/vmid.c b/arch/arm64/kvm/vmid.c
index 806223b7022a..7fe8ba1a2851 100644
--- a/arch/arm64/kvm/vmid.c
+++ b/arch/arm64/kvm/vmid.c
@@ -135,11 +135,10 @@ void kvm_arm_vmid_clear_active(void)
atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
}
-bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
+void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
{
unsigned long flags;
u64 vmid, old_active_vmid;
- bool updated = false;
vmid = atomic64_read(&kvm_vmid->id);
@@ -157,21 +156,17 @@ bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
old_active_vmid, vmid))
- return false;
+ return;
raw_spin_lock_irqsave(&cpu_vmid_lock, flags);
/* Check that our VMID belongs to the current generation. */
vmid = atomic64_read(&kvm_vmid->id);
- if (!vmid_gen_match(vmid)) {
+ if (!vmid_gen_match(vmid))
vmid = new_vmid(kvm_vmid);
- updated = true;
- }
atomic64_set(this_cpu_ptr(&active_vmids), vmid);
raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
-
- return updated;
}
/*
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 98a2a0e64e25..b3a7fafe8892 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -100,20 +100,11 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr,
static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
- int contig_ptes = 0;
+ int contig_ptes = 1;
*pgsize = size;
switch (size) {
-#ifndef __PAGETABLE_PMD_FOLDED
- case PUD_SIZE:
- if (pud_sect_supported())
- contig_ptes = 1;
- break;
-#endif
- case PMD_SIZE:
- contig_ptes = 1;
- break;
case CONT_PMD_SIZE:
*pgsize = PMD_SIZE;
contig_ptes = CONT_PMDS;
@@ -122,6 +113,8 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
*pgsize = PAGE_SIZE;
contig_ptes = CONT_PTES;
break;
+ default:
+ WARN_ON(!__hugetlb_valid_size(size));
}
return contig_ptes;
@@ -163,24 +156,23 @@ static pte_t get_clear_contig(struct mm_struct *mm,
unsigned long pgsize,
unsigned long ncontig)
{
- pte_t orig_pte = __ptep_get(ptep);
- unsigned long i;
-
- for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
- pte_t pte = __ptep_get_and_clear(mm, addr, ptep);
-
- /*
- * If HW_AFDBM is enabled, then the HW could turn on
- * the dirty or accessed bit for any page in the set,
- * so check them all.
- */
- if (pte_dirty(pte))
- orig_pte = pte_mkdirty(orig_pte);
-
- if (pte_young(pte))
- orig_pte = pte_mkyoung(orig_pte);
+ pte_t pte, tmp_pte;
+ bool present;
+
+ pte = __ptep_get_and_clear(mm, addr, ptep);
+ present = pte_present(pte);
+ while (--ncontig) {
+ ptep++;
+ addr += pgsize;
+ tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
+ if (present) {
+ if (pte_dirty(tmp_pte))
+ pte = pte_mkdirty(pte);
+ if (pte_young(tmp_pte))
+ pte = pte_mkyoung(pte);
+ }
}
- return orig_pte;
+ return pte;
}
static pte_t get_clear_contig_flush(struct mm_struct *mm,
@@ -396,18 +388,13 @@ void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
__pte_clear(mm, addr, ptep);
}
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long sz)
{
int ncontig;
size_t pgsize;
- pte_t orig_pte = __ptep_get(ptep);
-
- if (!pte_cont(orig_pte))
- return __ptep_get_and_clear(mm, addr, ptep);
-
- ncontig = find_num_contig(mm, addr, ptep, &pgsize);
+ ncontig = num_contig_ptes(sz, &pgsize);
return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
}
@@ -549,6 +536,8 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
+ unsigned long psize = huge_page_size(hstate_vma(vma));
+
if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
/*
* Break-before-make (BBM) is required for all user space mappings
@@ -558,7 +547,7 @@ pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr
if (pte_user_exec(__ptep_get(ptep)))
return huge_ptep_clear_flush(vma, addr, ptep);
}
- return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
}
void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 9c0b8d9558fc..ccdef53872a0 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -279,12 +279,7 @@ void __init arm64_memblock_init(void)
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
extern u16 memstart_offset_seed;
-
- /*
- * Use the sanitised version of id_aa64mmfr0_el1 so that linear
- * map randomization can be enabled by shrinking the IPA space.
- */
- u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
int parange = cpuid_feature_extract_unsigned_field(
mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
s64 range = linear_region_size -
diff --git a/arch/loongarch/include/asm/hugetlb.h b/arch/loongarch/include/asm/hugetlb.h
index c8e4057734d0..4dc4b3e04225 100644
--- a/arch/loongarch/include/asm/hugetlb.h
+++ b/arch/loongarch/include/asm/hugetlb.h
@@ -36,7 +36,8 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
+ unsigned long addr, pte_t *ptep,
+ unsigned long sz)
{
pte_t clear;
pte_t pte = ptep_get(ptep);
@@ -51,8 +52,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte;
+ unsigned long sz = huge_page_size(hstate_vma(vma));
- pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
flush_tlb_page(vma, addr);
return pte;
}
diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
index 382a09a7152c..1120ac2824f6 100644
--- a/arch/loongarch/kernel/acpi.c
+++ b/arch/loongarch/kernel/acpi.c
@@ -249,18 +249,6 @@ static __init int setup_node(int pxm)
return acpi_map_pxm_to_node(pxm);
}
-/*
- * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
- * I/O localities since SRAT does not list them. I/O localities are
- * not supported at this point.
- */
-unsigned int numa_distance_cnt;
-
-static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
-{
- return slit->locality_count;
-}
-
void __init numa_set_distance(int from, int to, int distance)
{
if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
diff --git a/arch/loongarch/kernel/machine_kexec.c b/arch/loongarch/kernel/machine_kexec.c
index 8ae641dc53bb..f9381800e291 100644
--- a/arch/loongarch/kernel/machine_kexec.c
+++ b/arch/loongarch/kernel/machine_kexec.c
@@ -126,14 +126,14 @@ void kexec_reboot(void)
/* All secondary cpus go to kexec_smp_wait */
if (smp_processor_id() > 0) {
relocated_kexec_smp_wait(NULL);
- unreachable();
+ BUG();
}
#endif
do_kexec = (void *)reboot_code_buffer;
do_kexec(efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry);
- unreachable();
+ BUG();
}
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index edcfdfcad7d2..90cb3ca96f08 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -387,6 +387,9 @@ static void __init check_kernel_sections_mem(void)
*/
static void __init arch_mem_init(char **cmdline_p)
{
+ /* Recalculate max_low_pfn for "mem=xxx" */
+ max_pfn = max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
+
if (usermem)
pr_info("User-defined physical RAM map overwrite\n");
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index fbf747447f13..4b24589c0b56 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -19,6 +19,7 @@
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/export.h>
+#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/time.h>
#include <linux/tracepoint.h>
@@ -423,7 +424,7 @@ void loongson_cpu_die(unsigned int cpu)
mb();
}
-void __noreturn arch_cpu_idle_dead(void)
+static void __noreturn idle_play_dead(void)
{
register uint64_t addr;
register void (*init_fn)(void);
@@ -447,6 +448,50 @@ void __noreturn arch_cpu_idle_dead(void)
BUG();
}
+#ifdef CONFIG_HIBERNATION
+static void __noreturn poll_play_dead(void)
+{
+ register uint64_t addr;
+ register void (*init_fn)(void);
+
+ idle_task_exit();
+ __this_cpu_write(cpu_state, CPU_DEAD);
+
+ __smp_mb();
+ do {
+ __asm__ __volatile__("nop\n\t");
+ addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
+ } while (addr == 0);
+
+ init_fn = (void *)TO_CACHE(addr);
+ iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);
+
+ init_fn();
+ BUG();
+}
+#endif
+
+static void (*play_dead)(void) = idle_play_dead;
+
+void __noreturn arch_cpu_idle_dead(void)
+{
+ play_dead();
+ BUG(); /* play_dead() doesn't return */
+}
+
+#ifdef CONFIG_HIBERNATION
+int hibernate_resume_nonboot_cpu_disable(void)
+{
+ int ret;
+
+ play_dead = poll_play_dead;
+ ret = suspend_disable_secondary_cpus();
+ play_dead = idle_play_dead;
+
+ return ret;
+}
+#endif
+
#endif
/*
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index c1e8ec5b941b..ea321403644a 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -669,6 +669,12 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
struct kvm_run *run = vcpu->run;
unsigned long badv = vcpu->arch.badv;
+	/* Inject ADE exception if the address exceeds the max GPA size */
+ if (unlikely(badv >= vcpu->kvm->arch.gpa_size)) {
+ kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
+ return RESUME_GUEST;
+ }
+
ret = kvm_handle_mm_fault(vcpu, badv, write);
if (ret) {
/* Treat as MMIO */
diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c
index f6d3242b9234..b6864d6e5ec8 100644
--- a/arch/loongarch/kvm/main.c
+++ b/arch/loongarch/kvm/main.c
@@ -317,6 +317,13 @@ int kvm_arch_enable_virtualization_cpu(void)
kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx",
read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc());
+ /*
+ * HW Guest CSR registers are lost after CPU suspend and resume.
+	 * Clear last_vcpu so that Guest CSR registers are forced to reload
+ * from vCPU SW state.
+ */
+ this_cpu_ptr(vmcs)->last_vcpu = NULL;
+
return 0;
}
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 20f941af3e9e..9e1a9b4aa4c6 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -311,7 +311,7 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
int ret = RESUME_GUEST;
unsigned long estat = vcpu->arch.host_estat;
- u32 intr = estat & 0x1fff; /* Ignore NMI */
+ u32 intr = estat & CSR_ESTAT_IS;
u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
vcpu->mode = OUTSIDE_GUEST_MODE;
diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c
index b8b3e1972d6e..edccfc8c9cd8 100644
--- a/arch/loongarch/kvm/vm.c
+++ b/arch/loongarch/kvm/vm.c
@@ -48,7 +48,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (kvm_pvtime_supported())
kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
- kvm->arch.gpa_size = BIT(cpu_vabits - 1);
+ /*
+ * cpu_vabits means user address space only (a half of total).
+	 * GPA size of VM is the same as the size of user address space.
+ */
+ kvm->arch.gpa_size = BIT(cpu_vabits);
kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1;
kvm->arch.invalid_ptes[0] = 0;
kvm->arch.invalid_ptes[1] = (unsigned long)invalid_pte_table;
diff --git a/arch/loongarch/mm/mmap.c b/arch/loongarch/mm/mmap.c
index 914e82ff3f65..1df9e99582cc 100644
--- a/arch/loongarch/mm/mmap.c
+++ b/arch/loongarch/mm/mmap.c
@@ -3,6 +3,7 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/export.h>
+#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/kfence.h>
#include <linux/memblock.h>
@@ -63,8 +64,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
}
info.length = len;
- info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
+ if (filp && is_file_hugepages(filp))
+ info.align_mask = huge_page_mask_align(filp);
+ else
+ info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;
if (dir == DOWN) {
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index f1ae4ed890db..80afc3a18724 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -44,8 +44,10 @@ static inline pgd_t * pgd_alloc(struct mm_struct *mm)
pgd_t *new_pgd;
new_pgd = __pgd_alloc(mm, 0);
- memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
- memset(new_pgd, 0, (PAGE_OFFSET >> PGDIR_SHIFT));
+ if (likely(new_pgd != NULL)) {
+ memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
+ memset(new_pgd, 0, (PAGE_OFFSET >> PGDIR_SHIFT));
+ }
return new_pgd;
}
diff --git a/arch/mips/boot/tools/relocs.c b/arch/mips/boot/tools/relocs.c
index a88d66c46d7f..9863e1d5c62e 100644
--- a/arch/mips/boot/tools/relocs.c
+++ b/arch/mips/boot/tools/relocs.c
@@ -468,6 +468,8 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
Elf_Sym *sym, const char *symname))
{
int i;
+ struct section *extab_sec = sec_lookup("__ex_table");
+ int extab_index = extab_sec ? extab_sec - secs : -1;
/* Walk through the relocations */
for (i = 0; i < ehdr.e_shnum; i++) {
@@ -480,6 +482,9 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
if (sec->shdr.sh_type != SHT_REL_TYPE)
continue;
+ if (sec->shdr.sh_info == extab_index)
+ continue;
+
sec_symtab = sec->link;
sec_applies = &secs[sec->shdr.sh_info];
if (!(sec_applies->shdr.sh_flags & SHF_ALLOC))
diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h
index d0a86ce83de9..fbc71ddcf0f6 100644
--- a/arch/mips/include/asm/hugetlb.h
+++ b/arch/mips/include/asm/hugetlb.h
@@ -27,7 +27,8 @@ static inline int prepare_hugepage_range(struct file *file,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
+ unsigned long addr, pte_t *ptep,
+ unsigned long sz)
{
pte_t clear;
pte_t pte = *ptep;
@@ -42,13 +43,14 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte;
+ unsigned long sz = huge_page_size(hstate_vma(vma));
/*
* clear the huge pte entry firstly, so that the other smp threads will
* not get old pte entry after finishing flush_tlb_page and before
* setting new huge pte entry
*/
- pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
flush_tlb_page(vma, addr);
return pte;
}
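
Note: this series threads an explicit size argument through every architecture's huge_ptep_get_and_clear() because, on architectures that back one huge page with a run of contiguous page-table entries (the riscv and sparc hunks later in this patch derive an entry count from the size), the callee can no longer infer how many entries to clear from the PTE itself once that PTE may be non-present. A stand-alone toy sketch of the idea, with made-up names and sizes rather than real kernel structures:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /*
     * Toy model: one "huge page" is a run of contiguous page-table
     * entries, so the callee needs the mapping size to know how many
     * entries to clear; it cannot rely on reading a present PTE.
     */
    static void huge_clear(unsigned long *ptes, unsigned long idx, unsigned long sz)
    {
            unsigned long n = sz / PAGE_SIZE;

            for (unsigned long i = 0; i < n; i++)
                    ptes[idx + i] = 0;
    }

    int main(void)
    {
            unsigned long ptes[16];

            for (int i = 0; i < 16; i++)
                    ptes[i] = 1;
            huge_clear(ptes, 4, 8 * PAGE_SIZE);   /* clear an 8-entry "huge" mapping */
            for (int i = 0; i < 16; i++)
                    printf("%lu", ptes[i]);
            printf("\n");
            return 0;
    }
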
diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h
index 5b3a5429f71b..21e9ace17739 100644
--- a/arch/parisc/include/asm/hugetlb.h
+++ b/arch/parisc/include/asm/hugetlb.h
@@ -10,7 +10,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep);
+ pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
index e9d18cf25b79..a94fe546d434 100644
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -126,7 +126,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
+ pte_t *ptep, unsigned long sz)
{
pte_t entry;
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index dad2e7980f24..86326587e58d 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -45,7 +45,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
+ unsigned long addr, pte_t *ptep,
+ unsigned long sz)
{
return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
}
@@ -55,8 +56,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte;
+ unsigned long sz = huge_page_size(hstate_vma(vma));
- pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
flush_hugetlb_page(vma, addr);
return pte;
}
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 4cadc56220fe..427c41dde643 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -231,7 +231,7 @@
__arch_cmpxchg(".w", ".w" sc_sfx, ".w" cas_sfx, \
sc_prepend, sc_append, \
cas_prepend, cas_append, \
- __ret, __ptr, (long), __old, __new); \
+ __ret, __ptr, (long)(int)(long), __old, __new); \
break; \
case 8: \
__arch_cmpxchg(".d", ".d" sc_sfx, ".d" cas_sfx, \
diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h
index 72be100afa23..90c86b115e00 100644
--- a/arch/riscv/include/asm/futex.h
+++ b/arch/riscv/include/asm/futex.h
@@ -93,7 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r]) \
_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r]) \
: [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
- : [ov] "Jr" (oldval), [nv] "Jr" (newval)
+ : [ov] "Jr" ((long)(int)oldval), [nv] "Jr" (newval)
: "memory");
__disable_user_access();
diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h
index faf3624d8057..446126497768 100644
--- a/arch/riscv/include/asm/hugetlb.h
+++ b/arch/riscv/include/asm/hugetlb.h
@@ -28,7 +28,8 @@ void set_huge_pte_at(struct mm_struct *mm,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep);
+ unsigned long addr, pte_t *ptep,
+ unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
index 2d40736fc37c..26b085dbdd07 100644
--- a/arch/riscv/kernel/cacheinfo.c
+++ b/arch/riscv/kernel/cacheinfo.c
@@ -108,11 +108,11 @@ int populate_cache_leaves(unsigned int cpu)
if (!np)
return -ENOENT;
- if (of_property_read_bool(np, "cache-size"))
+ if (of_property_present(np, "cache-size"))
ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
- if (of_property_read_bool(np, "i-cache-size"))
+ if (of_property_present(np, "i-cache-size"))
ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
- if (of_property_read_bool(np, "d-cache-size"))
+ if (of_property_present(np, "d-cache-size"))
ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
prev = np;
@@ -125,11 +125,11 @@ int populate_cache_leaves(unsigned int cpu)
break;
if (level <= levels)
break;
- if (of_property_read_bool(np, "cache-size"))
+ if (of_property_present(np, "cache-size"))
ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
- if (of_property_read_bool(np, "i-cache-size"))
+ if (of_property_present(np, "i-cache-size"))
ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
- if (of_property_read_bool(np, "d-cache-size"))
+ if (of_property_present(np, "d-cache-size"))
ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
levels = level;
}
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index c6ba750536c3..40ac72e407b6 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -479,7 +479,7 @@ static void __init riscv_resolve_isa(unsigned long *source_isa,
if (bit < RISCV_ISA_EXT_BASE)
*this_hwcap |= isa2hwcap[bit];
}
- } while (loop && memcmp(prev_resolved_isa, resolved_isa, sizeof(prev_resolved_isa)));
+ } while (loop && !bitmap_equal(prev_resolved_isa, resolved_isa, RISCV_ISA_EXT_MAX));
}
static void __init match_isa_ext(const char *name, const char *name_end, unsigned long *bitmap)
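
The switch from memcmp() to bitmap_equal() matters because memcmp() over the whole unsigned long array also compares the unused bits beyond RISCV_ISA_EXT_MAX in the final word, so stray bits there could make two logically identical bitmaps look different and keep the resolve loop spinning. A small userspace analog of the masked comparison, with an arbitrary bit count standing in for RISCV_ISA_EXT_MAX:

    #include <stdio.h>
    #include <string.h>

    #define NBITS 100                      /* stand-in for RISCV_ISA_EXT_MAX */
    #define WORDS ((NBITS + 63) / 64)

    /* minimal analog of bitmap_equal(): compare only the first NBITS bits */
    static int bitmap_equal_n(const unsigned long long *a,
                              const unsigned long long *b, unsigned int nbits)
    {
            unsigned int full = nbits / 64, rem = nbits % 64;

            if (memcmp(a, b, full * sizeof(*a)))
                    return 0;
            if (rem) {
                    unsigned long long mask = (1ULL << rem) - 1;

                    if ((a[full] ^ b[full]) & mask)
                            return 0;
            }
            return 1;
    }

    int main(void)
    {
            unsigned long long a[WORDS] = { 0x5 }, b[WORDS] = { 0x5 };

            b[WORDS - 1] |= 1ULL << 63;    /* garbage beyond bit NBITS - 1 */
            printf("memcmp says %s, bitmap_equal says %s\n",
                   memcmp(a, b, sizeof(a)) ? "different" : "equal",
                   bitmap_equal_n(a, b, NBITS) ? "equal" : "different");
            return 0;
    }
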
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index f1793630fc51..4fe45daa6281 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -322,8 +322,8 @@ void __init setup_arch(char **cmdline_p)
riscv_init_cbo_blocksizes();
riscv_fill_hwcap();
- init_rt_signal_env();
apply_boot_alternatives();
+ init_rt_signal_env();
if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) &&
riscv_isa_extension_available(NULL, ZICBOM))
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 94e905eea1de..08378fea3a11 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -215,12 +215,6 @@ static size_t get_rt_frame_size(bool cal_all)
if (cal_all || riscv_v_vstate_query(task_pt_regs(current)))
total_context_size += riscv_v_sc_size;
}
- /*
- * Preserved a __riscv_ctx_hdr for END signal context header if an
- * extension uses __riscv_extra_ext_header
- */
- if (total_context_size)
- total_context_size += sizeof(struct __riscv_ctx_hdr);
frame_size += total_context_size;
diff --git a/arch/riscv/kvm/aia_imsic.c b/arch/riscv/kvm/aia_imsic.c
index a8085cd8215e..29ef9c2133a9 100644
--- a/arch/riscv/kvm/aia_imsic.c
+++ b/arch/riscv/kvm/aia_imsic.c
@@ -974,7 +974,6 @@ int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
if (imsic->vsfile_cpu >= 0) {
writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE);
- kvm_vcpu_kick(vcpu);
} else {
eix = &imsic->swfile->eix[iid / BITS_PER_TYPE(u64)];
set_bit(iid & (BITS_PER_TYPE(u64) - 1), eix->eip);
diff --git a/arch/riscv/kvm/vcpu_sbi_hsm.c b/arch/riscv/kvm/vcpu_sbi_hsm.c
index dce667f4b6ab..3070bb31745d 100644
--- a/arch/riscv/kvm/vcpu_sbi_hsm.c
+++ b/arch/riscv/kvm/vcpu_sbi_hsm.c
@@ -9,6 +9,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
+#include <linux/wordpart.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>
@@ -79,12 +80,12 @@ static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
if (!target_vcpu)
return SBI_ERR_INVALID_PARAM;
- if (!kvm_riscv_vcpu_stopped(target_vcpu))
- return SBI_HSM_STATE_STARTED;
- else if (vcpu->stat.generic.blocking)
+ if (kvm_riscv_vcpu_stopped(target_vcpu))
+ return SBI_HSM_STATE_STOPPED;
+ else if (target_vcpu->stat.generic.blocking)
return SBI_HSM_STATE_SUSPENDED;
else
- return SBI_HSM_STATE_STOPPED;
+ return SBI_HSM_STATE_STARTED;
}
static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
@@ -109,7 +110,7 @@ static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
}
return 0;
case SBI_EXT_HSM_HART_SUSPEND:
- switch (cp->a0) {
+ switch (lower_32_bits(cp->a0)) {
case SBI_HSM_SUSPEND_RET_DEFAULT:
kvm_riscv_vcpu_wfi(vcpu);
break;
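
The SBI spec treats the suspend type as a 32-bit value, so on RV64 the handler must mask off whatever a 32-bit guest leaves in the upper half of a0 rather than mismatching on it; lower_32_bits() does exactly that, and the vcpu_sbi_system.c hunk below applies the same masking. A trivial userspace illustration (the register value here is invented):

    #include <stdio.h>
    #include <stdint.h>

    static inline uint32_t lower_32_bits(uint64_t v)
    {
            return (uint32_t)v;      /* keep only bits 31:0 */
    }

    int main(void)
    {
            /* a 32-bit guest may leave junk in the upper half of a0 */
            uint64_t a0 = 0xdeadbeef00000000ULL;   /* suspend type 0 plus junk */

            printf("raw=0x%llx masked=%u\n",
                   (unsigned long long)a0, lower_32_bits(a0));
            return 0;
    }
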
diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c
index 9c2ab3dfa93a..5fbf3f94f1e8 100644
--- a/arch/riscv/kvm/vcpu_sbi_replace.c
+++ b/arch/riscv/kvm/vcpu_sbi_replace.c
@@ -21,7 +21,7 @@ static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
u64 next_cycle;
if (cp->a6 != SBI_EXT_TIME_SET_TIMER) {
- retdata->err_val = SBI_ERR_INVALID_PARAM;
+ retdata->err_val = SBI_ERR_NOT_SUPPORTED;
return 0;
}
@@ -51,9 +51,10 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
unsigned long hmask = cp->a0;
unsigned long hbase = cp->a1;
+ unsigned long hart_bit = 0, sentmask = 0;
if (cp->a6 != SBI_EXT_IPI_SEND_IPI) {
- retdata->err_val = SBI_ERR_INVALID_PARAM;
+ retdata->err_val = SBI_ERR_NOT_SUPPORTED;
return 0;
}
@@ -62,15 +63,23 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
if (hbase != -1UL) {
if (tmp->vcpu_id < hbase)
continue;
- if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
+ hart_bit = tmp->vcpu_id - hbase;
+ if (hart_bit >= __riscv_xlen)
+ goto done;
+ if (!(hmask & (1UL << hart_bit)))
continue;
}
ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
if (ret < 0)
break;
+ sentmask |= 1UL << hart_bit;
kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD);
}
+done:
+ if (hbase != -1UL && (hmask ^ sentmask))
+ retdata->err_val = SBI_ERR_INVALID_PARAM;
+
return ret;
}
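
With the new sentmask bookkeeping, any bit in hmask that was not actually delivered (because the hart index reaches __riscv_xlen or no such vCPU exists) now surfaces as SBI_ERR_INVALID_PARAM instead of being silently dropped. A compact stand-alone model of that check, using an invented vCPU list and modelling only the hbase != -1UL case:

    #include <stdio.h>

    /* toy model: vCPU ids that exist in the VM */
    static const unsigned int vcpu_ids[] = { 0, 1, 2, 5 };

    /* returns 0 on success, -1 if hmask names harts that do not exist */
    static int send_ipis(unsigned long hmask, unsigned long hbase)
    {
            unsigned long sentmask = 0;

            for (unsigned int i = 0; i < sizeof(vcpu_ids) / sizeof(vcpu_ids[0]); i++) {
                    unsigned long bit;

                    if (vcpu_ids[i] < hbase)
                            continue;
                    bit = vcpu_ids[i] - hbase;
                    if (bit >= 8 * sizeof(unsigned long))
                            break;
                    if (!(hmask & (1UL << bit)))
                            continue;
                    sentmask |= 1UL << bit;     /* "deliver" the IPI */
            }
            return (hmask ^ sentmask) ? -1 : 0;
    }

    int main(void)
    {
            printf("%d\n", send_ipis(0x2f, 0));  /* bit 3 names hart 3, absent: -1 */
            printf("%d\n", send_ipis(0x27, 0));  /* harts 0, 1, 2, 5 all exist: 0 */
            return 0;
    }
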
diff --git a/arch/riscv/kvm/vcpu_sbi_system.c b/arch/riscv/kvm/vcpu_sbi_system.c
index 5d55e08791fa..bc0ebba89003 100644
--- a/arch/riscv/kvm/vcpu_sbi_system.c
+++ b/arch/riscv/kvm/vcpu_sbi_system.c
@@ -4,6 +4,7 @@
*/
#include <linux/kvm_host.h>
+#include <linux/wordpart.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/sbi.h>
@@ -19,7 +20,7 @@ static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
switch (funcid) {
case SBI_EXT_SUSP_SYSTEM_SUSPEND:
- if (cp->a0 != SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM) {
+ if (lower_32_bits(cp->a0) != SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM) {
retdata->err_val = SBI_ERR_INVALID_PARAM;
return 0;
}
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index 42314f093922..b4a78a4b35cf 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -293,7 +293,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr,
- pte_t *ptep)
+ pte_t *ptep, unsigned long sz)
{
pte_t orig_pte = ptep_get(ptep);
int pte_num;
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 7c52acaf9f82..663e87220e89 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -25,8 +25,16 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_HUGE_PTEP_GET
pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+pte_t __huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
+
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep,
+ unsigned long sz)
+{
+ return __huge_ptep_get_and_clear(mm, addr, ptep);
+}
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
@@ -48,7 +56,7 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
- return huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
+ return __huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
}
#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
@@ -59,7 +67,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
int changed = !pte_same(huge_ptep_get(vma->vm_mm, addr, ptep), pte);
if (changed) {
- huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ __huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
return changed;
@@ -69,7 +77,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
+ pte_t pte = __huge_ptep_get_and_clear(mm, addr, ptep);
__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
}
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 63ba6306632e..e540b022ceb2 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -266,12 +266,13 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs)
{
unsigned long *parent = &arch_ftrace_regs(fregs)->regs.gprs[14];
+ unsigned long sp = arch_ftrace_regs(fregs)->regs.gprs[15];
if (unlikely(ftrace_graph_is_dead()))
return;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
- if (!function_graph_enter_regs(*parent, ip, 0, parent, fregs))
+ if (!function_graph_enter_regs(*parent, ip, 0, (unsigned long *)sp, fregs))
*parent = (unsigned long)&return_to_handler;
}
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 24fee11b030d..b746213d3110 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -285,10 +285,10 @@ static void __init test_monitor_call(void)
return;
asm volatile(
" mc 0,0\n"
- "0: xgr %0,%0\n"
+ "0: lhi %[val],0\n"
"1:\n"
- EX_TABLE(0b,1b)
- : "+d" (val));
+ EX_TABLE(0b, 1b)
+ : [val] "+d" (val));
if (!val)
panic("Monitor call doesn't work!\n");
}
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index d9ce199953de..2e568f175cd4 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -188,8 +188,8 @@ pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
return __rste_to_pte(pte_val(*ptep));
}
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
+pte_t __huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
{
pte_t pte = huge_ptep_get(mm, addr, ptep);
pmd_t *pmdp = (pmd_t *) ptep;
diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
index c714ca6a05aa..e7a9cdd498dc 100644
--- a/arch/sparc/include/asm/hugetlb.h
+++ b/arch/sparc/include/asm/hugetlb.h
@@ -20,7 +20,7 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep);
+ pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index eee601a0d2cf..80504148d8a5 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -260,7 +260,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
+ pte_t *ptep, unsigned long sz)
{
unsigned int i, nptes, orig_shift, shift;
unsigned long size;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index be2c311f5118..0e27ebd7e36a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1341,6 +1341,7 @@ config X86_REBOOTFIXUPS
config MICROCODE
def_bool y
depends on CPU_SUP_AMD || CPU_SUP_INTEL
+ select CRYPTO_LIB_SHA256 if CPU_SUP_AMD
config MICROCODE_INITRD32
def_bool y
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
index c882e1f67af0..d8c5de40669d 100644
--- a/arch/x86/boot/compressed/pgtable_64.c
+++ b/arch/x86/boot/compressed/pgtable_64.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "misc.h"
#include <asm/bootparam.h>
+#include <asm/bootparam_utils.h>
#include <asm/e820/types.h>
#include <asm/processor.h>
#include "pgtable.h"
@@ -107,6 +108,7 @@ asmlinkage void configure_5level_paging(struct boot_params *bp, void *pgtable)
bool l5_required = false;
/* Initialize boot_params. Required for cmdline_find_option_bool(). */
+ sanitize_boot_params(bp);
boot_params_ptr = bp;
/*
diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index 82492efc5d94..96c7bc698e6b 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -2853,19 +2853,8 @@ struct snp_msg_desc *snp_msg_alloc(void)
if (!mdesc->response)
goto e_free_request;
- mdesc->certs_data = alloc_shared_pages(SEV_FW_BLOB_MAX_SIZE);
- if (!mdesc->certs_data)
- goto e_free_response;
-
- /* initial the input address for guest request */
- mdesc->input.req_gpa = __pa(mdesc->request);
- mdesc->input.resp_gpa = __pa(mdesc->response);
- mdesc->input.data_gpa = __pa(mdesc->certs_data);
-
return mdesc;
-e_free_response:
- free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg));
e_free_request:
free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg));
e_unmap:
@@ -2885,7 +2874,6 @@ void snp_msg_free(struct snp_msg_desc *mdesc)
kfree(mdesc->ctx);
free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg));
free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg));
- free_shared_pages(mdesc->certs_data, SEV_FW_BLOB_MAX_SIZE);
iounmap((__force void __iomem *)mdesc->secrets);
memset(mdesc, 0, sizeof(*mdesc));
@@ -3054,7 +3042,7 @@ retry_request:
* sequence number must be incremented or the VMPCK must be deleted to
* prevent reuse of the IV.
*/
- rc = snp_issue_guest_request(req, &mdesc->input, rio);
+ rc = snp_issue_guest_request(req, &req->input, rio);
switch (rc) {
case -ENOSPC:
/*
@@ -3064,7 +3052,7 @@ retry_request:
* order to increment the sequence number and thus avoid
* IV reuse.
*/
- override_npages = mdesc->input.data_npages;
+ override_npages = req->input.data_npages;
req->exit_code = SVM_VMGEXIT_GUEST_REQUEST;
/*
@@ -3120,7 +3108,7 @@ retry_request:
}
if (override_npages)
- mdesc->input.data_npages = override_npages;
+ req->input.data_npages = override_npages;
return rc;
}
@@ -3158,6 +3146,11 @@ int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req
*/
memcpy(mdesc->request, &mdesc->secret_request, sizeof(mdesc->secret_request));
+ /* Initialize the input address for guest request */
+ req->input.req_gpa = __pa(mdesc->request);
+ req->input.resp_gpa = __pa(mdesc->response);
+ req->input.data_gpa = req->certs_data ? __pa(req->certs_data) : 0;
+
rc = __handle_guest_request(mdesc, req, rio);
if (rc) {
if (rc == -EIO &&
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 94941c5a10ac..14db5b85114c 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -190,6 +190,7 @@ static __always_inline bool int80_is_external(void)
/**
* do_int80_emulation - 32-bit legacy syscall C entry from asm
+ * @regs: syscall arguments in struct pt_regs on the stack.
*
* This entry point can be used by 32-bit and 64-bit programs to perform
* 32-bit system calls. Instances of INT $0x80 can be found inline in
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 8f218ac0d445..2092d615333d 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -628,7 +628,7 @@ int x86_pmu_hw_config(struct perf_event *event)
if (event->attr.type == event->pmu->type)
event->hw.config |= x86_pmu_get_event_config(event);
- if (event->attr.sample_period && x86_pmu.limit_period) {
+ if (!event->attr.freq && x86_pmu.limit_period) {
s64 left = event->attr.sample_period;
x86_pmu.limit_period(event, &left);
if (left > event->attr.sample_period)
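
The changed condition reflects the fact that sample_period and sample_freq share one storage slot in struct perf_event_attr: in freq mode the field holds a target rate in Hz, so "sample_period is non-zero" is also true there and the old check wrongly ran limit_period() on a frequency. A small sketch of that aliasing, with an invented struct mirroring only the relevant fields:

    #include <stdio.h>

    /*
     * Mirrors the union in struct perf_event_attr: one 64-bit field holds
     * either a fixed period (freq == 0) or a target rate in Hz (freq == 1),
     * so a "is sample_period non-zero?" test cannot tell the two apart.
     */
    struct attr {
            union {
                    unsigned long long sample_period;
                    unsigned long long sample_freq;
            };
            unsigned int freq : 1;
    };

    int main(void)
    {
            struct attr fixed     = { .sample_period = 100000, .freq = 0 };
            struct attr freq_mode = { .sample_freq   = 4000,   .freq = 1 };

            printf("fixed: period=%llu\n", fixed.sample_period);
            printf("freq : sample_period aliases the rate: %llu\n",
                   freq_mode.sample_period);
            return 0;
    }
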
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index cdcebf30468a..cdb19e3ba3aa 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3952,6 +3952,85 @@ static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
}
+static u64 intel_pmu_freq_start_period(struct perf_event *event)
+{
+ int type = event->attr.type;
+ u64 config, factor;
+ s64 start;
+
+ /*
+ * The 127 is the lowest possible recommended SAV (sample after value)
+ * for a 4000 freq (default freq), according to the event list JSON file.
+ * Also, assume the workload is idle 50% of the time.
+ */
+ factor = 64 * 4000;
+ if (type != PERF_TYPE_HARDWARE && type != PERF_TYPE_HW_CACHE)
+ goto end;
+
+ /*
+ * The estimate of the start period in freq mode is based on the
+ * following assumptions:
+ *
+ * For a cycles or an instructions event, 1 GHz on the underlying
+ * platform and 1 IPC, with the workload idle 50% of the time.
+ * The start period = 1,000,000,000 * 1 / freq / 2.
+ * = 500,000,000 / freq
+ *
+ * Usually, the branch-related events occur less often than the
+ * instructions event. According to the Intel event list JSON
+ * file, the SAV (sample after value) of a branch-related event
+ * is usually 1/4 of an instruction event.
+ * The start period of branch-related events = 125,000,000 / freq.
+ *
+ * The cache-related events occur even less often. The SAV is usually
+ * 1/20 of an instruction event.
+ * The start period of cache-related events = 25,000,000 / freq.
+ */
+ config = event->attr.config & PERF_HW_EVENT_MASK;
+ if (type == PERF_TYPE_HARDWARE) {
+ switch (config) {
+ case PERF_COUNT_HW_CPU_CYCLES:
+ case PERF_COUNT_HW_INSTRUCTIONS:
+ case PERF_COUNT_HW_BUS_CYCLES:
+ case PERF_COUNT_HW_STALLED_CYCLES_FRONTEND:
+ case PERF_COUNT_HW_STALLED_CYCLES_BACKEND:
+ case PERF_COUNT_HW_REF_CPU_CYCLES:
+ factor = 500000000;
+ break;
+ case PERF_COUNT_HW_BRANCH_INSTRUCTIONS:
+ case PERF_COUNT_HW_BRANCH_MISSES:
+ factor = 125000000;
+ break;
+ case PERF_COUNT_HW_CACHE_REFERENCES:
+ case PERF_COUNT_HW_CACHE_MISSES:
+ factor = 25000000;
+ break;
+ default:
+ goto end;
+ }
+ }
+
+ if (type == PERF_TYPE_HW_CACHE)
+ factor = 25000000;
+end:
+ /*
+ * Usually, a prime or a number with few factors (close to prime)
+ * is chosen as an SAV, which makes it less likely that the sampling
+ * period synchronizes with some periodic event in the workload.
+ * Subtract 1 so that, at least for the default freq, the result
+ * avoids values near powers of two.
+ */
+ start = DIV_ROUND_UP_ULL(factor, event->attr.sample_freq) - 1;
+
+ if (start > x86_pmu.max_period)
+ start = x86_pmu.max_period;
+
+ if (x86_pmu.limit_period)
+ x86_pmu.limit_period(event, &start);
+
+ return start;
+}
+
static int intel_pmu_hw_config(struct perf_event *event)
{
int ret = x86_pmu_hw_config(event);
@@ -3963,6 +4042,12 @@ static int intel_pmu_hw_config(struct perf_event *event)
if (ret)
return ret;
+ if (event->attr.freq && event->attr.sample_freq) {
+ event->hw.sample_period = intel_pmu_freq_start_period(event);
+ event->hw.last_period = event->hw.sample_period;
+ local64_set(&event->hw.period_left, event->hw.sample_period);
+ }
+
if (event->attr.precise_ip) {
if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
return -EINVAL;
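
As a back-of-the-envelope check of the heuristic added above, at the default 4000 Hz target rate the starting periods come out as roughly 125000, 31250 and 6250 before the minus-one adjustment; the snippet below just evaluates the formula from the comment, it is not measured data:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long long freq = 4000;                   /* default perf freq */
            unsigned long long factors[] = { 500000000ULL,    /* cycles/instructions */
                                             125000000ULL,    /* branch events */
                                             25000000ULL };   /* cache events */

            for (int i = 0; i < 3; i++)
                    printf("start period = %llu\n",
                           DIV_ROUND_UP(factors[i], freq) - 1);
            return 0;
    }
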
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index 4952faf03e82..6941f4811bec 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -879,6 +879,7 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
X86_MATCH_VFM(INTEL_METEORLAKE_L, &model_skl),
X86_MATCH_VFM(INTEL_ARROWLAKE_H, &model_skl),
X86_MATCH_VFM(INTEL_ARROWLAKE, &model_skl),
+ X86_MATCH_VFM(INTEL_ARROWLAKE_U, &model_skl),
X86_MATCH_VFM(INTEL_LUNARLAKE_M, &model_skl),
{},
};
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 0b7af5902ff7..32ae3aa50c7e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -780,6 +780,7 @@ struct kvm_vcpu_arch {
u32 pkru;
u32 hflags;
u64 efer;
+ u64 host_debugctl;
u64 apic_base;
struct kvm_lapic *apic; /* kernel irqchip context */
bool load_eoi_exitmap_pending;
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 7e8bf78c03d5..aee26bb8230f 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -198,9 +198,8 @@
.endm
/*
- * Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call
- * to the retpoline thunk with a CS prefix when the register requires
- * a RAX prefix byte to encode. Also see apply_retpolines().
+ * Emits a conditional CS prefix that is compatible with
+ * -mindirect-branch-cs-prefix.
*/
.macro __CS_PREFIX reg:req
.irp rs,r8,r9,r10,r11,r12,r13,r14,r15
@@ -421,19 +420,26 @@ static inline void call_depth_return_thunk(void) {}
#ifdef CONFIG_X86_64
/*
+ * Emits a conditional CS prefix that is compatible with
+ * -mindirect-branch-cs-prefix.
+ */
+#define __CS_PREFIX(reg) \
+ ".irp rs,r8,r9,r10,r11,r12,r13,r14,r15\n" \
+ ".ifc \\rs," reg "\n" \
+ ".byte 0x2e\n" \
+ ".endif\n" \
+ ".endr\n"
+
+/*
* Inline asm uses the %V modifier which is only in newer GCC
* which is ensured when CONFIG_MITIGATION_RETPOLINE is defined.
*/
-# define CALL_NOSPEC \
- ALTERNATIVE_2( \
- ANNOTATE_RETPOLINE_SAFE \
- "call *%[thunk_target]\n", \
- "call __x86_indirect_thunk_%V[thunk_target]\n", \
- X86_FEATURE_RETPOLINE, \
- "lfence;\n" \
- ANNOTATE_RETPOLINE_SAFE \
- "call *%[thunk_target]\n", \
- X86_FEATURE_RETPOLINE_LFENCE)
+#ifdef CONFIG_MITIGATION_RETPOLINE
+#define CALL_NOSPEC __CS_PREFIX("%V[thunk_target]") \
+ "call __x86_indirect_thunk_%V[thunk_target]\n"
+#else
+#define CALL_NOSPEC "call *%[thunk_target]\n"
+#endif
# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
diff --git a/arch/x86/include/asm/pgtable-2level_types.h b/arch/x86/include/asm/pgtable-2level_types.h
index 7f6ccff0ba72..4a12c276b181 100644
--- a/arch/x86/include/asm/pgtable-2level_types.h
+++ b/arch/x86/include/asm/pgtable-2level_types.h
@@ -23,17 +23,17 @@ typedef union {
#define ARCH_PAGE_TABLE_SYNC_MASK PGTBL_PMD_MODIFIED
/*
- * traditional i386 two-level paging structure:
+ * Traditional i386 two-level paging structure:
*/
#define PGDIR_SHIFT 22
#define PTRS_PER_PGD 1024
-
/*
- * the i386 is two-level, so we don't really have any
- * PMD directory physically.
+ * The i386 is two-level, so we don't really have any
+ * PMD directory physically:
*/
+#define PTRS_PER_PMD 1
#define PTRS_PER_PTE 1024
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 1581246491b5..ba7999f66abe 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -203,6 +203,9 @@ struct snp_guest_req {
unsigned int vmpck_id;
u8 msg_version;
u8 msg_type;
+
+ struct snp_req_data input;
+ void *certs_data;
};
/*
@@ -263,9 +266,6 @@ struct snp_msg_desc {
struct snp_guest_msg secret_request, secret_response;
struct snp_secrets_page *secrets;
- struct snp_req_data input;
-
- void *certs_data;
struct aesgcm_ctx *ctx;
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 11fac09e3a8c..67e773744edb 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -143,7 +143,6 @@ bool __init early_is_amd_nb(u32 device)
struct resource *amd_get_mmconfig_range(struct resource *res)
{
- u32 address;
u64 base, msr;
unsigned int segn_busn_bits;
@@ -151,13 +150,11 @@ struct resource *amd_get_mmconfig_range(struct resource *res)
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
return NULL;
- /* assume all cpus from fam10h have mmconfig */
- if (boot_cpu_data.x86 < 0x10)
+ /* Assume CPUs from Fam10h have mmconfig, although not all VMs do */
+ if (boot_cpu_data.x86 < 0x10 ||
+ rdmsrl_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr))
return NULL;
- address = MSR_FAM10H_MMIO_CONF_BASE;
- rdmsrl(address, msr);
-
/* mmconfig is not enabled */
if (!(msr & FAM10H_MMIO_CONF_ENABLE))
return NULL;
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index e6fa03ed9172..a6c6bccfa8b8 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -808,7 +808,7 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
/* If bit 31 is set, this is an unknown format */
- for (j = 0 ; j < 3 ; j++)
+ for (j = 0 ; j < 4 ; j++)
if (regs[j] & (1 << 31))
regs[j] = 0;
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 9651275aecd1..dfec2c61e354 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -153,8 +153,8 @@ static void geode_configure(void)
u8 ccr3;
local_irq_save(flags);
- /* Suspend on halt power saving and enable #SUSP pin */
- setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
+ /* Suspend on halt power saving */
+ setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x08);
ccr3 = getCx86(CX86_CCR3);
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 3dce22f00dc3..134368a3f4b1 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -635,26 +635,37 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
}
#endif
-#define TLB_INST_4K 0x01
-#define TLB_INST_4M 0x02
-#define TLB_INST_2M_4M 0x03
+#define TLB_INST_4K 0x01
+#define TLB_INST_4M 0x02
+#define TLB_INST_2M_4M 0x03
-#define TLB_INST_ALL 0x05
-#define TLB_INST_1G 0x06
+#define TLB_INST_ALL 0x05
+#define TLB_INST_1G 0x06
-#define TLB_DATA_4K 0x11
-#define TLB_DATA_4M 0x12
-#define TLB_DATA_2M_4M 0x13
-#define TLB_DATA_4K_4M 0x14
+#define TLB_DATA_4K 0x11
+#define TLB_DATA_4M 0x12
+#define TLB_DATA_2M_4M 0x13
+#define TLB_DATA_4K_4M 0x14
-#define TLB_DATA_1G 0x16
+#define TLB_DATA_1G 0x16
+#define TLB_DATA_1G_2M_4M 0x17
-#define TLB_DATA0_4K 0x21
-#define TLB_DATA0_4M 0x22
-#define TLB_DATA0_2M_4M 0x23
+#define TLB_DATA0_4K 0x21
+#define TLB_DATA0_4M 0x22
+#define TLB_DATA0_2M_4M 0x23
-#define STLB_4K 0x41
-#define STLB_4K_2M 0x42
+#define STLB_4K 0x41
+#define STLB_4K_2M 0x42
+
+/*
+ * Each of leaf 0x2's one-byte TLB descriptors implies the same number of
+ * entries for its respective TLB type. The 0x63 descriptor is an
+ * exception: it implies 4 dTLB entries for 1GB pages and 32 dTLB entries
+ * for 2MB or 4MB pages. Encode descriptor 0x63's dTLB entry count for
+ * 2MB/4MB pages here, as its count for dTLB 1GB pages is already encoded
+ * in the intel_tlb_table[] mapping.
+ */
+#define TLB_0x63_2M_4M_ENTRIES 32
static const struct _tlb_table intel_tlb_table[] = {
{ 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" },
@@ -676,7 +687,8 @@ static const struct _tlb_table intel_tlb_table[] = {
{ 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" },
{ 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
{ 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" },
- { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" },
+ { 0x63, TLB_DATA_1G_2M_4M, 4, " TLB_DATA 1 GByte pages, 4-way set associative"
+ " (plus 32 entries TLB_DATA 2 MByte or 4 MByte pages, not encoded here)" },
{ 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" },
{ 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
{ 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" },
@@ -776,6 +788,12 @@ static void intel_tlb_lookup(const unsigned char desc)
if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
break;
+ case TLB_DATA_1G_2M_4M:
+ if (tlb_lld_2m[ENTRIES] < TLB_0x63_2M_4M_ENTRIES)
+ tlb_lld_2m[ENTRIES] = TLB_0x63_2M_4M_ENTRIES;
+ if (tlb_lld_4m[ENTRIES] < TLB_0x63_2M_4M_ENTRIES)
+ tlb_lld_4m[ENTRIES] = TLB_0x63_2M_4M_ENTRIES;
+ fallthrough;
case TLB_DATA_1G:
if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
@@ -799,7 +817,7 @@ static void intel_detect_tlb(struct cpuinfo_x86 *c)
cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
/* If bit 31 is set, this is an unknown format */
- for (j = 0 ; j < 3 ; j++)
+ for (j = 0 ; j < 4 ; j++)
if (regs[j] & (1 << 31))
regs[j] = 0;
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index a5dac7f3c0a0..c69b1bc45483 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -23,14 +23,18 @@
#include <linux/earlycpio.h>
#include <linux/firmware.h>
+#include <linux/bsearch.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <crypto/sha2.h>
+
#include <asm/microcode.h>
#include <asm/processor.h>
+#include <asm/cmdline.h>
#include <asm/setup.h>
#include <asm/cpu.h>
#include <asm/msr.h>
@@ -145,6 +149,113 @@ ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
*/
static u32 bsp_cpuid_1_eax __ro_after_init;
+static bool sha_check = true;
+
+struct patch_digest {
+ u32 patch_id;
+ u8 sha256[SHA256_DIGEST_SIZE];
+};
+
+#include "amd_shas.c"
+
+static int cmp_id(const void *key, const void *elem)
+{
+ struct patch_digest *pd = (struct patch_digest *)elem;
+ u32 patch_id = *(u32 *)key;
+
+ if (patch_id == pd->patch_id)
+ return 0;
+ else if (patch_id < pd->patch_id)
+ return -1;
+ else
+ return 1;
+}
+
+static bool need_sha_check(u32 cur_rev)
+{
+ switch (cur_rev >> 8) {
+ case 0x80012: return cur_rev <= 0x800126f; break;
+ case 0x80082: return cur_rev <= 0x800820f; break;
+ case 0x83010: return cur_rev <= 0x830107c; break;
+ case 0x86001: return cur_rev <= 0x860010e; break;
+ case 0x86081: return cur_rev <= 0x8608108; break;
+ case 0x87010: return cur_rev <= 0x8701034; break;
+ case 0x8a000: return cur_rev <= 0x8a0000a; break;
+ case 0xa0010: return cur_rev <= 0xa00107a; break;
+ case 0xa0011: return cur_rev <= 0xa0011da; break;
+ case 0xa0012: return cur_rev <= 0xa001243; break;
+ case 0xa0082: return cur_rev <= 0xa00820e; break;
+ case 0xa1011: return cur_rev <= 0xa101153; break;
+ case 0xa1012: return cur_rev <= 0xa10124e; break;
+ case 0xa1081: return cur_rev <= 0xa108109; break;
+ case 0xa2010: return cur_rev <= 0xa20102f; break;
+ case 0xa2012: return cur_rev <= 0xa201212; break;
+ case 0xa4041: return cur_rev <= 0xa404109; break;
+ case 0xa5000: return cur_rev <= 0xa500013; break;
+ case 0xa6012: return cur_rev <= 0xa60120a; break;
+ case 0xa7041: return cur_rev <= 0xa704109; break;
+ case 0xa7052: return cur_rev <= 0xa705208; break;
+ case 0xa7080: return cur_rev <= 0xa708009; break;
+ case 0xa70c0: return cur_rev <= 0xa70C009; break;
+ case 0xaa001: return cur_rev <= 0xaa00116; break;
+ case 0xaa002: return cur_rev <= 0xaa00218; break;
+ default: break;
+ }
+
+ pr_info("You should not be seeing this. Please send the following couple of lines to x86-<at>-kernel.org\n");
+ pr_info("CPUID(1).EAX: 0x%x, current revision: 0x%x\n", bsp_cpuid_1_eax, cur_rev);
+ return true;
+}
+
+static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
+{
+ struct patch_digest *pd = NULL;
+ u8 digest[SHA256_DIGEST_SIZE];
+ struct sha256_state s;
+ int i;
+
+ if (x86_family(bsp_cpuid_1_eax) < 0x17 ||
+ x86_family(bsp_cpuid_1_eax) > 0x19)
+ return true;
+
+ if (!need_sha_check(cur_rev))
+ return true;
+
+ if (!sha_check)
+ return true;
+
+ pd = bsearch(&patch_id, phashes, ARRAY_SIZE(phashes), sizeof(struct patch_digest), cmp_id);
+ if (!pd) {
+ pr_err("No sha256 digest for patch ID: 0x%x found\n", patch_id);
+ return false;
+ }
+
+ sha256_init(&s);
+ sha256_update(&s, data, len);
+ sha256_final(&s, digest);
+
+ if (memcmp(digest, pd->sha256, sizeof(digest))) {
+ pr_err("Patch 0x%x SHA256 digest mismatch!\n", patch_id);
+
+ for (i = 0; i < SHA256_DIGEST_SIZE; i++)
+ pr_cont("0x%x ", digest[i]);
+ pr_info("\n");
+
+ return false;
+ }
+
+ return true;
+}
+
+static u32 get_patch_level(void)
+{
+ u32 rev, dummy __always_unused;
+
+ native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+
+ return rev;
+}
+
static union cpuid_1_eax ucode_rev_to_cpuid(unsigned int val)
{
union zen_patch_rev p;
@@ -246,8 +357,7 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
* On success, @sh_psize returns the patch size according to the section header,
* to the caller.
*/
-static bool
-__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
+static bool __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
{
u32 p_type, p_size;
const u32 *hdr;
@@ -484,10 +594,13 @@ static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
}
}
-static bool __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize)
+static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev,
+ unsigned int psize)
{
unsigned long p_addr = (unsigned long)&mc->hdr.data_code;
- u32 rev, dummy;
+
+ if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize))
+ return -1;
native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr);
@@ -505,47 +618,13 @@ static bool __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize)
}
/* verify patch application was successful */
- native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-
- if (rev != mc->hdr.patch_id)
+ *cur_rev = get_patch_level();
+ if (*cur_rev != mc->hdr.patch_id)
return false;
return true;
}
-/*
- * Early load occurs before we can vmalloc(). So we look for the microcode
- * patch container file in initrd, traverse equivalent cpu table, look for a
- * matching microcode patch, and update, all in initrd memory in place.
- * When vmalloc() is available for use later -- on 64-bit during first AP load,
- * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
- * load_microcode_amd() to save equivalent cpu table and microcode patches in
- * kernel heap memory.
- *
- * Returns true if container found (sets @desc), false otherwise.
- */
-static bool early_apply_microcode(u32 old_rev, void *ucode, size_t size)
-{
- struct cont_desc desc = { 0 };
- struct microcode_amd *mc;
-
- scan_containers(ucode, size, &desc);
-
- mc = desc.mc;
- if (!mc)
- return false;
-
- /*
- * Allow application of the same revision to pick up SMT-specific
- * changes even if the revision of the other SMT thread is already
- * up-to-date.
- */
- if (old_rev > mc->hdr.patch_id)
- return false;
-
- return __apply_microcode_amd(mc, desc.psize);
-}
-
static bool get_builtin_microcode(struct cpio_data *cp)
{
char fw_name[36] = "amd-ucode/microcode_amd.bin";
@@ -583,14 +662,35 @@ static bool __init find_blobs_in_containers(struct cpio_data *ret)
return found;
}
+/*
+ * Early load occurs before we can vmalloc(). So we look for the microcode
+ * patch container file in initrd, traverse equivalent cpu table, look for a
+ * matching microcode patch, and update, all in initrd memory in place.
+ * When vmalloc() is available for use later -- on 64-bit during first AP load,
+ * and on 32-bit during save_microcode_in_initrd() -- we can call
+ * load_microcode_amd() to save equivalent cpu table and microcode patches in
+ * kernel heap memory.
+ */
void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
{
+ struct cont_desc desc = { };
+ struct microcode_amd *mc;
struct cpio_data cp = { };
- u32 dummy;
+ char buf[4];
+ u32 rev;
+
+ if (cmdline_find_option(boot_command_line, "microcode.amd_sha_check", buf, 4)) {
+ if (!strncmp(buf, "off", 3)) {
+ sha_check = false;
+ pr_warn_once("It is a very very bad idea to disable the blobs SHA check!\n");
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+ }
+ }
bsp_cpuid_1_eax = cpuid_1_eax;
- native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->old_rev, dummy);
+ rev = get_patch_level();
+ ed->old_rev = rev;
/* Needed in load_microcode_amd() */
ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;
@@ -598,37 +698,23 @@ void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_
if (!find_blobs_in_containers(&cp))
return;
- if (early_apply_microcode(ed->old_rev, cp.data, cp.size))
- native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy);
-}
-
-static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size);
-
-static int __init save_microcode_in_initrd(void)
-{
- unsigned int cpuid_1_eax = native_cpuid_eax(1);
- struct cpuinfo_x86 *c = &boot_cpu_data;
- struct cont_desc desc = { 0 };
- enum ucode_state ret;
- struct cpio_data cp;
-
- if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
- return 0;
-
- if (!find_blobs_in_containers(&cp))
- return -EINVAL;
-
scan_containers(cp.data, cp.size, &desc);
- if (!desc.mc)
- return -EINVAL;
- ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
- if (ret > UCODE_UPDATED)
- return -EINVAL;
+ mc = desc.mc;
+ if (!mc)
+ return;
- return 0;
+ /*
+ * Allow application of the same revision to pick up SMT-specific
+ * changes even if the revision of the other SMT thread is already
+ * up-to-date.
+ */
+ if (ed->old_rev > mc->hdr.patch_id)
+ return;
+
+ if (__apply_microcode_amd(mc, &rev, desc.psize))
+ ed->new_rev = rev;
}
-early_initcall(save_microcode_in_initrd);
static inline bool patch_cpus_equivalent(struct ucode_patch *p,
struct ucode_patch *n,
@@ -729,14 +815,9 @@ static void free_cache(void)
static struct ucode_patch *find_patch(unsigned int cpu)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- u32 rev, dummy __always_unused;
u16 equiv_id = 0;
- /* fetch rev if not populated yet: */
- if (!uci->cpu_sig.rev) {
- rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
- uci->cpu_sig.rev = rev;
- }
+ uci->cpu_sig.rev = get_patch_level();
if (x86_family(bsp_cpuid_1_eax) < 0x17) {
equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
@@ -759,22 +840,20 @@ void reload_ucode_amd(unsigned int cpu)
mc = p->data;
- rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-
+ rev = get_patch_level();
if (rev < mc->hdr.patch_id) {
- if (__apply_microcode_amd(mc, p->size))
- pr_info_once("reload revision: 0x%08x\n", mc->hdr.patch_id);
+ if (__apply_microcode_amd(mc, &rev, p->size))
+ pr_info_once("reload revision: 0x%08x\n", rev);
}
}
static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
- struct cpuinfo_x86 *c = &cpu_data(cpu);
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
struct ucode_patch *p;
csig->sig = cpuid_eax(0x00000001);
- csig->rev = c->microcode;
+ csig->rev = get_patch_level();
/*
* a patch could have been loaded early, set uci->mc so that
@@ -815,7 +894,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
goto out;
}
- if (!__apply_microcode_amd(mc_amd, p->size)) {
+ if (!__apply_microcode_amd(mc_amd, &rev, p->size)) {
pr_err("CPU%d: update failed for patch_level=0x%08x\n",
cpu, mc_amd->hdr.patch_id);
return UCODE_ERROR;
@@ -937,8 +1016,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
}
/* Scan the blob in @data and add microcode patches to the cache. */
-static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
- size_t size)
+static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, size_t size)
{
u8 *fw = (u8 *)data;
size_t offset;
@@ -1013,6 +1091,32 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
return ret;
}
+static int __init save_microcode_in_initrd(void)
+{
+ unsigned int cpuid_1_eax = native_cpuid_eax(1);
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+ struct cont_desc desc = { 0 };
+ enum ucode_state ret;
+ struct cpio_data cp;
+
+ if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
+ return 0;
+
+ if (!find_blobs_in_containers(&cp))
+ return -EINVAL;
+
+ scan_containers(cp.data, cp.size, &desc);
+ if (!desc.mc)
+ return -EINVAL;
+
+ ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
+ if (ret > UCODE_UPDATED)
+ return -EINVAL;
+
+ return 0;
+}
+early_initcall(save_microcode_in_initrd);
+
/*
* AMD microcode firmware naming convention, up to family 15h they are in
* the legacy file:
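
The new verify_sha256_digest() path pins each affected patch revision to a known SHA-256 and refuses to apply anything that does not match; the lookup itself is a plain bsearch() over the sorted table in amd_shas.c. A stand-alone sketch of just the lookup-and-compare part — the real code first hashes the patch body with the kernel's sha256 helpers, and the IDs and digest bytes below are placeholders, not the shipped values:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <stdint.h>

    #define DIGEST_SIZE 32

    struct patch_digest {
            uint32_t patch_id;
            uint8_t sha256[DIGEST_SIZE];
    };

    /* table must stay sorted by patch_id for bsearch() to work */
    static const struct patch_digest phashes[] = {
            { 0x8001227, { 0x99, 0xc0 /* remaining bytes zero; placeholder digest */ } },
            { 0x8001250, { 0xc0, 0x0b /* placeholder digest */ } },
    };

    static int cmp_id(const void *key, const void *elem)
    {
            uint32_t patch_id = *(const uint32_t *)key;
            const struct patch_digest *pd = elem;

            if (patch_id == pd->patch_id)
                    return 0;
            return patch_id < pd->patch_id ? -1 : 1;
    }

    /* returns 1 if 'digest' matches the pinned hash for 'patch_id' */
    static int digest_ok(uint32_t patch_id, const uint8_t *digest)
    {
            const struct patch_digest *pd =
                    bsearch(&patch_id, phashes,
                            sizeof(phashes) / sizeof(phashes[0]),
                            sizeof(phashes[0]), cmp_id);

            return pd && !memcmp(digest, pd->sha256, DIGEST_SIZE);
    }

    int main(void)
    {
            uint8_t fake[DIGEST_SIZE] = { 0 };

            printf("%d\n", digest_ok(0x8001227, fake)); /* 0: digest mismatch */
            printf("%d\n", digest_ok(0x9999999, fake)); /* 0: unknown patch id */
            return 0;
    }
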
diff --git a/arch/x86/kernel/cpu/microcode/amd_shas.c b/arch/x86/kernel/cpu/microcode/amd_shas.c
new file mode 100644
index 000000000000..2a1655b1fdd8
--- /dev/null
+++ b/arch/x86/kernel/cpu/microcode/amd_shas.c
@@ -0,0 +1,444 @@
+/* Keep 'em sorted. */
+static const struct patch_digest phashes[] = {
+ { 0x8001227, {
+ 0x99,0xc0,0x9b,0x2b,0xcc,0x9f,0x52,0x1b,
+ 0x1a,0x5f,0x1d,0x83,0xa1,0x6c,0xc4,0x46,
+ 0xe2,0x6c,0xda,0x73,0xfb,0x2d,0x23,0xa8,
+ 0x77,0xdc,0x15,0x31,0x33,0x4a,0x46,0x18,
+ }
+ },
+ { 0x8001250, {
+ 0xc0,0x0b,0x6b,0x19,0xfd,0x5c,0x39,0x60,
+ 0xd5,0xc3,0x57,0x46,0x54,0xe4,0xd1,0xaa,
+ 0xa8,0xf7,0x1f,0xa8,0x6a,0x60,0x3e,0xe3,
+ 0x27,0x39,0x8e,0x53,0x30,0xf8,0x49,0x19,
+ }
+ },
+ { 0x800126e, {
+ 0xf3,0x8b,0x2b,0xb6,0x34,0xe3,0xc8,0x2c,
+ 0xef,0xec,0x63,0x6d,0xc8,0x76,0x77,0xb3,
+ 0x25,0x5a,0xb7,0x52,0x8c,0x83,0x26,0xe6,
+ 0x4c,0xbe,0xbf,0xe9,0x7d,0x22,0x6a,0x43,
+ }
+ },
+ { 0x800126f, {
+ 0x2b,0x5a,0xf2,0x9c,0xdd,0xd2,0x7f,0xec,
+ 0xec,0x96,0x09,0x57,0xb0,0x96,0x29,0x8b,
+ 0x2e,0x26,0x91,0xf0,0x49,0x33,0x42,0x18,
+ 0xdd,0x4b,0x65,0x5a,0xd4,0x15,0x3d,0x33,
+ }
+ },
+ { 0x800820d, {
+ 0x68,0x98,0x83,0xcd,0x22,0x0d,0xdd,0x59,
+ 0x73,0x2c,0x5b,0x37,0x1f,0x84,0x0e,0x67,
+ 0x96,0x43,0x83,0x0c,0x46,0x44,0xab,0x7c,
+ 0x7b,0x65,0x9e,0x57,0xb5,0x90,0x4b,0x0e,
+ }
+ },
+ { 0x8301025, {
+ 0xe4,0x7d,0xdb,0x1e,0x14,0xb4,0x5e,0x36,
+ 0x8f,0x3e,0x48,0x88,0x3c,0x6d,0x76,0xa1,
+ 0x59,0xc6,0xc0,0x72,0x42,0xdf,0x6c,0x30,
+ 0x6f,0x0b,0x28,0x16,0x61,0xfc,0x79,0x77,
+ }
+ },
+ { 0x8301055, {
+ 0x81,0x7b,0x99,0x1b,0xae,0x2d,0x4f,0x9a,
+ 0xef,0x13,0xce,0xb5,0x10,0xaf,0x6a,0xea,
+ 0xe5,0xb0,0x64,0x98,0x10,0x68,0x34,0x3b,
+ 0x9d,0x7a,0xd6,0x22,0x77,0x5f,0xb3,0x5b,
+ }
+ },
+ { 0x8301072, {
+ 0xcf,0x76,0xa7,0x1a,0x49,0xdf,0x2a,0x5e,
+ 0x9e,0x40,0x70,0xe5,0xdd,0x8a,0xa8,0x28,
+ 0x20,0xdc,0x91,0xd8,0x2c,0xa6,0xa0,0xb1,
+ 0x2d,0x22,0x26,0x94,0x4b,0x40,0x85,0x30,
+ }
+ },
+ { 0x830107a, {
+ 0x2a,0x65,0x8c,0x1a,0x5e,0x07,0x21,0x72,
+ 0xdf,0x90,0xa6,0x51,0x37,0xd3,0x4b,0x34,
+ 0xc4,0xda,0x03,0xe1,0x8a,0x6c,0xfb,0x20,
+ 0x04,0xb2,0x81,0x05,0xd4,0x87,0xf4,0x0a,
+ }
+ },
+ { 0x830107b, {
+ 0xb3,0x43,0x13,0x63,0x56,0xc1,0x39,0xad,
+ 0x10,0xa6,0x2b,0xcc,0x02,0xe6,0x76,0x2a,
+ 0x1e,0x39,0x58,0x3e,0x23,0x6e,0xa4,0x04,
+ 0x95,0xea,0xf9,0x6d,0xc2,0x8a,0x13,0x19,
+ }
+ },
+ { 0x830107c, {
+ 0x21,0x64,0xde,0xfb,0x9f,0x68,0x96,0x47,
+ 0x70,0x5c,0xe2,0x8f,0x18,0x52,0x6a,0xac,
+ 0xa4,0xd2,0x2e,0xe0,0xde,0x68,0x66,0xc3,
+ 0xeb,0x1e,0xd3,0x3f,0xbc,0x51,0x1d,0x38,
+ }
+ },
+ { 0x860010d, {
+ 0x86,0xb6,0x15,0x83,0xbc,0x3b,0x9c,0xe0,
+ 0xb3,0xef,0x1d,0x99,0x84,0x35,0x15,0xf7,
+ 0x7c,0x2a,0xc6,0x42,0xdb,0x73,0x07,0x5c,
+ 0x7d,0xc3,0x02,0xb5,0x43,0x06,0x5e,0xf8,
+ }
+ },
+ { 0x8608108, {
+ 0x14,0xfe,0x57,0x86,0x49,0xc8,0x68,0xe2,
+ 0x11,0xa3,0xcb,0x6e,0xff,0x6e,0xd5,0x38,
+ 0xfe,0x89,0x1a,0xe0,0x67,0xbf,0xc4,0xcc,
+ 0x1b,0x9f,0x84,0x77,0x2b,0x9f,0xaa,0xbd,
+ }
+ },
+ { 0x8701034, {
+ 0xc3,0x14,0x09,0xa8,0x9c,0x3f,0x8d,0x83,
+ 0x9b,0x4c,0xa5,0xb7,0x64,0x8b,0x91,0x5d,
+ 0x85,0x6a,0x39,0x26,0x1e,0x14,0x41,0xa8,
+ 0x75,0xea,0xa6,0xf9,0xc9,0xd1,0xea,0x2b,
+ }
+ },
+ { 0x8a00008, {
+ 0xd7,0x2a,0x93,0xdc,0x05,0x2f,0xa5,0x6e,
+ 0x0c,0x61,0x2c,0x07,0x9f,0x38,0xe9,0x8e,
+ 0xef,0x7d,0x2a,0x05,0x4d,0x56,0xaf,0x72,
+ 0xe7,0x56,0x47,0x6e,0x60,0x27,0xd5,0x8c,
+ }
+ },
+ { 0x8a0000a, {
+ 0x73,0x31,0x26,0x22,0xd4,0xf9,0xee,0x3c,
+ 0x07,0x06,0xe7,0xb9,0xad,0xd8,0x72,0x44,
+ 0x33,0x31,0xaa,0x7d,0xc3,0x67,0x0e,0xdb,
+ 0x47,0xb5,0xaa,0xbc,0xf5,0xbb,0xd9,0x20,
+ }
+ },
+ { 0xa00104c, {
+ 0x3c,0x8a,0xfe,0x04,0x62,0xd8,0x6d,0xbe,
+ 0xa7,0x14,0x28,0x64,0x75,0xc0,0xa3,0x76,
+ 0xb7,0x92,0x0b,0x97,0x0a,0x8e,0x9c,0x5b,
+ 0x1b,0xc8,0x9d,0x3a,0x1e,0x81,0x3d,0x3b,
+ }
+ },
+ { 0xa00104e, {
+ 0xc4,0x35,0x82,0x67,0xd2,0x86,0xe5,0xb2,
+ 0xfd,0x69,0x12,0x38,0xc8,0x77,0xba,0xe0,
+ 0x70,0xf9,0x77,0x89,0x10,0xa6,0x74,0x4e,
+ 0x56,0x58,0x13,0xf5,0x84,0x70,0x28,0x0b,
+ }
+ },
+ { 0xa001053, {
+ 0x92,0x0e,0xf4,0x69,0x10,0x3b,0xf9,0x9d,
+ 0x31,0x1b,0xa6,0x99,0x08,0x7d,0xd7,0x25,
+ 0x7e,0x1e,0x89,0xba,0x35,0x8d,0xac,0xcb,
+ 0x3a,0xb4,0xdf,0x58,0x12,0xcf,0xc0,0xc3,
+ }
+ },
+ { 0xa001058, {
+ 0x33,0x7d,0xa9,0xb5,0x4e,0x62,0x13,0x36,
+ 0xef,0x66,0xc9,0xbd,0x0a,0xa6,0x3b,0x19,
+ 0xcb,0xf5,0xc2,0xc3,0x55,0x47,0x20,0xec,
+ 0x1f,0x7b,0xa1,0x44,0x0e,0x8e,0xa4,0xb2,
+ }
+ },
+ { 0xa001075, {
+ 0x39,0x02,0x82,0xd0,0x7c,0x26,0x43,0xe9,
+ 0x26,0xa3,0xd9,0x96,0xf7,0x30,0x13,0x0a,
+ 0x8a,0x0e,0xac,0xe7,0x1d,0xdc,0xe2,0x0f,
+ 0xcb,0x9e,0x8d,0xbc,0xd2,0xa2,0x44,0xe0,
+ }
+ },
+ { 0xa001078, {
+ 0x2d,0x67,0xc7,0x35,0xca,0xef,0x2f,0x25,
+ 0x4c,0x45,0x93,0x3f,0x36,0x01,0x8c,0xce,
+ 0xa8,0x5b,0x07,0xd3,0xc1,0x35,0x3c,0x04,
+ 0x20,0xa2,0xfc,0xdc,0xe6,0xce,0x26,0x3e,
+ }
+ },
+ { 0xa001079, {
+ 0x43,0xe2,0x05,0x9c,0xfd,0xb7,0x5b,0xeb,
+ 0x5b,0xe9,0xeb,0x3b,0x96,0xf4,0xe4,0x93,
+ 0x73,0x45,0x3e,0xac,0x8d,0x3b,0xe4,0xdb,
+ 0x10,0x31,0xc1,0xe4,0xa2,0xd0,0x5a,0x8a,
+ }
+ },
+ { 0xa00107a, {
+ 0x5f,0x92,0xca,0xff,0xc3,0x59,0x22,0x5f,
+ 0x02,0xa0,0x91,0x3b,0x4a,0x45,0x10,0xfd,
+ 0x19,0xe1,0x8a,0x6d,0x9a,0x92,0xc1,0x3f,
+ 0x75,0x78,0xac,0x78,0x03,0x1d,0xdb,0x18,
+ }
+ },
+ { 0xa001143, {
+ 0x56,0xca,0xf7,0x43,0x8a,0x4c,0x46,0x80,
+ 0xec,0xde,0xe5,0x9c,0x50,0x84,0x9a,0x42,
+ 0x27,0xe5,0x51,0x84,0x8f,0x19,0xc0,0x8d,
+ 0x0c,0x25,0xb4,0xb0,0x8f,0x10,0xf3,0xf8,
+ }
+ },
+ { 0xa001144, {
+ 0x42,0xd5,0x9b,0xa7,0xd6,0x15,0x29,0x41,
+ 0x61,0xc4,0x72,0x3f,0xf3,0x06,0x78,0x4b,
+ 0x65,0xf3,0x0e,0xfa,0x9c,0x87,0xde,0x25,
+ 0xbd,0xb3,0x9a,0xf4,0x75,0x13,0x53,0xdc,
+ }
+ },
+ { 0xa00115d, {
+ 0xd4,0xc4,0x49,0x36,0x89,0x0b,0x47,0xdd,
+ 0xfb,0x2f,0x88,0x3b,0x5f,0xf2,0x8e,0x75,
+ 0xc6,0x6c,0x37,0x5a,0x90,0x25,0x94,0x3e,
+ 0x36,0x9c,0xae,0x02,0x38,0x6c,0xf5,0x05,
+ }
+ },
+ { 0xa001173, {
+ 0x28,0xbb,0x9b,0xd1,0xa0,0xa0,0x7e,0x3a,
+ 0x59,0x20,0xc0,0xa9,0xb2,0x5c,0xc3,0x35,
+ 0x53,0x89,0xe1,0x4c,0x93,0x2f,0x1d,0xc3,
+ 0xe5,0xf7,0xf3,0xc8,0x9b,0x61,0xaa,0x9e,
+ }
+ },
+ { 0xa0011a8, {
+ 0x97,0xc6,0x16,0x65,0x99,0xa4,0x85,0x3b,
+ 0xf6,0xce,0xaa,0x49,0x4a,0x3a,0xc5,0xb6,
+ 0x78,0x25,0xbc,0x53,0xaf,0x5d,0xcf,0xf4,
+ 0x23,0x12,0xbb,0xb1,0xbc,0x8a,0x02,0x2e,
+ }
+ },
+ { 0xa0011ce, {
+ 0xcf,0x1c,0x90,0xa3,0x85,0x0a,0xbf,0x71,
+ 0x94,0x0e,0x80,0x86,0x85,0x4f,0xd7,0x86,
+ 0xae,0x38,0x23,0x28,0x2b,0x35,0x9b,0x4e,
+ 0xfe,0xb8,0xcd,0x3d,0x3d,0x39,0xc9,0x6a,
+ }
+ },
+ { 0xa0011d1, {
+ 0xdf,0x0e,0xca,0xde,0xf6,0xce,0x5c,0x1e,
+ 0x4c,0xec,0xd7,0x71,0x83,0xcc,0xa8,0x09,
+ 0xc7,0xc5,0xfe,0xb2,0xf7,0x05,0xd2,0xc5,
+ 0x12,0xdd,0xe4,0xf3,0x92,0x1c,0x3d,0xb8,
+ }
+ },
+ { 0xa0011d3, {
+ 0x91,0xe6,0x10,0xd7,0x57,0xb0,0x95,0x0b,
+ 0x9a,0x24,0xee,0xf7,0xcf,0x56,0xc1,0xa6,
+ 0x4a,0x52,0x7d,0x5f,0x9f,0xdf,0xf6,0x00,
+ 0x65,0xf7,0xea,0xe8,0x2a,0x88,0xe2,0x26,
+ }
+ },
+ { 0xa0011d5, {
+ 0xed,0x69,0x89,0xf4,0xeb,0x64,0xc2,0x13,
+ 0xe0,0x51,0x1f,0x03,0x26,0x52,0x7d,0xb7,
+ 0x93,0x5d,0x65,0xca,0xb8,0x12,0x1d,0x62,
+ 0x0d,0x5b,0x65,0x34,0x69,0xb2,0x62,0x21,
+ }
+ },
+ { 0xa001223, {
+ 0xfb,0x32,0x5f,0xc6,0x83,0x4f,0x8c,0xb8,
+ 0xa4,0x05,0xf9,0x71,0x53,0x01,0x16,0xc4,
+ 0x83,0x75,0x94,0xdd,0xeb,0x7e,0xb7,0x15,
+ 0x8e,0x3b,0x50,0x29,0x8a,0x9c,0xcc,0x45,
+ }
+ },
+ { 0xa001224, {
+ 0x0e,0x0c,0xdf,0xb4,0x89,0xee,0x35,0x25,
+ 0xdd,0x9e,0xdb,0xc0,0x69,0x83,0x0a,0xad,
+ 0x26,0xa9,0xaa,0x9d,0xfc,0x3c,0xea,0xf9,
+ 0x6c,0xdc,0xd5,0x6d,0x8b,0x6e,0x85,0x4a,
+ }
+ },
+ { 0xa001227, {
+ 0xab,0xc6,0x00,0x69,0x4b,0x50,0x87,0xad,
+ 0x5f,0x0e,0x8b,0xea,0x57,0x38,0xce,0x1d,
+ 0x0f,0x75,0x26,0x02,0xf6,0xd6,0x96,0xe9,
+ 0x87,0xb9,0xd6,0x20,0x27,0x7c,0xd2,0xe0,
+ }
+ },
+ { 0xa001229, {
+ 0x7f,0x49,0x49,0x48,0x46,0xa5,0x50,0xa6,
+ 0x28,0x89,0x98,0xe2,0x9e,0xb4,0x7f,0x75,
+ 0x33,0xa7,0x04,0x02,0xe4,0x82,0xbf,0xb4,
+ 0xa5,0x3a,0xba,0x24,0x8d,0x31,0x10,0x1d,
+ }
+ },
+ { 0xa00122e, {
+ 0x56,0x94,0xa9,0x5d,0x06,0x68,0xfe,0xaf,
+ 0xdf,0x7a,0xff,0x2d,0xdf,0x74,0x0f,0x15,
+ 0x66,0xfb,0x00,0xb5,0x51,0x97,0x9b,0xfa,
+ 0xcb,0x79,0x85,0x46,0x25,0xb4,0xd2,0x10,
+ }
+ },
+ { 0xa001231, {
+ 0x0b,0x46,0xa5,0xfc,0x18,0x15,0xa0,0x9e,
+ 0xa6,0xdc,0xb7,0xff,0x17,0xf7,0x30,0x64,
+ 0xd4,0xda,0x9e,0x1b,0xc3,0xfc,0x02,0x3b,
+ 0xe2,0xc6,0x0e,0x41,0x54,0xb5,0x18,0xdd,
+ }
+ },
+ { 0xa001234, {
+ 0x88,0x8d,0xed,0xab,0xb5,0xbd,0x4e,0xf7,
+ 0x7f,0xd4,0x0e,0x95,0x34,0x91,0xff,0xcc,
+ 0xfb,0x2a,0xcd,0xf7,0xd5,0xdb,0x4c,0x9b,
+ 0xd6,0x2e,0x73,0x50,0x8f,0x83,0x79,0x1a,
+ }
+ },
+ { 0xa001236, {
+ 0x3d,0x30,0x00,0xb9,0x71,0xba,0x87,0x78,
+ 0xa8,0x43,0x55,0xc4,0x26,0x59,0xcf,0x9d,
+ 0x93,0xce,0x64,0x0e,0x8b,0x72,0x11,0x8b,
+ 0xa3,0x8f,0x51,0xe9,0xca,0x98,0xaa,0x25,
+ }
+ },
+ { 0xa001238, {
+ 0x72,0xf7,0x4b,0x0c,0x7d,0x58,0x65,0xcc,
+ 0x00,0xcc,0x57,0x16,0x68,0x16,0xf8,0x2a,
+ 0x1b,0xb3,0x8b,0xe1,0xb6,0x83,0x8c,0x7e,
+ 0xc0,0xcd,0x33,0xf2,0x8d,0xf9,0xef,0x59,
+ }
+ },
+ { 0xa00820c, {
+ 0xa8,0x0c,0x81,0xc0,0xa6,0x00,0xe7,0xf3,
+ 0x5f,0x65,0xd3,0xb9,0x6f,0xea,0x93,0x63,
+ 0xf1,0x8c,0x88,0x45,0xd7,0x82,0x80,0xd1,
+ 0xe1,0x3b,0x8d,0xb2,0xf8,0x22,0x03,0xe2,
+ }
+ },
+ { 0xa10113e, {
+ 0x05,0x3c,0x66,0xd7,0xa9,0x5a,0x33,0x10,
+ 0x1b,0xf8,0x9c,0x8f,0xed,0xfc,0xa7,0xa0,
+ 0x15,0xe3,0x3f,0x4b,0x1d,0x0d,0x0a,0xd5,
+ 0xfa,0x90,0xc4,0xed,0x9d,0x90,0xaf,0x53,
+ }
+ },
+ { 0xa101144, {
+ 0xb3,0x0b,0x26,0x9a,0xf8,0x7c,0x02,0x26,
+ 0x35,0x84,0x53,0xa4,0xd3,0x2c,0x7c,0x09,
+ 0x68,0x7b,0x96,0xb6,0x93,0xef,0xde,0xbc,
+ 0xfd,0x4b,0x15,0xd2,0x81,0xd3,0x51,0x47,
+ }
+ },
+ { 0xa101148, {
+ 0x20,0xd5,0x6f,0x40,0x4a,0xf6,0x48,0x90,
+ 0xc2,0x93,0x9a,0xc2,0xfd,0xac,0xef,0x4f,
+ 0xfa,0xc0,0x3d,0x92,0x3c,0x6d,0x01,0x08,
+ 0xf1,0x5e,0xb0,0xde,0xb4,0x98,0xae,0xc4,
+ }
+ },
+ { 0xa10123e, {
+ 0x03,0xb9,0x2c,0x76,0x48,0x93,0xc9,0x18,
+ 0xfb,0x56,0xfd,0xf7,0xe2,0x1d,0xca,0x4d,
+ 0x1d,0x13,0x53,0x63,0xfe,0x42,0x6f,0xfc,
+ 0x19,0x0f,0xf1,0xfc,0xa7,0xdd,0x89,0x1b,
+ }
+ },
+ { 0xa101244, {
+ 0x71,0x56,0xb5,0x9f,0x21,0xbf,0xb3,0x3c,
+ 0x8c,0xd7,0x36,0xd0,0x34,0x52,0x1b,0xb1,
+ 0x46,0x2f,0x04,0xf0,0x37,0xd8,0x1e,0x72,
+ 0x24,0xa2,0x80,0x84,0x83,0x65,0x84,0xc0,
+ }
+ },
+ { 0xa101248, {
+ 0xed,0x3b,0x95,0xa6,0x68,0xa7,0x77,0x3e,
+ 0xfc,0x17,0x26,0xe2,0x7b,0xd5,0x56,0x22,
+ 0x2c,0x1d,0xef,0xeb,0x56,0xdd,0xba,0x6e,
+ 0x1b,0x7d,0x64,0x9d,0x4b,0x53,0x13,0x75,
+ }
+ },
+ { 0xa108108, {
+ 0xed,0xc2,0xec,0xa1,0x15,0xc6,0x65,0xe9,
+ 0xd0,0xef,0x39,0xaa,0x7f,0x55,0x06,0xc6,
+ 0xf5,0xd4,0x3f,0x7b,0x14,0xd5,0x60,0x2c,
+ 0x28,0x1e,0x9c,0x59,0x69,0x99,0x4d,0x16,
+ }
+ },
+ { 0xa20102d, {
+ 0xf9,0x6e,0xf2,0x32,0xd3,0x0f,0x5f,0x11,
+ 0x59,0xa1,0xfe,0xcc,0xcd,0x9b,0x42,0x89,
+ 0x8b,0x89,0x2f,0xb5,0xbb,0x82,0xef,0x23,
+ 0x8c,0xe9,0x19,0x3e,0xcc,0x3f,0x7b,0xb4,
+ }
+ },
+ { 0xa201210, {
+ 0xe8,0x6d,0x51,0x6a,0x8e,0x72,0xf3,0xfe,
+ 0x6e,0x16,0xbc,0x62,0x59,0x40,0x17,0xe9,
+ 0x6d,0x3d,0x0e,0x6b,0xa7,0xac,0xe3,0x68,
+ 0xf7,0x55,0xf0,0x13,0xbb,0x22,0xf6,0x41,
+ }
+ },
+ { 0xa404107, {
+ 0xbb,0x04,0x4e,0x47,0xdd,0x5e,0x26,0x45,
+ 0x1a,0xc9,0x56,0x24,0xa4,0x4c,0x82,0xb0,
+ 0x8b,0x0d,0x9f,0xf9,0x3a,0xdf,0xc6,0x81,
+ 0x13,0xbc,0xc5,0x25,0xe4,0xc5,0xc3,0x99,
+ }
+ },
+ { 0xa500011, {
+ 0x23,0x3d,0x70,0x7d,0x03,0xc3,0xc4,0xf4,
+ 0x2b,0x82,0xc6,0x05,0xda,0x80,0x0a,0xf1,
+ 0xd7,0x5b,0x65,0x3a,0x7d,0xab,0xdf,0xa2,
+ 0x11,0x5e,0x96,0x7e,0x71,0xe9,0xfc,0x74,
+ }
+ },
+ { 0xa601209, {
+ 0x66,0x48,0xd4,0x09,0x05,0xcb,0x29,0x32,
+ 0x66,0xb7,0x9a,0x76,0xcd,0x11,0xf3,0x30,
+ 0x15,0x86,0xcc,0x5d,0x97,0x0f,0xc0,0x46,
+ 0xe8,0x73,0xe2,0xd6,0xdb,0xd2,0x77,0x1d,
+ }
+ },
+ { 0xa704107, {
+ 0xf3,0xc6,0x58,0x26,0xee,0xac,0x3f,0xd6,
+ 0xce,0xa1,0x72,0x47,0x3b,0xba,0x2b,0x93,
+ 0x2a,0xad,0x8e,0x6b,0xea,0x9b,0xb7,0xc2,
+ 0x64,0x39,0x71,0x8c,0xce,0xe7,0x41,0x39,
+ }
+ },
+ { 0xa705206, {
+ 0x8d,0xc0,0x76,0xbd,0x58,0x9f,0x8f,0xa4,
+ 0x12,0x9d,0x21,0xfb,0x48,0x21,0xbc,0xe7,
+ 0x67,0x6f,0x04,0x18,0xae,0x20,0x87,0x4b,
+ 0x03,0x35,0xe9,0xbe,0xfb,0x06,0xdf,0xfc,
+ }
+ },
+ { 0xa708007, {
+ 0x6b,0x76,0xcc,0x78,0xc5,0x8a,0xa3,0xe3,
+ 0x32,0x2d,0x79,0xe4,0xc3,0x80,0xdb,0xb2,
+ 0x07,0xaa,0x3a,0xe0,0x57,0x13,0x72,0x80,
+ 0xdf,0x92,0x73,0x84,0x87,0x3c,0x73,0x93,
+ }
+ },
+ { 0xa70c005, {
+ 0x88,0x5d,0xfb,0x79,0x64,0xd8,0x46,0x3b,
+ 0x4a,0x83,0x8e,0x77,0x7e,0xcf,0xb3,0x0f,
+ 0x1f,0x1f,0xf1,0x97,0xeb,0xfe,0x56,0x55,
+ 0xee,0x49,0xac,0xe1,0x8b,0x13,0xc5,0x13,
+ }
+ },
+ { 0xaa00116, {
+ 0xe8,0x4c,0x2c,0x88,0xa1,0xac,0x24,0x63,
+ 0x65,0xe5,0xaa,0x2d,0x16,0xa9,0xc3,0xf5,
+ 0xfe,0x1d,0x5e,0x65,0xc7,0xaa,0x92,0x4d,
+ 0x91,0xee,0x76,0xbb,0x4c,0x66,0x78,0xc9,
+ }
+ },
+ { 0xaa00212, {
+ 0xbd,0x57,0x5d,0x0a,0x0a,0x30,0xc1,0x75,
+ 0x95,0x58,0x5e,0x93,0x02,0x28,0x43,0x71,
+ 0xed,0x42,0x29,0xc8,0xec,0x34,0x2b,0xb2,
+ 0x1a,0x65,0x4b,0xfe,0x07,0x0f,0x34,0xa1,
+ }
+ },
+ { 0xaa00213, {
+ 0xed,0x58,0xb7,0x76,0x81,0x7f,0xd9,0x3a,
+ 0x1a,0xff,0x8b,0x34,0xb8,0x4a,0x99,0x0f,
+ 0x28,0x49,0x6c,0x56,0x2b,0xdc,0xb7,0xed,
+ 0x96,0xd5,0x9d,0xc1,0x7a,0xd4,0x51,0x9b,
+ }
+ },
+ { 0xaa00215, {
+ 0x55,0xd3,0x28,0xcb,0x87,0xa9,0x32,0xe9,
+ 0x4e,0x85,0x4b,0x7c,0x6b,0xd5,0x7c,0xd4,
+ 0x1b,0x51,0x71,0x3a,0x0e,0x0b,0xdc,0x9b,
+ 0x68,0x2f,0x46,0xee,0xfe,0xc6,0x6d,0xef,
+ }
+ },
+};
diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h
index 21776c529fa9..5df621752fef 100644
--- a/arch/x86/kernel/cpu/microcode/internal.h
+++ b/arch/x86/kernel/cpu/microcode/internal.h
@@ -100,14 +100,12 @@ extern bool force_minrev;
#ifdef CONFIG_CPU_SUP_AMD
void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family);
void load_ucode_amd_ap(unsigned int family);
-int save_microcode_in_initrd_amd(unsigned int family);
void reload_ucode_amd(unsigned int cpu);
struct microcode_ops *init_amd_microcode(void);
void exit_amd_microcode(void);
#else /* CONFIG_CPU_SUP_AMD */
static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { }
static inline void load_ucode_amd_ap(unsigned int family) { }
-static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
static inline void reload_ucode_amd(unsigned int cpu) { }
static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }
static inline void exit_amd_microcode(void) { }
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
index b65ab214bdf5..776a20172867 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
@@ -64,6 +64,13 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
struct file *backing;
long ret;
+ /*
+ * ECREATE would detect this too, but checking here also ensures
+ * that the 'encl_size' calculations below can never overflow.
+ */
+ if (!is_power_of_2(secs->size))
+ return -EINVAL;
+
va_page = sgx_encl_grow(encl, true);
if (IS_ERR(va_page))
return PTR_ERR(va_page);
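Illustrative note (not part of the patch): the added is_power_of_2() test rejects any SECS size that is not a power of two before the enclave size arithmetic runs, which is what keeps the later 'encl_size' calculations from overflowing. A minimal userspace sketch of the same guard, with the kernel's is_power_of_2() helper reimplemented locally:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's is_power_of_2() helper. */
static bool is_power_of_2(uint64_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	const uint64_t sizes[] = { 0x1000, 0x3000, 0x100000, 0x180000 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("secs->size=0x%llx -> %s\n",
		       (unsigned long long)sizes[i],
		       is_power_of_2(sizes[i]) ? "accepted" : "rejected (-EINVAL)");
	return 0;
}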
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 59d23cdf4ed0..dd8748c45529 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -2,6 +2,7 @@
/*
* Architecture specific OF callbacks.
*/
+#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/interrupt.h>
@@ -313,6 +314,6 @@ void __init x86_flattree_get_config(void)
if (initial_dtb)
early_memunmap(dt, map_len);
#endif
- if (of_have_populated_dt())
+ if (acpi_disabled && of_have_populated_dt())
x86_init.mpparse.parse_smp_cfg = x86_dtb_parse_smp_config;
}
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 385e3a5fc304..feca4f20b06a 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -25,8 +25,10 @@
#include <asm/posted_intr.h>
#include <asm/irq_remapping.h>
+#if defined(CONFIG_X86_LOCAL_APIC) || defined(CONFIG_X86_THERMAL_VECTOR)
#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>
+#endif
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 8eb3a88707f2..121edf1f2a79 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -1763,7 +1763,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->ecx = entry->edx = 0;
if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
- entry->eax = entry->ebx;
+ entry->eax = entry->ebx = 0;
break;
}
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index d4ac4a1f8b81..8160870398b9 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -7460,7 +7460,7 @@ static bool kvm_nx_huge_page_recovery_worker(void *data)
return true;
}
-static void kvm_mmu_start_lpage_recovery(struct once *once)
+static int kvm_mmu_start_lpage_recovery(struct once *once)
{
struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
struct kvm *kvm = container_of(ka, struct kvm, arch);
@@ -7471,13 +7471,14 @@ static void kvm_mmu_start_lpage_recovery(struct once *once)
kvm_nx_huge_page_recovery_worker_kill,
kvm, "kvm-nx-lpage-recovery");
- if (!nx_thread)
- return;
+ if (IS_ERR(nx_thread))
+ return PTR_ERR(nx_thread);
vhost_task_start(nx_thread);
/* Make the task visible only once it is fully started. */
WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
+ return 0;
}
int kvm_mmu_post_init_vm(struct kvm *kvm)
@@ -7485,10 +7486,7 @@ int kvm_mmu_post_init_vm(struct kvm *kvm)
if (nx_hugepage_mitigation_hard_disabled)
return 0;
- call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
- if (!kvm->arch.nx_huge_page_recovery_thread)
- return -ENOMEM;
- return 0;
+ return call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
}
void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
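As a side note on the call_once() change above: the initializer now returns an int, so a failure from vhost_task_create() is propagated to kvm_mmu_post_init_vm() instead of being inferred from a NULL thread pointer. A rough userspace model of that pattern is sketched below; it uses a pthread mutex and caches the initializer's return value, whereas the kernel's struct once / call_once() API differs, so treat this purely as an illustration.

#include <pthread.h>
#include <stdio.h>

struct once {
	pthread_mutex_t lock;
	int done;
	int ret;
};

#define ONCE_INIT { PTHREAD_MUTEX_INITIALIZER, 0, 0 }

/* Run @init exactly once; later callers get the cached return value. */
static int call_once(struct once *once, int (*init)(void))
{
	pthread_mutex_lock(&once->lock);
	if (!once->done) {
		once->ret = init();
		once->done = 1;
	}
	pthread_mutex_unlock(&once->lock);
	return once->ret;
}

static int start_recovery_thread(void)
{
	/* Stand-in for vhost_task_create() + vhost_task_start(). */
	return 0;	/* a negative errno here would be returned to every caller */
}

int main(void)
{
	static struct once nx_once = ONCE_INIT;

	printf("first call:  %d\n", call_once(&nx_once, start_recovery_thread));
	printf("second call: %d\n", call_once(&nx_once, start_recovery_thread));
	return 0;
}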
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 0dbb25442ec1..661108d65ee7 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -4590,6 +4590,8 @@ void sev_es_vcpu_reset(struct vcpu_svm *svm)
void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa)
{
+ struct kvm *kvm = svm->vcpu.kvm;
+
/*
* All host state for SEV-ES guests is categorized into three swap types
* based on how it is handled by hardware during a world switch:
@@ -4613,14 +4615,22 @@ void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_are
/*
* If DebugSwap is enabled, debug registers are loaded but NOT saved by
- * the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU both
- * saves and loads debug registers (Type-A).
+ * the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU does
+ * not save or load debug registers. Sadly, KVM can't prevent SNP
+ * guests from lying about DebugSwap on secondary vCPUs, i.e. the
+ * SEV_FEATURES provided at "AP Create" isn't guaranteed to match what
+ * the guest has actually enabled (or not!) in the VMSA.
+ *
+ * If DebugSwap is *possible*, save the masks so that they're restored
+ * if the guest enables DebugSwap. But for the DRs themselves, do NOT
+ * rely on the CPU to restore the host values; KVM will restore them as
+ * needed in common code, via hw_breakpoint_restore(). Note, KVM does
+ * NOT support virtualizing Breakpoint Extensions, i.e. the mask MSRs
+ * don't need to be restored per se, KVM just needs to ensure they are
+ * loaded with the correct values *if* the CPU writes the MSRs.
*/
- if (sev_vcpu_has_debug_swap(svm)) {
- hostsa->dr0 = native_get_debugreg(0);
- hostsa->dr1 = native_get_debugreg(1);
- hostsa->dr2 = native_get_debugreg(2);
- hostsa->dr3 = native_get_debugreg(3);
+ if (sev_vcpu_has_debug_swap(svm) ||
+ (sev_snp_guest(kvm) && cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP))) {
hostsa->dr0_addr_mask = amd_get_dr_addr_mask(0);
hostsa->dr1_addr_mask = amd_get_dr_addr_mask(1);
hostsa->dr2_addr_mask = amd_get_dr_addr_mask(2);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index a713c803a3a3..e67de787fc71 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3165,6 +3165,27 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
break;
}
+
+ /*
+ * AMD changed the architectural behavior of bits 5:2. On CPUs
+ * without BusLockTrap, bits 5:2 control "external pins", but
+ * on CPUs that support BusLockDetect, bit 2 enables BusLockTrap
+ * and bits 5:3 are reserved-to-zero. Sadly, old KVM allowed
+ * the guest to set bits 5:2 despite not actually virtualizing
+ * Performance-Monitoring/Breakpoint external pins. Drop bits
+ * 5:2 for backwards compatibility.
+ */
+ data &= ~GENMASK(5, 2);
+
+ /*
+ * Suppress BTF as KVM doesn't virtualize BTF, but there's no
+ * way to communicate lack of support to the guest.
+ */
+ if (data & DEBUGCTLMSR_BTF) {
+ kvm_pr_unimpl_wrmsr(vcpu, MSR_IA32_DEBUGCTLMSR, data);
+ data &= ~DEBUGCTLMSR_BTF;
+ }
+
if (data & DEBUGCTL_RESERVED_BITS)
return 1;
@@ -4189,6 +4210,18 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
guest_state_enter_irqoff();
+ /*
+ * Set RFLAGS.IF prior to VMRUN, as the host's RFLAGS.IF at the time of
+ * VMRUN controls whether or not physical IRQs are masked (KVM always
+ * runs with V_INTR_MASKING_MASK). Toggle RFLAGS.IF here to avoid the
+ * temptation to do STI+VMRUN+CLI, as AMD CPUs bleed the STI shadow
+ * into guest state if delivery of an event during VMRUN triggers a
+ * #VMEXIT, and the guest_state transitions already tell lockdep that
+ * IRQs are being enabled/disabled. Note! GIF=0 for the entirety of
+ * this path, so IRQs aren't actually unmasked while running host code.
+ */
+ raw_local_irq_enable();
+
amd_clear_divider();
if (sev_es_guest(vcpu->kvm))
@@ -4197,6 +4230,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
else
__svm_vcpu_run(svm, spec_ctrl_intercepted);
+ raw_local_irq_disable();
+
guest_state_exit_irqoff();
}
@@ -4253,6 +4288,16 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
clgi();
kvm_load_guest_xsave_state(vcpu);
+ /*
+ * Hardware only context switches DEBUGCTL if LBR virtualization is
+ * enabled. Manually load DEBUGCTL if necessary (and restore it after
+ * VM-Exit), as running with the host's DEBUGCTL can negatively affect
+ * guest state and can even be fatal, e.g. due to Bus Lock Detect.
+ */
+ if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) &&
+ vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl)
+ update_debugctlmsr(svm->vmcb->save.dbgctl);
+
kvm_wait_lapic_expire(vcpu);
/*
@@ -4280,6 +4325,10 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
+ if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) &&
+ vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl)
+ update_debugctlmsr(vcpu->arch.host_debugctl);
+
kvm_load_host_xsave_state(vcpu);
stgi();
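Both DEBUGCTL hunks above follow the same "write only when it differs" bracket around VMRUN: load the guest value before entry and restore vcpu->arch.host_debugctl after exit, skipping the MSR write when the values already match (and skipping it entirely when LBR virtualization makes the hardware do the swap). A hedged sketch of that bracket, with update_debugctlmsr() modeled as a plain assignment:

#include <stdint.h>
#include <stdio.h>

static uint64_t hw_debugctl;	/* models the live DEBUGCTL MSR */

/* Stand-in for update_debugctlmsr(): skip the write when the value matches. */
static void write_debugctl_if_needed(uint64_t val)
{
	if (hw_debugctl == val)
		return;
	printf("wrmsr DEBUGCTL <- 0x%llx\n", (unsigned long long)val);
	hw_debugctl = val;
}

static void run_guest(uint64_t host_debugctl, uint64_t guest_debugctl)
{
	write_debugctl_if_needed(guest_debugctl);	/* before VMRUN */
	/* ... VMRUN ... */
	write_debugctl_if_needed(host_debugctl);	/* after #VMEXIT */
}

int main(void)
{
	hw_debugctl = 0x40;		/* pretend the host runs with a bit set */
	run_guest(0x40, 0x0);		/* two MSR writes */
	run_guest(0x40, 0x40);		/* values match: no MSR writes at all */
	return 0;
}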
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 9d7cdb8fbf87..ea44c1da5a7c 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -584,7 +584,7 @@ static inline bool is_vnmi_enabled(struct vcpu_svm *svm)
/* svm.c */
#define MSR_INVALID 0xffffffffU
-#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
+#define DEBUGCTL_RESERVED_BITS (~DEBUGCTLMSR_LBR)
extern bool dump_invalid_vmcb;
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 2ed80aea3bb1..0c61153b275f 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -170,12 +170,8 @@ SYM_FUNC_START(__svm_vcpu_run)
mov VCPU_RDI(%_ASM_DI), %_ASM_DI
/* Enter guest mode */
- sti
-
3: vmrun %_ASM_AX
4:
- cli
-
/* Pop @svm to RAX while it's the only available register. */
pop %_ASM_AX
@@ -340,12 +336,8 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
mov KVM_VMCB_pa(%rax), %rax
/* Enter guest mode */
- sti
-
1: vmrun %rax
-
-2: cli
-
+2:
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 8a7af02d466e..ed8a3cb53961 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -5084,6 +5084,17 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
load_vmcs12_host_state(vcpu, vmcs12);
+ /*
+ * Process events if an injectable IRQ or NMI is pending, even
+ * if the event is blocked (RFLAGS.IF is cleared on VM-Exit).
+ * If an event became pending while L2 was active, KVM needs to
+ * either inject the event or request an IRQ/NMI window. SMIs
+ * don't need to be processed as SMM is mutually exclusive with
+ * non-root mode. INIT/SIPI don't need to be checked as INIT
+ * is blocked post-VMXON, and SIPIs are ignored.
+ */
+ if (kvm_cpu_has_injectable_intr(vcpu) || vcpu->arch.nmi_pending)
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
return;
}
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 6c56d5235f0f..3b92f893b239 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1514,16 +1514,12 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
*/
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm))
shrink_ple_window(vcpu);
vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
vmx_vcpu_pi_load(vcpu, cpu);
-
- vmx->host_debugctlmsr = get_debugctlmsr();
}
void vmx_vcpu_put(struct kvm_vcpu *vcpu)
@@ -7458,8 +7454,8 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
}
/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
- if (vmx->host_debugctlmsr)
- update_debugctlmsr(vmx->host_debugctlmsr);
+ if (vcpu->arch.host_debugctl)
+ update_debugctlmsr(vcpu->arch.host_debugctl);
#ifndef CONFIG_X86_64
/*
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 8b111ce1087c..951e44dc9d0e 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -340,8 +340,6 @@ struct vcpu_vmx {
/* apic deadline value in host tsc */
u64 hv_deadline_tsc;
- unsigned long host_debugctlmsr;
-
/*
* Only bits masked by msr_ia32_feature_control_valid_bits can be set in
* msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 02159c967d29..4b64ab350bcd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10968,6 +10968,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
set_debugreg(0, 7);
}
+ vcpu->arch.host_debugctl = get_debugctlmsr();
+
guest_timing_enter_irqoff();
for (;;) {
@@ -12877,11 +12879,11 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
mutex_unlock(&kvm->slots_lock);
}
kvm_unload_vcpu_mmus(kvm);
+ kvm_destroy_vcpus(kvm);
kvm_x86_call(vm_destroy)(kvm);
kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
kvm_pic_destroy(kvm);
kvm_ioapic_destroy(kvm);
- kvm_destroy_vcpus(kvm);
kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
kvm_mmu_uninit_vm(kvm);
diff --git a/block/bio.c b/block/bio.c
index f0c416e5931d..6ac5983ba51e 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -77,7 +77,7 @@ struct bio_slab {
struct kmem_cache *slab;
unsigned int slab_ref;
unsigned int slab_size;
- char name[8];
+ char name[12];
};
static DEFINE_MUTEX(bio_slab_lock);
static DEFINE_XARRAY(bio_slabs);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index c7c85e10cf9c..1d1589c35297 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -329,7 +329,7 @@ int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
if (nsegs < lim->max_segments &&
bytes + bv.bv_len <= max_bytes &&
- bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
+ bv.bv_offset + bv.bv_len <= lim->min_segment_size) {
nsegs++;
bytes += bv.bv_len;
} else {
diff --git a/block/blk-settings.c b/block/blk-settings.c
index c44dadc35e1e..b9c6f0ec1c49 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -246,6 +246,7 @@ int blk_validate_limits(struct queue_limits *lim)
{
unsigned int max_hw_sectors;
unsigned int logical_block_sectors;
+ unsigned long seg_size;
int err;
/*
@@ -303,7 +304,7 @@ int blk_validate_limits(struct queue_limits *lim)
max_hw_sectors = min_not_zero(lim->max_hw_sectors,
lim->max_dev_sectors);
if (lim->max_user_sectors) {
- if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
+ if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
return -EINVAL;
lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
} else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
@@ -341,7 +342,7 @@ int blk_validate_limits(struct queue_limits *lim)
*/
if (!lim->seg_boundary_mask)
lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
- if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
+ if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
return -EINVAL;
/*
@@ -362,10 +363,17 @@ int blk_validate_limits(struct queue_limits *lim)
*/
if (!lim->max_segment_size)
lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
- if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
+ if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
return -EINVAL;
}
+	/* set up min segment size for building new segment in fast path */
+ if (lim->seg_boundary_mask > lim->max_segment_size - 1)
+ seg_size = lim->max_segment_size;
+ else
+ seg_size = lim->seg_boundary_mask + 1;
+ lim->min_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);
+
/*
* We require drivers to at least do logical block aligned I/O, but
* historically could not check for that due to the separate calls
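The new min_segment_size is simply the smallest of the segment boundary span (seg_boundary_mask + 1), max_segment_size and PAGE_SIZE; bio_may_need_split() in block/blk.h (changed further down in this diff) then compares single-bvec BIOs against that value rather than against a bare PAGE_SIZE. A small worked example of the derivation, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned int min_segment_size(unsigned long seg_boundary_mask,
				     unsigned int max_segment_size)
{
	unsigned long seg_size;

	if (seg_boundary_mask > max_segment_size - 1)
		seg_size = max_segment_size;
	else
		seg_size = seg_boundary_mask + 1;

	return seg_size < PAGE_SIZE ? seg_size : PAGE_SIZE;
}

int main(void)
{
	printf("%u\n", min_segment_size(0xffffffffUL, 65536));	/* -> 4096 */
	printf("%u\n", min_segment_size(0x0fffUL, 65536));	/* -> 4096 */
	printf("%u\n", min_segment_size(0xffffffffUL, 2048));	/* -> 2048 */
	return 0;
}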
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 761ea662ddc3..0c77244a35c9 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -410,13 +410,14 @@ static bool disk_insert_zone_wplug(struct gendisk *disk,
}
}
hlist_add_head_rcu(&zwplug->node, &disk->zone_wplugs_hash[idx]);
+ atomic_inc(&disk->nr_zone_wplugs);
spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
return true;
}
-static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
- sector_t sector)
+static struct blk_zone_wplug *disk_get_hashed_zone_wplug(struct gendisk *disk,
+ sector_t sector)
{
unsigned int zno = disk_zone_no(disk, sector);
unsigned int idx = hash_32(zno, disk->zone_wplugs_hash_bits);
@@ -437,6 +438,15 @@ static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
return NULL;
}
+static inline struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
+ sector_t sector)
+{
+ if (!atomic_read(&disk->nr_zone_wplugs))
+ return NULL;
+
+ return disk_get_hashed_zone_wplug(disk, sector);
+}
+
static void disk_free_zone_wplug_rcu(struct rcu_head *rcu_head)
{
struct blk_zone_wplug *zwplug =
@@ -503,6 +513,7 @@ static void disk_remove_zone_wplug(struct gendisk *disk,
zwplug->flags |= BLK_ZONE_WPLUG_UNHASHED;
spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
hlist_del_init_rcu(&zwplug->node);
+ atomic_dec(&disk->nr_zone_wplugs);
spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
disk_put_zone_wplug(zwplug);
}
@@ -593,6 +604,11 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
{
struct bio *bio;
+ if (bio_list_empty(&zwplug->bio_list))
+ return;
+
+ pr_warn_ratelimited("%s: zone %u: Aborting plugged BIOs\n",
+ zwplug->disk->disk_name, zwplug->zone_no);
while ((bio = bio_list_pop(&zwplug->bio_list)))
blk_zone_wplug_bio_io_error(zwplug, bio);
}
@@ -1040,6 +1056,47 @@ plug:
return true;
}
+static void blk_zone_wplug_handle_native_zone_append(struct bio *bio)
+{
+ struct gendisk *disk = bio->bi_bdev->bd_disk;
+ struct blk_zone_wplug *zwplug;
+ unsigned long flags;
+
+ /*
+ * We have native support for zone append operations, so we are not
+ * going to handle @bio through plugging. However, we may already have a
+ * zone write plug for the target zone if that zone was previously
+ * partially written using regular writes. In such case, we risk leaving
+	 * partially written using regular writes. In such a case, we risk leaving
+ * zone append operations. Avoid this by removing the zone write plug.
+ */
+ zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector);
+ if (likely(!zwplug))
+ return;
+
+ spin_lock_irqsave(&zwplug->lock, flags);
+
+ /*
+ * We are about to remove the zone write plug. But if the user
+ * (mistakenly) has issued regular writes together with native zone
+	 * append, we must abort the writes as otherwise the plugged BIOs would
+ * not be executed by the plug BIO work as disk_get_zone_wplug() will
+ * return NULL after the plug is removed. Aborting the plugged write
+ * BIOs is consistent with the fact that these writes will most likely
+	 * fail anyway as there are no ordering guarantees between zone append
+ * operations and regular write operations.
+ */
+ if (!bio_list_empty(&zwplug->bio_list)) {
+ pr_warn_ratelimited("%s: zone %u: Invalid mix of zone append and regular writes\n",
+ disk->disk_name, zwplug->zone_no);
+ disk_zone_wplug_abort(zwplug);
+ }
+ disk_remove_zone_wplug(disk, zwplug);
+ spin_unlock_irqrestore(&zwplug->lock, flags);
+
+ disk_put_zone_wplug(zwplug);
+}
+
/**
* blk_zone_plug_bio - Handle a zone write BIO with zone write plugging
* @bio: The BIO being submitted
@@ -1096,8 +1153,10 @@ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
*/
switch (bio_op(bio)) {
case REQ_OP_ZONE_APPEND:
- if (!bdev_emulates_zone_append(bdev))
+ if (!bdev_emulates_zone_append(bdev)) {
+ blk_zone_wplug_handle_native_zone_append(bio);
return false;
+ }
fallthrough;
case REQ_OP_WRITE:
case REQ_OP_WRITE_ZEROES:
@@ -1284,6 +1343,7 @@ static int disk_alloc_zone_resources(struct gendisk *disk,
{
unsigned int i;
+ atomic_set(&disk->nr_zone_wplugs, 0);
disk->zone_wplugs_hash_bits =
min(ilog2(pool_size) + 1, BLK_ZONE_WPLUG_MAX_HASH_BITS);
@@ -1338,6 +1398,7 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
}
}
+ WARN_ON_ONCE(atomic_read(&disk->nr_zone_wplugs));
kfree(disk->zone_wplugs_hash);
disk->zone_wplugs_hash = NULL;
disk->zone_wplugs_hash_bits = 0;
@@ -1550,11 +1611,12 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
}
/*
- * We need to track the write pointer of all zones that are not
- * empty nor full. So make sure we have a zone write plug for
- * such zone if the device has a zone write plug hash table.
+ * If the device needs zone append emulation, we need to track the
+ * write pointer of all zones that are not empty nor full. So make sure
+ * we have a zone write plug for such zone if the device has a zone
+ * write plug hash table.
*/
- if (!disk->zone_wplugs_hash)
+ if (!queue_emulates_zone_append(disk->queue) || !disk->zone_wplugs_hash)
return 0;
disk_zone_wplug_sync_wp_offset(disk, zone);
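A shape worth noting in the blk-zoned.c changes: disk_get_zone_wplug() now fronts the hash lookup with an atomic counter of hashed plugs, so BIO submission pays nothing when no zone write plug exists. A minimal sketch of that "counter in front of a hash table" fast path (the real code uses RCU and per-disk locking, all omitted here):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct table {
	atomic_int nr_entries;
	/* ... hash buckets, locks, RCU, ... */
};

static void *hashed_lookup(struct table *t, unsigned long key)
{
	(void)t;
	(void)key;
	return NULL;	/* stands in for the locked/RCU hash walk */
}

static void *lookup(struct table *t, unsigned long key)
{
	/* Fast path: nothing was ever inserted, skip the hash walk entirely. */
	if (!atomic_load(&t->nr_entries))
		return NULL;

	return hashed_lookup(t, key);
}

int main(void)
{
	struct table t = { .nr_entries = 0 };

	printf("%p\n", lookup(&t, 42));	/* hits the fast path */
	return 0;
}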
diff --git a/block/blk.h b/block/blk.h
index 90fa5f28ccab..9cf9a0099416 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -14,6 +14,7 @@
struct elevator_type;
#define BLK_DEV_MAX_SECTORS (LLONG_MAX >> 9)
+#define BLK_MIN_SEGMENT_SIZE 4096
/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT (5 * HZ)
@@ -358,8 +359,12 @@ struct bio *bio_split_zone_append(struct bio *bio,
static inline bool bio_may_need_split(struct bio *bio,
const struct queue_limits *lim)
{
- return lim->chunk_sectors || bio->bi_vcnt != 1 ||
- bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
+ if (lim->chunk_sectors)
+ return true;
+ if (bio->bi_vcnt != 1)
+ return true;
+ return bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset >
+ lim->min_segment_size;
}
/**
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index 5e9be13a56a8..7acba66eed48 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -682,7 +682,7 @@ static void utf16_le_to_7bit(const __le16 *in, unsigned int size, u8 *out)
out[size] = 0;
while (i < size) {
- u8 c = le16_to_cpu(in[i]) & 0xff;
+ u8 c = le16_to_cpu(in[i]) & 0x7f;
if (c && !isprint(c))
c = '!';
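The one-character change above matters because masking with 0x7f keeps only the low seven bits, so a UTF-16 code point at or above 0x80 can never leak a non-ASCII byte into the supposedly 7-bit output. A self-contained version of the conversion for experimenting with the mask (le16_to_cpu() is dropped, so native byte order is assumed):

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>

/* Reduce a UTF-16 partition name to 7-bit ASCII, '!' for non-printables. */
static void utf16_to_7bit(const uint16_t *in, unsigned int size, char *out)
{
	unsigned int i;

	out[size] = 0;
	for (i = 0; i < size; i++) {
		char c = in[i] & 0x7f;	/* was 0xff before the fix */

		if (c && !isprint((unsigned char)c))
			c = '!';
		out[i] = c;
	}
}

int main(void)
{
	const uint16_t name[] = { 'E', 'S', 'P', 0x2013 /* en dash */, 0x00e9 /* e-acute */ };
	char out[8];

	utf16_to_7bit(name, 5, out);
	printf("%s\n", out);	/* "ESP!i": 0x2013 -> '!', 0x00e9 & 0x7f -> 'i' */
	return 0;
}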
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
index 2ad53cc6aae5..ef9444482db1 100644
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -21,9 +21,15 @@ struct platform_profile_handler {
struct device dev;
int minor;
unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ unsigned long hidden_choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
const struct platform_profile_ops *ops;
};
+struct aggregate_choices_data {
+ unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ int count;
+};
+
static const char * const profile_names[] = {
[PLATFORM_PROFILE_LOW_POWER] = "low-power",
[PLATFORM_PROFILE_COOL] = "cool",
@@ -73,7 +79,7 @@ static int _store_class_profile(struct device *dev, void *data)
lockdep_assert_held(&profile_lock);
handler = to_pprof_handler(dev);
- if (!test_bit(*bit, handler->choices))
+ if (!test_bit(*bit, handler->choices) && !test_bit(*bit, handler->hidden_choices))
return -EOPNOTSUPP;
return handler->ops->profile_set(dev, *bit);
@@ -239,21 +245,44 @@ static const struct class platform_profile_class = {
/**
* _aggregate_choices - Aggregate the available profile choices
* @dev: The device
- * @data: The available profile choices
+ * @arg: struct aggregate_choices_data
*
* Return: 0 on success, -errno on failure
*/
-static int _aggregate_choices(struct device *dev, void *data)
+static int _aggregate_choices(struct device *dev, void *arg)
{
+ unsigned long tmp[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ struct aggregate_choices_data *data = arg;
struct platform_profile_handler *handler;
- unsigned long *aggregate = data;
lockdep_assert_held(&profile_lock);
handler = to_pprof_handler(dev);
- if (test_bit(PLATFORM_PROFILE_LAST, aggregate))
- bitmap_copy(aggregate, handler->choices, PLATFORM_PROFILE_LAST);
+ bitmap_or(tmp, handler->choices, handler->hidden_choices, PLATFORM_PROFILE_LAST);
+ if (test_bit(PLATFORM_PROFILE_LAST, data->aggregate))
+ bitmap_copy(data->aggregate, tmp, PLATFORM_PROFILE_LAST);
else
- bitmap_and(aggregate, handler->choices, aggregate, PLATFORM_PROFILE_LAST);
+ bitmap_and(data->aggregate, tmp, data->aggregate, PLATFORM_PROFILE_LAST);
+ data->count++;
+
+ return 0;
+}
+
+/**
+ * _remove_hidden_choices - Remove hidden choices from aggregate data
+ * @dev: The device
+ * @arg: struct aggregate_choices_data
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _remove_hidden_choices(struct device *dev, void *arg)
+{
+ struct aggregate_choices_data *data = arg;
+ struct platform_profile_handler *handler;
+
+ lockdep_assert_held(&profile_lock);
+ handler = to_pprof_handler(dev);
+ bitmap_andnot(data->aggregate, handler->choices,
+ handler->hidden_choices, PLATFORM_PROFILE_LAST);
return 0;
}
@@ -270,22 +299,31 @@ static ssize_t platform_profile_choices_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ struct aggregate_choices_data data = {
+ .aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
+ .count = 0,
+ };
int err;
- set_bit(PLATFORM_PROFILE_LAST, aggregate);
+ set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
err = class_for_each_device(&platform_profile_class, NULL,
- aggregate, _aggregate_choices);
+ &data, _aggregate_choices);
if (err)
return err;
+ if (data.count == 1) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &data, _remove_hidden_choices);
+ if (err)
+ return err;
+ }
}
/* no profile handler registered any more */
- if (bitmap_empty(aggregate, PLATFORM_PROFILE_LAST))
+ if (bitmap_empty(data.aggregate, PLATFORM_PROFILE_LAST))
return -EINVAL;
- return _commmon_choices_show(aggregate, buf);
+ return _commmon_choices_show(data.aggregate, buf);
}
/**
@@ -373,7 +411,10 @@ static ssize_t platform_profile_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ struct aggregate_choices_data data = {
+ .aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
+ .count = 0,
+ };
int ret;
int i;
@@ -381,13 +422,13 @@ static ssize_t platform_profile_store(struct device *dev,
i = sysfs_match_string(profile_names, buf);
if (i < 0 || i == PLATFORM_PROFILE_CUSTOM)
return -EINVAL;
- set_bit(PLATFORM_PROFILE_LAST, choices);
+ set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
ret = class_for_each_device(&platform_profile_class, NULL,
- choices, _aggregate_choices);
+ &data, _aggregate_choices);
if (ret)
return ret;
- if (!test_bit(i, choices))
+ if (!test_bit(i, data.aggregate))
return -EOPNOTSUPP;
ret = class_for_each_device(&platform_profile_class, NULL, &i,
@@ -453,12 +494,15 @@ EXPORT_SYMBOL_GPL(platform_profile_notify);
*/
int platform_profile_cycle(void)
{
+ struct aggregate_choices_data data = {
+ .aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
+ .count = 0,
+ };
enum platform_profile_option next = PLATFORM_PROFILE_LAST;
enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
- unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
int err;
- set_bit(PLATFORM_PROFILE_LAST, choices);
+ set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
err = class_for_each_device(&platform_profile_class, NULL,
&profile, _aggregate_profiles);
@@ -470,14 +514,14 @@ int platform_profile_cycle(void)
return -EINVAL;
err = class_for_each_device(&platform_profile_class, NULL,
- choices, _aggregate_choices);
+ &data, _aggregate_choices);
if (err)
return err;
/* never iterate into a custom if all drivers supported it */
- clear_bit(PLATFORM_PROFILE_CUSTOM, choices);
+ clear_bit(PLATFORM_PROFILE_CUSTOM, data.aggregate);
- next = find_next_bit_wrap(choices,
+ next = find_next_bit_wrap(data.aggregate,
PLATFORM_PROFILE_LAST,
profile + 1);
@@ -532,6 +576,14 @@ struct device *platform_profile_register(struct device *dev, const char *name,
return ERR_PTR(-EINVAL);
}
+ if (ops->hidden_choices) {
+ err = ops->hidden_choices(drvdata, pprof->hidden_choices);
+ if (err) {
+ dev_err(dev, "platform_profile hidden_choices failed\n");
+ return ERR_PTR(err);
+ }
+ }
+
guard(mutex)(&profile_lock);
/* create class interface for individual handler */
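To recap the aggregation logic introduced above: each handler contributes the OR of its visible and hidden choices, those sets are ANDed across handlers, and the handlers are counted; when exactly one handler is registered, its hidden choices are stripped back out before the list is shown to userspace. A toy bitmask version of that flow, using plain unsigned long masks instead of the kernel bitmap API:

#include <stdio.h>

struct handler {
	unsigned long choices;
	unsigned long hidden_choices;
};

static unsigned long aggregate_choices(const struct handler *h, int n)
{
	unsigned long aggregate = ~0UL;	/* start from "everything allowed" */
	int i;

	for (i = 0; i < n; i++)
		aggregate &= h[i].choices | h[i].hidden_choices;

	/* A lone handler only advertises its visible choices. */
	if (n == 1)
		aggregate &= h[0].choices & ~h[0].hidden_choices;

	return aggregate;
}

int main(void)
{
	struct handler one[] = { { 0x3, 0x4 } };
	struct handler two[] = { { 0x3, 0x4 }, { 0x7, 0x0 } };

	printf("one handler:  0x%lx\n", aggregate_choices(one, 1));	/* 0x3 */
	printf("two handlers: 0x%lx\n", aggregate_choices(two, 2));	/* 0x7 */
	return 0;
}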
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index bc6bae76ccaf..94c6446604fc 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -274,6 +274,7 @@ static void binderfs_evict_inode(struct inode *inode)
mutex_unlock(&binderfs_minors_mutex);
if (refcount_dec_and_test(&device->ref)) {
+ hlist_del_init(&device->hlist);
kfree(device->context.name);
kfree(device);
}
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 8e895ae45c86..c842e2de6ef9 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -386,8 +386,12 @@ struct ahci_host_priv {
static inline bool ahci_ignore_port(struct ahci_host_priv *hpriv,
unsigned int portid)
{
- return portid >= hpriv->nports ||
- !(hpriv->mask_port_map & (1 << portid));
+ if (portid >= hpriv->nports)
+ return true;
+ /* mask_port_map not set means that all ports are available */
+ if (!hpriv->mask_port_map)
+ return false;
+ return !(hpriv->mask_port_map & (1 << portid));
}
extern int ahci_ignore_sss;
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index fdfa7b266218..e7ace4b10f15 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -541,6 +541,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
hpriv->saved_port_map = port_map;
}
+ /* mask_port_map not set means that all ports are available */
if (hpriv->mask_port_map) {
dev_warn(dev, "masking port_map 0x%lx -> 0x%lx\n",
port_map,
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 63ec2f218431..c085dd81ebe7 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4143,10 +4143,6 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
{ "Samsung SSD 860*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
ATA_QUIRK_NO_NCQ_ON_ATI },
- { "Samsung SSD 870 QVO*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
- ATA_QUIRK_ZERO_AFTER_TRIM |
- ATA_QUIRK_NO_NCQ_ON_ATI |
- ATA_QUIRK_NOLPM },
{ "Samsung SSD 870*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
ATA_QUIRK_NO_NCQ_ON_ATI },
diff --git a/drivers/base/component.c b/drivers/base/component.c
index d63e01f4851d..a482708566bc 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -588,6 +588,9 @@ static void component_unbind(struct component *component,
{
WARN_ON(!component->bound);
+ dev_dbg(adev->parent, "unbinding %s component %p (ops %ps)\n",
+ dev_name(component->dev), component, component->ops);
+
if (component->ops && component->ops->unbind)
component->ops->unbind(component->dev, adev->parent, data);
component->bound = false;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 5a1f05198114..2fde698430df 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2079,6 +2079,7 @@ static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
out:
sup_handle->flags &= ~FWNODE_FLAG_VISITED;
put_device(sup_dev);
+ put_device(con_dev);
put_device(par_dev);
return ret;
}
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 93e7779ef21e..d8a733ea5e1a 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -576,7 +576,10 @@ void *devres_open_group(struct device *dev, void *id, gfp_t gfp)
}
EXPORT_SYMBOL_GPL(devres_open_group);
-/* Find devres group with ID @id. If @id is NULL, look for the latest. */
+/*
+ * Find devres group with ID @id. If @id is NULL, look for the latest open
+ * group.
+ */
static struct devres_group *find_group(struct device *dev, void *id)
{
struct devres_node *node;
@@ -687,6 +690,13 @@ int devres_release_group(struct device *dev, void *id)
spin_unlock_irqrestore(&dev->devres_lock, flags);
release_nodes(dev, &todo);
+ } else if (list_empty(&dev->devres_head)) {
+ /*
+ * dev is probably dying via devres_release_all(): groups
+ * have already been removed and are on the process of
+		 * have already been removed and are in the process of
+ */
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
} else {
WARN_ON(1);
spin_unlock_irqrestore(&dev->devres_lock, flags);
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 529085181f35..ca9a67b5b537 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -2715,9 +2715,12 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
if (ph.len > sizeof(struct ublk_params))
ph.len = sizeof(struct ublk_params);
- /* parameters can only be changed when device isn't live */
mutex_lock(&ub->mutex);
- if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
+ if (test_bit(UB_STATE_USED, &ub->state)) {
+ /*
+		 * Parameters can only be changed when the device hasn't
+ * been started yet
+ */
ret = -EACCES;
} else if (copy_from_user(&ub->params, argp, ph.len)) {
ret = -EFAULT;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 90966dfbd278..2a8d91963c63 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -2102,7 +2102,8 @@ static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return submit_or_queue_tx_urb(hdev, urb);
case HCI_SCODATA_PKT:
- if (hci_conn_num(hdev, SCO_LINK) < 1)
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ hci_conn_num(hdev, SCO_LINK) < 1)
return -ENODEV;
urb = alloc_isoc_urb(hdev, skb);
@@ -2576,7 +2577,8 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
return submit_or_queue_tx_urb(hdev, urb);
case HCI_SCODATA_PKT:
- if (hci_conn_num(hdev, SCO_LINK) < 1)
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ hci_conn_num(hdev, SCO_LINK) < 1)
return -ENODEV;
urb = alloc_isoc_urb(hdev, skb);
@@ -3642,6 +3644,7 @@ static ssize_t force_poll_sync_write(struct file *file,
}
static const struct file_operations force_poll_sync_fops = {
+ .owner = THIS_MODULE,
.open = simple_open,
.read = force_poll_sync_read,
.write = force_poll_sync_write,
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index c41119b9079f..7ffea0f98162 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -1095,8 +1095,9 @@ static void mhi_pci_recovery_work(struct work_struct *work)
err_unprepare:
mhi_unprepare_after_power_down(mhi_cntrl);
err_try_reset:
- if (pci_reset_function(pdev))
- dev_err(&pdev->dev, "Recovery failed\n");
+ err = pci_try_reset_function(pdev);
+ if (err)
+ dev_err(&pdev->dev, "Recovery failed: %d\n", err);
}
static void health_check(struct timer_list *t)
diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c
index 5dea31769f9a..d8e029e7e53f 100644
--- a/drivers/bus/simple-pm-bus.c
+++ b/drivers/bus/simple-pm-bus.c
@@ -109,9 +109,29 @@ static int simple_pm_bus_runtime_resume(struct device *dev)
return 0;
}
+static int simple_pm_bus_suspend(struct device *dev)
+{
+ struct simple_pm_bus *bus = dev_get_drvdata(dev);
+
+ if (!bus)
+ return 0;
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int simple_pm_bus_resume(struct device *dev)
+{
+ struct simple_pm_bus *bus = dev_get_drvdata(dev);
+
+ if (!bus)
+ return 0;
+
+ return pm_runtime_force_resume(dev);
+}
+
static const struct dev_pm_ops simple_pm_bus_pm_ops = {
RUNTIME_PM_OPS(simple_pm_bus_runtime_suspend, simple_pm_bus_runtime_resume, NULL)
- NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(simple_pm_bus_suspend, simple_pm_bus_resume)
};
#define ONLY_BUS ((void *) 1) /* Match if the device is only a bus. */
diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c
index c573ed2ee71a..7811aa734053 100644
--- a/drivers/cdx/cdx.c
+++ b/drivers/cdx/cdx.c
@@ -473,8 +473,12 @@ static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cdx_device *cdx_dev = to_cdx_device(dev);
+ ssize_t len;
- return sysfs_emit(buf, "%s\n", cdx_dev->driver_override);
+ device_lock(dev);
+ len = sysfs_emit(buf, "%s\n", cdx_dev->driver_override);
+ device_unlock(dev);
+ return len;
}
static DEVICE_ATTR_RW(driver_override);
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 2cf595d2e10b..f7dd455dd0dd 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -264,8 +264,8 @@ int misc_register(struct miscdevice *misc)
device_create_with_groups(&misc_class, misc->parent, dev,
misc, misc->groups, "%s", misc->name);
if (IS_ERR(misc->this_device)) {
+ misc_minor_free(misc->minor);
if (is_dynamic) {
- misc_minor_free(misc->minor);
misc->minor = MISC_DYNAMIC_MINOR;
}
err = PTR_ERR(misc->this_device);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 24442485e73e..18f92dd44d45 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -923,14 +923,14 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
pipe_lock(pipe);
ret = 0;
- if (pipe_empty(pipe->head, pipe->tail))
+ if (pipe_is_empty(pipe))
goto error_out;
ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
if (ret < 0)
goto error_out;
- occupancy = pipe_occupancy(pipe->head, pipe->tail);
+ occupancy = pipe_buf_usage(pipe);
buf = alloc_buf(port->portdev->vdev, 0, occupancy);
if (!buf) {
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index c14557efd577..bbc3276992bb 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -59,9 +59,6 @@ struct bam_desc_hw {
#define DESC_FLAG_NWD BIT(12)
#define DESC_FLAG_CMD BIT(11)
-#define BAM_NDP_REVISION_START 0x20
-#define BAM_NDP_REVISION_END 0x27
-
struct bam_async_desc {
struct virt_dma_desc vd;
@@ -401,7 +398,6 @@ struct bam_device {
/* dma start transaction tasklet */
struct tasklet_struct task;
- u32 bam_revision;
};
/**
@@ -445,10 +441,8 @@ static void bam_reset(struct bam_device *bdev)
writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
/* set descriptor threshold, start with 4 bytes */
- if (in_range(bdev->bam_revision, BAM_NDP_REVISION_START,
- BAM_NDP_REVISION_END))
- writel_relaxed(DEFAULT_CNT_THRSHLD,
- bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
+ writel_relaxed(DEFAULT_CNT_THRSHLD,
+ bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
/* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */
writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));
@@ -1006,10 +1000,9 @@ static void bam_apply_new_config(struct bam_chan *bchan,
maxburst = bchan->slave.src_maxburst;
else
maxburst = bchan->slave.dst_maxburst;
- if (in_range(bdev->bam_revision, BAM_NDP_REVISION_START,
- BAM_NDP_REVISION_END))
- writel_relaxed(maxburst,
- bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
+
+ writel_relaxed(maxburst,
+ bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
}
bchan->reconfigure = 0;
@@ -1199,11 +1192,10 @@ static int bam_init(struct bam_device *bdev)
u32 val;
/* read revision and configuration information */
- val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
- if (!bdev->num_ees)
+ if (!bdev->num_ees) {
+ val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
-
- bdev->bam_revision = val & REVISION_MASK;
+ }
/* check that configured EE is within range */
if (bdev->ee >= bdev->num_ees)
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 5c6a5b358987..ce80ac4b1a1b 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -83,7 +83,9 @@ struct tegra_adma;
* @nr_channels: Number of DMA channels available.
* @ch_fifo_size_mask: Mask for FIFO size field.
* @sreq_index_offset: Slave channel index offset.
+ * @max_page: Maximum ADMA Channel Page.
* @has_outstanding_reqs: If DMA channel can have outstanding requests.
+ * @set_global_pg_config: Global page programming.
*/
struct tegra_adma_chip_data {
unsigned int (*adma_get_burst_config)(unsigned int burst_size);
@@ -99,6 +101,7 @@ struct tegra_adma_chip_data {
unsigned int nr_channels;
unsigned int ch_fifo_size_mask;
unsigned int sreq_index_offset;
+ unsigned int max_page;
bool has_outstanding_reqs;
void (*set_global_pg_config)(struct tegra_adma *tdma);
};
@@ -854,6 +857,7 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
.nr_channels = 22,
.ch_fifo_size_mask = 0xf,
.sreq_index_offset = 2,
+ .max_page = 0,
.has_outstanding_reqs = false,
.set_global_pg_config = NULL,
};
@@ -871,6 +875,7 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
.nr_channels = 32,
.ch_fifo_size_mask = 0x1f,
.sreq_index_offset = 4,
+ .max_page = 4,
.has_outstanding_reqs = true,
.set_global_pg_config = tegra186_adma_global_page_config,
};
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 5365e9a43000..42433c19eb30 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -1609,8 +1609,8 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
goto out_fw;
}
- ret = regmap_raw_write_async(regmap, reg, buf->buf,
- le32_to_cpu(region->len));
+ ret = regmap_raw_write(regmap, reg, buf->buf,
+ le32_to_cpu(region->len));
if (ret != 0) {
cs_dsp_err(dsp,
"%s.%d: Failed to write %d bytes at %d in %s: %d\n",
@@ -1625,12 +1625,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
regions++;
}
- ret = regmap_async_complete(regmap);
- if (ret != 0) {
- cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
- goto out_fw;
- }
-
if (pos > firmware->size)
cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
file, regions, pos - firmware->size);
@@ -1638,7 +1632,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
cs_dsp_debugfs_save_wmfwname(dsp, file);
out_fw:
- regmap_async_complete(regmap);
cs_dsp_buf_free(&buf_list);
if (ret == -EOVERFLOW)
@@ -2326,8 +2319,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
cs_dsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n",
file, blocks, le32_to_cpu(blk->len),
reg);
- ret = regmap_raw_write_async(regmap, reg, buf->buf,
- le32_to_cpu(blk->len));
+ ret = regmap_raw_write(regmap, reg, buf->buf,
+ le32_to_cpu(blk->len));
if (ret != 0) {
cs_dsp_err(dsp,
"%s.%d: Failed to write to %x in %s: %d\n",
@@ -2339,10 +2332,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
blocks++;
}
- ret = regmap_async_complete(regmap);
- if (ret != 0)
- cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
-
if (pos > firmware->size)
cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
file, blocks, pos - firmware->size);
@@ -2350,7 +2339,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
cs_dsp_debugfs_save_binname(dsp, file);
out_fw:
- regmap_async_complete(regmap);
cs_dsp_buf_free(&buf_list);
if (ret == -EOVERFLOW)
@@ -2561,8 +2549,8 @@ static int cs_dsp_adsp2_enable_core(struct cs_dsp *dsp)
{
int ret;
- ret = regmap_update_bits_async(dsp->regmap, dsp->base + ADSP2_CONTROL,
- ADSP2_SYS_ENA, ADSP2_SYS_ENA);
+ ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_SYS_ENA, ADSP2_SYS_ENA);
if (ret != 0)
return ret;
diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c
index fa9c1c3bf168..f0a63d09d3c4 100644
--- a/drivers/firmware/efi/cper-arm.c
+++ b/drivers/firmware/efi/cper-arm.c
@@ -311,7 +311,7 @@ void cper_print_proc_arm(const char *pfx,
ctx_info = (struct cper_arm_ctx_info *)err_info;
max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1;
for (i = 0; i < proc->context_info_num; i++) {
- int size = sizeof(*ctx_info) + ctx_info->size;
+ int size = ALIGN(sizeof(*ctx_info) + ctx_info->size, 16);
printk("%sContext info structure %d:\n", pfx, i);
if (len < size) {
diff --git a/drivers/firmware/efi/cper-x86.c b/drivers/firmware/efi/cper-x86.c
index 438ed9eff6d0..3949d7b5e808 100644
--- a/drivers/firmware/efi/cper-x86.c
+++ b/drivers/firmware/efi/cper-x86.c
@@ -325,7 +325,7 @@ void cper_print_proc_ia(const char *pfx, const struct cper_sec_proc_ia *proc)
ctx_info = (struct cper_ia_proc_ctx *)err_info;
for (i = 0; i < VALID_PROC_CXT_INFO_NUM(proc->validation_bits); i++) {
- int size = sizeof(*ctx_info) + ctx_info->reg_arr_size;
+ int size = ALIGN(sizeof(*ctx_info) + ctx_info->reg_arr_size, 16);
int groupsize = 4;
printk("%sContext Information Structure %d:\n", pfx, i);
diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c
index 5ed0602c2f75..208db29613c6 100644
--- a/drivers/firmware/efi/mokvar-table.c
+++ b/drivers/firmware/efi/mokvar-table.c
@@ -99,14 +99,13 @@ static struct kobject *mokvar_kobj;
*/
void __init efi_mokvar_table_init(void)
{
+ struct efi_mokvar_table_entry __aligned(1) *mokvar_entry, *next_entry;
efi_memory_desc_t md;
void *va = NULL;
unsigned long cur_offset = 0;
unsigned long offset_limit;
- unsigned long map_size = 0;
unsigned long map_size_needed = 0;
unsigned long size;
- struct efi_mokvar_table_entry *mokvar_entry;
int err;
if (!efi_enabled(EFI_MEMMAP))
@@ -134,48 +133,46 @@ void __init efi_mokvar_table_init(void)
*/
err = -EINVAL;
while (cur_offset + sizeof(*mokvar_entry) <= offset_limit) {
- mokvar_entry = va + cur_offset;
- map_size_needed = cur_offset + sizeof(*mokvar_entry);
- if (map_size_needed > map_size) {
- if (va)
- early_memunmap(va, map_size);
- /*
- * Map a little more than the fixed size entry
- * header, anticipating some data. It's safe to
- * do so as long as we stay within current memory
- * descriptor.
- */
- map_size = min(map_size_needed + 2*EFI_PAGE_SIZE,
- offset_limit);
- va = early_memremap(efi.mokvar_table, map_size);
- if (!va) {
- pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%lu.\n",
- efi.mokvar_table, map_size);
- return;
- }
- mokvar_entry = va + cur_offset;
+ if (va)
+ early_memunmap(va, sizeof(*mokvar_entry));
+ va = early_memremap(efi.mokvar_table + cur_offset, sizeof(*mokvar_entry));
+ if (!va) {
+ pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%zu.\n",
+ efi.mokvar_table + cur_offset, sizeof(*mokvar_entry));
+ return;
}
-
+ mokvar_entry = va;
+next:
/* Check for last sentinel entry */
if (mokvar_entry->name[0] == '\0') {
if (mokvar_entry->data_size != 0)
break;
err = 0;
+ map_size_needed = cur_offset + sizeof(*mokvar_entry);
break;
}
- /* Sanity check that the name is null terminated */
- size = strnlen(mokvar_entry->name,
- sizeof(mokvar_entry->name));
- if (size >= sizeof(mokvar_entry->name))
- break;
+ /* Enforce that the name is NUL terminated */
+ mokvar_entry->name[sizeof(mokvar_entry->name) - 1] = '\0';
/* Advance to the next entry */
- cur_offset = map_size_needed + mokvar_entry->data_size;
+ size = sizeof(*mokvar_entry) + mokvar_entry->data_size;
+ cur_offset += size;
+
+ /*
+ * Don't bother remapping if the current entry header and the
+ * next one end on the same page.
+ */
+ next_entry = (void *)((unsigned long)mokvar_entry + size);
+ if (((((unsigned long)(mokvar_entry + 1) - 1) ^
+ ((unsigned long)(next_entry + 1) - 1)) & PAGE_MASK) == 0) {
+ mokvar_entry = next_entry;
+ goto next;
+ }
}
if (va)
- early_memunmap(va, map_size);
+ early_memunmap(va, sizeof(*mokvar_entry));
if (err) {
pr_err("EFI MOKvar config table is not valid\n");
return;
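The rewritten MOK variable walk maps one entry header at a time and only calls early_memremap() again when the next header would end on a different page; the XOR of the two last-byte addresses masked with PAGE_MASK is the "do they share a page" test. A standalone demonstration of that check, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK (~0xfffUL)	/* 4 KiB pages assumed */

/* True if the last bytes of two len-byte headers at a and b share a page. */
static int ends_on_same_page(uintptr_t a, uintptr_t b, unsigned long len)
{
	return (((a + len - 1) ^ (b + len - 1)) & PAGE_MASK) == 0;
}

int main(void)
{
	const unsigned long hdr = 0x38;	/* pretend sizeof(*mokvar_entry) */

	printf("%d\n", ends_on_same_page(0x1000, 0x1100, hdr));	/* 1: no remap needed */
	printf("%d\n", ends_on_same_page(0x1fc0, 0x2010, hdr));	/* 0: remap for the next page */
	return 0;
}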
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index 65f41cc3eafc..d668ddb2e81d 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -119,10 +119,15 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf,
struct platform_device *pdev;
int res, id;
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
/* kernfs guarantees string termination, so count + 1 is safe */
aggr = kzalloc(sizeof(*aggr) + count + 1, GFP_KERNEL);
- if (!aggr)
- return -ENOMEM;
+ if (!aggr) {
+ res = -ENOMEM;
+ goto put_module;
+ }
memcpy(aggr->args, buf, count + 1);
@@ -161,6 +166,7 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf,
}
aggr->pdev = pdev;
+ module_put(THIS_MODULE);
return count;
remove_table:
@@ -175,6 +181,8 @@ free_table:
kfree(aggr->lookups);
free_ga:
kfree(aggr);
+put_module:
+ module_put(THIS_MODULE);
return res;
}
@@ -203,13 +211,19 @@ static ssize_t delete_device_store(struct device_driver *driver,
if (error)
return error;
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
mutex_lock(&gpio_aggregator_lock);
aggr = idr_remove(&gpio_aggregator_idr, id);
mutex_unlock(&gpio_aggregator_lock);
- if (!aggr)
+ if (!aggr) {
+ module_put(THIS_MODULE);
return -ENOENT;
+ }
gpio_aggregator_free(aggr);
+ module_put(THIS_MODULE);
return count;
}
static DRIVER_ATTR_WO(delete_device);
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 2ecee3269a0c..a7a1cdf7ac66 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -40,7 +40,7 @@ struct gpio_rcar_info {
struct gpio_rcar_priv {
void __iomem *base;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct device *dev;
struct gpio_chip gpio_chip;
unsigned int irq_parent;
@@ -123,7 +123,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
* "Setting Level-Sensitive Interrupt Input Mode"
*/
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
/* Configure positive or negative logic in POSNEG */
gpio_rcar_modify_bit(p, POSNEG, hwirq, !active_high_rising_edge);
@@ -142,7 +142,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
if (!level_trigger)
gpio_rcar_write(p, INTCLR, BIT(hwirq));
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static int gpio_rcar_irq_set_type(struct irq_data *d, unsigned int type)
@@ -246,7 +246,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
* "Setting General Input Mode"
*/
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
/* Configure positive logic in POSNEG */
gpio_rcar_modify_bit(p, POSNEG, gpio, false);
@@ -261,7 +261,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
if (p->info.has_outdtsel && output)
gpio_rcar_modify_bit(p, OUTDTSEL, gpio, false);
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
@@ -347,7 +347,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
return 0;
}
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
outputs = gpio_rcar_read(p, INOUTSEL);
m = outputs & bankmask;
if (m)
@@ -356,7 +356,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
m = ~outputs & bankmask;
if (m)
val |= gpio_rcar_read(p, INDT) & m;
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
bits[0] = val;
return 0;
@@ -367,9 +367,9 @@ static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value)
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
unsigned long flags;
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
gpio_rcar_modify_bit(p, OUTDT, offset, value);
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
@@ -386,12 +386,12 @@ static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
if (!bankmask)
return;
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
val = gpio_rcar_read(p, OUTDT);
val &= ~bankmask;
val |= (bankmask & bits[0]);
gpio_rcar_write(p, OUTDT, val);
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
@@ -468,7 +468,12 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
p->info = *info;
ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);
- *npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK;
+ if (ret) {
+ *npins = RCAR_MAX_GPIO_PER_BANK;
+ } else {
+ *npins = args.args[2];
+ of_node_put(args.np);
+ }
if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) {
dev_warn(p->dev, "Invalid number of gpio lines %u, using %u\n",
@@ -505,7 +510,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
return -ENOMEM;
p->dev = dev;
- spin_lock_init(&p->lock);
+ raw_spin_lock_init(&p->lock);
/* Get device configuration from DT node */
ret = gpio_rcar_parse_dt(p, &npins);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index fc19df5a64c2..8741600af7ef 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2712,7 +2712,7 @@ EXPORT_SYMBOL_GPL(gpiod_direction_input);
int gpiod_direction_input_nonotify(struct gpio_desc *desc)
{
- int ret = 0;
+ int ret = 0, dir;
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
@@ -2740,12 +2740,12 @@ int gpiod_direction_input_nonotify(struct gpio_desc *desc)
ret = guard.gc->direction_input(guard.gc,
gpio_chip_hwgpio(desc));
} else if (guard.gc->get_direction) {
- ret = guard.gc->get_direction(guard.gc,
+ dir = guard.gc->get_direction(guard.gc,
gpio_chip_hwgpio(desc));
- if (ret < 0)
- return ret;
+ if (dir < 0)
+ return dir;
- if (ret != GPIO_LINE_DIRECTION_IN) {
+ if (dir != GPIO_LINE_DIRECTION_IN) {
gpiod_warn(desc,
"%s: missing direction_input() operation and line is output\n",
__func__);
@@ -2764,7 +2764,7 @@ int gpiod_direction_input_nonotify(struct gpio_desc *desc)
static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
{
- int val = !!value, ret = 0;
+ int val = !!value, ret = 0, dir;
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
@@ -2788,12 +2788,12 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
} else {
/* Check that we are in output mode if we can */
if (guard.gc->get_direction) {
- ret = guard.gc->get_direction(guard.gc,
+ dir = guard.gc->get_direction(guard.gc,
gpio_chip_hwgpio(desc));
- if (ret < 0)
- return ret;
+ if (dir < 0)
+ return dir;
- if (ret != GPIO_LINE_DIRECTION_OUT) {
+ if (dir != GPIO_LINE_DIRECTION_OUT) {
gpiod_warn(desc,
"%s: missing direction_output() operation\n",
__func__);
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index 8997f0096545..36a54d456630 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -5,3 +5,4 @@
obj-y += host1x/ drm/ vga/
obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/
obj-$(CONFIG_TRACE_GPU_MEM) += trace/
+obj-$(CONFIG_NOVA_CORE) += nova-core/
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e5b59de28216..1be14d8634f4 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -278,6 +278,15 @@ config DRM_GPUVM
GPU-VM representation providing helpers to manage a GPUs virtual
address space
+config DRM_GPUSVM
+ tristate
+ depends on DRM && DEVICE_PRIVATE
+ select HMM_MIRROR
+ select MMU_NOTIFIER
+ help
+ GPU-SVM representation providing helpers to manage a GPUs shared
+ virtual memory
+
config DRM_BUDDY
tristate
depends on DRM
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 4cd054188faf..ed54a546bbe2 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -104,6 +104,7 @@ obj-$(CONFIG_DRM_PANEL_BACKLIGHT_QUIRKS) += drm_panel_backlight_quirks.o
#
obj-$(CONFIG_DRM_EXEC) += drm_exec.o
obj-$(CONFIG_DRM_GPUVM) += drm_gpuvm.o
+obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm.o
obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
diff --git a/drivers/gpu/drm/drm_client_event.c b/drivers/gpu/drm/drm_client_event.c
index e303de564485..bd93cd93d519 100644
--- a/drivers/gpu/drm/drm_client_event.c
+++ b/drivers/gpu/drm/drm_client_event.c
@@ -49,6 +49,29 @@ void drm_client_dev_unregister(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_client_dev_unregister);
+static void drm_client_hotplug(struct drm_client_dev *client)
+{
+ struct drm_device *dev = client->dev;
+ int ret;
+
+ if (!client->funcs || !client->funcs->hotplug)
+ return;
+
+ if (client->hotplug_failed)
+ return;
+
+ if (client->suspended) {
+ client->hotplug_pending = true;
+ return;
+ }
+
+ client->hotplug_pending = false;
+ ret = client->funcs->hotplug(client);
+ drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
+ if (ret)
+ client->hotplug_failed = true;
+}
+
/**
* drm_client_dev_hotplug - Send hotplug event to clients
* @dev: DRM device
@@ -61,7 +84,6 @@ EXPORT_SYMBOL(drm_client_dev_unregister);
void drm_client_dev_hotplug(struct drm_device *dev)
{
struct drm_client_dev *client;
- int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
@@ -72,18 +94,8 @@ void drm_client_dev_hotplug(struct drm_device *dev)
}
mutex_lock(&dev->clientlist_mutex);
- list_for_each_entry(client, &dev->clientlist, list) {
- if (!client->funcs || !client->funcs->hotplug)
- continue;
-
- if (client->hotplug_failed)
- continue;
-
- ret = client->funcs->hotplug(client);
- drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
- if (ret)
- client->hotplug_failed = true;
- }
+ list_for_each_entry(client, &dev->clientlist, list)
+ drm_client_hotplug(client);
mutex_unlock(&dev->clientlist_mutex);
}
EXPORT_SYMBOL(drm_client_dev_hotplug);
@@ -153,6 +165,9 @@ static int drm_client_resume(struct drm_client_dev *client, bool holds_console_l
client->suspended = false;
+ if (client->hotplug_pending)
+ drm_client_hotplug(client);
+
return ret;
}
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
index afb02aae707b..44a5a36806e3 100644
--- a/drivers/gpu/drm/drm_damage_helper.c
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -308,7 +308,7 @@ EXPORT_SYMBOL(drm_atomic_helper_damage_iter_next);
* True if there is valid plane damage otherwise false.
*/
bool drm_atomic_helper_damage_merged(const struct drm_plane_state *old_state,
- struct drm_plane_state *state,
+ const struct drm_plane_state *state,
struct drm_rect *rect)
{
struct drm_atomic_helper_damage_iter iter;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index fb3614a7ba44..937c3939e502 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -245,6 +245,9 @@ __drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper,
if (do_delayed)
drm_fb_helper_hotplug_event(fb_helper);
+ if (fb_helper->funcs->fb_restore)
+ fb_helper->funcs->fb_restore(fb_helper);
+
return ret;
}
@@ -754,7 +757,12 @@ EXPORT_SYMBOL(drm_fb_helper_deferred_io);
*/
void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend)
{
- if (fb_helper && fb_helper->info)
+ if (!fb_helper || !fb_helper->info)
+ return;
+
+ if (fb_helper->funcs->fb_set_suspend)
+ fb_helper->funcs->fb_set_suspend(fb_helper, suspend);
+ else
fb_set_suspend(fb_helper->info, suspend);
}
EXPORT_SYMBOL(drm_fb_helper_set_suspend);
@@ -800,7 +808,7 @@ void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
}
}
- fb_set_suspend(fb_helper->info, suspend);
+ drm_fb_helper_set_suspend(fb_helper, suspend);
console_unlock();
}
EXPORT_SYMBOL(drm_fb_helper_set_suspend_unlocked);
@@ -1626,6 +1634,9 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper)
struct fb_info *info;
int ret;
+ if (drm_WARN_ON(dev, !dev->driver->fbdev_probe))
+ return -EINVAL;
+
ret = drm_fb_helper_find_sizes(fb_helper, &sizes);
if (ret) {
/* First time: disable all crtc's.. */
@@ -1635,10 +1646,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper)
}
/* push down into drivers */
- if (dev->driver->fbdev_probe)
- ret = dev->driver->fbdev_probe(fb_helper, &sizes);
- else if (fb_helper->funcs)
- ret = fb_helper->funcs->fb_probe(fb_helper, &sizes);
+ ret = dev->driver->fbdev_probe(fb_helper, &sizes);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
index b14b581c059d..02a516e77192 100644
--- a/drivers/gpu/drm/drm_fbdev_dma.c
+++ b/drivers/gpu/drm/drm_fbdev_dma.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: MIT
#include <linux/fb.h>
+#include <linux/vmalloc.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -70,37 +71,102 @@ static const struct fb_ops drm_fbdev_dma_fb_ops = {
.fb_destroy = drm_fbdev_dma_fb_destroy,
};
-FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
+FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma_shadowed,
drm_fb_helper_damage_range,
drm_fb_helper_damage_area);
-static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_framebuffer *fb = fb_helper->fb;
- struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
+ void *shadow = info->screen_buffer;
+
+ if (!fb_helper->dev)
+ return;
- if (!dma->map_noncoherent)
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ if (info->fbdefio)
+ fb_deferred_io_cleanup(info);
+ drm_fb_helper_fini(fb_helper);
+ vfree(shadow);
- return fb_deferred_io_mmap(info, vma);
+ drm_client_buffer_vunmap(fb_helper->buffer);
+ drm_client_framebuffer_delete(fb_helper->buffer);
+ drm_client_release(&fb_helper->client);
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
}
-static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
+static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = {
.owner = THIS_MODULE,
.fb_open = drm_fbdev_dma_fb_open,
.fb_release = drm_fbdev_dma_fb_release,
- __FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
+ FB_DEFAULT_DEFERRED_OPS(drm_fbdev_dma_shadowed),
DRM_FB_HELPER_DEFAULT_OPS,
- __FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
- .fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
- .fb_destroy = drm_fbdev_dma_fb_destroy,
+ .fb_destroy = drm_fbdev_dma_shadowed_fb_destroy,
};
/*
* struct drm_fb_helper
*/
+static void drm_fbdev_dma_damage_blit_real(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip,
+ struct iosys_map *dst)
+{
+ struct drm_framebuffer *fb = fb_helper->fb;
+ size_t offset = clip->y1 * fb->pitches[0];
+ size_t len = clip->x2 - clip->x1;
+ unsigned int y;
+ void *src;
+
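+	/* Translate the clip rectangle into a per-scanline byte offset and length. */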
+ switch (drm_format_info_bpp(fb->format, 0)) {
+ case 1:
+ offset += clip->x1 / 8;
+ len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
+ break;
+ case 2:
+ offset += clip->x1 / 4;
+ len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
+ break;
+ case 4:
+ offset += clip->x1 / 2;
+ len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
+ break;
+ default:
+ offset += clip->x1 * fb->format->cpp[0];
+ len *= fb->format->cpp[0];
+ break;
+ }
+
+ src = fb_helper->info->screen_buffer + offset;
+ iosys_map_incr(dst, offset); /* go to first pixel within clip rect */
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ iosys_map_memcpy_to(dst, 0, src, len);
+ iosys_map_incr(dst, fb->pitches[0]);
+ src += fb->pitches[0];
+ }
+}
+
+static int drm_fbdev_dma_damage_blit(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip)
+{
+ struct drm_client_buffer *buffer = fb_helper->buffer;
+ struct iosys_map dst;
+
+ /*
+ * For fbdev emulation, we only have to protect against fbdev modeset
+ * operations. Nothing else will involve the client buffer's BO. So it
+ * is sufficient to acquire struct drm_fb_helper.lock here.
+ */
+ mutex_lock(&fb_helper->lock);
+
+ dst = buffer->map;
+ drm_fbdev_dma_damage_blit_real(fb_helper, clip, &dst);
+
+ mutex_unlock(&fb_helper->lock);
+
+ return 0;
+}
static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
struct drm_clip_rect *clip)
{
@@ -112,6 +178,10 @@ static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
return 0;
if (helper->fb->funcs->dirty) {
+ ret = drm_fbdev_dma_damage_blit(helper, clip);
+ if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
+ return ret;
+
ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
return ret;
@@ -128,14 +198,80 @@ static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
* struct drm_fb_helper
*/
+static int drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_client_buffer *buffer = fb_helper->buffer;
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(buffer->gem);
+ struct drm_framebuffer *fb = fb_helper->fb;
+ struct fb_info *info = fb_helper->info;
+ struct iosys_map map = buffer->map;
+
+ info->fbops = &drm_fbdev_dma_fb_ops;
+
+ /* screen */
+ info->flags |= FBINFO_VIRTFB; /* system memory */
+ if (dma_obj->map_noncoherent)
+ info->flags |= FBINFO_READS_FAST; /* signal caching */
+ info->screen_size = sizes->surface_height * fb->pitches[0];
+ info->screen_buffer = map.vaddr;
+ if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
+ if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
+ info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
+ }
+ info->fix.smem_len = info->screen_size;
+
+ return 0;
+}
+
+static int drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_client_buffer *buffer = fb_helper->buffer;
+ struct fb_info *info = fb_helper->info;
+ size_t screen_size = buffer->gem->size;
+ void *screen_buffer;
+ int ret;
+
+ /*
+ * Deferred I/O requires struct page for framebuffer memory,
+ * which is not guaranteed for all DMA ranges. We thus create
+ * a shadow buffer in system memory.
+ */
+ screen_buffer = vzalloc(screen_size);
+ if (!screen_buffer)
+ return -ENOMEM;
+
+ info->fbops = &drm_fbdev_dma_shadowed_fb_ops;
+
+ /* screen */
+ info->flags |= FBINFO_VIRTFB; /* system memory */
+ info->flags |= FBINFO_READS_FAST; /* signal caching */
+ info->screen_buffer = screen_buffer;
+ info->fix.smem_len = screen_size;
+
+ fb_helper->fbdefio.delay = HZ / 20;
+ fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+
+ info->fbdefio = &fb_helper->fbdefio;
+ ret = fb_deferred_io_init(info);
+ if (ret)
+ goto err_vfree;
+
+ return 0;
+
+err_vfree:
+ vfree(screen_buffer);
+ return ret;
+}
+
int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
- bool use_deferred_io = false;
struct drm_client_buffer *buffer;
- struct drm_gem_dma_object *dma_obj;
struct drm_framebuffer *fb;
struct fb_info *info;
u32 format;
@@ -152,19 +288,9 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
sizes->surface_height, format);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
- dma_obj = to_drm_gem_dma_obj(buffer->gem);
fb = buffer->fb;
- /*
- * Deferred I/O requires struct page for framebuffer memory,
- * which is not guaranteed for all DMA ranges. We thus only
- * install deferred I/O if we have a framebuffer that requires
- * it.
- */
- if (fb->funcs->dirty)
- use_deferred_io = true;
-
ret = drm_client_buffer_vmap(buffer, &map);
if (ret) {
goto err_drm_client_buffer_delete;
@@ -185,45 +311,12 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
drm_fb_helper_fill_info(info, fb_helper, sizes);
- if (use_deferred_io)
- info->fbops = &drm_fbdev_dma_deferred_fb_ops;
+ if (fb->funcs->dirty)
+ ret = drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(fb_helper, sizes);
else
- info->fbops = &drm_fbdev_dma_fb_ops;
-
- /* screen */
- info->flags |= FBINFO_VIRTFB; /* system memory */
- if (dma_obj->map_noncoherent)
- info->flags |= FBINFO_READS_FAST; /* signal caching */
- info->screen_size = sizes->surface_height * fb->pitches[0];
- info->screen_buffer = map.vaddr;
- if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
- if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
- info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
- }
- info->fix.smem_len = info->screen_size;
-
- /*
- * Only set up deferred I/O if the screen buffer supports
- * it. If this disagrees with the previous test for ->dirty,
- * mmap on the /dev/fb file might not work correctly.
- */
- if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) {
- unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
-
- if (drm_WARN_ON(dev, !pfn_to_page(pfn)))
- use_deferred_io = false;
- }
-
- /* deferred I/O */
- if (use_deferred_io) {
- fb_helper->fbdefio.delay = HZ / 20;
- fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
-
- info->fbdefio = &fb_helper->fbdefio;
- ret = fb_deferred_io_init(info);
- if (ret)
- goto err_drm_fb_helper_release_info;
- }
+ ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes);
+ if (ret)
+ goto err_drm_fb_helper_release_info;
return 0;
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
new file mode 100644
index 000000000000..2451c816edd5
--- /dev/null
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -0,0 +1,2250 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ *
+ * Authors:
+ * Matthew Brost <matthew.brost@intel.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/hmm.h>
+#include <linux/memremap.h>
+#include <linux/migrate.h>
+#include <linux/mm_types.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_gpusvm.h>
+#include <drm/drm_pagemap.h>
+#include <drm/drm_print.h>
+
+/**
+ * DOC: Overview
+ *
+ * GPU Shared Virtual Memory (GPU SVM) layer for the Direct Rendering Manager (DRM)
+ * is a component of the DRM framework designed to manage shared virtual memory
+ * between the CPU and GPU. It enables efficient data exchange and processing
+ * for GPU-accelerated applications by allowing memory sharing and
+ * synchronization between the CPU's and GPU's virtual address spaces.
+ *
+ * Key GPU SVM Components:
+ *
+ * - Notifiers:
+ * Used for tracking memory intervals and notifying the GPU of changes,
+ * notifiers are sized based on a GPU SVM initialization parameter, with a
+ * recommendation of 512M or larger. They maintain a Red-Black tree and a
+ * list of ranges that fall within the notifier interval. Notifiers are
+ * tracked within a GPU SVM Red-Black tree and list and are dynamically
+ * inserted or removed as ranges within the interval are created or
+ * destroyed.
+ * - Ranges:
+ * Represent memory ranges mapped in a DRM device and managed by GPU SVM.
+ * They are sized based on an array of chunk sizes, which is a GPU SVM
+ * initialization parameter, and the CPU address space. Upon GPU fault,
+ * the largest aligned chunk that fits within the faulting CPU address
+ * space is chosen for the range size. Ranges are expected to be
+ * dynamically allocated on GPU fault and removed on an MMU notifier UNMAP
+ * event. As mentioned above, ranges are tracked in a notifier's Red-Black
+ * tree.
+ *
+ * - Operations:
+ * Define the interface for driver-specific GPU SVM operations such as
+ * range allocation, notifier allocation, and invalidations.
+ *
+ * - Device Memory Allocations:
+ * Embedded structure containing enough information for GPU SVM to migrate
+ * to / from device memory.
+ *
+ * - Device Memory Operations:
+ * Define the interface for driver-specific device memory operations, such
+ * as releasing memory, populating pfns, and copying to / from device memory.
+ *
+ * This layer provides interfaces for allocating, mapping, migrating, and
+ * releasing memory ranges between the CPU and GPU. It handles all core memory
+ * management interactions (DMA mapping, HMM, and migration) and provides
+ * driver-specific virtual functions (vfuncs). This infrastructure is sufficient
+ * to build the expected driver components for an SVM implementation as detailed
+ * below.
+ *
+ * Expected Driver Components:
+ *
+ * - GPU page fault handler:
+ * Used to create ranges and notifiers based on the fault address,
+ * optionally migrate the range to device memory, and create GPU bindings.
+ *
+ * - Garbage collector:
+ * Used to unmap and destroy GPU bindings for ranges. Ranges are expected
+ * to be added to the garbage collector upon a MMU_NOTIFY_UNMAP event in
+ * notifier callback.
+ *
+ * - Notifier callback:
+ * Used to invalidate and DMA unmap GPU bindings for ranges.
+ */
+
+/**
+ * DOC: Locking
+ *
+ * GPU SVM handles locking for core MM interactions, i.e., it locks/unlocks the
+ * mmap lock as needed.
+ *
+ * GPU SVM introduces a global notifier lock, which safeguards the notifier's
+ * range RB tree and list, as well as the range's DMA mappings and sequence
+ * number. GPU SVM manages all necessary locking and unlocking operations,
+ * except for the recheck range's pages being valid
+ * (drm_gpusvm_range_pages_valid) when the driver is committing GPU bindings.
+ * This lock corresponds to the ``driver->update`` lock mentioned in
+ * Documentation/mm/hmm.rst. Future revisions may transition from a GPU SVM
+ * global lock to a per-notifier lock if finer-grained locking is deemed
+ * necessary.
+ *
+ * In addition to the locking mentioned above, the driver should implement a
+ * lock to safeguard core GPU SVM function calls that modify state, such as
+ * drm_gpusvm_range_find_or_insert and drm_gpusvm_range_remove. This lock is
+ * denoted as 'driver_svm_lock' in code examples. Finer grained driver side
+ * locking should also be possible for concurrent GPU fault processing within a
+ * single GPU SVM. The 'driver_svm_lock' can be registered via
+ * drm_gpusvm_driver_set_lock to add annotations to GPU SVM.
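+ *
+ * A minimal sketch of registering the 'driver_svm_lock' mentioned above,
+ * assuming a hypothetical driver structure that embeds the GPU SVM and a
+ * mutex, and that drm_gpusvm_driver_set_lock() takes the GPU SVM and the
+ * driver lock:
+ *
+ * .. code-block:: c
+ *
+ *	struct driver_svm {
+ *		struct drm_gpusvm gpusvm;
+ *		struct mutex lock;	// the 'driver_svm_lock'
+ *	};
+ *
+ *	static void driver_svm_lock_init(struct driver_svm *d)
+ *	{
+ *		mutex_init(&d->lock);
+ *		drm_gpusvm_driver_set_lock(&d->gpusvm, &d->lock);
+ *	}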
+ */
+
+/**
+ * DOC: Migration
+ *
+ * The migration support is quite simple, allowing migration between RAM and
+ * device memory at the range granularity. For example, GPU SVM currently does
+ * not support mixing RAM and device memory pages within a range. This means
+ * that upon GPU fault, the entire range can be migrated to device memory, and
+ * upon CPU fault, the entire range is migrated to RAM. Mixed RAM and device
+ * memory storage within a range could be added in the future if required.
+ *
+ * The reasoning for only supporting range granularity is as follows: it
+ * simplifies the implementation, and range sizes are driver-defined and should
+ * be relatively small.
+ */
+
+/**
+ * DOC: Partial Unmapping of Ranges
+ *
+ * Partial unmapping of ranges (e.g., 1M out of 2M is unmapped by CPU resulting
+ * in MMU_NOTIFY_UNMAP event) presents several challenges, with the main one
+ * being that a subset of the range still has CPU and GPU mappings. If the
+ * backing store for the range is in device memory, a subset of the backing
+ * store has references. One option would be to split the range and device
+ * memory backing store, but the implementation for this would be quite
+ * complicated. Given that partial unmappings are rare and driver-defined range
+ * sizes are relatively small, GPU SVM does not support splitting of ranges.
+ *
+ * With no support for range splitting, upon partial unmapping of a range, the
+ * driver is expected to invalidate and destroy the entire range. If the range
+ * has device memory as its backing, the driver is also expected to migrate any
+ * remaining pages back to RAM.
+ */
+
+/**
+ * DOC: Examples
+ *
+ * This section provides three examples of how to build the expected driver
+ * components: the GPU page fault handler, the garbage collector, and the
+ * notifier callback.
+ *
+ * The generic code provided does not include logic for complex migration
+ * policies, optimized invalidations, fine-grained driver locking, or other
+ * potentially required driver locking (e.g., DMA-resv locks).
+ *
+ * 1) GPU page fault handler
+ *
+ * .. code-block:: c
+ *
+ * int driver_bind_range(struct drm_gpusvm *gpusvm, struct drm_gpusvm_range *range)
+ * {
+ * int err = 0;
+ *
+ * driver_alloc_and_setup_memory_for_bind(gpusvm, range);
+ *
+ * drm_gpusvm_notifier_lock(gpusvm);
+ * if (drm_gpusvm_range_pages_valid(range))
+ * driver_commit_bind(gpusvm, range);
+ * else
+ * err = -EAGAIN;
+ * drm_gpusvm_notifier_unlock(gpusvm);
+ *
+ * return err;
+ * }
+ *
+ * int driver_gpu_fault(struct drm_gpusvm *gpusvm, unsigned long fault_addr,
+ * unsigned long gpuva_start, unsigned long gpuva_end)
+ * {
+ * struct drm_gpusvm_ctx ctx = {};
+ * int err;
+ *
+ * driver_svm_lock();
+ * retry:
+ * // Always process UNMAPs first so view of GPU SVM ranges is current
+ * driver_garbage_collector(gpusvm);
+ *
+ * range = drm_gpusvm_range_find_or_insert(gpusvm, fault_addr,
+ * gpuva_start, gpuva_end,
+ * &ctx);
+ * if (IS_ERR(range)) {
+ * err = PTR_ERR(range);
+ * goto unlock;
+ * }
+ *
+ * if (driver_migration_policy(range)) {
+ * mmap_read_lock(mm);
+ * devmem = driver_alloc_devmem();
+ * err = drm_gpusvm_migrate_to_devmem(gpusvm, range,
+ * devmem_allocation,
+ * &ctx);
+ * mmap_read_unlock(mm);
+ * if (err) // CPU mappings may have changed
+ * goto retry;
+ * }
+ *
+ * err = drm_gpusvm_range_get_pages(gpusvm, range, &ctx);
+ * if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) { // CPU mappings changed
+ * if (err == -EOPNOTSUPP)
+ * drm_gpusvm_range_evict(gpusvm, range);
+ * goto retry;
+ * } else if (err) {
+ * goto unlock;
+ * }
+ *
+ * err = driver_bind_range(gpusvm, range);
+ * if (err == -EAGAIN) // CPU mappings changed
+ * goto retry;
+ *
+ * unlock:
+ * driver_svm_unlock();
+ * return err;
+ * }
+ *
+ * 2) Garbage Collector
+ *
+ * .. code-block:: c
+ *
+ * void __driver_garbage_collector(struct drm_gpusvm *gpusvm,
+ * struct drm_gpusvm_range *range)
+ * {
+ * assert_driver_svm_locked(gpusvm);
+ *
+ * // Partial unmap, migrate any remaining device memory pages back to RAM
+ * if (range->flags.partial_unmap)
+ * drm_gpusvm_range_evict(gpusvm, range);
+ *
+ * driver_unbind_range(range);
+ * drm_gpusvm_range_remove(gpusvm, range);
+ * }
+ *
+ * void driver_garbage_collector(struct drm_gpusvm *gpusvm)
+ * {
+ * assert_driver_svm_locked(gpusvm);
+ *
+ * for_each_range_in_garbage_collector(gpusvm, range)
+ * __driver_garbage_collector(gpusvm, range);
+ * }
+ *
+ * 3) Notifier callback
+ *
+ * .. code-block:: c
+ *
+ * void driver_invalidation(struct drm_gpusvm *gpusvm,
+ * struct drm_gpusvm_notifier *notifier,
+ * const struct mmu_notifier_range *mmu_range)
+ * {
+ * struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
+ * struct drm_gpusvm_range *range = NULL;
+ *
+ * driver_invalidate_device_pages(gpusvm, mmu_range->start, mmu_range->end);
+ *
+ * drm_gpusvm_for_each_range(range, notifier, mmu_range->start,
+ * mmu_range->end) {
+ * drm_gpusvm_range_unmap_pages(gpusvm, range, &ctx);
+ *
+ * if (mmu_range->event != MMU_NOTIFY_UNMAP)
+ * continue;
+ *
+ * drm_gpusvm_range_set_unmapped(range, mmu_range);
+ * driver_garbage_collector_add(gpusvm, range);
+ * }
+ * }
+ */
+
+/**
+ * npages_in_range() - Calculate the number of pages in a given range
+ * @start: The start address of the range
+ * @end: The end address of the range
+ *
+ * This function calculates the number of pages in a given memory range,
+ * specified by the start and end addresses. It divides the difference
+ * between the end and start addresses by the page size (PAGE_SIZE) to
+ * determine the number of pages in the range.
+ *
+ * Return: The number of pages in the specified range.
+ */
+static unsigned long
+npages_in_range(unsigned long start, unsigned long end)
+{
+ return (end - start) >> PAGE_SHIFT;
+}
+
+/**
+ * struct drm_gpusvm_zdd - GPU SVM zone device data
+ *
+ * @refcount: Reference count for the zdd
+ * @devmem_allocation: device memory allocation
+ * @device_private_page_owner: Device private pages owner
+ *
+ * This structure serves as a generic wrapper installed in
+ * page->zone_device_data. It provides infrastructure for looking up a device
+ * memory allocation upon CPU page fault and asynchronously releasing device
+ * memory once the CPU has no page references. Asynchronous release is useful
+ * because CPU page references can be dropped in IRQ contexts, while releasing
+ * device memory likely requires sleeping locks.
+ */
+struct drm_gpusvm_zdd {
+ struct kref refcount;
+ struct drm_gpusvm_devmem *devmem_allocation;
+ void *device_private_page_owner;
+};
+
+/**
+ * drm_gpusvm_zdd_alloc() - Allocate a zdd structure.
+ * @device_private_page_owner: Device private pages owner
+ *
+ * This function allocates and initializes a new zdd structure. It sets up the
+ * reference count and the device private page owner.
+ *
+ * Return: Pointer to the allocated zdd on success, NULL on failure.
+ */
+static struct drm_gpusvm_zdd *
+drm_gpusvm_zdd_alloc(void *device_private_page_owner)
+{
+ struct drm_gpusvm_zdd *zdd;
+
+ zdd = kmalloc(sizeof(*zdd), GFP_KERNEL);
+ if (!zdd)
+ return NULL;
+
+ kref_init(&zdd->refcount);
+ zdd->devmem_allocation = NULL;
+ zdd->device_private_page_owner = device_private_page_owner;
+
+ return zdd;
+}
+
+/**
+ * drm_gpusvm_zdd_get() - Get a reference to a zdd structure.
+ * @zdd: Pointer to the zdd structure.
+ *
+ * This function increments the reference count of the provided zdd structure.
+ *
+ * Return: Pointer to the zdd structure.
+ */
+static struct drm_gpusvm_zdd *drm_gpusvm_zdd_get(struct drm_gpusvm_zdd *zdd)
+{
+ kref_get(&zdd->refcount);
+ return zdd;
+}
+
+/**
+ * drm_gpusvm_zdd_destroy() - Destroy a zdd structure.
+ * @ref: Pointer to the reference count structure.
+ *
+ * This function releases the device memory allocation associated with the zdd,
+ * if any, and frees the zdd structure.
+ */
+static void drm_gpusvm_zdd_destroy(struct kref *ref)
+{
+ struct drm_gpusvm_zdd *zdd =
+ container_of(ref, struct drm_gpusvm_zdd, refcount);
+ struct drm_gpusvm_devmem *devmem = zdd->devmem_allocation;
+
+ if (devmem) {
+ complete_all(&devmem->detached);
+ if (devmem->ops->devmem_release)
+ devmem->ops->devmem_release(devmem);
+ }
+ kfree(zdd);
+}
+
+/**
+ * drm_gpusvm_zdd_put() - Put a zdd reference.
+ * @zdd: Pointer to the zdd structure.
+ *
+ * This function decrements the reference count of the provided zdd structure
+ * and schedules its destruction if the count drops to zero.
+ */
+static void drm_gpusvm_zdd_put(struct drm_gpusvm_zdd *zdd)
+{
+ kref_put(&zdd->refcount, drm_gpusvm_zdd_destroy);
+}
+
+/**
+ * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier
+ * @notifier: Pointer to the GPU SVM notifier structure.
+ * @start: Start address of the range
+ * @end: End address of the range
+ *
+ * Return: A pointer to the drm_gpusvm_range if found or NULL
+ */
+struct drm_gpusvm_range *
+drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
+ unsigned long end)
+{
+ struct interval_tree_node *itree;
+
+ itree = interval_tree_iter_first(&notifier->root, start, end - 1);
+
+ if (itree)
+ return container_of(itree, struct drm_gpusvm_range, itree);
+ else
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_find);
+
+/**
+ * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier
+ * @range__: Iterator variable for the ranges
+ * @next__: Iterator variable for the ranges temporary storage
+ * @notifier__: Pointer to the GPU SVM notifier
+ * @start__: Start address of the range
+ * @end__: End address of the range
+ *
+ * This macro is used to iterate over GPU SVM ranges in a notifier while
+ * removing ranges from it.
+ */
+#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \
+ for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \
+ (next__) = __drm_gpusvm_range_next(range__); \
+ (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
+ (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))
+
+/**
+ * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list
+ * @notifier: a pointer to the current drm_gpusvm_notifier
+ *
+ * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if
+ * the current notifier is the last one or if the input notifier is
+ * NULL.
+ */
+static struct drm_gpusvm_notifier *
+__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier)
+{
+ if (notifier && !list_is_last(&notifier->entry,
+ &notifier->gpusvm->notifier_list))
+ return list_next_entry(notifier, entry);
+
+ return NULL;
+}
+
+static struct drm_gpusvm_notifier *
+notifier_iter_first(struct rb_root_cached *root, unsigned long start,
+ unsigned long last)
+{
+ struct interval_tree_node *itree;
+
+ itree = interval_tree_iter_first(root, start, last);
+
+ if (itree)
+ return container_of(itree, struct drm_gpusvm_notifier, itree);
+ else
+ return NULL;
+}
+
+/**
+ * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm
+ * @notifier__: Iterator variable for the notifiers
+ * @gpusvm__: Pointer to the GPU SVM structure
+ * @start__: Start address of the notifier
+ * @end__: End address of the notifier
+ *
+ * This macro is used to iterate over GPU SVM notifiers in a gpusvm.
+ */
+#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \
+ for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1); \
+ (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
+ (notifier__) = __drm_gpusvm_notifier_next(notifier__))
+
+/**
+ * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm
+ * @notifier__: Iterator variable for the notifiers
+ * @next__: Iterator variable for the notifiers temporary storage
+ * @gpusvm__: Pointer to the GPU SVM structure
+ * @start__: Start address of the notifier
+ * @end__: End address of the notifier
+ *
+ * This macro is used to iterate over GPU SVM notifiers in a gpusvm while
+ * removing notifiers from it.
+ */
+#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \
+ for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1), \
+ (next__) = __drm_gpusvm_notifier_next(notifier__); \
+ (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
+ (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))
+
+/**
+ * drm_gpusvm_notifier_invalidate() - Invalidate a GPU SVM notifier.
+ * @mni: Pointer to the mmu_interval_notifier structure.
+ * @mmu_range: Pointer to the mmu_notifier_range structure.
+ * @cur_seq: Current sequence number.
+ *
+ * This function serves as a generic MMU notifier for GPU SVM. It sets the MMU
+ * notifier sequence number and calls the driver invalidate vfunc under
+ * gpusvm->notifier_lock.
+ *
+ * Return: true if the operation succeeds, false otherwise.
+ */
+static bool
+drm_gpusvm_notifier_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *mmu_range,
+ unsigned long cur_seq)
+{
+ struct drm_gpusvm_notifier *notifier =
+ container_of(mni, typeof(*notifier), notifier);
+ struct drm_gpusvm *gpusvm = notifier->gpusvm;
+
+ if (!mmu_notifier_range_blockable(mmu_range))
+ return false;
+
+ down_write(&gpusvm->notifier_lock);
+ mmu_interval_set_seq(mni, cur_seq);
+ gpusvm->ops->invalidate(gpusvm, notifier, mmu_range);
+ up_write(&gpusvm->notifier_lock);
+
+ return true;
+}
+
+/*
+ * drm_gpusvm_notifier_ops - MMU interval notifier operations for GPU SVM
+ */
+static const struct mmu_interval_notifier_ops drm_gpusvm_notifier_ops = {
+ .invalidate = drm_gpusvm_notifier_invalidate,
+};
+
+/**
+ * drm_gpusvm_init() - Initialize the GPU SVM.
+ * @gpusvm: Pointer to the GPU SVM structure.
+ * @name: Name of the GPU SVM.
+ * @drm: Pointer to the DRM device structure.
+ * @mm: Pointer to the mm_struct for the address space.
+ * @device_private_page_owner: Device private pages owner.
+ * @mm_start: Start address of GPU SVM.
+ * @mm_range: Range of the GPU SVM.
+ * @notifier_size: Size of individual notifiers.
+ * @ops: Pointer to the operations structure for GPU SVM.
+ * @chunk_sizes: Pointer to the array of chunk sizes used in range allocation.
+ * Entries should be powers of 2 in descending order with last
+ * entry being SZ_4K.
+ * @num_chunks: Number of chunks.
+ *
+ * This function initializes the GPU SVM.
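+ *
+ * A minimal usage sketch; the driver name, address range, notifier size,
+ * NULL device-private page owner, and chunk sizes below are illustrative
+ * assumptions, not requirements:
+ *
+ * .. code-block:: c
+ *
+ *	static const unsigned long chunk_sizes[] = { SZ_2M, SZ_64K, SZ_4K };
+ *
+ *	err = drm_gpusvm_init(&d->gpusvm, "driver-svm", drm, current->mm,
+ *			      NULL, 0, TASK_SIZE, SZ_512M, &driver_gpusvm_ops,
+ *			      chunk_sizes, ARRAY_SIZE(chunk_sizes));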
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
+ const char *name, struct drm_device *drm,
+ struct mm_struct *mm, void *device_private_page_owner,
+ unsigned long mm_start, unsigned long mm_range,
+ unsigned long notifier_size,
+ const struct drm_gpusvm_ops *ops,
+ const unsigned long *chunk_sizes, int num_chunks)
+{
+ if (!ops->invalidate || !num_chunks)
+ return -EINVAL;
+
+ gpusvm->name = name;
+ gpusvm->drm = drm;
+ gpusvm->mm = mm;
+ gpusvm->device_private_page_owner = device_private_page_owner;
+ gpusvm->mm_start = mm_start;
+ gpusvm->mm_range = mm_range;
+ gpusvm->notifier_size = notifier_size;
+ gpusvm->ops = ops;
+ gpusvm->chunk_sizes = chunk_sizes;
+ gpusvm->num_chunks = num_chunks;
+
+ mmgrab(mm);
+ gpusvm->root = RB_ROOT_CACHED;
+ INIT_LIST_HEAD(&gpusvm->notifier_list);
+
+ init_rwsem(&gpusvm->notifier_lock);
+
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&gpusvm->notifier_lock);
+ fs_reclaim_release(GFP_KERNEL);
+
+#ifdef CONFIG_LOCKDEP
+ gpusvm->lock_dep_map = NULL;
+#endif
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_init);
+
+/**
+ * drm_gpusvm_notifier_find() - Find GPU SVM notifier
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @fault_addr: Fault address
+ *
+ * This function finds the GPU SVM notifier associated with the fault address.
+ *
+ * Return: Pointer to the GPU SVM notifier on success, NULL otherwise.
+ */
+static struct drm_gpusvm_notifier *
+drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm,
+ unsigned long fault_addr)
+{
+ return notifier_iter_first(&gpusvm->root, fault_addr, fault_addr + 1);
+}
+
+/**
+ * to_drm_gpusvm_notifier() - retrieve the container struct for a given rbtree node
+ * @node: a pointer to the rbtree node embedded within a drm_gpusvm_notifier struct
+ *
+ * Return: A pointer to the containing drm_gpusvm_notifier structure.
+ */
+static struct drm_gpusvm_notifier *to_drm_gpusvm_notifier(struct rb_node *node)
+{
+ return container_of(node, struct drm_gpusvm_notifier, itree.rb);
+}
+
+/**
+ * drm_gpusvm_notifier_insert() - Insert GPU SVM notifier
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: Pointer to the GPU SVM notifier structure
+ *
+ * This function inserts the GPU SVM notifier into the GPU SVM RB tree and list.
+ */
+static void drm_gpusvm_notifier_insert(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier)
+{
+ struct rb_node *node;
+ struct list_head *head;
+
+ interval_tree_insert(&notifier->itree, &gpusvm->root);
+
+ node = rb_prev(&notifier->itree.rb);
+ if (node)
+ head = &(to_drm_gpusvm_notifier(node))->entry;
+ else
+ head = &gpusvm->notifier_list;
+
+ list_add(&notifier->entry, head);
+}
+
+/**
+ * drm_gpusvm_notifier_remove() - Remove GPU SVM notifier
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: Pointer to the GPU SVM notifier structure
+ *
+ * This function removes the GPU SVM notifier from the GPU SVM RB tree and list.
+ */
+static void drm_gpusvm_notifier_remove(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier)
+{
+ interval_tree_remove(&notifier->itree, &gpusvm->root);
+ list_del(&notifier->entry);
+}
+
+/**
+ * drm_gpusvm_fini() - Finalize the GPU SVM.
+ * @gpusvm: Pointer to the GPU SVM structure.
+ *
+ * This function finalizes the GPU SVM by cleaning up any remaining ranges and
+ * notifiers, and dropping a reference to the mm_struct.
+ */
+void drm_gpusvm_fini(struct drm_gpusvm *gpusvm)
+{
+ struct drm_gpusvm_notifier *notifier, *next;
+
+ drm_gpusvm_for_each_notifier_safe(notifier, next, gpusvm, 0, LONG_MAX) {
+ struct drm_gpusvm_range *range, *__next;
+
+ /*
+ * Remove notifier first to avoid racing with any invalidation
+ */
+ mmu_interval_notifier_remove(&notifier->notifier);
+ notifier->flags.removed = true;
+
+ drm_gpusvm_for_each_range_safe(range, __next, notifier, 0,
+ LONG_MAX)
+ drm_gpusvm_range_remove(gpusvm, range);
+ }
+
+ mmdrop(gpusvm->mm);
+ WARN_ON(!RB_EMPTY_ROOT(&gpusvm->root.rb_root));
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_fini);
+
+/**
+ * drm_gpusvm_notifier_alloc() - Allocate GPU SVM notifier
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @fault_addr: Fault address
+ *
+ * This function allocates and initializes the GPU SVM notifier structure.
+ *
+ * Return: Pointer to the allocated GPU SVM notifier on success, ERR_PTR() on failure.
+ */
+static struct drm_gpusvm_notifier *
+drm_gpusvm_notifier_alloc(struct drm_gpusvm *gpusvm, unsigned long fault_addr)
+{
+ struct drm_gpusvm_notifier *notifier;
+
+ if (gpusvm->ops->notifier_alloc)
+ notifier = gpusvm->ops->notifier_alloc();
+ else
+ notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
+
+ if (!notifier)
+ return ERR_PTR(-ENOMEM);
+
+ notifier->gpusvm = gpusvm;
+ notifier->itree.start = ALIGN_DOWN(fault_addr, gpusvm->notifier_size);
+ notifier->itree.last = ALIGN(fault_addr + 1, gpusvm->notifier_size) - 1;
+ INIT_LIST_HEAD(&notifier->entry);
+ notifier->root = RB_ROOT_CACHED;
+ INIT_LIST_HEAD(&notifier->range_list);
+
+ return notifier;
+}
+
+/**
+ * drm_gpusvm_notifier_free() - Free GPU SVM notifier
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: Pointer to the GPU SVM notifier structure
+ *
+ * This function frees the GPU SVM notifier structure.
+ */
+static void drm_gpusvm_notifier_free(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier)
+{
+ WARN_ON(!RB_EMPTY_ROOT(&notifier->root.rb_root));
+
+ if (gpusvm->ops->notifier_free)
+ gpusvm->ops->notifier_free(notifier);
+ else
+ kfree(notifier);
+}
+
+/**
+ * to_drm_gpusvm_range() - retrieve the container struct for a given rbtree node
+ * @node: a pointer to the rbtree node embedded within a drm_gpusvm_range struct
+ *
+ * Return: A pointer to the containing drm_gpusvm_range structure.
+ */
+static struct drm_gpusvm_range *to_drm_gpusvm_range(struct rb_node *node)
+{
+ return container_of(node, struct drm_gpusvm_range, itree.rb);
+}
+
+/**
+ * drm_gpusvm_range_insert() - Insert GPU SVM range
+ * @notifier: Pointer to the GPU SVM notifier structure
+ * @range: Pointer to the GPU SVM range structure
+ *
+ * This function inserts the GPU SVM range into the notifier RB tree and list.
+ */
+static void drm_gpusvm_range_insert(struct drm_gpusvm_notifier *notifier,
+ struct drm_gpusvm_range *range)
+{
+ struct rb_node *node;
+ struct list_head *head;
+
+ drm_gpusvm_notifier_lock(notifier->gpusvm);
+ interval_tree_insert(&range->itree, &notifier->root);
+
+ node = rb_prev(&range->itree.rb);
+ if (node)
+ head = &(to_drm_gpusvm_range(node))->entry;
+ else
+ head = &notifier->range_list;
+
+ list_add(&range->entry, head);
+ drm_gpusvm_notifier_unlock(notifier->gpusvm);
+}
+
+/**
+ * __drm_gpusvm_range_remove() - Remove GPU SVM range
+ * @notifier: Pointer to the GPU SVM notifier structure
+ * @range: Pointer to the GPU SVM range structure
+ *
+ * This function removes the GPU SVM range from the notifier RB tree and list.
+ */
+static void __drm_gpusvm_range_remove(struct drm_gpusvm_notifier *notifier,
+ struct drm_gpusvm_range *range)
+{
+ interval_tree_remove(&range->itree, &notifier->root);
+ list_del(&range->entry);
+}
+
+/**
+ * drm_gpusvm_range_alloc() - Allocate GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: Pointer to the GPU SVM notifier structure
+ * @fault_addr: Fault address
+ * @chunk_size: Chunk size
+ * @migrate_devmem: Flag indicating whether to migrate device memory
+ *
+ * This function allocates and initializes the GPU SVM range structure.
+ *
+ * Return: Pointer to the allocated GPU SVM range on success, ERR_PTR() on failure.
+ */
+static struct drm_gpusvm_range *
+drm_gpusvm_range_alloc(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier,
+ unsigned long fault_addr, unsigned long chunk_size,
+ bool migrate_devmem)
+{
+ struct drm_gpusvm_range *range;
+
+ if (gpusvm->ops->range_alloc)
+ range = gpusvm->ops->range_alloc(gpusvm);
+ else
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+
+ if (!range)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&range->refcount);
+ range->gpusvm = gpusvm;
+ range->notifier = notifier;
+ range->itree.start = ALIGN_DOWN(fault_addr, chunk_size);
+ range->itree.last = ALIGN(fault_addr + 1, chunk_size) - 1;
+ INIT_LIST_HEAD(&range->entry);
+ range->notifier_seq = LONG_MAX;
+ range->flags.migrate_devmem = migrate_devmem ? 1 : 0;
+
+ return range;
+}
+
+/**
+ * drm_gpusvm_check_pages() - Check pages
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: Pointer to the GPU SVM notifier structure
+ * @start: Start address
+ * @end: End address
+ *
+ * Check if pages between start and end have been faulted in on the CPU. Used
+ * to prevent migration of pages without CPU backing store.
+ *
+ * Return: True if pages have been faulted into CPU, False otherwise
+ */
+static bool drm_gpusvm_check_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier,
+ unsigned long start, unsigned long end)
+{
+ struct hmm_range hmm_range = {
+ .default_flags = 0,
+ .notifier = &notifier->notifier,
+ .start = start,
+ .end = end,
+ .dev_private_owner = gpusvm->device_private_page_owner,
+ };
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ unsigned long *pfns;
+ unsigned long npages = npages_in_range(start, end);
+ int err, i;
+
+ mmap_assert_locked(gpusvm->mm);
+
+ pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
+ if (!pfns)
+ return false;
+
+ hmm_range.notifier_seq = mmu_interval_read_begin(&notifier->notifier);
+ hmm_range.hmm_pfns = pfns;
+
+ while (true) {
+ err = hmm_range_fault(&hmm_range);
+ if (err == -EBUSY) {
+ if (time_after(jiffies, timeout))
+ break;
+
+ hmm_range.notifier_seq =
+ mmu_interval_read_begin(&notifier->notifier);
+ continue;
+ }
+ break;
+ }
+ if (err)
+ goto err_free;
+
+ for (i = 0; i < npages;) {
+ if (!(pfns[i] & HMM_PFN_VALID)) {
+ err = -EFAULT;
+ goto err_free;
+ }
+ i += 0x1 << hmm_pfn_to_map_order(pfns[i]);
+ }
+
+err_free:
+ kvfree(pfns);
+ return err ? false : true;
+}
+
+/**
+ * drm_gpusvm_range_chunk_size() - Determine chunk size for GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: Pointer to the GPU SVM notifier structure
+ * @vas: Pointer to the virtual memory area structure
+ * @fault_addr: Fault address
+ * @gpuva_start: Start address of GPUVA which mirrors CPU
+ * @gpuva_end: End address of GPUVA which mirrors CPU
+ * @check_pages_threshold: Check CPU pages for present threshold
+ *
+ * This function determines the chunk size for the GPU SVM range based on the
+ * fault address, GPU SVM chunk sizes, existing GPU SVM ranges, and the virtual
+ * memory area boundaries.
+ *
+ * Return: Chunk size on success, LONG_MAX on failure.
+ */
+static unsigned long
+drm_gpusvm_range_chunk_size(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier,
+ struct vm_area_struct *vas,
+ unsigned long fault_addr,
+ unsigned long gpuva_start,
+ unsigned long gpuva_end,
+ unsigned long check_pages_threshold)
+{
+ unsigned long start, end;
+ int i = 0;
+
+retry:
+ for (; i < gpusvm->num_chunks; ++i) {
+ start = ALIGN_DOWN(fault_addr, gpusvm->chunk_sizes[i]);
+ end = ALIGN(fault_addr + 1, gpusvm->chunk_sizes[i]);
+
+ if (start >= vas->vm_start && end <= vas->vm_end &&
+ start >= drm_gpusvm_notifier_start(notifier) &&
+ end <= drm_gpusvm_notifier_end(notifier) &&
+ start >= gpuva_start && end <= gpuva_end)
+ break;
+ }
+
+ if (i == gpusvm->num_chunks)
+ return LONG_MAX;
+
+ /*
+ * If allocating more than a page, ensure the range does not overlap with
+ * existing ranges.
+ */
+ if (end - start != SZ_4K) {
+ struct drm_gpusvm_range *range;
+
+ range = drm_gpusvm_range_find(notifier, start, end);
+ if (range) {
+ ++i;
+ goto retry;
+ }
+
+ /*
+ * XXX: Only create range on pages CPU has faulted in. Without
+ * this check, or prefault, on BMG 'xe_exec_system_allocator --r
+ * process-many-malloc' fails. In the failure case, each process
+ * mallocs 16k but the CPU VMA is ~128k which results in 64k SVM
+ * ranges. When migrating the SVM ranges, some processes fail in
+ * drm_gpusvm_migrate_to_devmem with 'migrate.cpages != npages'
+ * and then upon drm_gpusvm_range_get_pages device pages from
+ * other processes are collected + faulted in which creates all
+ * sorts of problems. Unsure exactly how this is happening; the
+ * problem also goes away if 'xe_exec_system_allocator --r
+ * process-many-malloc' mallocs at least 64k at a time.
+ */
+ if (end - start <= check_pages_threshold &&
+ !drm_gpusvm_check_pages(gpusvm, notifier, start, end)) {
+ ++i;
+ goto retry;
+ }
+ }
+
+ return end - start;
+}
+
+#ifdef CONFIG_LOCKDEP
+/**
+ * drm_gpusvm_driver_lock_held() - Assert GPU SVM driver lock is held
+ * @gpusvm: Pointer to the GPU SVM structure.
+ *
+ * Ensure driver lock is held.
+ */
+static void drm_gpusvm_driver_lock_held(struct drm_gpusvm *gpusvm)
+{
+ if ((gpusvm)->lock_dep_map)
+ lockdep_assert(lock_is_held_type((gpusvm)->lock_dep_map, 0));
+}
+#else
+static void drm_gpusvm_driver_lock_held(struct drm_gpusvm *gpusvm)
+{
+}
+#endif
+
+/**
+ * drm_gpusvm_range_find_or_insert() - Find or insert GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @fault_addr: Fault address
+ * @gpuva_start: Start address of GPUVA which mirrors CPU
+ * @gpuva_end: End address of GPUVA which mirrors CPU
+ * @ctx: GPU SVM context
+ *
+ * This function finds an existing GPU SVM range or inserts a newly allocated
+ * one based on the fault address. Caller must hold a lock to protect range
+ * lookup and insertion.
+ *
+ * Return: Pointer to the GPU SVM range on success, ERR_PTR() on failure.
+ */
+struct drm_gpusvm_range *
+drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
+ unsigned long fault_addr,
+ unsigned long gpuva_start,
+ unsigned long gpuva_end,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ struct drm_gpusvm_notifier *notifier;
+ struct drm_gpusvm_range *range;
+ struct mm_struct *mm = gpusvm->mm;
+ struct vm_area_struct *vas;
+ bool notifier_alloc = false;
+ unsigned long chunk_size;
+ int err;
+ bool migrate_devmem;
+
+ drm_gpusvm_driver_lock_held(gpusvm);
+
+ if (fault_addr < gpusvm->mm_start ||
+ fault_addr > gpusvm->mm_start + gpusvm->mm_range)
+ return ERR_PTR(-EINVAL);
+
+ if (!mmget_not_zero(mm))
+ return ERR_PTR(-EFAULT);
+
+ notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr);
+ if (!notifier) {
+ notifier = drm_gpusvm_notifier_alloc(gpusvm, fault_addr);
+ if (IS_ERR(notifier)) {
+ err = PTR_ERR(notifier);
+ goto err_mmunlock;
+ }
+ notifier_alloc = true;
+ err = mmu_interval_notifier_insert(&notifier->notifier,
+ mm,
+ drm_gpusvm_notifier_start(notifier),
+ drm_gpusvm_notifier_size(notifier),
+ &drm_gpusvm_notifier_ops);
+ if (err)
+ goto err_notifier;
+ }
+
+ mmap_read_lock(mm);
+
+ vas = vma_lookup(mm, fault_addr);
+ if (!vas) {
+ err = -ENOENT;
+ goto err_notifier_remove;
+ }
+
+ if (!ctx->read_only && !(vas->vm_flags & VM_WRITE)) {
+ err = -EPERM;
+ goto err_notifier_remove;
+ }
+
+ range = drm_gpusvm_range_find(notifier, fault_addr, fault_addr + 1);
+ if (range)
+ goto out_mmunlock;
+ /*
+ * XXX: Short-circuiting migration based on migrate_vma_* current
+ * limitations. If/when migrate_vma_* add more support, this logic will
+ * have to change.
+ */
+ migrate_devmem = ctx->devmem_possible &&
+ vma_is_anonymous(vas) && !is_vm_hugetlb_page(vas);
+
+ chunk_size = drm_gpusvm_range_chunk_size(gpusvm, notifier, vas,
+ fault_addr, gpuva_start,
+ gpuva_end,
+ ctx->check_pages_threshold);
+ if (chunk_size == LONG_MAX) {
+ err = -EINVAL;
+ goto err_notifier_remove;
+ }
+
+ range = drm_gpusvm_range_alloc(gpusvm, notifier, fault_addr, chunk_size,
+ migrate_devmem);
+ if (IS_ERR(range)) {
+ err = PTR_ERR(range);
+ goto err_notifier_remove;
+ }
+
+ drm_gpusvm_range_insert(notifier, range);
+ if (notifier_alloc)
+ drm_gpusvm_notifier_insert(gpusvm, notifier);
+
+out_mmunlock:
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ return range;
+
+err_notifier_remove:
+ mmap_read_unlock(mm);
+ if (notifier_alloc)
+ mmu_interval_notifier_remove(&notifier->notifier);
+err_notifier:
+ if (notifier_alloc)
+ drm_gpusvm_notifier_free(gpusvm, notifier);
+err_mmunlock:
+ mmput(mm);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_find_or_insert);
+
+/**
+ * __drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range (internal)
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range structure
+ * @npages: Number of pages to unmap
+ *
+ * This function unmaps pages associated with a GPU SVM range. Assumes and
+ * asserts correct locking is in place when called.
+ */
+static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ unsigned long npages)
+{
+ unsigned long i, j;
+ struct drm_pagemap *dpagemap = range->dpagemap;
+ struct device *dev = gpusvm->drm->dev;
+
+ lockdep_assert_held(&gpusvm->notifier_lock);
+
+ if (range->flags.has_dma_mapping) {
+ for (i = 0, j = 0; i < npages; j++) {
+ struct drm_pagemap_device_addr *addr = &range->dma_addr[j];
+
+ if (addr->proto == DRM_INTERCONNECT_SYSTEM)
+ dma_unmap_page(dev,
+ addr->addr,
+ PAGE_SIZE << addr->order,
+ addr->dir);
+ else if (dpagemap && dpagemap->ops->device_unmap)
+ dpagemap->ops->device_unmap(dpagemap,
+ dev, *addr);
+ i += 1 << addr->order;
+ }
+ range->flags.has_devmem_pages = false;
+ range->flags.has_dma_mapping = false;
+ range->dpagemap = NULL;
+ }
+}
+
+/**
+ * drm_gpusvm_range_free_pages() - Free pages associated with a GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range structure
+ *
+ * This function frees the dma address array associated with a GPU SVM range.
+ */
+static void drm_gpusvm_range_free_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range)
+{
+ lockdep_assert_held(&gpusvm->notifier_lock);
+
+ if (range->dma_addr) {
+ kvfree(range->dma_addr);
+ range->dma_addr = NULL;
+ }
+}
+
+/**
+ * drm_gpusvm_range_remove() - Remove GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range to be removed
+ *
+ * This function removes the specified GPU SVM range and also removes the parent
+ * GPU SVM notifier if no more ranges remain in the notifier. The caller must
+ * hold a lock to protect range and notifier removal.
+ */
+void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range)
+{
+ unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
+ drm_gpusvm_range_end(range));
+ struct drm_gpusvm_notifier *notifier;
+
+ drm_gpusvm_driver_lock_held(gpusvm);
+
+ notifier = drm_gpusvm_notifier_find(gpusvm,
+ drm_gpusvm_range_start(range));
+ if (WARN_ON_ONCE(!notifier))
+ return;
+
+ drm_gpusvm_notifier_lock(gpusvm);
+ __drm_gpusvm_range_unmap_pages(gpusvm, range, npages);
+ drm_gpusvm_range_free_pages(gpusvm, range);
+ __drm_gpusvm_range_remove(notifier, range);
+ drm_gpusvm_notifier_unlock(gpusvm);
+
+ drm_gpusvm_range_put(range);
+
+ if (RB_EMPTY_ROOT(&notifier->root.rb_root)) {
+ if (!notifier->flags.removed)
+ mmu_interval_notifier_remove(&notifier->notifier);
+ drm_gpusvm_notifier_remove(gpusvm, notifier);
+ drm_gpusvm_notifier_free(gpusvm, notifier);
+ }
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_remove);
+
+/**
+ * drm_gpusvm_range_get() - Get a reference to GPU SVM range
+ * @range: Pointer to the GPU SVM range
+ *
+ * This function increments the reference count of the specified GPU SVM range.
+ *
+ * Return: Pointer to the GPU SVM range.
+ */
+struct drm_gpusvm_range *
+drm_gpusvm_range_get(struct drm_gpusvm_range *range)
+{
+ kref_get(&range->refcount);
+
+ return range;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_get);
+
+/**
+ * drm_gpusvm_range_destroy() - Destroy GPU SVM range
+ * @refcount: Pointer to the reference counter embedded in the GPU SVM range
+ *
+ * This function destroys the specified GPU SVM range when its reference count
+ * reaches zero. If a custom range-free function is provided, it is invoked to
+ * free the range; otherwise, the range is deallocated using kfree().
+ */
+static void drm_gpusvm_range_destroy(struct kref *refcount)
+{
+ struct drm_gpusvm_range *range =
+ container_of(refcount, struct drm_gpusvm_range, refcount);
+ struct drm_gpusvm *gpusvm = range->gpusvm;
+
+ if (gpusvm->ops->range_free)
+ gpusvm->ops->range_free(range);
+ else
+ kfree(range);
+}
+
+/**
+ * drm_gpusvm_range_put() - Put a reference to GPU SVM range
+ * @range: Pointer to the GPU SVM range
+ *
+ * This function decrements the reference count of the specified GPU SVM range
+ * and frees it when the count reaches zero.
+ */
+void drm_gpusvm_range_put(struct drm_gpusvm_range *range)
+{
+ kref_put(&range->refcount, drm_gpusvm_range_destroy);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_put);
+
+/**
+ * drm_gpusvm_range_pages_valid() - GPU SVM range pages valid
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range structure
+ *
+ * This function determines if a GPU SVM range's pages are valid. It is expected
+ * to be called while holding gpusvm->notifier_lock and as the last step before committing a
+ * GPU binding. This is akin to a notifier seqno check in the HMM documentation
+ * but due to wider notifiers (i.e., notifiers which span multiple ranges) this
+ * function is required for finer grained checking (i.e., per range) if pages
+ * are valid.
+ *
+ * Return: True if GPU SVM range has valid pages, False otherwise
+ */
+bool drm_gpusvm_range_pages_valid(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range)
+{
+ lockdep_assert_held(&gpusvm->notifier_lock);
+
+ return range->flags.has_devmem_pages || range->flags.has_dma_mapping;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_pages_valid);
+
+/**
+ * drm_gpusvm_range_pages_valid_unlocked() - GPU SVM range pages valid unlocked
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range structure
+ *
+ * This function determines if a GPU SVM range's pages are valid. It is expected
+ * to be called without holding gpusvm->notifier_lock.
+ *
+ * Return: True if GPU SVM range has valid pages, False otherwise
+ */
+static bool
+drm_gpusvm_range_pages_valid_unlocked(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range)
+{
+ bool pages_valid;
+
+ if (!range->dma_addr)
+ return false;
+
+ drm_gpusvm_notifier_lock(gpusvm);
+ pages_valid = drm_gpusvm_range_pages_valid(gpusvm, range);
+ if (!pages_valid)
+ drm_gpusvm_range_free_pages(gpusvm, range);
+ drm_gpusvm_notifier_unlock(gpusvm);
+
+ return pages_valid;
+}
+
+/**
+ * drm_gpusvm_range_get_pages() - Get pages for a GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range structure
+ * @ctx: GPU SVM context
+ *
+ * This function gets pages for a GPU SVM range and ensures they are mapped for
+ * DMA access.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ struct mmu_interval_notifier *notifier = &range->notifier->notifier;
+ struct hmm_range hmm_range = {
+ .default_flags = HMM_PFN_REQ_FAULT | (ctx->read_only ? 0 :
+ HMM_PFN_REQ_WRITE),
+ .notifier = notifier,
+ .start = drm_gpusvm_range_start(range),
+ .end = drm_gpusvm_range_end(range),
+ .dev_private_owner = gpusvm->device_private_page_owner,
+ };
+ struct mm_struct *mm = gpusvm->mm;
+ struct drm_gpusvm_zdd *zdd;
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ unsigned long i, j;
+ unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
+ drm_gpusvm_range_end(range));
+ unsigned long num_dma_mapped;
+ unsigned int order = 0;
+ unsigned long *pfns;
+ struct page **pages;
+ int err = 0;
+ struct dev_pagemap *pagemap;
+ struct drm_pagemap *dpagemap;
+
+retry:
+ hmm_range.notifier_seq = mmu_interval_read_begin(notifier);
+ if (drm_gpusvm_range_pages_valid_unlocked(gpusvm, range))
+ goto set_seqno;
+
+ pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
+ if (!pfns)
+ return -ENOMEM;
+
+ if (!mmget_not_zero(mm)) {
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ hmm_range.hmm_pfns = pfns;
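+	/*
+	 * hmm_range_fault() returns -EBUSY if the notifier sequence became
+	 * stale; retry with a fresh sequence number until the timeout expires.
+	 */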
+ while (true) {
+ mmap_read_lock(mm);
+ err = hmm_range_fault(&hmm_range);
+ mmap_read_unlock(mm);
+
+ if (err == -EBUSY) {
+ if (time_after(jiffies, timeout))
+ break;
+
+ hmm_range.notifier_seq =
+ mmu_interval_read_begin(notifier);
+ continue;
+ }
+ break;
+ }
+ mmput(mm);
+ if (err)
+ goto err_free;
+
+ pages = (struct page **)pfns;
+map_pages:
+ /*
+ * Perform all dma mappings under the notifier lock to not
+ * access freed pages. A notifier will either block on
+ * the notifier lock or unmap dma.
+ */
+ drm_gpusvm_notifier_lock(gpusvm);
+
+ if (range->flags.unmapped) {
+ drm_gpusvm_notifier_unlock(gpusvm);
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ if (mmu_interval_read_retry(notifier, hmm_range.notifier_seq)) {
+ drm_gpusvm_notifier_unlock(gpusvm);
+ kvfree(pfns);
+ goto retry;
+ }
+
+ if (!range->dma_addr) {
+ /* Unlock and restart mapping to allocate memory. */
+ drm_gpusvm_notifier_unlock(gpusvm);
+ range->dma_addr = kvmalloc_array(npages,
+ sizeof(*range->dma_addr),
+ GFP_KERNEL);
+ if (!range->dma_addr) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+ goto map_pages;
+ }
+
+ zdd = NULL;
+ num_dma_mapped = 0;
+ for (i = 0, j = 0; i < npages; ++j) {
+ struct page *page = hmm_pfn_to_page(pfns[i]);
+
+ order = hmm_pfn_to_map_order(pfns[i]);
+ if (is_device_private_page(page) ||
+ is_device_coherent_page(page)) {
+ if (zdd != page->zone_device_data && i > 0) {
+ err = -EOPNOTSUPP;
+ goto err_unmap;
+ }
+ zdd = page->zone_device_data;
+ if (pagemap != page->pgmap) {
+ if (i > 0) {
+ err = -EOPNOTSUPP;
+ goto err_unmap;
+ }
+
+ pagemap = page->pgmap;
+ dpagemap = zdd->devmem_allocation->dpagemap;
+ if (drm_WARN_ON(gpusvm->drm, !dpagemap)) {
+ /*
+ * Raced. This is not supposed to happen
+ * since hmm_range_fault() should've migrated
+ * this page to system.
+ */
+ err = -EAGAIN;
+ goto err_unmap;
+ }
+ }
+ range->dma_addr[j] =
+ dpagemap->ops->device_map(dpagemap,
+ gpusvm->drm->dev,
+ page, order,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(gpusvm->drm->dev,
+ range->dma_addr[j].addr)) {
+ err = -EFAULT;
+ goto err_unmap;
+ }
+
+ pages[i] = page;
+ } else {
+ dma_addr_t addr;
+
+ if (is_zone_device_page(page) || zdd) {
+ err = -EOPNOTSUPP;
+ goto err_unmap;
+ }
+
+ addr = dma_map_page(gpusvm->drm->dev,
+ page, 0,
+ PAGE_SIZE << order,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(gpusvm->drm->dev, addr)) {
+ err = -EFAULT;
+ goto err_unmap;
+ }
+
+ range->dma_addr[j] = drm_pagemap_device_addr_encode
+ (addr, DRM_INTERCONNECT_SYSTEM, order,
+ DMA_BIDIRECTIONAL);
+ }
+ i += 1 << order;
+ num_dma_mapped = i;
+ }
+
+ range->flags.has_dma_mapping = true;
+ if (zdd) {
+ range->flags.has_devmem_pages = true;
+ range->dpagemap = dpagemap;
+ }
+
+ drm_gpusvm_notifier_unlock(gpusvm);
+ kvfree(pfns);
+set_seqno:
+ range->notifier_seq = hmm_range.notifier_seq;
+
+ return 0;
+
+err_unmap:
+ __drm_gpusvm_range_unmap_pages(gpusvm, range, num_dma_mapped);
+ drm_gpusvm_notifier_unlock(gpusvm);
+err_free:
+ kvfree(pfns);
+ if (err == -EAGAIN)
+ goto retry;
+ return err;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_get_pages);
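+
+/*
+ * Illustrative driver-side sketch (driver_bind_range() is a placeholder, not
+ * part of this API): a GPU page-fault handler would typically call
+ * drm_gpusvm_range_get_pages() and then program its page tables from
+ * range->dma_addr while holding the notifier lock, so that a concurrent
+ * invalidation either blocks or unmaps whatever was just bound.
+ *
+ *	err = drm_gpusvm_range_get_pages(gpusvm, range, &ctx);
+ *	if (err)
+ *		goto retry_or_fail;
+ *
+ *	drm_gpusvm_notifier_lock(gpusvm);
+ *	err = driver_bind_range(vm, range);	// placeholder: write GPU PTEs
+ *	drm_gpusvm_notifier_unlock(gpusvm);
+ */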
+
+/**
+ * drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range structure
+ * @ctx: GPU SVM context
+ *
+ * This function unmaps pages associated with a GPU SVM range. If
+ * @ctx->in_notifier is set, gpusvm->notifier_lock is assumed to be held in
+ * write mode; if it is clear, the lock is acquired in read mode. Must be
+ * called on each GPU SVM range attached to the notifier from
+ * gpusvm->ops->invalidate to satisfy the IOMMU security model.
+ */
+void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
+ drm_gpusvm_range_end(range));
+
+ if (ctx->in_notifier)
+ lockdep_assert_held_write(&gpusvm->notifier_lock);
+ else
+ drm_gpusvm_notifier_lock(gpusvm);
+
+ __drm_gpusvm_range_unmap_pages(gpusvm, range, npages);
+
+ if (!ctx->in_notifier)
+ drm_gpusvm_notifier_unlock(gpusvm);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_unmap_pages);
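+
+/*
+ * Illustrative sketch of a gpusvm->ops->invalidate() implementation (the GPU
+ * TLB invalidation helper is a placeholder): the notifier lock is already
+ * held in write mode here, so every affected range is marked unmapped and
+ * has its DMA mappings torn down with in_notifier set.
+ *
+ *	const struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
+ *	struct drm_gpusvm_range *range = NULL;
+ *
+ *	driver_invalidate_gpu_tlb(gpusvm, mmu_range);	// placeholder
+ *	drm_gpusvm_for_each_range(range, notifier, mmu_range->start,
+ *				  mmu_range->end) {
+ *		drm_gpusvm_range_set_unmapped(range, mmu_range);
+ *		drm_gpusvm_range_unmap_pages(gpusvm, range, &ctx);
+ *	}
+ */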
+
+/**
+ * drm_gpusvm_migration_unlock_put_page() - Put a migration page
+ * @page: Pointer to the page to put
+ *
+ * This function unlocks and puts a page.
+ */
+static void drm_gpusvm_migration_unlock_put_page(struct page *page)
+{
+ unlock_page(page);
+ put_page(page);
+}
+
+/**
+ * drm_gpusvm_migration_unlock_put_pages() - Put migration pages
+ * @npages: Number of pages
+ * @migrate_pfn: Array of migrate page frame numbers
+ *
+ * This function unlocks and puts an array of pages.
+ */
+static void drm_gpusvm_migration_unlock_put_pages(unsigned long npages,
+ unsigned long *migrate_pfn)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; ++i) {
+ struct page *page;
+
+ if (!migrate_pfn[i])
+ continue;
+
+ page = migrate_pfn_to_page(migrate_pfn[i]);
+ drm_gpusvm_migration_unlock_put_page(page);
+ migrate_pfn[i] = 0;
+ }
+}
+
+/**
+ * drm_gpusvm_get_devmem_page() - Get a reference to a device memory page
+ * @page: Pointer to the page
+ * @zdd: Pointer to the GPU SVM zone device data
+ *
+ * This function associates the given page with the specified GPU SVM zone
+ * device data and initializes it for zone device usage.
+ */
+static void drm_gpusvm_get_devmem_page(struct page *page,
+ struct drm_gpusvm_zdd *zdd)
+{
+ page->zone_device_data = drm_gpusvm_zdd_get(zdd);
+ zone_device_page_init(page);
+}
+
+/**
+ * drm_gpusvm_migrate_map_pages() - Map migration pages for GPU SVM migration
+ * @dev: The device for which the pages are being mapped
+ * @dma_addr: Array to store DMA addresses corresponding to mapped pages
+ * @migrate_pfn: Array of migrate page frame numbers to map
+ * @npages: Number of pages to map
+ * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
+ *
+ * This function maps pages of memory for migration usage in GPU SVM. It
+ * iterates over each page frame number provided in @migrate_pfn, maps the
+ * corresponding page, and stores the DMA address in the provided @dma_addr
+ * array.
+ *
+ * Return: 0 on success, -EFAULT if an error occurs during mapping.
+ */
+static int drm_gpusvm_migrate_map_pages(struct device *dev,
+ dma_addr_t *dma_addr,
+ unsigned long *migrate_pfn,
+ unsigned long npages,
+ enum dma_data_direction dir)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; ++i) {
+ struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
+
+ if (!page)
+ continue;
+
+ if (WARN_ON_ONCE(is_zone_device_page(page)))
+ return -EFAULT;
+
+ dma_addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
+ if (dma_mapping_error(dev, dma_addr[i]))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_gpusvm_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration
+ * @dev: The device for which the pages were mapped
+ * @dma_addr: Array of DMA addresses corresponding to mapped pages
+ * @npages: Number of pages to unmap
+ * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
+ *
+ * This function unmaps previously mapped pages of memory for GPU Shared Virtual
+ * Memory (SVM). It iterates over each DMA address provided in @dma_addr, checks
+ * if it's valid and not already unmapped, and unmaps the corresponding page.
+ */
+static void drm_gpusvm_migrate_unmap_pages(struct device *dev,
+ dma_addr_t *dma_addr,
+ unsigned long npages,
+ enum dma_data_direction dir)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; ++i) {
+ if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
+ continue;
+
+ dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
+ }
+}
+
+/**
+ * drm_gpusvm_migrate_to_devmem() - Migrate GPU SVM range to device memory
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range structure
+ * @devmem_allocation: Pointer to the device memory allocation. The caller
+ * should hold a reference to the device memory allocation,
+ * which should be dropped via ops->devmem_release or upon
+ * the failure of this function.
+ * @ctx: GPU SVM context
+ *
+ * This function migrates the specified GPU SVM range to device memory. It
+ * performs the necessary setup and invokes the driver-specific operations for
+ * migration to device memory. Upon successful return, @devmem_allocation can
+ * safely reference @range until ops->devmem_release is called; this guarantee
+ * holds only when the function returns successfully. Expected to be called
+ * while holding the mmap lock in read mode.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int drm_gpusvm_migrate_to_devmem(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ struct drm_gpusvm_devmem *devmem_allocation,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ const struct drm_gpusvm_devmem_ops *ops = devmem_allocation->ops;
+ unsigned long start = drm_gpusvm_range_start(range),
+ end = drm_gpusvm_range_end(range);
+ struct migrate_vma migrate = {
+ .start = start,
+ .end = end,
+ .pgmap_owner = gpusvm->device_private_page_owner,
+ .flags = MIGRATE_VMA_SELECT_SYSTEM,
+ };
+ struct mm_struct *mm = gpusvm->mm;
+ unsigned long i, npages = npages_in_range(start, end);
+ struct vm_area_struct *vas;
+ struct drm_gpusvm_zdd *zdd = NULL;
+ struct page **pages;
+ dma_addr_t *dma_addr;
+ void *buf;
+ int err;
+
+ mmap_assert_locked(gpusvm->mm);
+
+ if (!range->flags.migrate_devmem)
+ return -EINVAL;
+
+ if (!ops->populate_devmem_pfn || !ops->copy_to_devmem ||
+ !ops->copy_to_ram)
+ return -EOPNOTSUPP;
+
+ vas = vma_lookup(mm, start);
+ if (!vas) {
+ err = -ENOENT;
+ goto err_out;
+ }
+
+ if (end > vas->vm_end || start < vas->vm_start) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ if (!vma_is_anonymous(vas)) {
+ err = -EBUSY;
+ goto err_out;
+ }
+
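+ /*
+ * A single allocation backs four arrays laid out back to back:
+ * migrate.src[npages], migrate.dst[npages], dma_addr[npages] and
+ * pages[npages]; the pointer arithmetic below carves them out of buf.
+ */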
+ buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
+ sizeof(*pages), GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
+ pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
+
+ zdd = drm_gpusvm_zdd_alloc(gpusvm->device_private_page_owner);
+ if (!zdd) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ migrate.vma = vas;
+ migrate.src = buf;
+ migrate.dst = migrate.src + npages;
+
+ err = migrate_vma_setup(&migrate);
+ if (err)
+ goto err_free;
+
+ if (!migrate.cpages) {
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ if (migrate.cpages != npages) {
+ err = -EBUSY;
+ goto err_finalize;
+ }
+
+ err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst);
+ if (err)
+ goto err_finalize;
+
+ err = drm_gpusvm_migrate_map_pages(devmem_allocation->dev, dma_addr,
+ migrate.src, npages, DMA_TO_DEVICE);
+ if (err)
+ goto err_finalize;
+
+ for (i = 0; i < npages; ++i) {
+ struct page *page = pfn_to_page(migrate.dst[i]);
+
+ pages[i] = page;
+ migrate.dst[i] = migrate_pfn(migrate.dst[i]);
+ drm_gpusvm_get_devmem_page(page, zdd);
+ }
+
+ err = ops->copy_to_devmem(pages, dma_addr, npages);
+ if (err)
+ goto err_finalize;
+
+ /* Upon success bind devmem allocation to range and zdd */
+ zdd->devmem_allocation = devmem_allocation; /* Owns ref */
+
+err_finalize:
+ if (err)
+ drm_gpusvm_migration_unlock_put_pages(npages, migrate.dst);
+ migrate_vma_pages(&migrate);
+ migrate_vma_finalize(&migrate);
+ drm_gpusvm_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
+ DMA_TO_DEVICE);
+err_free:
+ if (zdd)
+ drm_gpusvm_zdd_put(zdd);
+ kvfree(buf);
+err_out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_migrate_to_devmem);
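+
+/*
+ * Illustrative calling sequence (driver_alloc_devmem() and driver_devmem_ops
+ * are placeholders for driver-specific pieces): the driver allocates device
+ * memory, initializes it with drm_gpusvm_devmem_init(), and calls
+ * drm_gpusvm_migrate_to_devmem() while holding the mmap lock in read mode.
+ *
+ *	devmem = driver_alloc_devmem(size);	// placeholder allocator
+ *	drm_gpusvm_devmem_init(devmem, dev, mm, &driver_devmem_ops,
+ *			       dpagemap, size);
+ *
+ *	mmap_read_lock(mm);
+ *	err = drm_gpusvm_migrate_to_devmem(gpusvm, range, devmem, &ctx);
+ *	mmap_read_unlock(mm);
+ */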
+
+/**
+ * drm_gpusvm_migrate_populate_ram_pfn() - Populate RAM PFNs for a VM area
+ * @vas: Pointer to the VM area structure, can be NULL
+ * @fault_page: Fault page
+ * @npages: Number of pages to populate
+ * @mpages: Pointer to the number of pages successfully populated (output)
+ * @src_mpfn: Source array of migrate PFNs
+ * @mpfn: Array of migrate PFNs to populate
+ * @addr: Start address for PFN allocation
+ *
+ * This function populates the RAM migrate page frame numbers (PFNs) for the
+ * specified VM area structure. It allocates and locks pages in the VM area for
+ * RAM usage. If @vas is non-NULL, pages are allocated with alloc_page_vma();
+ * otherwise alloc_page() is used.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int drm_gpusvm_migrate_populate_ram_pfn(struct vm_area_struct *vas,
+ struct page *fault_page,
+ unsigned long npages,
+ unsigned long *mpages,
+ unsigned long *src_mpfn,
+ unsigned long *mpfn,
+ unsigned long addr)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
+ struct page *page, *src_page;
+
+ if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ src_page = migrate_pfn_to_page(src_mpfn[i]);
+ if (!src_page)
+ continue;
+
+ if (fault_page) {
+ if (src_page->zone_device_data !=
+ fault_page->zone_device_data)
+ continue;
+ }
+
+ if (vas)
+ page = alloc_page_vma(GFP_HIGHUSER, vas, addr);
+ else
+ page = alloc_page(GFP_HIGHUSER);
+
+ if (!page)
+ goto free_pages;
+
+ mpfn[i] = migrate_pfn(page_to_pfn(page));
+ }
+
+ for (i = 0; i < npages; ++i) {
+ struct page *page = migrate_pfn_to_page(mpfn[i]);
+
+ if (!page)
+ continue;
+
+ WARN_ON_ONCE(!trylock_page(page));
+ ++*mpages;
+ }
+
+ return 0;
+
+free_pages:
+ for (i = 0; i < npages; ++i) {
+ struct page *page = migrate_pfn_to_page(mpfn[i]);
+
+ if (!page)
+ continue;
+
+ put_page(page);
+ mpfn[i] = 0;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * drm_gpusvm_evict_to_ram() - Evict GPU SVM range to RAM
+ * @devmem_allocation: Pointer to the device memory allocation
+ *
+ * Similar to __drm_gpusvm_migrate_to_ram() but does not require the mmap lock;
+ * migration is done via the migrate_device_* functions.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int drm_gpusvm_evict_to_ram(struct drm_gpusvm_devmem *devmem_allocation)
+{
+ const struct drm_gpusvm_devmem_ops *ops = devmem_allocation->ops;
+ unsigned long npages, mpages = 0;
+ struct page **pages;
+ unsigned long *src, *dst;
+ dma_addr_t *dma_addr;
+ void *buf;
+ int i, err = 0;
+ unsigned int retry_count = 2;
+
+ npages = devmem_allocation->size >> PAGE_SHIFT;
+
+retry:
+ if (!mmget_not_zero(devmem_allocation->mm))
+ return -EFAULT;
+
+ buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*dma_addr) +
+ sizeof(*pages), GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ src = buf;
+ dst = buf + (sizeof(*src) * npages);
+ dma_addr = buf + (2 * sizeof(*src) * npages);
+ pages = buf + (2 * sizeof(*src) + sizeof(*dma_addr)) * npages;
+
+ err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
+ if (err)
+ goto err_free;
+
+ err = migrate_device_pfns(src, npages);
+ if (err)
+ goto err_free;
+
+ err = drm_gpusvm_migrate_populate_ram_pfn(NULL, NULL, npages, &mpages,
+ src, dst, 0);
+ if (err || !mpages)
+ goto err_finalize;
+
+ err = drm_gpusvm_migrate_map_pages(devmem_allocation->dev, dma_addr,
+ dst, npages, DMA_FROM_DEVICE);
+ if (err)
+ goto err_finalize;
+
+ for (i = 0; i < npages; ++i)
+ pages[i] = migrate_pfn_to_page(src[i]);
+
+ err = ops->copy_to_ram(pages, dma_addr, npages);
+ if (err)
+ goto err_finalize;
+
+err_finalize:
+ if (err)
+ drm_gpusvm_migration_unlock_put_pages(npages, dst);
+ migrate_device_pages(src, dst, npages);
+ migrate_device_finalize(src, dst, npages);
+ drm_gpusvm_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
+ DMA_FROM_DEVICE);
+err_free:
+ kvfree(buf);
+err_out:
+ mmput_async(devmem_allocation->mm);
+
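+ /*
+ * Eviction is only complete once the allocation has been detached
+ * from all of its device pages (signalled through the "detached"
+ * completion, which is completed elsewhere). If pages remain
+ * attached, retry a limited number of times before returning -EBUSY.
+ */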
+ if (completion_done(&devmem_allocation->detached))
+ return 0;
+
+ if (retry_count--) {
+ cond_resched();
+ goto retry;
+ }
+
+ return err ?: -EBUSY;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_evict_to_ram);
+
+/**
+ * __drm_gpusvm_migrate_to_ram() - Migrate GPU SVM range to RAM (internal)
+ * @vas: Pointer to the VM area structure
+ * @device_private_page_owner: Device private pages owner
+ * @page: Pointer to the page for fault handling (can be NULL)
+ * @fault_addr: Fault address
+ * @size: Size of migration
+ *
+ * This internal function performs the migration of the specified GPU SVM range
+ * to RAM. It sets up the migration, populates and DMA-maps the RAM PFNs, and
+ * invokes the driver-specific operations for migration to RAM.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int __drm_gpusvm_migrate_to_ram(struct vm_area_struct *vas,
+ void *device_private_page_owner,
+ struct page *page,
+ unsigned long fault_addr,
+ unsigned long size)
+{
+ struct migrate_vma migrate = {
+ .vma = vas,
+ .pgmap_owner = device_private_page_owner,
+ .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
+ MIGRATE_VMA_SELECT_DEVICE_COHERENT,
+ .fault_page = page,
+ };
+ struct drm_gpusvm_zdd *zdd;
+ const struct drm_gpusvm_devmem_ops *ops;
+ struct device *dev = NULL;
+ unsigned long npages, mpages = 0;
+ struct page **pages;
+ dma_addr_t *dma_addr;
+ unsigned long start, end;
+ void *buf;
+ int i, err = 0;
+
+ start = ALIGN_DOWN(fault_addr, size);
+ end = ALIGN(fault_addr + 1, size);
+
+ /* Corner case where the VMA has been partially unmapped */
+ if (start < vas->vm_start)
+ start = vas->vm_start;
+ if (end > vas->vm_end)
+ end = vas->vm_end;
+
+ migrate.start = start;
+ migrate.end = end;
+ npages = npages_in_range(start, end);
+
+ buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
+ sizeof(*pages), GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
+ pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
+
+ migrate.vma = vas;
+ migrate.src = buf;
+ migrate.dst = migrate.src + npages;
+
+ err = migrate_vma_setup(&migrate);
+ if (err)
+ goto err_free;
+
+ /* Raced with another CPU fault, nothing to do */
+ if (!migrate.cpages)
+ goto err_free;
+
+ if (!page) {
+ for (i = 0; i < npages; ++i) {
+ if (!(migrate.src[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ page = migrate_pfn_to_page(migrate.src[i]);
+ break;
+ }
+
+ if (!page)
+ goto err_finalize;
+ }
+ zdd = page->zone_device_data;
+ ops = zdd->devmem_allocation->ops;
+ dev = zdd->devmem_allocation->dev;
+
+ err = drm_gpusvm_migrate_populate_ram_pfn(vas, page, npages, &mpages,
+ migrate.src, migrate.dst,
+ start);
+ if (err)
+ goto err_finalize;
+
+ err = drm_gpusvm_migrate_map_pages(dev, dma_addr, migrate.dst, npages,
+ DMA_FROM_DEVICE);
+ if (err)
+ goto err_finalize;
+
+ for (i = 0; i < npages; ++i)
+ pages[i] = migrate_pfn_to_page(migrate.src[i]);
+
+ err = ops->copy_to_ram(pages, dma_addr, npages);
+ if (err)
+ goto err_finalize;
+
+err_finalize:
+ if (err)
+ drm_gpusvm_migration_unlock_put_pages(npages, migrate.dst);
+ migrate_vma_pages(&migrate);
+ migrate_vma_finalize(&migrate);
+ if (dev)
+ drm_gpusvm_migrate_unmap_pages(dev, dma_addr, npages,
+ DMA_FROM_DEVICE);
+err_free:
+ kvfree(buf);
+err_out:
+
+ return err;
+}
+
+/**
+ * drm_gpusvm_range_evict() - Evict GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range to be evicted
+ *
+ * This function evicts the specified GPU SVM range. It will not evict
+ * coherent pages.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+int drm_gpusvm_range_evict(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range)
+{
+ struct mmu_interval_notifier *notifier = &range->notifier->notifier;
+ struct hmm_range hmm_range = {
+ .default_flags = HMM_PFN_REQ_FAULT,
+ .notifier = notifier,
+ .start = drm_gpusvm_range_start(range),
+ .end = drm_gpusvm_range_end(range),
+ .dev_private_owner = NULL,
+ };
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ unsigned long *pfns;
+ unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
+ drm_gpusvm_range_end(range));
+ int err = 0;
+ struct mm_struct *mm = gpusvm->mm;
+
+ if (!mmget_not_zero(mm))
+ return -EFAULT;
+
+ pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
+ if (!pfns) {
+ mmput(mm);
+ return -ENOMEM;
+ }
+
+ hmm_range.hmm_pfns = pfns;
+ while (!time_after(jiffies, timeout)) {
+ hmm_range.notifier_seq = mmu_interval_read_begin(notifier);
+ if (time_after(jiffies, timeout)) {
+ err = -ETIME;
+ break;
+ }
+
+ mmap_read_lock(mm);
+ err = hmm_range_fault(&hmm_range);
+ mmap_read_unlock(mm);
+ if (err != -EBUSY)
+ break;
+ }
+
+ kvfree(pfns);
+ mmput(mm);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_evict);
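+
+/*
+ * drm_gpusvm_range_evict() works by faulting the entire range through
+ * hmm_range_fault() with a NULL dev_private_owner: device-private pages then
+ * appear foreign to HMM, which forces them back to system RAM via the
+ * migrate_to_ram() handler of the owning pagemap (drm_gpusvm_migrate_to_ram()
+ * below for pages backed by this library).
+ */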
+
+/**
+ * drm_gpusvm_page_free() - Put GPU SVM zone device data associated with a page
+ * @page: Pointer to the page
+ *
+ * This function is a callback used to put the GPU SVM zone device data
+ * associated with a page when it is being released.
+ */
+static void drm_gpusvm_page_free(struct page *page)
+{
+ drm_gpusvm_zdd_put(page->zone_device_data);
+}
+
+/**
+ * drm_gpusvm_migrate_to_ram() - Migrate GPU SVM range to RAM (page fault handler)
+ * @vmf: Pointer to the fault information structure
+ *
+ * This function is a page fault handler used to migrate a GPU SVM range to RAM.
+ * It retrieves the GPU SVM range information from the faulting page and invokes
+ * the internal migration function to migrate the range back to RAM.
+ *
+ * Return: VM_FAULT_SIGBUS on failure, 0 on success.
+ */
+static vm_fault_t drm_gpusvm_migrate_to_ram(struct vm_fault *vmf)
+{
+ struct drm_gpusvm_zdd *zdd = vmf->page->zone_device_data;
+ int err;
+
+ err = __drm_gpusvm_migrate_to_ram(vmf->vma,
+ zdd->device_private_page_owner,
+ vmf->page, vmf->address,
+ zdd->devmem_allocation->size);
+
+ return err ? VM_FAULT_SIGBUS : 0;
+}
+
+/*
+ * drm_gpusvm_pagemap_ops - Device page map operations for GPU SVM
+ */
+static const struct dev_pagemap_ops drm_gpusvm_pagemap_ops = {
+ .page_free = drm_gpusvm_page_free,
+ .migrate_to_ram = drm_gpusvm_migrate_to_ram,
+};
+
+/**
+ * drm_gpusvm_pagemap_ops_get() - Retrieve GPU SVM device page map operations
+ *
+ * Return: Pointer to the GPU SVM device page map operations structure.
+ */
+const struct dev_pagemap_ops *drm_gpusvm_pagemap_ops_get(void)
+{
+ return &drm_gpusvm_pagemap_ops;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_pagemap_ops_get);
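+
+/*
+ * Illustrative sketch of wiring the ops returned by
+ * drm_gpusvm_pagemap_ops_get() into a driver's device pagemap when
+ * registering device memory (resource setup and error handling omitted; res
+ * stands for the driver's device memory region):
+ *
+ *	pagemap->type = MEMORY_DEVICE_PRIVATE;
+ *	pagemap->range.start = res->start;
+ *	pagemap->range.end = res->end;
+ *	pagemap->nr_range = 1;
+ *	pagemap->ops = drm_gpusvm_pagemap_ops_get();
+ *	pagemap->owner = owner;	// same cookie as gpusvm->device_private_page_owner
+ *	addr = devm_memremap_pages(dev, pagemap);
+ */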
+
+/**
+ * drm_gpusvm_has_mapping() - Check if GPU SVM has mapping for the given address range
+ * @gpusvm: Pointer to the GPU SVM structure.
+ * @start: Start address
+ * @end: End address
+ *
+ * Return: True if GPU SVM has a mapping, False otherwise
+ */
+bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start,
+ unsigned long end)
+{
+ struct drm_gpusvm_notifier *notifier;
+
+ drm_gpusvm_for_each_notifier(notifier, gpusvm, start, end) {
+ struct drm_gpusvm_range *range = NULL;
+
+ drm_gpusvm_for_each_range(range, notifier, start, end)
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_has_mapping);
+
+/**
+ * drm_gpusvm_range_set_unmapped() - Mark a GPU SVM range as unmapped
+ * @range: Pointer to the GPU SVM range structure.
+ * @mmu_range: Pointer to the MMU notifier range structure.
+ *
+ * This function marks a GPU SVM range as unmapped and sets the partial_unmap
+ * flag if only part of the range falls within the provided MMU notifier range.
+ */
+void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
+ const struct mmu_notifier_range *mmu_range)
+{
+ lockdep_assert_held_write(&range->gpusvm->notifier_lock);
+
+ range->flags.unmapped = true;
+ if (drm_gpusvm_range_start(range) < mmu_range->start ||
+ drm_gpusvm_range_end(range) > mmu_range->end)
+ range->flags.partial_unmap = true;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_set_unmapped);
+
+/**
+ * drm_gpusvm_devmem_init() - Initialize a GPU SVM device memory allocation
+ * @devmem_allocation: Pointer to the device memory allocation to initialize
+ * @dev: Pointer to the device structure to which the device memory allocation belongs
+ * @mm: Pointer to the mm_struct for the address space
+ * @ops: Pointer to the operations structure for GPU SVM device memory
+ * @dpagemap: The struct drm_pagemap we're allocating from.
+ * @size: Size of the device memory allocation
+ */
+void drm_gpusvm_devmem_init(struct drm_gpusvm_devmem *devmem_allocation,
+ struct device *dev, struct mm_struct *mm,
+ const struct drm_gpusvm_devmem_ops *ops,
+ struct drm_pagemap *dpagemap, size_t size)
+{
+ init_completion(&devmem_allocation->detached);
+ devmem_allocation->dev = dev;
+ devmem_allocation->mm = mm;
+ devmem_allocation->ops = ops;
+ devmem_allocation->dpagemap = dpagemap;
+ devmem_allocation->size = size;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_devmem_init);
+
+MODULE_DESCRIPTION("DRM GPUSVM");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 372c3683c193..55b9e9bfcc4d 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -1311,7 +1311,7 @@ bool g4x_dp_init(struct intel_display *display,
intel_encoder->devdata = devdata;
- mutex_init(&dig_port->hdcp_mutex);
+ mutex_init(&dig_port->hdcp.mutex);
if (drm_encoder_init(display->drm, &intel_encoder->base,
&intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index d9d1304dcc36..3dc2c59a3df0 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -715,7 +715,7 @@ bool g4x_hdmi_init(struct intel_display *display,
intel_encoder->devdata = devdata;
- mutex_init(&dig_port->hdcp_mutex);
+ mutex_init(&dig_port->hdcp.mutex);
if (drm_encoder_init(display->drm, &intel_encoder->base,
&intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index 497850a6ac81..7c80e37c1c5f 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -3902,12 +3902,6 @@ static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
-static void g4x_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
-{
- g4x_wm_get_hw_state(i915);
- g4x_wm_sanitize(i915);
-}
-
static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
@@ -4055,12 +4049,6 @@ static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
-static void vlv_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
-{
- vlv_wm_get_hw_state(i915);
- vlv_wm_sanitize(i915);
-}
-
/*
* FIXME should probably kill this and improve
* the real watermark readout/sanitation instead
@@ -4122,14 +4110,16 @@ static const struct intel_wm_funcs vlv_wm_funcs = {
.initial_watermarks = vlv_initial_watermarks,
.optimize_watermarks = vlv_optimize_watermarks,
.atomic_update_watermarks = vlv_atomic_update_fifo,
- .get_hw_state = vlv_wm_get_hw_state_and_sanitize,
+ .get_hw_state = vlv_wm_get_hw_state,
+ .sanitize = vlv_wm_sanitize,
};
static const struct intel_wm_funcs g4x_wm_funcs = {
.compute_watermarks = g4x_compute_watermarks,
.initial_watermarks = g4x_initial_watermarks,
.optimize_watermarks = g4x_optimize_watermarks,
- .get_hw_state = g4x_wm_get_hw_state_and_sanitize,
+ .get_hw_state = g4x_wm_get_hw_state,
+ .sanitize = g4x_wm_sanitize,
};
static const struct intel_wm_funcs pnv_wm_funcs = {
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 5d3d54922d62..402b7b2e1829 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1647,7 +1647,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- ret = intel_panel_fitting(pipe_config, conn_state);
+ ret = intel_pfit_compute_config(pipe_config, conn_state);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index bbf8c5a8fdbd..1addd6288241 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -9,6 +9,8 @@
#include <linux/acpi.h>
#include <acpi/video.h>
+#include <drm/drm_print.h>
+
#include "i915_utils.h"
#include "intel_acpi.h"
#include "intel_display_core.h"
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index e506f6a87344..a5a7e2906ba8 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -14,7 +14,6 @@ struct drm_connector_state;
struct drm_crtc;
struct drm_crtc_state;
struct drm_device;
-struct drm_i915_private;
struct drm_property;
struct intel_atomic_state;
struct intel_connector;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 124cd9ddba0b..7276179df878 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -36,12 +36,15 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
+#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
-#include "i915_drv.h"
+#include "gem/i915_gem_object.h"
#include "i915_config.h"
+#include "i915_scheduler_types.h"
+#include "i915_vma.h"
#include "i9xx_plane_regs.h"
#include "intel_atomic_plane.h"
#include "intel_cdclk.h"
@@ -131,6 +134,7 @@ intel_plane_duplicate_state(struct drm_plane *plane)
intel_state->ggtt_vma = NULL;
intel_state->dpt_vma = NULL;
intel_state->flags = 0;
+ intel_state->damage = DRM_RECT_INIT(0, 0, 0, 0);
/* add reference to fb */
if (intel_state->hw.fb)
@@ -164,10 +168,10 @@ intel_plane_destroy_state(struct drm_plane *plane,
bool intel_plane_needs_physical(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
return plane->id == PLANE_CURSOR &&
- DISPLAY_INFO(i915)->cursor_needs_physical;
+ DISPLAY_INFO(display)->cursor_needs_physical;
}
bool intel_plane_can_async_flip(struct intel_plane *plane, u64 modifier)
@@ -272,7 +276,7 @@ int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
struct intel_plane *plane,
bool *need_cdclk_calc)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
const struct intel_plane_state *plane_state =
intel_atomic_get_new_plane_state(state, plane);
struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
@@ -317,7 +321,7 @@ int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
cdclk_state->min_cdclk[crtc->pipe])
return 0;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] min cdclk (%d kHz) > [CRTC:%d:%s] min cdclk (%d kHz)\n",
plane->base.base.id, plane->base.name,
new_crtc_state->min_cdclk[plane->id],
@@ -336,6 +340,25 @@ static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state)
memset(&plane_state->hw, 0, sizeof(plane_state->hw));
}
+static void
+intel_plane_copy_uapi_plane_damage(struct intel_plane_state *new_plane_state,
+ const struct intel_plane_state *old_uapi_plane_state,
+ const struct intel_plane_state *new_uapi_plane_state)
+{
+ struct intel_display *display = to_intel_display(new_plane_state);
+ struct drm_rect *damage = &new_plane_state->damage;
+
+ /* damage property tracking enabled from display version 12 onwards */
+ if (DISPLAY_VER(display) < 12)
+ return;
+
+ if (!drm_atomic_helper_damage_merged(&old_uapi_plane_state->uapi,
+ &new_uapi_plane_state->uapi,
+ damage))
+ /* In case the helper fails, mark the whole plane region as damaged */
+ *damage = drm_plane_state_src(&new_uapi_plane_state->uapi);
+}
+
void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
const struct intel_plane_state *from_plane_state,
struct intel_crtc *crtc)
@@ -411,7 +434,7 @@ static bool intel_plane_do_async_flip(struct intel_plane *plane,
const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
if (!plane->async_flip)
return false;
@@ -432,7 +455,7 @@ static bool intel_plane_do_async_flip(struct intel_plane *plane,
* extend this so other scanout parameters (stride/etc) could
* be changed as well...
*/
- return DISPLAY_VER(i915) < 9 || old_crtc_state->uapi.async_flip;
+ return DISPLAY_VER(display) < 9 || old_crtc_state->uapi.async_flip;
}
static bool i9xx_must_disable_cxsr(const struct intel_crtc_state *new_crtc_state,
@@ -536,16 +559,16 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *new_plane_state)
{
+ struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bool mode_changed = intel_crtc_needs_modeset(new_crtc_state);
bool was_crtc_enabled = old_crtc_state->hw.active;
bool is_crtc_enabled = new_crtc_state->hw.active;
bool turn_off, turn_on, visible, was_visible;
int ret;
- if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
+ if (DISPLAY_VER(display) >= 9 && plane->id != PLANE_CURSOR) {
ret = skl_update_scaler_plane(new_crtc_state, new_plane_state);
if (ret)
return ret;
@@ -554,7 +577,7 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr
was_visible = old_plane_state->uapi.visible;
visible = new_plane_state->uapi.visible;
- if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
+ if (!was_crtc_enabled && drm_WARN_ON(display->drm, was_visible))
was_visible = false;
/*
@@ -578,7 +601,7 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr
turn_off = was_visible && (!visible || mode_changed);
turn_on = visible && (!was_visible || mode_changed);
- drm_dbg_atomic(&dev_priv->drm,
+ drm_dbg_atomic(display->drm,
"[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
crtc->base.base.id, crtc->base.name,
plane->base.base.id, plane->base.name,
@@ -588,11 +611,11 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr
if (visible || was_visible)
new_crtc_state->fb_bits |= plane->frontbuffer_bit;
- if (HAS_GMCH(dev_priv) &&
+ if (HAS_GMCH(display) &&
i9xx_must_disable_cxsr(new_crtc_state, old_plane_state, new_plane_state))
new_crtc_state->disable_cxsr = true;
- if ((IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) &&
+ if ((display->platform.ironlake || display->platform.sandybridge || display->platform.ivybridge) &&
ilk_must_disable_cxsr(new_crtc_state, old_plane_state, new_plane_state))
new_crtc_state->disable_cxsr = true;
@@ -685,10 +708,10 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_plane *
intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_plane *plane;
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
if (plane->id == plane_id)
return plane;
}
@@ -705,6 +728,7 @@ int intel_plane_atomic_check(struct intel_atomic_state *state,
const struct intel_plane_state *old_plane_state =
intel_atomic_get_old_plane_state(state, plane);
const struct intel_plane_state *new_primary_crtc_plane_state;
+ const struct intel_plane_state *old_primary_crtc_plane_state;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, plane->pipe);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
@@ -719,10 +743,17 @@ int intel_plane_atomic_check(struct intel_atomic_state *state,
new_primary_crtc_plane_state =
intel_atomic_get_new_plane_state(state, primary_crtc_plane);
+ old_primary_crtc_plane_state =
+ intel_atomic_get_old_plane_state(state, primary_crtc_plane);
} else {
new_primary_crtc_plane_state = new_plane_state;
+ old_primary_crtc_plane_state = old_plane_state;
}
+ intel_plane_copy_uapi_plane_damage(new_plane_state,
+ old_primary_crtc_plane_state,
+ new_primary_crtc_plane_state);
+
intel_plane_copy_uapi_to_hw_state(new_plane_state,
new_primary_crtc_plane_state,
crtc);
@@ -788,6 +819,9 @@ void intel_plane_update_noarm(struct intel_dsb *dsb,
trace_intel_plane_update_noarm(plane_state, crtc);
+ if (plane->fbc)
+ intel_fbc_dirty_rect_update_noarm(dsb, plane);
+
if (plane->update_noarm)
plane->update_noarm(dsb, plane, crtc_state, plane_state);
}
@@ -926,9 +960,9 @@ void intel_crtc_planes_update_arm(struct intel_dsb *dsb,
struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
skl_crtc_planes_update_arm(dsb, state, crtc);
else
i9xx_crtc_planes_update_arm(dsb, state, crtc);
@@ -939,7 +973,7 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
int min_scale, int max_scale,
bool can_position)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_framebuffer *fb = plane_state->hw.fb;
struct drm_rect *src = &plane_state->uapi.src;
@@ -959,7 +993,7 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
if (hscale < 0 || vscale < 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] invalid scaling "DRM_RECT_FP_FMT " -> " DRM_RECT_FMT "\n",
plane->base.base.id, plane->base.name,
DRM_RECT_FP_ARG(src), DRM_RECT_ARG(dst));
@@ -976,7 +1010,7 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
if (!can_position && plane_state->uapi.visible &&
!drm_rect_equals(dst, clip)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] plane (" DRM_RECT_FMT ") must cover entire CRTC (" DRM_RECT_FMT ")\n",
plane->base.base.id, plane->base.name,
DRM_RECT_ARG(dst), DRM_RECT_ARG(clip));
@@ -991,7 +1025,7 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
struct drm_rect *src = &plane_state->uapi.src;
@@ -1025,18 +1059,18 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
if (fb->format->format == DRM_FORMAT_RGB565 && rotated) {
hsub = 2;
vsub = 2;
- } else if (DISPLAY_VER(i915) >= 20 &&
+ } else if (DISPLAY_VER(display) >= 20 &&
intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
/*
* This allows NV12 and P0xx formats to have odd size and/or odd
- * source coordinates on DISPLAY_VER(i915) >= 20
+ * source coordinates on DISPLAY_VER(display) >= 20
*/
hsub = 1;
vsub = 1;
/* Wa_16023981245 */
- if ((DISPLAY_VERx100(i915) == 2000 ||
- DISPLAY_VERx100(i915) == 3000) &&
+ if ((DISPLAY_VERx100(display) == 2000 ||
+ DISPLAY_VERx100(display) == 3000) &&
src_x % 2 != 0)
hsub = 2;
} else {
@@ -1048,7 +1082,7 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
hsub = vsub = max(hsub, vsub);
if (src_x % hsub || src_w % hsub) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n",
plane->base.base.id, plane->base.name,
src_x, src_w, hsub, str_yes_no(rotated));
@@ -1056,7 +1090,7 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
}
if (src_y % vsub || src_h % vsub) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n",
plane->base.base.id, plane->base.name,
src_y, src_h, vsub, str_yes_no(rotated));
@@ -1119,11 +1153,11 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
{
struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
struct intel_plane *plane = to_intel_plane(_plane);
+ struct intel_display *display = to_intel_display(plane);
struct intel_plane_state *new_plane_state =
to_intel_plane_state(_new_plane_state);
struct intel_atomic_state *state =
to_intel_atomic_state(new_plane_state->uapi.state);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct intel_plane_state *old_plane_state =
intel_atomic_get_old_plane_state(state, plane);
struct drm_gem_object *obj = intel_fb_bo(new_plane_state->hw.fb);
@@ -1181,7 +1215,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* that are not quite steady state without resorting to forcing
* maximum clocks following a vblank miss (see do_rps_boost()).
*/
- intel_display_rps_mark_interactive(dev_priv, state, true);
+ intel_display_rps_mark_interactive(display, state, true);
return 0;
@@ -1202,17 +1236,17 @@ static void
intel_cleanup_plane_fb(struct drm_plane *plane,
struct drm_plane_state *_old_plane_state)
{
+ struct intel_display *display = to_intel_display(plane->dev);
struct intel_plane_state *old_plane_state =
to_intel_plane_state(_old_plane_state);
struct intel_atomic_state *state =
to_intel_atomic_state(old_plane_state->uapi.state);
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_gem_object *obj = intel_fb_bo(old_plane_state->hw.fb);
if (!obj)
return;
- intel_display_rps_mark_interactive(dev_priv, state, false);
+ intel_display_rps_mark_interactive(display, state, false);
intel_plane_unpin_fb(old_plane_state);
}
@@ -1301,14 +1335,13 @@ static int icl_check_nv12_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane_state *plane_state;
struct intel_plane *plane;
int i;
- if (DISPLAY_VER(dev_priv) < 11)
+ if (DISPLAY_VER(display) < 11)
return 0;
/*
@@ -1336,7 +1369,7 @@ static int icl_check_nv12_planes(struct intel_atomic_state *state,
if ((crtc_state->nv12_planes & BIT(plane->id)) == 0)
continue;
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, y_plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, y_plane) {
if (!icl_is_nv12_y_plane(display, y_plane->id))
continue;
@@ -1351,7 +1384,7 @@ static int icl_check_nv12_planes(struct intel_atomic_state *state,
}
if (!y_plane_state) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] need %d free Y planes for planar YUV\n",
crtc->base.base.id, crtc->base.name,
hweight8(crtc_state->nv12_planes));
@@ -1368,10 +1401,10 @@ static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
struct intel_crtc *crtc,
u8 plane_ids_mask)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_plane *plane;
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
struct intel_plane_state *plane_state;
if ((plane_ids_mask & BIT(plane->id)) == 0)
@@ -1398,12 +1431,12 @@ int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
new_crtc_state->enabled_planes);
}
-static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
+static bool active_planes_affects_min_cdclk(struct intel_display *display)
{
/* See {hsw,vlv,ivb}_plane_ratio() */
- return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
- IS_IVYBRIDGE(dev_priv);
+ return display->platform.broadwell || display->platform.haswell ||
+ display->platform.cherryview || display->platform.valleyview ||
+ display->platform.ivybridge;
}
static u8 intel_joiner_affected_planes(struct intel_atomic_state *state,
@@ -1482,7 +1515,7 @@ static int intel_add_affected_planes(struct intel_atomic_state *state)
int intel_atomic_check_planes(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_plane_state __maybe_unused *plane_state;
struct intel_plane *plane;
@@ -1496,7 +1529,7 @@ int intel_atomic_check_planes(struct intel_atomic_state *state)
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
ret = intel_plane_atomic_check(state, plane);
if (ret) {
- drm_dbg_atomic(&dev_priv->drm,
+ drm_dbg_atomic(display->drm,
"[PLANE:%d:%s] atomic driver check failed\n",
plane->base.base.id, plane->base.name);
return ret;
@@ -1516,7 +1549,7 @@ int intel_atomic_check_planes(struct intel_atomic_state *state)
* the planes' minimum cdclk calculation. Add such planes
* to the state before we compute the minimum cdclk.
*/
- if (!active_planes_affects_min_cdclk(dev_priv))
+ if (!active_planes_affects_min_cdclk(display))
continue;
old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
@@ -1532,3 +1565,8 @@ int intel_atomic_check_planes(struct intel_atomic_state *state)
return 0;
}
+
+u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state)
+{
+ return i915_ggtt_offset(plane_state->ggtt_vma);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 65edd88d28a9..6efac923dcbc 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -88,4 +88,6 @@ int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_atomic_check_planes(struct intel_atomic_state *state);
+u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state);
+
#endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index aaba438ab41e..ea935a5d94c8 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -190,7 +190,9 @@ static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = {
*/
static bool needs_wa_14020863754(struct intel_display *display)
{
- return DISPLAY_VER(display) == 20 || display->platform.battlemage;
+ return DISPLAY_VERx100(display) == 3000 ||
+ DISPLAY_VERx100(display) == 2000 ||
+ DISPLAY_VERx100(display) == 1401;
}
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index e0e4e9b62d8d..a8d08d7d82b3 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -2902,7 +2902,6 @@ init_vbt_panel_defaults(struct intel_panel *panel)
static void
init_vbt_missing_defaults(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
unsigned int ports = DISPLAY_RUNTIME_INFO(display)->port_mask;
enum port port;
@@ -2912,13 +2911,13 @@ init_vbt_missing_defaults(struct intel_display *display)
for_each_port_masked(port, ports) {
struct intel_bios_encoder_data *devdata;
struct child_device_config *child;
- enum phy phy = intel_port_to_phy(i915, port);
+ enum phy phy = intel_port_to_phy(display, port);
/*
* VBT has the TypeC mode (native,TBT/USB) and we don't want
* to detect it.
*/
- if (intel_phy_is_tc(i915, phy))
+ if (intel_phy_is_tc(display, phy))
continue;
/* Create fake child device config */
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 23edc81741de..048be2872247 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -806,24 +806,6 @@ static int intel_bw_crtc_min_cdclk(const struct intel_crtc_state *crtc_state)
return DIV_ROUND_UP_ULL(mul_u32_u32(intel_bw_crtc_data_rate(crtc_state), 10), 512);
}
-void intel_bw_crtc_update(struct intel_bw_state *bw_state,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-
- bw_state->data_rate[crtc->pipe] =
- intel_bw_crtc_data_rate(crtc_state);
- bw_state->num_active_planes[crtc->pipe] =
- intel_bw_crtc_num_active_planes(crtc_state);
- bw_state->force_check_qgv = true;
-
- drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
- pipe_name(crtc->pipe),
- bw_state->data_rate[crtc->pipe],
- bw_state->num_active_planes[crtc->pipe]);
-}
-
static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
const struct intel_bw_state *bw_state)
{
@@ -1422,6 +1404,62 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
return 0;
}
+static void intel_bw_crtc_update(struct intel_bw_state *bw_state,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ bw_state->data_rate[crtc->pipe] =
+ intel_bw_crtc_data_rate(crtc_state);
+ bw_state->num_active_planes[crtc->pipe] =
+ intel_bw_crtc_num_active_planes(crtc_state);
+ bw_state->force_check_qgv = true;
+
+ drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
+ pipe_name(crtc->pipe),
+ bw_state->data_rate[crtc->pipe],
+ bw_state->num_active_planes[crtc->pipe]);
+}
+
+void intel_bw_update_hw_state(struct intel_display *display)
+{
+ struct intel_bw_state *bw_state =
+ to_intel_bw_state(display->bw.obj.state);
+ struct intel_crtc *crtc;
+
+ if (DISPLAY_VER(display) < 9)
+ return;
+
+ bw_state->active_pipes = 0;
+
+ for_each_intel_crtc(display->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+
+ if (crtc_state->hw.active)
+ bw_state->active_pipes |= BIT(pipe);
+
+ if (DISPLAY_VER(display) >= 11)
+ intel_bw_crtc_update(bw_state, crtc_state);
+ }
+}
+
+void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_bw_state *bw_state =
+ to_intel_bw_state(display->bw.obj.state);
+ enum pipe pipe = crtc->pipe;
+
+ if (DISPLAY_VER(display) < 9)
+ return;
+
+ bw_state->data_rate[pipe] = 0;
+ bw_state->num_active_planes[pipe] = 0;
+}
+
static struct intel_global_state *
intel_bw_duplicate_state(struct intel_global_obj *obj)
{
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 161813cca473..3313e4eac4f0 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -14,7 +14,9 @@
struct drm_i915_private;
struct intel_atomic_state;
+struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_dbuf_bw {
unsigned int max_bw[I915_MAX_DBUF_SLICES];
@@ -73,13 +75,13 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state);
void intel_bw_init_hw(struct drm_i915_private *dev_priv);
int intel_bw_init(struct drm_i915_private *dev_priv);
int intel_bw_atomic_check(struct intel_atomic_state *state);
-void intel_bw_crtc_update(struct intel_bw_state *bw_state,
- const struct intel_crtc_state *crtc_state);
int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
u32 points_mask);
int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
bool *need_cdclk_calc);
int intel_bw_min_cdclk(struct drm_i915_private *i915,
const struct intel_bw_state *bw_state);
+void intel_bw_update_hw_state(struct intel_display *display);
+void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc);
#endif /* __INTEL_BW_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index c6cfc57a0346..2a8749a0213e 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -2788,7 +2788,7 @@ static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
return min_cdclk;
}
-int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
+static int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
{
int min_cdclk;
@@ -3340,6 +3340,34 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
return 0;
}
+void intel_cdclk_update_hw_state(struct intel_display *display)
+{
+ struct intel_cdclk_state *cdclk_state =
+ to_intel_cdclk_state(display->cdclk.obj.state);
+ struct intel_crtc *crtc;
+
+ cdclk_state->active_pipes = 0;
+
+ for_each_intel_crtc(display->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+
+ if (crtc_state->hw.active)
+ cdclk_state->active_pipes |= BIT(pipe);
+
+ cdclk_state->min_cdclk[pipe] = intel_crtc_compute_min_cdclk(crtc_state);
+ cdclk_state->min_voltage_level[pipe] = crtc_state->min_voltage_level;
+ }
+}
+
+void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+
+ intel_cdclk_update_hw_state(display);
+}
+
static int intel_compute_max_dotclk(struct intel_display *display)
{
int ppc = intel_cdclk_ppc(display, HAS_DOUBLE_WIDE(display));
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index 6b0e7a41eba3..a1cefd455d92 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -12,6 +12,7 @@
#include "intel_global_state.h"
struct intel_atomic_state;
+struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
@@ -58,7 +59,6 @@ struct intel_cdclk_state {
bool disable_pipes;
};
-int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
void intel_cdclk_init_hw(struct intel_display *display);
void intel_cdclk_uninit_hw(struct intel_display *display);
void intel_init_cdclk_hooks(struct intel_display *display);
@@ -83,6 +83,8 @@ int intel_cdclk_atomic_check(struct intel_atomic_state *state,
int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joined_mbus);
struct intel_cdclk_state *
intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
+void intel_cdclk_update_hw_state(struct intel_display *display);
+void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc);
#define to_intel_cdclk_state(global_state) \
container_of_const((global_state), struct intel_cdclk_state, base)
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
index 0964e392d02c..ee41acdccf4e 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
@@ -133,6 +133,8 @@
#define TX_TRAINING_EN REG_BIT(31)
#define TAP2_DISABLE REG_BIT(30)
#define TAP3_DISABLE REG_BIT(29)
+#define CURSOR_PROGRAM REG_BIT(26)
+#define COEFF_POLARITY REG_BIT(25)
#define SCALING_MODE_SEL_MASK REG_GENMASK(20, 18)
#define SCALING_MODE_SEL(x) REG_FIELD_PREP(SCALING_MODE_SEL_MASK, (x))
#define RTERM_SELECT_MASK REG_GENMASK(5, 3)
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 358965fc7f55..e42357bd9e80 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -145,8 +145,8 @@ void intel_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
- if (intel_connector->port)
- drm_dp_mst_put_port_malloc(intel_connector->port);
+ if (intel_connector->mst.port)
+ drm_dp_mst_put_port_malloc(intel_connector->mst.port);
kfree(connector);
}
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 321580b095e7..76ffb3f8467c 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -54,6 +54,7 @@
#include "intel_load_detect.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
+#include "intel_pfit.h"
/* Here's the desired hotplug mode */
#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_ENABLE | \
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 7937f4de66cb..f38c998935b9 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -70,6 +70,7 @@
#include "intel_lspcon.h"
#include "intel_mg_phy_regs.h"
#include "intel_modeset_lock.h"
+#include "intel_pfit.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
@@ -187,11 +188,8 @@ static i915_reg_t intel_ddi_buf_status_reg(struct intel_display *display, enum p
return DDI_BUF_CTL(port);
}
-void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
- enum port port)
+void intel_wait_ddi_buf_idle(struct intel_display *display, enum port port)
{
- struct intel_display *display = &dev_priv->display;
-
/*
* Bspec's platform specific timeouts:
* MTL+ : 100 us
@@ -890,7 +888,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
encoder->base.base.id, encoder->base.name);
if (!mst_pipe_mask && dp128b132b_pipe_mask) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
/*
* If we don't have 8b/10b MST, but have more than one
@@ -902,7 +900,8 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
* we don't expect MST to have been enabled at that point, and
* can assume it's SST.
*/
- if (hweight8(dp128b132b_pipe_mask) > 1 || intel_dp->is_mst)
+ if (hweight8(dp128b132b_pipe_mask) > 1 ||
+ intel_dp_mst_encoder_active_links(dig_port))
mst_pipe_mask = dp128b132b_pipe_mask;
}
@@ -1194,7 +1193,8 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
/* Set PORT_TX_DW5 */
val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
- TAP2_DISABLE | TAP3_DISABLE);
+ COEFF_POLARITY | CURSOR_PROGRAM |
+ TAP2_DISABLE | TAP3_DISABLE);
val |= SCALING_MODE_SEL(0x2);
val |= RTERM_SELECT(0x6);
val |= TAP3_DISABLE;
@@ -3095,7 +3095,7 @@ static void intel_ddi_buf_disable(struct intel_encoder *encoder,
intel_de_rmw(dev_priv, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
if (DISPLAY_VER(display) >= 14)
- intel_wait_ddi_buf_idle(dev_priv, port);
+ intel_wait_ddi_buf_idle(display, port);
mtl_ddi_disable_d2d(encoder);
@@ -3107,7 +3107,7 @@ static void intel_ddi_buf_disable(struct intel_encoder *encoder,
intel_ddi_disable_fec(encoder, crtc_state);
if (DISPLAY_VER(display) < 14)
- intel_wait_ddi_buf_idle(dev_priv, port);
+ intel_wait_ddi_buf_idle(display, port);
intel_ddi_wait_for_fec_status(encoder, crtc_state, false);
}
@@ -4131,13 +4131,13 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
} else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST) {
intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
} else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display)) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
/*
* If this is true, we know we're being called from mst stream
* encoder's ->get_config().
*/
- if (intel_dp->is_mst)
+ if (intel_dp_mst_encoder_active_links(dig_port))
intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
else
intel_ddi_read_func_ctl_dp_sst(encoder, pipe_config, ddi_func_ctl);
@@ -4583,7 +4583,7 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
intel_display_power_flush_work(display);
drm_encoder_cleanup(encoder);
- kfree(dig_port->hdcp_port_data.streams);
+ kfree(dig_port->hdcp.port_data.streams);
kfree(dig_port);
}
@@ -4661,6 +4661,7 @@ static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
static int intel_hdmi_reset_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
struct intel_connector *connector = hdmi->attached_connector;
@@ -4727,7 +4728,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
* would be perfectly happy if were to just reconfigure
* the SCDC settings on the fly.
*/
- return intel_modeset_commit_pipes(dev_priv, BIT(crtc->pipe), ctx);
+ return intel_modeset_commit_pipes(display, BIT(crtc->pipe), ctx);
}
static void intel_ddi_link_check(struct intel_encoder *encoder)
@@ -5101,7 +5102,7 @@ void intel_ddi_init(struct intel_display *display,
return;
}
- phy = intel_port_to_phy(dev_priv, port);
+ phy = intel_port_to_phy(display, port);
/*
* On platforms with HTI (aka HDPORT), if it's enabled at boot it may
@@ -5138,7 +5139,7 @@ void intel_ddi_init(struct intel_display *display,
return;
}
- if (intel_phy_is_snps(dev_priv, phy) &&
+ if (intel_phy_is_snps(display, phy) &&
dev_priv->display.snps.phy_failed_calibration & BIT(phy)) {
drm_dbg_kms(&dev_priv->drm,
"SNPS PHY %c failed to calibrate, proceeding anyway\n",
@@ -5161,7 +5162,7 @@ void intel_ddi_init(struct intel_display *display,
port_name(port - PORT_D_XELPD + PORT_D),
phy_name(phy));
} else if (DISPLAY_VER(dev_priv) >= 12) {
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ enum tc_port tc_port = intel_port_to_tc(display, port);
drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
@@ -5171,7 +5172,7 @@ void intel_ddi_init(struct intel_display *display,
tc_port != TC_PORT_NONE ? "TC" : "",
tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
} else if (DISPLAY_VER(dev_priv) >= 11) {
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ enum tc_port tc_port = intel_port_to_tc(display, port);
drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
@@ -5188,8 +5189,8 @@ void intel_ddi_init(struct intel_display *display,
intel_encoder_link_check_init(encoder, intel_ddi_link_check);
- mutex_init(&dig_port->hdcp_mutex);
- dig_port->num_hdcp_streams = 0;
+ mutex_init(&dig_port->hdcp.mutex);
+ dig_port->hdcp.num_streams = 0;
encoder->hotplug = intel_ddi_hotplug;
encoder->compute_output_type = intel_ddi_compute_output_type;
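
[Editor's note] The intel_ddi.c hunks above repeatedly convert functions from taking struct drm_i915_private *dev_priv to taking struct intel_display *display, with helpers such as intel_de_rmw(), intel_de_read(), intel_de_write() and the DISPLAY_VER() check accepting the display pointer directly (see intel_wait_ddi_buf_idle() and intel_port_to_phy()). A rough standalone sketch of that parameter change follows; it is not i915 code, and the struct layouts, the ver field, and the wait_buf_idle_* functions are invented stand-ins.

/*
 * Minimal standalone sketch of the dev_priv -> display parameter
 * conversion. Types and helpers are simplified stand-ins, NOT the
 * real i915 definitions.
 */
#include <stdio.h>

struct drm_device { int dummy; };

/* Toy stand-in for struct intel_display. */
struct intel_display {
	struct drm_device *drm;
	int ver;                        /* stand-in for DISPLAY_VER() */
};

/* Toy stand-in for struct drm_i915_private embedding the display. */
struct drm_i915_private {
	struct drm_device drm;
	struct intel_display display;
};

/* Old-style entry point: takes the driver-private pointer. */
static void wait_buf_idle_old(struct drm_i915_private *dev_priv, int port)
{
	struct intel_display *display = &dev_priv->display;

	printf("old: port %d, display ver %d\n", port, display->ver);
}

/* New-style entry point: callers pass the display pointer directly. */
static void wait_buf_idle_new(struct intel_display *display, int port)
{
	printf("new: port %d, display ver %d\n", port, display->ver);
}

int main(void)
{
	struct drm_i915_private i915 = { .display = { .ver = 14 } };

	i915.display.drm = &i915.drm;

	wait_buf_idle_old(&i915, 0);          /* pre-conversion call */
	wait_buf_idle_new(&i915.display, 0);  /* post-conversion call */
	return 0;
}
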
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 2faadd1441e2..353eb04079e9 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -9,7 +9,6 @@
#include "i915_reg_defs.h"
struct drm_connector_state;
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_bios_encoder_data;
struct intel_connector;
@@ -54,8 +53,7 @@ void hsw_ddi_get_config(struct intel_encoder *encoder,
struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder);
void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
-void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
- enum port port);
+void intel_wait_ddi_buf_idle(struct intel_display *display, enum port port);
void intel_ddi_init(struct intel_display *display,
const struct intel_bios_encoder_data *devdata);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
index 2133984a572b..29a190390192 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
@@ -8,7 +8,6 @@
#include <linux/types.h>
-struct drm_i915_private;
struct intel_encoder;
struct intel_crtc_state;
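
[Editor's note] The intel_display.c hunks below show the second recurring pattern in this series: platform checks move from IS_VALLEYVIEW()/IS_CHERRYVIEW()/IS_I830()-style macros on dev_priv to boolean flags such as display->platform.cherryview, and feature/version checks (HAS_GMCH(), DISPLAY_VER()) take the display pointer. The sketch below is a standalone illustration of that check conversion; the macro, the is_cherryview field, and the struct layouts are invented stand-ins, not the real definitions.

/*
 * Illustrative stand-ins only: contrasts a legacy IS_<PLATFORM>(i915)
 * macro with a display->platform.* flag, as in the hunks below.
 */
#include <stdbool.h>
#include <stdio.h>

struct intel_display {
	struct {
		bool valleyview;
		bool cherryview;
	} platform;                     /* per-platform flags, set at probe */
};

struct drm_i915_private {
	struct intel_display display;
	bool is_cherryview;             /* what a legacy IS_*() macro reads */
};

/* Old-style check via a macro on the driver-private struct. */
#define IS_CHERRYVIEW(i915)	((i915)->is_cherryview)

int main(void)
{
	struct drm_i915_private i915 = {
		.display.platform.cherryview = true,
		.is_cherryview = true,
	};

	/* Pre-conversion: dev_priv-based macro. */
	if (IS_CHERRYVIEW(&i915))
		printf("legacy check: cherryview\n");

	/* Post-conversion: display-based platform flag. */
	if (i915.display.platform.cherryview)
		printf("display check: cherryview\n");

	return 0;
}
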
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 065fdf6dbb88..3afb85fe8536 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -104,6 +104,7 @@
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
+#include "intel_pfit.h"
#include "intel_pipe_crc.h"
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
@@ -181,16 +182,17 @@ int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
return hpll;
}
-void intel_update_czclk(struct drm_i915_private *dev_priv)
+void intel_update_czclk(struct intel_display *display)
{
- if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ if (!display->platform.valleyview && !display->platform.cherryview)
return;
dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
CCK_CZ_CLOCK_CONTROL);
- drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
- dev_priv->czclk_freq);
+ drm_dbg_kms(display->drm, "CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}
static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
@@ -201,29 +203,29 @@ static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
/* WA Display #0827: Gen9:all */
static void
-skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
+skl_wa_827(struct intel_display *display, enum pipe pipe, bool enable)
{
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ intel_de_rmw(display, CLKGATE_DIS_PSL(pipe),
DUPS1_GATING_DIS | DUPS2_GATING_DIS,
enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0);
}
/* Wa_2006604312:icl,ehl */
static void
-icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
+icl_wa_scalerclkgating(struct intel_display *display, enum pipe pipe,
bool enable)
{
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ intel_de_rmw(display, CLKGATE_DIS_PSL(pipe),
DPFR_GATING_DIS,
enable ? DPFR_GATING_DIS : 0);
}
/* Wa_1604331009:icl,jsl,ehl */
static void
-icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
+icl_wa_cursorclkgating(struct intel_display *display, enum pipe pipe,
bool enable)
{
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ intel_de_rmw(display, CLKGATE_DIS_PSL(pipe),
CURSOR_GATING_DIS,
enable ? CURSOR_GATING_DIS : 0);
}
@@ -403,16 +405,16 @@ struct intel_crtc *intel_primary_crtc(const struct intel_crtc_state *crtc_state)
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_display *display = to_intel_display(old_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (DISPLAY_VER(dev_priv) >= 4) {
+ if (DISPLAY_VER(display) >= 4) {
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
/* Wait for the Pipe State to go off */
- if (intel_de_wait_for_clear(dev_priv, TRANSCONF(dev_priv, cpu_transcoder),
+ if (intel_de_wait_for_clear(display, TRANSCONF(display, cpu_transcoder),
TRANSCONF_STATE_ENABLE, 100))
- drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
+ drm_WARN(display->drm, 1, "pipe_off wait timed out\n");
} else {
intel_wait_for_pipe_scanline_stopped(crtc);
}
@@ -466,10 +468,10 @@ static void assert_plane(struct intel_plane *plane, bool state)
static void assert_planes_disabled(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_plane *plane;
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane)
assert_plane_disabled(plane);
}
@@ -477,7 +479,6 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
u32 val;
@@ -491,7 +492,7 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
* a plane. On ILK+ the pipe PLLs are integrated, so we don't
* need the check.
*/
- if (HAS_GMCH(dev_priv)) {
+ if (HAS_GMCH(display)) {
if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(display);
else
@@ -508,11 +509,11 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
}
/* Wa_22012358565:adl-p */
- if (DISPLAY_VER(dev_priv) == 13)
+ if (DISPLAY_VER(display) == 13)
intel_de_rmw(display, PIPE_ARB_CTL(display, pipe),
0, PIPE_ARB_USE_PROG_SLOTS);
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
u32 clear = DP_DSC_INSERT_SF_AT_EOL_WA;
u32 set = 0;
@@ -526,7 +527,7 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
val = intel_de_read(display, TRANSCONF(display, cpu_transcoder));
if (val & TRANSCONF_ENABLE) {
/* we keep both pipes enabled on 830 */
- drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
+ drm_WARN_ON(display->drm, !display->platform.i830);
return;
}
@@ -557,12 +558,11 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
struct intel_display *display = to_intel_display(old_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
u32 val;
- drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
+ drm_dbg_kms(display->drm, "disabling pipe %c\n", pipe_name(pipe));
/*
* Make sure planes won't keep trying to pump pixels to us,
@@ -570,7 +570,7 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
*/
assert_planes_disabled(crtc);
- val = intel_de_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
+ val = intel_de_read(display, TRANSCONF(display, cpu_transcoder));
if ((val & TRANSCONF_ENABLE) == 0)
return;
@@ -582,17 +582,17 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
val &= ~TRANSCONF_DOUBLE_WIDE;
/* Don't disable pipe or pipe PLLs if needed */
- if (!IS_I830(dev_priv))
+ if (!display->platform.i830)
val &= ~TRANSCONF_ENABLE;
/* Wa_1409098942:adlp+ */
- if (DISPLAY_VER(dev_priv) >= 13 &&
+ if (DISPLAY_VER(display) >= 13 &&
old_crtc_state->dsc.compression_enable)
val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;
- intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
+ intel_de_write(display, TRANSCONF(display, cpu_transcoder), val);
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
@@ -641,7 +641,7 @@ void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_plane *plane;
/*
@@ -652,7 +652,7 @@ void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
crtc_state->enabled_planes = 0;
crtc_state->active_planes = 0;
- drm_for_each_plane_mask(plane, &dev_priv->drm,
+ drm_for_each_plane_mask(plane, display->drm,
crtc_state->uapi.plane_mask) {
crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
@@ -669,18 +669,16 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
plane->base.base.id, plane->base.name,
crtc->base.base.id, crtc->base.name);
+ intel_plane_set_invisible(crtc_state, plane_state);
intel_set_plane_visible(crtc_state, plane_state, false);
intel_plane_fixup_bitmasks(crtc_state);
- crtc_state->data_rate[plane->id] = 0;
- crtc_state->data_rate_y[plane->id] = 0;
- crtc_state->rel_data_rate[plane->id] = 0;
- crtc_state->rel_data_rate_y[plane->id] = 0;
- crtc_state->min_cdclk[plane->id] = 0;
+
+ skl_wm_plane_disable_noatomic(crtc, plane);
if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
hsw_ips_disable(crtc_state)) {
@@ -697,7 +695,7 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
- if (HAS_GMCH(dev_priv) &&
+ if (HAS_GMCH(display) &&
intel_set_memory_cxsr(dev_priv, false))
intel_plane_initial_vblank_wait(crtc);
@@ -705,7 +703,7 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
* Gen2 reports pipe underruns whenever all planes are disabled.
* So disable underrun reporting before all the planes get disabled.
*/
- if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
+ if (DISPLAY_VER(display) == 2 && !crtc_state->active_planes)
intel_set_cpu_fifo_underrun_reporting(display, crtc->pipe, false);
intel_plane_disable_arm(NULL, plane, crtc_state);
@@ -725,12 +723,12 @@ intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 tmp;
- tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
+ tmp = intel_de_read(display, PIPE_CHICKEN(pipe));
/*
* Display WA #1153: icl
@@ -750,24 +748,24 @@ static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
* Underrun recovery must always be disabled on display 13+.
* DG2 chicken bit meaning is inverted compared to other platforms.
*/
- if (IS_DG2(dev_priv))
+ if (display->platform.dg2)
tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
- else if ((DISPLAY_VER(dev_priv) >= 13) && (DISPLAY_VER(dev_priv) < 30))
+ else if ((DISPLAY_VER(display) >= 13) && (DISPLAY_VER(display) < 30))
tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
/* Wa_14010547955:dg2 */
- if (IS_DG2(dev_priv))
+ if (display->platform.dg2)
tmp |= DG2_RENDER_CCSTAG_4_3_EN;
- intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
+ intel_de_write(display, PIPE_CHICKEN(pipe), tmp);
}
-bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
+bool intel_has_pending_fb_unpin(struct intel_display *display)
{
struct drm_crtc *crtc;
bool cleanup_done;
- drm_for_each_crtc(crtc, &dev_priv->drm) {
+ drm_for_each_crtc(crtc, display->drm) {
struct drm_crtc_commit *commit;
spin_lock(&crtc->commit_lock);
commit = list_first_entry_or_null(&crtc->commit_list,
@@ -819,36 +817,6 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
return encoder;
}
-static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
- enum pipe pipe = crtc->pipe;
- int width = drm_rect_width(dst);
- int height = drm_rect_height(dst);
- int x = dst->x1;
- int y = dst->y1;
-
- if (!crtc_state->pch_pfit.enabled)
- return;
-
- /* Force use of hard-coded filter coefficients
- * as some pre-programmed values are broken,
- * e.g. x201.
- */
- if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
- intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
- PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
- else
- intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
- PF_FILTER_MED_3x3);
- intel_de_write_fw(dev_priv, PF_WIN_POS(pipe),
- PF_WIN_XPOS(x) | PF_WIN_YPOS(y));
- intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe),
- PF_WIN_XSIZE(width) | PF_WIN_YSIZE(height));
-}
-
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
if (crtc->overlay)
@@ -861,13 +829,13 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (!crtc_state->nv12_planes)
return false;
/* WA Display #0827: Gen9:all */
- if (DISPLAY_VER(dev_priv) == 9)
+ if (DISPLAY_VER(display) == 9)
return true;
return false;
@@ -875,10 +843,10 @@ static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
/* Wa_2006604312:icl,ehl */
- if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
+ if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(display) == 11)
return true;
return false;
@@ -886,31 +854,31 @@ static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
/* Wa_1604331009:icl,jsl,ehl */
if (is_hdr_mode(crtc_state) &&
crtc_state->active_planes & BIT(PLANE_CURSOR) &&
- DISPLAY_VER(dev_priv) == 11)
+ DISPLAY_VER(display) == 11)
return true;
return false;
}
-static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
+static void intel_async_flip_vtd_wa(struct intel_display *display,
enum pipe pipe, bool enable)
{
- if (DISPLAY_VER(i915) == 9) {
+ if (DISPLAY_VER(display) == 9) {
/*
* "Plane N stretch max must be programmed to 11b (x1)
* when Async flips are enabled on that plane."
*/
- intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
+ intel_de_rmw(display, CHICKEN_PIPESL_1(pipe),
SKL_PLANE1_STRETCH_MAX_MASK,
enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
} else {
/* Also needed on HSW/BDW albeit undocumented */
- intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
+ intel_de_rmw(display, CHICKEN_PIPESL_1(pipe),
HSW_PRI_STRETCH_MAX_MASK,
enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
}
@@ -918,10 +886,12 @@ static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
- (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
+ (DISPLAY_VER(display) == 9 || display->platform.broadwell ||
+ display->platform.haswell);
}
static void intel_encoders_audio_enable(struct intel_atomic_state *state,
@@ -1070,6 +1040,7 @@ static bool audio_disabling(const struct intel_crtc_state *old_crtc_state,
static void intel_post_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
@@ -1088,19 +1059,19 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (needs_async_flip_vtd_wa(old_crtc_state) &&
!needs_async_flip_vtd_wa(new_crtc_state))
- intel_async_flip_vtd_wa(dev_priv, pipe, false);
+ intel_async_flip_vtd_wa(display, pipe, false);
if (needs_nv12_wa(old_crtc_state) &&
!needs_nv12_wa(new_crtc_state))
- skl_wa_827(dev_priv, pipe, false);
+ skl_wa_827(display, pipe, false);
if (needs_scalerclk_wa(old_crtc_state) &&
!needs_scalerclk_wa(new_crtc_state))
- icl_wa_scalerclkgating(dev_priv, pipe, false);
+ icl_wa_scalerclkgating(display, pipe, false);
if (needs_cursorclk_wa(old_crtc_state) &&
!needs_cursorclk_wa(new_crtc_state))
- icl_wa_cursorclkgating(dev_priv, pipe, false);
+ icl_wa_cursorclkgating(display, pipe, false);
if (intel_crtc_needs_color_update(new_crtc_state))
intel_color_post_update(new_crtc_state);
@@ -1222,22 +1193,22 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
if (!needs_async_flip_vtd_wa(old_crtc_state) &&
needs_async_flip_vtd_wa(new_crtc_state))
- intel_async_flip_vtd_wa(dev_priv, pipe, true);
+ intel_async_flip_vtd_wa(display, pipe, true);
/* Display WA 827 */
if (!needs_nv12_wa(old_crtc_state) &&
needs_nv12_wa(new_crtc_state))
- skl_wa_827(dev_priv, pipe, true);
+ skl_wa_827(display, pipe, true);
/* Wa_2006604312:icl,ehl */
if (!needs_scalerclk_wa(old_crtc_state) &&
needs_scalerclk_wa(new_crtc_state))
- icl_wa_scalerclkgating(dev_priv, pipe, true);
+ icl_wa_scalerclkgating(display, pipe, true);
/* Wa_1604331009:icl,jsl,ehl */
if (!needs_cursorclk_wa(old_crtc_state) &&
needs_cursorclk_wa(new_crtc_state))
- icl_wa_cursorclkgating(dev_priv, pipe, true);
+ icl_wa_cursorclkgating(display, pipe, true);
/*
* Vblank time updates from the shadow to live plane control register
@@ -1248,7 +1219,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
- if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
+ if (HAS_GMCH(display) && old_crtc_state->hw.active &&
new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
intel_crtc_wait_for_next_vblank(crtc);
@@ -1259,7 +1230,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
*
* WaCxSRDisabledForSpriteScaling:ivb
*/
- if (!HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
+ if (!HAS_GMCH(display) && old_crtc_state->hw.active &&
new_crtc_state->disable_cxsr && ilk_disable_cxsr(dev_priv))
intel_crtc_wait_for_next_vblank(crtc);
@@ -1295,7 +1266,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* chance of catching underruns with the intermediate watermarks
* vs. the old plane configuration.
*/
- if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
+ if (DISPLAY_VER(display) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
/*
@@ -1336,7 +1307,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
int i;
@@ -1345,7 +1316,7 @@ static void intel_encoders_update_prepare(struct intel_atomic_state *state)
* Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
* TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
*/
- if (i915->display.dpll.mgr) {
+ if (display->dpll.mgr) {
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (intel_crtc_needs_modeset(new_crtc_state))
continue;
@@ -1541,7 +1512,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (drm_WARN_ON(&dev_priv->drm, crtc->active))
+ if (drm_WARN_ON(display->drm, crtc->active))
return;
/*
@@ -1568,8 +1539,8 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
if (new_crtc_state->has_pch_encoder) {
ilk_pch_pre_enable(state, crtc);
} else {
- assert_fdi_tx_disabled(dev_priv, pipe);
- assert_fdi_rx_disabled(dev_priv, pipe);
+ assert_fdi_tx_disabled(display, pipe);
+ assert_fdi_rx_disabled(display, pipe);
}
ilk_pfit_enable(new_crtc_state);
@@ -1610,26 +1581,26 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
/* Display WA #1180: WaDisableScalarClockGating: glk */
static bool glk_need_scaler_clock_gating_wa(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- return DISPLAY_VER(i915) == 10 && crtc_state->pch_pfit.enabled;
+ return DISPLAY_VER(display) == 10 && crtc_state->pch_pfit.enabled;
}
static void glk_pipe_scaler_clock_gating_wa(struct intel_crtc *crtc, bool enable)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
- intel_de_rmw(i915, CLKGATE_DIS_PSL(crtc->pipe),
+ intel_de_rmw(display, CLKGATE_DIS_PSL(crtc->pipe),
mask, enable ? mask : 0);
}
static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
+ intel_de_write(display, WM_LINETIME(crtc->pipe),
HSW_LINETIME(crtc_state->linetime) |
HSW_IPS_LINETIME(crtc_state->ips_linetime));
}
@@ -1645,8 +1616,8 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (crtc_state->has_pch_encoder) {
@@ -1660,11 +1631,11 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta
}
intel_set_transcoder_timings(crtc_state);
- if (HAS_VRR(dev_priv))
+ if (HAS_VRR(display))
intel_vrr_set_transcoder_timings(crtc_state);
if (cpu_transcoder != TRANSCODER_EDP)
- intel_de_write(dev_priv, TRANS_MULT(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_MULT(display, cpu_transcoder),
crtc_state->pixel_multiplier - 1);
hsw_set_frame_start_delay(crtc_state);
@@ -1678,12 +1649,11 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
struct intel_crtc *pipe_crtc;
int i;
- if (drm_WARN_ON(&dev_priv->drm, crtc->active))
+ if (drm_WARN_ON(display->drm, crtc->active))
return;
for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i)
intel_dmc_enable_pipe(display, pipe_crtc->pipe);
@@ -1706,12 +1676,12 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
intel_dsc_enable(pipe_crtc_state);
- if (HAS_UNCOMPRESSED_JOINER(dev_priv))
+ if (HAS_UNCOMPRESSED_JOINER(display))
intel_uncompressed_joiner_enable(pipe_crtc_state);
intel_set_pipe_src_size(pipe_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ if (DISPLAY_VER(display) >= 9 || display->platform.broadwell)
bdw_set_pipe_misc(NULL, pipe_crtc_state);
}
@@ -1727,7 +1697,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (glk_need_scaler_clock_gating_wa(pipe_crtc_state))
glk_pipe_scaler_clock_gating_wa(pipe_crtc, true);
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
skl_pfit_enable(pipe_crtc_state);
else
ilk_pfit_enable(pipe_crtc_state);
@@ -1740,7 +1710,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
hsw_set_linetime_wm(pipe_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
icl_set_pipe_chicken(pipe_crtc_state);
intel_initial_watermarks(state, pipe_crtc);
@@ -1763,7 +1733,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
* enabling, we need to change the workaround.
*/
hsw_workaround_pipe = pipe_crtc_state->hsw_workaround_pipe;
- if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
+ if (display->platform.haswell && hsw_workaround_pipe != INVALID_PIPE) {
struct intel_crtc *wa_crtc =
intel_crtc_for_pipe(display, hsw_workaround_pipe);
@@ -1773,22 +1743,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
}
}
-void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- /* To avoid upsetting the power well on haswell only disable the pfit if
- * it's in use. The hw state code will make sure we get this right. */
- if (!old_crtc_state->pch_pfit.enabled)
- return;
-
- intel_de_write_fw(dev_priv, PF_CTL(pipe), 0);
- intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0);
- intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0);
-}
-
static void ilk_crtc_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -1856,32 +1810,6 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
intel_dmc_disable_pipe(display, pipe_crtc->pipe);
}
-static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
-{
- struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-
- if (!crtc_state->gmch_pfit.control)
- return;
-
- /*
- * The panel fitter should only be adjusted whilst the pipe is disabled,
- * according to register description and PRM.
- */
- drm_WARN_ON(display->drm,
- intel_de_read(display, PFIT_CONTROL(display)) & PFIT_ENABLE);
- assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
-
- intel_de_write(display, PFIT_PGM_RATIOS(display),
- crtc_state->gmch_pfit.pgm_ratios);
- intel_de_write(display, PFIT_CONTROL(display),
- crtc_state->gmch_pfit.control);
-
- /* Border color in case we don't scale up to the full screen. Black by
- * default, change to something else for debugging. */
- intel_de_write(display, BCLRPAT(display, crtc->pipe), 0);
-}
-
/* Prefer intel_encoder_is_combo() */
bool intel_phy_is_combo(struct intel_display *display, enum phy phy)
{
@@ -1905,47 +1833,47 @@ bool intel_phy_is_combo(struct intel_display *display, enum phy phy)
}
/* Prefer intel_encoder_is_tc() */
-bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
+bool intel_phy_is_tc(struct intel_display *display, enum phy phy)
{
/*
* Discrete GPU phy's are not attached to FIA's to support TC
* subsystem Legacy or non-legacy, and only support native DP/HDMI
*/
- if (IS_DGFX(dev_priv))
+ if (display->platform.dgfx)
return false;
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return phy >= PHY_F && phy <= PHY_I;
- else if (IS_TIGERLAKE(dev_priv))
+ else if (display->platform.tigerlake)
return phy >= PHY_D && phy <= PHY_I;
- else if (IS_ICELAKE(dev_priv))
+ else if (display->platform.icelake)
return phy >= PHY_C && phy <= PHY_F;
return false;
}
/* Prefer intel_encoder_is_snps() */
-bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
+bool intel_phy_is_snps(struct intel_display *display, enum phy phy)
{
/*
* For DG2, and for DG2 only, all four "combo" ports and the TC1 port
* (PHY E) use Synopsis PHYs. See intel_phy_is_tc().
*/
- return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E;
+ return display->platform.dg2 && phy > PHY_NONE && phy <= PHY_E;
}
/* Prefer intel_encoder_to_phy() */
-enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
+enum phy intel_port_to_phy(struct intel_display *display, enum port port)
{
- if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
+ if (DISPLAY_VER(display) >= 13 && port >= PORT_D_XELPD)
return PHY_D + port - PORT_D_XELPD;
- else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
+ else if (DISPLAY_VER(display) >= 13 && port >= PORT_TC1)
return PHY_F + port - PORT_TC1;
- else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
+ else if (display->platform.alderlake_s && port >= PORT_TC1)
return PHY_B + port - PORT_TC1;
- else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
+ else if ((display->platform.dg1 || display->platform.rocketlake) && port >= PORT_TC1)
return PHY_C + port - PORT_TC1;
- else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
+ else if ((display->platform.jasperlake || display->platform.elkhartlake) &&
port == PORT_D)
return PHY_A;
@@ -1953,12 +1881,12 @@ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
}
/* Prefer intel_encoder_to_tc() */
-enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
+enum tc_port intel_port_to_tc(struct intel_display *display, enum port port)
{
- if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
+ if (!intel_phy_is_tc(display, intel_port_to_phy(display, port)))
return TC_PORT_NONE;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
return TC_PORT_1 + port - PORT_TC1;
else
return TC_PORT_1 + port - PORT_C;
@@ -1966,9 +1894,9 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
enum phy intel_encoder_to_phy(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- return intel_port_to_phy(i915, encoder->port);
+ return intel_port_to_phy(display, encoder->port);
}
bool intel_encoder_is_combo(struct intel_encoder *encoder)
@@ -1980,23 +1908,23 @@ bool intel_encoder_is_combo(struct intel_encoder *encoder)
bool intel_encoder_is_snps(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- return intel_phy_is_snps(i915, intel_encoder_to_phy(encoder));
+ return intel_phy_is_snps(display, intel_encoder_to_phy(encoder));
}
bool intel_encoder_is_tc(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- return intel_phy_is_tc(i915, intel_encoder_to_phy(encoder));
+ return intel_phy_is_tc(display, intel_encoder_to_phy(encoder));
}
enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- return intel_port_to_tc(i915, encoder->port);
+ return intel_port_to_tc(display, encoder->port);
}
enum intel_display_power_domain
@@ -2013,8 +1941,8 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
struct intel_power_domain_mask *mask)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct drm_encoder *encoder;
enum pipe pipe = crtc->pipe;
@@ -2030,14 +1958,14 @@ static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
crtc_state->pch_pfit.force_thru)
set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);
- drm_for_each_encoder_mask(encoder, &dev_priv->drm,
+ drm_for_each_encoder_mask(encoder, display->drm,
crtc_state->uapi.encoder_mask) {
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
set_bit(intel_encoder->power_domain, mask->bits);
}
- if (HAS_DDI(dev_priv) && crtc_state->has_audio)
+ if (HAS_DDI(display) && crtc_state->has_audio)
set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);
if (crtc_state->shared_dpll)
@@ -2105,22 +2033,21 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (drm_WARN_ON(&dev_priv->drm, crtc->active))
+ if (drm_WARN_ON(display->drm, crtc->active))
return;
i9xx_configure_cpu_transcoder(new_crtc_state);
intel_set_pipe_src_size(new_crtc_state);
- intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0);
+ intel_de_write(display, VLV_PIPE_MSA_MISC(display, pipe), 0);
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
- intel_de_write(dev_priv, CHV_BLEND(dev_priv, pipe),
+ if (display->platform.cherryview && pipe == PIPE_B) {
+ intel_de_write(display, CHV_BLEND(display, pipe),
CHV_BLEND_LEGACY);
- intel_de_write(dev_priv, CHV_CANVAS(dev_priv, pipe), 0);
+ intel_de_write(display, CHV_CANVAS(display, pipe), 0);
}
crtc->active = true;
@@ -2129,7 +2056,7 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
intel_encoders_pre_pll_enable(state, crtc);
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
chv_enable_pll(new_crtc_state);
else
vlv_enable_pll(new_crtc_state);
@@ -2157,7 +2084,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (drm_WARN_ON(&dev_priv->drm, crtc->active))
+ if (drm_WARN_ON(display->drm, crtc->active))
return;
i9xx_configure_cpu_transcoder(new_crtc_state);
@@ -2166,7 +2093,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
crtc->active = true;
- if (DISPLAY_VER(dev_priv) != 2)
+ if (DISPLAY_VER(display) != 2)
intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
intel_encoders_pre_enable(state, crtc);
@@ -2186,24 +2113,10 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
intel_encoders_enable(state, crtc);
/* prevents spurious underruns */
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
intel_crtc_wait_for_next_vblank(crtc);
}
-static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
-{
- struct intel_display *display = to_intel_display(old_crtc_state);
-
- if (!old_crtc_state->gmch_pfit.control)
- return;
-
- assert_transcoder_disabled(display, old_crtc_state->cpu_transcoder);
-
- drm_dbg_kms(display->drm, "disabling pfit, current: 0x%08x\n",
- intel_de_read(display, PFIT_CONTROL(display)));
- intel_de_write(display, PFIT_CONTROL(display), 0);
-}
-
static void i9xx_crtc_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -2217,7 +2130,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
*/
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
intel_crtc_wait_for_next_vblank(crtc);
intel_encoders_disable(state, crtc);
@@ -2231,9 +2144,9 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
intel_encoders_post_disable(state, crtc);
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
chv_disable_pll(dev_priv, pipe);
- else if (IS_VALLEYVIEW(dev_priv))
+ else if (display->platform.valleyview)
vlv_disable_pll(dev_priv, pipe);
else
i9xx_disable_pll(old_crtc_state);
@@ -2241,14 +2154,14 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
intel_encoders_post_pll_disable(state, crtc);
- if (DISPLAY_VER(dev_priv) != 2)
+ if (DISPLAY_VER(display) != 2)
intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
- if (!dev_priv->display.funcs.wm->initial_watermarks)
+ if (!display->funcs.wm->initial_watermarks)
intel_update_watermarks(dev_priv);
/* clock the pipe down to 640x480@60 to potentially save power */
- if (IS_I830(dev_priv))
+ if (display->platform.i830)
i830_enable_pipe(display, pipe);
}
@@ -2262,11 +2175,11 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
- const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
/* GDG double wide on either pipe, otherwise pipe A only */
- return HAS_DOUBLE_WIDE(dev_priv) &&
- (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
+ return HAS_DOUBLE_WIDE(display) &&
+ (crtc->pipe == PIPE_A || display->platform.i915g);
}
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
@@ -2313,9 +2226,9 @@ static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (HAS_GMCH(dev_priv))
+ if (HAS_GMCH(display))
/* FIXME calculate proper pipe pixel rate for GMCH pfit */
crtc_state->pixel_rate =
crtc_state->hw.pipe_mode.crtc_clock;
@@ -2426,6 +2339,7 @@ static void intel_joiner_compute_pipe_src(struct intel_crtc_state *crtc_state)
static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
@@ -2439,7 +2353,7 @@ static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
*/
if (drm_rect_width(&crtc_state->pipe_src) & 1) {
if (crtc_state->double_wide) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
@@ -2447,7 +2361,7 @@ static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_is_dual_link_lvds(i915)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
@@ -2459,11 +2373,11 @@ static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
- int clock_limit = i915->display.cdclk.max_dotclk_freq;
+ int clock_limit = display->cdclk.max_dotclk_freq;
/*
* Start with the adjusted_mode crtc timings, which
@@ -2478,8 +2392,8 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
intel_joiner_adjust_timings(crtc_state, pipe_mode);
intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
- if (DISPLAY_VER(i915) < 4) {
- clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10;
+ if (DISPLAY_VER(display) < 4) {
+ clock_limit = display->cdclk.max_cdclk_freq * 9 / 10;
/*
* Enable double wide mode when the dot clock
@@ -2487,13 +2401,13 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
*/
if (intel_crtc_supports_double_wide(crtc) &&
pipe_mode->crtc_clock > clock_limit) {
- clock_limit = i915->display.cdclk.max_dotclk_freq;
+ clock_limit = display->cdclk.max_dotclk_freq;
crtc_state->double_wide = true;
}
}
if (pipe_mode->crtc_clock > clock_limit) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
crtc->base.base.id, crtc->base.name,
pipe_mode->crtc_clock, clock_limit,
@@ -2632,8 +2546,10 @@ intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes,
0x80000);
}
-void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
+void intel_panel_sanitize_ssc(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
/*
* There may be no VBT; and if the BIOS enabled SSC we can
* just keep using it to avoid unnecessary flicker. Whereas if the
@@ -2641,16 +2557,16 @@ void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
* indicates as much.
*/
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
- bool bios_lvds_use_ssc = intel_de_read(dev_priv,
+ bool bios_lvds_use_ssc = intel_de_read(display,
PCH_DREF_CONTROL) &
DREF_SSC1_ENABLE;
- if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) {
- drm_dbg_kms(&dev_priv->drm,
+ if (display->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
+ drm_dbg_kms(display->drm,
"SSC %s by BIOS, overriding VBT which says %s\n",
str_enabled_disabled(bios_lvds_use_ssc),
- str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc));
- dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc;
+ str_enabled_disabled(display->vbt.lvds_use_ssc));
+ display->vbt.lvds_use_ssc = bios_lvds_use_ssc;
}
}
}
@@ -2662,45 +2578,45 @@ void intel_zero_m_n(struct intel_link_m_n *m_n)
m_n->tu = 1;
}
-void intel_set_m_n(struct drm_i915_private *i915,
+void intel_set_m_n(struct intel_display *display,
const struct intel_link_m_n *m_n,
i915_reg_t data_m_reg, i915_reg_t data_n_reg,
i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
- intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
- intel_de_write(i915, data_n_reg, m_n->data_n);
- intel_de_write(i915, link_m_reg, m_n->link_m);
+ intel_de_write(display, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
+ intel_de_write(display, data_n_reg, m_n->data_n);
+ intel_de_write(display, link_m_reg, m_n->link_m);
/*
* On BDW+ writing LINK_N arms the double buffered update
* of all the M/N registers, so it must be written last.
*/
- intel_de_write(i915, link_n_reg, m_n->link_n);
+ intel_de_write(display, link_n_reg, m_n->link_n);
}
-bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
+bool intel_cpu_transcoder_has_m2_n2(struct intel_display *display,
enum transcoder transcoder)
{
- if (IS_HASWELL(dev_priv))
+ if (display->platform.haswell)
return transcoder == TRANSCODER_EDP;
- return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv);
+ return IS_DISPLAY_VER(display, 5, 7) || display->platform.cherryview;
}
void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
enum transcoder transcoder,
const struct intel_link_m_n *m_n)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
- if (DISPLAY_VER(dev_priv) >= 5)
- intel_set_m_n(dev_priv, m_n,
- PIPE_DATA_M1(dev_priv, transcoder),
- PIPE_DATA_N1(dev_priv, transcoder),
- PIPE_LINK_M1(dev_priv, transcoder),
- PIPE_LINK_N1(dev_priv, transcoder));
+ if (DISPLAY_VER(display) >= 5)
+ intel_set_m_n(display, m_n,
+ PIPE_DATA_M1(display, transcoder),
+ PIPE_DATA_N1(display, transcoder),
+ PIPE_LINK_M1(display, transcoder),
+ PIPE_LINK_N1(display, transcoder));
else
- intel_set_m_n(dev_priv, m_n,
+ intel_set_m_n(display, m_n,
PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
}
@@ -2709,29 +2625,29 @@ void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
enum transcoder transcoder,
const struct intel_link_m_n *m_n)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
+ if (!intel_cpu_transcoder_has_m2_n2(display, transcoder))
return;
- intel_set_m_n(dev_priv, m_n,
- PIPE_DATA_M2(dev_priv, transcoder),
- PIPE_DATA_N2(dev_priv, transcoder),
- PIPE_LINK_M2(dev_priv, transcoder),
- PIPE_LINK_N2(dev_priv, transcoder));
+ intel_set_m_n(display, m_n,
+ PIPE_DATA_M2(display, transcoder),
+ PIPE_DATA_N2(display, transcoder),
+ PIPE_LINK_M2(display, transcoder),
+ PIPE_LINK_N2(display, transcoder));
}
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
int vsyncshift = 0;
- drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder));
+ drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder));
/* We need to be careful not to changed the adjusted mode, for otherwise
* the hw state checker will get angry at the mismatch. */
@@ -2758,9 +2674,9 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
* VBLANK_START no longer works on ADL+, instead we must use
* TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start.
*/
- if (DISPLAY_VER(dev_priv) >= 13) {
- intel_de_write(dev_priv,
- TRANS_SET_CONTEXT_LATENCY(dev_priv, cpu_transcoder),
+ if (DISPLAY_VER(display) >= 13) {
+ intel_de_write(display,
+ TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder),
crtc_vblank_start - crtc_vdisplay);
/*
@@ -2770,28 +2686,28 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
crtc_vblank_start = 1;
}
- if (DISPLAY_VER(dev_priv) >= 4)
- intel_de_write(dev_priv,
- TRANS_VSYNCSHIFT(dev_priv, cpu_transcoder),
+ if (DISPLAY_VER(display) >= 4)
+ intel_de_write(display,
+ TRANS_VSYNCSHIFT(display, cpu_transcoder),
vsyncshift);
- intel_de_write(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_HTOTAL(display, cpu_transcoder),
HACTIVE(adjusted_mode->crtc_hdisplay - 1) |
HTOTAL(adjusted_mode->crtc_htotal - 1));
- intel_de_write(dev_priv, TRANS_HBLANK(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_HBLANK(display, cpu_transcoder),
HBLANK_START(adjusted_mode->crtc_hblank_start - 1) |
HBLANK_END(adjusted_mode->crtc_hblank_end - 1));
- intel_de_write(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_HSYNC(display, cpu_transcoder),
HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
HSYNC_END(adjusted_mode->crtc_hsync_end - 1));
- intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
- intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VBLANK(display, cpu_transcoder),
VBLANK_START(crtc_vblank_start - 1) |
VBLANK_END(crtc_vblank_end - 1));
- intel_de_write(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VSYNC(display, cpu_transcoder),
VSYNC_START(adjusted_mode->crtc_vsync_start - 1) |
VSYNC_END(adjusted_mode->crtc_vsync_end - 1));
@@ -2799,22 +2715,21 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
* programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
* documented on the DDI_FUNC_CTL register description, EDP Input Select
* bits. */
- if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
+ if (display->platform.haswell && cpu_transcoder == TRANSCODER_EDP &&
(pipe == PIPE_B || pipe == PIPE_C))
- intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, pipe),
+ intel_de_write(display, TRANS_VTOTAL(display, pipe),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
}
static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
- drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder));
+ drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder));
crtc_vdisplay = adjusted_mode->crtc_vdisplay;
crtc_vtotal = adjusted_mode->crtc_vtotal;
@@ -2827,9 +2742,9 @@ static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc
crtc_vblank_end -= 1;
}
- if (DISPLAY_VER(dev_priv) >= 13) {
- intel_de_write(dev_priv,
- TRANS_SET_CONTEXT_LATENCY(dev_priv, cpu_transcoder),
+ if (DISPLAY_VER(display) >= 13) {
+ intel_de_write(display,
+ TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder),
crtc_vblank_start - crtc_vdisplay);
/*
@@ -2843,22 +2758,22 @@ static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc
* The hardware actually ignores TRANS_VBLANK.VBLANK_END in DP mode.
* But let's write it anyway to keep the state checker happy.
*/
- intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VBLANK(display, cpu_transcoder),
VBLANK_START(crtc_vblank_start - 1) |
VBLANK_END(crtc_vblank_end - 1));
/*
* The double buffer latch point for TRANS_VTOTAL
* is the transcoder's undelayed vblank.
*/
- intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
}
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int width = drm_rect_width(&crtc_state->pipe_src);
int height = drm_rect_height(&crtc_state->pipe_src);
enum pipe pipe = crtc->pipe;
@@ -2866,63 +2781,62 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
/* pipesrc controls the size that is scaled from, which should
* always be the user's requested size.
*/
- intel_de_write(dev_priv, PIPESRC(dev_priv, pipe),
+ intel_de_write(display, PIPESRC(display, pipe),
PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
}
static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
return false;
- if (DISPLAY_VER(dev_priv) >= 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- return intel_de_read(dev_priv,
- TRANSCONF(dev_priv, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW;
+ if (DISPLAY_VER(display) >= 9 ||
+ display->platform.broadwell || display->platform.haswell)
+ return intel_de_read(display,
+ TRANSCONF(display, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW;
else
- return intel_de_read(dev_priv,
- TRANSCONF(dev_priv, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK;
+ return intel_de_read(display,
+ TRANSCONF(display, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK;
}
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
u32 tmp;
- tmp = intel_de_read(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display, TRANS_HTOTAL(display, cpu_transcoder));
adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1;
adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1;
if (!transcoder_is_dsi(cpu_transcoder)) {
- tmp = intel_de_read(dev_priv,
- TRANS_HBLANK(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display,
+ TRANS_HBLANK(display, cpu_transcoder));
adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1;
adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1;
}
- tmp = intel_de_read(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display, TRANS_HSYNC(display, cpu_transcoder));
adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1;
adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1;
- tmp = intel_de_read(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display, TRANS_VTOTAL(display, cpu_transcoder));
adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1;
adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1;
/* FIXME TGL+ DSI transcoders have this! */
if (!transcoder_is_dsi(cpu_transcoder)) {
- tmp = intel_de_read(dev_priv,
- TRANS_VBLANK(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display,
+ TRANS_VBLANK(display, cpu_transcoder));
adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1;
adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1;
}
- tmp = intel_de_read(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display, TRANS_VSYNC(display, cpu_transcoder));
adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1;
adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1;
@@ -2932,11 +2846,11 @@ static void intel_get_transcoder_timings(struct intel_crtc *crtc,
adjusted_mode->crtc_vblank_end += 1;
}
- if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder))
+ if (DISPLAY_VER(display) >= 13 && !transcoder_is_dsi(cpu_transcoder))
adjusted_mode->crtc_vblank_start =
adjusted_mode->crtc_vdisplay +
- intel_de_read(dev_priv,
- TRANS_SET_CONTEXT_LATENCY(dev_priv, cpu_transcoder));
+ intel_de_read(display,
+ TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder));
}
static void intel_joiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
@@ -2959,11 +2873,10 @@ static void intel_joiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
static void intel_get_pipe_src_size(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
u32 tmp;
- tmp = intel_de_read(dev_priv, PIPESRC(dev_priv, crtc->pipe));
+ tmp = intel_de_read(display, PIPESRC(display, crtc->pipe));
drm_rect_init(&pipe_config->pipe_src, 0, 0,
REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1,
@@ -2974,8 +2887,7 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc,
void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
@@ -2984,15 +2896,15 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
* - During modeset the pipe is still disabled and must remain so
* - During fastset the pipe is already enabled and must remain so
*/
- if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state))
+ if (display->platform.i830 || !intel_crtc_needs_modeset(crtc_state))
val |= TRANSCONF_ENABLE;
if (crtc_state->double_wide)
val |= TRANSCONF_DOUBLE_WIDE;
/* only g4x and later have fancy bpc/dither controls */
- if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.g4x || display->platform.valleyview ||
+ display->platform.cherryview) {
/* Bspec claims that we can't use dithering for 30bpp pipes. */
if (crtc_state->dither && crtc_state->pipe_bpp != 30)
val |= TRANSCONF_DITHER_EN |
@@ -3016,7 +2928,7 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
}
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
- if (DISPLAY_VER(dev_priv) < 4 ||
+ if (DISPLAY_VER(display) < 4 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION;
else
@@ -3025,8 +2937,8 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
val |= TRANSCONF_INTERLACE_PROGRESSIVE;
}
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- crtc_state->limited_color_range)
+ if ((display->platform.valleyview || display->platform.cherryview) &&
+ crtc_state->limited_color_range)
val |= TRANSCONF_COLOR_RANGE_SELECT;
val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);
@@ -3036,54 +2948,17 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
- intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
- intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
-}
-
-static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
-{
- if (IS_I830(dev_priv))
- return false;
-
- return DISPLAY_VER(dev_priv) >= 4 ||
- IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
-}
-
-static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe;
- u32 tmp;
-
- if (!i9xx_has_pfit(dev_priv))
- return;
-
- tmp = intel_de_read(dev_priv, PFIT_CONTROL(dev_priv));
- if (!(tmp & PFIT_ENABLE))
- return;
-
- /* Check whether the pfit is attached to our pipe. */
- if (DISPLAY_VER(dev_priv) >= 4)
- pipe = REG_FIELD_GET(PFIT_PIPE_MASK, tmp);
- else
- pipe = PIPE_B;
-
- if (pipe != crtc->pipe)
- return;
-
- crtc_state->gmch_pfit.control = tmp;
- crtc_state->gmch_pfit.pgm_ratios =
- intel_de_read(dev_priv, PFIT_PGM_RATIOS(dev_priv));
+ intel_de_write(display, TRANSCONF(display, cpu_transcoder), val);
+ intel_de_posting_read(display, TRANSCONF(display, cpu_transcoder));
}
static enum intel_output_format
bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
u32 tmp;
- tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
+ tmp = intel_de_read(display, PIPE_MISC(crtc->pipe));
if (tmp & PIPE_MISC_YUV420_ENABLE) {
/*
@@ -3091,8 +2966,8 @@ bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
* For xe3_lpd+ this is implied in YUV420 Enable bit.
* Ensure the same for prior platforms in YUV420 Mode bit.
*/
- if (DISPLAY_VER(dev_priv) < 30)
- drm_WARN_ON(&dev_priv->drm,
+ if (DISPLAY_VER(display) < 30)
+ drm_WARN_ON(display->drm,
(tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0);
return INTEL_OUTPUT_FORMAT_YCBCR420;
@@ -3107,31 +2982,28 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
+ enum transcoder cpu_transcoder = (enum transcoder)crtc->pipe;
intel_wakeref_t wakeref;
+ bool ret = false;
u32 tmp;
- bool ret;
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref)
return false;
- pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
- pipe_config->sink_format = pipe_config->output_format;
- pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
- pipe_config->shared_dpll = NULL;
-
- ret = false;
-
- tmp = intel_de_read(dev_priv,
- TRANSCONF(dev_priv, pipe_config->cpu_transcoder));
+ tmp = intel_de_read(display, TRANSCONF(display, cpu_transcoder));
if (!(tmp & TRANSCONF_ENABLE))
goto out;
- if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv)) {
+ pipe_config->cpu_transcoder = cpu_transcoder;
+
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ pipe_config->sink_format = pipe_config->output_format;
+
+ if (display->platform.g4x || display->platform.valleyview ||
+ display->platform.cherryview) {
switch (tmp & TRANSCONF_BPC_MASK) {
case TRANSCONF_BPC_6:
pipe_config->pipe_bpp = 18;
@@ -3148,7 +3020,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
}
}
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
(tmp & TRANSCONF_COLOR_RANGE_SELECT))
pipe_config->limited_color_range = true;
@@ -3156,29 +3028,29 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
(tmp & TRANSCONF_WGC_ENABLE))
pipe_config->wgc_enable = true;
intel_color_get_config(pipe_config);
- if (HAS_DOUBLE_WIDE(dev_priv))
+ if (HAS_DOUBLE_WIDE(display))
pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE;
intel_get_transcoder_timings(crtc, pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
- i9xx_get_pfit_config(pipe_config);
+ i9xx_pfit_get_config(pipe_config);
i9xx_dpll_get_hw_state(crtc, &pipe_config->dpll_hw_state);
- if (DISPLAY_VER(dev_priv) >= 4) {
+ if (DISPLAY_VER(display) >= 4) {
tmp = pipe_config->dpll_hw_state.i9xx.dpll_md;
pipe_config->pixel_multiplier =
((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
>> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
- } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
- IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
+ } else if (display->platform.i945g || display->platform.i945gm ||
+ display->platform.g33 || display->platform.pineview) {
tmp = pipe_config->dpll_hw_state.i9xx.dpll;
pipe_config->pixel_multiplier =
((tmp & SDVO_MULTIPLIER_MASK)
@@ -3190,9 +3062,9 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier = 1;
}
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
chv_crtc_clock_get(pipe_config);
- else if (IS_VALLEYVIEW(dev_priv))
+ else if (display->platform.valleyview)
vlv_crtc_clock_get(pipe_config);
else
i9xx_crtc_clock_get(pipe_config);
@@ -3215,8 +3087,7 @@ out:
void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
@@ -3258,7 +3129,7 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
* This would end up with an odd purple hue over
* the entire display. Make sure we don't do it.
*/
- drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
+ drm_WARN_ON(display->drm, crtc_state->limited_color_range &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->limited_color_range &&
@@ -3273,14 +3144,13 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);
- intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
- intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
+ intel_de_write(display, TRANSCONF(display, cpu_transcoder), val);
+ intel_de_posting_read(display, TRANSCONF(display, cpu_transcoder));
}
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
@@ -3291,7 +3161,7 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
if (!intel_crtc_needs_modeset(crtc_state))
val |= TRANSCONF_ENABLE;
- if (IS_HASWELL(dev_priv) && crtc_state->dither)
+ if (display->platform.haswell && crtc_state->dither)
val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
@@ -3299,20 +3169,19 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
else
val |= TRANSCONF_INTERLACE_PF_PD_ILK;
- if (IS_HASWELL(dev_priv) &&
+ if (display->platform.haswell &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW;
- intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
- intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
+ intel_de_write(display, TRANSCONF(display, cpu_transcoder), val);
+ intel_de_posting_read(display, TRANSCONF(display, cpu_transcoder));
}
static void bdw_set_pipe_misc(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_display *display = to_intel_display(crtc->base.dev);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 val = 0;
switch (crtc_state->pipe_bpp) {
@@ -3327,7 +3196,7 @@ static void bdw_set_pipe_misc(struct intel_dsb *dsb,
break;
case 36:
/* Port output 12BPC defined for ADLP+ */
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(display) >= 13)
val |= PIPE_MISC_BPC_12_ADLP;
break;
default:
@@ -3346,14 +3215,14 @@ static void bdw_set_pipe_misc(struct intel_dsb *dsb,
val |= DISPLAY_VER(display) >= 30 ? PIPE_MISC_YUV420_ENABLE :
PIPE_MISC_YUV420_ENABLE | PIPE_MISC_YUV420_MODE_FULL_BLEND;
- if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
+ if (DISPLAY_VER(display) >= 11 && is_hdr_mode(crtc_state))
val |= PIPE_MISC_HDR_MODE_PRECISION;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC;
/* allow PSR with sprite enabled */
- if (IS_BROADWELL(dev_priv))
+ if (display->platform.broadwell)
val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE;
intel_de_write_dsb(display, dsb, PIPE_MISC(crtc->pipe), val);
@@ -3361,10 +3230,10 @@ static void bdw_set_pipe_misc(struct intel_dsb *dsb,
int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
u32 tmp;
- tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
+ tmp = intel_de_read(display, PIPE_MISC(crtc->pipe));
switch (tmp & PIPE_MISC_BPC_MASK) {
case PIPE_MISC_BPC_6:
@@ -3384,7 +3253,7 @@ int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc)
* MIPI DSI HW readout.
*/
case PIPE_MISC_BPC_12_ADLP:
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return 36;
fallthrough;
default:
@@ -3404,33 +3273,33 @@ int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
return DIV_ROUND_UP(bps, link_bw * 8);
}
-void intel_get_m_n(struct drm_i915_private *i915,
+void intel_get_m_n(struct intel_display *display,
struct intel_link_m_n *m_n,
i915_reg_t data_m_reg, i915_reg_t data_n_reg,
i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
- m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK;
- m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK;
- m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK;
- m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK;
- m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1;
+ m_n->link_m = intel_de_read(display, link_m_reg) & DATA_LINK_M_N_MASK;
+ m_n->link_n = intel_de_read(display, link_n_reg) & DATA_LINK_M_N_MASK;
+ m_n->data_m = intel_de_read(display, data_m_reg) & DATA_LINK_M_N_MASK;
+ m_n->data_n = intel_de_read(display, data_n_reg) & DATA_LINK_M_N_MASK;
+ m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(display, data_m_reg)) + 1;
}
void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
enum transcoder transcoder,
struct intel_link_m_n *m_n)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
- if (DISPLAY_VER(dev_priv) >= 5)
- intel_get_m_n(dev_priv, m_n,
- PIPE_DATA_M1(dev_priv, transcoder),
- PIPE_DATA_N1(dev_priv, transcoder),
- PIPE_LINK_M1(dev_priv, transcoder),
- PIPE_LINK_N1(dev_priv, transcoder));
+ if (DISPLAY_VER(display) >= 5)
+ intel_get_m_n(display, m_n,
+ PIPE_DATA_M1(display, transcoder),
+ PIPE_DATA_N1(display, transcoder),
+ PIPE_LINK_M1(display, transcoder),
+ PIPE_LINK_N1(display, transcoder));
else
- intel_get_m_n(dev_priv, m_n,
+ intel_get_m_n(display, m_n,
PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
}
@@ -3439,78 +3308,39 @@ void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
enum transcoder transcoder,
struct intel_link_m_n *m_n)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
- return;
-
- intel_get_m_n(dev_priv, m_n,
- PIPE_DATA_M2(dev_priv, transcoder),
- PIPE_DATA_N2(dev_priv, transcoder),
- PIPE_LINK_M2(dev_priv, transcoder),
- PIPE_LINK_N2(dev_priv, transcoder));
-}
-
-static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 ctl, pos, size;
- enum pipe pipe;
+ struct intel_display *display = to_intel_display(crtc);
- ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
- if ((ctl & PF_ENABLE) == 0)
+ if (!intel_cpu_transcoder_has_m2_n2(display, transcoder))
return;
- if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
- pipe = REG_FIELD_GET(PF_PIPE_SEL_MASK_IVB, ctl);
- else
- pipe = crtc->pipe;
-
- crtc_state->pch_pfit.enabled = true;
-
- pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
- size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
-
- drm_rect_init(&crtc_state->pch_pfit.dst,
- REG_FIELD_GET(PF_WIN_XPOS_MASK, pos),
- REG_FIELD_GET(PF_WIN_YPOS_MASK, pos),
- REG_FIELD_GET(PF_WIN_XSIZE_MASK, size),
- REG_FIELD_GET(PF_WIN_YSIZE_MASK, size));
-
- /*
- * We currently do not free assignments of panel fitters on
- * ivb/hsw (since we don't use the higher upscaling modes which
- * differentiates them) so just WARN about this case for now.
- */
- drm_WARN_ON(&dev_priv->drm, pipe != crtc->pipe);
+ intel_get_m_n(display, m_n,
+ PIPE_DATA_M2(display, transcoder),
+ PIPE_DATA_N2(display, transcoder),
+ PIPE_LINK_M2(display, transcoder),
+ PIPE_LINK_N2(display, transcoder));
}
static bool ilk_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
+ enum transcoder cpu_transcoder = (enum transcoder)crtc->pipe;
intel_wakeref_t wakeref;
+ bool ret = false;
u32 tmp;
- bool ret;
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref)
return false;
- pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
- pipe_config->shared_dpll = NULL;
-
- ret = false;
- tmp = intel_de_read(dev_priv,
- TRANSCONF(dev_priv, pipe_config->cpu_transcoder));
+ tmp = intel_de_read(display, TRANSCONF(display, cpu_transcoder));
if (!(tmp & TRANSCONF_ENABLE))
goto out;
+ pipe_config->cpu_transcoder = cpu_transcoder;
+
switch (tmp & TRANSCONF_BPC_MASK) {
case TRANSCONF_BPC_6:
pipe_config->pipe_bpp = 18;
@@ -3558,7 +3388,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
intel_get_transcoder_timings(crtc, pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
- ilk_get_pfit_config(pipe_config);
+ ilk_pfit_get_config(pipe_config);
ret = true;
@@ -3568,24 +3398,23 @@ out:
return ret;
}
-static u8 joiner_pipes(struct drm_i915_private *i915)
+static u8 joiner_pipes(struct intel_display *display)
{
u8 pipes;
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
- else if (DISPLAY_VER(i915) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
pipes = BIT(PIPE_B) | BIT(PIPE_C);
else
pipes = 0;
- return pipes & DISPLAY_RUNTIME_INFO(i915)->pipe_mask;
+ return pipes & DISPLAY_RUNTIME_INFO(display)->pipe_mask;
}
-static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
+static bool transcoder_ddi_func_is_enabled(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- struct intel_display *display = &dev_priv->display;
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
u32 tmp = 0;
@@ -3593,8 +3422,8 @@ static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
with_intel_display_power_if_enabled(display, power_domain, wakeref)
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
return tmp & TRANS_DDI_FUNC_ENABLE;
}
@@ -3602,7 +3431,6 @@ static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
static void enabled_uncompressed_joiner_pipes(struct intel_display *display,
u8 *primary_pipes, u8 *secondary_pipes)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_crtc *crtc;
*primary_pipes = 0;
@@ -3611,8 +3439,8 @@ static void enabled_uncompressed_joiner_pipes(struct intel_display *display,
if (!HAS_UNCOMPRESSED_JOINER(display))
return;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
- joiner_pipes(i915)) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc,
+ joiner_pipes(display)) {
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
@@ -3632,7 +3460,6 @@ static void enabled_uncompressed_joiner_pipes(struct intel_display *display,
static void enabled_bigjoiner_pipes(struct intel_display *display,
u8 *primary_pipes, u8 *secondary_pipes)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_crtc *crtc;
*primary_pipes = 0;
@@ -3641,8 +3468,8 @@ static void enabled_bigjoiner_pipes(struct intel_display *display,
if (!HAS_BIGJOINER(display))
return;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
- joiner_pipes(i915)) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc,
+ joiner_pipes(display)) {
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
@@ -3700,10 +3527,9 @@ static u8 fixup_ultrajoiner_secondary_pipes(u8 ultrajoiner_primary_pipes,
return ultrajoiner_secondary_pipes | ultrajoiner_primary_pipes << 3;
}
-static void enabled_ultrajoiner_pipes(struct drm_i915_private *i915,
+static void enabled_ultrajoiner_pipes(struct intel_display *display,
u8 *primary_pipes, u8 *secondary_pipes)
{
- struct intel_display *display = &i915->display;
struct intel_crtc *crtc;
*primary_pipes = 0;
@@ -3712,15 +3538,15 @@ static void enabled_ultrajoiner_pipes(struct drm_i915_private *i915,
if (!HAS_ULTRAJOINER(display))
return;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
- joiner_pipes(i915)) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc,
+ joiner_pipes(display)) {
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe);
with_intel_display_power_if_enabled(display, power_domain, wakeref) {
- u32 tmp = intel_de_read(i915, ICL_PIPE_DSS_CTL1(pipe));
+ u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));
if (!(tmp & ULTRA_JOINER_ENABLE))
continue;
@@ -3733,11 +3559,10 @@ static void enabled_ultrajoiner_pipes(struct drm_i915_private *i915,
}
}
-static void enabled_joiner_pipes(struct drm_i915_private *dev_priv,
+static void enabled_joiner_pipes(struct intel_display *display,
enum pipe pipe,
u8 *primary_pipe, u8 *secondary_pipes)
{
- struct intel_display *display = to_intel_display(&dev_priv->drm);
u8 primary_ultrajoiner_pipes;
u8 primary_uncompressed_joiner_pipes, primary_bigjoiner_pipes;
u8 secondary_ultrajoiner_pipes;
@@ -3745,21 +3570,21 @@ static void enabled_joiner_pipes(struct drm_i915_private *dev_priv,
u8 ultrajoiner_pipes;
u8 uncompressed_joiner_pipes, bigjoiner_pipes;
- enabled_ultrajoiner_pipes(dev_priv, &primary_ultrajoiner_pipes,
+ enabled_ultrajoiner_pipes(display, &primary_ultrajoiner_pipes,
&secondary_ultrajoiner_pipes);
/*
* For some strange reason the last pipe in the set of four
* shouldn't have ultrajoiner enable bit set in hardware.
* Set the bit anyway to make life easier.
*/
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
expected_secondary_pipes(primary_ultrajoiner_pipes, 3) !=
secondary_ultrajoiner_pipes);
secondary_ultrajoiner_pipes =
fixup_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes,
secondary_ultrajoiner_pipes);
- drm_WARN_ON(&dev_priv->drm, (primary_ultrajoiner_pipes & secondary_ultrajoiner_pipes) != 0);
+ drm_WARN_ON(display->drm, (primary_ultrajoiner_pipes & secondary_ultrajoiner_pipes) != 0);
enabled_uncompressed_joiner_pipes(display, &primary_uncompressed_joiner_pipes,
&secondary_uncompressed_joiner_pipes);
@@ -3853,11 +3678,11 @@ static void enabled_joiner_pipes(struct drm_i915_private *dev_priv,
}
}
-static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
+static u8 hsw_panel_transcoders(struct intel_display *display)
{
u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
return panel_transcoder_mask;
@@ -3866,9 +3691,7 @@ static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
+ u8 panel_transcoder_mask = hsw_panel_transcoders(display);
enum transcoder cpu_transcoder;
u8 primary_pipe, secondary_pipes;
u8 enabled_transcoders = 0;
@@ -3877,7 +3700,7 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
* XXX: Do intel_display_power_get_if_enabled before reading this (for
* consistency and less surprising code; it's in always on power).
*/
- for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
+ for_each_cpu_transcoder_masked(display, cpu_transcoder,
panel_transcoder_mask) {
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
@@ -3886,15 +3709,15 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
with_intel_display_power_if_enabled(display, power_domain, wakeref)
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
if (!(tmp & TRANS_DDI_FUNC_ENABLE))
continue;
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
- drm_WARN(dev, 1,
+ drm_WARN(display->drm, 1,
"unknown pipe linked to transcoder %s\n",
transcoder_name(cpu_transcoder));
fallthrough;
@@ -3919,14 +3742,14 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
/* single pipe or joiner primary */
cpu_transcoder = (enum transcoder) crtc->pipe;
- if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
+ if (transcoder_ddi_func_is_enabled(display, cpu_transcoder))
enabled_transcoders |= BIT(cpu_transcoder);
/* joiner secondary -> consider the primary pipe's transcoder as well */
- enabled_joiner_pipes(dev_priv, crtc->pipe, &primary_pipe, &secondary_pipes);
+ enabled_joiner_pipes(display, crtc->pipe, &primary_pipe, &secondary_pipes);
if (secondary_pipes & BIT(crtc->pipe)) {
cpu_transcoder = (enum transcoder)ffs(primary_pipe) - 1;
- if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
+ if (transcoder_ddi_func_is_enabled(display, cpu_transcoder))
enabled_transcoders |= BIT(cpu_transcoder);
}
@@ -3951,17 +3774,17 @@ static bool has_pipe_transcoders(u8 enabled_transcoders)
BIT(TRANSCODER_DSI_1));
}
-static void assert_enabled_transcoders(struct drm_i915_private *i915,
+static void assert_enabled_transcoders(struct intel_display *display,
u8 enabled_transcoders)
{
/* Only one type of transcoder please */
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
has_edp_transcoders(enabled_transcoders) +
has_dsi_transcoders(enabled_transcoders) +
has_pipe_transcoders(enabled_transcoders) > 1);
/* Only DSI transcoders can be ganged */
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
!has_dsi_transcoders(enabled_transcoders) &&
!is_power_of_2(enabled_transcoders));
}
@@ -3971,8 +3794,6 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct intel_display_power_domain_set *power_domain_set)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long enabled_transcoders;
u32 tmp;
@@ -3980,7 +3801,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
if (!enabled_transcoders)
return false;
- assert_enabled_transcoders(dev_priv, enabled_transcoders);
+ assert_enabled_transcoders(display, enabled_transcoders);
/*
* With the exception of DSI we should only ever have
@@ -3993,16 +3814,16 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;
- if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, pipe_config->cpu_transcoder));
+ if (hsw_panel_transcoders(display) & BIT(pipe_config->cpu_transcoder)) {
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, pipe_config->cpu_transcoder));
if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
pipe_config->pch_pfit.force_thru = true;
}
- tmp = intel_de_read(dev_priv,
- TRANSCONF(dev_priv, pipe_config->cpu_transcoder));
+ tmp = intel_de_read(display,
+ TRANSCONF(display, pipe_config->cpu_transcoder));
return tmp & TRANSCONF_ENABLE;
}
@@ -4055,12 +3876,12 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
static void intel_joiner_get_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u8 primary_pipe, secondary_pipes;
enum pipe pipe = crtc->pipe;
- enabled_joiner_pipes(i915, pipe, &primary_pipe, &secondary_pipes);
+ enabled_joiner_pipes(display, pipe, &primary_pipe, &secondary_pipes);
if (((primary_pipe | secondary_pipes) & BIT(pipe)) == 0)
return;
@@ -4072,7 +3893,6 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bool active;
u32 tmp;
@@ -4080,13 +3900,11 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
- pipe_config->shared_dpll = NULL;
-
active = hsw_get_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains);
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ if ((display->platform.geminilake || display->platform.broxton) &&
bxt_get_dsi_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains)) {
- drm_WARN_ON(&dev_priv->drm, active);
+ drm_WARN_ON(display->drm, active);
active = true;
}
@@ -4097,17 +3915,17 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
intel_dsc_get_config(pipe_config);
if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
- DISPLAY_VER(dev_priv) >= 11)
+ DISPLAY_VER(display) >= 11)
intel_get_transcoder_timings(crtc, pipe_config);
- if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
+ if (HAS_VRR(display) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
intel_vrr_get_config(pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
- if (IS_HASWELL(dev_priv)) {
- u32 tmp = intel_de_read(dev_priv,
- TRANSCONF(dev_priv, pipe_config->cpu_transcoder));
+ if (display->platform.haswell) {
+ u32 tmp = intel_de_read(display,
+ TRANSCONF(display, pipe_config->cpu_transcoder));
if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW)
pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
@@ -4122,18 +3940,18 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
intel_color_get_config(pipe_config);
- tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
+ tmp = intel_de_read(display, WM_LINETIME(crtc->pipe));
pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ if (display->platform.broadwell || display->platform.haswell)
pipe_config->ips_linetime =
REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
if (intel_display_power_get_in_set_if_enabled(display, &crtc->hw_readout_power_domains,
POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
skl_scaler_get_config(pipe_config);
else
- ilk_get_pfit_config(pipe_config);
+ ilk_pfit_get_config(pipe_config);
}
hsw_ips_get_config(pipe_config);
@@ -4141,8 +3959,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
pipe_config->pixel_multiplier =
- intel_de_read(dev_priv,
- TRANS_MULT(dev_priv, pipe_config->cpu_transcoder)) + 1;
+ intel_de_read(display,
+ TRANS_MULT(display, pipe_config->cpu_transcoder)) + 1;
} else {
pipe_config->pixel_multiplier = 1;
}
@@ -4164,10 +3982,10 @@ out:
bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state))
+ if (!display->funcs.display->get_pipe_config(crtc, crtc_state))
return false;
crtc_state->hw.active = true;
@@ -4326,6 +4144,7 @@ static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *pipe_mode =
@@ -4339,7 +4158,7 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
crtc_state->pixel_rate);
/* Display WA #1135: BXT:ALL GLK:ALL */
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ if ((display->platform.geminilake || display->platform.broxton) &&
skl_watermark_ipc_enabled(dev_priv))
linetime_wm /= 2;
@@ -4349,12 +4168,12 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_cdclk_state *cdclk_state;
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
crtc_state->linetime = skl_linetime_wm(crtc_state);
else
crtc_state->linetime = hsw_linetime_wm(crtc_state);
@@ -4376,12 +4195,11 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
+ if (DISPLAY_VER(display) < 5 && !display->platform.g4x &&
intel_crtc_needs_modeset(crtc_state) &&
!crtc_state->hw.active)
crtc_state->update_wm_post = true;
@@ -4398,13 +4216,13 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
ret = intel_wm_compute(state, crtc);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] watermarks are invalid\n",
crtc->base.base.id, crtc->base.name);
return ret;
}
- if (DISPLAY_VER(dev_priv) >= 9) {
+ if (DISPLAY_VER(display) >= 9) {
if (intel_crtc_needs_modeset(crtc_state) ||
intel_crtc_needs_fastset(crtc_state)) {
ret = skl_update_scaler_crtc(crtc_state);
@@ -4423,8 +4241,8 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
return ret;
}
- if (DISPLAY_VER(dev_priv) >= 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ if (DISPLAY_VER(display) >= 9 ||
+ display->platform.broadwell || display->platform.haswell) {
ret = hsw_compute_linetime_wm(state, crtc);
if (ret)
return ret;
@@ -4442,8 +4260,8 @@ static int
compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_connector *connector = conn_state->connector;
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_display_info *info = &connector->display_info;
int bpp;
@@ -4466,7 +4284,7 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
}
if (bpp < crtc_state->pipe_bpp) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Limiting display bpp to %d "
"(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n",
connector->base.id, connector->name,
@@ -4484,17 +4302,17 @@ static int
compute_baseline_pipe_bpp(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int bpp, i;
- if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv)))
+ if (display->platform.g4x || display->platform.valleyview ||
+ display->platform.cherryview)
bpp = 10*3;
- else if (DISPLAY_VER(dev_priv) >= 5)
+ else if (DISPLAY_VER(display) >= 5)
bpp = 12*3;
else
bpp = 8*3;
@@ -4518,7 +4336,7 @@ compute_baseline_pipe_bpp(struct intel_atomic_state *state,
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->base.dev;
+ struct intel_display *display = to_intel_display(state);
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
unsigned int used_ports = 0;
@@ -4529,14 +4347,14 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
* We're going to peek into connector->state,
* hence connection_mutex must be held.
*/
- drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
+ drm_modeset_lock_assert_held(&display->drm->mode_config.connection_mutex);
/*
* Walk the connector list instead of the encoder
* list to detect the problem on ddi platforms
* where there's just one encoder per digital port.
*/
- drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
@@ -4552,11 +4370,11 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
encoder = to_intel_encoder(connector_state->best_encoder);
- drm_WARN_ON(dev, !connector_state->crtc);
+ drm_WARN_ON(display->drm, !connector_state->crtc);
switch (encoder->type) {
case INTEL_OUTPUT_DDI:
- if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
+ if (drm_WARN_ON(display->drm, !HAS_DDI(display)))
break;
fallthrough;
case INTEL_OUTPUT_DP:
@@ -4704,9 +4522,9 @@ static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *saved_state;
saved_state = intel_crtc_state_alloc(crtc);
@@ -4731,8 +4549,8 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
sizeof(saved_state->icl_port_dplls));
saved_state->crc_enabled = crtc_state->crc_enabled;
- if (IS_G4X(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.g4x ||
+ display->platform.valleyview || display->platform.cherryview)
saved_state->wm = crtc_state->wm;
memcpy(crtc_state, saved_state, sizeof(*crtc_state));
@@ -4748,7 +4566,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
struct intel_crtc *crtc,
const struct intel_link_bw_limits *limits)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_connector *connector;
@@ -4781,7 +4599,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe];
if (crtc_state->pipe_bpp > fxp_q4_to_int(crtc_state->max_link_bpp_x16)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Link bpp limited to " FXP_Q4_FMT "\n",
crtc->base.base.id, crtc->base.name,
FXP_Q4_ARGS(crtc_state->max_link_bpp_x16));
@@ -4811,7 +4629,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
continue;
if (!check_single_encoder_cloning(state, crtc, encoder)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] rejecting invalid cloning configuration\n",
encoder->base.base.id, encoder->base.name);
return -EINVAL;
@@ -4853,7 +4671,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
if (ret == -EDEADLK)
return ret;
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] config failure: %d\n",
encoder->base.base.id, encoder->base.name, ret);
return ret;
}
@@ -4869,7 +4687,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
if (ret == -EDEADLK)
return ret;
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n",
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] config failure: %d\n",
crtc->base.base.id, crtc->base.name, ret);
return ret;
}
@@ -4880,7 +4698,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
*/
crtc_state->dither = (crtc_state->pipe_bpp == 6*3) &&
!crtc_state->dither_force_disable;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
crtc->base.base.id, crtc->base.name,
base_bpp, crtc_state->pipe_bpp, crtc_state->dither);
@@ -5012,7 +4830,7 @@ pipe_config_infoframe_mismatch(struct drm_printer *p, bool fastset,
const union hdmi_infoframe *a,
const union hdmi_infoframe *b)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const char *loglevel;
if (fastset) {
@@ -5027,9 +4845,9 @@ pipe_config_infoframe_mismatch(struct drm_printer *p, bool fastset,
pipe_config_mismatch(p, fastset, crtc, name, "infoframe");
drm_printf(p, "expected:\n");
- hdmi_infoframe_log(loglevel, i915->drm.dev, a);
+ hdmi_infoframe_log(loglevel, display->drm->dev, a);
drm_printf(p, "found:\n");
- hdmi_infoframe_log(loglevel, i915->drm.dev, b);
+ hdmi_infoframe_log(loglevel, display->drm->dev, b);
}
static void
@@ -5145,16 +4963,15 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
bool fastset)
{
struct intel_display *display = to_intel_display(current_config);
- struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_printer p;
u32 exclude_infoframes = 0;
bool ret = true;
if (fastset)
- p = drm_dbg_printer(&dev_priv->drm, DRM_UT_KMS, NULL);
+ p = drm_dbg_printer(display->drm, DRM_UT_KMS, NULL);
else
- p = drm_err_printer(&dev_priv->drm, NULL);
+ p = drm_err_printer(display->drm, NULL);
#define PIPE_CONF_CHECK_X(name) do { \
if (current_config->name != pipe_config->name) { \
@@ -5421,8 +5238,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(output_format);
PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
- if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if ((DISPLAY_VER(display) < 8 && !display->platform.haswell) ||
+ display->platform.valleyview || display->platform.cherryview)
PIPE_CONF_CHECK_BOOL(limited_color_range);
PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
@@ -5438,7 +5255,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_X(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
- if (DISPLAY_VER(dev_priv) < 4)
+ if (DISPLAY_VER(display) < 4)
PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
@@ -5458,7 +5275,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(pixel_rate);
PIPE_CONF_CHECK_X(gamma_mode);
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
PIPE_CONF_CHECK_X(cgm_mode);
else
PIPE_CONF_CHECK_X(csc_mode);
@@ -5478,21 +5295,21 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(double_wide);
- if (dev_priv->display.dpll.mgr)
+ if (display->dpll.mgr)
PIPE_CONF_CHECK_P(shared_dpll);
/* FIXME convert everything over the dpll_mgr */
- if (dev_priv->display.dpll.mgr || HAS_GMCH(dev_priv))
+ if (display->dpll.mgr || HAS_GMCH(display))
PIPE_CONF_CHECK_PLL(dpll_hw_state);
/* FIXME convert MTL+ platforms over to dpll_mgr */
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
PIPE_CONF_CHECK_PLL_CX0(dpll_hw_state.cx0pll);
PIPE_CONF_CHECK_X(dsi_pll.ctrl);
PIPE_CONF_CHECK_X(dsi_pll.div);
- if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
+ if (display->platform.g4x || DISPLAY_VER(display) >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
if (!fastset || !pipe_config->update_m_n) {
@@ -5608,11 +5425,11 @@ static int intel_modeset_pipe(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state,
const char *reason)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
int ret;
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Full modeset due to %s\n",
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] Full modeset due to %s\n",
crtc->base.base.id, crtc->base.name, reason);
ret = drm_atomic_add_affected_connectors(&state->base,
@@ -5652,10 +5469,10 @@ static int intel_modeset_pipe(struct intel_atomic_state *state,
int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
const char *reason, u8 mask)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, mask) {
struct intel_crtc_state *crtc_state;
int ret;
@@ -5699,10 +5516,10 @@ intel_crtc_flag_modeset(struct intel_crtc_state *crtc_state)
int intel_modeset_all_pipes_late(struct intel_atomic_state *state,
const char *reason)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state;
int ret;
@@ -5728,7 +5545,7 @@ int intel_modeset_all_pipes_late(struct intel_atomic_state *state,
return 0;
}
-int intel_modeset_commit_pipes(struct drm_i915_private *i915,
+int intel_modeset_commit_pipes(struct intel_display *display,
u8 pipe_mask,
struct drm_modeset_acquire_ctx *ctx)
{
@@ -5736,14 +5553,14 @@ int intel_modeset_commit_pipes(struct drm_i915_private *i915,
struct intel_crtc *crtc;
int ret;
- state = drm_atomic_state_alloc(&i915->drm);
+ state = drm_atomic_state_alloc(display->drm);
if (!state)
return -ENOMEM;
state->acquire_ctx = ctx;
to_intel_atomic_state(state)->internal = true;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
struct intel_crtc_state *crtc_state =
intel_atomic_get_crtc_state(state, crtc);
@@ -5842,11 +5659,11 @@ u8 intel_calc_active_pipes(struct intel_atomic_state *state,
static int intel_modeset_checks(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
state->modeset = true;
- if (IS_HASWELL(dev_priv))
+ if (display->platform.haswell)
return hsw_mode_set_planes_workaround(state);
return 0;
@@ -5863,15 +5680,15 @@ static bool lrr_params_changed(const struct drm_display_mode *old_adjusted_mode,
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
+ struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
/* only allow LRR when the timings stay within the VRR range */
if (old_crtc_state->vrr.in_range != new_crtc_state->vrr.in_range)
new_crtc_state->update_lrr = false;
if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) {
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] fastset requirement not met, forcing full modeset\n",
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] fastset requirement not met, forcing full modeset\n",
crtc->base.base.id, crtc->base.name);
} else {
if (allow_vblank_delay_fastset(old_crtc_state))
@@ -5895,17 +5712,17 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state __maybe_unused *crtc_state;
struct intel_crtc *crtc;
int i;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
int ret;
ret = intel_crtc_atomic_check(state, crtc);
if (ret) {
- drm_dbg_atomic(&i915->drm,
+ drm_dbg_atomic(display->drm,
"[CRTC:%d:%s] atomic driver check failed\n",
crtc->base.base.id, crtc->base.name);
return ret;
@@ -5952,7 +5769,7 @@ static bool intel_pipes_need_modeset(struct intel_atomic_state *state,
static int intel_atomic_check_joiner(struct intel_atomic_state *state,
struct intel_crtc *primary_crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *primary_crtc_state =
intel_atomic_get_new_crtc_state(state, primary_crtc);
struct intel_crtc *secondary_crtc;
@@ -5961,20 +5778,20 @@ static int intel_atomic_check_joiner(struct intel_atomic_state *state,
return 0;
/* sanity check */
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
primary_crtc->pipe != joiner_primary_pipe(primary_crtc_state)))
return -EINVAL;
- if (primary_crtc_state->joiner_pipes & ~joiner_pipes(i915)) {
- drm_dbg_kms(&i915->drm,
+ if (primary_crtc_state->joiner_pipes & ~joiner_pipes(display)) {
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Cannot act as joiner primary "
"(need 0x%x as pipes, only 0x%x possible)\n",
primary_crtc->base.base.id, primary_crtc->base.name,
- primary_crtc_state->joiner_pipes, joiner_pipes(i915));
+ primary_crtc_state->joiner_pipes, joiner_pipes(display));
return -EINVAL;
}
- for_each_intel_crtc_in_pipe_mask(&i915->drm, secondary_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, secondary_crtc,
intel_crtc_joiner_secondary_pipes(primary_crtc_state)) {
struct intel_crtc_state *secondary_crtc_state;
int ret;
@@ -5985,7 +5802,7 @@ static int intel_atomic_check_joiner(struct intel_atomic_state *state,
/* primary being enabled, secondary was already configured? */
if (secondary_crtc_state->uapi.enable) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] secondary is enabled as normal CRTC, but "
"[CRTC:%d:%s] claiming this CRTC for joiner.\n",
secondary_crtc->base.base.id, secondary_crtc->base.name,
@@ -6004,7 +5821,7 @@ static int intel_atomic_check_joiner(struct intel_atomic_state *state,
drm_crtc_index(&secondary_crtc->base)))
return -EINVAL;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Used as secondary for joiner primary [CRTC:%d:%s]\n",
secondary_crtc->base.base.id, secondary_crtc->base.name,
primary_crtc->base.base.id, primary_crtc->base.name);
@@ -6023,12 +5840,12 @@ static int intel_atomic_check_joiner(struct intel_atomic_state *state,
static void kill_joiner_secondaries(struct intel_atomic_state *state,
struct intel_crtc *primary_crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *primary_crtc_state =
intel_atomic_get_new_crtc_state(state, primary_crtc);
struct intel_crtc *secondary_crtc;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, secondary_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, secondary_crtc,
intel_crtc_joiner_secondary_pipes(primary_crtc_state)) {
struct intel_crtc_state *secondary_crtc_state =
intel_atomic_get_new_crtc_state(state, secondary_crtc);
@@ -6062,7 +5879,7 @@ static void kill_joiner_secondaries(struct intel_atomic_state *state,
static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_plane_state *old_plane_state;
@@ -6074,14 +5891,14 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
return 0;
if (!new_crtc_state->uapi.active) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] not active\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
if (intel_crtc_needs_modeset(new_crtc_state)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] modeset required\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
@@ -6092,7 +5909,7 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
* Remove this check once the issues are fixed.
*/
if (new_crtc_state->joiner_pipes) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] async flip disallowed with joiner\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
@@ -6111,14 +5928,14 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
* enabled in the atomic IOCTL path.
*/
if (!plane->async_flip) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] async flip not supported\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] no old or new framebuffer\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6130,7 +5947,7 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
const struct intel_plane_state *new_plane_state, *old_plane_state;
struct intel_plane *plane;
@@ -6143,21 +5960,21 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
return 0;
if (!new_crtc_state->hw.active) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] not active\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
if (intel_crtc_needs_modeset(new_crtc_state)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] modeset required\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Active planes cannot be in async flip\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
@@ -6173,7 +5990,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
* if we're really about to ask the hardware to perform
* an async flip. We should never get this far otherwise.
*/
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
new_crtc_state->do_async_flip && !plane->async_flip))
return -EINVAL;
@@ -6189,7 +6006,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
continue;
if (!intel_plane_can_async_flip(plane, new_plane_state->hw.fb->modifier)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Modifier 0x%llx does not support async flip\n",
plane->base.base.id, plane->base.name,
new_plane_state->hw.fb->modifier);
@@ -6198,7 +6015,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (intel_format_info_is_yuv_semiplanar(new_plane_state->hw.fb->format,
new_plane_state->hw.fb->modifier)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Planar formats do not support async flips\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6213,7 +6030,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (old_plane_state->view.color_plane[0].mapping_stride !=
new_plane_state->view.color_plane[0].mapping_stride) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Stride cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6221,7 +6038,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (old_plane_state->hw.fb->modifier !=
new_plane_state->hw.fb->modifier) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Modifier cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6229,7 +6046,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (old_plane_state->hw.fb->format !=
new_plane_state->hw.fb->format) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Pixel format cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6237,7 +6054,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (old_plane_state->hw.rotation !=
new_plane_state->hw.rotation) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Rotation cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6245,7 +6062,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (skl_plane_aux_dist(old_plane_state, 0) !=
skl_plane_aux_dist(new_plane_state, 0)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] AUX_DIST cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6253,14 +6070,14 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
!drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Size/co-ordinates cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANES:%d:%s] Alpha value cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6268,21 +6085,21 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (old_plane_state->hw.pixel_blend_mode !=
new_plane_state->hw.pixel_blend_mode) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Color encoding cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Color range cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6290,7 +6107,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
/* plane decryption is allow to change only in synchronous flips */
if (old_plane_state->decrypt != new_plane_state->decrypt) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] Decryption cannot be changed in async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
@@ -6302,7 +6119,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_plane_state *plane_state;
struct intel_crtc_state *crtc_state;
struct intel_plane *plane;
@@ -6333,13 +6150,13 @@ static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state)
modeset_pipes |= crtc_state->joiner_pipes;
}
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, affected_pipes) {
crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
}
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, modeset_pipes) {
int ret;
crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
@@ -6369,7 +6186,7 @@ static int intel_atomic_check_config(struct intel_atomic_state *state,
struct intel_link_bw_limits *limits,
enum pipe *failed_pipe)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int ret;
@@ -6394,7 +6211,7 @@ static int intel_atomic_check_config(struct intel_atomic_state *state,
continue;
}
- if (drm_WARN_ON(&i915->drm, intel_crtc_is_joiner_secondary(new_crtc_state)))
+ if (drm_WARN_ON(display->drm, intel_crtc_is_joiner_secondary(new_crtc_state)))
continue;
ret = intel_crtc_prepare_cleared_state(state, crtc);
@@ -6413,7 +6230,7 @@ static int intel_atomic_check_config(struct intel_atomic_state *state,
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
- if (drm_WARN_ON(&i915->drm, intel_crtc_is_joiner_secondary(new_crtc_state)))
+ if (drm_WARN_ON(display->drm, intel_crtc_is_joiner_secondary(new_crtc_state)))
continue;
if (!new_crtc_state->hw.enable)
@@ -6478,7 +6295,6 @@ int intel_atomic_check(struct drm_device *dev,
struct drm_atomic_state *_state)
{
struct intel_display *display = to_intel_display(dev);
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_crtc *crtc;
@@ -6526,7 +6342,7 @@ int intel_atomic_check(struct drm_device *dev,
continue;
if (intel_crtc_is_joiner_secondary(new_crtc_state)) {
- drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable);
+ drm_WARN_ON(display->drm, new_crtc_state->uapi.enable);
continue;
}
@@ -6597,7 +6413,7 @@ int intel_atomic_check(struct drm_device *dev,
}
if (any_ms && !check_digital_port_conflicts(state)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"rejecting conflicting digital port configuration\n");
ret = -EINVAL;
goto fail;
@@ -6653,7 +6469,7 @@ int intel_atomic_check(struct drm_device *dev,
goto fail;
/* Either full modeset or fastset (or neither), never both */
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intel_crtc_needs_modeset(new_crtc_state) &&
intel_crtc_needs_fastset(new_crtc_state));
@@ -6713,6 +6529,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
+ struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -6727,7 +6544,7 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
intel_set_pipe_src_size(new_crtc_state);
/* on skylake this is done by detaching scalers */
- if (DISPLAY_VER(dev_priv) >= 9) {
+ if (DISPLAY_VER(display) >= 9) {
if (new_crtc_state->pch_pfit.enabled)
skl_pfit_enable(new_crtc_state);
} else if (HAS_PCH_SPLIT(dev_priv)) {
@@ -6745,8 +6562,8 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
* HSW/BDW only really need this here for fastboot, after
* that the value should not change without a full modeset.
*/
- if (DISPLAY_VER(dev_priv) >= 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ if (DISPLAY_VER(display) >= 9 ||
+ display->platform.broadwell || display->platform.haswell)
hsw_set_linetime_wm(new_crtc_state);
if (new_crtc_state->update_m_n)
@@ -6760,14 +6577,14 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
static void commit_pipe_pre_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
bool modeset = intel_crtc_needs_modeset(new_crtc_state);
- drm_WARN_ON(&dev_priv->drm, new_crtc_state->use_dsb);
+ drm_WARN_ON(display->drm, new_crtc_state->use_dsb);
/*
* During modesets pipe configuration was programmed as the
@@ -6777,7 +6594,7 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state,
if (intel_crtc_needs_color_update(new_crtc_state))
intel_color_commit_arm(NULL, new_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ if (DISPLAY_VER(display) >= 9 || display->platform.broadwell)
bdw_set_pipe_misc(NULL, new_crtc_state);
if (intel_crtc_needs_fastset(new_crtc_state))
@@ -6792,20 +6609,20 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state,
static void commit_pipe_post_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- drm_WARN_ON(&dev_priv->drm, new_crtc_state->use_dsb);
+ drm_WARN_ON(display->drm, new_crtc_state->use_dsb);
/*
* Disable the scaler(s) after the plane(s) so that we don't
* get a catastrophic underrun even if the two operations
* end up happening in two different frames.
*/
- if (DISPLAY_VER(dev_priv) >= 9 &&
+ if (DISPLAY_VER(display) >= 9 &&
!intel_crtc_needs_modeset(new_crtc_state))
- skl_detach_scalers(new_crtc_state);
+ skl_detach_scalers(NULL, new_crtc_state);
if (intel_crtc_vrr_enabling(state, crtc))
intel_vrr_enable(new_crtc_state);
@@ -6814,7 +6631,7 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state,
static void intel_enable_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_crtc *pipe_crtc;
@@ -6822,7 +6639,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
if (!intel_crtc_needs_modeset(new_crtc_state))
return;
- for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
+ for_each_intel_crtc_in_pipe_mask_reverse(display->drm, pipe_crtc,
intel_crtc_joined_pipe_mask(new_crtc_state)) {
const struct intel_crtc_state *pipe_crtc_state =
intel_atomic_get_new_crtc_state(state, pipe_crtc);
@@ -6831,7 +6648,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
intel_crtc_update_active_timings(pipe_crtc_state, false);
}
- dev_priv->display.funcs.display->crtc_enable(state, crtc);
+ display->funcs.display->crtc_enable(state, crtc);
/* vblanks work again, re-enable pipe CRC. */
intel_crtc_enable_pipe_crc(crtc);
@@ -6841,7 +6658,6 @@ static void intel_pre_update_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
@@ -6850,7 +6666,7 @@ static void intel_pre_update_crtc(struct intel_atomic_state *state,
if (old_crtc_state->inherited ||
intel_crtc_needs_modeset(new_crtc_state)) {
- if (HAS_DPT(i915))
+ if (HAS_DPT(display))
intel_dpt_configure(crtc);
}
@@ -6864,7 +6680,7 @@ static void intel_pre_update_crtc(struct intel_atomic_state *state,
if (intel_crtc_needs_fastset(new_crtc_state))
intel_encoders_update_pipe(state, crtc);
- if (DISPLAY_VER(i915) >= 11 &&
+ if (DISPLAY_VER(display) >= 11 &&
intel_crtc_needs_fastset(new_crtc_state))
icl_set_pipe_chicken(new_crtc_state);
@@ -6938,7 +6754,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc *pipe_crtc;
@@ -6947,13 +6763,13 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
* We need to disable pipe CRC before disabling the pipe,
* or we race against vblank off.
*/
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
intel_crtc_joined_pipe_mask(old_crtc_state))
intel_crtc_disable_pipe_crc(pipe_crtc);
- dev_priv->display.funcs.display->crtc_disable(state, crtc);
+ display->funcs.display->crtc_disable(state, crtc);
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
intel_crtc_joined_pipe_mask(old_crtc_state)) {
const struct intel_crtc_state *new_pipe_crtc_state =
intel_atomic_get_new_crtc_state(state, pipe_crtc);
@@ -6968,7 +6784,7 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
u8 disable_pipes = 0;
@@ -7035,7 +6851,7 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state);
}
- drm_WARN_ON(&i915->drm, disable_pipes);
+ drm_WARN_ON(display->drm, disable_pipes);
}
static void intel_commit_modeset_enables(struct intel_atomic_state *state)
@@ -7062,7 +6878,7 @@ static void intel_commit_modeset_enables(struct intel_atomic_state *state)
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
@@ -7204,8 +7020,9 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
if ((update_pipes & BIT(pipe)) == 0)
continue;
- drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries, I915_MAX_PIPES, pipe));
+ drm_WARN_ON(display->drm,
+ skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+ entries, I915_MAX_PIPES, pipe));
entries[pipe] = new_crtc_state->wm.skl.ddb;
update_pipes &= ~BIT(pipe);
@@ -7213,8 +7030,8 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
intel_update_crtc(state, crtc);
}
- drm_WARN_ON(&dev_priv->drm, modeset_pipes);
- drm_WARN_ON(&dev_priv->drm, update_pipes);
+ drm_WARN_ON(display->drm, modeset_pipes);
+ drm_WARN_ON(display->drm, update_pipes);
}
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
@@ -7259,7 +7076,7 @@ static void intel_atomic_cleanup_work(struct work_struct *work)
{
struct intel_atomic_state *state =
container_of(work, struct intel_atomic_state, cleanup_work);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *old_crtc_state;
struct intel_crtc *crtc;
int i;
@@ -7267,14 +7084,14 @@ static void intel_atomic_cleanup_work(struct work_struct *work)
for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i)
intel_atomic_dsb_cleanup(old_crtc_state);
- drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
+ drm_atomic_helper_cleanup_planes(display->drm, &state->base);
drm_atomic_helper_commit_cleanup_done(&state->base);
drm_atomic_state_put(&state->base);
}
static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_plane *plane;
struct intel_plane_state *plane_state;
int i;
@@ -7311,7 +7128,7 @@ static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *s
&plane_state->ccval,
sizeof(plane_state->ccval));
/* The above could only fail if the FB obj has an unexpected backing store type. */
- drm_WARN_ON(&i915->drm, ret);
+ drm_WARN_ON(display->drm, ret);
}
}
@@ -7319,8 +7136,6 @@ static void intel_atomic_dsb_prepare(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -7332,11 +7147,8 @@ static void intel_atomic_dsb_prepare(struct intel_atomic_state *state,
/* FIXME deal with everything */
new_crtc_state->use_dsb =
- new_crtc_state->update_planes &&
!new_crtc_state->do_async_flip &&
(DISPLAY_VER(display) >= 20 || !new_crtc_state->has_psr) &&
- !new_crtc_state->scaler_state.scaler_users &&
- !old_crtc_state->scaler_state.scaler_users &&
!intel_crtc_needs_modeset(new_crtc_state) &&
!intel_crtc_needs_fastset(new_crtc_state);
@@ -7346,6 +7158,7 @@ static void intel_atomic_dsb_prepare(struct intel_atomic_state *state,
static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -7392,6 +7205,10 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
intel_crtc_planes_update_arm(new_crtc_state->dsb_commit,
state, crtc);
+ if (DISPLAY_VER(display) >= 9)
+ skl_detach_scalers(new_crtc_state->dsb_commit,
+ new_crtc_state);
+
if (!new_crtc_state->dsb_color_vblank) {
intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);
@@ -7412,8 +7229,7 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_device *dev = state->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
@@ -7425,11 +7241,14 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_atomic_commit_fence_wait(state);
- intel_td_flush(dev_priv);
+ intel_td_flush(display);
intel_atomic_prepare_plane_clear_colors(state);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+ intel_fbc_prepare_dirty_rect(state, crtc);
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
intel_atomic_dsb_finish(state, crtc);
drm_atomic_helper_wait_for_dependencies(&state->base);
@@ -7489,7 +7308,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_pmdemand_pre_plane_update(state);
if (state->modeset) {
- drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
+ drm_atomic_helper_update_legacy_modeset_state(display->drm, &state->base);
intel_set_cdclk_pre_plane_update(state);
@@ -7504,10 +7323,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
/* Complete events for now disable pipes here. */
if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
- spin_lock_irq(&dev->event_lock);
+ spin_lock_irq(&display->drm->event_lock);
drm_crtc_send_vblank_event(&crtc->base,
new_crtc_state->uapi.event);
- spin_unlock_irq(&dev->event_lock);
+ spin_unlock_irq(&display->drm->event_lock);
new_crtc_state->uapi.event = NULL;
}
@@ -7523,13 +7342,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
- dev_priv->display.funcs.display->commit_modeset_enables(state);
+ display->funcs.display->commit_modeset_enables(state);
intel_program_dpkgc_latency(state);
- if (state->modeset)
- intel_set_cdclk_post_plane_update(state);
-
intel_wait_for_vblank_workers(state);
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
@@ -7541,7 +7357,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* - switch over to the vblank wait helper in the core after that since
* we don't need out special handling any more.
*/
- drm_atomic_helper_wait_for_flip_done(dev, &state->base);
+ drm_atomic_helper_wait_for_flip_done(display->drm, &state->base);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->do_async_flip)
@@ -7570,7 +7386,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* chance of catching underruns with the intermediate watermarks
* vs. the new plane configuration.
*/
- if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
+ if (DISPLAY_VER(display) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
intel_set_cpu_fifo_underrun_reporting(display, crtc->pipe, true);
intel_optimize_watermarks(state, crtc);
@@ -7606,6 +7422,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_verify_planes(state);
intel_sagv_post_plane_update(state);
+ if (state->modeset)
+ intel_set_cdclk_post_plane_update(state);
intel_pmdemand_post_plane_update(state);
drm_atomic_helper_commit_hw_done(&state->base);
@@ -7636,7 +7454,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* down.
*/
INIT_WORK(&state->cleanup_work, intel_atomic_cleanup_work);
- queue_work(dev_priv->display.wq.cleanup, &state->cleanup_work);
+ queue_work(display->wq.cleanup, &state->cleanup_work);
}
static void intel_atomic_commit_work(struct work_struct *work)
@@ -7695,6 +7513,7 @@ static int intel_atomic_swap_state(struct intel_atomic_state *state)
int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
bool nonblock)
{
+ struct intel_display *display = to_intel_display(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
@@ -7718,7 +7537,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
* FIXME doing watermarks and fb cleanup from a vblank worker
* (assuming we had any) would solve these problems.
*/
- if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
+ if (DISPLAY_VER(display) < 9 && state->base.legacy_cursor_update) {
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int i;
@@ -7731,7 +7550,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
ret = intel_atomic_prepare_commit(state);
if (ret) {
- drm_dbg_atomic(&dev_priv->drm,
+ drm_dbg_atomic(display->drm,
"Preparing state failed with %i\n", ret);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
@@ -7751,12 +7570,12 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
if (nonblock && state->modeset) {
- queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
+ queue_work(display->wq.modeset, &state->base.commit_work);
} else if (nonblock) {
- queue_work(dev_priv->display.wq.flip, &state->base.commit_work);
+ queue_work(display->wq.flip, &state->base.commit_work);
} else {
if (state->modeset)
- flush_workqueue(dev_priv->display.wq.modeset);
+ flush_workqueue(display->wq.modeset);
intel_atomic_commit_tail(state);
}
@@ -7765,11 +7584,11 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
+ struct intel_display *display = to_intel_display(encoder);
struct intel_encoder *source_encoder;
u32 possible_clones = 0;
- for_each_intel_encoder(dev, source_encoder) {
+ for_each_intel_encoder(display->drm, source_encoder) {
if (encoders_cloneable(encoder, source_encoder))
possible_clones |= drm_encoder_mask(&source_encoder->base);
}
@@ -7779,47 +7598,49 @@ static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc;
u32 possible_crtcs = 0;
- for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask)
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, encoder->pipe_mask)
possible_crtcs |= drm_crtc_mask(&crtc->base);
return possible_crtcs;
}
-static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
+static bool ilk_has_edp_a(struct intel_display *display)
{
- if (!IS_MOBILE(dev_priv))
+ if (!display->platform.mobile)
return false;
- if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
+ if ((intel_de_read(display, DP_A) & DP_DETECTED) == 0)
return false;
- if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
+ if (display->platform.ironlake && (intel_de_read(display, FUSE_STRAP) & ILK_eDP_A_DISABLE))
return false;
return true;
}
-static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
+static bool intel_ddi_crt_present(struct intel_display *display)
{
- if (DISPLAY_VER(dev_priv) >= 9)
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ if (DISPLAY_VER(display) >= 9)
return false;
- if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv))
+ if (display->platform.haswell_ult || display->platform.broadwell_ult)
return false;
if (HAS_PCH_LPT_H(dev_priv) &&
- intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
+ intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
return false;
/* DDI E can't be used if DDI A requires 4 lanes */
- if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ if (intel_de_read(display, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
return false;
- if (!dev_priv->display.vbt.int_crt_support)
+ if (!display->vbt.int_crt_support)
return false;
return true;
@@ -7831,24 +7652,24 @@ bool assert_port_valid(struct intel_display *display, enum port port)
"Platform does not support port %c\n", port_name(port));
}
-void intel_setup_outputs(struct drm_i915_private *dev_priv)
+void intel_setup_outputs(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
bool dpd_is_edp = false;
intel_pps_unlock_regs_wa(display);
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- if (HAS_DDI(dev_priv)) {
- if (intel_ddi_crt_present(dev_priv))
+ if (HAS_DDI(display)) {
+ if (intel_ddi_crt_present(display))
intel_crt_init(display);
intel_bios_for_each_encoder(display, intel_ddi_init);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
vlv_dsi_init(dev_priv);
} else if (HAS_PCH_SPLIT(dev_priv)) {
int found;
@@ -7863,33 +7684,33 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
dpd_is_edp = intel_dp_is_port_edp(display, PORT_D);
- if (ilk_has_edp_a(dev_priv))
+ if (ilk_has_edp_a(display))
g4x_dp_init(display, DP_A, PORT_A);
- if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
+ if (intel_de_read(display, PCH_HDMIB) & SDVO_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
found = intel_sdvo_init(display, PCH_SDVOB, PORT_B);
if (!found)
g4x_hdmi_init(display, PCH_HDMIB, PORT_B);
- if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
+ if (!found && (intel_de_read(display, PCH_DP_B) & DP_DETECTED))
g4x_dp_init(display, PCH_DP_B, PORT_B);
}
- if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
+ if (intel_de_read(display, PCH_HDMIC) & SDVO_DETECTED)
g4x_hdmi_init(display, PCH_HDMIC, PORT_C);
- if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
+ if (!dpd_is_edp && intel_de_read(display, PCH_HDMID) & SDVO_DETECTED)
g4x_hdmi_init(display, PCH_HDMID, PORT_D);
- if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
+ if (intel_de_read(display, PCH_DP_C) & DP_DETECTED)
g4x_dp_init(display, PCH_DP_C, PORT_C);
- if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
+ if (intel_de_read(display, PCH_DP_D) & DP_DETECTED)
g4x_dp_init(display, PCH_DP_D, PORT_D);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ } else if (display->platform.valleyview || display->platform.cherryview) {
bool has_edp, has_port;
- if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support)
+ if (display->platform.valleyview && display->vbt.int_crt_support)
intel_crt_init(display);
/*
@@ -7909,87 +7730,87 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
*/
has_edp = intel_dp_is_port_edp(display, PORT_B);
has_port = intel_bios_is_port_present(display, PORT_B);
- if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
+ if (intel_de_read(display, VLV_DP_B) & DP_DETECTED || has_port)
has_edp &= g4x_dp_init(display, VLV_DP_B, PORT_B);
- if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
+ if ((intel_de_read(display, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
g4x_hdmi_init(display, VLV_HDMIB, PORT_B);
has_edp = intel_dp_is_port_edp(display, PORT_C);
has_port = intel_bios_is_port_present(display, PORT_C);
- if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
+ if (intel_de_read(display, VLV_DP_C) & DP_DETECTED || has_port)
has_edp &= g4x_dp_init(display, VLV_DP_C, PORT_C);
- if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
+ if ((intel_de_read(display, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
g4x_hdmi_init(display, VLV_HDMIC, PORT_C);
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
/*
* eDP not supported on port D,
* so no need to worry about it
*/
has_port = intel_bios_is_port_present(display, PORT_D);
- if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
+ if (intel_de_read(display, CHV_DP_D) & DP_DETECTED || has_port)
g4x_dp_init(display, CHV_DP_D, PORT_D);
- if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
+ if (intel_de_read(display, CHV_HDMID) & SDVO_DETECTED || has_port)
g4x_hdmi_init(display, CHV_HDMID, PORT_D);
}
vlv_dsi_init(dev_priv);
- } else if (IS_PINEVIEW(dev_priv)) {
+ } else if (display->platform.pineview) {
intel_lvds_init(dev_priv);
intel_crt_init(display);
- } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
+ } else if (IS_DISPLAY_VER(display, 3, 4)) {
bool found = false;
- if (IS_MOBILE(dev_priv))
+ if (display->platform.mobile)
intel_lvds_init(dev_priv);
intel_crt_init(display);
- if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
- drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
+ if (intel_de_read(display, GEN3_SDVOB) & SDVO_DETECTED) {
+ drm_dbg_kms(display->drm, "probing SDVOB\n");
found = intel_sdvo_init(display, GEN3_SDVOB, PORT_B);
- if (!found && IS_G4X(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (!found && display->platform.g4x) {
+ drm_dbg_kms(display->drm,
"probing HDMI on SDVOB\n");
g4x_hdmi_init(display, GEN4_HDMIB, PORT_B);
}
- if (!found && IS_G4X(dev_priv))
+ if (!found && display->platform.g4x)
g4x_dp_init(display, DP_B, PORT_B);
}
/* Before G4X SDVOC doesn't have its own detect register */
- if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
- drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
+ if (intel_de_read(display, GEN3_SDVOB) & SDVO_DETECTED) {
+ drm_dbg_kms(display->drm, "probing SDVOC\n");
found = intel_sdvo_init(display, GEN3_SDVOC, PORT_C);
}
- if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
+ if (!found && (intel_de_read(display, GEN3_SDVOC) & SDVO_DETECTED)) {
- if (IS_G4X(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (display->platform.g4x) {
+ drm_dbg_kms(display->drm,
"probing HDMI on SDVOC\n");
g4x_hdmi_init(display, GEN4_HDMIC, PORT_C);
}
- if (IS_G4X(dev_priv))
+ if (display->platform.g4x)
g4x_dp_init(display, DP_C, PORT_C);
}
- if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
+ if (display->platform.g4x && (intel_de_read(display, DP_D) & DP_DETECTED))
g4x_dp_init(display, DP_D, PORT_D);
- if (SUPPORTS_TV(dev_priv))
+ if (SUPPORTS_TV(display))
intel_tv_init(display);
- } else if (DISPLAY_VER(dev_priv) == 2) {
- if (IS_I85X(dev_priv))
+ } else if (DISPLAY_VER(display) == 2) {
+ if (display->platform.i85x)
intel_lvds_init(dev_priv);
intel_crt_init(display);
intel_dvo_init(dev_priv);
}
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
encoder->base.possible_crtcs =
intel_encoder_possible_crtcs(encoder);
encoder->base.possible_clones =
@@ -7998,12 +7819,11 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_init_pch_refclk(dev_priv);
- drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
+ drm_helper_move_panel_connectors_to_head(display->drm);
}
-static int max_dotclock(struct drm_i915_private *i915)
+static int max_dotclock(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
int max_dotclock = display->cdclk.max_dotclk_freq;
if (HAS_ULTRAJOINER(display))
@@ -8017,7 +7837,7 @@ static int max_dotclock(struct drm_i915_private *i915)
enum drm_mode_status intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(dev);
int hdisplay_max, htotal_max;
int vdisplay_max, vtotal_max;
@@ -8054,22 +7874,22 @@ enum drm_mode_status intel_mode_valid(struct drm_device *dev,
* Reject clearly excessive dotclocks early to
* avoid having to worry about huge integers later.
*/
- if (mode->clock > max_dotclock(dev_priv))
+ if (mode->clock > max_dotclock(display))
return MODE_CLOCK_HIGH;
/* Transcoder timing limits */
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
hdisplay_max = 16384;
vdisplay_max = 8192;
htotal_max = 16384;
vtotal_max = 8192;
- } else if (DISPLAY_VER(dev_priv) >= 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ } else if (DISPLAY_VER(display) >= 9 ||
+ display->platform.broadwell || display->platform.haswell) {
hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
vdisplay_max = 4096;
htotal_max = 8192;
vtotal_max = 8192;
- } else if (DISPLAY_VER(dev_priv) >= 3) {
+ } else if (DISPLAY_VER(display) >= 3) {
hdisplay_max = 4096;
vdisplay_max = 4096;
htotal_max = 8192;
@@ -8215,32 +8035,34 @@ static const struct intel_display_funcs i9xx_display_funcs = {
/**
* intel_init_display_hooks - initialize the display modesetting hooks
- * @dev_priv: device private
+ * @display: display device private
*/
-void intel_init_display_hooks(struct drm_i915_private *dev_priv)
+void intel_init_display_hooks(struct intel_display *display)
{
- if (DISPLAY_VER(dev_priv) >= 9) {
- dev_priv->display.funcs.display = &skl_display_funcs;
- } else if (HAS_DDI(dev_priv)) {
- dev_priv->display.funcs.display = &ddi_display_funcs;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ if (DISPLAY_VER(display) >= 9) {
+ display->funcs.display = &skl_display_funcs;
+ } else if (HAS_DDI(display)) {
+ display->funcs.display = &ddi_display_funcs;
} else if (HAS_PCH_SPLIT(dev_priv)) {
- dev_priv->display.funcs.display = &pch_split_display_funcs;
- } else if (IS_CHERRYVIEW(dev_priv) ||
- IS_VALLEYVIEW(dev_priv)) {
- dev_priv->display.funcs.display = &vlv_display_funcs;
+ display->funcs.display = &pch_split_display_funcs;
+ } else if (display->platform.cherryview ||
+ display->platform.valleyview) {
+ display->funcs.display = &vlv_display_funcs;
} else {
- dev_priv->display.funcs.display = &i9xx_display_funcs;
+ display->funcs.display = &i9xx_display_funcs;
}
}
-int intel_initial_commit(struct drm_device *dev)
+int intel_initial_commit(struct intel_display *display)
{
struct drm_atomic_state *state = NULL;
struct drm_modeset_acquire_ctx ctx;
struct intel_crtc *crtc;
int ret = 0;
- state = drm_atomic_state_alloc(dev);
+ state = drm_atomic_state_alloc(display->drm);
if (!state)
return -ENOMEM;
@@ -8250,7 +8072,7 @@ int intel_initial_commit(struct drm_device *dev)
to_intel_atomic_state(state)->internal = true;
retry:
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
intel_atomic_get_crtc_state(state, crtc);
@@ -8274,7 +8096,7 @@ retry:
*/
crtc_state->uapi.color_mgmt_changed = true;
- for_each_intel_encoder_mask(dev, encoder,
+ for_each_intel_encoder_mask(display->drm, encoder,
crtc_state->uapi.encoder_mask) {
if (encoder->initial_fastset_check &&
!encoder->initial_fastset_check(encoder, crtc_state)) {
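
The intel_display.c hunks above repeat one conversion: the per-device pointer is now derived with to_intel_display() instead of to_i915(), logging goes through display->drm rather than &i915->drm, and version/platform checks use DISPLAY_VER(display) and the display->platform.<name> flags. A minimal sketch of that pattern, using only helpers visible in this diff (the function name is hypothetical and not part of the patch):

static void example_async_flip_dbg(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	/* old: struct drm_i915_private *i915 = to_i915(state->base.dev); */
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/* old: drm_dbg_kms(&i915->drm, ...) */
	if (!new_crtc_state->uapi.active)
		drm_dbg_kms(display->drm, "[CRTC:%d:%s] not active\n",
			    crtc->base.base.id, crtc->base.name);

	/* old: DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915) || IS_HASWELL(i915) */
	if (DISPLAY_VER(display) >= 9 ||
	    display->platform.broadwell || display->platform.haswell)
		drm_dbg_kms(display->drm, "[CRTC:%d:%s] SKL+/HSW/BDW path\n",
			    crtc->base.base.id, crtc->base.name);
}
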
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index e58daefc978e..3b54a62c290a 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -426,7 +426,7 @@ intel_mode_valid_max_plane_size(struct intel_display *display,
enum drm_mode_status
intel_cpu_transcoder_mode_valid(struct intel_display *display,
const struct drm_display_mode *mode);
-enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
+enum phy intel_port_to_phy(struct intel_display *display, enum port port);
bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
bool is_trans_port_sync_master(const struct intel_crtc_state *state);
u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state);
@@ -457,18 +457,16 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
const char *name, u32 reg);
-void intel_init_display_hooks(struct drm_i915_private *dev_priv);
-bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
+bool intel_has_pending_fb_unpin(struct intel_display *display);
void intel_encoder_destroy(struct drm_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
void intel_encoder_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
bool intel_phy_is_combo(struct intel_display *display, enum phy phy);
-bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy);
-bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy);
-enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
- enum port port);
+bool intel_phy_is_tc(struct intel_display *display, enum phy phy);
+bool intel_phy_is_snps(struct intel_display *display, enum phy phy);
+enum tc_port intel_port_to_tc(struct intel_display *display, enum port port);
enum phy intel_encoder_to_phy(struct intel_encoder *encoder);
bool intel_encoder_is_combo(struct intel_encoder *encoder);
@@ -481,15 +479,15 @@ int ilk_get_lanes_required(int target_clock, int link_bw, int bpp);
bool intel_fuzzy_clock_check(int clock1, int clock2);
void intel_zero_m_n(struct intel_link_m_n *m_n);
-void intel_set_m_n(struct drm_i915_private *i915,
+void intel_set_m_n(struct intel_display *display,
const struct intel_link_m_n *m_n,
i915_reg_t data_m_reg, i915_reg_t data_n_reg,
i915_reg_t link_m_reg, i915_reg_t link_n_reg);
-void intel_get_m_n(struct drm_i915_private *i915,
+void intel_get_m_n(struct intel_display *display,
struct intel_link_m_n *m_n,
i915_reg_t data_m_reg, i915_reg_t data_n_reg,
i915_reg_t link_m_reg, i915_reg_t link_n_reg);
-bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
+bool intel_cpu_transcoder_has_m2_n2(struct intel_display *display,
enum transcoder transcoder);
void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
enum transcoder cpu_transcoder,
@@ -510,8 +508,6 @@ enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port);
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
-void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
-
int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc);
unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
@@ -525,8 +521,6 @@ void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
bool visible);
void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state);
-void intel_update_watermarks(struct drm_i915_private *i915);
-
bool intel_crtc_vrr_disabling(struct intel_atomic_state *state,
struct intel_crtc *crtc);
@@ -535,7 +529,7 @@ int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
const char *reason, u8 pipe_mask);
int intel_modeset_all_pipes_late(struct intel_atomic_state *state,
const char *reason);
-int intel_modeset_commit_pipes(struct drm_i915_private *i915,
+int intel_modeset_commit_pipes(struct intel_display *display,
u8 pipe_mask,
struct drm_modeset_acquire_ctx *ctx);
void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
@@ -544,11 +538,11 @@ void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
struct intel_power_domain_mask *domains);
/* interface for intel_display_driver.c */
-void intel_setup_outputs(struct drm_i915_private *i915);
-int intel_initial_commit(struct drm_device *dev);
-void intel_panel_sanitize_ssc(struct drm_i915_private *i915);
-void intel_update_czclk(struct drm_i915_private *i915);
-void intel_atomic_helper_free_state_worker(struct work_struct *work);
+void intel_init_display_hooks(struct intel_display *display);
+void intel_setup_outputs(struct intel_display *display);
+int intel_initial_commit(struct intel_display *display);
+void intel_panel_sanitize_ssc(struct intel_display *display);
+void intel_update_czclk(struct intel_display *display);
enum drm_mode_status intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode);
int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
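
The header changes above switch a number of prototypes from struct drm_i915_private to struct intel_display (intel_phy_is_tc(), intel_port_to_tc(), intel_get_m_n(), the probe-time entry points, and so on). A hypothetical caller updated for the new intel_phy_is_tc() signature would look like the sketch below; the real call sites, e.g. icl_aux_power_well_enable() in the intel_display_power_well.c hunks further down, follow the same shape.

static bool example_encoder_is_on_tc_phy(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);

	/* old: intel_phy_is_tc(to_i915(encoder->base.dev), phy) */
	return intel_phy_is_tc(display, intel_encoder_to_phy(encoder));
}
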
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 554870d2494b..eeb7ae3eaea8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -91,6 +91,7 @@ struct intel_wm_funcs {
struct intel_crtc *crtc);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
void (*get_hw_state)(struct drm_i915_private *i915);
+ void (*sanitize)(struct drm_i915_private *i915);
};
struct intel_audio_state {
@@ -386,7 +387,6 @@ struct intel_display {
struct {
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
- struct work_struct suspend_work;
} fbdev;
struct {
@@ -512,6 +512,8 @@ struct intel_display {
/* restore state for suspend/resume and display reset */
struct drm_atomic_state *modeset_state;
struct drm_modeset_acquire_ctx reset_ctx;
+ /* modeset stuck tracking for reset */
+ atomic_t pending_fb_pin;
u32 saveDSPARB;
u32 saveSWF0[16];
u32 saveSWF1[16];
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 9de7e512c0ab..fdedf65bee53 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -49,11 +49,6 @@ static struct intel_display *node_to_intel_display(struct drm_info_node *node)
return to_intel_display(node->minor->dev);
}
-static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
-{
- return to_i915(node->minor->dev);
-}
-
static int intel_display_caps(struct seq_file *m, void *data)
{
struct intel_display *display = node_to_intel_display(m->private);
@@ -85,8 +80,8 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
static int i915_sr_status(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_display *display = node_to_intel_display(m->private);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
intel_wakeref_t wakeref;
bool sr_enabled = false;
@@ -102,7 +97,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
else if (display->platform.i915gm)
sr_enabled = intel_de_read(display, INSTPM) & INSTPM_SELF_EN;
else if (display->platform.pineview)
- sr_enabled = intel_de_read(display, DSPFW3(dev_priv)) & PINEVIEW_SELF_REFRESH_EN;
+ sr_enabled = intel_de_read(display, DSPFW3(display)) & PINEVIEW_SELF_REFRESH_EN;
else if (display->platform.valleyview || display->platform.cherryview)
sr_enabled = intel_de_read(display, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
@@ -119,7 +114,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
struct intel_framebuffer *fbdev_fb = NULL;
struct drm_framebuffer *drm_fb;
-#ifdef CONFIG_DRM_FBDEV_EMULATION
fbdev_fb = intel_fbdev_framebuffer(display->fbdev.fbdev);
if (fbdev_fb) {
seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
@@ -132,7 +126,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
intel_bo_describe(m, intel_fb_bo(&fbdev_fb->base));
seq_putc(m, '\n');
}
-#endif
mutex_lock(&display->drm->mode_config.fb_lock);
drm_for_each_fb(drm_fb, display->drm) {
@@ -157,8 +150,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_display *display = &i915->display;
+ struct intel_display *display = node_to_intel_display(m->private);
intel_display_power_debug(display, m);
@@ -267,7 +259,7 @@ static void intel_connector_info(struct seq_file *m,
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
- if (intel_connector->mst_port)
+ if (intel_connector->mst.dp)
intel_dp_mst_info(m, intel_connector);
else
intel_dp_info(m, intel_connector);
@@ -588,7 +580,7 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
static int i915_display_info(struct seq_file *m, void *unused)
{
struct intel_display *display = node_to_intel_display(m->private);
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
@@ -713,14 +705,13 @@ intel_lpsp_power_well_enabled(struct intel_display *display,
static int i915_lpsp_status(struct seq_file *m, void *unused)
{
struct intel_display *display = node_to_intel_display(m->private);
- struct drm_i915_private *i915 = node_to_i915(m->private);
bool lpsp_enabled = false;
if (DISPLAY_VER(display) >= 13 || IS_DISPLAY_VER(display, 9, 10)) {
lpsp_enabled = !intel_lpsp_power_well_enabled(display, SKL_DISP_PW_2);
} else if (IS_DISPLAY_VER(display, 11, 12)) {
lpsp_enabled = !intel_lpsp_power_well_enabled(display, ICL_DISP_PW_3);
- } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ } else if (display->platform.haswell || display->platform.broadwell) {
lpsp_enabled = !intel_lpsp_power_well_enabled(display, HSW_DISP_PW_GLOBAL);
} else {
seq_puts(m, "LPSP: not supported\n");
@@ -756,7 +747,7 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
dig_port->base.base.base.id,
dig_port->base.base.name);
- drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
+ drm_dp_mst_dump_topology(m, &dig_port->dp.mst.mgr);
}
drm_connector_list_iter_end(&conn_iter);
@@ -836,10 +827,10 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_lpsp_status", i915_lpsp_status, 0},
};
-void intel_display_debugfs_register(struct drm_i915_private *i915)
+void intel_display_debugfs_register(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_fifo_underrun_reset", 0644, minor->debugfs_root,
display, &i915_fifo_underrun_reset_ops);
@@ -865,7 +856,6 @@ static int i915_lpsp_capability_show(struct seq_file *m, void *data)
struct intel_connector *connector = m->private;
struct intel_display *display = to_intel_display(connector);
struct intel_encoder *encoder = intel_attached_encoder(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
int connector_type = connector->base.connector_type;
bool lpsp_capable = false;
@@ -892,7 +882,7 @@ static int i915_lpsp_capability_show(struct seq_file *m, void *data)
(connector_type == DRM_MODE_CONNECTOR_DSI ||
connector_type == DRM_MODE_CONNECTOR_eDP ||
connector_type == DRM_MODE_CONNECTOR_DisplayPort));
- else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ else if (display->platform.haswell || display->platform.broadwell)
lpsp_capable = connector_type == DRM_MODE_CONNECTOR_eDP;
seq_printf(m, "LPSP: %s\n", lpsp_capable ? "capable" : "incapable");
@@ -1349,7 +1339,7 @@ void intel_connector_debugfs_add(struct intel_connector *connector)
intel_dp_link_training_debugfs_add(connector);
if (DISPLAY_VER(display) >= 11 &&
- ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !connector->mst_port) ||
+ ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !connector->mst.dp) ||
connector_type == DRM_MODE_CONNECTOR_eDP)) {
debugfs_create_file("i915_dsc_fec_support", 0644, root,
connector, &i915_dsc_fec_support_fops);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
index e1f479b7acd1..82af2f608111 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
@@ -6,16 +6,16 @@
#ifndef __INTEL_DISPLAY_DEBUGFS_H__
#define __INTEL_DISPLAY_DEBUGFS_H__
-struct drm_i915_private;
struct intel_connector;
struct intel_crtc;
+struct intel_display;
#ifdef CONFIG_DEBUG_FS
-void intel_display_debugfs_register(struct drm_i915_private *i915);
+void intel_display_debugfs_register(struct intel_display *display);
void intel_connector_debugfs_add(struct intel_connector *connector);
void intel_crtc_debugfs_add(struct intel_crtc *crtc);
#else
-static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {}
+static inline void intel_display_debugfs_register(struct intel_display *display) {}
static inline void intel_connector_debugfs_add(struct intel_connector *connector) {}
static inline void intel_crtc_debugfs_add(struct intel_crtc *crtc) {}
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index fc33791f02b9..717286981687 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -163,6 +163,7 @@ struct intel_display_platforms {
#define HAS_DSC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dsc)
#define HAS_DSC_MST(__display) (DISPLAY_VER(__display) >= 12 && HAS_DSC(__display))
#define HAS_FBC(__display) (DISPLAY_RUNTIME_INFO(__display)->fbc_mask != 0)
+#define HAS_FBC_DIRTY_RECT(__display) (DISPLAY_VER(__display) >= 30)
#define HAS_FPGA_DBG_UNCLAIMED(__display) (DISPLAY_INFO(__display)->has_fpga_dbg)
#define HAS_FW_BLC(__display) (DISPLAY_VER(__display) >= 3)
#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4)
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index b72b07329fbf..31740a677dd8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -194,13 +194,13 @@ void intel_display_driver_early_probe(struct intel_display *display)
mutex_init(&display->hdcp.hdcp_mutex);
intel_display_irq_init(i915);
- intel_dkl_phy_init(i915);
+ intel_dkl_phy_init(display);
intel_color_init_hooks(display);
intel_init_cdclk_hooks(display);
intel_audio_hooks_init(display);
intel_dpll_init_clock_hook(i915);
- intel_init_display_hooks(i915);
- intel_fdi_init_hook(i915);
+ intel_init_display_hooks(display);
+ intel_fdi_init_hook(display);
intel_dmc_wl_init(display);
}
@@ -431,7 +431,7 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
intel_wm_init(i915);
- intel_panel_sanitize_ssc(i915);
+ intel_panel_sanitize_ssc(display);
intel_pps_setup(display);
@@ -449,9 +449,9 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
intel_plane_possible_crtcs_init(display);
intel_shared_dpll_init(display);
- intel_fdi_pll_freq_update(i915);
+ intel_fdi_pll_freq_update(display);
- intel_update_czclk(i915);
+ intel_update_czclk(display);
intel_display_driver_init_hw(display);
intel_dpll_update_ref_clks(display);
@@ -462,7 +462,7 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
/* Just disable it once at startup */
intel_vga_disable(display);
- intel_setup_outputs(i915);
+ intel_setup_outputs(display);
ret = intel_dp_tunnel_mgr_init(display);
if (ret)
@@ -517,7 +517,7 @@ int intel_display_driver_probe(struct intel_display *display)
* are already calculated and there is no assert_plane warnings
* during bootup.
*/
- ret = intel_initial_commit(display->drm);
+ ret = intel_initial_commit(display);
if (ret)
drm_dbg_kms(display->drm, "Initial modeset failed, %d\n", ret);
@@ -550,7 +550,7 @@ void intel_display_driver_register(struct intel_display *display)
intel_audio_register(display);
- intel_display_debugfs_register(i915);
+ intel_display_debugfs_register(display);
/*
* We need to coordinate the hotplugs with the asynchronous
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index 99fb7fc7be39..aa23bb817805 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -1101,7 +1101,7 @@ static bool handle_plane_ats_fault(struct intel_crtc *crtc, enum plane_id plane_
"[CRTC:%d:%s] PLANE ATS fault\n",
crtc->base.base.id, crtc->base.name);
- return false;
+ return true;
}
static bool handle_pipedmc_ats_fault(struct intel_crtc *crtc, enum plane_id plane_id)
@@ -1112,7 +1112,7 @@ static bool handle_pipedmc_ats_fault(struct intel_crtc *crtc, enum plane_id plan
"[CRTC:%d:%s] PIPEDMC ATS fault\n",
crtc->base.base.id, crtc->base.name);
- return false;
+ return true;
}
static bool handle_pipedmc_fault(struct intel_crtc *crtc, enum plane_id plane_id)
@@ -1123,7 +1123,7 @@ static bool handle_pipedmc_fault(struct intel_crtc *crtc, enum plane_id plane_id
"[CRTC:%d:%s] PIPEDMC fault\n",
crtc->base.base.id, crtc->base.name);
- return false;
+ return true;
}
static const struct pipe_fault_handler mtl_pipe_fault_handlers[] = {
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 396930937d98..f7171e6932dc 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -1684,7 +1684,7 @@ static void icl_display_core_init(struct intel_display *display,
/* 8. Ensure PHYs have completed calibration and adaptation */
if (display->platform.dg2)
- intel_snps_phy_wait_for_calibration(dev_priv);
+ intel_snps_phy_wait_for_calibration(display);
/* 9. XE2_HPD: Program CHICKEN_MISC_2 before any cursor or planes are enabled */
if (DISPLAY_VERx100(display) == 1401)
@@ -2317,6 +2317,9 @@ void intel_display_power_debug(struct intel_display *display, struct seq_file *m
mutex_lock(&power_domains->lock);
+ seq_printf(m, "Runtime power status: %s\n",
+ str_enabled_disabled(!power_domains->init_wakeref));
+
seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
for (i = 0; i < power_domains->power_well_count; i++) {
struct i915_power_well *power_well;
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index a3a5c1be8bab..1b53d67f9b60 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -117,12 +117,13 @@ enum intel_display_power_domain {
POWER_DOMAIN_INVALID = POWER_DOMAIN_NUM,
};
-#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
+#define POWER_DOMAIN_PIPE(pipe) \
+ ((enum intel_display_power_domain)((pipe) - PIPE_A + POWER_DOMAIN_PIPE_A))
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
- ((pipe) + POWER_DOMAIN_PIPE_PANEL_FITTER_A)
+ ((enum intel_display_power_domain)((pipe) - PIPE_A + POWER_DOMAIN_PIPE_PANEL_FITTER_A))
#define POWER_DOMAIN_TRANSCODER(tran) \
((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
- (tran) + POWER_DOMAIN_TRANSCODER_A)
+ (enum intel_display_power_domain)((tran) - TRANSCODER_A + POWER_DOMAIN_TRANSCODER_A))
struct intel_power_domain_mask {
DECLARE_BITMAP(bits, POWER_DOMAIN_NUM);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index 0c8ac1af6db7..e80e1fd611ca 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -3,6 +3,8 @@
* Copyright © 2022 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "intel_display_core.h"
#include "intel_display_power_map.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 5b60db597329..8ec87ffd87d2 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -549,10 +549,9 @@ static void
icl_aux_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum phy phy = icl_aux_pw_to_phy(display, power_well);
- if (intel_phy_is_tc(dev_priv, phy))
+ if (intel_phy_is_tc(display, phy))
return icl_tc_phy_aux_power_well_enable(display, power_well);
else if (display->platform.icelake)
return icl_combo_phy_aux_power_well_enable(display,
@@ -565,10 +564,9 @@ static void
icl_aux_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum phy phy = icl_aux_pw_to_phy(display, power_well);
- if (intel_phy_is_tc(dev_priv, phy))
+ if (intel_phy_is_tc(display, phy))
return hsw_power_well_disable(display, power_well);
else if (display->platform.icelake)
return icl_combo_phy_aux_power_well_disable(display,
@@ -1829,11 +1827,10 @@ tgl_tc_cold_off_power_well_is_enabled(struct intel_display *display,
static void xelpdp_aux_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
enum phy phy = icl_aux_pw_to_phy(display, power_well);
- if (intel_phy_is_tc(dev_priv, phy))
+ if (intel_phy_is_tc(display, phy))
icl_tc_port_assert_ref_held(display, power_well,
aux_ch_to_digital_port(display, aux_ch));
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c
index a690968885bf..1f2798404f2c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.c
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.c
@@ -14,45 +14,36 @@
#include "intel_hotplug.h"
#include "intel_pps.h"
-static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
+bool intel_display_reset_test(struct intel_display *display)
{
- return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
- intel_has_gpu_reset(to_gt(dev_priv)));
+ return display->params.force_reset_modeset_test;
}
-void intel_display_reset_prepare(struct drm_i915_private *dev_priv)
+/* returns true if intel_display_reset_finish() needs to be called */
+bool intel_display_reset_prepare(struct intel_display *display,
+ modeset_stuck_fn modeset_stuck, void *context)
{
- struct drm_modeset_acquire_ctx *ctx = &dev_priv->display.restore.reset_ctx;
+ struct drm_modeset_acquire_ctx *ctx = &display->restore.reset_ctx;
struct drm_atomic_state *state;
int ret;
- if (!HAS_DISPLAY(dev_priv))
- return;
+ if (!HAS_DISPLAY(display))
+ return false;
- /* reset doesn't touch the display */
- if (!dev_priv->display.params.force_reset_modeset_test &&
- !gpu_reset_clobbers_display(dev_priv))
- return;
-
- /* We have a modeset vs reset deadlock, defensively unbreak it. */
- set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
- smp_mb__after_atomic();
- wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET);
-
- if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (atomic_read(&display->restore.pending_fb_pin)) {
+ drm_dbg_kms(display->drm,
"Modeset potentially stuck, unbreaking through wedging\n");
- intel_gt_set_wedged(to_gt(dev_priv));
+ modeset_stuck(context);
}
/*
* Need mode_config.mutex so that we don't
* trample ongoing ->detect() and whatnot.
*/
- mutex_lock(&dev_priv->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
drm_modeset_acquire_init(ctx, 0);
while (1) {
- ret = drm_modeset_lock_all_ctx(&dev_priv->drm, ctx);
+ ret = drm_modeset_lock_all_ctx(display->drm, ctx);
if (ret != -EDEADLK)
break;
@@ -62,38 +53,36 @@ void intel_display_reset_prepare(struct drm_i915_private *dev_priv)
* Disabling the crtcs gracefully seems nicer. Also the
* g33 docs say we should at least disable all the planes.
*/
- state = drm_atomic_helper_duplicate_state(&dev_priv->drm, ctx);
+ state = drm_atomic_helper_duplicate_state(display->drm, ctx);
if (IS_ERR(state)) {
ret = PTR_ERR(state);
- drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
+ drm_err(display->drm, "Duplicating state failed with %i\n",
ret);
- return;
+ return true;
}
- ret = drm_atomic_helper_disable_all(&dev_priv->drm, ctx);
+ ret = drm_atomic_helper_disable_all(display->drm, ctx);
if (ret) {
- drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
+ drm_err(display->drm, "Suspending crtc's failed with %i\n",
ret);
drm_atomic_state_put(state);
- return;
+ return true;
}
- dev_priv->display.restore.modeset_state = state;
+ display->restore.modeset_state = state;
state->acquire_ctx = ctx;
+
+ return true;
}
-void intel_display_reset_finish(struct drm_i915_private *i915)
+void intel_display_reset_finish(struct intel_display *display, bool test_only)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_modeset_acquire_ctx *ctx = &display->restore.reset_ctx;
struct drm_atomic_state *state;
int ret;
- if (!HAS_DISPLAY(i915))
- return;
-
- /* reset doesn't touch the display */
- if (!test_bit(I915_RESET_MODESET, &to_gt(i915)->reset.flags))
+ if (!HAS_DISPLAY(display))
return;
state = fetch_and_zero(&display->restore.modeset_state);
@@ -101,12 +90,12 @@ void intel_display_reset_finish(struct drm_i915_private *i915)
goto unlock;
/* reset doesn't touch the display */
- if (!gpu_reset_clobbers_display(i915)) {
+ if (test_only) {
/* for testing only restore the display */
ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
if (ret) {
- drm_WARN_ON(&i915->drm, ret == -EDEADLK);
- drm_err(&i915->drm,
+ drm_WARN_ON(display->drm, ret == -EDEADLK);
+ drm_err(display->drm,
"Restoring old state failed with %i\n", ret);
}
} else {
@@ -122,7 +111,7 @@ void intel_display_reset_finish(struct drm_i915_private *i915)
ret = __intel_display_driver_resume(display, state, ctx);
if (ret)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Restoring old state failed with %i\n", ret);
intel_hpd_poll_disable(i915);
@@ -132,7 +121,5 @@ void intel_display_reset_finish(struct drm_i915_private *i915)
unlock:
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
- mutex_unlock(&i915->drm.mode_config.mutex);
-
- clear_bit_unlock(I915_RESET_MODESET, &to_gt(i915)->reset.flags);
+ mutex_unlock(&display->drm->mode_config.mutex);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.h b/drivers/gpu/drm/i915/display/intel_display_reset.h
index f06d0d35b86b..8b3bda134454 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.h
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.h
@@ -6,9 +6,15 @@
#ifndef __INTEL_RESET_H__
#define __INTEL_RESET_H__
-struct drm_i915_private;
+#include <linux/types.h>
-void intel_display_reset_prepare(struct drm_i915_private *i915);
-void intel_display_reset_finish(struct drm_i915_private *i915);
+struct intel_display;
+
+typedef void modeset_stuck_fn(void *context);
+
+bool intel_display_reset_test(struct intel_display *display);
+bool intel_display_reset_prepare(struct intel_display *display,
+ modeset_stuck_fn modeset_stuck, void *context);
+void intel_display_reset_finish(struct intel_display *display, bool test_only);
#endif /* __INTEL_RESET_H__ */
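
For reference, the reworked reset hooks above could be driven from the GT reset path roughly as sketched below. This is only an illustrative sketch and not part of the patch: my_gt, my_gt_set_wedged() and my_reset_clobbers_display() are placeholder names for driver-side pieces that are not shown in this diff; only the intel_display_reset_*() signatures and the "prepare returns true if finish is needed" contract come from the header change itself.

/* Hypothetical caller sketch, not part of this patch. */
static void my_modeset_stuck(void *context)
{
	struct my_gt *gt = context;

	/* unbreak a modeset vs. reset deadlock by wedging the GPU */
	my_gt_set_wedged(gt);
}

static void my_reset_with_display(struct intel_display *display, struct my_gt *gt)
{
	bool test_only = !my_reset_clobbers_display(gt);
	bool need_finish = false;

	/* the caller now decides whether the reset touches the display */
	if (intel_display_reset_test(display) || !test_only)
		need_finish = intel_display_reset_prepare(display,
							  my_modeset_stuck, gt);

	/* ... perform the actual GPU reset here ... */

	if (need_finish)
		intel_display_reset_finish(display, test_only);
}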
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.c b/drivers/gpu/drm/i915/display/intel_display_rps.c
index 918d0327169a..4074a1879828 100644
--- a/drivers/gpu/drm/i915/display/intel_display_rps.c
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.c
@@ -69,10 +69,12 @@ void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
}
-void intel_display_rps_mark_interactive(struct drm_i915_private *i915,
+void intel_display_rps_mark_interactive(struct intel_display *display,
struct intel_atomic_state *state,
bool interactive)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
if (state->rps_interactive == interactive)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.h b/drivers/gpu/drm/i915/display/intel_display_rps.h
index e19009c2371a..556891edb2dd 100644
--- a/drivers/gpu/drm/i915/display/intel_display_rps.h
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.h
@@ -10,12 +10,12 @@
struct dma_fence;
struct drm_crtc;
-struct drm_i915_private;
struct intel_atomic_state;
+struct intel_display;
void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
struct dma_fence *fence);
-void intel_display_rps_mark_interactive(struct drm_i915_private *i915,
+void intel_display_rps_mark_interactive(struct intel_display *display,
struct intel_atomic_state *state,
bool interactive);
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 4440521e3e9e..99a6fd2900b9 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -40,9 +40,9 @@
#include <drm/drm_rect.h>
#include <drm/drm_vblank_work.h>
#include <drm/intel/i915_hdcp_interface.h>
+#include <uapi/drm/i915_drm.h>
-#include "i915_vma.h"
-#include "i915_vma_types.h"
+#include "i915_gtt_view_types.h"
#include "intel_bios.h"
#include "intel_display.h"
#include "intel_display_conversion.h"
@@ -534,10 +534,6 @@ struct intel_connector {
state of connector->polled in case hotplug storm detection changes it */
u8 polled;
- struct drm_dp_mst_port *port;
-
- struct intel_dp *mst_port;
-
int force_joined_pipes;
struct {
@@ -549,6 +545,11 @@ struct intel_connector {
u8 dsc_decompression_enabled:1;
} dp;
+ struct {
+ struct drm_dp_mst_port *port;
+ struct intel_dp *dp;
+ } mst;
+
/* Work struct to schedule a uevent on link train failure */
struct work_struct modeset_retry_work;
@@ -692,6 +693,8 @@ struct intel_plane_state {
u64 ccval;
const char *no_fbc_reason;
+
+ struct drm_rect damage;
};
struct intel_initial_plane_config {
@@ -1724,7 +1727,6 @@ struct intel_dp {
struct intel_pps pps;
bool is_mst;
- int active_mst_links;
enum drm_dp_mst_mode mst_detect;
/* connector directly attached - won't be use for modeset in mst world */
@@ -1734,9 +1736,11 @@ struct intel_dp {
struct drm_dp_tunnel *tunnel;
bool tunnel_suspended:1;
- /* mst connector list */
- struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES];
- struct drm_dp_mst_topology_mgr mst_mgr;
+ struct {
+ struct intel_dp_mst_encoder *stream_encoders[I915_MAX_PIPES];
+ struct drm_dp_mst_topology_mgr mgr;
+ int active_links;
+ } mst;
u32 (*get_aux_clock_divider)(struct intel_dp *dp, int index);
/*
@@ -1847,16 +1851,18 @@ struct intel_digital_port {
struct intel_tc_port *tc;
- /* protects num_hdcp_streams reference count, hdcp_port_data and hdcp_auth_status */
- struct mutex hdcp_mutex;
- /* the number of pipes using HDCP signalling out of this port */
- unsigned int num_hdcp_streams;
- /* port HDCP auth status */
- bool hdcp_auth_status;
- /* HDCP port data need to pass to security f/w */
- struct hdcp_port_data hdcp_port_data;
- /* Whether the MST topology supports HDCP Type 1 Content */
- bool hdcp_mst_type1_capable;
+ struct {
+ /* protects num_streams reference count, port_data and auth_status */
+ struct mutex mutex;
+ /* the number of pipes using HDCP signalling out of this port */
+ unsigned int num_streams;
+ /* port HDCP auth status */
+ bool auth_status;
+ /* HDCP port data need to pass to security f/w */
+ struct hdcp_port_data port_data;
+ /* Whether the MST topology supports HDCP Type 1 Content */
+ bool mst_type1_capable;
+ } hdcp;
void (*write_infoframe)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -1955,8 +1961,8 @@ static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder)
static inline struct intel_dp *intel_attached_dp(struct intel_connector *connector)
{
- if (connector->mst_port)
- return connector->mst_port;
+ if (connector->mst.dp)
+ return connector->mst.dp;
else
return enc_to_intel_dp(intel_attached_encoder(connector));
}
@@ -2100,11 +2106,6 @@ intel_crtc_needs_color_update(const struct intel_crtc_state *crtc_state)
intel_crtc_needs_modeset(crtc_state);
}
-static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state)
-{
- return i915_ggtt_offset(plane_state->ggtt_vma);
-}
-
static inline struct intel_frontbuffer *
to_intel_frontbuffer(struct drm_framebuffer *fb)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.c b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
index 0920f78f182e..0813fb9b5823 100644
--- a/drivers/gpu/drm/i915/display/intel_dkl_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
@@ -3,7 +3,7 @@
* Copyright © 2022 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_device.h>
#include "intel_de.h"
#include "intel_display.h"
@@ -12,11 +12,11 @@
/**
* intel_dkl_phy_init - initialize Dekel PHY
- * @i915: i915 device instance
+ * @display: display device instance
*/
-void intel_dkl_phy_init(struct drm_i915_private *i915)
+void intel_dkl_phy_init(struct intel_display *display)
{
- spin_lock_init(&i915->display.dkl.phy_lock);
+ spin_lock_init(&display->dkl.phy_lock);
}
static void
diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.h b/drivers/gpu/drm/i915/display/intel_dkl_phy.h
index 1d96e6be657c..ccb445c0022b 100644
--- a/drivers/gpu/drm/i915/display/intel_dkl_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.h
@@ -10,10 +10,9 @@
#include "intel_dkl_phy_regs.h"
-struct drm_i915_private;
struct intel_display;
-void intel_dkl_phy_init(struct drm_i915_private *i915);
+void intel_dkl_phy_init(struct intel_display *display);
u32
intel_dkl_phy_read(struct intel_display *display, struct intel_dkl_phy_reg reg);
void
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 03ca2e02ab02..a236b5fc7a3d 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1376,7 +1376,7 @@ bool intel_dp_has_dsc(const struct intel_connector *connector)
if (!HAS_DSC(display))
return false;
- if (connector->mst_port && !HAS_DSC_MST(display))
+ if (connector->mst.dp && !HAS_DSC_MST(display))
return false;
if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
@@ -2912,7 +2912,7 @@ static bool can_enable_drrs(struct intel_connector *connector,
const struct intel_crtc_state *pipe_config,
const struct drm_display_mode *downclock_mode)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
if (pipe_config->vrr.enable)
return false;
@@ -2930,7 +2930,7 @@ static bool can_enable_drrs(struct intel_connector *connector,
if (pipe_config->has_pch_encoder)
return false;
- if (!intel_cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder))
+ if (!intel_cpu_transcoder_has_drrs(display, pipe_config->cpu_transcoder))
return false;
return downclock_mode &&
@@ -2943,7 +2943,6 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
int link_bpp_x16)
{
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *downclock_mode =
intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
int pixel_clock;
@@ -2956,7 +2955,7 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
pipe_config->update_m_n = true;
if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
- if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder))
+ if (intel_cpu_transcoder_has_m2_n2(display, pipe_config->cpu_transcoder))
intel_zero_m_n(&pipe_config->dp_m2_n2);
return;
}
@@ -3081,7 +3080,7 @@ intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
if (!conn_state->base.crtc)
continue;
- if (connector->mst_port == intel_dp)
+ if (connector->mst.dp == intel_dp)
intel_connector_queue_modeset_retry_work(connector);
}
}
@@ -3131,7 +3130,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if ((intel_dp_is_edp(intel_dp) && fixed_mode) ||
pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
- ret = intel_panel_fitting(pipe_config, conn_state);
+ ret = intel_pfit_compute_config(pipe_config, conn_state);
if (ret)
return ret;
}
@@ -3303,8 +3302,8 @@ intel_dp_sink_set_dsc_passthrough(const struct intel_connector *connector,
bool enable)
{
struct intel_display *display = to_intel_display(connector);
- struct drm_dp_aux *aux = connector->port ?
- connector->port->passthrough_aux : NULL;
+ struct drm_dp_aux *aux = connector->mst.port ?
+ connector->mst.port->passthrough_aux : NULL;
if (!aux)
return;
@@ -3331,7 +3330,7 @@ static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state,
* On SST the decompression AUX device won't be shared, each connector
* uses for this its own AUX targeting the sink device.
*/
- if (!connector->mst_port)
+ if (!connector->mst.dp)
return connector->dp.dsc_decompression_enabled ? 1 : 0;
for_each_oldnew_connector_in_state(&state->base, _connector_iter,
@@ -3339,7 +3338,7 @@ static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state,
const struct intel_connector *
connector_iter = to_intel_connector(_connector_iter);
- if (connector_iter->mst_port != connector->mst_port)
+ if (connector_iter->mst.dp != connector->mst.dp)
continue;
if (!connector_iter->dp.dsc_decompression_enabled)
@@ -4397,7 +4396,7 @@ intel_dp_mst_configure(struct intel_dp *intel_dp)
if (intel_dp->is_mst)
intel_dp_mst_prepare_probe(intel_dp);
- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst.mgr, intel_dp->is_mst);
/* Avoid stale info on the next detect cycle. */
intel_dp->mst_detect = DRM_DP_SST;
@@ -4413,9 +4412,9 @@ intel_dp_mst_disconnect(struct intel_dp *intel_dp)
drm_dbg_kms(display->drm,
"MST device may have disappeared %d vs %d\n",
- intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+ intel_dp->is_mst, intel_dp->mst.mgr.mst_state);
intel_dp->is_mst = false;
- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst.mgr, intel_dp->is_mst);
}
static bool
@@ -4921,7 +4920,7 @@ intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
{
bool handled = false;
- drm_dp_mst_hpd_irq_handle_event(&intel_dp->mst_mgr, esi, ack, &handled);
+ drm_dp_mst_hpd_irq_handle_event(&intel_dp->mst.mgr, esi, ack, &handled);
if (esi[1] & DP_CP_IRQ) {
intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
@@ -4970,7 +4969,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
bool link_ok = true;
bool reprobe_needed = false;
- drm_WARN_ON_ONCE(display->drm, intel_dp->active_mst_links < 0);
+ drm_WARN_ON_ONCE(display->drm, intel_dp->mst.active_links < 0);
for (;;) {
u8 esi[4] = {};
@@ -4986,7 +4985,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
drm_dbg_kms(display->drm, "DPRX ESI: %4ph\n", esi);
- if (intel_dp->active_mst_links > 0 && link_ok &&
+ if (intel_dp->mst.active_links > 0 && link_ok &&
esi[3] & LINK_STATUS_CHANGED) {
if (!intel_dp_mst_link_status(intel_dp))
link_ok = false;
@@ -5009,7 +5008,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
drm_dbg_kms(display->drm, "Failed to ack ESI\n");
if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY))
- drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr);
+ drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst.mgr);
}
if (!link_ok || intel_dp->link.force_retrain)
@@ -5108,7 +5107,7 @@ bool intel_dp_has_connector(struct intel_dp *intel_dp,
/* MST */
for_each_pipe(display, pipe) {
- encoder = &intel_dp->mst_encoders[pipe]->base;
+ encoder = &intel_dp->mst.stream_encoders[pipe]->base;
if (conn_state->best_encoder == &encoder->base)
return true;
}
@@ -5194,7 +5193,6 @@ static int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u8 pipe_mask;
int ret;
@@ -5225,7 +5223,7 @@ static int intel_dp_retrain_link(struct intel_encoder *encoder,
encoder->base.base.id, encoder->base.name,
str_yes_no(intel_dp->link.force_retrain));
- ret = intel_modeset_commit_pipes(dev_priv, pipe_mask, ctx);
+ ret = intel_modeset_commit_pipes(display, pipe_mask, ctx);
if (ret == -EDEADLK)
return ret;
@@ -6067,7 +6065,7 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
return ret;
if (intel_dp_mst_source_support(intel_dp)) {
- ret = drm_dp_mst_root_conn_atomic_check(conn_state, &intel_dp->mst_mgr);
+ ret = drm_dp_mst_root_conn_atomic_check(conn_state, &intel_dp->mst.mgr);
if (ret)
return ret;
}
@@ -6605,7 +6603,7 @@ void intel_dp_mst_suspend(struct intel_display *display)
continue;
if (intel_dp->is_mst)
- drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
+ drm_dp_mst_topology_mgr_suspend(&intel_dp->mst.mgr);
}
}
@@ -6628,12 +6626,10 @@ void intel_dp_mst_resume(struct intel_display *display)
if (!intel_dp_mst_source_support(intel_dp))
continue;
- ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
- true);
+ ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst.mgr, true);
if (ret) {
intel_dp->is_mst = false;
- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
- false);
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst.mgr, false);
}
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index f53c8355d5be..8173de8aec63 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -34,6 +34,8 @@
* for some reason.
*/
+#include <drm/drm_print.h>
+
#include "i915_utils.h"
#include "intel_backlight.h"
#include "intel_display_core.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index 00c493cc8a4b..cc312596fb77 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -705,10 +705,10 @@ int intel_dp_hdcp_get_remote_capability(struct intel_connector *connector,
*hdcp_capable = false;
*hdcp2_capable = false;
- if (!connector->mst_port)
+ if (!connector->mst.dp)
return -EINVAL;
- aux = &connector->port->aux;
+ aux = &connector->mst.port->aux;
ret = _intel_dp_hdcp2_get_capability(aux, hdcp2_capable);
if (ret)
drm_dbg_kms(display->drm,
@@ -799,7 +799,7 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
enum transcoder cpu_transcoder = hdcp->stream_transcoder;
enum pipe pipe = (enum pipe)cpu_transcoder;
@@ -883,7 +883,7 @@ int intel_dp_hdcp_init(struct intel_digital_port *dig_port,
if (!is_hdcp_supported(display, port))
return 0;
- if (intel_connector->mst_port)
+ if (intel_connector->mst.dp)
return intel_hdcp_init(intel_connector, dig_port,
&intel_dp_mst_hdcp_shim);
else if (!intel_dp_is_edp(intel_dp))
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 66fcd90f0028..2966f5b39392 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -24,6 +24,7 @@
#include <linux/debugfs.h>
#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_print.h>
#include "i915_utils.h"
#include "intel_display_core.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 167e4a70ab12..02f95108c637 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -49,6 +49,7 @@
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_link_bw.h"
+#include "intel_pfit.h"
#include "intel_psr.h"
#include "intel_vdsc.h"
#include "skl_scaler.h"
@@ -252,7 +253,7 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
fxp_q4_to_frac(bpp_step_x16)));
if (is_mst) {
- mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
+ mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst.mgr);
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
@@ -354,8 +355,8 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
drm_WARN_ON(display->drm, remote_tu < crtc_state->dp_m_n.tu);
crtc_state->dp_m_n.tu = remote_tu;
- slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
- connector->port,
+ slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst.mgr,
+ connector->mst.port,
dfixed_trunc(pbn));
} else {
/* Same as above for remote_tu */
@@ -478,7 +479,7 @@ static int mst_stream_update_slots(struct intel_dp *intel_dp,
struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
+ struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst.mgr;
struct drm_dp_mst_topology_state *topology_state;
u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ?
DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B;
@@ -508,8 +509,8 @@ hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector,
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- bool is_uhbr_sink = connector->mst_port &&
- drm_dp_128b132b_supported(connector->mst_port->dpcd);
+ bool is_uhbr_sink = connector->mst.dp &&
+ drm_dp_128b132b_supported(connector->mst.dp->dpcd);
int hblank_limit = is_uhbr_sink ? 500 : 300;
if (!connector->dp.dsc_hblank_expansion_quirk)
@@ -740,7 +741,7 @@ intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
- if (connector->mst_port != mst_port || !conn_state->base.crtc)
+ if (connector->mst.dp != mst_port || !conn_state->base.crtc)
continue;
crtc = to_intel_crtc(conn_state->base.crtc);
@@ -768,12 +769,12 @@ static u8 get_pipes_downstream_of_mst_port(struct intel_atomic_state *state,
if (!conn_state->base.crtc)
continue;
- if (&connector->mst_port->mst_mgr != mst_mgr)
+ if (&connector->mst.dp->mst.mgr != mst_mgr)
continue;
- if (connector->port != parent_port &&
+ if (connector->mst.port != parent_port &&
!drm_dp_mst_port_downstream_of_parent(mst_mgr,
- connector->port,
+ connector->mst.port,
parent_port))
continue;
@@ -924,7 +925,7 @@ mst_connector_atomic_topology_check(struct intel_connector *connector,
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
- if (connector_iter->mst_port != connector->mst_port ||
+ if (connector_iter->mst.dp != connector->mst.dp ||
connector_iter == connector)
continue;
@@ -973,15 +974,15 @@ mst_connector_atomic_check(struct drm_connector *_connector,
if (intel_connector_needs_modeset(state, &connector->base)) {
ret = intel_dp_tunnel_atomic_check_state(state,
- connector->mst_port,
+ connector->mst.dp,
connector);
if (ret)
return ret;
}
return drm_dp_atomic_release_time_slots(&state->base,
- &connector->mst_port->mst_mgr,
- connector->port);
+ &connector->mst.dp->mst.mgr,
+ connector->mst.port);
}
static void mst_stream_disable(struct intel_atomic_state *state,
@@ -997,9 +998,9 @@ static void mst_stream_disable(struct intel_atomic_state *state,
enum transcoder trans = old_crtc_state->cpu_transcoder;
drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->active_mst_links);
+ intel_dp->mst.active_links);
- if (intel_dp->active_mst_links == 1)
+ if (intel_dp->mst.active_links == 1)
intel_dp->link_trained = false;
intel_hdcp_disable(intel_mst->connector);
@@ -1022,19 +1023,19 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
struct drm_dp_mst_topology_state *old_mst_state =
- drm_atomic_get_old_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ drm_atomic_get_old_mst_topology_state(&state->base, &intel_dp->mst.mgr);
struct drm_dp_mst_topology_state *new_mst_state =
- drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst.mgr);
const struct drm_dp_mst_atomic_payload *old_payload =
- drm_atomic_get_mst_payload_state(old_mst_state, connector->port);
+ drm_atomic_get_mst_payload_state(old_mst_state, connector->mst.port);
struct drm_dp_mst_atomic_payload *new_payload =
- drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
+ drm_atomic_get_mst_payload_state(new_mst_state, connector->mst.port);
struct intel_crtc *pipe_crtc;
bool last_mst_stream;
int i;
- intel_dp->active_mst_links--;
- last_mst_stream = intel_dp->active_mst_links == 0;
+ intel_dp->mst.active_links--;
+ last_mst_stream = intel_dp->mst.active_links == 0;
drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && last_mst_stream &&
!intel_dp_mst_is_master_trans(old_crtc_state));
@@ -1047,7 +1048,7 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
intel_disable_transcoder(old_crtc_state);
- drm_dp_remove_payload_part1(&intel_dp->mst_mgr, new_mst_state, new_payload);
+ drm_dp_remove_payload_part1(&intel_dp->mst.mgr, new_mst_state, new_payload);
intel_ddi_clear_act_sent(encoder, old_crtc_state);
@@ -1056,9 +1057,9 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
TRANS_DDI_DP_VC_PAYLOAD_ALLOC, 0);
intel_ddi_wait_for_act_sent(encoder, old_crtc_state);
- drm_dp_check_act_status(&intel_dp->mst_mgr);
+ drm_dp_check_act_status(&intel_dp->mst.mgr);
- drm_dp_remove_payload_part2(&intel_dp->mst_mgr, new_mst_state,
+ drm_dp_remove_payload_part2(&intel_dp->mst.mgr, new_mst_state,
old_payload, new_payload);
intel_ddi_disable_transcoder_func(old_crtc_state);
@@ -1079,7 +1080,7 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
* Power down mst path before disabling the port, otherwise we end
* up getting interrupts from the sink upon detecting link loss.
*/
- drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
+ drm_dp_send_power_updown_phy(&intel_dp->mst.mgr, connector->mst.port,
false);
/*
@@ -1104,7 +1105,7 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
old_crtc_state, NULL);
drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->active_mst_links);
+ intel_dp->mst.active_links);
}
static void mst_stream_post_pll_disable(struct intel_atomic_state *state,
@@ -1115,7 +1116,7 @@ static void mst_stream_post_pll_disable(struct intel_atomic_state *state,
struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
struct intel_dp *intel_dp = to_primary_dp(encoder);
- if (intel_dp->active_mst_links == 0 &&
+ if (intel_dp->mst.active_links == 0 &&
primary_encoder->post_pll_disable)
primary_encoder->post_pll_disable(state, primary_encoder, old_crtc_state, old_conn_state);
}
@@ -1128,7 +1129,7 @@ static void mst_stream_pre_pll_enable(struct intel_atomic_state *state,
struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
struct intel_dp *intel_dp = to_primary_dp(encoder);
- if (intel_dp->active_mst_links == 0)
+ if (intel_dp->mst.active_links == 0)
primary_encoder->pre_pll_enable(state, primary_encoder,
pipe_config, NULL);
else
@@ -1161,7 +1162,7 @@ static void intel_mst_reprobe_topology(struct intel_dp *intel_dp,
crtc_state->port_clock, crtc_state->lane_count))
return;
- drm_dp_mst_topology_queue_probe(&intel_dp->mst_mgr);
+ drm_dp_mst_topology_queue_probe(&intel_dp->mst.mgr);
intel_mst_set_probed_link_params(intel_dp,
crtc_state->port_clock, crtc_state->lane_count);
@@ -1179,7 +1180,7 @@ static void mst_stream_pre_enable(struct intel_atomic_state *state,
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct drm_dp_mst_topology_state *mst_state =
- drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst.mgr);
int ret;
bool first_mst_stream;
@@ -1188,17 +1189,17 @@ static void mst_stream_pre_enable(struct intel_atomic_state *state,
*/
connector->encoder = encoder;
intel_mst->connector = connector;
- first_mst_stream = intel_dp->active_mst_links == 0;
+ first_mst_stream = intel_dp->mst.active_links == 0;
drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && first_mst_stream &&
!intel_dp_mst_is_master_trans(pipe_config));
drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->active_mst_links);
+ intel_dp->mst.active_links);
if (first_mst_stream)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
- drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
+ drm_dp_send_power_updown_phy(&intel_dp->mst.mgr, connector->mst.port, true);
intel_dp_sink_enable_decompression(state, connector, pipe_config);
@@ -1209,10 +1210,10 @@ static void mst_stream_pre_enable(struct intel_atomic_state *state,
intel_mst_reprobe_topology(intel_dp, pipe_config);
}
- intel_dp->active_mst_links++;
+ intel_dp->mst.active_links++;
- ret = drm_dp_add_payload_part1(&intel_dp->mst_mgr, mst_state,
- drm_atomic_get_mst_payload_state(mst_state, connector->port));
+ ret = drm_dp_add_payload_part1(&intel_dp->mst.mgr, mst_state,
+ drm_atomic_get_mst_payload_state(mst_state, connector->mst.port));
if (ret < 0)
intel_dp_queue_modeset_retry_for_link(state, primary_encoder, pipe_config);
@@ -1276,9 +1277,9 @@ static void mst_stream_enable(struct intel_atomic_state *state,
struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_dp_mst_topology_state *mst_state =
- drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst.mgr);
enum transcoder trans = pipe_config->cpu_transcoder;
- bool first_mst_stream = intel_dp->active_mst_links == 1;
+ bool first_mst_stream = intel_dp->mst.active_links == 1;
struct intel_crtc *pipe_crtc;
int ret, i, min_hblank;
@@ -1328,17 +1329,17 @@ static void mst_stream_enable(struct intel_atomic_state *state,
TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->active_mst_links);
+ intel_dp->mst.active_links);
intel_ddi_wait_for_act_sent(encoder, pipe_config);
- drm_dp_check_act_status(&intel_dp->mst_mgr);
+ drm_dp_check_act_status(&intel_dp->mst.mgr);
if (first_mst_stream)
intel_ddi_wait_for_fec_status(encoder, pipe_config, true);
- ret = drm_dp_add_payload_part2(&intel_dp->mst_mgr,
+ ret = drm_dp_add_payload_part2(&intel_dp->mst.mgr,
drm_atomic_get_mst_payload_state(mst_state,
- connector->port));
+ connector->mst.port));
if (ret < 0)
intel_dp_queue_modeset_retry_for_link(state, primary_encoder, pipe_config);
@@ -1391,7 +1392,7 @@ static int mst_connector_get_ddc_modes(struct drm_connector *_connector)
{
struct intel_connector *connector = to_intel_connector(_connector);
struct intel_display *display = to_intel_display(connector);
- struct intel_dp *intel_dp = connector->mst_port;
+ struct intel_dp *intel_dp = connector->mst.dp;
const struct drm_edid *drm_edid;
int ret;
@@ -1401,7 +1402,7 @@ static int mst_connector_get_ddc_modes(struct drm_connector *_connector)
if (!intel_display_driver_check_access(display))
return drm_edid_connector_add_modes(&connector->base);
- drm_edid = drm_dp_mst_edid_read(&connector->base, &intel_dp->mst_mgr, connector->port);
+ drm_edid = drm_dp_mst_edid_read(&connector->base, &intel_dp->mst.mgr, connector->mst.port);
ret = intel_connector_update_modes(&connector->base, drm_edid);
@@ -1416,13 +1417,13 @@ mst_connector_late_register(struct drm_connector *_connector)
struct intel_connector *connector = to_intel_connector(_connector);
int ret;
- ret = drm_dp_mst_connector_late_register(&connector->base, connector->port);
+ ret = drm_dp_mst_connector_late_register(&connector->base, connector->mst.port);
if (ret < 0)
return ret;
ret = intel_connector_register(&connector->base);
if (ret < 0)
- drm_dp_mst_connector_early_unregister(&connector->base, connector->port);
+ drm_dp_mst_connector_early_unregister(&connector->base, connector->mst.port);
return ret;
}
@@ -1433,7 +1434,7 @@ mst_connector_early_unregister(struct drm_connector *_connector)
struct intel_connector *connector = to_intel_connector(_connector);
intel_connector_unregister(&connector->base);
- drm_dp_mst_connector_early_unregister(&connector->base, connector->port);
+ drm_dp_mst_connector_early_unregister(&connector->base, connector->mst.port);
}
static const struct drm_connector_funcs mst_connector_funcs = {
@@ -1462,9 +1463,9 @@ mst_connector_mode_valid_ctx(struct drm_connector *_connector,
{
struct intel_connector *connector = to_intel_connector(_connector);
struct intel_display *display = to_intel_display(connector);
- struct intel_dp *intel_dp = connector->mst_port;
- struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
- struct drm_dp_mst_port *port = connector->port;
+ struct intel_dp *intel_dp = connector->mst.dp;
+ struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst.mgr;
+ struct drm_dp_mst_port *port = connector->mst.port;
const int min_bpp = 18;
int max_dotclk = display->cdclk.max_dotclk_freq;
int max_rate, mode_rate, max_lanes, max_link_clock;
@@ -1575,10 +1576,10 @@ mst_connector_atomic_best_encoder(struct drm_connector *_connector,
struct intel_connector *connector = to_intel_connector(_connector);
struct drm_connector_state *connector_state =
drm_atomic_get_new_connector_state(state, &connector->base);
- struct intel_dp *intel_dp = connector->mst_port;
+ struct intel_dp *intel_dp = connector->mst.dp;
struct intel_crtc *crtc = to_intel_crtc(connector_state->crtc);
- return &intel_dp->mst_encoders[crtc->pipe]->base.base;
+ return &intel_dp->mst.stream_encoders[crtc->pipe]->base.base;
}
static int
@@ -1587,7 +1588,7 @@ mst_connector_detect_ctx(struct drm_connector *_connector,
{
struct intel_connector *connector = to_intel_connector(_connector);
struct intel_display *display = to_intel_display(connector);
- struct intel_dp *intel_dp = connector->mst_port;
+ struct intel_dp *intel_dp = connector->mst.dp;
if (!intel_display_device_enabled(display))
return connector_status_disconnected;
@@ -1600,8 +1601,8 @@ mst_connector_detect_ctx(struct drm_connector *_connector,
intel_dp_flush_connector_commits(connector);
- return drm_dp_mst_detect_port(&connector->base, ctx, &intel_dp->mst_mgr,
- connector->port);
+ return drm_dp_mst_detect_port(&connector->base, ctx, &intel_dp->mst.mgr,
+ connector->mst.port);
}
static const struct drm_connector_helper_funcs mst_connector_helper_funcs = {
@@ -1692,10 +1693,10 @@ static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *conn
* A logical port's OUI (at least for affected sinks) is all 0, so
* instead of that the parent port's OUI is used for identification.
*/
- if (drm_dp_mst_port_is_logical(connector->port)) {
- aux = drm_dp_mst_aux_for_parent(connector->port);
+ if (drm_dp_mst_port_is_logical(connector->mst.port)) {
+ aux = drm_dp_mst_aux_for_parent(connector->mst.port);
if (!aux)
- aux = &connector->mst_port->aux;
+ aux = &connector->mst.dp->aux;
}
if (drm_dp_read_dpcd_caps(aux, dpcd) < 0)
@@ -1730,7 +1731,7 @@ mst_topology_add_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
const char *pathprop)
{
- struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+ struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst.mgr);
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_connector *connector;
@@ -1743,8 +1744,8 @@ mst_topology_add_connector(struct drm_dp_mst_topology_mgr *mgr,
connector->get_hw_state = mst_connector_get_hw_state;
connector->sync_state = intel_dp_connector_sync_state;
- connector->mst_port = intel_dp;
- connector->port = port;
+ connector->mst.dp = intel_dp;
+ connector->mst.port = port;
drm_dp_mst_get_port_malloc(port);
ret = drm_connector_dynamic_init(display->drm, &connector->base, &mst_connector_funcs,
@@ -1761,7 +1762,7 @@ mst_topology_add_connector(struct drm_dp_mst_topology_mgr *mgr,
for_each_pipe(display, pipe) {
struct drm_encoder *enc =
- &intel_dp->mst_encoders[pipe]->base.base;
+ &intel_dp->mst.stream_encoders[pipe]->base.base;
ret = drm_connector_attach_encoder(&connector->base, enc);
if (ret)
@@ -1791,7 +1792,7 @@ err_put_port:
static void
mst_topology_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
{
- struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+ struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst.mgr);
intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
}
@@ -1864,14 +1865,14 @@ mst_stream_encoders_create(struct intel_digital_port *dig_port)
enum pipe pipe;
for_each_pipe(display, pipe)
- intel_dp->mst_encoders[pipe] = mst_stream_encoder_create(dig_port, pipe);
+ intel_dp->mst.stream_encoders[pipe] = mst_stream_encoder_create(dig_port, pipe);
return true;
}
int
intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port)
{
- return dig_port->dp.active_mst_links;
+ return dig_port->dp.mst.active_links;
}
int
@@ -1891,14 +1892,15 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
if (DISPLAY_VER(display) < 11 && port == PORT_E)
return 0;
- intel_dp->mst_mgr.cbs = &mst_topology_cbs;
+ intel_dp->mst.mgr.cbs = &mst_topology_cbs;
/* create encoders */
mst_stream_encoders_create(dig_port);
- ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, display->drm,
- &intel_dp->aux, 16, 3, conn_base_id);
+ ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst.mgr, display->drm,
+ &intel_dp->aux, 16,
+ INTEL_NUM_PIPES(display), conn_base_id);
if (ret) {
- intel_dp->mst_mgr.cbs = NULL;
+ intel_dp->mst.mgr.cbs = NULL;
return ret;
}
@@ -1907,7 +1909,7 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
bool intel_dp_mst_source_support(struct intel_dp *intel_dp)
{
- return intel_dp->mst_mgr.cbs;
+ return intel_dp->mst.mgr.cbs;
}
void
@@ -1918,10 +1920,10 @@ intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port)
if (!intel_dp_mst_source_support(intel_dp))
return;
- drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
+ drm_dp_mst_topology_mgr_destroy(&intel_dp->mst.mgr);
/* encoders will get killed by normal cleanup */
- intel_dp->mst_mgr.cbs = NULL;
+ intel_dp->mst.mgr.cbs = NULL;
}
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state)
@@ -1952,11 +1954,11 @@ intel_dp_mst_add_topology_state_for_connector(struct intel_atomic_state *state,
{
struct drm_dp_mst_topology_state *mst_state;
- if (!connector->mst_port)
+ if (!connector->mst.dp)
return 0;
mst_state = drm_atomic_get_mst_topology_state(&state->base,
- &connector->mst_port->mst_mgr);
+ &connector->mst.dp->mst.mgr);
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
@@ -2054,7 +2056,7 @@ bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state;
struct intel_crtc *crtc_iter;
- if (connector->mst_port != crtc_connector->mst_port ||
+ if (connector->mst.dp != crtc_connector->mst.dp ||
!conn_state->crtc)
continue;
@@ -2077,7 +2079,7 @@ bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
* case.
*/
if (connector->dp.dsc_decompression_aux ==
- &connector->mst_port->aux)
+ &connector->mst.dp->aux)
return true;
}
@@ -2138,7 +2140,7 @@ bool intel_dp_mst_verify_dpcd_state(struct intel_dp *intel_dp)
if (!intel_dp->is_mst)
return true;
- ret = drm_dp_dpcd_readb(intel_dp->mst_mgr.aux, DP_MSTM_CTRL, &val);
+ ret = drm_dp_dpcd_readb(intel_dp->mst.mgr.aux, DP_MSTM_CTRL, &val);
/* Adjust the expected register value for SST + SideBand. */
if (ret < 0 || val != (DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC)) {
diff --git a/drivers/gpu/drm/i915/display/intel_dp_test.c b/drivers/gpu/drm/i915/display/intel_dp_test.c
index 614b90d6938f..bd61f3c3ec91 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_test.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_test.c
@@ -6,6 +6,8 @@
#include <drm/display/drm_dp.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "i915_reg.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
index 280f302967e3..faa2b7a46699 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
@@ -4,6 +4,7 @@
*/
#include <drm/display/drm_dp_tunnel.h>
+#include <drm/drm_print.h>
#include "intel_atomic.h"
#include "intel_display_core.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 8b1f0e92a11c..0d8ebe38226e 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -125,6 +125,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
unsigned int alignment)
{
struct drm_i915_private *i915 = vm->i915;
+ struct intel_display *display = &i915->display;
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
intel_wakeref_t wakeref;
struct i915_vma *vma;
@@ -137,7 +138,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
pin_flags |= PIN_MAPPABLE;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- atomic_inc(&i915->gpu_error.pending_fb_pin);
+ atomic_inc(&display->restore.pending_fb_pin);
for_i915_gem_ww(&ww, err, true) {
err = i915_gem_object_lock(dpt->obj, &ww);
@@ -167,7 +168,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
dpt->obj->mm.dirty = true;
- atomic_dec(&i915->gpu_error.pending_fb_pin);
+ atomic_dec(&display->restore.pending_fb_pin);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return err ? ERR_PTR(err) : vma;
@@ -183,7 +184,7 @@ void intel_dpt_unpin_from_ggtt(struct i915_address_space *vm)
/**
* intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume
- * @i915: device instance
+ * @display: display device instance
*
* Restore the memory mapping during system resume for all framebuffers which
* are mapped to HW via a GGTT->DPT page table. The content of these page
@@ -193,26 +194,26 @@ void intel_dpt_unpin_from_ggtt(struct i915_address_space *vm)
* This function must be called after the mappings in GGTT have been restored calling
* i915_ggtt_resume().
*/
-void intel_dpt_resume(struct drm_i915_private *i915)
+void intel_dpt_resume(struct intel_display *display)
{
struct drm_framebuffer *drm_fb;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- mutex_lock(&i915->drm.mode_config.fb_lock);
- drm_for_each_fb(drm_fb, &i915->drm) {
+ mutex_lock(&display->drm->mode_config.fb_lock);
+ drm_for_each_fb(drm_fb, display->drm) {
struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
if (fb->dpt_vm)
i915_ggtt_resume_vm(fb->dpt_vm, true);
}
- mutex_unlock(&i915->drm.mode_config.fb_lock);
+ mutex_unlock(&display->drm->mode_config.fb_lock);
}
/**
* intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend
- * @i915: device instance
+ * @display: display device instance
*
* Suspend the memory mapping during system suspend for all framebuffers which
* are mapped to HW via a GGTT->DPT page table.
@@ -220,23 +221,23 @@ void intel_dpt_resume(struct drm_i915_private *i915)
* This function must be called before the mappings in GGTT are suspended calling
* i915_ggtt_suspend().
*/
-void intel_dpt_suspend(struct drm_i915_private *i915)
+void intel_dpt_suspend(struct intel_display *display)
{
struct drm_framebuffer *drm_fb;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- mutex_lock(&i915->drm.mode_config.fb_lock);
+ mutex_lock(&display->drm->mode_config.fb_lock);
- drm_for_each_fb(drm_fb, &i915->drm) {
+ drm_for_each_fb(drm_fb, display->drm) {
struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
if (fb->dpt_vm)
i915_ggtt_suspend_vm(fb->dpt_vm, true);
}
- mutex_unlock(&i915->drm.mode_config.fb_lock);
+ mutex_unlock(&display->drm->mode_config.fb_lock);
}
struct i915_address_space *
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.h b/drivers/gpu/drm/i915/display/intel_dpt.h
index 1f88b0ee17e7..db521401b828 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.h
+++ b/drivers/gpu/drm/i915/display/intel_dpt.h
@@ -8,18 +8,17 @@
#include <linux/types.h>
-struct drm_i915_private;
-
struct i915_address_space;
struct i915_vma;
+struct intel_display;
struct intel_framebuffer;
void intel_dpt_destroy(struct i915_address_space *vm);
struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
unsigned int alignment);
void intel_dpt_unpin_from_ggtt(struct i915_address_space *vm);
-void intel_dpt_suspend(struct drm_i915_private *i915);
-void intel_dpt_resume(struct drm_i915_private *i915);
+void intel_dpt_suspend(struct intel_display *display);
+void intel_dpt_resume(struct intel_display *display);
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb);
u64 intel_dpt_offset(struct i915_vma *dpt_vma);
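
As the updated kerneldoc above states, intel_dpt_resume() must run after i915_ggtt_resume() and intel_dpt_suspend() before i915_ggtt_suspend(). A minimal ordering sketch, illustrative only and not part of the patch (the my_display_pm_*() wrappers are placeholders; the GGTT helpers are assumed to be the existing i915 ones):

/* Hypothetical suspend/resume ordering sketch, not part of this patch. */
static void my_display_pm_suspend(struct drm_i915_private *i915)
{
	struct intel_display *display = &i915->display;

	/* tear down DPT mappings before the GGTT itself is suspended */
	intel_dpt_suspend(display);
	i915_ggtt_suspend(to_gt(i915)->ggtt);
}

static void my_display_pm_resume(struct drm_i915_private *i915)
{
	struct intel_display *display = &i915->display;

	/* GGTT mappings must be back before the DPT page tables are restored */
	i915_ggtt_resume(to_gt(i915)->ggtt);
	intel_dpt_resume(display);
}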
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 0fec01b79b23..05cd0f6e6d71 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -65,31 +65,29 @@ const char *intel_drrs_type_str(enum drrs_type drrs_type)
return str[drrs_type];
}
-bool intel_cpu_transcoder_has_drrs(struct drm_i915_private *i915,
+bool intel_cpu_transcoder_has_drrs(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- struct intel_display *display = &i915->display;
-
if (HAS_DOUBLE_BUFFERED_M_N(display))
return true;
- return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
+ return intel_cpu_transcoder_has_m2_n2(display, cpu_transcoder);
}
static void
intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc,
enum drrs_refresh_rate refresh_rate)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum transcoder cpu_transcoder = crtc->drrs.cpu_transcoder;
u32 bit;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.valleyview || display->platform.cherryview)
bit = TRANSCONF_REFRESH_RATE_ALT_VLV;
else
bit = TRANSCONF_REFRESH_RATE_ALT_ILK;
- intel_de_rmw(dev_priv, TRANSCONF(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, TRANSCONF(display, cpu_transcoder),
bit, refresh_rate == DRRS_REFRESH_RATE_LOW ? bit : 0);
}
@@ -110,12 +108,12 @@ bool intel_drrs_is_active(struct intel_crtc *crtc)
static void intel_drrs_set_state(struct intel_crtc *crtc,
enum drrs_refresh_rate refresh_rate)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
if (refresh_rate == crtc->drrs.refresh_rate)
return;
- if (intel_cpu_transcoder_has_m2_n2(dev_priv, crtc->drrs.cpu_transcoder))
+ if (intel_cpu_transcoder_has_m2_n2(display, crtc->drrs.cpu_transcoder))
intel_drrs_set_refresh_rate_pipeconf(crtc, refresh_rate);
else
intel_drrs_set_refresh_rate_m_n(crtc, refresh_rate);
@@ -132,13 +130,13 @@ static void intel_drrs_schedule_work(struct intel_crtc *crtc)
static unsigned int intel_drrs_frontbuffer_bits(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
unsigned int frontbuffer_bits;
frontbuffer_bits = INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc,
crtc_state->joiner_pipes)
frontbuffer_bits |= INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe);
@@ -222,13 +220,13 @@ static void intel_drrs_downclock_work(struct work_struct *work)
mutex_unlock(&crtc->drrs.mutex);
}
-static void intel_drrs_frontbuffer_update(struct drm_i915_private *dev_priv,
+static void intel_drrs_frontbuffer_update(struct intel_display *display,
unsigned int all_frontbuffer_bits,
bool invalidate)
{
struct intel_crtc *crtc;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
unsigned int frontbuffer_bits;
mutex_lock(&crtc->drrs.mutex);
@@ -262,7 +260,7 @@ static void intel_drrs_frontbuffer_update(struct drm_i915_private *dev_priv,
/**
* intel_drrs_invalidate - Disable Idleness DRRS
- * @dev_priv: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called everytime rendering on the given planes start.
@@ -270,15 +268,15 @@ static void intel_drrs_frontbuffer_update(struct drm_i915_private *dev_priv,
*
* Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
*/
-void intel_drrs_invalidate(struct drm_i915_private *dev_priv,
+void intel_drrs_invalidate(struct intel_display *display,
unsigned int frontbuffer_bits)
{
- intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, true);
+ intel_drrs_frontbuffer_update(display, frontbuffer_bits, true);
}
/**
* intel_drrs_flush - Restart Idleness DRRS
- * @dev_priv: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called every time rendering on the given planes has
@@ -288,10 +286,10 @@ void intel_drrs_invalidate(struct drm_i915_private *dev_priv,
*
* Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
*/
-void intel_drrs_flush(struct drm_i915_private *dev_priv,
+void intel_drrs_flush(struct intel_display *display,
unsigned int frontbuffer_bits)
{
- intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, false);
+ intel_drrs_frontbuffer_update(display, frontbuffer_bits, false);
}
/**
@@ -312,7 +310,7 @@ void intel_drrs_crtc_init(struct intel_crtc *crtc)
static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_crtc *crtc = m->private;
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state;
int ret;
@@ -325,7 +323,7 @@ static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
mutex_lock(&crtc->drrs.mutex);
seq_printf(m, "DRRS capable: %s\n",
- str_yes_no(intel_cpu_transcoder_has_drrs(i915,
+ str_yes_no(intel_cpu_transcoder_has_drrs(display,
crtc_state->cpu_transcoder)));
seq_printf(m, "DRRS enabled: %s\n",
@@ -353,7 +351,7 @@ DEFINE_SHOW_ATTRIBUTE(intel_drrs_debugfs_status);
static int intel_drrs_debugfs_ctl_set(void *data, u64 val)
{
struct intel_crtc *crtc = data;
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state;
struct drm_crtc_commit *commit;
int ret;
@@ -375,8 +373,7 @@ static int intel_drrs_debugfs_ctl_set(void *data, u64 val)
goto out;
}
- drm_dbg(&i915->drm,
- "Manually %sactivating DRRS\n", val ? "" : "de");
+ drm_dbg_kms(display->drm, "Manually %sactivating DRRS\n", val ? "" : "de");
if (val)
intel_drrs_activate(crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.h b/drivers/gpu/drm/i915/display/intel_drrs.h
index 0982f95eab72..32b45a93a68f 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.h
+++ b/drivers/gpu/drm/i915/display/intel_drrs.h
@@ -10,21 +10,21 @@
enum drrs_type;
enum transcoder;
-struct drm_i915_private;
struct intel_atomic_state;
+struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
-struct intel_connector;
+struct intel_display;
-bool intel_cpu_transcoder_has_drrs(struct drm_i915_private *i915,
+bool intel_cpu_transcoder_has_drrs(struct intel_display *display,
enum transcoder cpu_transcoder);
const char *intel_drrs_type_str(enum drrs_type drrs_type);
bool intel_drrs_is_active(struct intel_crtc *crtc);
void intel_drrs_activate(const struct intel_crtc_state *crtc_state);
void intel_drrs_deactivate(const struct intel_crtc_state *crtc_state);
-void intel_drrs_invalidate(struct drm_i915_private *dev_priv,
+void intel_drrs_invalidate(struct intel_display *display,
unsigned int frontbuffer_bits);
-void intel_drrs_flush(struct drm_i915_private *dev_priv,
+void intel_drrs_flush(struct intel_display *display,
unsigned int frontbuffer_bits);
void intel_drrs_crtc_init(struct intel_crtc *crtc);
void intel_drrs_crtc_debugfs_add(struct intel_crtc *crtc);
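As a usage note for the new signatures above, a frontbuffer-tracking caller now passes the display pointer instead of the i915 device. The sketch below is illustrative only; it assumes the usual i915 display headers and that to_intel_display() can be applied to the caller's drm_device, and it does not reproduce the real call site in intel_frontbuffer.c.

/* Illustrative caller sketch (not part of this series): forward frontbuffer
 * invalidate/flush events to DRRS using the intel_display-based API. */
static void example_frontbuffer_event(struct drm_device *drm,
				      unsigned int frontbuffer_bits,
				      bool invalidate)
{
	struct intel_display *display = to_intel_display(drm);

	if (invalidate)
		intel_drrs_invalidate(display, frontbuffer_bits);
	else
		intel_drrs_flush(display, frontbuffer_bits);
}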
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 204e7e3e48ca..30ac9b089ad6 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -25,6 +25,7 @@ intel_fb_pin_to_dpt(const struct drm_framebuffer *fb,
struct i915_address_space *vm)
{
struct drm_device *dev = fb->dev;
+ struct intel_display *display = to_intel_display(dev);
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_gem_object *_obj = intel_fb_bo(fb);
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
@@ -42,7 +43,7 @@ intel_fb_pin_to_dpt(const struct drm_framebuffer *fb,
if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
return ERR_PTR(-EINVAL);
- atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+ atomic_inc(&display->restore.pending_fb_pin);
for_i915_gem_ww(&ww, ret, true) {
ret = i915_gem_object_lock(obj, &ww);
@@ -97,7 +98,7 @@ intel_fb_pin_to_dpt(const struct drm_framebuffer *fb,
i915_vma_get(vma);
err:
- atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+ atomic_dec(&display->restore.pending_fb_pin);
return vma;
}
@@ -112,6 +113,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
unsigned long *out_flags)
{
struct drm_device *dev = fb->dev;
+ struct intel_display *display = to_intel_display(dev);
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_gem_object *_obj = intel_fb_bo(fb);
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
@@ -136,7 +138,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
*/
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+ atomic_inc(&display->restore.pending_fb_pin);
/*
* Valleyview is definitely limited to scanning out the first
@@ -212,7 +214,7 @@ err:
if (ret)
vma = ERR_PTR(ret);
- atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+ atomic_dec(&display->restore.pending_fb_pin);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return vma;
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index df05904bac8a..b6978135e8ad 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -88,6 +88,7 @@ struct intel_fbc_state {
u16 override_cfb_stride;
u16 interval;
s8 fence_id;
+ struct drm_rect dirty_rect;
};
struct intel_fbc {
@@ -215,11 +216,9 @@ static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_s
*/
static unsigned int intel_fbc_max_cfb_height(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 8)
return 2560;
- else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915))
+ else if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
return 2048;
else
return 1536;
@@ -269,9 +268,8 @@ static bool intel_fbc_has_fences(struct intel_display *display)
static u32 i8xx_fbc_ctl(struct intel_fbc *fbc)
{
- const struct intel_fbc_state *fbc_state = &fbc->state;
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
+ const struct intel_fbc_state *fbc_state = &fbc->state;
unsigned int cfb_stride;
u32 fbc_ctl;
@@ -287,7 +285,7 @@ static u32 i8xx_fbc_ctl(struct intel_fbc *fbc)
FBC_CTL_INTERVAL(fbc_state->interval) |
FBC_CTL_STRIDE(cfb_stride);
- if (IS_I945GM(i915))
+ if (display->platform.i945gm)
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
if (fbc_state->fence_id >= 0)
@@ -333,8 +331,8 @@ static void i8xx_fbc_deactivate(struct intel_fbc *fbc)
static void i8xx_fbc_activate(struct intel_fbc *fbc)
{
- const struct intel_fbc_state *fbc_state = &fbc->state;
struct intel_display *display = fbc->display;
+ const struct intel_fbc_state *fbc_state = &fbc->state;
int i;
/* Clear old tags */
@@ -365,12 +363,12 @@ static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc)
static void i8xx_fbc_nuke(struct intel_fbc *fbc)
{
+ struct intel_display *display = fbc->display;
struct intel_fbc_state *fbc_state = &fbc->state;
enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
- struct drm_i915_private *dev_priv = to_i915(fbc->display->drm);
- intel_de_write_fw(dev_priv, DSPADDR(dev_priv, i9xx_plane),
- intel_de_read_fw(dev_priv, DSPADDR(dev_priv, i9xx_plane)));
+ intel_de_write_fw(display, DSPADDR(display, i9xx_plane),
+ intel_de_read_fw(display, DSPADDR(display, i9xx_plane)));
}
static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
@@ -386,9 +384,9 @@ static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
range_overflows_end_t(u64, i915_gem_stolen_area_address(i915),
i915_gem_stolen_node_offset(&fbc->compressed_llb),
U32_MAX));
- intel_de_write(i915, FBC_CFB_BASE,
+ intel_de_write(display, FBC_CFB_BASE,
i915_gem_stolen_node_address(i915, &fbc->compressed_fb));
- intel_de_write(i915, FBC_LL_BASE,
+ intel_de_write(display, FBC_LL_BASE,
i915_gem_stolen_node_address(i915, &fbc->compressed_llb));
}
@@ -403,12 +401,12 @@ static const struct intel_fbc_funcs i8xx_fbc_funcs = {
static void i965_fbc_nuke(struct intel_fbc *fbc)
{
+ struct intel_display *display = fbc->display;
struct intel_fbc_state *fbc_state = &fbc->state;
enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
- struct drm_i915_private *dev_priv = to_i915(fbc->display->drm);
- intel_de_write_fw(dev_priv, DSPSURF(dev_priv, i9xx_plane),
- intel_de_read_fw(dev_priv, DSPSURF(dev_priv, i9xx_plane)));
+ intel_de_write_fw(display, DSPSURF(display, i9xx_plane),
+ intel_de_read_fw(display, DSPSURF(display, i9xx_plane)));
}
static const struct intel_fbc_funcs i965_fbc_funcs = {
@@ -437,15 +435,14 @@ static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc)
static u32 g4x_dpfc_ctl(struct intel_fbc *fbc)
{
- const struct intel_fbc_state *fbc_state = &fbc->state;
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
+ const struct intel_fbc_state *fbc_state = &fbc->state;
u32 dpfc_ctl;
dpfc_ctl = g4x_dpfc_ctl_limit(fbc) |
DPFC_CTL_PLANE_G4X(fbc_state->plane->i9xx_plane);
- if (IS_G4X(i915))
+ if (display->platform.g4x)
dpfc_ctl |= DPFC_CTL_SR_EN;
if (fbc_state->fence_id >= 0) {
@@ -460,8 +457,8 @@ static u32 g4x_dpfc_ctl(struct intel_fbc *fbc)
static void g4x_fbc_activate(struct intel_fbc *fbc)
{
- const struct intel_fbc_state *fbc_state = &fbc->state;
struct intel_display *display = fbc->display;
+ const struct intel_fbc_state *fbc_state = &fbc->state;
intel_de_write(display, DPFC_FENCE_YOFF,
fbc_state->fence_y_offset);
@@ -512,8 +509,8 @@ static const struct intel_fbc_funcs g4x_fbc_funcs = {
static void ilk_fbc_activate(struct intel_fbc *fbc)
{
- struct intel_fbc_state *fbc_state = &fbc->state;
struct intel_display *display = fbc->display;
+ struct intel_fbc_state *fbc_state = &fbc->state;
intel_de_write(display, ILK_DPFC_FENCE_YOFF(fbc->id),
fbc_state->fence_y_offset);
@@ -527,6 +524,9 @@ static void ilk_fbc_deactivate(struct intel_fbc *fbc)
struct intel_display *display = fbc->display;
u32 dpfc_ctl;
+ if (HAS_FBC_DIRTY_RECT(display))
+ intel_de_write(display, XE3_FBC_DIRTY_CTL(fbc->id), 0);
+
/* Disable compression */
dpfc_ctl = intel_de_read(display, ILK_DPFC_CONTROL(fbc->id));
if (dpfc_ctl & DPFC_CTL_EN) {
@@ -564,8 +564,8 @@ static const struct intel_fbc_funcs ilk_fbc_funcs = {
static void snb_fbc_program_fence(struct intel_fbc *fbc)
{
- const struct intel_fbc_state *fbc_state = &fbc->state;
struct intel_display *display = fbc->display;
+ const struct intel_fbc_state *fbc_state = &fbc->state;
u32 ctl = 0;
if (fbc_state->fence_id >= 0)
@@ -601,8 +601,8 @@ static const struct intel_fbc_funcs snb_fbc_funcs = {
static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc)
{
- const struct intel_fbc_state *fbc_state = &fbc->state;
struct intel_display *display = fbc->display;
+ const struct intel_fbc_state *fbc_state = &fbc->state;
u32 val = 0;
if (fbc_state->override_cfb_stride)
@@ -614,8 +614,8 @@ static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc)
static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc)
{
- const struct intel_fbc_state *fbc_state = &fbc->state;
struct intel_display *display = fbc->display;
+ const struct intel_fbc_state *fbc_state = &fbc->state;
u32 val = 0;
/* Display WA #0529: skl, kbl, bxt. */
@@ -630,14 +630,13 @@ static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc)
static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
{
- const struct intel_fbc_state *fbc_state = &fbc->state;
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
+ const struct intel_fbc_state *fbc_state = &fbc->state;
u32 dpfc_ctl;
dpfc_ctl = g4x_dpfc_ctl_limit(fbc);
- if (IS_IVYBRIDGE(i915))
+ if (display->platform.ivybridge)
dpfc_ctl |= DPFC_CTL_PLANE_IVB(fbc_state->plane->i9xx_plane);
if (DISPLAY_VER(display) >= 20)
@@ -670,6 +669,10 @@ static void ivb_fbc_activate(struct intel_fbc *fbc)
if (DISPLAY_VER(display) >= 20)
intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
+ if (HAS_FBC_DIRTY_RECT(display))
+ intel_de_write(display, XE3_FBC_DIRTY_CTL(fbc->id),
+ FBC_DIRTY_RECT_EN);
+
intel_de_write(display, ILK_DPFC_CONTROL(fbc->id),
DPFC_CTL_EN | dpfc_ctl);
}
@@ -739,8 +742,19 @@ static void intel_fbc_nuke(struct intel_fbc *fbc)
static void intel_fbc_activate(struct intel_fbc *fbc)
{
+ struct intel_display *display = fbc->display;
+
lockdep_assert_held(&fbc->lock);
+ /* only the fence can change for a flip nuke */
+ if (fbc->active && !intel_fbc_has_fences(display))
+ return;
+ /*
+ * In case of FBC dirty rect, any updates to the FBC registers will
+ * trigger the nuke.
+ */
+ drm_WARN_ON(display->drm, fbc->active && HAS_FBC_DIRTY_RECT(display));
+
intel_fbc_hw_activate(fbc);
intel_fbc_nuke(fbc);
@@ -759,9 +773,7 @@ static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason)
static u64 intel_fbc_cfb_base_max(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (DISPLAY_VER(display) >= 5 || IS_G4X(i915))
+ if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
return BIT_ULL(28);
else
return BIT_ULL(32);
@@ -776,8 +788,8 @@ static u64 intel_fbc_stolen_end(struct intel_display *display)
* reserved range size, so it always assumes the maximum (8mb) is used.
* If we enable FBC using a CFB on that memory range we'll get FIFO
* underruns, even if that range is not reserved by the BIOS. */
- if (IS_BROADWELL(i915) ||
- (DISPLAY_VER(display) == 9 && !IS_BROXTON(i915)))
+ if (display->platform.broadwell ||
+ (DISPLAY_VER(display) == 9 && !display->platform.broxton))
end = i915_gem_stolen_area_size(i915) - 8 * 1024 * 1024;
else
end = U64_MAX;
@@ -792,10 +804,8 @@ static int intel_fbc_min_limit(const struct intel_plane_state *plane_state)
static int intel_fbc_max_limit(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
/* WaFbcOnly1to1Ratio:ctg */
- if (IS_G4X(i915))
+ if (display->platform.g4x)
return 1;
/*
@@ -843,7 +853,7 @@ static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
drm_WARN_ON(display->drm,
i915_gem_stolen_node_allocated(&fbc->compressed_llb));
- if (DISPLAY_VER(display) < 5 && !IS_G4X(i915)) {
+ if (DISPLAY_VER(display) < 5 && !display->platform.g4x) {
ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb,
4096, 4096);
if (ret)
@@ -882,9 +892,8 @@ static void intel_fbc_program_cfb(struct intel_fbc *fbc)
static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
{
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
- if (IS_SKYLAKE(i915) || IS_BROXTON(i915)) {
+ if (display->platform.skylake || display->platform.broxton) {
/*
* WaFbcHighMemBwCorruptionAvoidance:skl,bxt
* Display WA #0883: skl,bxt
@@ -893,8 +902,8 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
0, DPFC_DISABLE_DUMMY0);
}
- if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
- IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) {
+ if (display->platform.skylake || display->platform.kabylake ||
+ display->platform.coffeelake || display->platform.cometlake) {
/*
* WaFbcNukeOnHostModify:skl,kbl,cfl
* Display WA #0873: skl,kbl,cfl
@@ -909,7 +918,7 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
0, DPFC_CHICKEN_COMP_DUMMY_PIXEL);
/* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp,mtl */
- if (DISPLAY_VER(display) >= 11 && !IS_DG2(i915))
+ if (DISPLAY_VER(display) >= 11 && !display->platform.dg2)
intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
}
@@ -986,13 +995,12 @@ static bool icl_fbc_stride_is_valid(const struct intel_plane_state *plane_state)
static bool stride_is_valid(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (DISPLAY_VER(display) >= 11)
return icl_fbc_stride_is_valid(plane_state);
else if (DISPLAY_VER(display) >= 9)
return skl_fbc_stride_is_valid(plane_state);
- else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915))
+ else if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
return g4x_fbc_stride_is_valid(plane_state);
else if (DISPLAY_VER(display) == 4)
return i965_fbc_stride_is_valid(plane_state);
@@ -1023,7 +1031,6 @@ static bool i8xx_fbc_pixel_format_is_valid(const struct intel_plane_state *plane
static bool g4x_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct drm_framebuffer *fb = plane_state->hw.fb;
switch (fb->format->format) {
@@ -1032,7 +1039,7 @@ static bool g4x_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_
return true;
case DRM_FORMAT_RGB565:
/* WaFbcOnly1to1Ratio:ctg */
- if (IS_G4X(i915))
+ if (display->platform.g4x)
return false;
return true;
default:
@@ -1059,11 +1066,10 @@ static bool lnl_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_
static bool pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (DISPLAY_VER(display) >= 20)
return lnl_fbc_pixel_format_is_valid(plane_state);
- else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915))
+ else if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
return g4x_fbc_pixel_format_is_valid(plane_state);
else
return i8xx_fbc_pixel_format_is_valid(plane_state);
@@ -1094,11 +1100,10 @@ static bool skl_fbc_rotation_is_valid(const struct intel_plane_state *plane_stat
static bool rotation_is_valid(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (DISPLAY_VER(display) >= 9)
return skl_fbc_rotation_is_valid(plane_state);
- else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915))
+ else if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
return g4x_fbc_rotation_is_valid(plane_state);
else
return i8xx_fbc_rotation_is_valid(plane_state);
@@ -1107,8 +1112,6 @@ static bool rotation_is_valid(const struct intel_plane_state *plane_state)
static void intel_fbc_max_surface_size(struct intel_display *display,
unsigned int *w, unsigned int *h)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 11) {
*w = 8192;
*h = 4096;
@@ -1118,7 +1121,7 @@ static void intel_fbc_max_surface_size(struct intel_display *display,
} else if (DISPLAY_VER(display) >= 7) {
*w = 4096;
*h = 4096;
- } else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915)) {
+ } else if (DISPLAY_VER(display) >= 5 || display->platform.g4x) {
*w = 4096;
*h = 2048;
} else {
@@ -1151,15 +1154,13 @@ static bool intel_fbc_surface_size_ok(const struct intel_plane_state *plane_stat
static void intel_fbc_max_plane_size(struct intel_display *display,
unsigned int *w, unsigned int *h)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 10) {
*w = 5120;
*h = 4096;
- } else if (DISPLAY_VER(display) >= 8 || IS_HASWELL(i915)) {
+ } else if (DISPLAY_VER(display) >= 8 || display->platform.haswell) {
*w = 4096;
*h = 4096;
- } else if (DISPLAY_VER(display) >= 5 || IS_G4X(i915)) {
+ } else if (DISPLAY_VER(display) >= 5 || display->platform.g4x) {
*w = 4096;
*h = 2048;
} else {
@@ -1203,6 +1204,74 @@ static bool tiling_is_valid(const struct intel_plane_state *plane_state)
return i8xx_fbc_tiling_valid(plane_state);
}
+static void
+intel_fbc_invalidate_dirty_rect(struct intel_fbc *fbc)
+{
+ lockdep_assert_held(&fbc->lock);
+
+ fbc->state.dirty_rect = DRM_RECT_INIT(0, 0, 0, 0);
+}
+
+static void
+intel_fbc_program_dirty_rect(struct intel_dsb *dsb, struct intel_fbc *fbc,
+ const struct drm_rect *fbc_dirty_rect)
+{
+ struct intel_display *display = fbc->display;
+
+ drm_WARN_ON(display->drm, fbc_dirty_rect->y2 == 0);
+
+ intel_de_write_dsb(display, dsb, XE3_FBC_DIRTY_RECT(fbc->id),
+ FBC_DIRTY_RECT_START_LINE(fbc_dirty_rect->y1) |
+ FBC_DIRTY_RECT_END_LINE(fbc_dirty_rect->y2 - 1));
+}
+
+static void
+intel_fbc_dirty_rect_update(struct intel_dsb *dsb, struct intel_fbc *fbc)
+{
+ const struct drm_rect *fbc_dirty_rect = &fbc->state.dirty_rect;
+
+ lockdep_assert_held(&fbc->lock);
+
+ if (!drm_rect_visible(fbc_dirty_rect))
+ return;
+
+ intel_fbc_program_dirty_rect(dsb, fbc, fbc_dirty_rect);
+}
+
+void
+intel_fbc_dirty_rect_update_noarm(struct intel_dsb *dsb,
+ struct intel_plane *plane)
+{
+ struct intel_display *display = to_intel_display(plane);
+ struct intel_fbc *fbc = plane->fbc;
+
+ if (!HAS_FBC_DIRTY_RECT(display))
+ return;
+
+ mutex_lock(&fbc->lock);
+
+ if (fbc->state.plane == plane)
+ intel_fbc_dirty_rect_update(dsb, fbc);
+
+ mutex_unlock(&fbc->lock);
+}
+
+static void
+intel_fbc_hw_initialize_dirty_rect(struct intel_fbc *fbc,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_rect src;
+
+ /*
+ * Initialize the FBC hardware with the whole plane area as the dirty rect.
+ * This ensures that valid coordinates are written to the hardware as the
+ * dirty rect.
+ */
+ drm_rect_fp_to_int(&src, &plane_state->uapi.src);
+
+ intel_fbc_program_dirty_rect(NULL, fbc, &src);
+}
+
static void intel_fbc_update_state(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_plane *plane)
@@ -1276,6 +1345,62 @@ static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state)
intel_fbc_is_cfb_ok(plane_state);
}
+static void
+__intel_fbc_prepare_dirty_rect(const struct intel_plane_state *plane_state,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct intel_fbc *fbc = plane->fbc;
+ struct drm_rect *fbc_dirty_rect = &fbc->state.dirty_rect;
+ int width = drm_rect_width(&plane_state->uapi.src) >> 16;
+ const struct drm_rect *damage = &plane_state->damage;
+ int y_offset = plane_state->view.color_plane[0].y;
+
+ lockdep_assert_held(&fbc->lock);
+
+ if (intel_crtc_needs_modeset(crtc_state) ||
+ !intel_fbc_is_ok(plane_state)) {
+ intel_fbc_invalidate_dirty_rect(fbc);
+ return;
+ }
+
+ if (drm_rect_visible(damage))
+ *fbc_dirty_rect = *damage;
+ else
+ /* dirty rect must cover at least one line */
+ *fbc_dirty_rect = DRM_RECT_INIT(0, y_offset, width, 1);
+}
+
+void
+intel_fbc_prepare_dirty_rect(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ if (!HAS_FBC_DIRTY_RECT(display))
+ return;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ struct intel_fbc *fbc = plane->fbc;
+
+ if (!fbc || plane->pipe != crtc->pipe)
+ continue;
+
+ mutex_lock(&fbc->lock);
+
+ if (fbc->state.plane == plane)
+ __intel_fbc_prepare_dirty_rect(plane_state,
+ crtc_state);
+
+ mutex_unlock(&fbc->lock);
+ }
+}
+
static int intel_fbc_check_plane(struct intel_atomic_state *state,
struct intel_plane *plane)
{
@@ -1317,7 +1442,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
}
/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
- if (i915_vtd_active(i915) && (IS_SKYLAKE(i915) || IS_BROXTON(i915))) {
+ if (i915_vtd_active(i915) && (display->platform.skylake || display->platform.broxton)) {
plane_state->no_fbc_reason = "VT-d enabled";
return 0;
}
@@ -1338,16 +1463,21 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
* Display 12+ is not supporting FBC with PSR2.
* Recommendation is to keep this combination disabled
* Bspec: 50422 HSD: 14010260002
+ *
+ * In Xe3, PSR2 selective fetch and the FBC dirty rect feature cannot
+ * coexist. So if PSR2 selective fetch is supported, mark FBC as not
+ * supported.
+ * TODO: Need logic to decide between PSR2 and the FBC dirty rect.
*/
- if (IS_DISPLAY_VER(display, 12, 14) && crtc_state->has_sel_update &&
- !crtc_state->has_panel_replay) {
+ if ((IS_DISPLAY_VER(display, 12, 14) || HAS_FBC_DIRTY_RECT(display)) &&
+ crtc_state->has_sel_update && !crtc_state->has_panel_replay) {
plane_state->no_fbc_reason = "PSR2 enabled";
return 0;
}
/* Wa_14016291713 */
if ((IS_DISPLAY_VER(display, 12, 13) ||
- IS_DISPLAY_VERx100_STEP(i915, 1400, STEP_A0, STEP_C0)) &&
+ IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_C0)) &&
crtc_state->has_psr && !crtc_state->has_panel_replay) {
plane_state->no_fbc_reason = "PSR1 enabled (Wa_14016291713)";
return 0;
@@ -1410,7 +1540,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
}
/* WaFbcExceedCdClockThreshold:hsw,bdw */
- if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ if (display->platform.haswell || display->platform.broadwell) {
const struct intel_cdclk_state *cdclk_state;
cdclk_state = intel_atomic_get_cdclk_state(state);
@@ -1547,6 +1677,8 @@ static void __intel_fbc_disable(struct intel_fbc *fbc)
drm_dbg_kms(display->drm, "Disabling FBC on [PLANE:%d:%s]\n",
plane->base.base.id, plane->base.name);
+ intel_fbc_invalidate_dirty_rect(fbc);
+
__intel_fbc_cleanup_cfb(fbc);
fbc->state.plane = NULL;
@@ -1614,14 +1746,14 @@ out:
mutex_unlock(&fbc->lock);
}
-void intel_fbc_invalidate(struct drm_i915_private *i915,
+void intel_fbc_invalidate(struct intel_display *display,
unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
struct intel_fbc *fbc;
enum intel_fbc_id fbc_id;
- for_each_intel_fbc(&i915->display, fbc, fbc_id)
+ for_each_intel_fbc(display, fbc, fbc_id)
__intel_fbc_invalidate(fbc, frontbuffer_bits, origin);
}
@@ -1653,14 +1785,14 @@ out:
mutex_unlock(&fbc->lock);
}
-void intel_fbc_flush(struct drm_i915_private *i915,
+void intel_fbc_flush(struct intel_display *display,
unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
struct intel_fbc *fbc;
enum intel_fbc_id fbc_id;
- for_each_intel_fbc(&i915->display, fbc, fbc_id)
+ for_each_intel_fbc(display, fbc, fbc_id)
__intel_fbc_flush(fbc, frontbuffer_bits, origin);
}
@@ -1732,6 +1864,9 @@ static void __intel_fbc_enable(struct intel_atomic_state *state,
intel_fbc_update_state(state, crtc, plane);
+ if (HAS_FBC_DIRTY_RECT(display))
+ intel_fbc_hw_initialize_dirty_rect(fbc, plane_state);
+
intel_fbc_program_workarounds(fbc);
intel_fbc_program_cfb(fbc);
}
@@ -1897,15 +2032,13 @@ void intel_fbc_handle_fifo_underrun_irq(struct intel_display *display)
*/
static int intel_sanitize_fbc_option(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (display->params.enable_fbc >= 0)
return !!display->params.enable_fbc;
if (!HAS_FBC(display))
return 0;
- if (IS_BROADWELL(i915) || DISPLAY_VER(display) >= 9)
+ if (display->platform.broadwell || DISPLAY_VER(display) >= 9)
return 1;
return 0;
@@ -1919,7 +2052,6 @@ void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane)
static struct intel_fbc *intel_fbc_create(struct intel_display *display,
enum intel_fbc_id fbc_id)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_fbc *fbc;
fbc = kzalloc(sizeof(*fbc), GFP_KERNEL);
@@ -1937,7 +2069,7 @@ static struct intel_fbc *intel_fbc_create(struct intel_display *display,
fbc->funcs = &snb_fbc_funcs;
else if (DISPLAY_VER(display) == 5)
fbc->funcs = &ilk_fbc_funcs;
- else if (IS_G4X(i915))
+ else if (display->platform.g4x)
fbc->funcs = &g4x_fbc_funcs;
else if (DISPLAY_VER(display) == 4)
fbc->funcs = &i965_fbc_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index ceae55458e14..0e715cb6b4e6 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -9,11 +9,11 @@
#include <linux/types.h>
enum fb_op_origin;
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
+struct intel_dsb;
struct intel_fbc;
struct intel_plane;
struct intel_plane_state;
@@ -38,15 +38,19 @@ void intel_fbc_sanitize(struct intel_display *display);
void intel_fbc_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_disable(struct intel_crtc *crtc);
-void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
+void intel_fbc_invalidate(struct intel_display *display,
unsigned int frontbuffer_bits,
enum fb_op_origin origin);
-void intel_fbc_flush(struct drm_i915_private *dev_priv,
+void intel_fbc_flush(struct intel_display *display,
unsigned int frontbuffer_bits, enum fb_op_origin origin);
void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane);
void intel_fbc_handle_fifo_underrun_irq(struct intel_display *display);
void intel_fbc_reset_underrun(struct intel_display *display);
void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc);
void intel_fbc_debugfs_register(struct intel_display *display);
+void intel_fbc_prepare_dirty_rect(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_fbc_dirty_rect_update_noarm(struct intel_dsb *dsb,
+ struct intel_plane *plane);
#endif /* __INTEL_FBC_H__ */
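To make the intended ordering of the two new dirty-rect hooks explicit, here is a minimal sketch of how a commit path could call them; the real call sites live in the crtc/plane update code and are not part of this hunk, so the helper name and placement below are assumptions.

/* Illustrative ordering only, assuming an atomic commit helper that already
 * holds the state, DSB, crtc and plane; not copied from the actual callers. */
static void example_commit_dirty_rect(struct intel_atomic_state *state,
				      struct intel_dsb *dsb,
				      struct intel_crtc *crtc,
				      struct intel_plane *plane)
{
	/* Pre-commit: derive fbc->state.dirty_rect from the plane damage. */
	intel_fbc_prepare_dirty_rect(state, crtc);

	/* Noarm phase: program XE3_FBC_DIRTY_RECT; on Xe3 the subsequent
	 * register updates trigger the nuke instead of an explicit one. */
	intel_fbc_dirty_rect_update_noarm(dsb, plane);
}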
diff --git a/drivers/gpu/drm/i915/display/intel_fbc_regs.h b/drivers/gpu/drm/i915/display/intel_fbc_regs.h
index ae0699c3c2fe..b1d0161a3196 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc_regs.h
@@ -100,6 +100,15 @@
#define FBC_STRIDE_MASK REG_GENMASK(14, 0)
#define FBC_STRIDE(x) REG_FIELD_PREP(FBC_STRIDE_MASK, (x))
+#define XE3_FBC_DIRTY_RECT(fbc_id) _MMIO_PIPE((fbc_id), 0x43230, 0x43270)
+#define FBC_DIRTY_RECT_END_LINE_MASK REG_GENMASK(31, 16)
+#define FBC_DIRTY_RECT_END_LINE(val) REG_FIELD_PREP(FBC_DIRTY_RECT_END_LINE_MASK, (val))
+#define FBC_DIRTY_RECT_START_LINE_MASK REG_GENMASK(15, 0)
+#define FBC_DIRTY_RECT_START_LINE(val) REG_FIELD_PREP(FBC_DIRTY_RECT_START_LINE_MASK, (val))
+
+#define XE3_FBC_DIRTY_CTL(fbc_id) _MMIO_PIPE((fbc_id), 0x43234, 0x43274)
+#define FBC_DIRTY_RECT_EN REG_BIT(31)
+
#define ILK_FBC_RT_BASE _MMIO(0x2128)
#define ILK_FBC_RT_VALID REG_BIT(0)
#define SNB_FBC_FRONT_BUFFER REG_BIT(1)
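As a worked example of the new fields, the value written to XE3_FBC_DIRTY_RECT mirrors intel_fbc_program_dirty_rect() above: the start line is the damage rect's y1 and the end line is y2 - 1, since y2 is exclusive while the hardware field is inclusive. A small illustrative helper, not part of the patch:

/* Illustrative only: pack a drm_rect's vertical span into the Xe3 FBC
 * dirty-rect register layout defined above. */
static u32 example_fbc_dirty_rect_val(const struct drm_rect *damage)
{
	/* e.g. lines 100..199 inclusive for a rect with y1 = 100, y2 = 200 */
	return FBC_DIRTY_RECT_START_LINE(damage->y1) |
	       FBC_DIRTY_RECT_END_LINE(damage->y2 - 1);
}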
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 301b5fd301a2..adc19d5607de 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -37,14 +37,18 @@
#include <linux/tty.h>
#include <linux/vga_switcheroo.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "i915_drv.h"
+#include "i915_vma.h"
#include "intel_bo.h"
#include "intel_display_types.h"
#include "intel_fb.h"
@@ -54,24 +58,16 @@
#include "intel_frontbuffer.h"
struct intel_fbdev {
- struct drm_fb_helper helper;
struct intel_framebuffer *fb;
struct i915_vma *vma;
unsigned long vma_flags;
- int preferred_bpp;
-
- /* Whether or not fbdev hpd processing is temporarily suspended */
- bool hpd_suspended: 1;
- /* Set when a hotplug was received while HPD processing was suspended */
- bool hpd_waiting: 1;
-
- /* Protects hpd_suspended */
- struct mutex hpd_lock;
};
static struct intel_fbdev *to_intel_fbdev(struct drm_fb_helper *fb_helper)
{
- return container_of(fb_helper, struct intel_fbdev, helper);
+ struct drm_i915_private *i915 = to_i915(fb_helper->client.dev);
+
+ return i915->display.fbdev.fbdev;
}
static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev)
@@ -127,8 +123,8 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
static int intel_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
- struct intel_fbdev *fbdev = to_intel_fbdev(info->par);
- struct drm_gem_object *obj = drm_gem_fb_get_obj(&fbdev->fb->base, 0);
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_gem_object *obj = drm_gem_fb_get_obj(fb_helper->fb, 0);
return intel_bo_fb_mmap(obj, vma);
}
@@ -136,9 +132,9 @@ static int intel_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
static void intel_fbdev_fb_destroy(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- struct intel_fbdev *ifbdev = container_of(fb_helper, struct intel_fbdev, helper);
+ struct intel_fbdev *ifbdev = to_intel_fbdev(fb_helper);
- drm_fb_helper_fini(&ifbdev->helper);
+ drm_fb_helper_fini(fb_helper);
/*
* We rely on the object-free to release the VMA pinning for
@@ -146,11 +142,11 @@ static void intel_fbdev_fb_destroy(struct fb_info *info)
* trying to rectify all the possible error paths leading here.
*/
intel_fb_unpin_vma(ifbdev->vma, ifbdev->vma_flags);
- drm_framebuffer_remove(&ifbdev->fb->base);
+ drm_framebuffer_remove(fb_helper->fb);
drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(&ifbdev->helper);
- kfree(ifbdev);
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
}
__diag_push();
@@ -170,8 +166,48 @@ static const struct fb_ops intelfb_ops = {
__diag_pop();
-static int intelfb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
+static int intelfb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
+{
+ if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
+ return 0;
+
+ if (helper->fb->funcs->dirty)
+ return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+
+ return 0;
+}
+
+static void intelfb_restore(struct drm_fb_helper *fb_helper)
+{
+ struct intel_fbdev *ifbdev = to_intel_fbdev(fb_helper);
+
+ intel_fbdev_invalidate(ifbdev);
+}
+
+static void intelfb_set_suspend(struct drm_fb_helper *fb_helper, bool suspend)
+{
+ struct fb_info *info = fb_helper->info;
+
+ /*
+ * When resuming from hibernation, Linux restores the object's
+ * content from swap if the buffer is backed by shmemfs. If the
+ * object is stolen however, it will be full of whatever garbage
+ * was left in there. Clear it to zero in this case.
+ */
+ if (!suspend && !intel_bo_is_shmem(intel_fb_bo(fb_helper->fb)))
+ memset_io(info->screen_base, 0, info->screen_size);
+
+ fb_set_suspend(info, suspend);
+}
+
+static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+ .fb_dirty = intelfb_dirty,
+ .fb_restore = intelfb_restore,
+ .fb_set_suspend = intelfb_set_suspend,
+};
+
+int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
{
struct intel_fbdev *ifbdev = to_intel_fbdev(helper);
struct intel_framebuffer *fb = ifbdev->fb;
@@ -185,12 +221,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct drm_gem_object *obj;
int ret;
- mutex_lock(&ifbdev->hpd_lock);
- ret = ifbdev->hpd_suspended ? -EAGAIN : 0;
- mutex_unlock(&ifbdev->hpd_lock);
- if (ret)
- return ret;
-
ifbdev->fb = NULL;
if (fb &&
@@ -240,7 +270,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unpin;
}
- ifbdev->helper.fb = &fb->base;
+ helper->funcs = &intel_fb_helper_funcs;
+ helper->fb = &fb->base;
info->fbops = &intelfb_ops;
@@ -250,7 +281,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
if (ret)
goto out_unpin;
- drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
+ drm_fb_helper_fill_info(info, dev->fb_helper, sizes);
/* If the object is shmemfs backed, it will have given us zeroed pages.
* If the object is stolen however, it will be full of whatever
@@ -279,22 +310,6 @@ out_unlock:
return ret;
}
-static int intelfb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
-{
- if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
- return 0;
-
- if (helper->fb->funcs->dirty)
- return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
-
- return 0;
-}
-
-static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
- .fb_probe = intelfb_create,
- .fb_dirty = intelfb_dirty,
-};
-
/*
* Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
* The core display code will have read out the current plane configuration,
@@ -417,7 +432,6 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
goto out;
}
- ifbdev->preferred_bpp = fb->base.format->cpp[0] * 8;
ifbdev->fb = fb;
drm_framebuffer_get(&ifbdev->fb->base);
@@ -448,251 +462,51 @@ out:
return false;
}
-static void intel_fbdev_suspend_worker(struct work_struct *work)
-{
- intel_fbdev_set_suspend(&container_of(work,
- struct drm_i915_private,
- display.fbdev.suspend_work)->drm,
- FBINFO_STATE_RUNNING,
- true);
-}
-
-/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
- * processing, fbdev will perform a full connector reprobe if a hotplug event
- * was received while HPD was suspended.
- */
-static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state)
-{
- struct intel_fbdev *ifbdev = i915->display.fbdev.fbdev;
- bool send_hpd = false;
-
- mutex_lock(&ifbdev->hpd_lock);
- ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
- send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
- ifbdev->hpd_waiting = false;
- mutex_unlock(&ifbdev->hpd_lock);
-
- if (send_hpd) {
- drm_dbg_kms(&i915->drm, "Handling delayed fbcon HPD event\n");
- drm_fb_helper_hotplug_event(&ifbdev->helper);
- }
-}
-
-void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
- struct fb_info *info;
-
- if (!ifbdev)
- return;
-
- if (drm_WARN_ON(&dev_priv->drm, !HAS_DISPLAY(dev_priv)))
- return;
-
- if (!ifbdev->vma)
- goto set_suspend;
-
- info = ifbdev->helper.info;
-
- if (synchronous) {
- /* Flush any pending work to turn the console on, and then
- * wait to turn it off. It must be synchronous as we are
- * about to suspend or unload the driver.
- *
- * Note that from within the work-handler, we cannot flush
- * ourselves, so only flush outstanding work upon suspend!
- */
- if (state != FBINFO_STATE_RUNNING)
- flush_work(&dev_priv->display.fbdev.suspend_work);
-
- console_lock();
- } else {
- /*
- * The console lock can be pretty contented on resume due
- * to all the printk activity. Try to keep it out of the hot
- * path of resume if possible.
- */
- drm_WARN_ON(dev, state != FBINFO_STATE_RUNNING);
- if (!console_trylock()) {
- /* Don't block our own workqueue as this can
- * be run in parallel with other i915.ko tasks.
- */
- queue_work(dev_priv->unordered_wq,
- &dev_priv->display.fbdev.suspend_work);
- return;
- }
- }
-
- /* On resume from hibernation: If the object is shmemfs backed, it has
- * been restored from swap. If the object is stolen however, it will be
- * full of whatever garbage was left in there.
- */
- if (state == FBINFO_STATE_RUNNING &&
- !intel_bo_is_shmem(intel_fb_bo(&ifbdev->fb->base)))
- memset_io(info->screen_base, 0, info->screen_size);
-
- drm_fb_helper_set_suspend(&ifbdev->helper, state);
- console_unlock();
-
-set_suspend:
- intel_fbdev_hpd_set_suspend(dev_priv, state);
-}
-
-static int intel_fbdev_output_poll_changed(struct drm_device *dev)
+static unsigned int intel_fbdev_color_mode(const struct drm_format_info *info)
{
- struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
- bool send_hpd;
-
- if (!ifbdev)
- return -EINVAL;
-
- mutex_lock(&ifbdev->hpd_lock);
- send_hpd = !ifbdev->hpd_suspended;
- ifbdev->hpd_waiting = true;
- mutex_unlock(&ifbdev->hpd_lock);
-
- if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
- drm_fb_helper_hotplug_event(&ifbdev->helper);
+ unsigned int bpp;
- return 0;
-}
-
-static int intel_fbdev_restore_mode(struct drm_i915_private *dev_priv)
-{
- struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
- int ret;
-
- if (!ifbdev)
- return -EINVAL;
-
- if (!ifbdev->vma)
- return -ENOMEM;
-
- ret = drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper);
- if (ret)
- return ret;
-
- intel_fbdev_invalidate(ifbdev);
-
- return 0;
-}
-
-/*
- * Fbdev client and struct drm_client_funcs
- */
+ if (!info->depth || info->num_planes != 1 || info->has_alpha || info->is_yuv)
+ return 0;
-static void intel_fbdev_client_unregister(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- struct drm_device *dev = fb_helper->dev;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
+ bpp = drm_format_info_bpp(info, 0);
- if (fb_helper->info) {
- vga_switcheroo_client_fb_set(pdev, NULL);
- drm_fb_helper_unregister_info(fb_helper);
- } else {
- drm_fb_helper_unprepare(fb_helper);
- drm_client_release(&fb_helper->client);
- kfree(fb_helper);
+ switch (bpp) {
+ case 16:
+ return info->depth; // 15 or 16
+ default:
+ return bpp;
}
}
-static int intel_fbdev_client_restore(struct drm_client_dev *client)
-{
- struct drm_i915_private *dev_priv = to_i915(client->dev);
- int ret;
-
- ret = intel_fbdev_restore_mode(dev_priv);
- if (ret)
- return ret;
-
- vga_switcheroo_process_delayed_switch();
-
- return 0;
-}
-
-static int intel_fbdev_client_hotplug(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- struct drm_device *dev = client->dev;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- int ret;
-
- if (dev->fb_helper)
- return intel_fbdev_output_poll_changed(dev);
-
- ret = drm_fb_helper_init(dev, fb_helper);
- if (ret)
- goto err_drm_err;
-
- ret = drm_fb_helper_initial_config(fb_helper);
- if (ret)
- goto err_drm_fb_helper_fini;
-
- vga_switcheroo_client_fb_set(pdev, fb_helper->info);
-
- return 0;
-
-err_drm_fb_helper_fini:
- drm_fb_helper_fini(fb_helper);
-err_drm_err:
- drm_err(dev, "Failed to setup i915 fbdev emulation (ret=%d)\n", ret);
- return ret;
-}
-
-static const struct drm_client_funcs intel_fbdev_client_funcs = {
- .owner = THIS_MODULE,
- .unregister = intel_fbdev_client_unregister,
- .restore = intel_fbdev_client_restore,
- .hotplug = intel_fbdev_client_hotplug,
-};
-
void intel_fbdev_setup(struct drm_i915_private *i915)
{
struct drm_device *dev = &i915->drm;
struct intel_fbdev *ifbdev;
- int ret;
+ unsigned int preferred_bpp = 0;
if (!HAS_DISPLAY(i915))
return;
- ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL);
+ ifbdev = drmm_kzalloc(dev, sizeof(*ifbdev), GFP_KERNEL);
if (!ifbdev)
return;
- drm_fb_helper_prepare(dev, &ifbdev->helper, 32, &intel_fb_helper_funcs);
i915->display.fbdev.fbdev = ifbdev;
- INIT_WORK(&i915->display.fbdev.suspend_work, intel_fbdev_suspend_worker);
- mutex_init(&ifbdev->hpd_lock);
if (intel_fbdev_init_bios(dev, ifbdev))
- ifbdev->helper.preferred_bpp = ifbdev->preferred_bpp;
- else
- ifbdev->preferred_bpp = ifbdev->helper.preferred_bpp;
-
- ret = drm_client_init(dev, &ifbdev->helper.client, "intel-fbdev",
- &intel_fbdev_client_funcs);
- if (ret) {
- drm_err(dev, "Failed to register client: %d\n", ret);
- goto err_drm_fb_helper_unprepare;
- }
-
- drm_client_register(&ifbdev->helper.client);
-
- return;
+ preferred_bpp = intel_fbdev_color_mode(ifbdev->fb->base.format);
+ if (!preferred_bpp)
+ preferred_bpp = 32;
-err_drm_fb_helper_unprepare:
- drm_fb_helper_unprepare(&ifbdev->helper);
- mutex_destroy(&ifbdev->hpd_lock);
- kfree(ifbdev);
+ drm_client_setup_with_color_mode(dev, preferred_bpp);
}
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
{
- if (!fbdev || !fbdev->helper.fb)
+ if (!fbdev)
return NULL;
- return to_intel_framebuffer(fbdev->helper.fb);
+ return fbdev->fb;
}
struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev)
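For reference, the intel_fbdev_color_mode() helper added above maps a framebuffer format to an fbdev color mode, with 0 meaning "unsupported, fall back to 32". The sketch below lists a few representative mappings derived from the standard drm_format_info tables rather than from this patch, and the helper name it wraps is only visible within this file.

/* Illustrative expectations only; relies on drm_format_info() data from
 * <drm/drm_fourcc.h>:
 *   DRM_FORMAT_XRGB8888 -> 32  (bpp 32, default case)
 *   DRM_FORMAT_RGB565   -> 16  (bpp 16, depth 16)
 *   DRM_FORMAT_XRGB1555 -> 15  (bpp 16, depth 15)
 *   DRM_FORMAT_ARGB8888 -> 0   (has_alpha, rejected)
 *   DRM_FORMAT_NV12     -> 0   (YUV/multi-planar, rejected)
 */
static unsigned int example_preferred_bpp(const struct drm_format_info *info)
{
	unsigned int bpp = intel_fbdev_color_mode(info);

	return bpp ?: 32;	/* same fallback as intel_fbdev_setup() above */
}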
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
index 24a3434558cb..ca2c8c438f02 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.h
@@ -6,28 +6,27 @@
#ifndef __INTEL_FBDEV_H__
#define __INTEL_FBDEV_H__
-#include <linux/types.h>
-
-struct drm_device;
+struct drm_fb_helper;
+struct drm_fb_helper_surface_size;
struct drm_i915_private;
struct intel_fbdev;
struct intel_framebuffer;
#ifdef CONFIG_DRM_FBDEV_EMULATION
+int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes);
+#define INTEL_FBDEV_DRIVER_OPS \
+ .fbdev_probe = intel_fbdev_driver_fbdev_probe
void intel_fbdev_setup(struct drm_i915_private *dev_priv);
-void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev);
struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev);
#else
+#define INTEL_FBDEV_DRIVER_OPS \
+ .fbdev_probe = NULL
static inline void intel_fbdev_setup(struct drm_i915_private *dev_priv)
{
}
-
-static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
-{
-}
-
static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
{
return NULL;
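The INTEL_FBDEV_DRIVER_OPS macro is meant to be dropped into the driver's struct drm_driver so the DRM client helpers can find the fbdev probe callback; the fragment below shows the intended wiring as an assumption, not a copy of i915_driver.c.

/* Hypothetical driver definition fragment; only the fbdev wiring matters. */
static const struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	/* expands to .fbdev_probe = intel_fbdev_driver_fbdev_probe (or NULL) */
	INTEL_FBDEV_DRIVER_OPS,
};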
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 024d0c7e0a88..40deee0769ae 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -24,10 +24,9 @@ struct intel_fdi_funcs {
const struct intel_crtc_state *crtc_state);
};
-static void assert_fdi_tx(struct drm_i915_private *dev_priv,
+static void assert_fdi_tx(struct intel_display *display,
enum pipe pipe, bool state)
{
- struct intel_display *display = &dev_priv->display;
bool cur_state;
if (HAS_DDI(display)) {
@@ -48,20 +47,19 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
str_on_off(state), str_on_off(cur_state));
}
-void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
+void assert_fdi_tx_enabled(struct intel_display *display, enum pipe pipe)
{
- assert_fdi_tx(i915, pipe, true);
+ assert_fdi_tx(display, pipe, true);
}
-void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
+void assert_fdi_tx_disabled(struct intel_display *display, enum pipe pipe)
{
- assert_fdi_tx(i915, pipe, false);
+ assert_fdi_tx(display, pipe, false);
}
-static void assert_fdi_rx(struct drm_i915_private *dev_priv,
+static void assert_fdi_rx(struct intel_display *display,
enum pipe pipe, bool state)
{
- struct intel_display *display = &dev_priv->display;
bool cur_state;
cur_state = intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
@@ -70,18 +68,17 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
str_on_off(state), str_on_off(cur_state));
}
-void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
+void assert_fdi_rx_enabled(struct intel_display *display, enum pipe pipe)
{
- assert_fdi_rx(i915, pipe, true);
+ assert_fdi_rx(display, pipe, true);
}
-void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
+void assert_fdi_rx_disabled(struct intel_display *display, enum pipe pipe)
{
- assert_fdi_rx(i915, pipe, false);
+ assert_fdi_rx(display, pipe, false);
}
-void assert_fdi_tx_pll_enabled(struct intel_display *display,
- enum pipe pipe)
+void assert_fdi_tx_pll_enabled(struct intel_display *display, enum pipe pipe)
{
bool cur_state;
@@ -122,9 +119,9 @@ void assert_fdi_rx_pll_disabled(struct intel_display *display, enum pipe pipe)
void intel_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state);
+ display->funcs.fdi->fdi_link_train(crtc, crtc_state);
}
/**
@@ -141,12 +138,11 @@ void intel_fdi_link_train(struct intel_crtc *crtc,
int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state;
const struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
- if (!IS_IVYBRIDGE(i915) || INTEL_NUM_PIPES(i915) != 3)
+ if (!display->platform.ivybridge || INTEL_NUM_PIPES(display) != 3)
return 0;
crtc = intel_crtc_for_pipe(display, PIPE_C);
@@ -184,31 +180,29 @@ static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
return 0;
}
-static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
+static int ilk_check_fdi_lanes(struct intel_display *display, enum pipe pipe,
struct intel_crtc_state *pipe_config,
enum pipe *pipe_to_reduce)
{
- struct intel_display *display = to_intel_display(dev);
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state = pipe_config->uapi.state;
struct intel_crtc *other_crtc;
struct intel_crtc_state *other_crtc_state;
*pipe_to_reduce = pipe;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"checking fdi config on pipe %c, lanes %i\n",
pipe_name(pipe), pipe_config->fdi_lanes);
if (pipe_config->fdi_lanes > 4) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"invalid fdi lane config on pipe %c: %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
return -EINVAL;
}
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ if (display->platform.haswell || display->platform.broadwell) {
if (pipe_config->fdi_lanes > 2) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"only 2 lanes on haswell, required: %i lanes\n",
pipe_config->fdi_lanes);
return -EINVAL;
@@ -217,7 +211,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
}
}
- if (INTEL_NUM_PIPES(dev_priv) == 2)
+ if (INTEL_NUM_PIPES(display) == 2)
return 0;
/* Ivybridge 3 pipe is really complicated */
@@ -235,7 +229,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return PTR_ERR(other_crtc_state);
if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"invalid shared fdi lane config on pipe %c: %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
return -EINVAL;
@@ -243,7 +237,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return 0;
case PIPE_C:
if (pipe_config->fdi_lanes > 2) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"only 2 lanes on pipe %c: required %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
return -EINVAL;
@@ -256,7 +250,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return PTR_ERR(other_crtc_state);
if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"fdi link B uses too many lanes to enable link C\n");
*pipe_to_reduce = PIPE_B;
@@ -270,29 +264,30 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
}
}
-void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
+void intel_fdi_pll_freq_update(struct intel_display *display)
{
- if (IS_IRONLAKE(i915)) {
- u32 fdi_pll_clk =
- intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
+ if (display->platform.ironlake) {
+ u32 fdi_pll_clk;
- i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
- } else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
- i915->display.fdi.pll_freq = 270000;
+ fdi_pll_clk = intel_de_read(display, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
+
+ display->fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
+ } else if (display->platform.sandybridge || display->platform.ivybridge) {
+ display->fdi.pll_freq = 270000;
} else {
return;
}
- drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq);
+ drm_dbg(display->drm, "FDI PLL freq=%d\n", display->fdi.pll_freq);
}
-int intel_fdi_link_freq(struct drm_i915_private *i915,
+int intel_fdi_link_freq(struct intel_display *display,
const struct intel_crtc_state *pipe_config)
{
- if (HAS_DDI(i915))
+ if (HAS_DDI(display))
return pipe_config->port_clock; /* SPLL */
else
- return i915->display.fdi.pll_freq;
+ return display->fdi.pll_freq;
}
/**
@@ -326,8 +321,7 @@ bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
int ilk_fdi_compute_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int lane, link_bw, fdi_dotclock;
@@ -338,7 +332,7 @@ int ilk_fdi_compute_config(struct intel_crtc *crtc,
* Hence the bw of each lane in terms of the mode signal
* is:
*/
- link_bw = intel_fdi_link_freq(i915, pipe_config);
+ link_bw = intel_fdi_link_freq(display, pipe_config);
fdi_dotclock = adjusted_mode->crtc_clock;
@@ -361,11 +355,11 @@ static int intel_fdi_atomic_check_bw(struct intel_atomic_state *state,
struct intel_crtc_state *pipe_config,
struct intel_link_bw_limits *limits)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe_to_reduce;
int ret;
- ret = ilk_check_fdi_lanes(&i915->drm, crtc->pipe, pipe_config,
+ ret = ilk_check_fdi_lanes(display, crtc->pipe, pipe_config,
&pipe_to_reduce);
if (ret != -EINVAL)
return ret;
@@ -418,48 +412,48 @@ int intel_fdi_atomic_check_link(struct intel_atomic_state *state,
return 0;
}
-static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
+static void cpt_set_fdi_bc_bifurcation(struct intel_display *display, bool enable)
{
u32 temp;
- temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
+ temp = intel_de_read(display, SOUTH_CHICKEN1);
if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
return;
- drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
+ drm_WARN_ON(display->drm,
+ intel_de_read(display, FDI_RX_CTL(PIPE_B)) &
FDI_RX_ENABLE);
- drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
+ drm_WARN_ON(display->drm,
+ intel_de_read(display, FDI_RX_CTL(PIPE_C)) &
FDI_RX_ENABLE);
temp &= ~FDI_BC_BIFURCATION_SELECT;
if (enable)
temp |= FDI_BC_BIFURCATION_SELECT;
- drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
+ drm_dbg_kms(display->drm, "%sabling fdi C rx\n",
enable ? "en" : "dis");
- intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
- intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
+ intel_de_write(display, SOUTH_CHICKEN1, temp);
+ intel_de_posting_read(display, SOUTH_CHICKEN1);
}
static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
switch (crtc->pipe) {
case PIPE_A:
break;
case PIPE_B:
if (crtc_state->fdi_lanes > 2)
- cpt_set_fdi_bc_bifurcation(dev_priv, false);
+ cpt_set_fdi_bc_bifurcation(display, false);
else
- cpt_set_fdi_bc_bifurcation(dev_priv, true);
+ cpt_set_fdi_bc_bifurcation(display, true);
break;
case PIPE_C:
- cpt_set_fdi_bc_bifurcation(dev_priv, true);
+ cpt_set_fdi_bc_bifurcation(display, true);
break;
default:
@@ -469,26 +463,26 @@ static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_st
void intel_fdi_normal_train(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
/* enable normal train */
reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- if (IS_IVYBRIDGE(dev_priv)) {
+ temp = intel_de_read(display, reg);
+ if (display->platform.ivybridge) {
temp &= ~FDI_LINK_TRAIN_NONE_IVB;
temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
} else {
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
}
- intel_de_write(dev_priv, reg, temp);
+ intel_de_write(display, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_NORMAL_CPT;
@@ -496,15 +490,15 @@ void intel_fdi_normal_train(struct intel_crtc *crtc)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_NONE;
}
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+ intel_de_write(display, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
/* wait one idle pattern time */
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(1000);
/* IVB wants error correction enabled */
- if (IS_IVYBRIDGE(dev_priv))
- intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
+ if (display->platform.ivybridge)
+ intel_de_rmw(display, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}
/* The FDI link training functions for ILK/Ibexpeak. */
@@ -512,8 +506,6 @@ static void ilk_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, tries;
@@ -522,8 +514,8 @@ static void ilk_fdi_link_train(struct intel_crtc *crtc,
* Write the TU size bits before fdi link training, so that error
* detection works.
*/
- intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
- intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);
+ intel_de_write(display, FDI_RX_TUSIZE1(pipe),
+ intel_de_read(display, PIPE_DATA_M1(display, pipe)) & TU_SIZE_MASK);
/* FDI needs bits from pipe first */
assert_transcoder_enabled(display, crtc_state->cpu_transcoder);
@@ -531,75 +523,75 @@ static void ilk_fdi_link_train(struct intel_crtc *crtc,
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
- intel_de_write(dev_priv, reg, temp);
- intel_de_read(dev_priv, reg);
+ intel_de_write(display, reg, temp);
+ intel_de_read(display, reg);
udelay(150);
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+ intel_de_write(display, reg, temp | FDI_TX_ENABLE);
reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+ intel_de_write(display, reg, temp | FDI_RX_ENABLE);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(150);
/* Ironlake workaround, enable clock pointer after FDI enable*/
- intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ intel_de_write(display, FDI_RX_CHICKEN(pipe),
FDI_RX_PHASE_SYNC_POINTER_OVR);
- intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ intel_de_write(display, FDI_RX_CHICKEN(pipe),
FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(display, reg);
+ drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);
if ((temp & FDI_RX_BIT_LOCK)) {
- drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
- intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
+ drm_dbg_kms(display->drm, "FDI train 1 done.\n");
+ intel_de_write(display, reg, temp | FDI_RX_BIT_LOCK);
break;
}
}
if (tries == 5)
- drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
+ drm_err(display->drm, "FDI train 1 fail!\n");
/* Train 2 */
- intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ intel_de_rmw(display, FDI_TX_CTL(pipe),
FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
- intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
+ intel_de_rmw(display, FDI_RX_CTL(pipe),
FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
- intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
+ intel_de_posting_read(display, FDI_RX_CTL(pipe));
udelay(150);
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(display, reg);
+ drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK) {
- intel_de_write(dev_priv, reg,
+ intel_de_write(display, reg,
temp | FDI_RX_SYMBOL_LOCK);
- drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
+ drm_dbg_kms(display->drm, "FDI train 2 done.\n");
break;
}
}
if (tries == 5)
- drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
+ drm_err(display->drm, "FDI train 2 fail!\n");
- drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
+ drm_dbg_kms(display->drm, "FDI train done\n");
}
@@ -614,8 +606,8 @@ static const int snb_b_fdi_train_param[] = {
static void gen6_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, i, retry;
@@ -624,23 +616,23 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
* Write the TU size bits before fdi link training, so that error
* detection works.
*/
- intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
- intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);
+ intel_de_write(display, FDI_RX_TUSIZE1(pipe),
+ intel_de_read(display, PIPE_DATA_M1(display, pipe)) & TU_SIZE_MASK);
/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
- intel_de_write(dev_priv, reg, temp);
+ intel_de_write(display, reg, temp);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(150);
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
@@ -648,13 +640,13 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
- intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+ intel_de_write(display, reg, temp | FDI_TX_ENABLE);
- intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+ intel_de_write(display, FDI_RX_MISC(pipe),
FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
@@ -662,25 +654,25 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
}
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+ intel_de_write(display, reg, temp | FDI_RX_ENABLE);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(150);
for (i = 0; i < 4; i++) {
- intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ intel_de_rmw(display, FDI_TX_CTL(pipe),
FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
- intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
+ intel_de_posting_read(display, FDI_TX_CTL(pipe));
udelay(500);
for (retry = 0; retry < 5; retry++) {
reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(display, reg);
+ drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_BIT_LOCK) {
- intel_de_write(dev_priv, reg,
+ intel_de_write(display, reg,
temp | FDI_RX_BIT_LOCK);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FDI train 1 done.\n");
break;
}
@@ -690,22 +682,22 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
break;
}
if (i == 4)
- drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
+ drm_err(display->drm, "FDI train 1 fail!\n");
/* Train 2 */
reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
- if (IS_SANDYBRIDGE(dev_priv)) {
+ if (display->platform.sandybridge) {
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
}
- intel_de_write(dev_priv, reg, temp);
+ intel_de_write(display, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
@@ -713,25 +705,25 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
}
- intel_de_write(dev_priv, reg, temp);
+ intel_de_write(display, reg, temp);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(150);
for (i = 0; i < 4; i++) {
- intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ intel_de_rmw(display, FDI_TX_CTL(pipe),
FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
- intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
+ intel_de_posting_read(display, FDI_TX_CTL(pipe));
udelay(500);
for (retry = 0; retry < 5; retry++) {
reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(display, reg);
+ drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK) {
- intel_de_write(dev_priv, reg,
+ intel_de_write(display, reg,
temp | FDI_RX_SYMBOL_LOCK);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FDI train 2 done.\n");
break;
}
@@ -741,17 +733,16 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
break;
}
if (i == 4)
- drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
+ drm_err(display->drm, "FDI train 2 fail!\n");
- drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
+ drm_dbg_kms(display->drm, "FDI train done.\n");
}
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, i, j;
@@ -762,72 +753,72 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
* Write the TU size bits before fdi link training, so that error
* detection works.
*/
- intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
- intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);
+ intel_de_write(display, FDI_RX_TUSIZE1(pipe),
+ intel_de_read(display, PIPE_DATA_M1(display, pipe)) & TU_SIZE_MASK);
/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
- intel_de_write(dev_priv, reg, temp);
+ intel_de_write(display, reg, temp);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(150);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
- intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
+ drm_dbg_kms(display->drm, "FDI_RX_IIR before link train 0x%x\n",
+ intel_de_read(display, FDI_RX_IIR(pipe)));
/* Try each vswing and preemphasis setting twice before moving on */
for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
/* disable first in case we need to retry */
reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
temp &= ~FDI_TX_ENABLE;
- intel_de_write(dev_priv, reg, temp);
+ intel_de_write(display, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~FDI_LINK_TRAIN_AUTO;
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp &= ~FDI_RX_ENABLE;
- intel_de_write(dev_priv, reg, temp);
+ intel_de_write(display, reg, temp);
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[j/2];
temp |= FDI_COMPOSITE_SYNC;
- intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+ intel_de_write(display, reg, temp | FDI_TX_ENABLE);
- intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+ intel_de_write(display, FDI_RX_MISC(pipe),
FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
temp |= FDI_COMPOSITE_SYNC;
- intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+ intel_de_write(display, reg, temp | FDI_RX_ENABLE);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(1); /* should be 0.5us */
for (i = 0; i < 4; i++) {
reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(display, reg);
+ drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_BIT_LOCK ||
- (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
- intel_de_write(dev_priv, reg,
+ (intel_de_read(display, reg) & FDI_RX_BIT_LOCK)) {
+ intel_de_write(display, reg,
temp | FDI_RX_BIT_LOCK);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FDI train 1 done, level %i.\n",
i);
break;
@@ -835,31 +826,31 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
udelay(1); /* should be 0.5us */
}
if (i == 4) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FDI train 1 fail on vswing %d\n", j / 2);
continue;
}
/* Train 2 */
- intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ intel_de_rmw(display, FDI_TX_CTL(pipe),
FDI_LINK_TRAIN_NONE_IVB,
FDI_LINK_TRAIN_PATTERN_2_IVB);
- intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
+ intel_de_rmw(display, FDI_RX_CTL(pipe),
FDI_LINK_TRAIN_PATTERN_MASK_CPT,
FDI_LINK_TRAIN_PATTERN_2_CPT);
- intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
+ intel_de_posting_read(display, FDI_RX_CTL(pipe));
udelay(2); /* should be 1.5us */
for (i = 0; i < 4; i++) {
reg = FDI_RX_IIR(pipe);
- temp = intel_de_read(dev_priv, reg);
- drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(display, reg);
+ drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK ||
- (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
- intel_de_write(dev_priv, reg,
+ (intel_de_read(display, reg) & FDI_RX_SYMBOL_LOCK)) {
+ intel_de_write(display, reg,
temp | FDI_RX_SYMBOL_LOCK);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FDI train 2 done, level %i.\n",
i);
goto train_done;
@@ -867,12 +858,12 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
udelay(2); /* should be 1.5us */
}
if (i == 4)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FDI train 2 fail on vswing %d\n", j / 2);
}
train_done:
- drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
+ drm_dbg_kms(display->drm, "FDI train done.\n");
}
/* Starting with Haswell, different DDI ports can work in FDI mode for
@@ -886,8 +877,7 @@ train_done:
void hsw_fdi_link_train(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
u32 temp, i, rx_ctl_val;
int n_entries;
@@ -902,33 +892,33 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
*
* WaFDIAutoLinkSetTimingOverrride:hsw
*/
- intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
+ intel_de_write(display, FDI_RX_MISC(PIPE_A),
FDI_RX_PWRDN_LANE1_VAL(2) |
FDI_RX_PWRDN_LANE0_VAL(2) |
FDI_RX_TP1_TO_TP2_48 |
FDI_RX_FDI_DELAY_90);
/* Enable the PCH Receiver FDI PLL */
- rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
+ rx_ctl_val = display->fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
FDI_RX_PLL_ENABLE |
FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
- intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
+ intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(display, FDI_RX_CTL(PIPE_A));
udelay(220);
/* Switch from Rawclk to PCDclk */
rx_ctl_val |= FDI_PCDCLK;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);
/* Configure Port Clock Select */
- drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
+ drm_WARN_ON(display->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
intel_ddi_enable_clock(encoder, crtc_state);
/* Start the training iterating through available voltages and emphasis,
* testing each value twice. */
for (i = 0; i < n_entries * 2; i++) {
/* Configure DP_TP_CTL with auto-training */
- intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
+ intel_de_write(display, DP_TP_CTL(PORT_E),
DP_TP_CTL_FDI_AUTOTRAIN |
DP_TP_CTL_ENHANCED_FRAME_ENABLE |
DP_TP_CTL_LINK_TRAIN_PAT1 |
@@ -938,36 +928,36 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
* DDI E does not support port reversal, the functionality is
* achieved on the PCH side in FDI_RX_CTL, so no need to set the
* port reversal bit */
- intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
+ intel_de_write(display, DDI_BUF_CTL(PORT_E),
DDI_BUF_CTL_ENABLE |
((crtc_state->fdi_lanes - 1) << 1) |
DDI_BUF_TRANS_SELECT(i / 2));
- intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
+ intel_de_posting_read(display, DDI_BUF_CTL(PORT_E));
udelay(600);
/* Program PCH FDI Receiver TU */
- intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
+ intel_de_write(display, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
/* Enable PCH FDI Receiver with auto-training */
rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
- intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
+ intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(display, FDI_RX_CTL(PIPE_A));
/* Wait for FDI receiver lane calibration */
udelay(30);
/* Unset FDI_RX_MISC pwrdn lanes */
- intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
+ intel_de_rmw(display, FDI_RX_MISC(PIPE_A),
FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
- intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
+ intel_de_posting_read(display, FDI_RX_MISC(PIPE_A));
/* Wait for FDI auto training time */
udelay(5);
- temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
+ temp = intel_de_read(display, DP_TP_STATUS(PORT_E));
if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FDI link training done on step %d\n", i);
break;
}
@@ -977,32 +967,32 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
* Results in less fireworks from the state checker.
*/
if (i == n_entries * 2 - 1) {
- drm_err(&dev_priv->drm, "FDI link training failed!\n");
+ drm_err(display->drm, "FDI link training failed!\n");
break;
}
rx_ctl_val &= ~FDI_RX_ENABLE;
- intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
- intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
+ intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(display, FDI_RX_CTL(PIPE_A));
- intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
- intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
+ intel_de_rmw(display, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
+ intel_de_posting_read(display, DDI_BUF_CTL(PORT_E));
/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
- intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
- intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));
+ intel_de_rmw(display, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
+ intel_de_posting_read(display, DP_TP_CTL(PORT_E));
- intel_wait_ddi_buf_idle(dev_priv, PORT_E);
+ intel_wait_ddi_buf_idle(display, PORT_E);
/* Reset FDI_RX_MISC pwrdn lanes */
- intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
+ intel_de_rmw(display, FDI_RX_MISC(PIPE_A),
FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
- intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
+ intel_de_posting_read(display, FDI_RX_MISC(PIPE_A));
}
/* Enable normal pixel sending for FDI */
- intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
+ intel_de_write(display, DP_TP_CTL(PORT_E),
DP_TP_CTL_FDI_AUTOTRAIN |
DP_TP_CTL_LINK_TRAIN_NORMAL |
DP_TP_CTL_ENHANCED_FRAME_ENABLE |
@@ -1011,7 +1001,7 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
void hsw_fdi_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
/*
* Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
@@ -1019,103 +1009,103 @@ void hsw_fdi_disable(struct intel_encoder *encoder)
* step 13 is the correct place for it. Step 18 is where it was
* originally before the BUN.
*/
- intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
- intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
- intel_wait_ddi_buf_idle(dev_priv, PORT_E);
+ intel_de_rmw(display, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
+ intel_de_rmw(display, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
+ intel_wait_ddi_buf_idle(display, PORT_E);
intel_ddi_disable_clock(encoder);
- intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
+ intel_de_rmw(display, FDI_RX_MISC(PIPE_A),
FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
- intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
- intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
+ intel_de_rmw(display, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
+ intel_de_rmw(display, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
}
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
- intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
+ temp |= (intel_de_read(display, TRANSCONF(display, pipe)) & TRANSCONF_BPC_MASK) << 11;
+ intel_de_write(display, reg, temp | FDI_RX_PLL_ENABLE);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(200);
/* Switch from Rawclk to PCDclk */
- intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_rmw(display, reg, 0, FDI_PCDCLK);
+ intel_de_posting_read(display, reg);
udelay(200);
/* Enable CPU FDI TX PLL, always on for Ironlake */
reg = FDI_TX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
if ((temp & FDI_TX_PLL_ENABLE) == 0) {
- intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
+ intel_de_write(display, reg, temp | FDI_TX_PLL_ENABLE);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(100);
}
}
void ilk_fdi_pll_disable(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
/* Switch from PCDclk to Rawclk */
- intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);
+ intel_de_rmw(display, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);
/* Disable CPU FDI TX PLL */
- intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
- intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
+ intel_de_rmw(display, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
+ intel_de_posting_read(display, FDI_TX_CTL(pipe));
udelay(100);
/* Wait for the clocks to turn off. */
- intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
- intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
+ intel_de_rmw(display, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
+ intel_de_posting_read(display, FDI_RX_CTL(pipe));
udelay(100);
}
void ilk_fdi_disable(struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
/* disable CPU FDI tx and PCH FDI rx */
- intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
- intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
+ intel_de_rmw(display, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
+ intel_de_posting_read(display, FDI_TX_CTL(pipe));
reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
temp &= ~(0x7 << 16);
- temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
- intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
+ temp |= (intel_de_read(display, TRANSCONF(display, pipe)) & TRANSCONF_BPC_MASK) << 11;
+ intel_de_write(display, reg, temp & ~FDI_RX_ENABLE);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(100);
/* Ironlake workaround, disable clock pointer after downing FDI */
if (HAS_PCH_IBX(dev_priv))
- intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ intel_de_write(display, FDI_RX_CHICKEN(pipe),
FDI_RX_PHASE_SYNC_POINTER_OVR);
/* still set train pattern 1 */
- intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
+ intel_de_rmw(display, FDI_TX_CTL(pipe),
FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);
reg = FDI_RX_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
+ temp = intel_de_read(display, reg);
if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
@@ -1125,10 +1115,10 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
}
/* BPC in FDI rx is consistent with that in TRANSCONF */
temp &= ~(0x07 << 16);
- temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
- intel_de_write(dev_priv, reg, temp);
+ temp |= (intel_de_read(display, TRANSCONF(display, pipe)) & TRANSCONF_BPC_MASK) << 11;
+ intel_de_write(display, reg, temp);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(100);
}
@@ -1145,14 +1135,14 @@ static const struct intel_fdi_funcs ivb_funcs = {
};
void
-intel_fdi_init_hook(struct drm_i915_private *dev_priv)
+intel_fdi_init_hook(struct intel_display *display)
{
- if (IS_IRONLAKE(dev_priv)) {
- dev_priv->display.funcs.fdi = &ilk_funcs;
- } else if (IS_SANDYBRIDGE(dev_priv)) {
- dev_priv->display.funcs.fdi = &gen6_funcs;
- } else if (IS_IVYBRIDGE(dev_priv)) {
+ if (display->platform.ironlake) {
+ display->funcs.fdi = &ilk_funcs;
+ } else if (display->platform.sandybridge) {
+ display->funcs.fdi = &gen6_funcs;
+ } else if (display->platform.ivybridge) {
/* FIXME: detect B0+ stepping and use auto training */
- dev_priv->display.funcs.fdi = &ivb_funcs;
+ display->funcs.fdi = &ivb_funcs;
}
}
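For reference, a minimal sketch (not part of the patch) of the accessor pattern the intel_fdi.c conversion above settles on: derive struct intel_display with to_intel_display() and pass it to the intel_de_*() helpers instead of dev_priv. The wrapper function below is purely illustrative.

/*
 * Sketch only: shows the struct intel_display based MMIO pattern used
 * throughout the converted FDI code. The function name is illustrative;
 * the register and helper names are the ones used in the hunks above.
 */
static void example_fdi_rx_enable(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(display, FDI_RX_CTL(pipe));
	tmp |= FDI_RX_ENABLE;
	intel_de_write(display, FDI_RX_CTL(pipe), tmp);
	intel_de_posting_read(display, FDI_RX_CTL(pipe));
}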
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.h b/drivers/gpu/drm/i915/display/intel_fdi.h
index b5be09efb36f..ad5e103c38a8 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.h
+++ b/drivers/gpu/drm/i915/display/intel_fdi.h
@@ -9,16 +9,16 @@
#include <linux/types.h>
enum pipe;
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
+struct intel_display;
struct intel_encoder;
struct intel_link_bw_limits;
int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state);
-int intel_fdi_link_freq(struct drm_i915_private *i915,
+int intel_fdi_link_freq(struct intel_display *display,
const struct intel_crtc_state *pipe_config);
bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state);
int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
@@ -29,19 +29,19 @@ void intel_fdi_normal_train(struct intel_crtc *crtc);
void ilk_fdi_disable(struct intel_crtc *crtc);
void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc);
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state);
-void intel_fdi_init_hook(struct drm_i915_private *dev_priv);
+void intel_fdi_init_hook(struct intel_display *display);
void hsw_fdi_link_train(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void hsw_fdi_disable(struct intel_encoder *encoder);
-void intel_fdi_pll_freq_update(struct drm_i915_private *i915);
+void intel_fdi_pll_freq_update(struct intel_display *display);
void intel_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
-void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe);
-void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe);
-void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe);
-void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_fdi_tx_enabled(struct intel_display *display, enum pipe pipe);
+void assert_fdi_tx_disabled(struct intel_display *display, enum pipe pipe);
+void assert_fdi_rx_enabled(struct intel_display *display, enum pipe pipe);
+void assert_fdi_rx_disabled(struct intel_display *display, enum pipe pipe);
void assert_fdi_tx_pll_enabled(struct intel_display *display, enum pipe pipe);
void assert_fdi_rx_pll_enabled(struct intel_display *display, enum pipe pipe);
void assert_fdi_rx_pll_disabled(struct intel_display *display, enum pipe pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 26128c610cb4..ba2f88ca6117 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -59,6 +59,7 @@
#include "i915_active.h"
#include "i915_drv.h"
+#include "i915_vma.h"
#include "intel_bo.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
@@ -98,10 +99,10 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
trace_intel_frontbuffer_flush(display, frontbuffer_bits, origin);
might_sleep();
- intel_td_flush(i915);
- intel_drrs_flush(i915, frontbuffer_bits);
+ intel_td_flush(display);
+ intel_drrs_flush(display, frontbuffer_bits);
intel_psr_flush(display, frontbuffer_bits, origin);
- intel_fbc_flush(i915, frontbuffer_bits, origin);
+ intel_fbc_flush(display, frontbuffer_bits, origin);
}
/**
@@ -176,7 +177,6 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
unsigned int frontbuffer_bits)
{
struct intel_display *display = to_intel_display(front->obj->dev);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (origin == ORIGIN_CS) {
spin_lock(&display->fb_tracking.lock);
@@ -189,8 +189,8 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
might_sleep();
intel_psr_invalidate(display, frontbuffer_bits, origin);
- intel_drrs_invalidate(i915, frontbuffer_bits);
- intel_fbc_invalidate(i915, frontbuffer_bits, origin);
+ intel_drrs_invalidate(display, frontbuffer_bits);
+ intel_fbc_invalidate(display, frontbuffer_bits, origin);
}
void __intel_fb_flush(struct intel_frontbuffer *front,
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 7063e3f5c538..1bf424a822f3 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -70,13 +70,13 @@ static int intel_conn_to_vcpi(struct intel_atomic_state *state,
int vcpi = 0;
/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
- if (!connector->port)
+ if (!connector->mst.port)
return 0;
- mgr = connector->port->mgr;
+ mgr = connector->mst.port->mgr;
drm_modeset_lock(&mgr->base.lock, state->base.acquire_ctx);
mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
- payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
+ payload = drm_atomic_get_mst_payload_state(mst_state, connector->mst.port);
if (drm_WARN_ON(mgr->dev, !payload))
goto out;
@@ -107,16 +107,16 @@ intel_hdcp_required_content_stream(struct intel_atomic_state *state,
struct drm_connector_list_iter conn_iter;
struct intel_digital_port *conn_dig_port;
struct intel_connector *connector;
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
bool enforce_type0 = false;
int k;
- if (dig_port->hdcp_auth_status)
+ if (dig_port->hdcp.auth_status)
return 0;
data->k = 0;
- if (!dig_port->hdcp_mst_type1_capable)
+ if (!dig_port->hdcp.mst_type1_capable)
enforce_type0 = true;
drm_connector_list_iter_begin(display->drm, &conn_iter);
@@ -136,7 +136,7 @@ intel_hdcp_required_content_stream(struct intel_atomic_state *state,
data->k++;
/* if there is only one active stream */
- if (dig_port->dp.active_mst_links <= 1)
+ if (dig_port->dp.mst.active_links <= 1)
break;
}
drm_connector_list_iter_end(&conn_iter);
@@ -159,7 +159,7 @@ static int intel_hdcp_prepare_streams(struct intel_atomic_state *state,
struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
if (intel_encoder_is_mst(intel_attached_encoder(connector)))
@@ -1001,7 +1001,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
* don't disable it until it disabled HDCP encryption for
* all connectors in MST topology.
*/
- if (dig_port->num_hdcp_streams > 0)
+ if (dig_port->hdcp.num_streams > 0)
return 0;
}
@@ -1094,13 +1094,13 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
if (hdcp->value == value)
return;
- drm_WARN_ON(display->drm, !mutex_is_locked(&dig_port->hdcp_mutex));
+ drm_WARN_ON(display->drm, !mutex_is_locked(&dig_port->hdcp.mutex));
if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
- if (!drm_WARN_ON(display->drm, dig_port->num_hdcp_streams == 0))
- dig_port->num_hdcp_streams--;
+ if (!drm_WARN_ON(display->drm, dig_port->hdcp.num_streams == 0))
+ dig_port->hdcp.num_streams--;
} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
- dig_port->num_hdcp_streams++;
+ dig_port->hdcp.num_streams++;
}
hdcp->value = value;
@@ -1122,7 +1122,7 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
int ret = 0;
mutex_lock(&hdcp->mutex);
- mutex_lock(&dig_port->hdcp_mutex);
+ mutex_lock(&dig_port->hdcp.mutex);
cpu_transcoder = hdcp->cpu_transcoder;
@@ -1177,7 +1177,7 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
}
out:
- mutex_unlock(&dig_port->hdcp_mutex);
+ mutex_unlock(&dig_port->hdcp.mutex);
mutex_unlock(&hdcp->mutex);
return ret;
}
@@ -1219,7 +1219,7 @@ hdcp2_prepare_ake_init(struct intel_connector *connector,
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct i915_hdcp_arbiter *arbiter;
int ret;
@@ -1249,7 +1249,7 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct i915_hdcp_arbiter *arbiter;
int ret;
@@ -1277,7 +1277,7 @@ static int hdcp2_verify_hprime(struct intel_connector *connector,
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct i915_hdcp_arbiter *arbiter;
int ret;
@@ -1303,7 +1303,7 @@ hdcp2_store_pairing_info(struct intel_connector *connector,
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct i915_hdcp_arbiter *arbiter;
int ret;
@@ -1330,7 +1330,7 @@ hdcp2_prepare_lc_init(struct intel_connector *connector,
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct i915_hdcp_arbiter *arbiter;
int ret;
@@ -1357,7 +1357,7 @@ hdcp2_verify_lprime(struct intel_connector *connector,
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct i915_hdcp_arbiter *arbiter;
int ret;
@@ -1383,7 +1383,7 @@ static int hdcp2_prepare_skey(struct intel_connector *connector,
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct i915_hdcp_arbiter *arbiter;
int ret;
@@ -1412,7 +1412,7 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct i915_hdcp_arbiter *arbiter;
int ret;
@@ -1442,7 +1442,7 @@ hdcp2_verify_mprime(struct intel_connector *connector,
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct i915_hdcp_arbiter *arbiter;
int ret;
@@ -1466,7 +1466,7 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct i915_hdcp_arbiter *arbiter;
int ret;
@@ -1503,7 +1503,7 @@ static int hdcp2_close_session(struct intel_connector *connector)
}
ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev,
- &dig_port->hdcp_port_data);
+ &dig_port->hdcp.port_data);
mutex_unlock(&display->hdcp.hdcp_mutex);
return ret;
@@ -1691,7 +1691,7 @@ static
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
union {
struct hdcp2_rep_stream_manage stream_manage;
@@ -1769,11 +1769,11 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
* MST topology is not Type 1 capable if it contains a downstream
* device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
*/
- dig_port->hdcp_mst_type1_capable =
+ dig_port->hdcp.mst_type1_capable =
!HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
!HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);
- if (!dig_port->hdcp_mst_type1_capable && hdcp->content_type) {
+ if (!dig_port->hdcp.mst_type1_capable && hdcp->content_type) {
drm_dbg_kms(display->drm,
"HDCP1.x or 2.0 Legacy Device Downstream\n");
return -EINVAL;
@@ -1869,7 +1869,7 @@ static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
enum port port = dig_port->base.port;
@@ -1900,7 +1900,7 @@ link_recover:
if (hdcp2_deauthenticate_port(connector) < 0)
drm_dbg_kms(display->drm, "Port deauth failed.\n");
- dig_port->hdcp_auth_status = false;
+ dig_port->hdcp.auth_status = false;
data->k = 0;
return ret;
@@ -1940,7 +1940,7 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
port),
LINK_ENCRYPTION_STATUS,
HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
- dig_port->hdcp_auth_status = true;
+ dig_port->hdcp.auth_status = true;
return ret;
}
@@ -2019,7 +2019,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
int ret = 0, i, tries = 3;
- for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
+ for (i = 0; i < tries && !dig_port->hdcp.auth_status; i++) {
ret = hdcp2_authenticate_sink(connector);
if (!ret) {
ret = intel_hdcp_prepare_streams(state, connector);
@@ -2052,7 +2052,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
drm_dbg_kms(display->drm, "Port deauth failed.\n");
}
- if (!ret && !dig_port->hdcp_auth_status) {
+ if (!ret && !dig_port->hdcp.auth_status) {
/*
* Ensuring the required 200mSec min time interval between
* Session Key Exchange and encryption.
@@ -2106,7 +2106,7 @@ _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery
{
struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
@@ -2123,7 +2123,7 @@ _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery
drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
transcoder_name(hdcp->stream_transcoder));
- if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
+ if (dig_port->hdcp.num_streams > 0 && !hdcp2_link_recovery)
return 0;
}
@@ -2133,7 +2133,7 @@ _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery
drm_dbg_kms(display->drm, "Port deauth failed.\n");
connector->hdcp.hdcp2_encrypted = false;
- dig_port->hdcp_auth_status = false;
+ dig_port->hdcp.auth_status = false;
data->k = 0;
return ret;
@@ -2150,7 +2150,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
int ret = 0;
mutex_lock(&hdcp->mutex);
- mutex_lock(&dig_port->hdcp_mutex);
+ mutex_lock(&dig_port->hdcp.mutex);
cpu_transcoder = hdcp->cpu_transcoder;
/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
@@ -2221,7 +2221,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
intel_hdcp_update_value(connector,
DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
out:
- mutex_unlock(&dig_port->hdcp_mutex);
+ mutex_unlock(&dig_port->hdcp.mutex);
mutex_unlock(&hdcp->mutex);
return ret;
}
@@ -2303,7 +2303,7 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
const struct intel_hdcp_shim *shim)
{
struct intel_display *display = to_intel_display(connector);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct hdcp_port_data *data = &dig_port->hdcp.port_data;
enum port port = dig_port->base.port;
if (DISPLAY_VER(display) < 12)
@@ -2414,7 +2414,7 @@ int intel_hdcp_init(struct intel_connector *connector,
hdcp->hdcp2_supported);
if (ret) {
hdcp->hdcp2_supported = false;
- kfree(dig_port->hdcp_port_data.streams);
+ kfree(dig_port->hdcp.port_data.streams);
return ret;
}
@@ -2451,7 +2451,7 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
}
mutex_lock(&hdcp->mutex);
- mutex_lock(&dig_port->hdcp_mutex);
+ mutex_lock(&dig_port->hdcp.mutex);
drm_WARN_ON(display->drm,
hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
hdcp->content_type = (u8)conn_state->hdcp_content_type;
@@ -2465,7 +2465,7 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
}
if (DISPLAY_VER(display) >= 12)
- dig_port->hdcp_port_data.hdcp_transcoder =
+ dig_port->hdcp.port_data.hdcp_transcoder =
intel_get_hdcp_transcoder(hdcp->cpu_transcoder);
/*
@@ -2499,7 +2499,7 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
true);
}
- mutex_unlock(&dig_port->hdcp_mutex);
+ mutex_unlock(&dig_port->hdcp.mutex);
mutex_unlock(&hdcp->mutex);
return ret;
}
@@ -2535,7 +2535,7 @@ int intel_hdcp_disable(struct intel_connector *connector)
return -ENOENT;
mutex_lock(&hdcp->mutex);
- mutex_lock(&dig_port->hdcp_mutex);
+ mutex_lock(&dig_port->hdcp.mutex);
if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
goto out;
@@ -2548,7 +2548,7 @@ int intel_hdcp_disable(struct intel_connector *connector)
ret = _intel_hdcp_disable(connector);
out:
- mutex_unlock(&dig_port->hdcp_mutex);
+ mutex_unlock(&dig_port->hdcp.mutex);
mutex_unlock(&hdcp->mutex);
cancel_delayed_work_sync(&hdcp->check_work);
return ret;
@@ -2775,7 +2775,7 @@ out:
void intel_hdcp_info(struct seq_file *m, struct intel_connector *connector)
{
seq_puts(m, "\tHDCP version: ");
- if (connector->mst_port) {
+ if (connector->mst.dp) {
__intel_hdcp_info(m, connector, true);
seq_puts(m, "\tMST Hub HDCP version: ");
}
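The intel_hdcp.c hunks above are a mechanical regrouping: the loose dig_port->hdcp_* fields move under a single dig_port->hdcp sub-struct (mutex, num_streams, auth_status, port_data, mst_type1_capable). A hedged sketch of the resulting access pattern follows; the helper and the exact field layout are inferred from the renames, not taken from the headers.

/*
 * Sketch only: per-port HDCP state accessed through the dig_port->hdcp
 * sub-struct, as the renames above imply. Not an addition to the series.
 */
static void example_hdcp_stream_count(struct intel_digital_port *dig_port,
				      bool enable)
{
	mutex_lock(&dig_port->hdcp.mutex);
	if (enable)
		dig_port->hdcp.num_streams++;
	else if (dig_port->hdcp.num_streams > 0)
		dig_port->hdcp.num_streams--;
	mutex_unlock(&dig_port->hdcp.mutex);
}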
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index ed017d9de920..33b8d5229db0 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -2360,7 +2360,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
}
if (intel_hdmi_is_ycbcr420(pipe_config)) {
- ret = intel_panel_fitting(pipe_config, conn_state);
+ ret = intel_pfit_compute_config(pipe_config, conn_state);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index d237fe08c3e6..dec2ad7dd8a2 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -14,7 +14,6 @@ enum port;
struct drm_connector;
struct drm_connector_state;
struct drm_encoder;
-struct drm_i915_private;
struct intel_connector;
struct intel_crtc_state;
struct intel_digital_port;
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c
index f4d60e77aa18..a10cd3992607 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include "intel_atomic.h"
#include "intel_crtc.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 7ed8625193fe..19f52d1659fa 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -53,6 +53,7 @@
#include "intel_lvds_regs.h"
#include "intel_panel.h"
#include "intel_pfit.h"
+#include "intel_pfit_regs.h"
#include "intel_pps_regs.h"
/* Private structure for the integrated LVDS support */
@@ -468,7 +469,7 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
- ret = intel_panel_fitting(crtc_state, conn_state);
+ ret = intel_pfit_compute_config(crtc_state, conn_state);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index a5a00b3ce98f..312b21b1ab59 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -156,12 +156,6 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct intel_bw_state *bw_state =
- to_intel_bw_state(i915->display.bw.obj.state);
- struct intel_cdclk_state *cdclk_state =
- to_intel_cdclk_state(i915->display.cdclk.obj.state);
- struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->display.dbuf.obj.state);
struct intel_pmdemand_state *pmdemand_state =
to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
struct intel_crtc_state *crtc_state =
@@ -179,14 +173,9 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
intel_display_power_put_all_in_set(display, &crtc->enabled_power_domains);
- cdclk_state->min_cdclk[pipe] = 0;
- cdclk_state->min_voltage_level[pipe] = 0;
- cdclk_state->active_pipes &= ~BIT(pipe);
-
- dbuf_state->active_pipes &= ~BIT(pipe);
-
- bw_state->data_rate[pipe] = 0;
- bw_state->num_active_planes[pipe] = 0;
+ intel_cdclk_crtc_disable_noatomic(crtc);
+ skl_wm_crtc_disable_noatomic(crtc);
+ intel_bw_crtc_disable_noatomic(crtc);
intel_pmdemand_update_port_clock(display, pmdemand_state, pipe, 0);
}
@@ -704,10 +693,6 @@ static void readout_plane_state(struct drm_i915_private *i915)
static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
{
struct intel_display *display = &i915->display;
- struct intel_cdclk_state *cdclk_state =
- to_intel_cdclk_state(i915->display.cdclk.obj.state);
- struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->display.dbuf.obj.state);
struct intel_pmdemand_state *pmdemand_state =
to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
enum pipe pipe;
@@ -715,7 +700,6 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
struct intel_encoder *encoder;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- u8 active_pipes = 0;
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
@@ -732,18 +716,12 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
crtc->base.enabled = crtc_state->hw.enable;
crtc->active = crtc_state->hw.active;
- if (crtc_state->hw.active)
- active_pipes |= BIT(crtc->pipe);
-
drm_dbg_kms(&i915->drm,
"[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
str_enabled_disabled(crtc_state->hw.active));
}
- cdclk_state->active_pipes = active_pipes;
- dbuf_state->active_pipes = active_pipes;
-
readout_plane_state(i915);
for_each_intel_encoder(&i915->drm, encoder) {
@@ -839,12 +817,9 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
drm_connector_list_iter_end(&conn_iter);
for_each_intel_crtc(&i915->drm, crtc) {
- struct intel_bw_state *bw_state =
- to_intel_bw_state(i915->display.bw.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane;
- int min_cdclk = 0;
if (crtc_state->hw.active) {
/*
@@ -893,22 +868,17 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
crtc_state->min_cdclk[plane->id]);
}
- if (crtc_state->hw.active) {
- min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
- if (drm_WARN_ON(&i915->drm, min_cdclk < 0))
- min_cdclk = 0;
- }
-
- cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
- cdclk_state->min_voltage_level[crtc->pipe] =
- crtc_state->min_voltage_level;
-
intel_pmdemand_update_port_clock(display, pmdemand_state, pipe,
crtc_state->port_clock);
-
- intel_bw_crtc_update(bw_state, crtc_state);
}
+ /* TODO move here (or even earlier?) on all platforms */
+ if (DISPLAY_VER(display) >= 9)
+ intel_wm_get_hw_state(i915);
+
+ intel_bw_update_hw_state(display);
+ intel_cdclk_update_hw_state(display);
+
intel_pmdemand_init_pmdemand_params(display, pmdemand_state);
}
@@ -1016,7 +986,10 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
intel_dpll_sanitize_state(display);
- intel_wm_get_hw_state(i915);
+ /* TODO move earlier on all platforms */
+ if (DISPLAY_VER(display) < 9)
+ intel_wm_get_hw_state(i915);
+ intel_wm_sanitize(i915);
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
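The intel_modeset_setup.c hunks above replace the open-coded per-pipe cdclk/dbuf/bw clearing with intel_cdclk_crtc_disable_noatomic(), skl_wm_crtc_disable_noatomic() and intel_bw_crtc_disable_noatomic(), and move the global readout into intel_bw_update_hw_state()/intel_cdclk_update_hw_state(). A sketch of what the cdclk helper presumably does, reconstructed from the removed lines; the real helper lives elsewhere in the series and may differ.

/*
 * Sketch, reconstructed from the open-coded lines removed above; not the
 * actual intel_cdclk_crtc_disable_noatomic() implementation.
 */
static void sketch_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(display->cdclk.obj.state);
	enum pipe pipe = crtc->pipe;

	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);
}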
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
index bc70e72ccc2e..a008412fdd04 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -90,10 +90,11 @@ verify_connector_state(struct intel_atomic_state *state,
static void intel_pipe_config_sanity_check(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
if (crtc_state->has_pch_encoder) {
- int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(i915, crtc_state),
+ int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(display, crtc_state),
&crtc_state->fdi_m_n);
int dotclock = crtc_state->hw.adjusted_mode.crtc_clock;
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 4d00db86131b..aff9a3455c1b 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -42,6 +42,7 @@
#include "intel_frontbuffer.h"
#include "intel_overlay.h"
#include "intel_pci_config.h"
+#include "intel_pfit_regs.h"
/* Limits for overlay size. According to intel doc, the real limits are:
* Y width: 4095, UV width (planar): 2047, Y height: 2047,
@@ -799,7 +800,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct drm_intel_overlay_put_image *params)
{
struct intel_display *display = overlay->display;
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct overlay_registers __iomem *regs = overlay->regs;
u32 swidth, swidthsw, sheight, ostride;
enum pipe pipe = overlay->crtc->pipe;
@@ -814,7 +814,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
return ret;
- atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+ atomic_inc(&display->restore.pending_fb_pin);
vma = intel_overlay_pin_fb(new_bo);
if (IS_ERR(vma)) {
@@ -902,7 +902,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
out_unpin:
i915_vma_unpin(vma);
out_pin_section:
- atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+ atomic_dec(&display->restore.pending_fb_pin);
return ret;
}
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.h b/drivers/gpu/drm/i915/display/intel_overlay.h
index 45a42fce754e..d259e4c74b03 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.h
+++ b/drivers/gpu/drm/i915/display/intel_overlay.h
@@ -10,7 +10,6 @@
struct drm_device;
struct drm_file;
-struct drm_i915_private;
struct drm_printer;
struct intel_display;
struct intel_overlay;
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 4e6c5592c7ae..f5c972880391 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -32,6 +32,7 @@
#include <linux/pwm.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include "intel_backlight.h"
#include "intel_connector.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index 1abe0a784570..99f6d6f53fa7 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -181,10 +181,10 @@ static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
static void intel_pch_transcoder_set_m1_n1(struct intel_crtc *crtc,
const struct intel_link_m_n *m_n)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
- intel_set_m_n(dev_priv, m_n,
+ intel_set_m_n(display, m_n,
PCH_TRANS_DATA_M1(pipe), PCH_TRANS_DATA_N1(pipe),
PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe));
}
@@ -192,10 +192,10 @@ static void intel_pch_transcoder_set_m1_n1(struct intel_crtc *crtc,
static void intel_pch_transcoder_set_m2_n2(struct intel_crtc *crtc,
const struct intel_link_m_n *m_n)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
- intel_set_m_n(dev_priv, m_n,
+ intel_set_m_n(display, m_n,
PCH_TRANS_DATA_M2(pipe), PCH_TRANS_DATA_N2(pipe),
PCH_TRANS_LINK_M2(pipe), PCH_TRANS_LINK_N2(pipe));
}
@@ -203,10 +203,10 @@ static void intel_pch_transcoder_set_m2_n2(struct intel_crtc *crtc,
void intel_pch_transcoder_get_m1_n1(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
- intel_get_m_n(dev_priv, m_n,
+ intel_get_m_n(display, m_n,
PCH_TRANS_DATA_M1(pipe), PCH_TRANS_DATA_N1(pipe),
PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe));
}
@@ -214,10 +214,10 @@ void intel_pch_transcoder_get_m1_n1(struct intel_crtc *crtc,
void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
- intel_get_m_n(dev_priv, m_n,
+ intel_get_m_n(display, m_n,
PCH_TRANS_DATA_M2(pipe), PCH_TRANS_DATA_N2(pipe),
PCH_TRANS_LINK_M2(pipe), PCH_TRANS_LINK_N2(pipe));
}
@@ -259,8 +259,8 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
assert_shared_dpll_enabled(display, crtc_state->shared_dpll);
/* FDI must be feeding us bits for PCH ports */
- assert_fdi_tx_enabled(dev_priv, pipe);
- assert_fdi_rx_enabled(dev_priv, pipe);
+ assert_fdi_tx_enabled(display, pipe);
+ assert_fdi_rx_enabled(display, pipe);
if (HAS_PCH_CPT(dev_priv)) {
reg = TRANS_CHICKEN2(pipe);
@@ -316,13 +316,14 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
/* FDI relies on the transcoder */
- assert_fdi_tx_disabled(dev_priv, pipe);
- assert_fdi_rx_disabled(dev_priv, pipe);
+ assert_fdi_tx_disabled(display, pipe);
+ assert_fdi_rx_disabled(display, pipe);
/* Ports must be off as well */
assert_pch_ports_disabled(dev_priv, pipe);
@@ -479,8 +480,7 @@ void ilk_pch_post_disable(struct intel_atomic_state *state,
static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
/* read out port_clock from the DPLL */
i9xx_crtc_clock_get(crtc_state);
@@ -491,7 +491,7 @@ static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state)
* Calculate one based on the FDI configuration.
*/
crtc_state->hw.adjusted_mode.crtc_clock =
- intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, crtc_state),
+ intel_dotclock_calculate(intel_fdi_link_freq(display, crtc_state),
&crtc_state->fdi_m_n);
}
@@ -549,14 +549,15 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val, pipeconf_val;
/* FDI must be feeding us bits for PCH ports */
- assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
- assert_fdi_rx_enabled(dev_priv, PIPE_A);
+ assert_fdi_tx_enabled(display, (enum pipe)cpu_transcoder);
+ assert_fdi_rx_enabled(display, PIPE_A);
val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
/* Workaround: set timing override bit. */
diff --git a/drivers/gpu/drm/i915/display/intel_pfit.c b/drivers/gpu/drm/i915/display/intel_pfit.c
index 4ee03d9d14ad..3c3ecf288570 100644
--- a/drivers/gpu/drm/i915/display/intel_pfit.c
+++ b/drivers/gpu/drm/i915/display/intel_pfit.c
@@ -3,13 +3,17 @@
* Copyright © 2024 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "i915_utils.h"
+#include "intel_de.h"
#include "intel_display_core.h"
#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_lvds_regs.h"
#include "intel_pfit.h"
+#include "intel_pfit_regs.h"
static int intel_pch_pfit_check_dst_window(const struct intel_crtc_state *crtc_state)
{
@@ -542,8 +546,8 @@ out:
return intel_gmch_pfit_check_timings(crtc_state);
}
-int intel_panel_fitting(struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+int intel_pfit_compute_config(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -552,3 +556,165 @@ int intel_panel_fitting(struct intel_crtc_state *crtc_state,
else
return pch_panel_fitting(crtc_state, conn_state);
}
+
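+/*
+ * Program the ILK+ CPU panel fitter (PF) window and filter for this pipe
+ * from the pre-computed pch_pfit destination rectangle.
+ */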
+void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
+ enum pipe pipe = crtc->pipe;
+ int width = drm_rect_width(dst);
+ int height = drm_rect_height(dst);
+ int x = dst->x1;
+ int y = dst->y1;
+
+ if (!crtc_state->pch_pfit.enabled)
+ return;
+
+ /*
+ * Force use of hard-coded filter coefficients as some pre-programmed
+ * values are broken, e.g. x201.
+ */
+ if (display->platform.ivybridge || display->platform.haswell)
+ intel_de_write_fw(display, PF_CTL(pipe), PF_ENABLE |
+ PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
+ else
+ intel_de_write_fw(display, PF_CTL(pipe), PF_ENABLE |
+ PF_FILTER_MED_3x3);
+ intel_de_write_fw(display, PF_WIN_POS(pipe),
+ PF_WIN_XPOS(x) | PF_WIN_YPOS(y));
+ intel_de_write_fw(display, PF_WIN_SZ(pipe),
+ PF_WIN_XSIZE(width) | PF_WIN_YSIZE(height));
+}
+
+void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_display *display = to_intel_display(old_crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+
+ /*
+ * To avoid upsetting the power well on haswell, only disable the pfit if
+ * it's in use. The hw state code will make sure we get this right.
+ */
+ if (!old_crtc_state->pch_pfit.enabled)
+ return;
+
+ intel_de_write_fw(display, PF_CTL(pipe), 0);
+ intel_de_write_fw(display, PF_WIN_POS(pipe), 0);
+ intel_de_write_fw(display, PF_WIN_SZ(pipe), 0);
+}
+
+void ilk_pfit_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 ctl, pos, size;
+ enum pipe pipe;
+
+ ctl = intel_de_read(display, PF_CTL(crtc->pipe));
+ if ((ctl & PF_ENABLE) == 0)
+ return;
+
+ if (display->platform.ivybridge || display->platform.haswell)
+ pipe = REG_FIELD_GET(PF_PIPE_SEL_MASK_IVB, ctl);
+ else
+ pipe = crtc->pipe;
+
+ crtc_state->pch_pfit.enabled = true;
+
+ pos = intel_de_read(display, PF_WIN_POS(crtc->pipe));
+ size = intel_de_read(display, PF_WIN_SZ(crtc->pipe));
+
+ drm_rect_init(&crtc_state->pch_pfit.dst,
+ REG_FIELD_GET(PF_WIN_XPOS_MASK, pos),
+ REG_FIELD_GET(PF_WIN_YPOS_MASK, pos),
+ REG_FIELD_GET(PF_WIN_XSIZE_MASK, size),
+ REG_FIELD_GET(PF_WIN_YSIZE_MASK, size));
+
+ /*
+ * We currently do not free assignments of panel fitters on
+ * ivb/hsw (since we don't use the higher upscaling modes which
+ * differentiate them), so just WARN about this case for now.
+ */
+ drm_WARN_ON(display->drm, pipe != crtc->pipe);
+}
+
+void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (!crtc_state->gmch_pfit.control)
+ return;
+
+ /*
+ * The panel fitter should only be adjusted whilst the pipe is disabled,
+ * according to register description and PRM.
+ */
+ drm_WARN_ON(display->drm,
+ intel_de_read(display, PFIT_CONTROL(display)) & PFIT_ENABLE);
+ assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
+
+ intel_de_write(display, PFIT_PGM_RATIOS(display),
+ crtc_state->gmch_pfit.pgm_ratios);
+ intel_de_write(display, PFIT_CONTROL(display),
+ crtc_state->gmch_pfit.control);
+
+ /*
+ * Border color in case we don't scale up to the full screen. Black by
+ * default, change to something else for debugging.
+ */
+ intel_de_write(display, BCLRPAT(display, crtc->pipe), 0);
+}
+
+void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_display *display = to_intel_display(old_crtc_state);
+
+ if (!old_crtc_state->gmch_pfit.control)
+ return;
+
+ assert_transcoder_disabled(display, old_crtc_state->cpu_transcoder);
+
+ drm_dbg_kms(display->drm, "disabling pfit, current: 0x%08x\n",
+ intel_de_read(display, PFIT_CONTROL(display)));
+ intel_de_write(display, PFIT_CONTROL(display), 0);
+}
+
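+/* i830 has no panel fitter; gen4+, pineview and mobile platforms do */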
+static bool i9xx_has_pfit(struct intel_display *display)
+{
+ if (display->platform.i830)
+ return false;
+
+ return DISPLAY_VER(display) >= 4 ||
+ display->platform.pineview || display->platform.mobile;
+}
+
+void i9xx_pfit_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum pipe pipe;
+ u32 tmp;
+
+ if (!i9xx_has_pfit(display))
+ return;
+
+ tmp = intel_de_read(display, PFIT_CONTROL(display));
+ if (!(tmp & PFIT_ENABLE))
+ return;
+
+ /* Check whether the pfit is attached to our pipe. */
+ if (DISPLAY_VER(display) >= 4)
+ pipe = REG_FIELD_GET(PFIT_PIPE_MASK, tmp);
+ else
+ pipe = PIPE_B;
+
+ if (pipe != crtc->pipe)
+ return;
+
+ crtc_state->gmch_pfit.control = tmp;
+ crtc_state->gmch_pfit.pgm_ratios =
+ intel_de_read(display, PFIT_PGM_RATIOS(display));
+}
diff --git a/drivers/gpu/drm/i915/display/intel_pfit.h b/drivers/gpu/drm/i915/display/intel_pfit.h
index add8d78de2c9..ef34f9b49d09 100644
--- a/drivers/gpu/drm/i915/display/intel_pfit.h
+++ b/drivers/gpu/drm/i915/display/intel_pfit.h
@@ -9,7 +9,13 @@
struct drm_connector_state;
struct intel_crtc_state;
-int intel_panel_fitting(struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
+int intel_pfit_compute_config(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
+void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
+void ilk_pfit_get_config(struct intel_crtc_state *crtc_state);
+void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state);
+void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state);
+void i9xx_pfit_get_config(struct intel_crtc_state *crtc_state);
#endif /* __INTEL_PFIT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_pfit_regs.h b/drivers/gpu/drm/i915/display/intel_pfit_regs.h
new file mode 100644
index 000000000000..add8ce28004e
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pfit_regs.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_PFIT_REGS_H__
+#define __INTEL_PFIT_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+/* Panel fitting */
+#define PFIT_CONTROL(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230)
+#define PFIT_ENABLE REG_BIT(31)
+#define PFIT_PIPE_MASK REG_GENMASK(30, 29) /* 965+ */
+#define PFIT_PIPE(pipe) REG_FIELD_PREP(PFIT_PIPE_MASK, (pipe))
+#define PFIT_SCALING_MASK REG_GENMASK(28, 26) /* 965+ */
+#define PFIT_SCALING_AUTO REG_FIELD_PREP(PFIT_SCALING_MASK, 0)
+#define PFIT_SCALING_PROGRAMMED REG_FIELD_PREP(PFIT_SCALING_MASK, 1)
+#define PFIT_SCALING_PILLAR REG_FIELD_PREP(PFIT_SCALING_MASK, 2)
+#define PFIT_SCALING_LETTER REG_FIELD_PREP(PFIT_SCALING_MASK, 3)
+#define PFIT_FILTER_MASK REG_GENMASK(25, 24) /* 965+ */
+#define PFIT_FILTER_FUZZY REG_FIELD_PREP(PFIT_FILTER_MASK, 0)
+#define PFIT_FILTER_CRISP REG_FIELD_PREP(PFIT_FILTER_MASK, 1)
+#define PFIT_FILTER_MEDIAN REG_FIELD_PREP(PFIT_FILTER_MASK, 2)
+#define PFIT_VERT_INTERP_MASK REG_GENMASK(11, 10) /* pre-965 */
+#define PFIT_VERT_INTERP_BILINEAR REG_FIELD_PREP(PFIT_VERT_INTERP_MASK, 1)
+#define PFIT_VERT_AUTO_SCALE REG_BIT(9) /* pre-965 */
+#define PFIT_HORIZ_INTERP_MASK REG_GENMASK(7, 6) /* pre-965 */
+#define PFIT_HORIZ_INTERP_BILINEAR REG_FIELD_PREP(PFIT_HORIZ_INTERP_MASK, 1)
+#define PFIT_HORIZ_AUTO_SCALE REG_BIT(5) /* pre-965 */
+#define PFIT_PANEL_8TO6_DITHER_ENABLE REG_BIT(3) /* pre-965 */
+
+#define PFIT_PGM_RATIOS(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61234)
+#define PFIT_VERT_SCALE_MASK REG_GENMASK(31, 20) /* pre-965 */
+#define PFIT_VERT_SCALE(x) REG_FIELD_PREP(PFIT_VERT_SCALE_MASK, (x))
+#define PFIT_HORIZ_SCALE_MASK REG_GENMASK(15, 4) /* pre-965 */
+#define PFIT_HORIZ_SCALE(x) REG_FIELD_PREP(PFIT_HORIZ_SCALE_MASK, (x))
+#define PFIT_VERT_SCALE_MASK_965 REG_GENMASK(28, 16) /* 965+ */
+#define PFIT_HORIZ_SCALE_MASK_965 REG_GENMASK(12, 0) /* 965+ */
+
+#define PFIT_AUTO_RATIOS(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61238)
+
+/* CPU panel fitter */
+/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
+#define _PFA_CTL_1 0x68080
+#define _PFB_CTL_1 0x68880
+#define PF_CTL(pipe) _MMIO_PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1)
+#define PF_ENABLE REG_BIT(31)
+#define PF_PIPE_SEL_MASK_IVB REG_GENMASK(30, 29) /* ivb/hsw */
+#define PF_PIPE_SEL_IVB(pipe) REG_FIELD_PREP(PF_PIPE_SEL_MASK_IVB, (pipe))
+#define PF_FILTER_MASK REG_GENMASK(24, 23)
+#define PF_FILTER_PROGRAMMED REG_FIELD_PREP(PF_FILTER_MASK, 0)
+#define PF_FILTER_MED_3x3 REG_FIELD_PREP(PF_FILTER_MASK, 1)
+#define PF_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PF_FILTER_MASK, 2)
+#define PF_FILTER_EDGE_SOFTEN REG_FIELD_PREP(PF_FILTER_MASK, 3)
+
+#define _PFA_WIN_SZ 0x68074
+#define _PFB_WIN_SZ 0x68874
+#define PF_WIN_SZ(pipe) _MMIO_PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ)
+#define PF_WIN_XSIZE_MASK REG_GENMASK(31, 16)
+#define PF_WIN_XSIZE(w) REG_FIELD_PREP(PF_WIN_XSIZE_MASK, (w))
+#define PF_WIN_YSIZE_MASK REG_GENMASK(15, 0)
+#define PF_WIN_YSIZE(h) REG_FIELD_PREP(PF_WIN_YSIZE_MASK, (h))
+
+#define _PFA_WIN_POS 0x68070
+#define _PFB_WIN_POS 0x68870
+#define PF_WIN_POS(pipe) _MMIO_PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS)
+#define PF_WIN_XPOS_MASK REG_GENMASK(31, 16)
+#define PF_WIN_XPOS(x) REG_FIELD_PREP(PF_WIN_XPOS_MASK, (x))
+#define PF_WIN_YPOS_MASK REG_GENMASK(15, 0)
+#define PF_WIN_YPOS(y) REG_FIELD_PREP(PF_WIN_YPOS_MASK, (y))
+
+#define _PFA_VSCALE 0x68084
+#define _PFB_VSCALE 0x68884
+#define PF_VSCALE(pipe) _MMIO_PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
+
+#define _PFA_HSCALE 0x68090
+#define _PFB_HSCALE 0x68890
+#define PF_HSCALE(pipe) _MMIO_PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
+
+#endif /* __INTEL_PFIT_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.h b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
index 43012b189415..6ddcea38488b 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.h
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
@@ -9,7 +9,6 @@
#include <linux/types.h>
struct drm_crtc;
-struct drm_i915_private;
struct intel_crtc;
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 8b30e9fd936e..a32fae510ed2 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -5,6 +5,8 @@
#include <linux/dmi.h>
+#include <drm/drm_print.h>
+
#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_quirks.h"
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 353221d3e29f..b9acd9fe160c 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -5,8 +5,8 @@
#include <linux/math.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
@@ -27,12 +27,12 @@
* since it is not handled by the shared DPLL framework as on other platforms.
*/
-void intel_snps_phy_wait_for_calibration(struct drm_i915_private *i915)
+void intel_snps_phy_wait_for_calibration(struct intel_display *display)
{
enum phy phy;
for_each_phy_masked(phy, ~0) {
- if (!intel_phy_is_snps(i915, phy))
+ if (!intel_phy_is_snps(display, phy))
continue;
/*
@@ -40,16 +40,16 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *i915)
* which phy was affected and skip setup of the corresponding
* output later.
*/
- if (intel_de_wait_for_clear(i915, DG2_PHY_MISC(phy),
+ if (intel_de_wait_for_clear(display, DG2_PHY_MISC(phy),
DG2_PHY_DP_TX_ACK_MASK, 25))
- i915->display.snps.phy_failed_calibration |= BIT(phy);
+ display->snps.phy_failed_calibration |= BIT(phy);
}
}
void intel_snps_phy_update_psr_power_state(struct intel_encoder *encoder,
bool enable)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
@@ -58,20 +58,20 @@ void intel_snps_phy_update_psr_power_state(struct intel_encoder *encoder,
val = REG_FIELD_PREP(SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR,
enable ? 2 : 3);
- intel_de_rmw(i915, SNPS_PHY_TX_REQ(phy),
+ intel_de_rmw(display, SNPS_PHY_TX_REQ(phy),
SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR, val);
}
void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_ddi_buf_trans *trans;
enum phy phy = intel_encoder_to_phy(encoder);
int n_entries, ln;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
for (ln = 0; ln < 4; ln++) {
@@ -82,7 +82,7 @@ void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_PRE, trans->entries[level].snps.pre_cursor);
val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, trans->entries[level].snps.post_cursor);
- intel_de_write(dev_priv, SNPS_PHY_TX_EQ(ln, phy), val);
+ intel_de_write(display, SNPS_PHY_TX_EQ(ln, phy), val);
}
}
@@ -1817,7 +1817,7 @@ int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state,
void intel_mpllb_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_mpllb_state *pll_state = &crtc_state->dpll_hw_state.mpllb;
enum phy phy = intel_encoder_to_phy(encoder);
i915_reg_t enable_reg = (phy <= PHY_D ?
@@ -1827,13 +1827,13 @@ void intel_mpllb_enable(struct intel_encoder *encoder,
* 3. Software programs the following PLL registers for the desired
* frequency.
*/
- intel_de_write(dev_priv, SNPS_PHY_MPLLB_CP(phy), pll_state->mpllb_cp);
- intel_de_write(dev_priv, SNPS_PHY_MPLLB_DIV(phy), pll_state->mpllb_div);
- intel_de_write(dev_priv, SNPS_PHY_MPLLB_DIV2(phy), pll_state->mpllb_div2);
- intel_de_write(dev_priv, SNPS_PHY_MPLLB_SSCEN(phy), pll_state->mpllb_sscen);
- intel_de_write(dev_priv, SNPS_PHY_MPLLB_SSCSTEP(phy), pll_state->mpllb_sscstep);
- intel_de_write(dev_priv, SNPS_PHY_MPLLB_FRACN1(phy), pll_state->mpllb_fracn1);
- intel_de_write(dev_priv, SNPS_PHY_MPLLB_FRACN2(phy), pll_state->mpllb_fracn2);
+ intel_de_write(display, SNPS_PHY_MPLLB_CP(phy), pll_state->mpllb_cp);
+ intel_de_write(display, SNPS_PHY_MPLLB_DIV(phy), pll_state->mpllb_div);
+ intel_de_write(display, SNPS_PHY_MPLLB_DIV2(phy), pll_state->mpllb_div2);
+ intel_de_write(display, SNPS_PHY_MPLLB_SSCEN(phy), pll_state->mpllb_sscen);
+ intel_de_write(display, SNPS_PHY_MPLLB_SSCSTEP(phy), pll_state->mpllb_sscstep);
+ intel_de_write(display, SNPS_PHY_MPLLB_FRACN1(phy), pll_state->mpllb_fracn1);
+ intel_de_write(display, SNPS_PHY_MPLLB_FRACN2(phy), pll_state->mpllb_fracn2);
/*
* 4. If the frequency will result in a change to the voltage
@@ -1844,7 +1844,7 @@ void intel_mpllb_enable(struct intel_encoder *encoder,
*/
/* 5. Software sets DPLL_ENABLE [PLL Enable] to "1". */
- intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE);
+ intel_de_rmw(display, enable_reg, 0, PLL_ENABLE);
/*
* 9. Software sets SNPS_PHY_MPLLB_DIV dp_mpllb_force_en to "1". This
@@ -1853,7 +1853,7 @@ void intel_mpllb_enable(struct intel_encoder *encoder,
* PLL because that will start the PLL before it has sampled the
* divider values.
*/
- intel_de_write(dev_priv, SNPS_PHY_MPLLB_DIV(phy),
+ intel_de_write(display, SNPS_PHY_MPLLB_DIV(phy),
pll_state->mpllb_div | SNPS_PHY_MPLLB_FORCE_EN);
/*
@@ -1861,8 +1861,8 @@ void intel_mpllb_enable(struct intel_encoder *encoder,
* is locked at new settings. This register bit is sampling PHY
* dp_mpllb_state interface signal.
*/
- if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 5))
- drm_dbg_kms(&dev_priv->drm, "Port %c PLL not locked\n", phy_name(phy));
+ if (intel_de_wait_for_set(display, enable_reg, PLL_LOCK, 5))
+ drm_dbg_kms(display->drm, "Port %c PLL not locked\n", phy_name(phy));
/*
* 11. If the frequency will result in a change to the voltage
@@ -1875,7 +1875,7 @@ void intel_mpllb_enable(struct intel_encoder *encoder,
void intel_mpllb_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
i915_reg_t enable_reg = (phy <= PHY_D ?
DG2_PLL_ENABLE(phy) : MG_PLL_ENABLE(0));
@@ -1889,20 +1889,20 @@ void intel_mpllb_disable(struct intel_encoder *encoder)
*/
/* 2. Software programs DPLL_ENABLE [PLL Enable] to "0" */
- intel_de_rmw(i915, enable_reg, PLL_ENABLE, 0);
+ intel_de_rmw(display, enable_reg, PLL_ENABLE, 0);
/*
* 4. Software programs SNPS_PHY_MPLLB_DIV dp_mpllb_force_en to "0".
* This will allow the PLL to stop running.
*/
- intel_de_rmw(i915, SNPS_PHY_MPLLB_DIV(phy), SNPS_PHY_MPLLB_FORCE_EN, 0);
+ intel_de_rmw(display, SNPS_PHY_MPLLB_DIV(phy), SNPS_PHY_MPLLB_FORCE_EN, 0);
/*
* 5. Software polls DPLL_ENABLE [PLL Lock] for PHY acknowledgment
* (dp_txX_ack) that the new transmitter setting request is completed.
*/
- if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 5))
- drm_err(&i915->drm, "Port %c PLL not locked\n", phy_name(phy));
+ if (intel_de_wait_for_clear(display, enable_reg, PLL_LOCK, 5))
+ drm_err(display->drm, "Port %c PLL not locked\n", phy_name(phy));
/*
* 6. If the frequency will result in a change to the voltage
@@ -1947,16 +1947,16 @@ int intel_mpllb_calc_port_clock(struct intel_encoder *encoder,
void intel_mpllb_readout_hw_state(struct intel_encoder *encoder,
struct intel_mpllb_state *pll_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- pll_state->mpllb_cp = intel_de_read(dev_priv, SNPS_PHY_MPLLB_CP(phy));
- pll_state->mpllb_div = intel_de_read(dev_priv, SNPS_PHY_MPLLB_DIV(phy));
- pll_state->mpllb_div2 = intel_de_read(dev_priv, SNPS_PHY_MPLLB_DIV2(phy));
- pll_state->mpllb_sscen = intel_de_read(dev_priv, SNPS_PHY_MPLLB_SSCEN(phy));
- pll_state->mpllb_sscstep = intel_de_read(dev_priv, SNPS_PHY_MPLLB_SSCSTEP(phy));
- pll_state->mpllb_fracn1 = intel_de_read(dev_priv, SNPS_PHY_MPLLB_FRACN1(phy));
- pll_state->mpllb_fracn2 = intel_de_read(dev_priv, SNPS_PHY_MPLLB_FRACN2(phy));
+ pll_state->mpllb_cp = intel_de_read(display, SNPS_PHY_MPLLB_CP(phy));
+ pll_state->mpllb_div = intel_de_read(display, SNPS_PHY_MPLLB_DIV(phy));
+ pll_state->mpllb_div2 = intel_de_read(display, SNPS_PHY_MPLLB_DIV2(phy));
+ pll_state->mpllb_sscen = intel_de_read(display, SNPS_PHY_MPLLB_SSCEN(phy));
+ pll_state->mpllb_sscstep = intel_de_read(display, SNPS_PHY_MPLLB_SSCSTEP(phy));
+ pll_state->mpllb_fracn1 = intel_de_read(display, SNPS_PHY_MPLLB_FRACN1(phy));
+ pll_state->mpllb_fracn2 = intel_de_read(display, SNPS_PHY_MPLLB_FRACN2(phy));
/*
* REF_CONTROL is under firmware control and never programmed by the
@@ -1964,7 +1964,7 @@ void intel_mpllb_readout_hw_state(struct intel_encoder *encoder,
* only tells us the expected value for one field in this register,
* so we'll only read out those specific bits here.
*/
- pll_state->ref_control = intel_de_read(dev_priv, SNPS_PHY_REF_CONTROL(phy)) &
+ pll_state->ref_control = intel_de_read(display, SNPS_PHY_REF_CONTROL(phy)) &
SNPS_PHY_REF_CONTROL_REF_RANGE;
/*
@@ -1980,14 +1980,13 @@ void intel_mpllb_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_mpllb_state mpllb_hw_state = {};
const struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->dpll_hw_state.mpllb;
struct intel_encoder *encoder;
- if (!IS_DG2(i915))
+ if (!display->platform.dg2)
return;
if (!new_crtc_state->hw.active)
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.h b/drivers/gpu/drm/i915/display/intel_snps_phy.h
index 1dd564ed9fa8..7f96da22d028 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.h
@@ -8,15 +8,15 @@
#include <linux/types.h>
-struct drm_i915_private;
+enum phy;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_encoder;
struct intel_mpllb_state;
-enum phy;
-void intel_snps_phy_wait_for_calibration(struct drm_i915_private *dev_priv);
+void intel_snps_phy_wait_for_calibration(struct intel_display *display);
void intel_snps_phy_update_psr_power_state(struct intel_encoder *encoder,
bool enable);
diff --git a/drivers/gpu/drm/i915/display/intel_tdf.h b/drivers/gpu/drm/i915/display/intel_tdf.h
index 353cde21f6c2..0862c2bfd9cd 100644
--- a/drivers/gpu/drm/i915/display/intel_tdf.h
+++ b/drivers/gpu/drm/i915/display/intel_tdf.h
@@ -14,12 +14,12 @@
* the display flip, since display engine is never coherent with CPU/GPU caches.
*/
-struct drm_i915_private;
+struct intel_display;
#ifdef I915
-static inline void intel_td_flush(struct drm_i915_private *i915) {}
+static inline void intel_td_flush(struct intel_display *display) {}
#else
-void intel_td_flush(struct drm_i915_private *i915);
+void intel_td_flush(struct intel_display *display);
#endif
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 6e7151346382..3ed64c17bdff 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -10,7 +10,7 @@
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_fixed.h>
-#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -22,14 +22,13 @@
bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state)
{
- const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (!HAS_DSC(i915))
+ if (!HAS_DSC(display))
return false;
- if (DISPLAY_VER(i915) == 11 && cpu_transcoder == TRANSCODER_A)
+ if (DISPLAY_VER(display) == 11 && cpu_transcoder == TRANSCODER_A)
return false;
return true;
@@ -37,9 +36,9 @@ bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state)
static bool is_pipe_dsc(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
return true;
if (cpu_transcoder == TRANSCODER_EDP ||
@@ -48,7 +47,7 @@ static bool is_pipe_dsc(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
return false;
/* There's no pipe A DSC engine on ICL */
- drm_WARN_ON(&i915->drm, crtc->pipe == PIPE_A);
+ drm_WARN_ON(display->drm, crtc->pipe == PIPE_A);
return true;
}
@@ -262,8 +261,7 @@ static int intel_dsc_slice_dimensions_valid(struct intel_crtc_state *pipe_config
int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
{
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(pipe_config);
struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config;
u16 compressed_bpp = fxp_q4_to_int(pipe_config->dsc.compressed_bpp_x16);
int err;
@@ -276,7 +274,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
err = intel_dsc_slice_dimensions_valid(pipe_config, vdsc_cfg);
if (err) {
- drm_dbg_kms(&dev_priv->drm, "Slice dimension requirements not met\n");
+ drm_dbg_kms(display->drm, "Slice dimension requirements not met\n");
return err;
}
@@ -287,7 +285,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
vdsc_cfg->convert_rgb = pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR420 &&
pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR444;
- if (DISPLAY_VER(dev_priv) >= 14 &&
+ if (DISPLAY_VER(display) >= 14 &&
pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
vdsc_cfg->native_420 = true;
/* We do not support YcBCr422 as of now */
@@ -308,7 +306,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
vdsc_cfg->bits_per_component = pipe_config->pipe_bpp / 3;
if (vdsc_cfg->bits_per_component < 8) {
- drm_dbg_kms(&dev_priv->drm, "DSC bpc requirements not met bpc: %d\n",
+ drm_dbg_kms(display->drm, "DSC bpc requirements not met bpc: %d\n",
vdsc_cfg->bits_per_component);
return -EINVAL;
}
@@ -320,7 +318,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
* up to uncompressed bpp-1, hence add calculations for all the rc
* parameters
*/
- if (DISPLAY_VER(dev_priv) >= 13) {
+ if (DISPLAY_VER(display) >= 13) {
calculate_rc_params(vdsc_cfg);
} else {
if ((compressed_bpp == 8 ||
@@ -356,7 +354,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
enum intel_display_power_domain
intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
/*
@@ -370,7 +368,8 @@ intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
* the pipe in use. Hence another reference on the pipe power domain
* will suffice. (Except no VDSC/joining on ICL pipe A.)
*/
- if (DISPLAY_VER(i915) == 12 && !IS_ROCKETLAKE(i915) && pipe == PIPE_A)
+ if (DISPLAY_VER(display) == 12 && !display->platform.rocketlake &&
+ pipe == PIPE_A)
return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
else if (is_pipe_dsc(crtc, cpu_transcoder))
return POWER_DOMAIN_PIPE(pipe);
@@ -416,26 +415,25 @@ static void intel_dsc_get_pps_reg(const struct intel_crtc_state *crtc_state, int
static void intel_dsc_pps_write(const struct intel_crtc_state *crtc_state,
int pps, u32 pps_val)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
i915_reg_t dsc_reg[3];
int i, vdsc_per_pipe, dsc_reg_num;
vdsc_per_pipe = intel_dsc_get_vdsc_per_pipe(crtc_state);
dsc_reg_num = min_t(int, ARRAY_SIZE(dsc_reg), vdsc_per_pipe);
- drm_WARN_ON_ONCE(&i915->drm, dsc_reg_num < vdsc_per_pipe);
+ drm_WARN_ON_ONCE(display->drm, dsc_reg_num < vdsc_per_pipe);
intel_dsc_get_pps_reg(crtc_state, pps, dsc_reg, dsc_reg_num);
for (i = 0; i < dsc_reg_num; i++)
- intel_de_write(i915, dsc_reg[i], pps_val);
+ intel_de_write(display, dsc_reg[i], pps_val);
}
static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
@@ -529,7 +527,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
vdsc_cfg->slice_height);
intel_dsc_pps_write(crtc_state, 16, pps_val);
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
/* PPS 17 */
pps_val = DSC_PPS17_SL_BPG_OFFSET(vdsc_cfg->second_line_bpg_offset);
intel_dsc_pps_write(crtc_state, 17, pps_val);
@@ -547,44 +545,44 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
(u32)(vdsc_cfg->rc_buf_thresh[i] <<
BITS_PER_BYTE * (i % 4));
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
- intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0,
+ intel_de_write(display, DSCA_RC_BUF_THRESH_0,
rc_buf_thresh_dword[0]);
- intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0_UDW,
+ intel_de_write(display, DSCA_RC_BUF_THRESH_0_UDW,
rc_buf_thresh_dword[1]);
- intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_1,
+ intel_de_write(display, DSCA_RC_BUF_THRESH_1,
rc_buf_thresh_dword[2]);
- intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_1_UDW,
+ intel_de_write(display, DSCA_RC_BUF_THRESH_1_UDW,
rc_buf_thresh_dword[3]);
if (vdsc_instances_per_pipe > 1) {
- intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_0,
+ intel_de_write(display, DSCC_RC_BUF_THRESH_0,
rc_buf_thresh_dword[0]);
- intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_0_UDW,
+ intel_de_write(display, DSCC_RC_BUF_THRESH_0_UDW,
rc_buf_thresh_dword[1]);
- intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_1,
+ intel_de_write(display, DSCC_RC_BUF_THRESH_1,
rc_buf_thresh_dword[2]);
- intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_1_UDW,
+ intel_de_write(display, DSCC_RC_BUF_THRESH_1_UDW,
rc_buf_thresh_dword[3]);
}
} else {
- intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_0(pipe),
+ intel_de_write(display, ICL_DSC0_RC_BUF_THRESH_0(pipe),
rc_buf_thresh_dword[0]);
- intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe),
+ intel_de_write(display, ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe),
rc_buf_thresh_dword[1]);
- intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_1(pipe),
+ intel_de_write(display, ICL_DSC0_RC_BUF_THRESH_1(pipe),
rc_buf_thresh_dword[2]);
- intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
+ intel_de_write(display, ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
rc_buf_thresh_dword[3]);
if (vdsc_instances_per_pipe > 1) {
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_BUF_THRESH_0(pipe),
rc_buf_thresh_dword[0]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe),
rc_buf_thresh_dword[1]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_BUF_THRESH_1(pipe),
rc_buf_thresh_dword[2]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe),
rc_buf_thresh_dword[3]);
}
@@ -601,88 +599,88 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
(vdsc_cfg->rc_range_params[i].range_min_qp <<
RC_MIN_QP_SHIFT)) << 16 * (i % 2));
if (!is_pipe_dsc(crtc, cpu_transcoder)) {
- intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0,
+ intel_de_write(display, DSCA_RC_RANGE_PARAMETERS_0,
rc_range_params_dword[0]);
- intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0_UDW,
+ intel_de_write(display, DSCA_RC_RANGE_PARAMETERS_0_UDW,
rc_range_params_dword[1]);
- intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_1,
+ intel_de_write(display, DSCA_RC_RANGE_PARAMETERS_1,
rc_range_params_dword[2]);
- intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_1_UDW,
+ intel_de_write(display, DSCA_RC_RANGE_PARAMETERS_1_UDW,
rc_range_params_dword[3]);
- intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_2,
+ intel_de_write(display, DSCA_RC_RANGE_PARAMETERS_2,
rc_range_params_dword[4]);
- intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_2_UDW,
+ intel_de_write(display, DSCA_RC_RANGE_PARAMETERS_2_UDW,
rc_range_params_dword[5]);
- intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_3,
+ intel_de_write(display, DSCA_RC_RANGE_PARAMETERS_3,
rc_range_params_dword[6]);
- intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_3_UDW,
+ intel_de_write(display, DSCA_RC_RANGE_PARAMETERS_3_UDW,
rc_range_params_dword[7]);
if (vdsc_instances_per_pipe > 1) {
- intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_0,
+ intel_de_write(display, DSCC_RC_RANGE_PARAMETERS_0,
rc_range_params_dword[0]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
DSCC_RC_RANGE_PARAMETERS_0_UDW,
rc_range_params_dword[1]);
- intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_1,
+ intel_de_write(display, DSCC_RC_RANGE_PARAMETERS_1,
rc_range_params_dword[2]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
DSCC_RC_RANGE_PARAMETERS_1_UDW,
rc_range_params_dword[3]);
- intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_2,
+ intel_de_write(display, DSCC_RC_RANGE_PARAMETERS_2,
rc_range_params_dword[4]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
DSCC_RC_RANGE_PARAMETERS_2_UDW,
rc_range_params_dword[5]);
- intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_3,
+ intel_de_write(display, DSCC_RC_RANGE_PARAMETERS_3,
rc_range_params_dword[6]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
DSCC_RC_RANGE_PARAMETERS_3_UDW,
rc_range_params_dword[7]);
}
} else {
- intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe),
+ intel_de_write(display, ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe),
rc_range_params_dword[0]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe),
rc_range_params_dword[1]);
- intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe),
+ intel_de_write(display, ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe),
rc_range_params_dword[2]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe),
rc_range_params_dword[3]);
- intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe),
+ intel_de_write(display, ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe),
rc_range_params_dword[4]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe),
rc_range_params_dword[5]);
- intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe),
+ intel_de_write(display, ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe),
rc_range_params_dword[6]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe),
rc_range_params_dword[7]);
if (vdsc_instances_per_pipe > 1) {
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe),
rc_range_params_dword[0]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe),
rc_range_params_dword[1]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe),
rc_range_params_dword[2]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe),
rc_range_params_dword[3]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe),
rc_range_params_dword[4]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe),
rc_range_params_dword[5]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe),
rc_range_params_dword[6]);
- intel_de_write(dev_priv,
+ intel_de_write(display,
ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe),
rc_range_params_dword[7]);
}
@@ -746,8 +744,8 @@ static i915_reg_t dss_ctl2_reg(struct intel_crtc *crtc, enum transcoder cpu_tran
void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dss_ctl1_val = 0;
if (crtc_state->joiner_pipes && !crtc_state->dsc.compression_enable) {
@@ -756,14 +754,15 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
else
dss_ctl1_val |= UNCOMPRESSED_JOINER_PRIMARY;
- intel_de_write(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder), dss_ctl1_val);
+ intel_de_write(display, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder),
+ dss_ctl1_val);
}
}
void intel_dsc_enable(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dss_ctl1_val = 0;
u32 dss_ctl2_val = 0;
int vdsc_instances_per_pipe = intel_dsc_get_vdsc_per_pipe(crtc_state);
@@ -796,28 +795,27 @@ void intel_dsc_enable(const struct intel_crtc_state *crtc_state)
if (intel_crtc_is_bigjoiner_primary(crtc_state))
dss_ctl1_val |= PRIMARY_BIG_JOINER_ENABLE;
}
- intel_de_write(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder), dss_ctl1_val);
- intel_de_write(dev_priv, dss_ctl2_reg(crtc, crtc_state->cpu_transcoder), dss_ctl2_val);
+ intel_de_write(display, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder), dss_ctl1_val);
+ intel_de_write(display, dss_ctl2_reg(crtc, crtc_state->cpu_transcoder), dss_ctl2_val);
}
void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_display *display = to_intel_display(old_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* Disable only if either of them is enabled */
if (old_crtc_state->dsc.compression_enable ||
old_crtc_state->joiner_pipes) {
- intel_de_write(dev_priv, dss_ctl1_reg(crtc, old_crtc_state->cpu_transcoder), 0);
- intel_de_write(dev_priv, dss_ctl2_reg(crtc, old_crtc_state->cpu_transcoder), 0);
+ intel_de_write(display, dss_ctl1_reg(crtc, old_crtc_state->cpu_transcoder), 0);
+ intel_de_write(display, dss_ctl2_reg(crtc, old_crtc_state->cpu_transcoder), 0);
}
}
static u32 intel_dsc_pps_read(struct intel_crtc_state *crtc_state, int pps,
bool *all_equal)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
i915_reg_t dsc_reg[3];
int i, vdsc_per_pipe, dsc_reg_num;
u32 val;
@@ -825,16 +823,16 @@ static u32 intel_dsc_pps_read(struct intel_crtc_state *crtc_state, int pps,
vdsc_per_pipe = intel_dsc_get_vdsc_per_pipe(crtc_state);
dsc_reg_num = min_t(int, ARRAY_SIZE(dsc_reg), vdsc_per_pipe);
- drm_WARN_ON_ONCE(&i915->drm, dsc_reg_num < vdsc_per_pipe);
+ drm_WARN_ON_ONCE(display->drm, dsc_reg_num < vdsc_per_pipe);
intel_dsc_get_pps_reg(crtc_state, pps, dsc_reg, dsc_reg_num);
*all_equal = true;
- val = intel_de_read(i915, dsc_reg[0]);
+ val = intel_de_read(display, dsc_reg[0]);
for (i = 1; i < dsc_reg_num; i++) {
- if (intel_de_read(i915, dsc_reg[i]) != val) {
+ if (intel_de_read(display, dsc_reg[i]) != val) {
*all_equal = false;
break;
}
@@ -845,22 +843,20 @@ static u32 intel_dsc_pps_read(struct intel_crtc_state *crtc_state, int pps,
static u32 intel_dsc_pps_read_and_verify(struct intel_crtc_state *crtc_state, int pps)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
u32 val;
bool all_equal;
val = intel_dsc_pps_read(crtc_state, pps, &all_equal);
- drm_WARN_ON(&i915->drm, !all_equal);
+ drm_WARN_ON(display->drm, !all_equal);
return val;
}
static void intel_dsc_get_pps_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
int num_vdsc_instances = intel_dsc_get_num_vdsc_instances(crtc_state);
u32 pps_temp;
@@ -946,7 +942,7 @@ static void intel_dsc_get_pps_config(struct intel_crtc_state *crtc_state)
vdsc_cfg->slice_chunk_size = REG_FIELD_GET(DSC_PPS16_SLICE_CHUNK_SIZE_MASK, pps_temp);
- if (DISPLAY_VER(i915) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
/* PPS 17 */
pps_temp = intel_dsc_pps_read_and_verify(crtc_state, 17);
@@ -964,7 +960,6 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
@@ -979,8 +974,8 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
if (!wakeref)
return;
- dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc, cpu_transcoder));
- dss_ctl2 = intel_de_read(dev_priv, dss_ctl2_reg(crtc, cpu_transcoder));
+ dss_ctl1 = intel_de_read(display, dss_ctl1_reg(crtc, cpu_transcoder));
+ dss_ctl2 = intel_de_read(display, dss_ctl2_reg(crtc, cpu_transcoder));
crtc_state->dsc.compression_enable = dss_ctl2 & VDSC0_ENABLE;
if (!crtc_state->dsc.compression_enable)
@@ -1020,8 +1015,7 @@ void intel_vdsc_state_dump(struct drm_printer *p, int indent,
int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_display *display = to_intel_display(crtc);
+ struct intel_display *display = to_intel_display(crtc_state);
int num_vdsc_instances = intel_dsc_get_num_vdsc_instances(crtc_state);
int min_cdclk;
diff --git a/drivers/gpu/drm/i915/display/intel_wm.c b/drivers/gpu/drm/i915/display/intel_wm.c
index d7dc49aecd27..f00f4cfc58e5 100644
--- a/drivers/gpu/drm/i915/display/intel_wm.c
+++ b/drivers/gpu/drm/i915/display/intel_wm.c
@@ -108,6 +108,12 @@ void intel_wm_get_hw_state(struct drm_i915_private *i915)
return i915->display.funcs.wm->get_hw_state(i915);
}
+void intel_wm_sanitize(struct drm_i915_private *i915)
+{
+ if (i915->display.funcs.wm->sanitize)
+ return i915->display.funcs.wm->sanitize(i915);
+}
+
bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
diff --git a/drivers/gpu/drm/i915/display/intel_wm.h b/drivers/gpu/drm/i915/display/intel_wm.h
index e97cdca89a5c..7d3a447054b3 100644
--- a/drivers/gpu/drm/i915/display/intel_wm.h
+++ b/drivers/gpu/drm/i915/display/intel_wm.h
@@ -25,6 +25,7 @@ void intel_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_compute_global_watermarks(struct intel_atomic_state *state);
void intel_wm_get_hw_state(struct drm_i915_private *i915);
+void intel_wm_sanitize(struct drm_i915_private *i915);
bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void intel_print_wm_latency(struct drm_i915_private *i915,
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index 3d24fa773094..ee81220a7c88 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -666,12 +666,14 @@ static u16 glk_nearest_filter_coef(int t)
*/
static void glk_program_nearest_filter_coefs(struct intel_display *display,
+ struct intel_dsb *dsb,
enum pipe pipe, int id, int set)
{
int i;
- intel_de_write_fw(display, GLK_PS_COEF_INDEX_SET(pipe, id, set),
- PS_COEF_INDEX_AUTO_INC);
+ intel_de_write_dsb(display, dsb,
+ GLK_PS_COEF_INDEX_SET(pipe, id, set),
+ PS_COEF_INDEX_AUTO_INC);
for (i = 0; i < 17 * 7; i += 2) {
u32 tmp;
@@ -683,11 +685,12 @@ static void glk_program_nearest_filter_coefs(struct intel_display *display,
t = glk_coef_tap(i + 1);
tmp |= glk_nearest_filter_coef(t) << 16;
- intel_de_write_fw(display, GLK_PS_COEF_DATA_SET(pipe, id, set),
- tmp);
+ intel_de_write_dsb(display, dsb,
+ GLK_PS_COEF_DATA_SET(pipe, id, set), tmp);
}
- intel_de_write_fw(display, GLK_PS_COEF_INDEX_SET(pipe, id, set), 0);
+ intel_de_write_dsb(display, dsb,
+ GLK_PS_COEF_INDEX_SET(pipe, id, set), 0);
}
static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
@@ -703,14 +706,15 @@ static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
return PS_FILTER_MEDIUM;
}
-static void skl_scaler_setup_filter(struct intel_display *display, enum pipe pipe,
+static void skl_scaler_setup_filter(struct intel_display *display,
+ struct intel_dsb *dsb, enum pipe pipe,
int id, int set, enum drm_scaling_filter filter)
{
switch (filter) {
case DRM_SCALING_FILTER_DEFAULT:
break;
case DRM_SCALING_FILTER_NEAREST_NEIGHBOR:
- glk_program_nearest_filter_coefs(display, pipe, id, set);
+ glk_program_nearest_filter_coefs(display, dsb, pipe, id, set);
break;
default:
MISSING_CASE(filter);
@@ -759,7 +763,7 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
trace_intel_pipe_scaler_update_arm(crtc, id, x, y, width, height);
- skl_scaler_setup_filter(display, pipe, id, 0,
+ skl_scaler_setup_filter(display, NULL, pipe, id, 0,
crtc_state->hw.scaling_filter);
intel_de_write_fw(display, SKL_PS_CTRL(pipe, id), ps_ctrl);
@@ -775,7 +779,8 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
}
void
-skl_program_plane_scaler(struct intel_plane *plane,
+skl_program_plane_scaler(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -825,35 +830,38 @@ skl_program_plane_scaler(struct intel_plane *plane,
trace_intel_plane_scaler_update_arm(plane, scaler_id,
crtc_x, crtc_y, crtc_w, crtc_h);
- skl_scaler_setup_filter(display, pipe, scaler_id, 0,
+ skl_scaler_setup_filter(display, dsb, pipe, scaler_id, 0,
plane_state->hw.scaling_filter);
- intel_de_write_fw(display, SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
- intel_de_write_fw(display, SKL_PS_VPHASE(pipe, scaler_id),
- PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
- intel_de_write_fw(display, SKL_PS_HPHASE(pipe, scaler_id),
- PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- intel_de_write_fw(display, SKL_PS_WIN_POS(pipe, scaler_id),
- PS_WIN_XPOS(crtc_x) | PS_WIN_YPOS(crtc_y));
- intel_de_write_fw(display, SKL_PS_WIN_SZ(pipe, scaler_id),
- PS_WIN_XSIZE(crtc_w) | PS_WIN_YSIZE(crtc_h));
+ intel_de_write_dsb(display, dsb, SKL_PS_CTRL(pipe, scaler_id),
+ ps_ctrl);
+ intel_de_write_dsb(display, dsb, SKL_PS_VPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ intel_de_write_dsb(display, dsb, SKL_PS_HPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+ intel_de_write_dsb(display, dsb, SKL_PS_WIN_POS(pipe, scaler_id),
+ PS_WIN_XPOS(crtc_x) | PS_WIN_YPOS(crtc_y));
+ intel_de_write_dsb(display, dsb, SKL_PS_WIN_SZ(pipe, scaler_id),
+ PS_WIN_XSIZE(crtc_w) | PS_WIN_YSIZE(crtc_h));
}
-static void skl_detach_scaler(struct intel_crtc *crtc, int id)
+static void skl_detach_scaler(struct intel_dsb *dsb,
+ struct intel_crtc *crtc, int id)
{
struct intel_display *display = to_intel_display(crtc);
trace_intel_scaler_disable_arm(crtc, id);
- intel_de_write_fw(display, SKL_PS_CTRL(crtc->pipe, id), 0);
- intel_de_write_fw(display, SKL_PS_WIN_POS(crtc->pipe, id), 0);
- intel_de_write_fw(display, SKL_PS_WIN_SZ(crtc->pipe, id), 0);
+ intel_de_write_dsb(display, dsb, SKL_PS_CTRL(crtc->pipe, id), 0);
+ intel_de_write_dsb(display, dsb, SKL_PS_WIN_POS(crtc->pipe, id), 0);
+ intel_de_write_dsb(display, dsb, SKL_PS_WIN_SZ(crtc->pipe, id), 0);
}
/*
* This function detaches (aka. unbinds) unused scalers in hardware
*/
-void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
+void skl_detach_scalers(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct intel_crtc_scaler_state *scaler_state =
@@ -863,7 +871,7 @@ void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
/* loop through and disable scalers that aren't in use */
for (i = 0; i < crtc->num_scalers; i++) {
if (!scaler_state->scalers[i].in_use)
- skl_detach_scaler(crtc, i);
+ skl_detach_scaler(dsb, crtc, i);
}
}
@@ -873,7 +881,7 @@ void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
int i;
for (i = 0; i < crtc->num_scalers; i++)
- skl_detach_scaler(crtc, i);
+ skl_detach_scaler(NULL, crtc, i);
}
void skl_scaler_get_config(struct intel_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.h b/drivers/gpu/drm/i915/display/skl_scaler.h
index 4d2e2dbb1666..355ea15260ca 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.h
+++ b/drivers/gpu/drm/i915/display/skl_scaler.h
@@ -8,6 +8,7 @@
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_dsb;
struct intel_plane;
struct intel_plane_state;
@@ -21,10 +22,12 @@ int intel_atomic_setup_scalers(struct intel_atomic_state *state,
void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
-void skl_program_plane_scaler(struct intel_plane *plane,
+void skl_program_plane_scaler(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
-void skl_detach_scalers(const struct intel_crtc_state *crtc_state);
+void skl_detach_scalers(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state);
void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
void skl_scaler_get_config(struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index cd9762947f1d..70e550539bb2 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -1466,7 +1466,7 @@ skl_plane_update_arm(struct intel_dsb *dsb,
* TODO: split into noarm+arm pair
*/
if (plane_state->scaler_id >= 0)
- skl_program_plane_scaler(plane, crtc_state, plane_state);
+ skl_program_plane_scaler(dsb, plane, crtc_state, plane_state);
/*
* The control register self-arms if the plane was previously
@@ -1646,7 +1646,7 @@ icl_plane_update_arm(struct intel_dsb *dsb,
* TODO: split into noarm+arm pair
*/
if (plane_state->scaler_id >= 0)
- skl_program_plane_scaler(plane, crtc_state, plane_state);
+ skl_program_plane_scaler(dsb, plane, crtc_state, plane_state);
icl_plane_update_sel_fetch_arm(dsb, plane, crtc_state, plane_state);
@@ -2258,18 +2258,55 @@ static bool skl_fb_scalable(const struct drm_framebuffer *fb)
static void check_protection(struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct drm_framebuffer *fb = plane_state->hw.fb;
struct drm_gem_object *obj = intel_fb_bo(fb);
if (DISPLAY_VER(display) < 11)
return;
- plane_state->decrypt = intel_pxp_key_check(i915->pxp, obj, false) == 0;
+ plane_state->decrypt = intel_pxp_key_check(obj, false) == 0;
plane_state->force_black = intel_bo_is_protected(obj) &&
!plane_state->decrypt;
}
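+/*
+ * Make the damage rectangle relative to the plane source viewport,
+ * i.e. translate it from framebuffer coordinates and account for
+ * 90/270 degree source rotation.
+ */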
+static void
+make_damage_viewport_relative(struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const struct drm_rect *src = &plane_state->uapi.src;
+ unsigned int rotation = plane_state->hw.rotation;
+ struct drm_rect *damage = &plane_state->damage;
+
+ if (!drm_rect_visible(damage))
+ return;
+
+ if (!fb || !plane_state->uapi.visible) {
+ plane_state->damage = DRM_RECT_INIT(0, 0, 0, 0);
+ return;
+ }
+
+ if (drm_rotation_90_or_270(rotation)) {
+ drm_rect_rotate(damage, fb->width, fb->height,
+ DRM_MODE_ROTATE_270);
+ drm_rect_translate(damage, -(src->y1 >> 16), -(src->x1 >> 16));
+ } else {
+ drm_rect_translate(damage, -(src->x1 >> 16), -(src->y1 >> 16));
+ }
+}
+
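+/*
+ * Translate the viewport relative damage back to framebuffer
+ * coordinates and clip it to the integer plane source rectangle.
+ */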
+static void clip_damage(struct intel_plane_state *plane_state)
+{
+ struct drm_rect *damage = &plane_state->damage;
+ struct drm_rect src;
+
+ if (!drm_rect_visible(damage))
+ return;
+
+ drm_rect_fp_to_int(&src, &plane_state->uapi.src);
+ drm_rect_translate(damage, src.x1, src.y1);
+ drm_rect_intersect(damage, &src);
+}
+
static int skl_plane_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
@@ -2295,6 +2332,8 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ make_damage_viewport_relative(plane_state);
+
ret = skl_check_plane_surface(plane_state);
if (ret)
return ret;
@@ -2310,6 +2349,8 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ clip_damage(plane_state);
+
ret = skl_plane_check_nv12_rotation(plane_state);
if (ret)
return ret;
@@ -2317,8 +2358,10 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
check_protection(plane_state);
/* HW only has 8 bits pixel precision, disable plane if invisible */
- if (!(plane_state->hw.alpha >> 8))
+ if (!(plane_state->hw.alpha >> 8)) {
plane_state->uapi.visible = false;
+ plane_state->damage = DRM_RECT_INIT(0, 0, 0, 0);
+ }
plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 10a1daad28eb..2d0de1c63308 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -3157,6 +3157,7 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
dbuf_state->joined_mbus = intel_de_read(display, MBUS_CTL) & MBUS_JOIN;
dbuf_state->mdclk_cdclk_ratio = intel_mdclk_cdclk_ratio(display, &display->cdclk.hw);
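+ /* active_pipes is rebuilt from the per-crtc hw state read out below */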
+ dbuf_state->active_pipes = 0;
for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
@@ -3168,8 +3169,10 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
memset(&crtc_state->wm.skl.optimal, 0,
sizeof(crtc_state->wm.skl.optimal));
- if (crtc_state->hw.active)
+ if (crtc_state->hw.active) {
skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
+ dbuf_state->active_pipes |= BIT(pipe);
+ }
crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
@@ -3837,14 +3840,56 @@ static void skl_dbuf_sanitize(struct drm_i915_private *i915)
}
}
-static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
+static void skl_wm_sanitize(struct drm_i915_private *i915)
{
- skl_wm_get_hw_state(i915);
-
skl_mbus_sanitize(i915);
skl_dbuf_sanitize(i915);
}
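+/*
+ * Clear the dbuf and wm ddb tracking for a pipe that is being
+ * disabled outside of an atomic commit.
+ */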
+void skl_wm_crtc_disable_noatomic(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(display->dbuf.obj.state);
+ enum pipe pipe = crtc->pipe;
+
+ if (DISPLAY_VER(display) < 9)
+ return;
+
+ dbuf_state->active_pipes &= ~BIT(pipe);
+
+ dbuf_state->weight[pipe] = 0;
+ dbuf_state->slices[pipe] = 0;
+
+ memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
+
+ memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
+}
+
+void skl_wm_plane_disable_noatomic(struct intel_crtc *crtc,
+ struct intel_plane *plane)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ if (DISPLAY_VER(display) < 9)
+ return;
+
+ skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[plane->id], 0, 0);
+ skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb_y[plane->id], 0, 0);
+
+ crtc_state->wm.skl.plane_min_ddb[plane->id] = 0;
+ crtc_state->wm.skl.plane_interim_ddb[plane->id] = 0;
+
+ memset(&crtc_state->wm.skl.raw.planes[plane->id], 0,
+ sizeof(crtc_state->wm.skl.raw.planes[plane->id]));
+ memset(&crtc_state->wm.skl.optimal.planes[plane->id], 0,
+ sizeof(crtc_state->wm.skl.optimal.planes[plane->id]));
+}
+
void intel_wm_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -3972,7 +4017,8 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
static const struct intel_wm_funcs skl_wm_funcs = {
.compute_global_watermarks = skl_compute_wm,
- .get_hw_state = skl_wm_get_hw_state_and_sanitize,
+ .get_hw_state = skl_wm_get_hw_state,
+ .sanitize = skl_wm_sanitize,
};
void skl_wm_init(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index c5547485225a..d9cff6c54310 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -41,6 +41,10 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
void intel_wm_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc);
+void skl_wm_crtc_disable_noatomic(struct intel_crtc *crtc);
+void skl_wm_plane_disable_noatomic(struct intel_crtc *crtc,
+ struct intel_plane *plane);
+
void skl_watermark_ipc_init(struct drm_i915_private *i915);
void skl_watermark_ipc_update(struct drm_i915_private *i915);
bool skl_watermark_ipc_enabled(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 7414794889e9..af717df83197 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -283,7 +283,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- ret = intel_panel_fitting(pipe_config, conn_state);
+ ret = intel_pfit_compute_config(pipe_config, conn_state);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
index a032cc2a2524..f975660fa609 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
@@ -23,7 +23,6 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config);
void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
-bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
int bxt_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_crtc_state *config);
void bxt_dsi_pll_enable(struct intel_encoder *encoder,
@@ -34,9 +33,14 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
#ifdef I915
+bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
void assert_dsi_pll_enabled(struct intel_display *display);
void assert_dsi_pll_disabled(struct intel_display *display);
#else
+static inline bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+{
+ return false;
+}
static inline void assert_dsi_pll_enabled(struct intel_display *display)
{
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index c8107502190d..7796c4119ef5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -915,7 +915,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
*/
if (i915_gem_context_uses_protected_content(eb->gem_context) &&
i915_gem_object_is_protected(obj)) {
- err = intel_pxp_key_check(eb->i915->pxp, intel_bo_to_drm_bo(obj), true);
+ err = intel_pxp_key_check(intel_bo_to_drm_bo(obj), true);
if (err) {
i915_gem_object_put(obj);
return ERR_PTR(err);
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index 2b0327cc47c2..fd8babb513e5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -108,44 +108,6 @@ err_ctx:
}
struct i915_gem_context *
-live_context_for_engine(struct intel_engine_cs *engine, struct file *file)
-{
- struct i915_gem_engines *engines;
- struct i915_gem_context *ctx;
- struct intel_sseu null_sseu = {};
- struct intel_context *ce;
-
- engines = alloc_engines(1);
- if (!engines)
- return ERR_PTR(-ENOMEM);
-
- ctx = live_context(engine->i915, file);
- if (IS_ERR(ctx)) {
- __free_engines(engines, 0);
- return ctx;
- }
-
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- __free_engines(engines, 0);
- return ERR_CAST(ce);
- }
-
- intel_context_set_gem(ce, ctx, null_sseu);
- engines->engines[0] = ce;
- engines->num_engines = 1;
-
- mutex_lock(&ctx->engines_mutex);
- i915_gem_context_set_user_engines(ctx);
- engines = rcu_replace_pointer(ctx->engines, engines, 1);
- mutex_unlock(&ctx->engines_mutex);
-
- engines_idle_release(ctx, engines);
-
- return ctx;
-}
-
-struct i915_gem_context *
kernel_context(struct drm_i915_private *i915,
struct i915_address_space *vm)
{
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.h b/drivers/gpu/drm/i915/gem/selftests/mock_context.h
index 7a02fd9b5866..bc8fb37d2d24 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.h
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.h
@@ -23,9 +23,6 @@ void mock_context_close(struct i915_gem_context *ctx);
struct i915_gem_context *
live_context(struct drm_i915_private *i915, struct file *file);
-struct i915_gem_context *
-live_context_for_engine(struct intel_engine_cs *engine, struct file *file);
-
struct i915_gem_context *kernel_context(struct drm_i915_private *i915,
struct i915_address_space *vm);
void kernel_context_close(struct i915_gem_context *ctx);
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
index 075657018739..5cd58e0f0dcf 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
@@ -103,8 +103,7 @@ static struct dma_buf *mock_dmabuf(int npages)
struct dma_buf *dmabuf;
int i;
- mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),
- GFP_KERNEL);
+ mock = kmalloc(struct_size(mock, pages, npages), GFP_KERNEL);
if (!mock)
return ERR_PTR(-ENOMEM);
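The struct_size() conversion above replaces the open-coded sizeof(*mock) + npages * sizeof(struct page *). Below is a standalone sketch of the arithmetic for a struct with a trailing flexible array member; the real macro lives in <linux/overflow.h> and additionally saturates on overflow, which this illustration omits.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for the mock dma-buf object: a header plus a trailing array. */
struct mock_obj {
	int npages;
	void *pages[];		/* flexible array member */
};

/*
 * Illustrative equivalent of struct_size(ptr, member, count): size of the
 * header plus count trailing elements.  No overflow checking here, unlike
 * the kernel macro.
 */
#define STRUCT_SIZE(ptr, member, count) \
	(sizeof(*(ptr)) + sizeof((ptr)->member[0]) * (size_t)(count))

int main(void)
{
	struct mock_obj *mock;
	int npages = 16;

	mock = malloc(STRUCT_SIZE(mock, pages, npages));
	if (!mock)
		return 1;
	mock->npages = npages;
	printf("allocated %zu bytes for %d pages\n",
	       STRUCT_SIZE(mock, pages, npages), npages);
	free(mock);
	return 0;
}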
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index ec136eb12d48..39f6ba4bf1ab 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -677,7 +677,7 @@ void intel_engines_release(struct intel_gt *gt)
* in case we aborted before completely initialising the engines.
*/
GEM_BUG_ON(intel_gt_pm_is_awake(gt));
- if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ if (!intel_gt_gpu_reset_clobbers_display(gt))
intel_gt_reset_all_engines(gt);
/* Decouple the backend; but keep the layout for late GPU resets */
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 0ffba50981e3..0c723e7c71a2 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -328,6 +328,7 @@ static bool fence_is_active(const struct i915_fence_reg *fence)
static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
{
+ struct intel_display *display = &ggtt->vm.i915->display;
struct i915_fence_reg *active = NULL;
struct i915_fence_reg *fence, *fn;
@@ -353,7 +354,7 @@ static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
}
/* Wait for completion of pending flips which consume fences */
- if (intel_has_pending_fb_unpin(ggtt->vm.i915))
+ if (intel_has_pending_fb_unpin(display))
return ERR_PTR(-EAGAIN);
return ERR_PTR(-ENOBUFS);
@@ -749,7 +750,7 @@ static void swizzle_page(struct page *page)
char *vaddr;
int i;
- vaddr = kmap(page);
+ vaddr = kmap_local_page(page);
for (i = 0; i < PAGE_SIZE; i += 128) {
memcpy(temp, &vaddr[i], 64);
@@ -757,7 +758,7 @@ static void swizzle_page(struct page *page)
memcpy(&vaddr[i + 64], temp, 64);
}
- kunmap(page);
+ kunmap_local(vaddr);
}
/**
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 175fa2db0551..3182f19b9837 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -158,7 +158,7 @@ void intel_gt_pm_init(struct intel_gt *gt)
static bool reset_engines(struct intel_gt *gt)
{
- if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ if (intel_gt_gpu_reset_clobbers_display(gt))
return false;
return intel_gt_reset_all_engines(gt) == 0;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index 6dba65e54cdb..a6e50af44b46 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -409,6 +409,9 @@
#define GEN7_SO_PRIM_STORAGE_NEEDED(n) _MMIO(0x5240 + (n) * 8)
#define GEN7_SO_PRIM_STORAGE_NEEDED_UDW(n) _MMIO(0x5240 + (n) * 8 + 4)
+#define GEN8_WM_CHICKEN2 MCR_REG(0x5584)
+#define WAIT_ON_DEPTH_STALL_DONE_DISABLE REG_BIT(5)
+
#define GEN9_WM_CHICKEN3 _MMIO(0x5588)
#define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index d7784650e4d9..1154cd2b7c34 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -464,6 +464,45 @@ static ssize_t slpc_ignore_eff_freq_store(struct kobject *kobj,
return err ?: count;
}
+static ssize_t slpc_power_profile_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+
+ switch (slpc->power_profile) {
+ case SLPC_POWER_PROFILES_BASE:
+ return sysfs_emit(buff, "[%s] %s\n", "base", "power_saving");
+ case SLPC_POWER_PROFILES_POWER_SAVING:
+ return sysfs_emit(buff, "%s [%s]\n", "base", "power_saving");
+ }
+
+ return sysfs_emit(buff, "%u\n", slpc->power_profile);
+}
+
+static ssize_t slpc_power_profile_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ char power_saving[] = "power_saving";
+ char base[] = "base";
+ int err;
+ u32 val;
+
+ if (!strncmp(buff, power_saving, sizeof(power_saving) - 1))
+ val = SLPC_POWER_PROFILES_POWER_SAVING;
+ else if (!strncmp(buff, base, sizeof(base) - 1))
+ val = SLPC_POWER_PROFILES_BASE;
+ else
+ return -EINVAL;
+
+ err = intel_guc_slpc_set_power_profile(slpc, val);
+ return err ?: count;
+}
+
struct intel_gt_bool_throttle_attr {
struct attribute attr;
ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
@@ -668,6 +707,7 @@ INTEL_GT_ATTR_RO(media_RP0_freq_mhz);
INTEL_GT_ATTR_RO(media_RPn_freq_mhz);
INTEL_GT_ATTR_RW(slpc_ignore_eff_freq);
+INTEL_GT_ATTR_RW(slpc_power_profile);
static const struct attribute *media_perf_power_attrs[] = {
&attr_media_freq_factor.attr,
@@ -864,6 +904,13 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
gt_warn(gt, "failed to create ignore_eff_freq sysfs (%pe)", ERR_PTR(ret));
}
+ if (intel_uc_uses_guc_slpc(&gt->uc)) {
+ ret = sysfs_create_file(kobj, &attr_slpc_power_profile.attr);
+ if (ret)
+ gt_warn(gt, "failed to create slpc_power_profile sysfs (%pe)",
+ ERR_PTR(ret));
+ }
+
if (i915_mmio_reg_valid(intel_gt_perf_limit_reasons_reg(gt))) {
ret = sysfs_create_files(kobj, throttle_reason_attrs);
if (ret)
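The new slpc_power_profile attribute accepts the strings "base" and "power_saving" and reports the active profile in brackets on read. A minimal userspace sketch of driving it from C follows; the sysfs path is an assumption (card and gt indices vary per system).

#include <stdio.h>

/* Assumed path; adjust the card/gt indices for the system at hand. */
#define PROFILE_ATTR "/sys/class/drm/card0/gt/gt0/slpc_power_profile"

int main(void)
{
	char buf[64] = "";
	FILE *f;

	/* Select the power-saving profile (same as: echo power_saving > ...). */
	f = fopen(PROFILE_ATTR, "w");
	if (!f) {
		perror(PROFILE_ATTR);
		return 1;
	}
	fputs("power_saving", f);
	fclose(f);

	/* Read it back: the active profile is shown in brackets. */
	f = fopen(PROFILE_ATTR, "r");
	if (!f || !fgets(buf, sizeof(buf), f)) {
		perror(PROFILE_ATTR);
		return 1;
	}
	fclose(f);
	printf("%s", buf);	/* e.g. "base [power_saving]" */
	return 0;
}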
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index b33007cd1504..dbdcfe130ad4 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -986,7 +986,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
awake = reset_prepare(gt);
/* Even if the GPU reset fails, it should still stop the engines */
- if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ if (!intel_gt_gpu_reset_clobbers_display(gt))
intel_gt_reset_all_engines(gt);
for_each_engine(engine, gt, id)
@@ -1106,14 +1106,13 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
/* We must reset pending GPU events before restoring our submission */
ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
- if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ if (!intel_gt_gpu_reset_clobbers_display(gt))
ok = intel_gt_reset_all_engines(gt) == 0;
if (!ok) {
/*
* Warn CI about the unrecoverable wedged condition.
* Time for a reboot.
*/
- gt_err(gt, "Unrecoverable wedged condition\n");
add_taint_for_CI(gt->i915, TAINT_WARN);
return false;
}
@@ -1178,6 +1177,13 @@ static int resume(struct intel_gt *gt)
return 0;
}
+bool intel_gt_gpu_reset_clobbers_display(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ return INTEL_INFO(i915)->gpu_reset_clobbers_display;
+}
+
/**
* intel_gt_reset - reset chip after a hang
* @gt: #intel_gt to reset
@@ -1234,7 +1240,7 @@ void intel_gt_reset(struct intel_gt *gt,
goto error;
}
- if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ if (intel_gt_gpu_reset_clobbers_display(gt))
intel_irq_suspend(gt->i915);
if (do_reset(gt, stalled_mask)) {
@@ -1242,7 +1248,7 @@ void intel_gt_reset(struct intel_gt *gt,
goto taint;
}
- if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ if (intel_gt_gpu_reset_clobbers_display(gt))
intel_irq_resume(gt->i915);
intel_overlay_reset(display);
@@ -1265,10 +1271,8 @@ void intel_gt_reset(struct intel_gt *gt,
}
ret = resume(gt);
- if (ret) {
- gt_err(gt, "Failed to resume (%d)\n", ret);
+ if (ret)
goto taint;
- }
finish:
reset_finish(gt, awake);
@@ -1396,6 +1400,11 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
return err;
}
+static void display_reset_modeset_stuck(void *gt)
+{
+ intel_gt_set_wedged(gt);
+}
+
static void intel_gt_reset_global(struct intel_gt *gt,
u32 engine_mask,
const char *reason)
@@ -1413,11 +1422,26 @@ static void intel_gt_reset_global(struct intel_gt *gt,
/* Use a watchdog to ensure that our reset completes */
intel_wedge_on_timeout(&w, gt, 60 * HZ) {
- intel_display_reset_prepare(gt->i915);
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_display *display = &i915->display;
+ bool need_display_reset;
+ bool reset_display;
+
+ need_display_reset = intel_gt_gpu_reset_clobbers_display(gt) &&
+ intel_has_gpu_reset(gt);
+
+ reset_display = intel_display_reset_test(display) ||
+ need_display_reset;
+
+ if (reset_display)
+ reset_display = intel_display_reset_prepare(display,
+ display_reset_modeset_stuck,
+ gt);
intel_gt_reset(gt, engine_mask, reason);
- intel_display_reset_finish(gt->i915);
+ if (reset_display)
+ intel_display_reset_finish(display, !need_display_reset);
}
if (!test_bit(I915_WEDGED, &gt->reset.flags))
@@ -1485,7 +1509,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
local_bh_disable();
for_each_engine_masked(engine, gt, engine_mask, tmp) {
- BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
+ BUILD_BUG_ON(I915_RESET_BACKOFF >= I915_RESET_ENGINE);
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags))
continue;
@@ -1614,7 +1638,6 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt)
set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
/* Wedged on init is non-recoverable */
- gt_err(gt, "Non-recoverable wedged on init\n");
add_taint_for_CI(gt->i915, TAINT_WARN);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index c00de353075c..724ea6d64f33 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -28,6 +28,8 @@ void intel_gt_handle_error(struct intel_gt *gt,
const char *fmt, ...);
#define I915_ERROR_CAPTURE BIT(0)
+bool intel_gt_gpu_reset_clobbers_display(struct intel_gt *gt);
+
void intel_gt_reset(struct intel_gt *gt,
intel_engine_mask_t stalled_mask,
const char *reason);
diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h
index 80351f0a856c..4f5fd393af6f 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h
@@ -41,8 +41,7 @@ struct intel_reset {
*/
unsigned long flags;
#define I915_RESET_BACKOFF 0
-#define I915_RESET_MODESET 1
-#define I915_RESET_ENGINE 2
+#define I915_RESET_ENGINE 1
#define I915_WEDGED_ON_INIT (BITS_PER_LONG - 3)
#define I915_WEDGED_ON_FINI (BITS_PER_LONG - 2)
#define I915_WEDGED (BITS_PER_LONG - 1)
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index fa304ea088e4..2cfaedb04876 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -1025,6 +1025,10 @@ void intel_rps_boost(struct i915_request *rq)
if (rps_uses_slpc(rps)) {
slpc = rps_to_slpc(rps);
+ /* Waitboost should not be done with power saving profile */
+ if (slpc->power_profile == SLPC_POWER_PROFILES_POWER_SAVING)
+ return;
+
if (slpc->min_freq_softlimit >= slpc->boost_freq)
return;
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 3ea9b06de1be..116683ebe074 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -691,16 +691,17 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
struct drm_i915_private *i915 = engine->i915;
/*
- * Wa_1409142259:tgl,dg1,adl-p
+ * Wa_1409142259:tgl,dg1,adl-p,adl-n
* Wa_1409347922:tgl,dg1,adl-p
* Wa_1409252684:tgl,dg1,adl-p
* Wa_1409217633:tgl,dg1,adl-p
* Wa_1409207793:tgl,dg1,adl-p
- * Wa_1409178076:tgl,dg1,adl-p
- * Wa_1408979724:tgl,dg1,adl-p
- * Wa_14010443199:tgl,rkl,dg1,adl-p
- * Wa_14010698770:tgl,rkl,dg1,adl-s,adl-p
- * Wa_1409342910:tgl,rkl,dg1,adl-s,adl-p
+ * Wa_1409178076:tgl,dg1,adl-p,adl-n
+ * Wa_1408979724:tgl,dg1,adl-p,adl-n
+ * Wa_14010443199:tgl,rkl,dg1,adl-p,adl-n
+ * Wa_14010698770:tgl,rkl,dg1,adl-s,adl-p,adl-n
+ * Wa_1409342910:tgl,rkl,dg1,adl-s,adl-p,adl-n
+ * Wa_22010465259:tgl,rkl,dg1,adl-s,adl-p,adl-n
*/
wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
@@ -741,6 +742,12 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
/* Wa_1606376872 */
wa_masked_en(wal, COMMON_SLICE_CHICKEN4, DISABLE_TDC_LOAD_BALANCING_CALC);
}
+
+ /*
+ * This bit must be set to enable performance optimization for fast
+ * clears.
+ */
+ wa_mcr_write_or(wal, GEN8_WM_CHICKEN2, WAIT_ON_DEPTH_STALL_DONE_DISABLE);
}
static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index 78c03e6c0861..73bc91c6ea07 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -477,12 +477,13 @@ int live_rps_control(void *arg)
limit, intel_gpu_freq(rps, limit),
min, max, ktime_to_ns(min_dt), ktime_to_ns(max_dt));
- if (limit == rps->min_freq) {
- pr_err("%s: GPU throttled to minimum!\n",
- engine->name);
+ if (limit != rps->max_freq) {
+ u32 throttle = intel_uncore_read(gt->uncore,
+ intel_gt_perf_limit_reasons_reg(gt));
+
+ pr_warn("%s: GPU throttled with reasons 0x%08x\n",
+ engine->name, throttle & GT0_PERF_LIMIT_REASONS_MASK);
show_pstate_limits(rps);
- err = -ENODEV;
- break;
}
if (igt_flush_test(gt->i915)) {
@@ -1115,7 +1116,7 @@ static u64 measure_power(struct intel_rps *rps, int *freq)
for (i = 0; i < 5; i++)
x[i] = __measure_power(5);
- *freq = (*freq + intel_rps_read_actual_frequency(rps)) / 2;
+ *freq = (*freq + read_cagf(rps)) / 2;
/* A simple triangle filter for better result stability */
sort(x, 5, sizeof(*x), cmp_u64, NULL);
diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c
index e218b229681f..e61bb0bad12c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_slpc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c
@@ -95,6 +95,21 @@ static int slpc_restore_freq(struct intel_guc_slpc *slpc, u32 min, u32 max)
return 0;
}
+static u64 slpc_measure_power(struct intel_rps *rps, int *freq)
+{
+ u64 x[5];
+ int i;
+
+ for (i = 0; i < 5; i++)
+ x[i] = __measure_power(5);
+
+ *freq = (*freq + intel_rps_read_actual_frequency(rps)) / 2;
+
+ /* A simple triangle filter for better result stability */
+ sort(x, 5, sizeof(*x), cmp_u64, NULL);
+ return div_u64(x[1] + 2 * x[2] + x[3], 4);
+}
+
static u64 measure_power_at_freq(struct intel_gt *gt, int *freq, u64 *power)
{
int err = 0;
@@ -103,7 +118,7 @@ static u64 measure_power_at_freq(struct intel_gt *gt, int *freq, u64 *power)
if (err)
return err;
*freq = intel_rps_read_actual_frequency(&gt->rps);
- *power = measure_power(&gt->rps, freq);
+ *power = slpc_measure_power(&gt->rps, freq);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index bb696b29ee2c..365c4b8b04f4 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -108,7 +108,7 @@ static int __shmem_rw(struct file *file, loff_t off,
if (IS_ERR(page))
return PTR_ERR(page);
- vaddr = kmap(page);
+ vaddr = kmap_local_page(page);
if (write) {
memcpy(vaddr + offset_in_page(off), ptr, this);
set_page_dirty(page);
@@ -116,7 +116,7 @@ static int __shmem_rw(struct file *file, loff_t off,
memcpy(ptr, vaddr + offset_in_page(off), this);
}
mark_page_accessed(page);
- kunmap(page);
+ kunmap_local(vaddr);
put_page(page);
len -= this;
@@ -143,11 +143,11 @@ int shmem_read_to_iosys_map(struct file *file, loff_t off,
if (IS_ERR(page))
return PTR_ERR(page);
- vaddr = kmap(page);
+ vaddr = kmap_local_page(page);
iosys_map_memcpy_to(map, map_off, vaddr + offset_in_page(off),
this);
mark_page_accessed(page);
- kunmap(page);
+ kunmap_local(vaddr);
put_page(page);
len -= this;
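Both hunks above (and the swizzle_page() change earlier) follow the same conversion from kmap()/kunmap() to kmap_local_page()/kunmap_local(). A hedged kernel-context sketch of the idiom, not a drop-in for either caller: the mapping is only valid in the calling context, and kunmap_local() takes the address returned by kmap_local_page(), not the page.

#include <linux/highmem.h>
#include <linux/string.h>

/* Sketch only (kernel context): map a page locally, touch it, unmap it. */
static void copy_into_page(struct page *page, const void *src, size_t len)
{
	void *vaddr;

	vaddr = kmap_local_page(page);	/* short-lived, context-local mapping */
	memcpy(vaddr, src, len);
	kunmap_local(vaddr);		/* pass back the mapped address */
}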
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
index c34674e797c6..6de87ae5669e 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
@@ -228,6 +228,11 @@ struct slpc_optimized_strategies {
#define SLPC_OPTIMIZED_STRATEGY_COMPUTE REG_BIT(0)
+enum slpc_power_profiles {
+ SLPC_POWER_PROFILES_BASE = 0x0,
+ SLPC_POWER_PROFILES_POWER_SAVING = 0x1
+};
+
/**
* DOC: SLPC H2G MESSAGE FORMAT
*
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index fe53e8eccf4b..e7ccfa520df3 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -259,13 +259,14 @@ static int guc_wait_ucode(struct intel_guc *guc)
} else if (delta_ms > 200) {
guc_warn(guc, "excessive init time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n",
delta_ms, status, count, ret);
- guc_warn(guc, "excessive init time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n",
- intel_rps_read_actual_frequency(&gt->rps), before_freq,
+ guc_warn(guc, "excessive init time: [freq = %dMHz -> %dMHz vs %dMHz, perf_limit_reasons = 0x%08X]\n",
+ before_freq, intel_rps_read_actual_frequency(&gt->rps),
+ intel_rps_get_requested_frequency(&gt->rps),
intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt)));
} else {
- guc_dbg(guc, "init took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n",
- delta_ms, intel_rps_read_actual_frequency(&gt->rps),
- before_freq, status, count, ret);
+ guc_dbg(guc, "init took %lldms, freq = %dMHz -> %dMHz vs %dMHz, status = 0x%08X, count = %d, ret = %d\n",
+ delta_ms, before_freq, intel_rps_read_actual_frequency(&gt->rps),
+ intel_rps_get_requested_frequency(&gt->rps), status, count, ret);
}
return ret;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
index b67a15f74276..868195c33f5b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
@@ -7,6 +7,7 @@
#include "gt/intel_hwconfig.h"
#include "i915_drv.h"
#include "i915_memcpy.h"
+#include "intel_guc_print.h"
/*
* GuC has a blob containing hardware configuration information (HWConfig).
@@ -42,6 +43,8 @@ static int __guc_action_get_hwconfig(struct intel_guc *guc,
};
int ret;
+ guc_dbg(guc, "Querying HW config table: size = %d, offset = 0x%08X\n",
+ ggtt_size, ggtt_offset);
ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
if (ret == -ENXIO)
return -ENOENT;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 1a0e1a412fdb..d5ee6e5e1443 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -15,6 +15,34 @@
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"
+/**
+ * DOC: SLPC - Dynamic Frequency management
+ *
+ * Single Loop Power Control (SLPC) is a GuC algorithm that manages
+ * GT frequency based on busyness and how KMD initializes it. SLPC is
+ * almost completely in control after initialization except for a few
+ * scenarios mentioned below.
+ *
+ * KMD uses the concept of waitboost to ramp frequency to RP0 when there
+ * are pending submissions for a context. It achieves this by sending GuC a
+ * request to update the min frequency to RP0. Waitboost is disabled
+ * when the request retires.
+ *
+ * Another form of frequency control happens through per-context hints.
+ * A context can be marked as low latency during creation. That will ensure
+ * that SLPC uses an aggressive frequency ramp when that context is active.
+ *
+ * Power profiles add another level of control to these mechanisms.
+ * When power saving profile is chosen, SLPC will use conservative
+ * thresholds to ramp frequency, thus saving power. KMD will disable
+ * waitboosts as well, which achieves further power savings. Base profile
+ * is default and ensures balanced performance for any workload.
+ *
+ * Lastly, users have some level of control through sysfs, where min/max
+ * frequency values can be altered and the use of efficient freq
+ * can be toggled.
+ */
+
static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc)
{
return container_of(slpc, struct intel_guc, slpc);
@@ -265,6 +293,8 @@ int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
slpc->num_boosts = 0;
slpc->media_ratio_mode = SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL;
+ slpc->power_profile = SLPC_POWER_PROFILES_BASE;
+
mutex_init(&slpc->lock);
INIT_WORK(&slpc->boost_work, slpc_boost_work);
@@ -575,6 +605,34 @@ int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val)
return ret;
}
+int intel_guc_slpc_set_power_profile(struct intel_guc_slpc *slpc, u32 val)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ if (val > SLPC_POWER_PROFILES_POWER_SAVING)
+ return -EINVAL;
+
+ mutex_lock(&slpc->lock);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_POWER_PROFILE,
+ val);
+ if (ret)
+ guc_err(slpc_to_guc(slpc),
+ "Failed to set power profile to %d: %pe\n",
+ val, ERR_PTR(ret));
+ else
+ slpc->power_profile = val;
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_unlock(&slpc->lock);
+
+ return ret;
+}
+
void intel_guc_pm_intrmsk_enable(struct intel_gt *gt)
{
u32 pm_intrmsk_mbz = 0;
@@ -736,6 +794,13 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
/* Enable SLPC Optimized Strategy for compute */
intel_guc_slpc_set_strategy(slpc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
+ /* Set cached value of power_profile */
+ ret = intel_guc_slpc_set_power_profile(slpc, slpc->power_profile);
+ if (unlikely(ret)) {
+ guc_probe_error(guc, "Failed to set SLPC power profile: %pe\n", ERR_PTR(ret));
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
index 1cb5fd44f05c..fc9f761b4372 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
@@ -46,5 +46,6 @@ void intel_guc_slpc_boost(struct intel_guc_slpc *slpc);
void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc);
int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val);
int intel_guc_slpc_set_strategy(struct intel_guc_slpc *slpc, u32 val);
+int intel_guc_slpc_set_power_profile(struct intel_guc_slpc *slpc, u32 val);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
index a88651331497..83673b10ac4e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
@@ -33,6 +33,9 @@ struct intel_guc_slpc {
u32 max_freq_softlimit;
bool ignore_eff_freq;
+ /* Base or power saving */
+ u32 power_profile;
+
/* cached media ratio mode */
u32 media_ratio_mode;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index b3cbf85c00cb..f30c90650b7e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -489,13 +489,15 @@ int intel_huc_wait_for_auth_complete(struct intel_huc *huc,
if (delta_ms > 50) {
huc_warn(huc, "excessive auth time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n",
delta_ms, huc->status[type].reg.reg, count, ret);
- huc_warn(huc, "excessive auth time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n",
- intel_rps_read_actual_frequency(&gt->rps), before_freq,
+ huc_warn(huc, "excessive auth time: [freq = %dMHz -> %dMHz vs %dMHz, perf_limit_reasons = 0x%08X]\n",
+ before_freq, intel_rps_read_actual_frequency(&gt->rps),
+ intel_rps_get_requested_frequency(&gt->rps),
intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt)));
} else {
- huc_dbg(huc, "auth took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n",
- delta_ms, intel_rps_read_actual_frequency(&gt->rps),
- before_freq, huc->status[type].reg.reg, count, ret);
+ huc_dbg(huc, "auth took %lldms, freq = %dMHz -> %dMHz vs %dMHz, status = 0x%08X, count = %d, ret = %d\n",
+ delta_ms, before_freq, intel_rps_read_actual_frequency(&gt->rps),
+ intel_rps_get_requested_frequency(&gt->rps),
+ huc->status[type].reg.reg, count, ret);
}
/* mark the load process as complete even if the wait failed */
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1c2a97f593c7..0d9e263913ff 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -411,9 +411,6 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
if (!HAS_RUNTIME_PM(dev_priv))
seq_puts(m, "Runtime power management not supported\n");
- seq_printf(m, "Runtime power status: %s\n",
- str_enabled_disabled(!dev_priv->display.power.domains.init_wakeref));
-
seq_printf(m, "GPU idle: %s\n", str_yes_no(!to_gt(dev_priv)->awake));
seq_printf(m, "IRQs disabled: %s\n",
str_yes_no(!intel_irqs_enabled(dev_priv)));
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 1dfd6269b355..ce3cc93ea211 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -41,6 +41,8 @@
#include <linux/vt.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_client.h>
+#include <drm/drm_client_event.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
@@ -200,7 +202,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
static void sanitize_gpu(struct drm_i915_private *i915)
{
- if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) {
+ if (!intel_gt_gpu_reset_clobbers_display(to_gt(i915))) {
struct intel_gt *gt;
unsigned int i;
@@ -968,7 +970,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_runtime_pm_disable(&i915->runtime_pm);
intel_power_domains_disable(display);
- intel_fbdev_set_suspend(&i915->drm, FBINFO_STATE_SUSPENDED, true);
+ drm_client_dev_suspend(&i915->drm, false);
if (HAS_DISPLAY(i915)) {
drm_kms_helper_poll_disable(&i915->drm);
intel_display_driver_disable_user_access(display);
@@ -1051,7 +1053,7 @@ static int i915_drm_suspend(struct drm_device *dev)
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
intel_power_domains_disable(display);
- intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
+ drm_client_dev_suspend(dev, false);
if (HAS_DISPLAY(dev_priv)) {
drm_kms_helper_poll_disable(dev);
intel_display_driver_disable_user_access(display);
@@ -1070,7 +1072,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_encoder_suspend_all(&dev_priv->display);
/* Must be called before GGTT is suspended. */
- intel_dpt_suspend(dev_priv);
+ intel_dpt_suspend(display);
i915_ggtt_suspend(to_gt(dev_priv)->ggtt);
i9xx_display_sr_save(display);
@@ -1187,7 +1189,7 @@ static int i915_drm_resume(struct drm_device *dev)
setup_private_pat(gt);
/* Must be called after GGTT is resumed. */
- intel_dpt_resume(dev_priv);
+ intel_dpt_resume(display);
intel_dmc_resume(display);
@@ -1237,7 +1239,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_opregion_resume(display);
- intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
+ drm_client_dev_resume(dev, false);
intel_power_domains_enable(display);
@@ -1807,6 +1809,8 @@ static const struct drm_driver i915_drm_driver = {
.dumb_create = i915_gem_dumb_create,
.dumb_map_offset = i915_gem_dumb_mmap_offset,
+ INTEL_FBDEV_DRIVER_OPS,
+
.ioctls = i915_ioctls,
.num_ioctls = ARRAY_SIZE(i915_ioctls),
.fops = &i915_driver_fops,
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 78a8928562a9..749e1c55613e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -224,8 +224,6 @@ struct i915_gpu_error {
/* Protected by the above dev->gpu_error.lock. */
struct i915_gpu_coredump *first_error;
- atomic_t pending_fb_pin;
-
/** Number of times the device has been reset (global) */
atomic_t reset_count;
diff --git a/drivers/gpu/drm/i915/i915_gtt_view_types.h b/drivers/gpu/drm/i915/i915_gtt_view_types.h
new file mode 100644
index 000000000000..c084f67bc880
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gtt_view_types.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __I915_GTT_VIEW_TYPES_H__
+#define __I915_GTT_VIEW_TYPES_H__
+
+#include <linux/types.h>
+
+struct intel_remapped_plane_info {
+ /* in gtt pages */
+ u32 offset:31;
+ u32 linear:1;
+ union {
+ /* in gtt pages for !linear */
+ struct {
+ u16 width;
+ u16 height;
+ u16 src_stride;
+ u16 dst_stride;
+ };
+
+ /* in gtt pages for linear */
+ u32 size;
+ };
+} __packed;
+
+struct intel_rotation_info {
+ struct intel_remapped_plane_info plane[2];
+} __packed;
+
+struct intel_partial_info {
+ u64 offset;
+ unsigned int size;
+} __packed;
+
+struct intel_remapped_info {
+ struct intel_remapped_plane_info plane[4];
+ /* in gtt pages */
+ u32 plane_alignment;
+} __packed;
+
+enum i915_gtt_view_type {
+ I915_GTT_VIEW_NORMAL = 0,
+ I915_GTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
+ I915_GTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
+ I915_GTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
+};
+
+struct i915_gtt_view {
+ enum i915_gtt_view_type type;
+ union {
+ /* Members need to contain no holes/padding */
+ struct intel_partial_info partial;
+ struct intel_rotation_info rotated;
+ struct intel_remapped_info remapped;
+ };
+};
+
+#endif /* __I915_GTT_VIEW_TYPES_H__ */
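The header keeps the long-standing quirk that each i915_gtt_view_type value is literally the sizeof of the union member it selects, so the type doubles as the number of view-data bytes to compare or copy (the real structs are __packed, so no padding lands in that range). A standalone sketch of the trick with toy types; all names below are illustrative, not the i915 ones.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Toy view descriptions with no internal padding. */
struct partial_view { uint64_t offset; uint64_t size; };	/* 16 bytes */
struct rotated_view { uint32_t plane[3]; };			/* 12 bytes */

/* Each enumerator is the size of the union member it selects. */
enum view_type {
	VIEW_NORMAL  = 0,
	VIEW_PARTIAL = sizeof(struct partial_view),
	VIEW_ROTATED = sizeof(struct rotated_view),
};

struct view {
	enum view_type type;
	union {
		struct partial_view partial;
		struct rotated_view rotated;
	};
};

/* Compare only the bytes that the active union member actually occupies. */
static int view_equal(const struct view *a, const struct view *b)
{
	if (a->type != b->type)
		return 0;
	return memcmp(&a->partial, &b->partial, a->type) == 0;
}

int main(void)
{
	struct view a = { .type = VIEW_PARTIAL,
			  .partial = { .offset = 4096, .size = 16 } };
	struct view b = a;

	printf("equal: %d\n", view_equal(&a, &b));	/* 1 */
	b.partial.size = 32;
	printf("equal: %d\n", view_equal(&a, &b));	/* 0 */
	return 0;
}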
diff --git a/drivers/gpu/drm/i915/i915_module.c b/drivers/gpu/drm/i915/i915_module.c
index 2f88970cc0a9..5862754c662c 100644
--- a/drivers/gpu/drm/i915/i915_module.c
+++ b/drivers/gpu/drm/i915/i915_module.c
@@ -71,8 +71,6 @@ static const struct {
{ .init = i915_vma_resource_module_init,
.exit = i915_vma_resource_module_exit },
{ .init = i915_mock_selftests },
- { .init = i915_pmu_init,
- .exit = i915_pmu_exit },
{ .init = i915_pci_register_driver,
.exit = i915_pci_unregister_driver },
{ .init = i915_perf_sysctl_register,
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index e55db036be1b..69a109d02116 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -28,9 +28,6 @@
BIT(I915_SAMPLE_WAIT) | \
BIT(I915_SAMPLE_SEMA))
-static cpumask_t i915_pmu_cpumask;
-static unsigned int i915_pmu_target_cpu = -1;
-
static struct i915_pmu *event_to_pmu(struct perf_event *event)
{
return container_of(event->pmu, struct i915_pmu, base);
@@ -642,10 +639,6 @@ static int i915_pmu_event_init(struct perf_event *event)
if (event->cpu < 0)
return -EINVAL;
- /* only allow running on one cpu at a time */
- if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
- return -EINVAL;
-
if (is_engine_event(event))
ret = engine_event_init(event);
else
@@ -891,11 +884,6 @@ static void i915_pmu_event_del(struct perf_event *event, int flags)
i915_pmu_event_stop(event, PERF_EF_UPDATE);
}
-static int i915_pmu_event_event_idx(struct perf_event *event)
-{
- return 0;
-}
-
struct i915_str_attribute {
struct device_attribute attr;
const char *str;
@@ -940,23 +928,6 @@ static ssize_t i915_pmu_event_show(struct device *dev,
return sprintf(buf, "config=0x%lx\n", eattr->val);
}
-static ssize_t cpumask_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
-}
-
-static DEVICE_ATTR_RO(cpumask);
-
-static struct attribute *i915_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
-};
-
-static const struct attribute_group i915_pmu_cpumask_attr_group = {
- .attrs = i915_cpumask_attrs,
-};
-
#define __event(__counter, __name, __unit) \
{ \
.counter = (__counter), \
@@ -1173,92 +1144,12 @@ static void free_event_attributes(struct i915_pmu *pmu)
pmu->pmu_attr = NULL;
}
-static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
-{
- struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
-
- /* Select the first online CPU as a designated reader. */
- if (cpumask_empty(&i915_pmu_cpumask))
- cpumask_set_cpu(cpu, &i915_pmu_cpumask);
-
- return 0;
-}
-
-static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
-{
- struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
- unsigned int target = i915_pmu_target_cpu;
-
- /*
- * Unregistering an instance generates a CPU offline event which we must
- * ignore to avoid incorrectly modifying the shared i915_pmu_cpumask.
- */
- if (!pmu->registered)
- return 0;
-
- if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
- target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
-
- /* Migrate events if there is a valid target */
- if (target < nr_cpu_ids) {
- cpumask_set_cpu(target, &i915_pmu_cpumask);
- i915_pmu_target_cpu = target;
- }
- }
-
- if (target < nr_cpu_ids && target != pmu->cpuhp.cpu) {
- perf_pmu_migrate_context(&pmu->base, cpu, target);
- pmu->cpuhp.cpu = target;
- }
-
- return 0;
-}
-
-static enum cpuhp_state cpuhp_state = CPUHP_INVALID;
-
-int i915_pmu_init(void)
-{
- int ret;
-
- ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
- "perf/x86/intel/i915:online",
- i915_pmu_cpu_online,
- i915_pmu_cpu_offline);
- if (ret < 0)
- pr_notice("Failed to setup cpuhp state for i915 PMU! (%d)\n",
- ret);
- else
- cpuhp_state = ret;
-
- return 0;
-}
-
-void i915_pmu_exit(void)
-{
- if (cpuhp_state != CPUHP_INVALID)
- cpuhp_remove_multi_state(cpuhp_state);
-}
-
-static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
-{
- if (cpuhp_state == CPUHP_INVALID)
- return -EINVAL;
-
- return cpuhp_state_add_instance(cpuhp_state, &pmu->cpuhp.node);
-}
-
-static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
-{
- cpuhp_state_remove_instance(cpuhp_state, &pmu->cpuhp.node);
-}
-
void i915_pmu_register(struct drm_i915_private *i915)
{
struct i915_pmu *pmu = &i915->pmu;
const struct attribute_group *attr_groups[] = {
&i915_pmu_format_attr_group,
&pmu->events_attr_group,
- &i915_pmu_cpumask_attr_group,
NULL
};
int ret = -ENOMEM;
@@ -1266,7 +1157,6 @@ void i915_pmu_register(struct drm_i915_private *i915)
spin_lock_init(&pmu->lock);
hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pmu->timer.function = i915_sample;
- pmu->cpuhp.cpu = -1;
init_rc6(pmu);
if (IS_DGFX(i915)) {
@@ -1295,28 +1185,22 @@ void i915_pmu_register(struct drm_i915_private *i915)
pmu->base.module = THIS_MODULE;
pmu->base.task_ctx_nr = perf_invalid_context;
+ pmu->base.scope = PERF_PMU_SCOPE_SYS_WIDE;
pmu->base.event_init = i915_pmu_event_init;
pmu->base.add = i915_pmu_event_add;
pmu->base.del = i915_pmu_event_del;
pmu->base.start = i915_pmu_event_start;
pmu->base.stop = i915_pmu_event_stop;
pmu->base.read = i915_pmu_event_read;
- pmu->base.event_idx = i915_pmu_event_event_idx;
ret = perf_pmu_register(&pmu->base, pmu->name, -1);
if (ret)
goto err_groups;
- ret = i915_pmu_register_cpuhp_state(pmu);
- if (ret)
- goto err_unreg;
-
pmu->registered = true;
return;
-err_unreg:
- perf_pmu_unregister(&pmu->base);
err_groups:
kfree(pmu->base.attr_groups);
err_attr:
@@ -1340,8 +1224,6 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
hrtimer_cancel(&pmu->timer);
- i915_pmu_unregister_cpuhp_state(pmu);
-
perf_pmu_unregister(&pmu->base);
kfree(pmu->base.attr_groups);
if (IS_DGFX(i915))
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 0ec78c2b4f20..5826cc81858c 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -57,13 +57,6 @@ struct i915_pmu_sample {
struct i915_pmu {
/**
- * @cpuhp: Struct used for CPU hotplug handling.
- */
- struct {
- struct hlist_node node;
- unsigned int cpu;
- } cpuhp;
- /**
* @base: PMU base.
*/
struct pmu base;
@@ -155,15 +148,11 @@ struct i915_pmu {
};
#ifdef CONFIG_PERF_EVENTS
-int i915_pmu_init(void);
-void i915_pmu_exit(void);
void i915_pmu_register(struct drm_i915_private *i915);
void i915_pmu_unregister(struct drm_i915_private *i915);
void i915_pmu_gt_parked(struct intel_gt *gt);
void i915_pmu_gt_unparked(struct intel_gt *gt);
#else
-static inline int i915_pmu_init(void) { return 0; }
-static inline void i915_pmu_exit(void) {}
static inline void i915_pmu_register(struct drm_i915_private *i915) {}
static inline void i915_pmu_unregister(struct drm_i915_private *i915) {}
static inline void i915_pmu_gt_parked(struct intel_gt *gt) {}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b31b26e9a685..c5064eebe063 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1385,38 +1385,6 @@
/* ADL and later: */
#define VIDEO_DIP_ENABLE_AS_ADL REG_BIT(23)
-/* Panel fitting */
-#define PFIT_CONTROL(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230)
-#define PFIT_ENABLE REG_BIT(31)
-#define PFIT_PIPE_MASK REG_GENMASK(30, 29) /* 965+ */
-#define PFIT_PIPE(pipe) REG_FIELD_PREP(PFIT_PIPE_MASK, (pipe))
-#define PFIT_SCALING_MASK REG_GENMASK(28, 26) /* 965+ */
-#define PFIT_SCALING_AUTO REG_FIELD_PREP(PFIT_SCALING_MASK, 0)
-#define PFIT_SCALING_PROGRAMMED REG_FIELD_PREP(PFIT_SCALING_MASK, 1)
-#define PFIT_SCALING_PILLAR REG_FIELD_PREP(PFIT_SCALING_MASK, 2)
-#define PFIT_SCALING_LETTER REG_FIELD_PREP(PFIT_SCALING_MASK, 3)
-#define PFIT_FILTER_MASK REG_GENMASK(25, 24) /* 965+ */
-#define PFIT_FILTER_FUZZY REG_FIELD_PREP(PFIT_FILTER_MASK, 0)
-#define PFIT_FILTER_CRISP REG_FIELD_PREP(PFIT_FILTER_MASK, 1)
-#define PFIT_FILTER_MEDIAN REG_FIELD_PREP(PFIT_FILTER_MASK, 2)
-#define PFIT_VERT_INTERP_MASK REG_GENMASK(11, 10) /* pre-965 */
-#define PFIT_VERT_INTERP_BILINEAR REG_FIELD_PREP(PFIT_VERT_INTERP_MASK, 1)
-#define PFIT_VERT_AUTO_SCALE REG_BIT(9) /* pre-965 */
-#define PFIT_HORIZ_INTERP_MASK REG_GENMASK(7, 6) /* pre-965 */
-#define PFIT_HORIZ_INTERP_BILINEAR REG_FIELD_PREP(PFIT_HORIZ_INTERP_MASK, 1)
-#define PFIT_HORIZ_AUTO_SCALE REG_BIT(5) /* pre-965 */
-#define PFIT_PANEL_8TO6_DITHER_ENABLE REG_BIT(3) /* pre-965 */
-
-#define PFIT_PGM_RATIOS(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61234)
-#define PFIT_VERT_SCALE_MASK REG_GENMASK(31, 20) /* pre-965 */
-#define PFIT_VERT_SCALE(x) REG_FIELD_PREP(PFIT_VERT_SCALE_MASK, (x))
-#define PFIT_HORIZ_SCALE_MASK REG_GENMASK(15, 4) /* pre-965 */
-#define PFIT_HORIZ_SCALE(x) REG_FIELD_PREP(PFIT_HORIZ_SCALE_MASK, (x))
-#define PFIT_VERT_SCALE_MASK_965 REG_GENMASK(28, 16) /* 965+ */
-#define PFIT_HORIZ_SCALE_MASK_965 REG_GENMASK(12, 0) /* 965+ */
-
-#define PFIT_AUTO_RATIOS(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61238)
-
#define PCH_GTC_CTL _MMIO(0xe7000)
#define PCH_GTC_ENABLE (1 << 31)
@@ -1911,44 +1879,6 @@
#define _PIPEB_LINK_N2 0x6104c
#define PIPE_LINK_N2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N2)
-/* CPU panel fitter */
-/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
-#define _PFA_CTL_1 0x68080
-#define _PFB_CTL_1 0x68880
-#define PF_CTL(pipe) _MMIO_PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1)
-#define PF_ENABLE REG_BIT(31)
-#define PF_PIPE_SEL_MASK_IVB REG_GENMASK(30, 29) /* ivb/hsw */
-#define PF_PIPE_SEL_IVB(pipe) REG_FIELD_PREP(PF_PIPE_SEL_MASK_IVB, (pipe))
-#define PF_FILTER_MASK REG_GENMASK(24, 23)
-#define PF_FILTER_PROGRAMMED REG_FIELD_PREP(PF_FILTER_MASK, 0)
-#define PF_FILTER_MED_3x3 REG_FIELD_PREP(PF_FILTER_MASK, 1)
-#define PF_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PF_FILTER_EDGE_MASK, 2)
-#define PF_FILTER_EDGE_SOFTEN REG_FIELD_PREP(PF_FILTER_EDGE_MASK, 3)
-
-#define _PFA_WIN_SZ 0x68074
-#define _PFB_WIN_SZ 0x68874
-#define PF_WIN_SZ(pipe) _MMIO_PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ)
-#define PF_WIN_XSIZE_MASK REG_GENMASK(31, 16)
-#define PF_WIN_XSIZE(w) REG_FIELD_PREP(PF_WIN_XSIZE_MASK, (w))
-#define PF_WIN_YSIZE_MASK REG_GENMASK(15, 0)
-#define PF_WIN_YSIZE(h) REG_FIELD_PREP(PF_WIN_YSIZE_MASK, (h))
-
-#define _PFA_WIN_POS 0x68070
-#define _PFB_WIN_POS 0x68870
-#define PF_WIN_POS(pipe) _MMIO_PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS)
-#define PF_WIN_XPOS_MASK REG_GENMASK(31, 16)
-#define PF_WIN_XPOS(x) REG_FIELD_PREP(PF_WIN_XPOS_MASK, (x))
-#define PF_WIN_YPOS_MASK REG_GENMASK(15, 0)
-#define PF_WIN_YPOS(y) REG_FIELD_PREP(PF_WIN_YPOS_MASK, (y))
-
-#define _PFA_VSCALE 0x68084
-#define _PFB_VSCALE 0x68884
-#define PF_VSCALE(pipe) _MMIO_PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
-
-#define _PFA_HSCALE 0x68090
-#define _PFB_HSCALE 0x68890
-#define PF_HSCALE(pipe) _MMIO_PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
-
/*
* Skylake scalers
*/
@@ -4235,8 +4165,8 @@ enum skl_power_gate {
_MMIO_PIPE(pipe, _PIPE_FLIPDONETMSTMP_A, _PIPE_FLIPDONETMSTMP_B)
#define _VLV_PIPE_MSA_MISC_A 0x70048
-#define VLV_PIPE_MSA_MISC(pipe) \
- _MMIO_PIPE2(dev_priv, pipe, _VLV_PIPE_MSA_MISC_A)
+#define VLV_PIPE_MSA_MISC(__display, pipe) \
+ _MMIO_PIPE2(__display, pipe, _VLV_PIPE_MSA_MISC_A)
#define VLV_MSA_MISC1_HW_ENABLE REG_BIT(31)
#define VLV_MSA_MISC1_SW_S3D_MASK REG_GENMASK(2, 0) /* MSA MISC1 3:1 */
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index 559de74d0b11..a499a3bea874 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -32,6 +32,8 @@
#include "gem/i915_gem_object_types.h"
+#include "i915_gtt_view_types.h"
+
/**
* DOC: Global GTT views
*
@@ -95,46 +97,6 @@
struct i915_vma_resource;
-struct intel_remapped_plane_info {
- /* in gtt pages */
- u32 offset:31;
- u32 linear:1;
- union {
- /* in gtt pages for !linear */
- struct {
- u16 width;
- u16 height;
- u16 src_stride;
- u16 dst_stride;
- };
-
- /* in gtt pages for linear */
- u32 size;
- };
-} __packed;
-
-struct intel_remapped_info {
- struct intel_remapped_plane_info plane[4];
- /* in gtt pages */
- u32 plane_alignment;
-} __packed;
-
-struct intel_rotation_info {
- struct intel_remapped_plane_info plane[2];
-} __packed;
-
-struct intel_partial_info {
- u64 offset;
- unsigned int size;
-} __packed;
-
-enum i915_gtt_view_type {
- I915_GTT_VIEW_NORMAL = 0,
- I915_GTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
- I915_GTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
- I915_GTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
-};
-
static inline void assert_i915_gem_gtt_types(void)
{
BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 2 * sizeof(u32) + 8 * sizeof(u16));
@@ -160,16 +122,6 @@ static inline void assert_i915_gem_gtt_types(void)
}
}
-struct i915_gtt_view {
- enum i915_gtt_view_type type;
- union {
- /* Members need to contain no holes/padding */
- struct intel_partial_info partial;
- struct intel_rotation_info rotated;
- struct intel_remapped_info remapped;
- };
-};
-
/**
* DOC: Virtual Memory Address
*
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 04076316e139..76d84cbb8361 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -18,6 +18,7 @@
#include "display/intel_fbc_regs.h"
#include "display/intel_fdi_regs.h"
#include "display/intel_lvds_regs.h"
+#include "display/intel_pfit_regs.h"
#include "display/intel_psr_regs.h"
#include "display/intel_sprite_regs.h"
#include "display/skl_universal_plane_regs.h"
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index 9cf169665d7c..f8da693ad3ce 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -460,11 +460,11 @@ void intel_pxp_fini_hw(struct intel_pxp *pxp)
intel_pxp_irq_disable(pxp);
}
-int intel_pxp_key_check(struct intel_pxp *pxp,
- struct drm_gem_object *_obj,
- bool assign)
+int intel_pxp_key_check(struct drm_gem_object *_obj, bool assign)
{
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
+ struct drm_i915_private *i915 = to_i915(_obj->dev);
+ struct intel_pxp *pxp = i915->pxp;
if (!intel_pxp_is_active(pxp))
return -ENODEV;
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h
index 4ed97db5e7c6..7b19109845a3 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h
@@ -31,9 +31,7 @@ int intel_pxp_get_backend_timeout_ms(struct intel_pxp *pxp);
int intel_pxp_start(struct intel_pxp *pxp);
void intel_pxp_end(struct intel_pxp *pxp);
-int intel_pxp_key_check(struct intel_pxp *pxp,
- struct drm_gem_object *obj,
- bool assign);
+int intel_pxp_key_check(struct drm_gem_object *obj, bool assign);
void intel_pxp_invalidate(struct intel_pxp *pxp);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index e817d233df61..ad650f67114a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -45,13 +45,15 @@ static void trash_stolen(struct drm_i915_private *i915)
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
const u64 slot = ggtt->error_capture.start;
const resource_size_t size = resource_size(&i915->dsm.stolen);
+ struct rnd_state prng;
unsigned long page;
- u32 prng = 0x12345678;
/* XXX: fsck. needs some more thought... */
if (!i915_ggtt_has_aperture(ggtt))
return;
+ prandom_seed_state(&prng, 0x12345678);
+
for (page = 0; page < size; page += PAGE_SIZE) {
const dma_addr_t dma = i915->dsm.stolen.start + page;
u32 __iomem *s;
@@ -64,8 +66,7 @@ static void trash_stolen(struct drm_i915_private *i915)
s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
for (x = 0; x < PAGE_SIZE / sizeof(u32); x++) {
- prng = next_pseudo_random32(prng);
- iowrite32(prng, &s[x]);
+ iowrite32(prandom_u32_state(&prng), &s[x]);
}
io_mapping_unmap_atomic(s);
}
diff --git a/drivers/gpu/drm/imagination/Makefile b/drivers/gpu/drm/imagination/Makefile
index 9bc6a3884c22..3d9d4d40fb80 100644
--- a/drivers/gpu/drm/imagination/Makefile
+++ b/drivers/gpu/drm/imagination/Makefile
@@ -1,8 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only OR MIT
# Copyright (c) 2023 Imagination Technologies Ltd.
-subdir-ccflags-y := -I$(src)
-
powervr-y := \
pvr_ccb.o \
pvr_cccb.o \
diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c
index c39beb70c317..6d13864851fc 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_meta.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c
@@ -527,8 +527,10 @@ pvr_meta_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
static void
pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
{
- pvr_vm_unmap(pvr_dev->kernel_vm_ctx, fw_obj->fw_mm_node.start,
- fw_obj->fw_mm_node.size);
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+
+ pvr_vm_unmap_obj(pvr_dev->kernel_vm_ctx, pvr_obj,
+ fw_obj->fw_mm_node.start, fw_obj->fw_mm_node.size);
}
static bool
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c
index 73707daa4e52..5dbb636d7d4f 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_trace.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c
@@ -333,8 +333,8 @@ static int fw_trace_seq_show(struct seq_file *s, void *v)
if (sf_id == ROGUE_FW_SF_LAST)
return -EINVAL;
- timestamp = read_fw_trace(trace_seq_data, 1) |
- ((u64)read_fw_trace(trace_seq_data, 2) << 32);
+ timestamp = ((u64)read_fw_trace(trace_seq_data, 1) << 32) |
+ read_fw_trace(trace_seq_data, 2);
timestamp = (timestamp & ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK) >>
ROGUE_FWT_TIMESTAMP_TIME_SHIFT;
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
index 21c185d18bb2..eba69309bb6c 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.c
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -109,12 +109,20 @@ pvr_queue_fence_get_driver_name(struct dma_fence *f)
return PVR_DRIVER_NAME;
}
+static void pvr_queue_fence_release_work(struct work_struct *w)
+{
+ struct pvr_queue_fence *fence = container_of(w, struct pvr_queue_fence, release_work);
+
+ pvr_context_put(fence->queue->ctx);
+ dma_fence_free(&fence->base);
+}
+
static void pvr_queue_fence_release(struct dma_fence *f)
{
struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base);
+ struct pvr_device *pvr_dev = fence->queue->ctx->pvr_dev;
- pvr_context_put(fence->queue->ctx);
- dma_fence_free(f);
+ queue_work(pvr_dev->sched_wq, &fence->release_work);
}
static const char *
@@ -268,6 +276,7 @@ pvr_queue_fence_init(struct dma_fence *f,
pvr_context_get(queue->ctx);
fence->queue = queue;
+ INIT_WORK(&fence->release_work, pvr_queue_fence_release_work);
dma_fence_init(&fence->base, fence_ops,
&fence_ctx->lock, fence_ctx->id,
atomic_inc_return(&fence_ctx->seqno));
@@ -304,8 +313,9 @@ pvr_queue_cccb_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
static void
pvr_queue_job_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
{
- pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops,
- &queue->job_fence_ctx);
+ if (!fence->ops)
+ pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops,
+ &queue->job_fence_ctx);
}
/**
diff --git a/drivers/gpu/drm/imagination/pvr_queue.h b/drivers/gpu/drm/imagination/pvr_queue.h
index e06ced69302f..93fe9ac9f58c 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.h
+++ b/drivers/gpu/drm/imagination/pvr_queue.h
@@ -5,6 +5,7 @@
#define PVR_QUEUE_H
#include <drm/gpu_scheduler.h>
+#include <linux/workqueue.h>
#include "pvr_cccb.h"
#include "pvr_device.h"
@@ -63,6 +64,9 @@ struct pvr_queue_fence {
/** @queue: Queue that created this fence. */
struct pvr_queue *queue;
+
+ /** @release_work: Fence release work structure. */
+ struct work_struct release_work;
};
/**
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
index 363f885a7098..2896fa7501b1 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.c
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -293,8 +293,9 @@ err_bind_op_fini:
static int
pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
- struct pvr_vm_context *vm_ctx, u64 device_addr,
- u64 size)
+ struct pvr_vm_context *vm_ctx,
+ struct pvr_gem_object *pvr_obj,
+ u64 device_addr, u64 size)
{
int err;
@@ -318,6 +319,7 @@ pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
goto err_bind_op_fini;
}
+ bind_op->pvr_obj = pvr_obj;
bind_op->vm_ctx = vm_ctx;
bind_op->device_addr = device_addr;
bind_op->size = size;
@@ -598,20 +600,6 @@ err_free:
}
/**
- * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
- * @vm_ctx: Target VM context.
- *
- * This function ensures that no mappings are left dangling by unmapping them
- * all in order of ascending device-virtual address.
- */
-void
-pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
-{
- WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
- vm_ctx->gpuvm_mgr.mm_range));
-}
-
-/**
* pvr_vm_context_release() - Teardown a VM context.
* @ref_count: Pointer to reference counter of the VM context.
*
@@ -703,11 +691,7 @@ pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;
- /* Unmap operations don't have an object to lock. */
- if (!pvr_obj)
- return 0;
-
- /* Acquire lock on the GEM being mapped. */
+ /* Acquire lock on the GEM object being mapped/unmapped. */
return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
}
@@ -772,8 +756,10 @@ err_cleanup:
}
/**
- * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
+ * pvr_vm_unmap_obj_locked() - Unmap an already mapped section of device-virtual
+ * memory.
* @vm_ctx: Target VM context.
+ * @pvr_obj: Target PowerVR memory object.
* @device_addr: Virtual device address at the start of the target mapping.
* @size: Size of the target mapping.
*
@@ -784,9 +770,13 @@ err_cleanup:
* * Any error encountered while performing internal operations required to
* destroy the mapping (returned from pvr_vm_gpuva_unmap or
* pvr_vm_gpuva_remap).
+ *
+ * The vm_ctx->lock must be held when calling this function.
*/
-int
-pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
+static int
+pvr_vm_unmap_obj_locked(struct pvr_vm_context *vm_ctx,
+ struct pvr_gem_object *pvr_obj,
+ u64 device_addr, u64 size)
{
struct pvr_vm_bind_op bind_op = {0};
struct drm_gpuvm_exec vm_exec = {
@@ -799,11 +789,13 @@ pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
},
};
- int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
- size);
+ int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, pvr_obj,
+ device_addr, size);
if (err)
return err;
+ pvr_gem_object_get(pvr_obj);
+
err = drm_gpuvm_exec_lock(&vm_exec);
if (err)
goto err_cleanup;
@@ -818,6 +810,96 @@ err_cleanup:
return err;
}
+/**
+ * pvr_vm_unmap_obj() - Unmap an already mapped section of device-virtual
+ * memory.
+ * @vm_ctx: Target VM context.
+ * @pvr_obj: Target PowerVR memory object.
+ * @device_addr: Virtual device address at the start of the target mapping.
+ * @size: Size of the target mapping.
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error encountered by pvr_vm_unmap_obj_locked.
+ */
+int
+pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
+ u64 device_addr, u64 size)
+{
+ int err;
+
+ mutex_lock(&vm_ctx->lock);
+ err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, device_addr, size);
+ mutex_unlock(&vm_ctx->lock);
+
+ return err;
+}
+
+/**
+ * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
+ * @vm_ctx: Target VM context.
+ * @device_addr: Virtual device address at the start of the target mapping.
+ * @size: Size of the target mapping.
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error encountered by drm_gpuva_find,
+ * * Any error encountered by pvr_vm_unmap_obj_locked.
+ */
+int
+pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
+{
+ struct pvr_gem_object *pvr_obj;
+ struct drm_gpuva *va;
+ int err;
+
+ mutex_lock(&vm_ctx->lock);
+
+ va = drm_gpuva_find(&vm_ctx->gpuvm_mgr, device_addr, size);
+ if (va) {
+ pvr_obj = gem_to_pvr_gem(va->gem.obj);
+ err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
+ va->va.addr, va->va.range);
+ } else {
+ err = -ENOENT;
+ }
+
+ mutex_unlock(&vm_ctx->lock);
+
+ return err;
+}
+
+/**
+ * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
+ * @vm_ctx: Target VM context.
+ *
+ * This function ensures that no mappings are left dangling by unmapping them
+ * all in order of ascending device-virtual address.
+ */
+void
+pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
+{
+ mutex_lock(&vm_ctx->lock);
+
+ for (;;) {
+ struct pvr_gem_object *pvr_obj;
+ struct drm_gpuva *va;
+
+ va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr,
+ vm_ctx->gpuvm_mgr.mm_start,
+ vm_ctx->gpuvm_mgr.mm_range);
+ if (!va)
+ break;
+
+ pvr_obj = gem_to_pvr_gem(va->gem.obj);
+
+ WARN_ON(pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
+ va->va.addr, va->va.range));
+ }
+
+ mutex_unlock(&vm_ctx->lock);
+}
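
Note: the rework above replaces the blind whole-range unmap with GPUVA lookups done under vm_ctx->lock. pvr_vm_unmap() resolves the exact mapping with drm_gpuva_find() so the backing GEM object is known (and can be referenced and locked by the bind op), while pvr_vm_unmap_all() repeatedly takes whatever drm_gpuva_find_first() returns inside the managed window until the VA space is empty. A hedged sketch of how the two GPUVM lookups differ (assumes <drm/drm_gpuvm.h>; illustrative helpers, not driver code):

/* Any mapping inside the managed window, as used by pvr_vm_unmap_all(). */
static bool my_vm_has_any_mapping(struct drm_gpuvm *gpuvm)
{
        return drm_gpuva_find_first(gpuvm, gpuvm->mm_start,
                                    gpuvm->mm_range) != NULL;
}

/* Exact device_addr/size match, as used by pvr_vm_unmap(). */
static bool my_vm_has_exact_mapping(struct drm_gpuvm *gpuvm,
                                    u64 device_addr, u64 size)
{
        return drm_gpuva_find(gpuvm, device_addr, size) != NULL;
}
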
+
/* Static data areas are determined by firmware. */
static const struct drm_pvr_static_data_area static_data_areas[] = {
{
diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h
index 79406243617c..b0528dffa7f1 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.h
+++ b/drivers/gpu/drm/imagination/pvr_vm.h
@@ -38,6 +38,9 @@ struct pvr_vm_context *pvr_vm_create_context(struct pvr_device *pvr_dev,
int pvr_vm_map(struct pvr_vm_context *vm_ctx,
struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset,
u64 device_addr, u64 size);
+int pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx,
+ struct pvr_gem_object *pvr_obj,
+ u64 device_addr, u64 size);
int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size);
void pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx);
diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c
index 5674f5707cca..8f6fba4217ec 100644
--- a/drivers/gpu/drm/mediatek/mtk_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.c
@@ -620,13 +620,16 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
+ goto update_config_out;
}
-#else
+#endif
spin_lock_irqsave(&mtk_crtc->config_lock, flags);
mtk_crtc->config_updating = false;
spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
-#endif
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+update_config_out:
+#endif
mutex_unlock(&mtk_crtc->hw_lock);
}
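
Note: with this change the spin-locked reset of config_updating also runs on CMDQ-enabled builds whenever the frame is not handed to the mailbox; when it is, the goto skips the reset, presumably because the CMDQ completion callback clears the flag instead. The new label is itself guarded by IS_REACHABLE(CONFIG_MTK_CMDQ) so configurations without CMDQ do not emit an unused-label warning. A condensed sketch of the idiom with a stand-in CONFIG_FOO symbol:

static void my_update(bool sent_async)
{
#if IS_REACHABLE(CONFIG_FOO)
        if (sent_async)
                goto out;       /* completion callback does the bookkeeping */
#endif
        /* synchronous-path bookkeeping goes here */
#if IS_REACHABLE(CONFIG_FOO)
out:
#endif
        /* common tail (unlock, etc.) */
        return;
}
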
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index 3d4648d2e15f..ccdc57cef3ea 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -1766,7 +1766,7 @@ static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp)
ret = drm_dp_dpcd_readb(&mtk_dp->aux, DP_MSTM_CAP, &val);
if (ret < 1) {
- drm_err(mtk_dp->drm_dev, "Read mstm cap failed\n");
+ dev_err(mtk_dp->dev, "Read mstm cap failed: %zd\n", ret);
return ret == 0 ? -EIO : ret;
}
@@ -1776,7 +1776,7 @@ static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp)
DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
&val);
if (ret < 1) {
- drm_err(mtk_dp->drm_dev, "Read irq vector failed\n");
+ dev_err(mtk_dp->dev, "Read irq vector failed: %zd\n", ret);
return ret == 0 ? -EIO : ret;
}
@@ -2059,7 +2059,7 @@ static int mtk_dp_wait_hpd_asserted(struct drm_dp_aux *mtk_aux, unsigned long wa
ret = mtk_dp_parse_capabilities(mtk_dp);
if (ret) {
- drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n");
+ dev_err(mtk_dp->dev, "Can't parse capabilities: %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 1864eb02dbf5..0fd13e6dd3f1 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -4,8 +4,10 @@
* Author: Jie Qiu <jie.qiu@mediatek.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/media-bus-format.h>
@@ -116,9 +118,15 @@ struct mtk_dpi_yc_limit {
u16 c_bottom;
};
+struct mtk_dpi_factor {
+ u32 clock;
+ u8 factor;
+};
+
/**
* struct mtk_dpi_conf - Configuration of mediatek dpi.
- * @cal_factor: Callback function to calculate factor value.
+ * @dpi_factor: SoC-specific pixel clock PLL factor values.
+ * @num_dpi_factor: Number of pixel clock PLL factor values.
* @reg_h_fre_con: Register address of frequency control.
* @max_clock_khz: Max clock frequency supported for this SoCs in khz units.
* @edge_sel_en: Enable of edge selection.
@@ -127,19 +135,24 @@ struct mtk_dpi_yc_limit {
* @is_ck_de_pol: Support CK/DE polarity.
* @swap_input_support: Support input swap function.
* @support_direct_pin: IP supports direct connection to dpi panels.
- * @input_2pixel: Input pixel of dp_intf is 2 pixel per round, so enable this
- * config to enable this feature.
* @dimension_mask: Mask used for HWIDTH, HPORCH, VSYNC_WIDTH and VSYNC_PORCH
* (no shift).
* @hvsize_mask: Mask of HSIZE and VSIZE mask (no shift).
* @channel_swap_shift: Shift value of channel swap.
* @yuv422_en_bit: Enable bit of yuv422.
* @csc_enable_bit: Enable bit of CSC.
+ * @input_2p_en_bit: Enable bit for input two pixel per round feature.
+ * If present, implies that the feature must be enabled.
* @pixels_per_iter: Quantity of transferred pixels per iteration.
* @edge_cfg_in_mmsys: If the edge configuration for DPI's output needs to be set in MMSYS.
+ * @clocked_by_hdmi: HDMI IP outputs clock to dpi_pixel_clk input clock, needed
+ * for DPI registers access.
+ * @output_1pixel: Enable outputting one pixel per round; if the input is two pixel per
+ * round, the DPI hardware will internally transform it to 1T1P.
*/
struct mtk_dpi_conf {
- unsigned int (*cal_factor)(int clock);
+ const struct mtk_dpi_factor *dpi_factor;
+ const u8 num_dpi_factor;
u32 reg_h_fre_con;
u32 max_clock_khz;
bool edge_sel_en;
@@ -148,14 +161,16 @@ struct mtk_dpi_conf {
bool is_ck_de_pol;
bool swap_input_support;
bool support_direct_pin;
- bool input_2pixel;
u32 dimension_mask;
u32 hvsize_mask;
u32 channel_swap_shift;
u32 yuv422_en_bit;
u32 csc_enable_bit;
+ u32 input_2p_en_bit;
u32 pixels_per_iter;
bool edge_cfg_in_mmsys;
+ bool clocked_by_hdmi;
+ bool output_1pixel;
};
static void mtk_dpi_mask(struct mtk_dpi *dpi, u32 offset, u32 val, u32 mask)
@@ -166,6 +181,18 @@ static void mtk_dpi_mask(struct mtk_dpi *dpi, u32 offset, u32 val, u32 mask)
writel(tmp, dpi->regs + offset);
}
+static void mtk_dpi_test_pattern_en(struct mtk_dpi *dpi, u8 type, bool enable)
+{
+ u32 val;
+
+ if (enable)
+ val = FIELD_PREP(DPI_PAT_SEL, type) | DPI_PAT_EN;
+ else
+ val = 0;
+
+ mtk_dpi_mask(dpi, DPI_PATTERN0, val, DPI_PAT_SEL | DPI_PAT_EN);
+}
+
static void mtk_dpi_sw_reset(struct mtk_dpi *dpi, bool reset)
{
mtk_dpi_mask(dpi, DPI_RET, reset ? RST : 0, RST);
@@ -410,12 +437,13 @@ static void mtk_dpi_config_swap_input(struct mtk_dpi *dpi, bool enable)
static void mtk_dpi_config_2n_h_fre(struct mtk_dpi *dpi)
{
- mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, H_FRE_2N, H_FRE_2N);
+ if (dpi->conf->reg_h_fre_con)
+ mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, H_FRE_2N, H_FRE_2N);
}
static void mtk_dpi_config_disable_edge(struct mtk_dpi *dpi)
{
- if (dpi->conf->edge_sel_en)
+ if (dpi->conf->edge_sel_en && dpi->conf->reg_h_fre_con)
mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, 0, EDGE_SEL_EN);
}
@@ -471,6 +499,7 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi)
mtk_dpi_disable(dpi);
clk_disable_unprepare(dpi->pixel_clk);
+ clk_disable_unprepare(dpi->tvd_clk);
clk_disable_unprepare(dpi->engine_clk);
}
@@ -487,6 +516,12 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi)
goto err_refcount;
}
+ ret = clk_prepare_enable(dpi->tvd_clk);
+ if (ret) {
+ dev_err(dpi->dev, "Failed to enable tvd pll: %d\n", ret);
+ goto err_engine;
+ }
+
ret = clk_prepare_enable(dpi->pixel_clk);
if (ret) {
dev_err(dpi->dev, "Failed to enable pixel clock: %d\n", ret);
@@ -496,32 +531,39 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi)
return 0;
err_pixel:
+ clk_disable_unprepare(dpi->tvd_clk);
+err_engine:
clk_disable_unprepare(dpi->engine_clk);
err_refcount:
dpi->refcount--;
return ret;
}
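
Note: the power-on path now brings up the TVD PLL between the engine and pixel clocks, and the error unwind gains a matching err_engine label so failures roll back in exact reverse order of the enables; mtk_dpi_power_off() above mirrors the same ordering. A generic sketch of the reverse-order unwind idiom, with placeholder clock names:

static int my_power_on(struct clk *engine, struct clk *pll, struct clk *pixel)
{
        int ret;

        ret = clk_prepare_enable(engine);
        if (ret)
                return ret;

        ret = clk_prepare_enable(pll);
        if (ret)
                goto err_engine;

        ret = clk_prepare_enable(pixel);
        if (ret)
                goto err_pll;

        return 0;

err_pll:
        clk_disable_unprepare(pll);
err_engine:
        clk_disable_unprepare(engine);
        return ret;
}
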
-static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
- struct drm_display_mode *mode)
+static unsigned int mtk_dpi_calculate_factor(struct mtk_dpi *dpi, int mode_clk)
+{
+ const struct mtk_dpi_factor *dpi_factor = dpi->conf->dpi_factor;
+ int i;
+
+ for (i = 0; i < dpi->conf->num_dpi_factor; i++) {
+ if (mode_clk <= dpi_factor[i].clock)
+ return dpi_factor[i].factor;
+ }
+
+ /* If no match try the lowest possible factor */
+ return dpi_factor[dpi->conf->num_dpi_factor - 1].factor;
+}
+
+static void mtk_dpi_set_pixel_clk(struct mtk_dpi *dpi, struct videomode *vm, int mode_clk)
{
- struct mtk_dpi_polarities dpi_pol;
- struct mtk_dpi_sync_param hsync;
- struct mtk_dpi_sync_param vsync_lodd = { 0 };
- struct mtk_dpi_sync_param vsync_leven = { 0 };
- struct mtk_dpi_sync_param vsync_rodd = { 0 };
- struct mtk_dpi_sync_param vsync_reven = { 0 };
- struct videomode vm = { 0 };
unsigned long pll_rate;
unsigned int factor;
/* let pll_rate can fix the valid range of tvdpll (1G~2GHz) */
- factor = dpi->conf->cal_factor(mode->clock);
- drm_display_mode_to_videomode(mode, &vm);
- pll_rate = vm.pixelclock * factor;
+ factor = mtk_dpi_calculate_factor(dpi, mode_clk);
+ pll_rate = vm->pixelclock * factor;
dev_dbg(dpi->dev, "Want PLL %lu Hz, pixel clock %lu Hz\n",
- pll_rate, vm.pixelclock);
+ pll_rate, vm->pixelclock);
clk_set_rate(dpi->tvd_clk, pll_rate);
pll_rate = clk_get_rate(dpi->tvd_clk);
@@ -531,20 +573,36 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
* pixels for each iteration: divide the clock by this number and
* adjust the display porches accordingly.
*/
- vm.pixelclock = pll_rate / factor;
- vm.pixelclock /= dpi->conf->pixels_per_iter;
+ vm->pixelclock = pll_rate / factor;
+ vm->pixelclock /= dpi->conf->pixels_per_iter;
if ((dpi->output_fmt == MEDIA_BUS_FMT_RGB888_2X12_LE) ||
(dpi->output_fmt == MEDIA_BUS_FMT_RGB888_2X12_BE))
- clk_set_rate(dpi->pixel_clk, vm.pixelclock * 2);
+ clk_set_rate(dpi->pixel_clk, vm->pixelclock * 2);
else
- clk_set_rate(dpi->pixel_clk, vm.pixelclock);
-
+ clk_set_rate(dpi->pixel_clk, vm->pixelclock);
- vm.pixelclock = clk_get_rate(dpi->pixel_clk);
+ vm->pixelclock = clk_get_rate(dpi->pixel_clk);
dev_dbg(dpi->dev, "Got PLL %lu Hz, pixel clock %lu Hz\n",
- pll_rate, vm.pixelclock);
+ pll_rate, vm->pixelclock);
+}
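
Note: mtk_dpi_calculate_factor() walks the per-SoC table until the mode clock (in kHz) fits a bucket, and the selected factor is what multiplies the pixel clock up into the TVDPLL's working range. As a worked example against the mt8173 table added later in this patch ({27000, 48}, {84000, 24}, {167000, 12}, {U32_MAX, 6}): a 148500 kHz mode clock lands in the "<= 167000" bucket, so factor = 12 and the requested PLL rate is 148,500,000 Hz * 12 = 1,782,000,000 Hz, i.e. 1.782 GHz, inside the 1-2 GHz window the comment in mtk_dpi_set_pixel_clk() refers to.
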
+
+static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
+ struct drm_display_mode *mode)
+{
+ struct mtk_dpi_polarities dpi_pol;
+ struct mtk_dpi_sync_param hsync;
+ struct mtk_dpi_sync_param vsync_lodd = { 0 };
+ struct mtk_dpi_sync_param vsync_leven = { 0 };
+ struct mtk_dpi_sync_param vsync_rodd = { 0 };
+ struct mtk_dpi_sync_param vsync_reven = { 0 };
+ struct videomode vm = { 0 };
+
+ drm_display_mode_to_videomode(mode, &vm);
+
+ if (!dpi->conf->clocked_by_hdmi)
+ mtk_dpi_set_pixel_clk(dpi, &vm, mode->clock);
dpi_pol.ck_pol = MTK_DPI_POLARITY_FALLING;
dpi_pol.de_pol = MTK_DPI_POLARITY_RISING;
@@ -607,12 +665,18 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
if (dpi->conf->support_direct_pin) {
mtk_dpi_config_yc_map(dpi, dpi->yc_map);
mtk_dpi_config_2n_h_fre(dpi);
- mtk_dpi_dual_edge(dpi);
+
+ /* DPI can connect to either an external bridge or the internal HDMI encoder */
+ if (dpi->conf->output_1pixel)
+ mtk_dpi_mask(dpi, DPI_CON, DPI_OUTPUT_1T1P_EN, DPI_OUTPUT_1T1P_EN);
+ else
+ mtk_dpi_dual_edge(dpi);
+
mtk_dpi_config_disable_edge(dpi);
}
- if (dpi->conf->input_2pixel) {
- mtk_dpi_mask(dpi, DPI_CON, DPINTF_INPUT_2P_EN,
- DPINTF_INPUT_2P_EN);
+ if (dpi->conf->input_2p_en_bit) {
+ mtk_dpi_mask(dpi, DPI_CON, dpi->conf->input_2p_en_bit,
+ dpi->conf->input_2p_en_bit);
}
mtk_dpi_sw_reset(dpi, false);
@@ -767,6 +831,99 @@ mtk_dpi_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
+static int mtk_dpi_debug_tp_show(struct seq_file *m, void *arg)
+{
+ struct mtk_dpi *dpi = m->private;
+ bool en;
+ u32 val;
+
+ if (!dpi)
+ return -EINVAL;
+
+ val = readl(dpi->regs + DPI_PATTERN0);
+ en = val & DPI_PAT_EN;
+ val = FIELD_GET(DPI_PAT_SEL, val);
+
+ seq_printf(m, "DPI Test Pattern: %s\n", en ? "Enabled" : "Disabled");
+
+ if (en) {
+ seq_printf(m, "Internal pattern %d: ", val);
+ switch (val) {
+ case 0:
+ seq_puts(m, "256 Vertical Gray\n");
+ break;
+ case 1:
+ seq_puts(m, "1024 Vertical Gray\n");
+ break;
+ case 2:
+ seq_puts(m, "256 Horizontal Gray\n");
+ break;
+ case 3:
+ seq_puts(m, "1024 Horizontal Gray\n");
+ break;
+ case 4:
+ seq_puts(m, "Vertical Color bars\n");
+ break;
+ case 6:
+ seq_puts(m, "Frame border\n");
+ break;
+ case 7:
+ seq_puts(m, "Dot moire\n");
+ break;
+ default:
+ seq_puts(m, "Invalid selection\n");
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static ssize_t mtk_dpi_debug_tp_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ u32 en, type;
+ char buf[6];
+
+ if (!m || !m->private || *offp || len > sizeof(buf) - 1)
+ return -EINVAL;
+
+ memset(buf, 0, sizeof(buf));
+ if (copy_from_user(buf, ubuf, len))
+ return -EFAULT;
+
+ if (sscanf(buf, "%u %u", &en, &type) != 2)
+ return -EINVAL;
+
+ if (en > 1 || type > 7)
+ return -EINVAL;
+
+ mtk_dpi_test_pattern_en((struct mtk_dpi *)m->private, type, en);
+ return len;
+}
+
+static int mtk_dpi_debug_tp_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtk_dpi_debug_tp_show, inode->i_private);
+}
+
+static const struct file_operations mtk_dpi_debug_tp_fops = {
+ .owner = THIS_MODULE,
+ .open = mtk_dpi_debug_tp_open,
+ .read = seq_read,
+ .write = mtk_dpi_debug_tp_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void mtk_dpi_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
+{
+ struct mtk_dpi *dpi = bridge_to_dpi(bridge);
+
+ debugfs_create_file("dpi_test_pattern", 0640, root, dpi, &mtk_dpi_debug_tp_fops);
+}
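
Note: the new debugfs node takes "<enable> <pattern>" on write and decodes the current state on read. As a worked example, writing "1 4" selects pattern 4 (vertical color bars); with DPI_PAT_SEL = GENMASK(6, 4) and DPI_PAT_EN = BIT(0) as added to mtk_dpi_regs.h below, the value programmed into DPI_PATTERN0 is FIELD_PREP(GENMASK(6, 4), 4) | BIT(0) = 0x40 | 0x1 = 0x41, and writing "0 0" clears both fields. Something like echo "1 4" > .../dpi_test_pattern followed by a cat of the same file would exercise it; the exact debugfs path depends on where the DRM core creates the bridge's directory, so the path here is illustrative only.
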
+
static const struct drm_bridge_funcs mtk_dpi_bridge_funcs = {
.attach = mtk_dpi_bridge_attach,
.mode_set = mtk_dpi_bridge_mode_set,
@@ -779,20 +936,23 @@ static const struct drm_bridge_funcs mtk_dpi_bridge_funcs = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
+ .debugfs_init = mtk_dpi_debugfs_init,
};
void mtk_dpi_start(struct device *dev)
{
struct mtk_dpi *dpi = dev_get_drvdata(dev);
- mtk_dpi_power_on(dpi);
+ if (!dpi->conf->clocked_by_hdmi)
+ mtk_dpi_power_on(dpi);
}
void mtk_dpi_stop(struct device *dev)
{
struct mtk_dpi *dpi = dev_get_drvdata(dev);
- mtk_dpi_power_off(dpi);
+ if (!dpi->conf->clocked_by_hdmi)
+ mtk_dpi_power_off(dpi);
}
unsigned int mtk_dpi_encoder_index(struct device *dev)
@@ -857,48 +1017,6 @@ static const struct component_ops mtk_dpi_component_ops = {
.unbind = mtk_dpi_unbind,
};
-static unsigned int mt8173_calculate_factor(int clock)
-{
- if (clock <= 27000)
- return 3 << 4;
- else if (clock <= 84000)
- return 3 << 3;
- else if (clock <= 167000)
- return 3 << 2;
- else
- return 3 << 1;
-}
-
-static unsigned int mt2701_calculate_factor(int clock)
-{
- if (clock <= 64000)
- return 4;
- else if (clock <= 128000)
- return 2;
- else
- return 1;
-}
-
-static unsigned int mt8183_calculate_factor(int clock)
-{
- if (clock <= 27000)
- return 8;
- else if (clock <= 167000)
- return 4;
- else
- return 2;
-}
-
-static unsigned int mt8195_dpintf_calculate_factor(int clock)
-{
- if (clock < 70000)
- return 4;
- else if (clock < 200000)
- return 2;
- else
- return 1;
-}
-
static const u32 mt8173_output_fmts[] = {
MEDIA_BUS_FMT_RGB888_1X24,
};
@@ -913,8 +1031,25 @@ static const u32 mt8195_output_fmts[] = {
MEDIA_BUS_FMT_YUYV8_1X16,
};
+static const struct mtk_dpi_factor dpi_factor_mt2701[] = {
+ { 64000, 4 }, { 128000, 2 }, { U32_MAX, 1 }
+};
+
+static const struct mtk_dpi_factor dpi_factor_mt8173[] = {
+ { 27000, 48 }, { 84000, 24 }, { 167000, 12 }, { U32_MAX, 6 }
+};
+
+static const struct mtk_dpi_factor dpi_factor_mt8183[] = {
+ { 27000, 8 }, { 167000, 4 }, { U32_MAX, 2 }
+};
+
+static const struct mtk_dpi_factor dpi_factor_mt8195_dp_intf[] = {
+ { 70000 - 1, 4 }, { 200000 - 1, 2 }, { U32_MAX, 1 }
+};
+
static const struct mtk_dpi_conf mt8173_conf = {
- .cal_factor = mt8173_calculate_factor,
+ .dpi_factor = dpi_factor_mt8173,
+ .num_dpi_factor = ARRAY_SIZE(dpi_factor_mt8173),
.reg_h_fre_con = 0xe0,
.max_clock_khz = 300000,
.output_fmts = mt8173_output_fmts,
@@ -931,7 +1066,8 @@ static const struct mtk_dpi_conf mt8173_conf = {
};
static const struct mtk_dpi_conf mt2701_conf = {
- .cal_factor = mt2701_calculate_factor,
+ .dpi_factor = dpi_factor_mt2701,
+ .num_dpi_factor = ARRAY_SIZE(dpi_factor_mt2701),
.reg_h_fre_con = 0xb0,
.edge_sel_en = true,
.max_clock_khz = 150000,
@@ -949,7 +1085,8 @@ static const struct mtk_dpi_conf mt2701_conf = {
};
static const struct mtk_dpi_conf mt8183_conf = {
- .cal_factor = mt8183_calculate_factor,
+ .dpi_factor = dpi_factor_mt8183,
+ .num_dpi_factor = ARRAY_SIZE(dpi_factor_mt8183),
.reg_h_fre_con = 0xe0,
.max_clock_khz = 100000,
.output_fmts = mt8183_output_fmts,
@@ -966,7 +1103,8 @@ static const struct mtk_dpi_conf mt8183_conf = {
};
static const struct mtk_dpi_conf mt8186_conf = {
- .cal_factor = mt8183_calculate_factor,
+ .dpi_factor = dpi_factor_mt8183,
+ .num_dpi_factor = ARRAY_SIZE(dpi_factor_mt8183),
.reg_h_fre_con = 0xe0,
.max_clock_khz = 150000,
.output_fmts = mt8183_output_fmts,
@@ -984,7 +1122,8 @@ static const struct mtk_dpi_conf mt8186_conf = {
};
static const struct mtk_dpi_conf mt8192_conf = {
- .cal_factor = mt8183_calculate_factor,
+ .dpi_factor = dpi_factor_mt8183,
+ .num_dpi_factor = ARRAY_SIZE(dpi_factor_mt8183),
.reg_h_fre_con = 0xe0,
.max_clock_khz = 150000,
.output_fmts = mt8183_output_fmts,
@@ -1000,18 +1139,37 @@ static const struct mtk_dpi_conf mt8192_conf = {
.csc_enable_bit = CSC_ENABLE,
};
+static const struct mtk_dpi_conf mt8195_conf = {
+ .max_clock_khz = 594000,
+ .output_fmts = mt8183_output_fmts,
+ .num_output_fmts = ARRAY_SIZE(mt8183_output_fmts),
+ .pixels_per_iter = 1,
+ .is_ck_de_pol = true,
+ .swap_input_support = true,
+ .support_direct_pin = true,
+ .dimension_mask = HPW_MASK,
+ .hvsize_mask = HSIZE_MASK,
+ .channel_swap_shift = CH_SWAP,
+ .yuv422_en_bit = YUV422_EN,
+ .csc_enable_bit = CSC_ENABLE,
+ .input_2p_en_bit = DPI_INPUT_2P_EN,
+ .clocked_by_hdmi = true,
+ .output_1pixel = true,
+};
+
static const struct mtk_dpi_conf mt8195_dpintf_conf = {
- .cal_factor = mt8195_dpintf_calculate_factor,
+ .dpi_factor = dpi_factor_mt8195_dp_intf,
+ .num_dpi_factor = ARRAY_SIZE(dpi_factor_mt8195_dp_intf),
.max_clock_khz = 600000,
.output_fmts = mt8195_output_fmts,
.num_output_fmts = ARRAY_SIZE(mt8195_output_fmts),
.pixels_per_iter = 4,
- .input_2pixel = true,
.dimension_mask = DPINTF_HPW_MASK,
.hvsize_mask = DPINTF_HSIZE_MASK,
.channel_swap_shift = DPINTF_CH_SWAP,
.yuv422_en_bit = DPINTF_YUV422_EN,
.csc_enable_bit = DPINTF_CSC_ENABLE,
+ .input_2p_en_bit = DPINTF_INPUT_2P_EN,
};
static int mtk_dpi_probe(struct platform_device *pdev)
@@ -1102,6 +1260,7 @@ static const struct of_device_id mtk_dpi_of_ids[] = {
{ .compatible = "mediatek,mt8188-dp-intf", .data = &mt8195_dpintf_conf },
{ .compatible = "mediatek,mt8192-dpi", .data = &mt8192_conf },
{ .compatible = "mediatek,mt8195-dp-intf", .data = &mt8195_dpintf_conf },
+ { .compatible = "mediatek,mt8195-dpi", .data = &mt8195_conf },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mtk_dpi_of_ids);
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi_regs.h b/drivers/gpu/drm/mediatek/mtk_dpi_regs.h
index 62bd4931b344..23eeefce8fd2 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi_regs.h
+++ b/drivers/gpu/drm/mediatek/mtk_dpi_regs.h
@@ -40,6 +40,11 @@
#define FAKE_DE_LEVEN BIT(21)
#define FAKE_DE_RODD BIT(22)
#define FAKE_DE_REVEN BIT(23)
+
+/* DPI_CON: DPI instances */
+#define DPI_OUTPUT_1T1P_EN BIT(24)
+#define DPI_INPUT_2P_EN BIT(25)
+/* DPI_CON: DPINTF instances */
#define DPINTF_YUV422_EN BIT(24)
#define DPINTF_CSC_ENABLE BIT(26)
#define DPINTF_INPUT_2P_EN BIT(29)
@@ -235,4 +240,8 @@
#define MATRIX_SEL_RGB_TO_JPEG 0
#define MATRIX_SEL_RGB_TO_BT601 2
+#define DPI_PATTERN0 0xf00
+#define DPI_PAT_EN BIT(0)
+#define DPI_PAT_SEL GENMASK(6, 4)
+
#endif /* __MTK_DPI_REGS_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index f22ad2882697..74158b9d6503 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -327,6 +327,10 @@ static const struct mtk_mmsys_driver_data mt8195_vdosys1_driver_data = {
.min_height = 1,
};
+static const struct mtk_mmsys_driver_data mt8365_mmsys_driver_data = {
+ .mmsys_dev_num = 1,
+};
+
static const struct of_device_id mtk_drm_of_ids[] = {
{ .compatible = "mediatek,mt2701-mmsys",
.data = &mt2701_mmsys_driver_data},
@@ -354,6 +358,8 @@ static const struct of_device_id mtk_drm_of_ids[] = {
.data = &mt8195_vdosys0_driver_data},
{ .compatible = "mediatek,mt8195-vdosys1",
.data = &mt8195_vdosys1_driver_data},
+ { .compatible = "mediatek,mt8365-mmsys",
+ .data = &mt8365_mmsys_driver_data},
{ }
};
MODULE_DEVICE_TABLE(of, mtk_drm_of_ids);
@@ -754,6 +760,8 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
.data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8195-disp-mutex",
.data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt8365-disp-mutex",
+ .data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8173-disp-od",
.data = (void *)MTK_DISP_OD },
{ .compatible = "mediatek,mt2701-disp-ovl",
@@ -810,6 +818,8 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
.data = (void *)MTK_DPI },
{ .compatible = "mediatek,mt8195-dp-intf",
.data = (void *)MTK_DP_INTF },
+ { .compatible = "mediatek,mt8195-dpi",
+ .data = (void *)MTK_DPI },
{ .compatible = "mediatek,mt2701-dsi",
.data = (void *)MTK_DSI },
{ .compatible = "mediatek,mt8173-dsi",
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 0683c2b3ca5b..d1f407fb7eb1 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -1116,12 +1116,12 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct mtk_dsi *dsi = host_to_dsi(host);
- u32 recv_cnt, i;
+ ssize_t recv_cnt;
u8 read_data[16];
void *src_addr;
u8 irq_flag = CMD_DONE_INT_FLAG;
u32 dsi_mode;
- int ret;
+ int ret, i;
dsi_mode = readl(dsi->regs + DSI_MODE_CTRL);
if (dsi_mode & MODE) {
@@ -1170,7 +1170,7 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
if (recv_cnt)
memcpy(msg->rx_buf, src_addr, recv_cnt);
- DRM_INFO("dsi get %d byte data from the panel address(0x%x)\n",
+ DRM_INFO("dsi get %zd byte data from the panel address(0x%x)\n",
recv_cnt, *((u8 *)(msg->tx_buf)));
restore_dsi_mode:
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index d4ab098e1174..06e4fac152b7 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -137,7 +137,7 @@ enum hdmi_aud_channel_swap_type {
struct hdmi_audio_param {
enum hdmi_audio_coding_type aud_codec;
- enum hdmi_audio_sample_size aud_sampe_size;
+ enum hdmi_audio_sample_size aud_sample_size;
enum hdmi_aud_input_type aud_input_type;
enum hdmi_aud_i2s_fmt aud_i2s_fmt;
enum hdmi_aud_mclk aud_mclk;
@@ -163,16 +163,10 @@ struct mtk_hdmi {
struct clk *clk[MTK_HDMI_CLK_COUNT];
struct drm_display_mode mode;
bool dvi_mode;
- u32 min_clock;
- u32 max_clock;
- u32 max_hdisplay;
- u32 max_vdisplay;
- u32 ibias;
- u32 ibias_up;
struct regmap *sys_regmap;
unsigned int sys_offset;
void __iomem *regs;
- enum hdmi_colorspace csp;
+ struct platform_device *audio_pdev;
struct hdmi_audio_param aud_param;
bool audio_enable;
bool powered;
@@ -987,15 +981,14 @@ static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi,
return 0;
}
-static int mtk_hdmi_setup_spd_infoframe(struct mtk_hdmi *hdmi,
- const char *vendor,
- const char *product)
+static int mtk_hdmi_setup_spd_infoframe(struct mtk_hdmi *hdmi)
{
+ struct drm_bridge *bridge = &hdmi->bridge;
struct hdmi_spd_infoframe frame;
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_SPD_INFOFRAME_SIZE];
ssize_t err;
- err = hdmi_spd_infoframe_init(&frame, vendor, product);
+ err = hdmi_spd_infoframe_init(&frame, bridge->vendor, bridge->product);
if (err < 0) {
dev_err(hdmi->dev, "Failed to initialize SPD infoframe: %zd\n",
err);
@@ -1072,9 +1065,8 @@ static int mtk_hdmi_output_init(struct mtk_hdmi *hdmi)
{
struct hdmi_audio_param *aud_param = &hdmi->aud_param;
- hdmi->csp = HDMI_COLORSPACE_RGB;
aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
- aud_param->aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
aud_param->aud_input_type = HDMI_AUD_INPUT_I2S;
aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
aud_param->aud_mclk = HDMI_AUD_MCLK_128FS;
@@ -1167,13 +1159,12 @@ static int mtk_hdmi_clk_enable_audio(struct mtk_hdmi *hdmi)
return ret;
ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_AUD_SPDIF]);
- if (ret)
- goto err;
+ if (ret) {
+ clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]);
+ return ret;
+ }
return 0;
-err:
- clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]);
- return ret;
}
static void mtk_hdmi_clk_disable_audio(struct mtk_hdmi *hdmi)
@@ -1377,7 +1368,7 @@ static void mtk_hdmi_send_infoframe(struct mtk_hdmi *hdmi,
{
mtk_hdmi_setup_audio_infoframe(hdmi);
mtk_hdmi_setup_avi_infoframe(hdmi, mode);
- mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI");
+ mtk_hdmi_setup_spd_infoframe(hdmi);
if (mode->flags & DRM_MODE_FLAG_3D_MASK)
mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
}
@@ -1569,14 +1560,14 @@ static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
switch (daifmt->fmt) {
case HDMI_I2S:
hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
- hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
hdmi_params.aud_input_type = HDMI_AUD_INPUT_I2S;
hdmi_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
hdmi_params.aud_mclk = HDMI_AUD_MCLK_128FS;
break;
case HDMI_SPDIF:
hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
- hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
hdmi_params.aud_input_type = HDMI_AUD_INPUT_SPDIF;
break;
default:
@@ -1659,6 +1650,11 @@ static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = {
.hook_plugged_cb = mtk_hdmi_audio_hook_plugged_cb,
};
+static void mtk_hdmi_unregister_audio_driver(void *data)
+{
+ platform_device_unregister(data);
+}
+
static int mtk_hdmi_register_audio_driver(struct device *dev)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
@@ -1669,15 +1665,21 @@ static int mtk_hdmi_register_audio_driver(struct device *dev)
.data = hdmi,
.no_capture_mute = 1,
};
- struct platform_device *pdev;
+ int ret;
- pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO, &codec_data,
- sizeof(codec_data));
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
+ hdmi->audio_pdev = platform_device_register_data(dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_data,
+ sizeof(codec_data));
+ if (IS_ERR(hdmi->audio_pdev))
+ return PTR_ERR(hdmi->audio_pdev);
+
+ ret = devm_add_action_or_reset(dev, mtk_hdmi_unregister_audio_driver,
+ hdmi->audio_pdev);
+ if (ret)
+ return ret;
- DRM_INFO("%s driver bound to HDMI\n", HDMI_CODEC_DRV_NAME);
return 0;
}
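
Note: registration of the hdmi-codec child device is now devm-managed. devm_add_action_or_reset() either queues platform_device_unregister() to run when the HDMI device is unbound, or, if adding the action fails, runs it immediately and returns the error, so later error paths need no manual unwind. A minimal sketch of the idiom with illustrative names:

static void my_unregister_child(void *data)
{
        platform_device_unregister(data);
}

static int my_register_child(struct device *parent)
{
        struct platform_device *child;

        child = platform_device_register_data(parent, "my-child",
                                              PLATFORM_DEVID_AUTO, NULL, 0);
        if (IS_ERR(child))
                return PTR_ERR(child);

        /* Unregisters the child on detach, or right away on failure. */
        return devm_add_action_or_reset(parent, my_unregister_child, child);
}
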
@@ -1721,14 +1723,17 @@ static int mtk_hdmi_probe(struct platform_device *pdev)
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
- drm_bridge_add(&hdmi->bridge);
+ hdmi->bridge.vendor = "MediaTek";
+ hdmi->bridge.product = "On-Chip HDMI";
+
+ ret = devm_drm_bridge_add(dev, &hdmi->bridge);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add bridge\n");
ret = mtk_hdmi_clk_enable_audio(hdmi);
- if (ret) {
- drm_bridge_remove(&hdmi->bridge);
+ if (ret)
return dev_err_probe(dev, ret,
"Failed to enable audio clocks\n");
- }
return 0;
}
@@ -1737,12 +1742,10 @@ static void mtk_hdmi_remove(struct platform_device *pdev)
{
struct mtk_hdmi *hdmi = platform_get_drvdata(pdev);
- drm_bridge_remove(&hdmi->bridge);
mtk_hdmi_clk_disable_audio(hdmi);
}
-#ifdef CONFIG_PM_SLEEP
-static int mtk_hdmi_suspend(struct device *dev)
+static __maybe_unused int mtk_hdmi_suspend(struct device *dev)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
@@ -1751,22 +1754,14 @@ static int mtk_hdmi_suspend(struct device *dev)
return 0;
}
-static int mtk_hdmi_resume(struct device *dev)
+static __maybe_unused int mtk_hdmi_resume(struct device *dev)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
- int ret = 0;
-
- ret = mtk_hdmi_clk_enable_audio(hdmi);
- if (ret) {
- dev_err(dev, "hdmi resume failed!\n");
- return ret;
- }
- return 0;
+ return mtk_hdmi_clk_enable_audio(hdmi);
}
-#endif
-static SIMPLE_DEV_PM_OPS(mtk_hdmi_pm_ops,
- mtk_hdmi_suspend, mtk_hdmi_resume);
+
+static SIMPLE_DEV_PM_OPS(mtk_hdmi_pm_ops, mtk_hdmi_suspend, mtk_hdmi_resume);
static const struct mtk_hdmi_conf mtk_hdmi_conf_mt2701 = {
.tz_disabled = true,
@@ -1778,15 +1773,10 @@ static const struct mtk_hdmi_conf mtk_hdmi_conf_mt8167 = {
};
static const struct of_device_id mtk_hdmi_of_ids[] = {
- { .compatible = "mediatek,mt2701-hdmi",
- .data = &mtk_hdmi_conf_mt2701,
- },
- { .compatible = "mediatek,mt8167-hdmi",
- .data = &mtk_hdmi_conf_mt8167,
- },
- { .compatible = "mediatek,mt8173-hdmi",
- },
- {}
+ { .compatible = "mediatek,mt2701-hdmi", .data = &mtk_hdmi_conf_mt2701 },
+ { .compatible = "mediatek,mt8167-hdmi", .data = &mtk_hdmi_conf_mt8167 },
+ { .compatible = "mediatek,mt8173-hdmi" },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_hdmi_of_ids);
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 7ec833b6d829..974bc7c0ea76 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -170,6 +170,8 @@ config DRM_MSM_HDMI
bool "Enable HDMI support in MSM DRM driver"
depends on DRM_MSM
default y
+ select DRM_DISPLAY_HDMI_HELPER
+ select DRM_DISPLAY_HDMI_STATE_HELPER
help
Compile in support for the HDMI output MSM DRM driver. It can
be a primary or a secondary display on device. Note that this is used
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index edffb7737a97..53e2ff4406d8 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -880,6 +880,35 @@ static const struct adreno_info a6xx_gpus[] = {
{ 137, 1 },
),
}, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06020300),
+ .family = ADRENO_6XX_GEN3,
+ .fw = {
+ [ADRENO_FW_SQE] = "a650_sqe.fw",
+ [ADRENO_FW_GMU] = "a623_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .init = a6xx_gpu_init,
+ .a6xx = &(const struct a6xx_info) {
+ .hwcg = a690_hwcg,
+ .protect = &a650_protect,
+ .gmu_cgc_mode = 0x00020200,
+ .prim_fifo_threshold = 0x00010000,
+ .bcms = (const struct a6xx_bcm[]) {
+ { .name = "SH0", .buswidth = 16 },
+ { .name = "MC0", .buswidth = 4 },
+ {
+ .name = "ACV",
+ .fixed = true,
+ .perfmode = BIT(3),
+ },
+ { /* sentinel */ },
+ },
+ },
+ .address_space_size = SZ_16G,
+ }, {
.chip_ids = ADRENO_CHIP_IDS(
0x06030001,
0x06030002
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 699b0dd34b18..38c94915d4c9 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1169,49 +1169,50 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
u32 val;
+ int ret;
/*
- * The GMU may still be in slumber unless the GPU started so check and
- * skip putting it back into slumber if so
+ * GMU firmware's internal power state gets messed up if we send "prepare_slumber" hfi when
+ * oob_gpu handshake wasn't done after the last wake up. So do a dummy handshake here when
+ * required
*/
- val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+ if (adreno_gpu->base.needs_hw_init) {
+ if (a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET))
+ goto force_off;
- if (val != 0xf) {
- int ret = a6xx_gmu_wait_for_idle(gmu);
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ }
- /* If the GMU isn't responding assume it is hung */
- if (ret) {
- a6xx_gmu_force_off(gmu);
- return;
- }
+ ret = a6xx_gmu_wait_for_idle(gmu);
- a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
+ /* If the GMU isn't responding assume it is hung */
+ if (ret)
+ goto force_off;
- /* tell the GMU we want to slumber */
- ret = a6xx_gmu_notify_slumber(gmu);
- if (ret) {
- a6xx_gmu_force_off(gmu);
- return;
- }
+ a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
- ret = gmu_poll_timeout(gmu,
- REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
- !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
- 100, 10000);
+ /* tell the GMU we want to slumber */
+ ret = a6xx_gmu_notify_slumber(gmu);
+ if (ret)
+ goto force_off;
- /*
- * Let the user know we failed to slumber but don't worry too
- * much because we are powering down anyway
- */
+ ret = gmu_poll_timeout(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
+ !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
+ 100, 10000);
- if (ret)
- DRM_DEV_ERROR(gmu->dev,
- "Unable to slumber GMU: status = 0%x/0%x\n",
- gmu_read(gmu,
- REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
- gmu_read(gmu,
- REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
- }
+ /*
+ * Let the user know we failed to slumber but don't worry too
+ * much because we are powering down anyway
+ */
+
+ if (ret)
+ DRM_DEV_ERROR(gmu->dev,
+ "Unable to slumber GMU: status = 0%x/0%x\n",
+ gmu_read(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
+ gmu_read(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
/* Turn off HFI */
a6xx_hfi_stop(gmu);
@@ -1221,6 +1222,11 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
/* Tell RPMh to power off the GPU */
a6xx_rpmh_stop(gmu);
+
+ return;
+
+force_off:
+ a6xx_gmu_force_off(gmu);
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 0ae29a7c8a4d..1820c167fcee 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -616,6 +616,14 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
gpu->ubwc_config.uavflagprd_inv = 2;
}
+ if (adreno_is_a623(gpu)) {
+ gpu->ubwc_config.highest_bank_bit = 16;
+ gpu->ubwc_config.amsbc = 1;
+ gpu->ubwc_config.rgb565_predicator = 1;
+ gpu->ubwc_config.uavflagprd_inv = 2;
+ gpu->ubwc_config.macrotile_mode = 1;
+ }
+
if (adreno_is_a640_family(gpu))
gpu->ubwc_config.amsbc = 1;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 0fcae53c0b14..341a72a67401 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -1214,12 +1214,12 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
- 3, sizeof(*a6xx_state->gmu_registers));
+ 4, sizeof(*a6xx_state->gmu_registers));
if (!a6xx_state->gmu_registers)
return;
- a6xx_state->nr_gmu_registers = 3;
+ a6xx_state->nr_gmu_registers = 4;
/* Get the CX GMU registers from AHB */
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
@@ -1227,6 +1227,13 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[1],
&a6xx_state->gmu_registers[1], true);
+ if (adreno_is_a621(adreno_gpu) || adreno_is_a623(adreno_gpu))
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a621_gpucc_reg,
+ &a6xx_state->gmu_registers[2], false);
+ else
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gpucc_reg,
+ &a6xx_state->gmu_registers[2], false);
+
if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
return;
@@ -1234,7 +1241,7 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[2],
- &a6xx_state->gmu_registers[2], false);
+ &a6xx_state->gmu_registers[3], false);
}
static struct msm_gpu_state_bo *a6xx_snapshot_gmu_bo(
@@ -1507,6 +1514,8 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
/* Restore the size in the hardware */
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, mempool_size);
+
+ a6xx_state->nr_indexed_regs = count;
}
static void a7xx_get_indexed_registers(struct msm_gpu *gpu,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index dd4c28a8d923..e545106c70be 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -363,6 +363,9 @@ static const u32 a6xx_gmu_cx_registers[] = {
0x51e0, 0x51e2, 0x51f0, 0x51f0, 0x5200, 0x5201,
/* GMU AO */
0x9300, 0x9316, 0x9400, 0x9400,
+};
+
+static const u32 a6xx_gmu_gpucc_registers[] = {
/* GPU CC */
0x9800, 0x9812, 0x9840, 0x9852, 0x9c00, 0x9c04, 0x9c07, 0x9c0b,
0x9c15, 0x9c1c, 0x9c1e, 0x9c2d, 0x9c3c, 0x9c3d, 0x9c3f, 0x9c40,
@@ -373,6 +376,17 @@ static const u32 a6xx_gmu_cx_registers[] = {
0xbc00, 0xbc16, 0xbc20, 0xbc27,
};
+static const u32 a621_gmu_gpucc_registers[] = {
+ /* GPU CC */
+ 0x9800, 0x980e, 0x9c00, 0x9c0e, 0xb000, 0xb004, 0xb400, 0xb404,
+ 0xb800, 0xb804, 0xbc00, 0xbc05, 0xbc14, 0xbc1d, 0xbc2a, 0xbc30,
+ 0xbc32, 0xbc32, 0xbc41, 0xbc55, 0xbc66, 0xbc68, 0xbc78, 0xbc7a,
+ 0xbc89, 0xbc8a, 0xbc9c, 0xbc9e, 0xbca0, 0xbca3, 0xbcb3, 0xbcb5,
+ 0xbcc5, 0xbcc7, 0xbcd6, 0xbcd8, 0xbce8, 0xbce9, 0xbcf9, 0xbcfc,
+ 0xbd0b, 0xbd0c, 0xbd1c, 0xbd1e, 0xbd40, 0xbd70, 0xbe00, 0xbe16,
+ 0xbe20, 0xbe2d,
+};
+
static const u32 a6xx_gmu_cx_rscc_registers[] = {
/* GPU RSCC */
0x008c, 0x008c, 0x0101, 0x0102, 0x0340, 0x0342, 0x0344, 0x0347,
@@ -386,6 +400,9 @@ static const struct a6xx_registers a6xx_gmu_reglist[] = {
REGS(a6xx_gmu_gx_registers, 0, 0),
};
+static const struct a6xx_registers a6xx_gpucc_reg = REGS(a6xx_gmu_gpucc_registers, 0, 0);
+static const struct a6xx_registers a621_gpucc_reg = REGS(a621_gmu_gpucc_registers, 0, 0);
+
static u32 a6xx_get_cp_roq_size(struct msm_gpu *gpu);
static u32 a7xx_get_cp_roq_size(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 1238f3265978..7156cda07b03 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -883,6 +883,16 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
drm_printf(p, " - dir=%s\n", info->flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ");
drm_printf(p, " - type=%s\n", info->type);
drm_printf(p, " - source=%s\n", info->block);
+
+ /* Information extracted from what we think are the current
+ * pgtables. Hopefully the TTBR0 matches what we've extracted
+ * from the SMMU registers in smmu_info!
+ */
+ drm_puts(p, "pgtable-fault-info:\n");
+ drm_printf(p, " - ttbr0: %.16llx\n", (u64)info->pgtbl_ttbr0);
+ drm_printf(p, " - asid: %d\n", info->asid);
+ drm_printf(p, " - ptes: %.16llx %.16llx %.16llx %.16llx\n",
+ info->ptes[0], info->ptes[1], info->ptes[2], info->ptes[3]);
}
drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index dcf454629ce0..92caba3584da 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -442,6 +442,11 @@ static inline int adreno_is_a621(const struct adreno_gpu *gpu)
return gpu->info->chip_ids[0] == 0x06020100;
}
+static inline int adreno_is_a623(const struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x06020300;
+}
+
static inline int adreno_is_a630(const struct adreno_gpu *gpu)
{
return adreno_is_revn(gpu, 630);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
index bcb39807fe61..6ac97c378056 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
@@ -343,8 +343,8 @@ static const struct dpu_wb_cfg sm8650_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
.vbif_idx = VBIF_RT,
.maxlinewidth = 4096,
@@ -452,6 +452,7 @@ const struct dpu_mdss_cfg dpu_sm8650_cfg = {
.mdss_ver = &sm8650_mdss_ver,
.caps = &sm8650_dpu_caps,
.mdp = &sm8650_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8650_ctl),
.ctl = sm8650_ctl,
.sspp_count = ARRAY_SIZE(sm8650_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
index ab3dfb0b374e..1f32807bb5e5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
@@ -190,6 +190,7 @@ const struct dpu_mdss_cfg dpu_msm8937_cfg = {
.mdss_ver = &msm8937_mdss_ver,
.caps = &msm8937_dpu_caps,
.mdp = msm8937_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(msm8937_ctl),
.ctl = msm8937_ctl,
.sspp_count = ARRAY_SIZE(msm8937_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
index 6bdaecca6761..42131959ff22 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
@@ -167,6 +167,7 @@ const struct dpu_mdss_cfg dpu_msm8917_cfg = {
.mdss_ver = &msm8917_mdss_ver,
.caps = &msm8917_dpu_caps,
.mdp = msm8917_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(msm8917_ctl),
.ctl = msm8917_ctl,
.sspp_count = ARRAY_SIZE(msm8917_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
index 14f36ea6ad0e..2b4723a5c676 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
@@ -198,6 +198,7 @@ const struct dpu_mdss_cfg dpu_msm8953_cfg = {
.mdss_ver = &msm8953_mdss_ver,
.caps = &msm8953_dpu_caps,
.mdp = msm8953_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(msm8953_ctl),
.ctl = msm8953_ctl,
.sspp_count = ARRAY_SIZE(msm8953_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
index 491f6f5827d1..5cf19de71f06 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
@@ -316,6 +316,7 @@ const struct dpu_mdss_cfg dpu_msm8996_cfg = {
.mdss_ver = &msm8996_mdss_ver,
.caps = &msm8996_dpu_caps,
.mdp = msm8996_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(msm8996_ctl),
.ctl = msm8996_ctl,
.sspp_count = ARRAY_SIZE(msm8996_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
index 64c94e919a69..746474679ef5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
@@ -302,6 +302,7 @@ const struct dpu_mdss_cfg dpu_msm8998_cfg = {
.mdss_ver = &msm8998_mdss_ver,
.caps = &msm8998_dpu_caps,
.mdp = &msm8998_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(msm8998_ctl),
.ctl = msm8998_ctl,
.sspp_count = ARRAY_SIZE(msm8998_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
index 424815e7fb7d..4f2f68b07f20 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
@@ -269,6 +269,7 @@ const struct dpu_mdss_cfg dpu_sdm660_cfg = {
.mdss_ver = &sdm660_mdss_ver,
.caps = &sdm660_dpu_caps,
.mdp = &sdm660_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(sdm660_ctl),
.ctl = sdm660_ctl,
.sspp_count = ARRAY_SIZE(sdm660_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
index df01227fc364..c70bef025ac4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
@@ -205,6 +205,7 @@ const struct dpu_mdss_cfg dpu_sdm630_cfg = {
.mdss_ver = &sdm630_mdss_ver,
.caps = &sdm630_dpu_caps,
.mdp = &sdm630_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(sdm630_ctl),
.ctl = sdm630_ctl,
.sspp_count = ARRAY_SIZE(sdm630_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
index 72bd4f7e9e50..ab7b4822ca63 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
@@ -319,6 +319,7 @@ const struct dpu_mdss_cfg dpu_sdm845_cfg = {
.mdss_ver = &sdm845_mdss_ver,
.caps = &sdm845_dpu_caps,
.mdp = &sdm845_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(sdm845_ctl),
.ctl = sdm845_ctl,
.sspp_count = ARRAY_SIZE(sdm845_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
index daef07924886..c2fde980fb52 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
@@ -132,6 +132,7 @@ const struct dpu_mdss_cfg dpu_sdm670_cfg = {
.mdss_ver = &sdm670_mdss_ver,
.caps = &sdm845_dpu_caps,
.mdp = &sdm670_mdp,
+ .cdm = &dpu_cdm_1_x_4_x,
.ctl_count = ARRAY_SIZE(sdm845_ctl),
.ctl = sdm845_ctl,
.sspp_count = ARRAY_SIZE(sdm670_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
index 36cc9dbc00b5..979527d98fbc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -298,8 +298,8 @@ static const struct dpu_wb_cfg sm8150_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -388,6 +388,7 @@ const struct dpu_mdss_cfg dpu_sm8150_cfg = {
.mdss_ver = &sm8150_mdss_ver,
.caps = &sm8150_dpu_caps,
.mdp = &sm8150_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8150_ctl),
.ctl = sm8150_ctl,
.sspp_count = ARRAY_SIZE(sm8150_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index e8eacdb47967..d76b8992a6c1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -305,8 +305,8 @@ static const struct dpu_wb_cfg sc8180x_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -414,6 +414,7 @@ const struct dpu_mdss_cfg dpu_sc8180x_cfg = {
.mdss_ver = &sc8180x_mdss_ver,
.caps = &sc8180x_dpu_caps,
.mdp = &sc8180x_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sc8180x_ctl),
.ctl = sc8180x_ctl,
.sspp_count = ARRAY_SIZE(sc8180x_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
index 2fe674d1e059..83db11339b29 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
@@ -261,8 +261,8 @@ static const struct dpu_wb_cfg sm7150_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -309,6 +309,7 @@ const struct dpu_mdss_cfg dpu_sm7150_cfg = {
.mdss_ver = &sm7150_mdss_ver,
.caps = &sm7150_dpu_caps,
.mdp = &sm7150_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm7150_ctl),
.ctl = sm7150_ctl,
.sspp_count = ARRAY_SIZE(sm7150_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
index d761ed705bac..da11830d4407 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
@@ -27,6 +27,7 @@ static const struct dpu_mdp_cfg sm6150_mdp = {
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
@@ -162,6 +163,21 @@ static const struct dpu_pingpong_cfg sm6150_pp[] = {
},
};
+static const struct dpu_wb_cfg sm6150_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 2160,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
static const struct dpu_intf_cfg sm6150_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -232,6 +248,7 @@ const struct dpu_mdss_cfg dpu_sm6150_cfg = {
.mdss_ver = &sm6150_mdss_ver,
.caps = &sm6150_dpu_caps,
.mdp = &sm6150_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm6150_ctl),
.ctl = sm6150_ctl,
.sspp_count = ARRAY_SIZE(sm6150_sspp),
@@ -242,6 +259,8 @@ const struct dpu_mdss_cfg dpu_sm6150_cfg = {
.dspp = sm6150_dspp,
.pingpong_count = ARRAY_SIZE(sm6150_pp),
.pingpong = sm6150_pp,
+ .wb_count = ARRAY_SIZE(sm6150_wb),
+ .wb = sm6150_wb,
.intf_count = ARRAY_SIZE(sm6150_intf),
.intf = sm6150_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
index 76f60a2df7a8..d3d3a34d0b45 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
@@ -145,8 +145,8 @@ static const struct dpu_wb_cfg sm6125_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -216,6 +216,7 @@ const struct dpu_mdss_cfg dpu_sm6125_cfg = {
.mdss_ver = &sm6125_mdss_ver,
.caps = &sm6125_dpu_caps,
.mdp = &sm6125_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm6125_ctl),
.ctl = sm6125_ctl,
.sspp_count = ARRAY_SIZE(sm6125_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
index e8916ae826a6..47e01c3c242f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
@@ -386,7 +386,7 @@ const struct dpu_mdss_cfg dpu_sm8250_cfg = {
.mdss_ver = &sm8250_mdss_ver,
.caps = &sm8250_dpu_caps,
.mdp = &sm8250_mdp,
- .cdm = &sc7280_cdm,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8250_ctl),
.ctl = sm8250_ctl,
.sspp_count = ARRAY_SIZE(sm8250_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
index 7382ebb6e5b2..040c94c0bb66 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
@@ -157,8 +157,8 @@ static const struct dpu_wb_cfg sc7180_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -204,6 +204,7 @@ const struct dpu_mdss_cfg dpu_sc7180_cfg = {
.mdss_ver = &sc7180_mdss_ver,
.caps = &sc7180_dpu_caps,
.mdp = &sc7180_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sc7180_ctl),
.ctl = sc7180_ctl,
.sspp_count = ARRAY_SIZE(sc7180_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
index 0502cee2f116..397278ba999b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
@@ -151,8 +151,8 @@ static const struct dpu_wb_cfg sm6350_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -222,6 +222,7 @@ const struct dpu_mdss_cfg dpu_sm6350_cfg = {
.mdss_ver = &sm6350_mdss_ver,
.caps = &sm6350_dpu_caps,
.mdp = &sm6350_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm6350_ctl),
.ctl = sm6350_ctl,
.sspp_count = ARRAY_SIZE(sm6350_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
index f7c08e89c882..0c860e804cab 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
@@ -305,8 +305,8 @@ static const struct dpu_wb_cfg sm8350_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -396,6 +396,7 @@ const struct dpu_mdss_cfg dpu_sm8350_cfg = {
.mdss_ver = &sm8350_mdss_ver,
.caps = &sm8350_dpu_caps,
.mdp = &sm8350_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8350_ctl),
.ctl = sm8350_ctl,
.sspp_count = ARRAY_SIZE(sm8350_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
index 2f153e0b5c6a..e9625c48c567 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
@@ -248,7 +248,7 @@ const struct dpu_mdss_cfg dpu_sc7280_cfg = {
.mdss_ver = &sc7280_mdss_ver,
.caps = &sc7280_dpu_caps,
.mdp = &sc7280_mdp,
- .cdm = &sc7280_cdm,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sc7280_ctl),
.ctl = sc7280_ctl,
.sspp_count = ARRAY_SIZE(sc7280_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
index 0d143e390eca..fcee1c3665f8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
@@ -435,6 +435,7 @@ const struct dpu_mdss_cfg dpu_sc8280xp_cfg = {
.mdss_ver = &sc8280xp_mdss_ver,
.caps = &sc8280xp_dpu_caps,
.mdp = &sc8280xp_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sc8280xp_ctl),
.ctl = sc8280xp_ctl,
.sspp_count = ARRAY_SIZE(sc8280xp_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
index 08742472f9cc..19b2ee8bbd5f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
@@ -321,8 +321,8 @@ static const struct dpu_wb_cfg sm8450_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -412,6 +412,7 @@ const struct dpu_mdss_cfg dpu_sm8450_cfg = {
.mdss_ver = &sm8450_mdss_ver,
.caps = &sm8450_dpu_caps,
.mdp = &sm8450_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8450_ctl),
.ctl = sm8450_ctl,
.sspp_count = ARRAY_SIZE(sm8450_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
index 76ec72a32378..4d96ce71746f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
@@ -458,7 +458,7 @@ const struct dpu_mdss_cfg dpu_sa8775p_cfg = {
.mdss_ver = &sa8775p_mdss_ver,
.caps = &sa8775p_dpu_caps,
.mdp = &sa8775p_mdp,
- .cdm = &sc7280_cdm,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sa8775p_ctl),
.ctl = sa8775p_ctl,
.sspp_count = ARRAY_SIZE(sa8775p_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
index 4d3787fceb72..24f988465bf6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
@@ -317,8 +317,8 @@ static const struct dpu_wb_cfg sm8550_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
.vbif_idx = VBIF_RT,
.maxlinewidth = 4096,
@@ -407,6 +407,7 @@ const struct dpu_mdss_cfg dpu_sm8550_cfg = {
.mdss_ver = &sm8550_mdss_ver,
.caps = &sm8550_dpu_caps,
.mdp = &sm8550_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(sm8550_ctl),
.ctl = sm8550_ctl,
.sspp_count = ARRAY_SIZE(sm8550_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
index 6b112e3d17da..6417baa84f82 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
@@ -317,8 +317,8 @@ static const struct dpu_wb_cfg x1e80100_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats_rgb,
- .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.xin_id = 6,
.vbif_idx = VBIF_RT,
.maxlinewidth = 4096,
@@ -453,6 +453,7 @@ const struct dpu_mdss_cfg dpu_x1e80100_cfg = {
.mdss_ver = &x1e80100_mdss_ver,
.caps = &x1e80100_dpu_caps,
.mdp = &x1e80100_mdp,
+ .cdm = &dpu_cdm_5_x,
.ctl_count = ARRAY_SIZE(x1e80100_ctl),
.ctl = x1e80100_ctl,
.sspp_count = ARRAY_SIZE(x1e80100_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 6f0a37f954fe..0fb5789c60d0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -118,26 +118,38 @@ static void _dpu_core_perf_calc_crtc(const struct dpu_core_perf *core_perf,
return;
}
- memset(perf, 0, sizeof(struct dpu_core_perf_params));
-
- if (core_perf->perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
- perf->bw_ctl = 0;
- perf->max_per_pipe_ib = 0;
- perf->core_clk_rate = 0;
- } else if (core_perf->perf_tune.mode == DPU_PERF_MODE_FIXED) {
- perf->bw_ctl = core_perf->fix_core_ab_vote;
- perf->max_per_pipe_ib = core_perf->fix_core_ib_vote;
- perf->core_clk_rate = core_perf->fix_core_clk_rate;
- } else {
- perf->bw_ctl = _dpu_core_perf_calc_bw(perf_cfg, crtc);
- perf->max_per_pipe_ib = perf_cfg->min_dram_ib;
- perf->core_clk_rate = _dpu_core_perf_calc_clk(perf_cfg, crtc, state);
- }
-
+ perf->bw_ctl = _dpu_core_perf_calc_bw(perf_cfg, crtc);
+ perf->max_per_pipe_ib = perf_cfg->min_dram_ib;
+ perf->core_clk_rate = _dpu_core_perf_calc_clk(perf_cfg, crtc, state);
DRM_DEBUG_ATOMIC(
- "crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu\n",
+ "crtc=%d clk_rate=%llu core_ib=%u core_ab=%u\n",
crtc->base.id, perf->core_clk_rate,
- perf->max_per_pipe_ib, perf->bw_ctl);
+ perf->max_per_pipe_ib,
+ (u32)DIV_ROUND_UP_ULL(perf->bw_ctl, 1000));
+}
+
+static void dpu_core_perf_aggregate(struct drm_device *ddev,
+ enum dpu_crtc_client_type curr_client_type,
+ struct dpu_core_perf_params *perf)
+{
+ struct dpu_crtc_state *dpu_cstate;
+ struct drm_crtc *tmp_crtc;
+
+ drm_for_each_crtc(tmp_crtc, ddev) {
+ if (tmp_crtc->enabled &&
+ curr_client_type == dpu_crtc_get_client_type(tmp_crtc)) {
+ dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
+
+ perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
+ dpu_cstate->new_perf.max_per_pipe_ib);
+
+ perf->bw_ctl += dpu_cstate->new_perf.bw_ctl;
+
+ DRM_DEBUG_ATOMIC("crtc=%d bw=%llu\n",
+ tmp_crtc->base.id,
+ dpu_cstate->new_perf.bw_ctl);
+ }
+ }
}
/**
@@ -150,11 +162,9 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
u32 bw, threshold;
- u64 bw_sum_of_intfs = 0;
- enum dpu_crtc_client_type curr_client_type;
struct dpu_crtc_state *dpu_cstate;
- struct drm_crtc *tmp_crtc;
struct dpu_kms *kms;
+ struct dpu_core_perf_params perf = { 0 };
if (!crtc || !state) {
DPU_ERROR("invalid crtc\n");
@@ -172,80 +182,56 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
/* obtain new values */
_dpu_core_perf_calc_crtc(&kms->perf, crtc, state, &dpu_cstate->new_perf);
- bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl;
- curr_client_type = dpu_crtc_get_client_type(crtc);
-
- drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (tmp_crtc->enabled &&
- dpu_crtc_get_client_type(tmp_crtc) == curr_client_type &&
- tmp_crtc != crtc) {
- struct dpu_crtc_state *tmp_cstate =
- to_dpu_crtc_state(tmp_crtc->state);
-
- DRM_DEBUG_ATOMIC("crtc:%d bw:%llu ctrl:%d\n",
- tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl,
- tmp_cstate->bw_control);
+ dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf);
- bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
- }
-
- /* convert bandwidth to kb */
- bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
- DRM_DEBUG_ATOMIC("calculated bandwidth=%uk\n", bw);
+ /* convert bandwidth to kb */
+ bw = DIV_ROUND_UP_ULL(perf.bw_ctl, 1000);
+ DRM_DEBUG_ATOMIC("calculated bandwidth=%uk\n", bw);
- threshold = kms->perf.perf_cfg->max_bw_high;
+ threshold = kms->perf.perf_cfg->max_bw_high;
- DRM_DEBUG_ATOMIC("final threshold bw limit = %d\n", threshold);
+ DRM_DEBUG_ATOMIC("final threshold bw limit = %d\n", threshold);
- if (!threshold) {
- DPU_ERROR("no bandwidth limits specified\n");
- return -E2BIG;
- } else if (bw > threshold) {
- DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
- threshold);
- return -E2BIG;
- }
+ if (!threshold) {
+ DPU_ERROR("no bandwidth limits specified\n");
+ return -E2BIG;
+ } else if (bw > threshold) {
+ DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
+ threshold);
+ return -E2BIG;
}
return 0;
}
static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
- struct drm_crtc *crtc)
+ struct drm_crtc *crtc)
{
struct dpu_core_perf_params perf = { 0 };
- enum dpu_crtc_client_type curr_client_type
- = dpu_crtc_get_client_type(crtc);
- struct drm_crtc *tmp_crtc;
- struct dpu_crtc_state *dpu_cstate;
int i, ret = 0;
- u64 avg_bw;
+ u32 avg_bw;
+ u32 peak_bw;
if (!kms->num_paths)
return 0;
- drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (tmp_crtc->enabled &&
- curr_client_type ==
- dpu_crtc_get_client_type(tmp_crtc)) {
- dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
-
- perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
- dpu_cstate->new_perf.max_per_pipe_ib);
-
- perf.bw_ctl += dpu_cstate->new_perf.bw_ctl;
+ if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
+ avg_bw = 0;
+ peak_bw = 0;
+ } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
+ avg_bw = kms->perf.fix_core_ab_vote;
+ peak_bw = kms->perf.fix_core_ib_vote;
+ } else {
+ dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf);
- DRM_DEBUG_ATOMIC("crtc=%d bw=%llu paths:%d\n",
- tmp_crtc->base.id,
- dpu_cstate->new_perf.bw_ctl, kms->num_paths);
- }
+ avg_bw = div_u64(perf.bw_ctl, 1000); /*Bps_to_icc*/
+ peak_bw = perf.max_per_pipe_ib;
}
- avg_bw = perf.bw_ctl;
- do_div(avg_bw, (kms->num_paths * 1000)); /*Bps_to_icc*/
+ avg_bw /= kms->num_paths;
for (i = 0; i < kms->num_paths; i++)
- icc_set_bw(kms->path[i], avg_bw, perf.max_per_pipe_ib);
+ icc_set_bw(kms->path[i], avg_bw, peak_bw);
return ret;
}
@@ -476,9 +462,9 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
&perf->core_clk_rate);
debugfs_create_u32("enable_bw_release", 0600, entry,
(u32 *)&perf->enable_bw_release);
- debugfs_create_u32("threshold_low", 0400, entry,
+ debugfs_create_u32("low_core_ab", 0400, entry,
(u32 *)&perf->perf_cfg->max_bw_low);
- debugfs_create_u32("threshold_high", 0400, entry,
+ debugfs_create_u32("max_core_ab", 0400, entry,
(u32 *)&perf->perf_cfg->max_bw_high);
debugfs_create_u32("min_core_ib", 0400, entry,
(u32 *)&perf->perf_cfg->min_core_ib);
@@ -490,9 +476,9 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
(u32 *)perf, &dpu_core_perf_mode_fops);
debugfs_create_u64("fix_core_clk_rate", 0600, entry,
&perf->fix_core_clk_rate);
- debugfs_create_u64("fix_core_ib_vote", 0600, entry,
+ debugfs_create_u32("fix_core_ib_vote", 0600, entry,
&perf->fix_core_ib_vote);
- debugfs_create_u64("fix_core_ab_vote", 0600, entry,
+ debugfs_create_u32("fix_core_ab_vote", 0600, entry,
&perf->fix_core_ab_vote);
return 0;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
index 451bf8021114..d2f21d34e501 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -19,7 +19,7 @@
* @core_clk_rate: core clock rate request
*/
struct dpu_core_perf_params {
- u64 max_per_pipe_ib;
+ u32 max_per_pipe_ib;
u64 bw_ctl;
u64 core_clk_rate;
};
@@ -40,8 +40,8 @@ struct dpu_core_perf_tune {
* @perf_tune: debug control for performance tuning
* @enable_bw_release: debug control for bandwidth release
* @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
- * @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
- * @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
+ * @fix_core_ib_vote: fixed core ib vote in KBps used in mode 2
+ * @fix_core_ab_vote: fixed core ab vote in KBps used in mode 2
*/
struct dpu_core_perf {
const struct dpu_perf_cfg *perf_cfg;
@@ -50,8 +50,8 @@ struct dpu_core_perf {
struct dpu_core_perf_tune perf_tune;
u32 enable_bw_release;
u64 fix_core_clk_rate;
- u64 fix_core_ib_vote;
- u64 fix_core_ab_vote;
+ u32 fix_core_ib_vote;
+ u32 fix_core_ab_vote;
};
int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index e5dcd41a361f..0714936d8835 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -953,6 +953,45 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
return rc;
}
+static int dpu_crtc_kickoff_clone_mode(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_encoder *rt_encoder = NULL, *wb_encoder = NULL;
+ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+
+ /* Find encoder for real time display */
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+ wb_encoder = encoder;
+ else
+ rt_encoder = encoder;
+ }
+
+ if (!rt_encoder || !wb_encoder) {
+ DRM_DEBUG_ATOMIC("real time or wb encoder not found\n");
+ return -EINVAL;
+ }
+
+ dpu_encoder_prepare_for_kickoff(wb_encoder);
+ dpu_encoder_prepare_for_kickoff(rt_encoder);
+
+ dpu_vbif_clear_errors(dpu_kms);
+
+ /*
+ * Kickoff real time encoder last as it's the encoder that
+ * will do the flush
+ */
+ dpu_encoder_kickoff(wb_encoder);
+ dpu_encoder_kickoff(rt_encoder);
+
+ /* Don't start frame done timers until the kickoffs have finished */
+ dpu_encoder_start_frame_done_timer(wb_encoder);
+ dpu_encoder_start_frame_done_timer(rt_encoder);
+
+ return 0;
+}
+
/**
* dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
* @crtc: Pointer to drm crtc object
@@ -981,13 +1020,27 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
goto end;
}
}
- /*
- * Encoder will flush/start now, unless it has a tx pending. If so, it
- * may delay and flush at an irq event (e.g. ppdone)
- */
- drm_for_each_encoder_mask(encoder, crtc->dev,
- crtc->state->encoder_mask)
- dpu_encoder_prepare_for_kickoff(encoder);
+
+ if (drm_crtc_in_clone_mode(crtc->state)) {
+ if (dpu_crtc_kickoff_clone_mode(crtc))
+ goto end;
+ } else {
+ /*
+ * Encoder will flush/start now, unless it has a tx pending.
+ * If so, it may delay and flush at an irq event (e.g. ppdone)
+ */
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask)
+ dpu_encoder_prepare_for_kickoff(encoder);
+
+ dpu_vbif_clear_errors(dpu_kms);
+
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask) {
+ dpu_encoder_kickoff(encoder);
+ dpu_encoder_start_frame_done_timer(encoder);
+ }
+ }
if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
/* acquire bandwidth and other resources */
@@ -997,11 +1050,6 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
dpu_crtc->play_count++;
- dpu_vbif_clear_errors(dpu_kms);
-
- drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
- dpu_encoder_kickoff(encoder);
-
reinit_completion(&dpu_crtc->frame_done_comp);
end:
@@ -1230,6 +1278,151 @@ done:
return ret;
}
+#define MAX_CHANNELS_PER_CRTC 2
+#define MAX_HDISPLAY_SPLIT 1080
+
+static struct msm_display_topology dpu_crtc_get_topology(
+ struct drm_crtc *crtc,
+ struct dpu_kms *dpu_kms,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct msm_display_topology topology = {0};
+ struct drm_encoder *drm_enc;
+
+ drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask)
+ dpu_encoder_update_topology(drm_enc, &topology, crtc_state->state,
+ &crtc_state->adjusted_mode);
+
+ topology.cwb_enabled = drm_crtc_in_clone_mode(crtc_state);
+
+ /*
+ * Datapath topology selection
+ *
+ * Dual display
+ * 2 LM, 2 INTF ( Split display using 2 interfaces)
+ *
+ * Single display
+ * 1 LM, 1 INTF
+ * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
+ *
+ * If DSC is enabled, use 2 LMs for 2:2:1 topology
+ *
+ * Add dspps to the reservation requirements if ctm is requested
+ *
+ * Only hardcode num_lm to 2 for cases where num_intf == 2 and CWB is not
+ * enabled. This is because in cases where CWB is enabled, num_intf will
+ * count both the WB and real-time phys encoders.
+ *
+ * For non-DSC CWB usecases, have the num_lm be decided by the
+ * (mode->hdisplay > MAX_HDISPLAY_SPLIT) check.
+ */
+
+ if (topology.num_intf == 2 && !topology.cwb_enabled)
+ topology.num_lm = 2;
+ else if (topology.num_dsc == 2)
+ topology.num_lm = 2;
+ else if (dpu_kms->catalog->caps->has_3d_merge)
+ topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
+ else
+ topology.num_lm = 1;
+
+ if (crtc_state->ctm)
+ topology.num_dspp = topology.num_lm;
+
+ return topology;
+}
+
+static int dpu_crtc_assign_resources(struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_CRTC];
+ struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_CRTC];
+ struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_CRTC];
+ int i, num_lm, num_ctl, num_dspp;
+ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ struct dpu_global_state *global_state;
+ struct dpu_crtc_state *cstate;
+ struct msm_display_topology topology;
+ int ret;
+
+ /*
+ * Release and Allocate resources on every modeset
+ */
+ global_state = dpu_kms_get_global_state(crtc_state->state);
+ if (IS_ERR(global_state))
+ return PTR_ERR(global_state);
+
+ dpu_rm_release(global_state, crtc);
+
+ if (!crtc_state->enable)
+ return 0;
+
+ topology = dpu_crtc_get_topology(crtc, dpu_kms, crtc_state);
+ ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
+ crtc_state->crtc, &topology);
+ if (ret)
+ return ret;
+
+ cstate = to_dpu_crtc_state(crtc_state);
+
+ num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ crtc_state->crtc,
+ DPU_HW_BLK_CTL, hw_ctl,
+ ARRAY_SIZE(hw_ctl));
+ num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ crtc_state->crtc,
+ DPU_HW_BLK_LM, hw_lm,
+ ARRAY_SIZE(hw_lm));
+ num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ crtc_state->crtc,
+ DPU_HW_BLK_DSPP, hw_dspp,
+ ARRAY_SIZE(hw_dspp));
+
+ for (i = 0; i < num_lm; i++) {
+ int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
+
+ cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
+ cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
+ if (i < num_dspp)
+ cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
+ }
+
+ cstate->num_mixers = num_lm;
+
+ return 0;
+}
+
+/**
+ * dpu_crtc_check_mode_changed: check if full modeset is required
+ * @old_crtc_state: Previous CRTC state
+ * @new_crtc_state: Corresponding CRTC state to be checked
+ *
+ * Check if the changes in the object properties demand full mode set.
+ */
+int dpu_crtc_check_mode_changed(struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state)
+{
+ struct drm_encoder *drm_enc;
+ struct drm_crtc *crtc = new_crtc_state->crtc;
+ bool clone_mode_enabled = drm_crtc_in_clone_mode(old_crtc_state);
+ bool clone_mode_requested = drm_crtc_in_clone_mode(new_crtc_state);
+
+ DRM_DEBUG_ATOMIC("%d\n", crtc->base.id);
+
+ /* there might be cases where encoder needs a modeset too */
+ drm_for_each_encoder_mask(drm_enc, crtc->dev, new_crtc_state->encoder_mask) {
+ if (dpu_encoder_needs_modeset(drm_enc, new_crtc_state->state))
+ new_crtc_state->mode_changed = true;
+ }
+
+ if ((clone_mode_requested && !clone_mode_enabled) ||
+ (!clone_mode_requested && clone_mode_enabled))
+ new_crtc_state->mode_changed = true;
+
+ return 0;
+}
+
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -1245,6 +1438,13 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);
+ /* don't reallocate resources if only ACTIVE has been changed */
+ if (crtc_state->mode_changed || crtc_state->connectors_changed) {
+ rc = dpu_crtc_assign_resources(crtc, crtc_state);
+ if (rc < 0)
+ return rc;
+ }
+
if (dpu_use_virtual_planes &&
(crtc_state->planes_changed || crtc_state->zpos_changed)) {
rc = dpu_crtc_reassign_planes(crtc, crtc_state);
@@ -1262,10 +1462,6 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);
- /* force a full mode set if active state changed */
- if (crtc_state->active_changed)
- crtc_state->mode_changed = true;
-
if (cstate->num_mixers) {
rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state);
if (rc)
@@ -1484,8 +1680,9 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
seq_printf(s, "core_clk_rate: %llu\n",
dpu_crtc->cur_perf.core_clk_rate);
- seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
- seq_printf(s, "max_per_pipe_ib: %llu\n",
+ seq_printf(s, "bw_ctl: %uk\n",
+ (u32)DIV_ROUND_UP_ULL(dpu_crtc->cur_perf.bw_ctl, 1000));
+ seq_printf(s, "max_per_pipe_ib: %u\n",
dpu_crtc->cur_perf.max_per_pipe_ib);
return 0;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index 0b148f3ce0d7..94392b9b9245 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -239,6 +239,9 @@ static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
return crtc ? atomic_read(&to_dpu_crtc(crtc)->frame_pending) : -EINVAL;
}
+int dpu_crtc_check_mode_changed(struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state);
+
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
void dpu_crtc_vblank_callback(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 48e6e8d74c85..284e69bb47c1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2013 Red Hat
* Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Author: Rob Clark <robdclark@gmail.com>
*/
@@ -24,6 +24,7 @@
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_ctl.h"
+#include "dpu_hw_cwb.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_dsc.h"
#include "dpu_hw_merge3d.h"
@@ -58,8 +59,6 @@
#define IDLE_SHORT_TIMEOUT 1
-#define MAX_HDISPLAY_SPLIT 1080
-
/* timeout in frames waiting for frame done */
#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
@@ -135,8 +134,12 @@ enum dpu_enc_rc_states {
* @cur_slave: As above but for the slave encoder.
* @hw_pp: Handle to the pingpong blocks used for the display. No.
* pingpong blocks can be different than num_phys_encs.
+ * @hw_cwb: Handle to the CWB muxes used for concurrent writeback
+ * display. Number of CWB muxes can be different than
+ * num_phys_encs.
* @hw_dsc: Handle to the DSC blocks used for the display.
* @dsc_mask: Bitmask of used DSC blocks.
+ * @cwb_mask: Bitmask of used CWB muxes
* @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
* for partial update right-only cases, such as pingpong
* split where virtual pingpong does not generate IRQs
@@ -179,9 +182,11 @@ struct dpu_encoder_virt {
struct dpu_encoder_phys *cur_master;
struct dpu_encoder_phys *cur_slave;
struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_cwb *hw_cwb[MAX_CHANNELS_PER_ENC];
struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
unsigned int dsc_mask;
+ unsigned int cwb_mask;
bool intfs_swapped;
@@ -622,9 +627,9 @@ bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
if (dpu_enc->phys_encs[i])
intf_count++;
- /* See dpu_encoder_get_topology, we only support 2:2:1 topology */
- if (dpu_enc->dsc)
- num_dsc = 2;
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
+ if (dpu_enc->hw_dsc[i])
+ num_dsc++;
return (num_dsc > 0) && (num_dsc > intf_count);
}
@@ -647,130 +652,51 @@ struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
return NULL;
}
-static struct msm_display_topology dpu_encoder_get_topology(
- struct dpu_encoder_virt *dpu_enc,
- struct dpu_kms *dpu_kms,
- struct drm_display_mode *mode,
- struct drm_crtc_state *crtc_state,
- struct drm_dsc_config *dsc)
+void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
+ struct msm_display_topology *topology,
+ struct drm_atomic_state *state,
+ const struct drm_display_mode *adj_mode)
{
- struct msm_display_topology topology = {0};
- int i, intf_count = 0;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ struct msm_drm_private *priv = dpu_enc->base.dev->dev_private;
+ struct msm_display_info *disp_info = &dpu_enc->disp_info;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_framebuffer *fb;
+ struct drm_dsc_config *dsc;
+
+ int i;
for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
if (dpu_enc->phys_encs[i])
- intf_count++;
+ topology->num_intf++;
- /* Datapath topology selection
- *
- * Dual display
- * 2 LM, 2 INTF ( Split display using 2 interfaces)
- *
- * Single display
- * 1 LM, 1 INTF
- * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
- *
- * Add dspps to the reservation requirements if ctm is requested
- */
- if (intf_count == 2)
- topology.num_lm = 2;
- else if (!dpu_kms->catalog->caps->has_3d_merge)
- topology.num_lm = 1;
- else
- topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
-
- if (crtc_state->ctm)
- topology.num_dspp = topology.num_lm;
-
- topology.num_intf = intf_count;
+ dsc = dpu_encoder_get_dsc_config(drm_enc);
+ /* We only support the 2 DSC mode (with 2 LM and 1 INTF) */
if (dsc) {
/*
- * In case of Display Stream Compression (DSC), we would use
- * 2 DSC encoders, 2 layer mixers and 1 interface
- * this is power optimal and can drive up to (including) 4k
- * screens
+ * Use 2 DSC encoders, 2 layer mixers and 1 or 2 interfaces
+ * when Display Stream Compression (DSC) is enabled,
+ * and when enough DSC blocks are available.
+ * This is power-optimal and can drive up to (including) 4k
+ * screens.
*/
- topology.num_dsc = 2;
- topology.num_lm = 2;
- topology.num_intf = 1;
- }
-
- return topology;
-}
-
-static void dpu_encoder_assign_crtc_resources(struct dpu_kms *dpu_kms,
- struct drm_encoder *drm_enc,
- struct dpu_global_state *global_state,
- struct drm_crtc_state *crtc_state)
-{
- struct dpu_crtc_state *cstate;
- struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
- struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
- struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC];
- int num_lm, num_ctl, num_dspp, i;
-
- cstate = to_dpu_crtc_state(crtc_state);
-
- memset(cstate->mixers, 0, sizeof(cstate->mixers));
-
- num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
- num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
- num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
- ARRAY_SIZE(hw_dspp));
-
- for (i = 0; i < num_lm; i++) {
- int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
-
- cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
- cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
- cstate->mixers[i].hw_dspp = i < num_dspp ? to_dpu_hw_dspp(hw_dspp[i]) : NULL;
- }
-
- cstate->num_mixers = num_lm;
-}
-
-static int dpu_encoder_virt_atomic_check(
- struct drm_encoder *drm_enc,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct dpu_encoder_virt *dpu_enc;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
- struct drm_display_mode *adj_mode;
- struct msm_display_topology topology;
- struct msm_display_info *disp_info;
- struct dpu_global_state *global_state;
- struct drm_framebuffer *fb;
- struct drm_dsc_config *dsc;
- int ret = 0;
-
- if (!drm_enc || !crtc_state || !conn_state) {
- DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
- drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
- return -EINVAL;
+ WARN(topology->num_intf > 2,
+ "DSC topology cannot support more than 2 interfaces\n");
+ if (topology->num_intf >= 2 || dpu_kms->catalog->dsc_count >= 2)
+ topology->num_dsc = 2;
+ else
+ topology->num_dsc = 1;
}
- dpu_enc = to_dpu_encoder_virt(drm_enc);
- DPU_DEBUG_ENC(dpu_enc, "\n");
-
- priv = drm_enc->dev->dev_private;
- disp_info = &dpu_enc->disp_info;
- dpu_kms = to_dpu_kms(priv->kms);
- adj_mode = &crtc_state->adjusted_mode;
- global_state = dpu_kms_get_global_state(crtc_state->state);
- if (IS_ERR(global_state))
- return PTR_ERR(global_state);
-
- trace_dpu_enc_atomic_check(DRMID(drm_enc));
-
- dsc = dpu_encoder_get_dsc_config(drm_enc);
-
- topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);
+ connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
+ if (!connector)
+ return;
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (!conn_state)
+ return;
/*
* Use CDM only for writeback or DP at the moment as other interfaces cannot handle it.
@@ -781,34 +707,45 @@ static int dpu_encoder_virt_atomic_check(
fb = conn_state->writeback_job->fb;
if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)))
- topology.needs_cdm = true;
+ topology->num_cdm++;
} else if (disp_info->intf_type == INTF_DP) {
if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode))
- topology.needs_cdm = true;
+ topology->num_cdm++;
}
+}
- if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm)
- crtc_state->mode_changed = true;
- else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm)
- crtc_state->mode_changed = true;
- /*
- * Release and Allocate resources on every modeset
- * Dont allocate when active is false.
- */
- if (drm_atomic_crtc_needs_modeset(crtc_state)) {
- dpu_rm_release(global_state, drm_enc);
+bool dpu_encoder_needs_modeset(struct drm_encoder *drm_enc, struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_framebuffer *fb;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
- if (!crtc_state->active_changed || crtc_state->enable)
- ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
- drm_enc, crtc_state, &topology);
- if (!ret)
- dpu_encoder_assign_crtc_resources(dpu_kms, drm_enc,
- global_state, crtc_state);
- }
+ if (!drm_enc || !state)
+ return false;
- trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
+ connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
+ if (!connector)
+ return false;
- return ret;
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+
+ /**
+ * These checks are duplicated from dpu_encoder_update_topology() since
+ * CRTC and encoder don't hold topology information
+ */
+ if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
+ fb = conn_state->writeback_job->fb;
+ if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb))) {
+ if (!dpu_enc->cur_master->hw_cdm)
+ return true;
+ } else {
+ if (dpu_enc->cur_master->hw_cdm)
+ return true;
+ }
+ }
+
+ return false;
}
static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
@@ -1219,8 +1156,12 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_cwb[MAX_CHANNELS_PER_ENC];
int num_ctl, num_pp, num_dsc;
+ int num_cwb = 0;
+ bool is_cwb_encoder;
unsigned int dsc_mask = 0;
+ unsigned int cwb_mask = 0;
int i;
if (!drm_enc) {
@@ -1233,6 +1174,8 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
+ is_cwb_encoder = drm_crtc_in_clone_mode(crtc_state) &&
+ dpu_enc->disp_info.intf_type == INTF_WB;
global_state = dpu_kms_get_existing_global_state(dpu_kms);
if (IS_ERR_OR_NULL(global_state)) {
@@ -1243,18 +1186,38 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
trace_dpu_enc_mode_set(DRMID(drm_enc));
/* Query resource that have been reserved in atomic check step. */
- num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
- ARRAY_SIZE(hw_pp));
+ if (is_cwb_encoder) {
+ num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->crtc,
+ DPU_HW_BLK_DCWB_PINGPONG,
+ hw_pp, ARRAY_SIZE(hw_pp));
+ num_cwb = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->crtc,
+ DPU_HW_BLK_CWB,
+ hw_cwb, ARRAY_SIZE(hw_cwb));
+ } else {
+ num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->crtc,
+ DPU_HW_BLK_PINGPONG, hw_pp,
+ ARRAY_SIZE(hw_pp));
+ }
+
+ for (i = 0; i < num_cwb; i++) {
+ dpu_enc->hw_cwb[i] = to_dpu_hw_cwb(hw_cwb[i]);
+ cwb_mask |= BIT(dpu_enc->hw_cwb[i]->idx - CWB_0);
+ }
+
+ dpu_enc->cwb_mask = cwb_mask;
+
num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
+ drm_enc->crtc, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
: NULL;
num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_DSC,
+ drm_enc->crtc, DPU_HW_BLK_DSC,
hw_dsc, ARRAY_SIZE(hw_dsc));
for (i = 0; i < num_dsc; i++) {
dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
@@ -1268,7 +1231,7 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
struct dpu_hw_blk *hw_cdm = NULL;
dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_CDM,
+ drm_enc->crtc, DPU_HW_BLK_CDM,
&hw_cdm, 1);
dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL;
}
@@ -1654,6 +1617,7 @@ static void dpu_encoder_off_work(struct work_struct *work)
static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
struct dpu_hw_ctl *ctl;
int pending_kickoff_cnt;
u32 ret = UINT_MAX;
@@ -1671,6 +1635,15 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+ /* Return early if encoder is writeback and in clone mode */
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL &&
+ dpu_enc->cwb_mask) {
+ DPU_DEBUG("encoder %d skip flush for concurrent writeback encoder\n",
+ DRMID(drm_enc));
+ return;
+ }
+
if (extra_flush_bits && ctl->ops.update_pending_flush)
ctl->ops.update_pending_flush(ctl, extra_flush_bits);
@@ -1693,6 +1666,8 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
*/
static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(phys->parent);
+
if (!phys) {
DPU_ERROR("invalid argument(s)\n");
return;
@@ -1703,6 +1678,12 @@ static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
return;
}
+ if (phys->parent->encoder_type == DRM_MODE_ENCODER_VIRTUAL &&
+ dpu_enc->cwb_mask) {
+ DPU_DEBUG("encoder %d CWB enabled, skipping\n", DRMID(phys->parent));
+ return;
+ }
+
if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
phys->ops.trigger_start(phys);
}
@@ -2020,7 +2001,6 @@ static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_ctl *ctl,
static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
struct drm_dsc_config *dsc)
{
- /* coding only for 2LM, 2enc, 1 dsc config */
struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
@@ -2030,22 +2010,24 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
int dsc_common_mode;
int pic_width;
u32 initial_lines;
+ int num_dsc = 0;
int i;
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
hw_pp[i] = dpu_enc->hw_pp[i];
hw_dsc[i] = dpu_enc->hw_dsc[i];
- if (!hw_pp[i] || !hw_dsc[i]) {
- DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
- return;
- }
+ if (!hw_pp[i] || !hw_dsc[i])
+ break;
+
+ num_dsc++;
}
- dsc_common_mode = 0;
pic_width = dsc->pic_width;
- dsc_common_mode = DSC_MODE_SPLIT_PANEL;
+ dsc_common_mode = 0;
+ if (num_dsc > 1)
+ dsc_common_mode |= DSC_MODE_SPLIT_PANEL;
if (dpu_encoder_use_dsc_merge(enc_master->parent))
dsc_common_mode |= DSC_MODE_MULTIPLEX;
if (enc_master->intf_mode == INTF_MODE_VIDEO)
@@ -2054,14 +2036,10 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
this_frame_slices = pic_width / dsc->slice_width;
intf_ip_w = this_frame_slices * dsc->slice_width;
- /*
- * dsc merge case: when using 2 encoders for the same stream,
- * no. of slices need to be same on both the encoders.
- */
- enc_ip_w = intf_ip_w / 2;
+ enc_ip_w = intf_ip_w / num_dsc;
initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
- for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
+ for (i = 0; i < num_dsc; i++)
dpu_encoder_dsc_pipe_cfg(ctl, hw_dsc[i], hw_pp[i],
dsc, dsc_common_mode, initial_lines);
}
@@ -2135,6 +2113,25 @@ bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
}
/**
+ * dpu_encoder_start_frame_done_timer - Start the encoder frame done timer
+ * @drm_enc: Pointer to drm encoder structure
+ */
+void dpu_encoder_start_frame_done_timer(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ unsigned long timeout_ms;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
+ drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
+
+ atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
+ mod_timer(&dpu_enc->frame_done_timer,
+ jiffies + msecs_to_jiffies(timeout_ms));
+}
+
+/**
* dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
* (i.e. ctl flush and start) immediately.
* @drm_enc: encoder pointer
@@ -2143,7 +2140,6 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
- unsigned long timeout_ms;
unsigned int i;
DPU_ATRACE_BEGIN("encoder_kickoff");
@@ -2151,13 +2147,6 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
trace_dpu_enc_kickoff(DRMID(drm_enc));
- timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
- drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
-
- atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
- mod_timer(&dpu_enc->frame_done_timer,
- jiffies + msecs_to_jiffies(timeout_ms));
-
/* All phys encs are ready to go, trigger the kickoff */
_dpu_encoder_kickoff_phys(dpu_enc);
@@ -2183,22 +2172,22 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
memset(&mixer, 0, sizeof(mixer));
/* reset all mixers for this encoder */
- if (phys_enc->hw_ctl->ops.clear_all_blendstages)
- phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
+ if (ctl->ops.clear_all_blendstages)
+ ctl->ops.clear_all_blendstages(ctl);
global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms);
num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state,
- phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
+ phys_enc->parent->crtc, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
for (i = 0; i < num_lm; i++) {
hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
- if (phys_enc->hw_ctl->ops.update_pending_flush_mixer)
- phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
+ if (ctl->ops.update_pending_flush_mixer)
+ ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
/* clear all blendstages */
- if (phys_enc->hw_ctl->ops.setup_blendstage)
- phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
+ if (ctl->ops.setup_blendstage)
+ ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
}
}
@@ -2250,7 +2239,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
- phys_enc->hw_ctl->ops.reset(ctl);
+ ctl->ops.reset(ctl);
dpu_encoder_helper_reset_mixers(phys_enc);
@@ -2265,8 +2254,8 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, PINGPONG_NONE);
/* mark WB flush as pending */
- if (phys_enc->hw_ctl->ops.update_pending_flush_wb)
- phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
+ if (ctl->ops.update_pending_flush_wb)
+ ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
} else {
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk)
@@ -2275,8 +2264,8 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
PINGPONG_NONE);
/* mark INTF flush as pending */
- if (phys_enc->hw_ctl->ops.update_pending_flush_intf)
- phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl,
+ if (ctl->ops.update_pending_flush_intf)
+ ctl->ops.update_pending_flush_intf(ctl,
dpu_enc->phys_encs[i]->hw_intf->idx);
}
}
@@ -2284,12 +2273,15 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
if (phys_enc->hw_pp && phys_enc->hw_pp->ops.setup_dither)
phys_enc->hw_pp->ops.setup_dither(phys_enc->hw_pp, NULL);
+ if (dpu_enc->cwb_mask)
+ dpu_encoder_helper_phys_setup_cwb(phys_enc, false);
+
/* reset the merge 3D HW block */
if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
BLEND_3D_NONE);
- if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
- phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl,
+ if (ctl->ops.update_pending_flush_merge_3d)
+ ctl->ops.update_pending_flush_merge_3d(ctl,
phys_enc->hw_pp->merge_3d->idx);
}
@@ -2297,9 +2289,9 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
if (phys_enc->hw_cdm->ops.bind_pingpong_blk && phys_enc->hw_pp)
phys_enc->hw_cdm->ops.bind_pingpong_blk(phys_enc->hw_cdm,
PINGPONG_NONE);
- if (phys_enc->hw_ctl->ops.update_pending_flush_cdm)
- phys_enc->hw_ctl->ops.update_pending_flush_cdm(phys_enc->hw_ctl,
- phys_enc->hw_cdm->idx);
+ if (ctl->ops.update_pending_flush_cdm)
+ ctl->ops.update_pending_flush_cdm(ctl,
+ phys_enc->hw_cdm->idx);
}
if (dpu_enc->dsc) {
@@ -2310,6 +2302,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
+ intf_cfg.cwb = dpu_enc->cwb_mask;
if (phys_enc->hw_intf)
intf_cfg.intf = phys_enc->hw_intf->idx;
@@ -2327,6 +2320,68 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
ctl->ops.clear_pending_flush(ctl);
}
+void dpu_encoder_helper_phys_setup_cwb(struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+ struct dpu_hw_cwb *hw_cwb;
+ struct dpu_hw_ctl *hw_ctl;
+ struct dpu_hw_cwb_setup_cfg cwb_cfg;
+
+ struct dpu_kms *dpu_kms;
+ struct dpu_global_state *global_state;
+ struct dpu_hw_blk *rt_pp_list[MAX_CHANNELS_PER_ENC];
+ int num_pp;
+
+ if (!phys_enc->hw_wb)
+ return;
+
+ hw_ctl = phys_enc->hw_ctl;
+
+ if (!phys_enc->hw_ctl) {
+ DPU_DEBUG("[wb:%d] no ctl assigned\n",
+ phys_enc->hw_wb->idx - WB_0);
+ return;
+ }
+
+ dpu_kms = phys_enc->dpu_kms;
+ global_state = dpu_kms_get_existing_global_state(dpu_kms);
+ num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ phys_enc->parent->crtc,
+ DPU_HW_BLK_PINGPONG, rt_pp_list,
+ ARRAY_SIZE(rt_pp_list));
+
+ if (num_pp == 0 || num_pp > MAX_CHANNELS_PER_ENC) {
+ DPU_DEBUG_ENC(dpu_enc, "invalid num_pp %d\n", num_pp);
+ return;
+ }
+
+ /*
+ * The CWB mux supports using LM or DSPP as tap points. For now,
+ * always use LM tap point
+ */
+ cwb_cfg.input = INPUT_MODE_LM_OUT;
+
+ for (int i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ hw_cwb = dpu_enc->hw_cwb[i];
+ if (!hw_cwb)
+ continue;
+
+ if (enable) {
+ struct dpu_hw_pingpong *hw_pp =
+ to_dpu_hw_pingpong(rt_pp_list[i]);
+ cwb_cfg.pp_idx = hw_pp->idx;
+ } else {
+ cwb_cfg.pp_idx = PINGPONG_NONE;
+ }
+
+ hw_cwb->ops.config_cwb(hw_cwb, &cwb_cfg);
+
+ if (hw_ctl->ops.update_pending_flush_cwb)
+ hw_ctl->ops.update_pending_flush_cwb(hw_ctl, hw_cwb->idx);
+ }
+}
+
/**
* dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block
* @phys_enc: Pointer to physical encoder
@@ -2513,6 +2568,38 @@ static int dpu_encoder_virt_add_phys_encs(
return 0;
}
+/**
+ * dpu_encoder_get_clones - Calculate the possible_clones for DPU encoder
+ * @drm_enc: DRM encoder pointer
+ * Returns: possible_clones mask
+ */
+uint32_t dpu_encoder_get_clones(struct drm_encoder *drm_enc)
+{
+ struct drm_encoder *curr;
+ int type = drm_enc->encoder_type;
+ uint32_t clone_mask = drm_encoder_mask(drm_enc);
+
+ /*
+ * Set writeback as possible clones of real-time DSI encoders and vice
+ * versa
+ *
+ * Writeback encoders can't be clones of each other and DSI
+ * encoders can't be clones of each other.
+ *
+ * TODO: Add DP encoders as valid possible clones for writeback encoders
+ * (and vice versa) once concurrent writeback has been validated for DP
+ */
+ drm_for_each_encoder(curr, drm_enc->dev) {
+ if ((type == DRM_MODE_ENCODER_VIRTUAL &&
+ curr->encoder_type == DRM_MODE_ENCODER_DSI) ||
+ (type == DRM_MODE_ENCODER_DSI &&
+ curr->encoder_type == DRM_MODE_ENCODER_VIRTUAL))
+ clone_mask |= drm_encoder_mask(curr);
+ }
+
+ return clone_mask;
+}
+
static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
struct dpu_kms *dpu_kms,
struct msm_display_info *disp_info)
@@ -2630,7 +2717,6 @@ static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
.atomic_mode_set = dpu_encoder_virt_atomic_mode_set,
.atomic_disable = dpu_encoder_virt_atomic_disable,
.atomic_enable = dpu_encoder_virt_atomic_enable,
- .atomic_check = dpu_encoder_virt_atomic_check,
};
static const struct drm_encoder_funcs dpu_encoder_funcs = {
@@ -2789,6 +2875,18 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
}
/**
+ * dpu_encoder_helper_get_cwb_mask - get CWB blocks mask for the DPU encoder
+ * @phys_enc: Pointer to physical encoder structure
+ */
+unsigned int dpu_encoder_helper_get_cwb_mask(struct dpu_encoder_phys *phys_enc)
+{
+ struct drm_encoder *encoder = phys_enc->parent;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
+
+ return dpu_enc->cwb_mask;
+}
+
+/**
* dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder
* This helper function is used by physical encoder to get DSC blocks mask
* used for this encoder.
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index 92b5ee390788..ca1ca2e51d7e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
@@ -60,6 +60,8 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
void dpu_encoder_virt_runtime_resume(struct drm_encoder *encoder);
+uint32_t dpu_encoder_get_clones(struct drm_encoder *drm_enc);
+
struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
int drm_enc_mode,
struct msm_display_info *disp_info);
@@ -80,6 +82,13 @@ int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos);
bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc);
+void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
+ struct msm_display_topology *topology,
+ struct drm_atomic_state *state,
+ const struct drm_display_mode *adj_mode);
+
+bool dpu_encoder_needs_modeset(struct drm_encoder *drm_enc, struct drm_atomic_state *state);
+
void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
struct drm_writeback_job *job);
@@ -88,4 +97,5 @@ void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc);
+void dpu_encoder_start_frame_done_timer(struct drm_encoder *drm_enc);
#endif /* __DPU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index 63f09857025c..61b22d949454 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
*/
@@ -309,6 +309,8 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
return BLEND_3D_NONE;
}
+unsigned int dpu_encoder_helper_get_cwb_mask(struct dpu_encoder_phys *phys_enc);
+
unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc);
struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc);
@@ -331,6 +333,9 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc);
+void dpu_encoder_helper_phys_setup_cwb(struct dpu_encoder_phys *phys_enc,
+ bool enable);
+
void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc,
const struct msm_format *dpu_fmt,
u32 output_type);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index e9bbccc44dad..da9994a79ca2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -5,6 +5,7 @@
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/delay.h>
+#include <linux/string_choices.h>
#include "dpu_encoder_phys.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_pingpong.h"
@@ -261,7 +262,7 @@ static int dpu_encoder_phys_cmd_control_vblank_irq(
DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0,
- enable ? "true" : "false", refcount);
+ str_true_false(enable), refcount);
if (enable) {
if (phys_enc->vblank_refcount == 0)
@@ -285,7 +286,7 @@ end:
DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
DRMID(phys_enc->parent),
phys_enc->hw_pp->idx - PINGPONG_0, ret,
- enable ? "true" : "false", refcount);
+ str_true_false(enable), refcount);
}
return ret;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 4c006ec74575..849fea580a4c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -68,7 +68,7 @@ static void dpu_encoder_phys_wb_set_ot_limit(
ot_params.num = hw_wb->idx - WB_0;
ot_params.width = phys_enc->cached_mode.hdisplay;
ot_params.height = phys_enc->cached_mode.vdisplay;
- ot_params.is_wfd = true;
+ ot_params.is_wfd = !dpu_encoder_helper_get_cwb_mask(phys_enc);
ot_params.frame_rate = drm_mode_vrefresh(&phys_enc->cached_mode);
ot_params.vbif_idx = hw_wb->caps->vbif_idx;
ot_params.rd = false;
@@ -111,7 +111,7 @@ static void dpu_encoder_phys_wb_set_qos_remap(
qos_params.vbif_idx = hw_wb->caps->vbif_idx;
qos_params.xin_id = hw_wb->caps->xin_id;
qos_params.num = hw_wb->idx - WB_0;
- qos_params.is_rt = false;
+ qos_params.is_rt = dpu_encoder_helper_get_cwb_mask(phys_enc);
DPU_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d is_rt:%d\n",
qos_params.num,
@@ -174,6 +174,7 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
struct dpu_hw_wb *hw_wb;
struct dpu_hw_wb_cfg *wb_cfg;
+ u32 cdp_usage;
if (!phys_enc || !phys_enc->dpu_kms || !phys_enc->dpu_kms->catalog) {
DPU_ERROR("invalid encoder\n");
@@ -182,6 +183,10 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
hw_wb = phys_enc->hw_wb;
wb_cfg = &wb_enc->wb_cfg;
+ if (dpu_encoder_helper_get_cwb_mask(phys_enc))
+ cdp_usage = DPU_PERF_CDP_USAGE_RT;
+ else
+ cdp_usage = DPU_PERF_CDP_USAGE_NRT;
wb_cfg->intf_mode = phys_enc->intf_mode;
wb_cfg->roi.x1 = 0;
@@ -199,7 +204,7 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
const struct dpu_perf_cfg *perf = phys_enc->dpu_kms->catalog->perf;
hw_wb->ops.setup_cdp(hw_wb, format,
- perf->cdp_cfg[DPU_PERF_CDP_USAGE_NRT].wr_enable);
+ perf->cdp_cfg[cdp_usage].wr_enable);
}
if (hw_wb->ops.setup_outaddress)
@@ -236,6 +241,7 @@ static void dpu_encoder_phys_wb_setup_ctl(struct dpu_encoder_phys *phys_enc)
intf_cfg.intf = DPU_NONE;
intf_cfg.wb = hw_wb->idx;
+ intf_cfg.cwb = dpu_encoder_helper_get_cwb_mask(phys_enc);
if (mode_3d && hw_pp && hw_pp->merge_3d)
intf_cfg.merge_3d = hw_pp->merge_3d->idx;
@@ -340,6 +346,8 @@ static void dpu_encoder_phys_wb_setup(
dpu_encoder_helper_phys_setup_cdm(phys_enc, format, CDM_CDWN_OUTPUT_WB);
+ dpu_encoder_helper_phys_setup_cwb(phys_enc, true);
+
dpu_encoder_phys_wb_setup_ctl(phys_enc);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 0b342c043875..64265ca4656a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -232,37 +232,6 @@ static const u32 rotation_v2_formats[] = {
/* TODO add formats after validation */
};
-static const u32 wb2_formats_rgb[] = {
- DRM_FORMAT_RGB565,
- DRM_FORMAT_BGR565,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGBA8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGBX8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_RGBA5551,
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_RGBX5551,
- DRM_FORMAT_ARGB4444,
- DRM_FORMAT_RGBA4444,
- DRM_FORMAT_RGBX4444,
- DRM_FORMAT_XRGB4444,
- DRM_FORMAT_BGR888,
- DRM_FORMAT_BGRA8888,
- DRM_FORMAT_BGRX8888,
- DRM_FORMAT_ABGR1555,
- DRM_FORMAT_BGRA5551,
- DRM_FORMAT_XBGR1555,
- DRM_FORMAT_BGRX5551,
- DRM_FORMAT_ABGR4444,
- DRM_FORMAT_BGRA4444,
- DRM_FORMAT_BGRX4444,
- DRM_FORMAT_XBGR4444,
-};
-
static const u32 wb2_formats_rgb_yuv[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR565,
@@ -507,7 +476,14 @@ static const struct dpu_dsc_sub_blks dsc_sblk_1 = {
/*************************************************************
* CDM block config
*************************************************************/
-static const struct dpu_cdm_cfg sc7280_cdm = {
+static const struct dpu_cdm_cfg dpu_cdm_1_x_4_x = {
+ .name = "cdm_0",
+ .id = CDM_0,
+ .len = 0x224,
+ .base = 0x79200,
+};
+
+static const struct dpu_cdm_cfg dpu_cdm_5_x = {
.name = "cdm_0",
.id = CDM_0,
.len = 0x228,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
index ae1534c49ae0..3f88c3641d4a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
@@ -214,7 +214,9 @@ static void dpu_hw_cdm_bind_pingpong_blk(struct dpu_hw_cdm *ctx, const enum dpu_
mux_cfg = DPU_REG_READ(c, CDM_MUX);
mux_cfg &= ~0xf;
- if (pp)
+ if (pp >= PINGPONG_CWB_0)
+ mux_cfg |= 0xd;
+ else if (pp)
mux_cfg |= (pp - PINGPONG_0) & 0x7;
else
mux_cfg |= 0xf;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 4893f10d6a58..411a7cf088eb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
@@ -31,12 +31,14 @@
#define CTL_MERGE_3D_ACTIVE 0x0E4
#define CTL_DSC_ACTIVE 0x0E8
#define CTL_WB_ACTIVE 0x0EC
+#define CTL_CWB_ACTIVE 0x0F0
#define CTL_INTF_ACTIVE 0x0F4
#define CTL_CDM_ACTIVE 0x0F8
#define CTL_FETCH_PIPE_ACTIVE 0x0FC
#define CTL_MERGE_3D_FLUSH 0x100
#define CTL_DSC_FLUSH 0x104
#define CTL_WB_FLUSH 0x108
+#define CTL_CWB_FLUSH 0x10C
#define CTL_INTF_FLUSH 0x110
#define CTL_CDM_FLUSH 0x114
#define CTL_PERIPH_FLUSH 0x128
@@ -53,6 +55,7 @@
#define PERIPH_IDX 30
#define INTF_IDX 31
#define WB_IDX 16
+#define CWB_IDX 28
#define DSPP_IDX 29 /* From DPU hw rev 7.x.x */
#define CTL_INVALID_BIT 0xffff
#define CTL_DEFAULT_GROUP_ID 0xf
@@ -110,6 +113,7 @@ static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
ctx->pending_flush_mask = 0x0;
ctx->pending_intf_flush_mask = 0;
ctx->pending_wb_flush_mask = 0;
+ ctx->pending_cwb_flush_mask = 0;
ctx->pending_merge_3d_flush_mask = 0;
ctx->pending_dsc_flush_mask = 0;
ctx->pending_cdm_flush_mask = 0;
@@ -144,6 +148,9 @@ static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
if (ctx->pending_flush_mask & BIT(WB_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
ctx->pending_wb_flush_mask);
+ if (ctx->pending_flush_mask & BIT(CWB_IDX))
+ DPU_REG_WRITE(&ctx->hw, CTL_CWB_FLUSH,
+ ctx->pending_cwb_flush_mask);
if (ctx->pending_flush_mask & BIT(DSPP_IDX))
for (dspp = DSPP_0; dspp < DSPP_MAX; dspp++) {
@@ -310,6 +317,13 @@ static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
ctx->pending_flush_mask |= BIT(WB_IDX);
}
+static void dpu_hw_ctl_update_pending_flush_cwb_v1(struct dpu_hw_ctl *ctx,
+ enum dpu_cwb cwb)
+{
+ ctx->pending_cwb_flush_mask |= BIT(cwb - CWB_0);
+ ctx->pending_flush_mask |= BIT(CWB_IDX);
+}
+
static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
enum dpu_intf intf)
{
@@ -547,6 +561,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
u32 intf_active = 0;
u32 dsc_active = 0;
u32 wb_active = 0;
+ u32 cwb_active = 0;
u32 mode_sel = 0;
/* CTL_TOP[31:28] carries group_id to collate CTL paths
@@ -561,6 +576,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
+ cwb_active = DPU_REG_READ(c, CTL_CWB_ACTIVE);
dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
if (cfg->intf)
@@ -569,12 +585,16 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
if (cfg->wb)
wb_active |= BIT(cfg->wb - WB_0);
+ if (cfg->cwb)
+ cwb_active |= cfg->cwb;
+
if (cfg->dsc)
dsc_active |= cfg->dsc;
DPU_REG_WRITE(c, CTL_TOP, mode_sel);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
+ DPU_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active);
DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
if (cfg->merge_3d)
@@ -624,6 +644,7 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_active = 0;
u32 wb_active = 0;
+ u32 cwb_active = 0;
u32 merge3d_active = 0;
u32 dsc_active;
u32 cdm_active;
@@ -651,6 +672,12 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
}
+ if (cfg->cwb) {
+ cwb_active = DPU_REG_READ(c, CTL_CWB_ACTIVE);
+ cwb_active &= ~cfg->cwb;
+ DPU_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active);
+ }
+
if (cfg->wb) {
wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
wb_active &= ~BIT(cfg->wb - WB_0);
@@ -703,6 +730,7 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->update_pending_flush_merge_3d =
dpu_hw_ctl_update_pending_flush_merge_3d_v1;
ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
+ ops->update_pending_flush_cwb = dpu_hw_ctl_update_pending_flush_cwb_v1;
ops->update_pending_flush_dsc =
dpu_hw_ctl_update_pending_flush_dsc_v1;
ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1;
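
For readers following the flush plumbing: the new CWB path reuses the two-level scheme of the other v1 blocks, where per-mux bits accumulate in pending_cwb_flush_mask and a single CWB_IDX bit in the top-level pending_flush_mask records that CTL_CWB_FLUSH must be written at trigger time. A minimal sketch of that bookkeeping follows, with simplified types and the register write stubbed out as a printf; it is an illustration, not the driver code.

/* Sketch of the two-level flush bookkeeping the CTL CWB path relies on. */
#include <stdint.h>
#include <stdio.h>

#define CWB_IDX		28	/* bit in the top-level flush mask */
#define CTL_CWB_FLUSH	0x10C	/* per-block flush register offset */

struct ctl_state {
	uint32_t pending_flush_mask;	 /* which flush registers need a write */
	uint32_t pending_cwb_flush_mask; /* one bit per CWB mux */
};

/* Mirror of update_pending_flush_cwb_v1(): cache the request, no MMIO yet. */
static void queue_cwb_flush(struct ctl_state *ctl, unsigned int cwb_index)
{
	ctl->pending_cwb_flush_mask |= 1u << cwb_index;
	ctl->pending_flush_mask |= 1u << CWB_IDX;
}

/* Mirror of trigger_flush_v1(): only touch CTL_CWB_FLUSH when it was queued. */
static void trigger_flush(struct ctl_state *ctl)
{
	if (ctl->pending_flush_mask & (1u << CWB_IDX))
		printf("write 0x%x to CTL_CWB_FLUSH (+0x%x)\n",
		       ctl->pending_cwb_flush_mask, CTL_CWB_FLUSH);
}

int main(void)
{
	struct ctl_state ctl = { 0 };

	queue_cwb_flush(&ctl, 0);	/* CWB_0 */
	queue_cwb_flush(&ctl, 1);	/* CWB_1 */
	trigger_flush(&ctl);
	return 0;
}
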
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 85c6c835cc87..080a9550a0cc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _DPU_HW_CTL_H
@@ -42,6 +42,7 @@ struct dpu_hw_stage_cfg {
* @cdm: CDM block used
* @stream_sel: Stream selection for multi-stream interfaces
* @dsc: DSC BIT masks used
+ * @cwb: CWB BIT masks used
*/
struct dpu_hw_intf_cfg {
enum dpu_intf intf;
@@ -51,6 +52,7 @@ struct dpu_hw_intf_cfg {
enum dpu_ctl_mode_sel intf_mode_sel;
enum dpu_cdm cdm;
int stream_sel;
+ unsigned int cwb;
unsigned int dsc;
};
@@ -115,6 +117,15 @@ struct dpu_hw_ctl_ops {
enum dpu_wb blk);
/**
+ * OR in the given flushbits to the cached pending_(cwb_)flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : concurrent writeback block index
+ */
+ void (*update_pending_flush_cwb)(struct dpu_hw_ctl *ctx,
+ enum dpu_cwb blk);
+
+ /**
* OR in the given flushbits to the cached pending_(intf_)flush_mask
* No effect on hardware
* @ctx : ctl path ctx pointer
@@ -258,6 +269,7 @@ struct dpu_hw_ctl_ops {
* @pending_flush_mask: storage for pending ctl_flush managed via ops
* @pending_intf_flush_mask: pending INTF flush
* @pending_wb_flush_mask: pending WB flush
+ * @pending_cwb_flush_mask: pending CWB flush
* @pending_dsc_flush_mask: pending DSC flush
* @pending_cdm_flush_mask: pending CDM flush
* @ops: operation list
@@ -274,6 +286,7 @@ struct dpu_hw_ctl {
u32 pending_flush_mask;
u32 pending_intf_flush_mask;
u32 pending_wb_flush_mask;
+ u32 pending_cwb_flush_mask;
u32 pending_periph_flush_mask;
u32 pending_merge_3d_flush_mask;
u32 pending_dspp_flush_mask[DSPP_MAX - DSPP_0];
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index ba7bb05efe9b..8d820cd1b554 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -77,12 +77,14 @@ enum dpu_hw_blk_type {
DPU_HW_BLK_LM,
DPU_HW_BLK_CTL,
DPU_HW_BLK_PINGPONG,
+ DPU_HW_BLK_DCWB_PINGPONG,
DPU_HW_BLK_INTF,
DPU_HW_BLK_WB,
DPU_HW_BLK_DSPP,
DPU_HW_BLK_MERGE_3D,
DPU_HW_BLK_DSC,
DPU_HW_BLK_CDM,
+ DPU_HW_BLK_CWB,
DPU_HW_BLK_MAX,
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 97e9cb8c2b09..3305ad0623ca 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2013 Red Hat
* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Author: Rob Clark <robdclark@gmail.com>
*/
@@ -446,6 +446,19 @@ static void dpu_kms_disable_commit(struct msm_kms *kms)
pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
+static int dpu_kms_check_mode_changed(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc_state *old_crtc_state;
+ struct drm_crtc *crtc;
+ int i;
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
+ dpu_crtc_check_mode_changed(old_crtc_state, new_crtc_state);
+
+ return 0;
+}
+
static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
@@ -811,8 +824,11 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
return ret;
num_encoders = 0;
- drm_for_each_encoder(encoder, dev)
+ drm_for_each_encoder(encoder, dev) {
num_encoders++;
+ if (catalog->cwb_count > 0)
+ encoder->possible_clones = dpu_encoder_get_clones(encoder);
+ }
max_crtc_count = min(catalog->mixer_count, num_encoders);
@@ -1062,6 +1078,7 @@ static const struct msm_kms_funcs kms_funcs = {
.irq = dpu_core_irq,
.enable_commit = dpu_kms_enable_commit,
.disable_commit = dpu_kms_disable_commit,
+ .check_mode_changed = dpu_kms_check_mode_changed,
.flush_commit = dpu_kms_flush_commit,
.wait_flush = dpu_kms_wait_flush,
.complete_commit = dpu_kms_complete_commit,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 547cdb2c0c78..a57ec2ec1060 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -124,14 +124,15 @@ struct dpu_global_state {
struct dpu_rm *rm;
- uint32_t pingpong_to_enc_id[PINGPONG_MAX - PINGPONG_0];
- uint32_t mixer_to_enc_id[LM_MAX - LM_0];
- uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
- uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0];
- uint32_t dsc_to_enc_id[DSC_MAX - DSC_0];
- uint32_t cdm_to_enc_id;
+ uint32_t pingpong_to_crtc_id[PINGPONG_MAX - PINGPONG_0];
+ uint32_t mixer_to_crtc_id[LM_MAX - LM_0];
+ uint32_t ctl_to_crtc_id[CTL_MAX - CTL_0];
+ uint32_t dspp_to_crtc_id[DSPP_MAX - DSPP_0];
+ uint32_t dsc_to_crtc_id[DSC_MAX - DSC_0];
+ uint32_t cdm_to_crtc_id;
uint32_t sspp_to_crtc_id[SSPP_MAX - SSPP_NONE];
+ uint32_t cwb_to_crtc_id[CWB_MAX - CWB_0];
};
struct dpu_global_state
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 5baf9df702b8..3efbba425ca6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -22,9 +22,9 @@
static inline bool reserved_by_other(uint32_t *res_map, int idx,
- uint32_t enc_id)
+ uint32_t crtc_id)
{
- return res_map[idx] && res_map[idx] != enc_id;
+ return res_map[idx] && res_map[idx] != crtc_id;
}
/**
@@ -233,13 +233,66 @@ static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
return -EINVAL;
}
+static int _dpu_rm_reserve_cwb_mux_and_pingpongs(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ uint32_t crtc_id,
+ struct msm_display_topology *topology)
+{
+ int num_cwb_mux = topology->num_lm, cwb_mux_count = 0;
+ int cwb_pp_start_idx = PINGPONG_CWB_0 - PINGPONG_0;
+ int cwb_pp_idx[MAX_BLOCKS];
+ int cwb_mux_idx[MAX_BLOCKS];
+
+ /*
+ * Reserve additional dedicated CWB PINGPONG blocks and muxes for each
+ * mixer
+ *
+ * TODO: add support for reserving resources for platforms with no
+ * PINGPONG_CWB
+ */
+ for (int i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
+ cwb_mux_count < num_cwb_mux; i++) {
+ for (int j = 0; j < ARRAY_SIZE(rm->cwb_blks); j++) {
+ /*
+ * Odd LMs must be assigned to odd CWB muxes and even
+ * LMs to even CWB muxes.
+ *
+ * Since the RM HW block array index is based on the HW
+ * block ids, we can also use the array index to enforce
+ * the odd/even rule. See dpu_rm_init() for more
+ * information
+ */
+ if (reserved_by_other(global_state->cwb_to_crtc_id, j, crtc_id) ||
+ i % 2 != j % 2)
+ continue;
+
+ cwb_mux_idx[cwb_mux_count] = j;
+ cwb_pp_idx[cwb_mux_count] = j + cwb_pp_start_idx;
+ cwb_mux_count++;
+ break;
+ }
+ }
+
+ if (cwb_mux_count != num_cwb_mux) {
+ DPU_ERROR("Unable to reserve all CWB PINGPONGs\n");
+ return -ENAVAIL;
+ }
+
+ for (int i = 0; i < cwb_mux_count; i++) {
+ global_state->pingpong_to_crtc_id[cwb_pp_idx[i]] = crtc_id;
+ global_state->cwb_to_crtc_id[cwb_mux_idx[i]] = crtc_id;
+ }
+
+ return 0;
+}
+
/**
* _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
* proposed use case requirements, incl. hardwired dependent blocks like
* pingpong
* @rm: dpu resource manager handle
* @global_state: resources shared across multiple kms objects
- * @enc_id: encoder id requesting for allocation
+ * @crtc_id: crtc id requesting the allocation
* @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
* if lm, and all other hardwired blocks connected to the lm (pp) is
* available and appropriate
@@ -252,14 +305,14 @@ static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
*/
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
+ uint32_t crtc_id, int lm_idx, int *pp_idx, int *dspp_idx,
struct msm_display_topology *topology)
{
const struct dpu_lm_cfg *lm_cfg;
int idx;
/* Already reserved? */
- if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
+ if (reserved_by_other(global_state->mixer_to_crtc_id, lm_idx, crtc_id)) {
DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
return false;
}
@@ -271,7 +324,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
return false;
}
- if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
+ if (reserved_by_other(global_state->pingpong_to_crtc_id, idx, crtc_id)) {
DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
lm_cfg->pingpong);
return false;
@@ -287,7 +340,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
return false;
}
- if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) {
+ if (reserved_by_other(global_state->dspp_to_crtc_id, idx, crtc_id)) {
DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
lm_cfg->dspp);
return false;
@@ -299,7 +352,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id,
+ uint32_t crtc_id,
struct msm_display_topology *topology)
{
@@ -323,7 +376,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
lm_idx[lm_count] = i;
if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
- enc_id, i, &pp_idx[lm_count],
+ crtc_id, i, &pp_idx[lm_count],
&dspp_idx[lm_count], topology)) {
continue;
}
@@ -342,7 +395,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
continue;
if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
- global_state, enc_id, j,
+ global_state, crtc_id, j,
&pp_idx[lm_count], &dspp_idx[lm_count],
topology)) {
continue;
@@ -359,12 +412,12 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
}
for (i = 0; i < lm_count; i++) {
- global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
- global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
- global_state->dspp_to_enc_id[dspp_idx[i]] =
- topology->num_dspp ? enc_id : 0;
+ global_state->mixer_to_crtc_id[lm_idx[i]] = crtc_id;
+ global_state->pingpong_to_crtc_id[pp_idx[i]] = crtc_id;
+ global_state->dspp_to_crtc_id[dspp_idx[i]] =
+ topology->num_dspp ? crtc_id : 0;
- trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
+ trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, crtc_id,
pp_idx[i] + PINGPONG_0);
}
@@ -374,15 +427,25 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
static int _dpu_rm_reserve_ctls(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id,
+ uint32_t crtc_id,
const struct msm_display_topology *top)
{
int ctl_idx[MAX_BLOCKS];
int i = 0, j, num_ctls;
bool needs_split_display;
- /* each hw_intf needs its own hw_ctrl to program its control path */
- num_ctls = top->num_intf;
+ /*
+ * For non-CWB mode, each hw_intf needs its own hw_ctl to program its
+ * control path.
+ *
+ * Hardcode num_ctls to 1 if CWB is enabled because in CWB, both the
+ * writeback and real-time encoders must be driven by the same control
+ * path
+ */
+ if (top->cwb_enabled)
+ num_ctls = 1;
+ else
+ num_ctls = top->num_intf;
needs_split_display = _dpu_rm_needs_split_display(top);
@@ -393,7 +456,7 @@ static int _dpu_rm_reserve_ctls(
if (!rm->ctl_blks[j])
continue;
- if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
+ if (reserved_by_other(global_state->ctl_to_crtc_id, j, crtc_id))
continue;
ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
@@ -417,8 +480,8 @@ static int _dpu_rm_reserve_ctls(
return -ENAVAIL;
for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
- global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
- trace_dpu_rm_reserve_ctls(i + CTL_0, enc_id);
+ global_state->ctl_to_crtc_id[ctl_idx[i]] = crtc_id;
+ trace_dpu_rm_reserve_ctls(i + CTL_0, crtc_id);
}
return 0;
@@ -426,12 +489,12 @@ static int _dpu_rm_reserve_ctls(
static int _dpu_rm_pingpong_next_index(struct dpu_global_state *global_state,
int start,
- uint32_t enc_id)
+ uint32_t crtc_id)
{
int i;
for (i = start; i < (PINGPONG_MAX - PINGPONG_0); i++) {
- if (global_state->pingpong_to_enc_id[i] == enc_id)
+ if (global_state->pingpong_to_crtc_id[i] == crtc_id)
return i;
}
@@ -452,7 +515,7 @@ static int _dpu_rm_pingpong_dsc_check(int dsc_idx, int pp_idx)
static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id,
+ uint32_t crtc_id,
const struct msm_display_topology *top)
{
int num_dsc = 0;
@@ -465,10 +528,10 @@ static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
if (!rm->dsc_blks[dsc_idx])
continue;
- if (reserved_by_other(global_state->dsc_to_enc_id, dsc_idx, enc_id))
+ if (reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx, crtc_id))
continue;
- pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, enc_id);
+ pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, crtc_id);
if (pp_idx < 0)
return -ENAVAIL;
@@ -476,7 +539,7 @@ static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
if (ret)
return -ENAVAIL;
- global_state->dsc_to_enc_id[dsc_idx] = enc_id;
+ global_state->dsc_to_crtc_id[dsc_idx] = crtc_id;
num_dsc++;
pp_idx++;
}
@@ -492,7 +555,7 @@ static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id,
+ uint32_t crtc_id,
const struct msm_display_topology *top)
{
int num_dsc = 0;
@@ -507,11 +570,11 @@ static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
continue;
/* consective dsc index to be paired */
- if (reserved_by_other(global_state->dsc_to_enc_id, dsc_idx, enc_id) ||
- reserved_by_other(global_state->dsc_to_enc_id, dsc_idx + 1, enc_id))
+ if (reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx, crtc_id) ||
+ reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx + 1, crtc_id))
continue;
- pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, enc_id);
+ pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, crtc_id);
if (pp_idx < 0)
return -ENAVAIL;
@@ -521,7 +584,7 @@ static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
continue;
}
- pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx + 1, enc_id);
+ pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx + 1, crtc_id);
if (pp_idx < 0)
return -ENAVAIL;
@@ -531,8 +594,8 @@ static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
continue;
}
- global_state->dsc_to_enc_id[dsc_idx] = enc_id;
- global_state->dsc_to_enc_id[dsc_idx + 1] = enc_id;
+ global_state->dsc_to_crtc_id[dsc_idx] = crtc_id;
+ global_state->dsc_to_crtc_id[dsc_idx + 1] = crtc_id;
num_dsc += 2;
pp_idx++; /* start for next pair */
}
@@ -548,11 +611,9 @@ static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- struct drm_encoder *enc,
+ uint32_t crtc_id,
const struct msm_display_topology *top)
{
- uint32_t enc_id = enc->base.id;
-
if (!top->num_dsc || !top->num_intf)
return 0;
@@ -568,16 +629,17 @@ static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
/* num_dsc should be either 1, 2 or 4 */
if (top->num_dsc > top->num_intf) /* merge mode */
- return _dpu_rm_dsc_alloc_pair(rm, global_state, enc_id, top);
+ return _dpu_rm_dsc_alloc_pair(rm, global_state, crtc_id, top);
else
- return _dpu_rm_dsc_alloc(rm, global_state, enc_id, top);
+ return _dpu_rm_dsc_alloc(rm, global_state, crtc_id, top);
return 0;
}
static int _dpu_rm_reserve_cdm(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- struct drm_encoder *enc)
+ uint32_t crtc_id,
+ int num_cdm)
{
/* try allocating only one CDM block */
if (!rm->cdm_blk) {
@@ -585,12 +647,17 @@ static int _dpu_rm_reserve_cdm(struct dpu_rm *rm,
return -EIO;
}
- if (global_state->cdm_to_enc_id) {
+ if (num_cdm > 1) {
+ DPU_ERROR("More than 1 INTF requesting CDM\n");
+ return -EINVAL;
+ }
+
+ if (global_state->cdm_to_crtc_id) {
DPU_ERROR("CDM_0 is already allocated\n");
return -EIO;
}
- global_state->cdm_to_enc_id = enc->base.id;
+ global_state->cdm_to_crtc_id = crtc_id;
return 0;
}
@@ -598,30 +665,37 @@ static int _dpu_rm_reserve_cdm(struct dpu_rm *rm,
static int _dpu_rm_make_reservation(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
- struct drm_encoder *enc,
+ uint32_t crtc_id,
struct msm_display_topology *topology)
{
int ret;
- ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, topology);
+ ret = _dpu_rm_reserve_lms(rm, global_state, crtc_id, topology);
if (ret) {
DPU_ERROR("unable to find appropriate mixers\n");
return ret;
}
- ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
+ if (topology->cwb_enabled) {
+ ret = _dpu_rm_reserve_cwb_mux_and_pingpongs(rm, global_state,
+ crtc_id, topology);
+ if (ret)
+ return ret;
+ }
+
+ ret = _dpu_rm_reserve_ctls(rm, global_state, crtc_id,
topology);
if (ret) {
DPU_ERROR("unable to find appropriate CTL\n");
return ret;
}
- ret = _dpu_rm_reserve_dsc(rm, global_state, enc, topology);
+ ret = _dpu_rm_reserve_dsc(rm, global_state, crtc_id, topology);
if (ret)
return ret;
- if (topology->needs_cdm) {
- ret = _dpu_rm_reserve_cdm(rm, global_state, enc);
+ if (topology->num_cdm > 0) {
+ ret = _dpu_rm_reserve_cdm(rm, global_state, crtc_id, topology->num_cdm);
if (ret) {
DPU_ERROR("unable to find CDM blk\n");
return ret;
@@ -632,12 +706,12 @@ static int _dpu_rm_make_reservation(
}
static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
- uint32_t enc_id)
+ uint32_t crtc_id)
{
int i;
for (i = 0; i < cnt; i++) {
- if (res_mapping[i] == enc_id)
+ if (res_mapping[i] == crtc_id)
res_mapping[i] = 0;
}
}
@@ -646,23 +720,27 @@ static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
* dpu_rm_release - Given the encoder for the display chain, release any
* HW blocks previously reserved for that use case.
* @global_state: resources shared across multiple kms objects
- * @enc: DRM Encoder handle
+ * @crtc: DRM CRTC handle
* @return: 0 on Success otherwise -ERROR
*/
void dpu_rm_release(struct dpu_global_state *global_state,
- struct drm_encoder *enc)
+ struct drm_crtc *crtc)
{
- _dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
- ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
- _dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
- ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
- _dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
- ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
- _dpu_rm_clear_mapping(global_state->dsc_to_enc_id,
- ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
- _dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
- ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
- _dpu_rm_clear_mapping(&global_state->cdm_to_enc_id, 1, enc->base.id);
+ uint32_t crtc_id = crtc->base.id;
+
+ _dpu_rm_clear_mapping(global_state->pingpong_to_crtc_id,
+ ARRAY_SIZE(global_state->pingpong_to_crtc_id), crtc_id);
+ _dpu_rm_clear_mapping(global_state->mixer_to_crtc_id,
+ ARRAY_SIZE(global_state->mixer_to_crtc_id), crtc_id);
+ _dpu_rm_clear_mapping(global_state->ctl_to_crtc_id,
+ ARRAY_SIZE(global_state->ctl_to_crtc_id), crtc_id);
+ _dpu_rm_clear_mapping(global_state->dsc_to_crtc_id,
+ ARRAY_SIZE(global_state->dsc_to_crtc_id), crtc_id);
+ _dpu_rm_clear_mapping(global_state->dspp_to_crtc_id,
+ ARRAY_SIZE(global_state->dspp_to_crtc_id), crtc_id);
+ _dpu_rm_clear_mapping(&global_state->cdm_to_crtc_id, 1, crtc_id);
+ _dpu_rm_clear_mapping(global_state->cwb_to_crtc_id,
+ ARRAY_SIZE(global_state->cwb_to_crtc_id), crtc_id);
}
/**
@@ -674,42 +752,33 @@ void dpu_rm_release(struct dpu_global_state *global_state,
* HW Reservations should be released via dpu_rm_release_hw.
* @rm: DPU Resource Manager handle
* @global_state: resources shared across multiple kms objects
- * @enc: DRM Encoder handle
- * @crtc_state: Proposed Atomic DRM CRTC State handle
+ * @crtc: DRM CRTC handle
* @topology: Pointer to topology info for the display
* @return: 0 on Success otherwise -ERROR
*/
int dpu_rm_reserve(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
- struct drm_encoder *enc,
- struct drm_crtc_state *crtc_state,
+ struct drm_crtc *crtc,
struct msm_display_topology *topology)
{
int ret;
- /* Check if this is just a page-flip */
- if (!drm_atomic_crtc_needs_modeset(crtc_state))
- return 0;
-
if (IS_ERR(global_state)) {
DPU_ERROR("failed to global state\n");
return PTR_ERR(global_state);
}
- DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
- enc->base.id, crtc_state->crtc->base.id);
+ DRM_DEBUG_KMS("reserving hw for crtc %d\n", crtc->base.id);
DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n",
topology->num_lm, topology->num_dsc,
topology->num_intf);
- ret = _dpu_rm_make_reservation(rm, global_state, enc, topology);
+ ret = _dpu_rm_make_reservation(rm, global_state, crtc->base.id, topology);
if (ret)
DPU_ERROR("failed to reserve hw resources: %d\n", ret);
-
-
return ret;
}
@@ -800,50 +869,57 @@ void dpu_rm_release_all_sspp(struct dpu_global_state *global_state,
* assigned to this encoder
* @rm: DPU Resource Manager handle
* @global_state: resources shared across multiple kms objects
- * @enc_id: encoder id requesting for allocation
+ * @crtc: DRM CRTC handle
* @type: resource type to return data for
* @blks: pointer to the array to be filled by HW resources
* @blks_size: size of the @blks array
*/
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
- struct dpu_global_state *global_state, uint32_t enc_id,
+ struct dpu_global_state *global_state, struct drm_crtc *crtc,
enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
{
+ uint32_t crtc_id = crtc->base.id;
struct dpu_hw_blk **hw_blks;
- uint32_t *hw_to_enc_id;
+ uint32_t *hw_to_crtc_id;
int i, num_blks, max_blks;
switch (type) {
case DPU_HW_BLK_PINGPONG:
+ case DPU_HW_BLK_DCWB_PINGPONG:
hw_blks = rm->pingpong_blks;
- hw_to_enc_id = global_state->pingpong_to_enc_id;
+ hw_to_crtc_id = global_state->pingpong_to_crtc_id;
max_blks = ARRAY_SIZE(rm->pingpong_blks);
break;
case DPU_HW_BLK_LM:
hw_blks = rm->mixer_blks;
- hw_to_enc_id = global_state->mixer_to_enc_id;
+ hw_to_crtc_id = global_state->mixer_to_crtc_id;
max_blks = ARRAY_SIZE(rm->mixer_blks);
break;
case DPU_HW_BLK_CTL:
hw_blks = rm->ctl_blks;
- hw_to_enc_id = global_state->ctl_to_enc_id;
+ hw_to_crtc_id = global_state->ctl_to_crtc_id;
max_blks = ARRAY_SIZE(rm->ctl_blks);
break;
case DPU_HW_BLK_DSPP:
hw_blks = rm->dspp_blks;
- hw_to_enc_id = global_state->dspp_to_enc_id;
+ hw_to_crtc_id = global_state->dspp_to_crtc_id;
max_blks = ARRAY_SIZE(rm->dspp_blks);
break;
case DPU_HW_BLK_DSC:
hw_blks = rm->dsc_blks;
- hw_to_enc_id = global_state->dsc_to_enc_id;
+ hw_to_crtc_id = global_state->dsc_to_crtc_id;
max_blks = ARRAY_SIZE(rm->dsc_blks);
break;
case DPU_HW_BLK_CDM:
hw_blks = &rm->cdm_blk;
- hw_to_enc_id = &global_state->cdm_to_enc_id;
+ hw_to_crtc_id = &global_state->cdm_to_crtc_id;
max_blks = 1;
break;
+ case DPU_HW_BLK_CWB:
+ hw_blks = rm->cwb_blks;
+ hw_to_crtc_id = global_state->cwb_to_crtc_id;
+ max_blks = ARRAY_SIZE(rm->cwb_blks);
+ break;
default:
DPU_ERROR("blk type %d not managed by rm\n", type);
return 0;
@@ -851,17 +927,31 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
num_blks = 0;
for (i = 0; i < max_blks; i++) {
- if (hw_to_enc_id[i] != enc_id)
+ if (hw_to_crtc_id[i] != crtc_id)
continue;
+ if (type == DPU_HW_BLK_PINGPONG) {
+ struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(hw_blks[i]);
+
+ if (pp->idx >= PINGPONG_CWB_0)
+ continue;
+ }
+
+ if (type == DPU_HW_BLK_DCWB_PINGPONG) {
+ struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(hw_blks[i]);
+
+ if (pp->idx < PINGPONG_CWB_0)
+ continue;
+ }
+
if (num_blks == blks_size) {
- DPU_ERROR("More than %d resources assigned to enc %d\n",
- blks_size, enc_id);
+ DPU_ERROR("More than %d resources assigned to crtc %d\n",
+ blks_size, crtc_id);
break;
}
if (!hw_blks[i]) {
- DPU_ERROR("Allocated resource %d unavailable to assign to enc %d\n",
- type, enc_id);
+ DPU_ERROR("Allocated resource %d unavailable to assign to crtc %d\n",
+ type, crtc_id);
break;
}
blks[num_blks++] = hw_blks[i];
@@ -896,38 +986,38 @@ void dpu_rm_print_state(struct drm_printer *p,
drm_puts(p, "resource mapping:\n");
drm_puts(p, "\tpingpong=");
- for (i = 0; i < ARRAY_SIZE(global_state->pingpong_to_enc_id); i++)
+ for (i = 0; i < ARRAY_SIZE(global_state->pingpong_to_crtc_id); i++)
dpu_rm_print_state_helper(p, rm->pingpong_blks[i],
- global_state->pingpong_to_enc_id[i]);
+ global_state->pingpong_to_crtc_id[i]);
drm_puts(p, "\n");
drm_puts(p, "\tmixer=");
- for (i = 0; i < ARRAY_SIZE(global_state->mixer_to_enc_id); i++)
+ for (i = 0; i < ARRAY_SIZE(global_state->mixer_to_crtc_id); i++)
dpu_rm_print_state_helper(p, rm->mixer_blks[i],
- global_state->mixer_to_enc_id[i]);
+ global_state->mixer_to_crtc_id[i]);
drm_puts(p, "\n");
drm_puts(p, "\tctl=");
- for (i = 0; i < ARRAY_SIZE(global_state->ctl_to_enc_id); i++)
+ for (i = 0; i < ARRAY_SIZE(global_state->ctl_to_crtc_id); i++)
dpu_rm_print_state_helper(p, rm->ctl_blks[i],
- global_state->ctl_to_enc_id[i]);
+ global_state->ctl_to_crtc_id[i]);
drm_puts(p, "\n");
drm_puts(p, "\tdspp=");
- for (i = 0; i < ARRAY_SIZE(global_state->dspp_to_enc_id); i++)
+ for (i = 0; i < ARRAY_SIZE(global_state->dspp_to_crtc_id); i++)
dpu_rm_print_state_helper(p, rm->dspp_blks[i],
- global_state->dspp_to_enc_id[i]);
+ global_state->dspp_to_crtc_id[i]);
drm_puts(p, "\n");
drm_puts(p, "\tdsc=");
- for (i = 0; i < ARRAY_SIZE(global_state->dsc_to_enc_id); i++)
+ for (i = 0; i < ARRAY_SIZE(global_state->dsc_to_crtc_id); i++)
dpu_rm_print_state_helper(p, rm->dsc_blks[i],
- global_state->dsc_to_enc_id[i]);
+ global_state->dsc_to_crtc_id[i]);
drm_puts(p, "\n");
drm_puts(p, "\tcdm=");
dpu_rm_print_state_helper(p, rm->cdm_blk,
- global_state->cdm_to_enc_id);
+ global_state->cdm_to_crtc_id);
drm_puts(p, "\n");
drm_puts(p, "\tsspp=");
@@ -936,4 +1026,10 @@ void dpu_rm_print_state(struct drm_printer *p,
dpu_rm_print_state_helper(p, rm->hw_sspp[i] ? &rm->hw_sspp[i]->base : NULL,
global_state->sspp_to_crtc_id[i]);
drm_puts(p, "\n");
+
+ drm_puts(p, "\tcwb=");
+ for (i = 0; i < ARRAY_SIZE(global_state->cwb_to_crtc_id); i++)
+ dpu_rm_print_state_helper(p, rm->cwb_blks[i],
+ global_state->cwb_to_crtc_id[i]);
+ drm_puts(p, "\n");
}
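
The parity condition buried in _dpu_rm_reserve_cwb_mux_and_pingpongs() above (i % 2 != j % 2) is easiest to see on plain arrays. The sketch below replays just that pairing rule with made-up block counts and a dummy CRTC id; it illustrates the constraint only and is not the driver's reservation code.

/* Stand-alone replay of the odd/even LM <-> CWB mux pairing constraint. */
#include <stdio.h>

#define NUM_LM   4	/* made-up mixer count */
#define NUM_CWB  2	/* made-up CWB mux count */

int main(void)
{
	unsigned int cwb_owner[NUM_CWB] = { 0 };	/* 0 == free, else CRTC id */
	unsigned int crtc_id = 42;			/* dummy requesting CRTC */
	int num_needed = 2, found = 0;

	for (int i = 0; i < NUM_LM && found < num_needed; i++) {
		for (int j = 0; j < NUM_CWB; j++) {
			/* skip muxes owned by another CRTC or with the wrong parity */
			if ((cwb_owner[j] && cwb_owner[j] != crtc_id) ||
			    i % 2 != j % 2)
				continue;
			cwb_owner[j] = crtc_id;
			printf("LM %d -> CWB mux %d\n", i, j);
			found++;
			break;
		}
	}
	return found == num_needed ? 0 : 1;
}
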
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index 99bd594ee0d1..a19dbdb1b6f4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -51,14 +51,17 @@ struct dpu_rm_sspp_requirements {
* @num_intf: number of interfaces the panel is mounted on
* @num_dspp: number of dspp blocks used
* @num_dsc: number of Display Stream Compression (DSC) blocks used
- * @needs_cdm: indicates whether cdm block is needed for this display topology
+ * @num_cdm: indicates how many outputs are requesting the CDM block for
+ * this display topology
+ * @cwb_enabled: indicates whether CWB is enabled for this display topology
*/
struct msm_display_topology {
u32 num_lm;
u32 num_intf;
u32 num_dspp;
u32 num_dsc;
- bool needs_cdm;
+ int num_cdm;
+ bool cwb_enabled;
};
int dpu_rm_init(struct drm_device *dev,
@@ -69,12 +72,11 @@ int dpu_rm_init(struct drm_device *dev,
int dpu_rm_reserve(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- struct drm_encoder *drm_enc,
- struct drm_crtc_state *crtc_state,
+ struct drm_crtc *crtc,
struct msm_display_topology *topology);
void dpu_rm_release(struct dpu_global_state *global_state,
- struct drm_encoder *enc);
+ struct drm_crtc *crtc);
struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,
struct dpu_global_state *global_state,
@@ -85,7 +87,7 @@ void dpu_rm_release_all_sspp(struct dpu_global_state *global_state,
struct drm_crtc *crtc);
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
- struct dpu_global_state *global_state, uint32_t enc_id,
+ struct dpu_global_state *global_state, struct drm_crtc *crtc,
enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size);
void dpu_rm_print_state(struct drm_printer *p,
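
Tying the dpu_rm.h changes together: needs_cdm became a count (num_cdm) so the reservation code can reject topologies where more than one output wants the single CDM block, and cwb_enabled lets it collapse to one control path. A compact illustration of those two decisions on a stand-alone struct; field names follow the header above, the values are invented.

/* Stand-alone illustration of the num_cdm / cwb_enabled decisions above. */
#include <stdio.h>

struct display_topology {
	unsigned int num_lm;
	unsigned int num_intf;
	int num_cdm;		/* outputs requesting the single CDM block */
	int cwb_enabled;	/* concurrent writeback requested */
};

static int plan_reservation(const struct display_topology *top)
{
	/* CWB forces one shared control path; otherwise one CTL per interface. */
	int num_ctls = top->cwb_enabled ? 1 : (int)top->num_intf;

	if (top->num_cdm > 1) {
		fprintf(stderr, "more than one output requesting CDM\n");
		return -1;
	}

	printf("num_ctls=%d num_cdm=%d\n", num_ctls, top->num_cdm);
	return 0;
}

int main(void)
{
	struct display_topology top = {
		.num_lm = 2, .num_intf = 1, .num_cdm = 1, .cwb_enabled = 1,
	};

	return plan_reservation(&top) ? 1 : 0;
}
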
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
index 666de99a46a5..fc183fe37f56 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
@@ -3,6 +3,7 @@
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
*/
+#include <linux/string_choices.h>
#include "mdp5_kms.h"
#include "mdp5_ctl.h"
@@ -233,7 +234,7 @@ int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
return -EINVAL;
ctl->encoder_enabled = enabled;
- DBG("intf_%d: %s", intf->num, enabled ? "on" : "off");
+ DBG("intf_%d: %s", intf->num, str_on_off(enabled));
if (start_signal_needed(ctl, pipeline)) {
send_start_signal(ctl);
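
This hunk and the following DP ones swap the open-coded cond ? "on" : "off" ternaries for the helpers from <linux/string_choices.h>. Outside the kernel the helpers reduce to the trivial stand-ins below, shown only to make the log-format changes easy to read; the kernel versions behave the same way.

/* Userspace stand-ins for the str_on_off()/str_true_false() helpers used above. */
#include <stdbool.h>
#include <stdio.h>

static inline const char *str_on_off(bool v)     { return v ? "on" : "off"; }
static inline const char *str_true_false(bool v) { return v ? "true" : "false"; }

int main(void)
{
	bool link_ready = true, core_clks_on = false;

	printf("link_ready = %s\n", str_true_false(link_ready));
	printf("core_clks:%s\n", str_on_off(core_clks_on));
	return 0;
}
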
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 9c463ae2f8fa..d8633a596f8d 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -11,6 +11,7 @@
#include <linux/phy/phy.h>
#include <linux/phy/phy-dp.h>
#include <linux/pm_opp.h>
+#include <linux/string_choices.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_fixed.h>
@@ -1366,9 +1367,9 @@ int msm_dp_ctrl_core_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "enable core clocks \n");
drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n",
- ctrl->stream_clks_on ? "on" : "off",
- ctrl->link_clks_on ? "on" : "off",
- ctrl->core_clks_on ? "on" : "off");
+ str_on_off(ctrl->stream_clks_on),
+ str_on_off(ctrl->link_clks_on),
+ str_on_off(ctrl->core_clks_on));
return 0;
}
@@ -1385,9 +1386,9 @@ void msm_dp_ctrl_core_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "disable core clocks \n");
drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n",
- ctrl->stream_clks_on ? "on" : "off",
- ctrl->link_clks_on ? "on" : "off",
- ctrl->core_clks_on ? "on" : "off");
+ str_on_off(ctrl->stream_clks_on),
+ str_on_off(ctrl->link_clks_on),
+ str_on_off(ctrl->core_clks_on));
}
static int msm_dp_ctrl_link_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl)
@@ -1416,9 +1417,9 @@ static int msm_dp_ctrl_link_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "enable link clocks\n");
drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n",
- ctrl->stream_clks_on ? "on" : "off",
- ctrl->link_clks_on ? "on" : "off",
- ctrl->core_clks_on ? "on" : "off");
+ str_on_off(ctrl->stream_clks_on),
+ str_on_off(ctrl->link_clks_on),
+ str_on_off(ctrl->core_clks_on));
return 0;
}
@@ -1435,9 +1436,9 @@ static void msm_dp_ctrl_link_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "disabled link clocks\n");
drm_dbg_dp(ctrl->drm_dev, "stream_clks:%s link_clks:%s core_clks:%s\n",
- ctrl->stream_clks_on ? "on" : "off",
- ctrl->link_clks_on ? "on" : "off",
- ctrl->core_clks_on ? "on" : "off");
+ str_on_off(ctrl->stream_clks_on),
+ str_on_off(ctrl->link_clks_on),
+ str_on_off(ctrl->core_clks_on));
}
static int msm_dp_ctrl_enable_mainlink_clocks(struct msm_dp_ctrl_private *ctrl)
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 9aa8bbd73d86..bbc47d86ae9e 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -11,6 +11,7 @@
#include <linux/of_irq.h>
#include <linux/phy/phy.h>
#include <linux/delay.h>
+#include <linux/string_choices.h>
#include <drm/display/drm_dp_aux_bus.h>
#include <drm/drm_edid.h>
@@ -343,8 +344,7 @@ static int msm_dp_display_send_hpd_notification(struct msm_dp_display_private *d
{
if ((hpd && dp->msm_dp_display.link_ready) ||
(!hpd && !dp->msm_dp_display.link_ready)) {
- drm_dbg_dp(dp->drm_dev, "HPD already %s\n",
- (hpd ? "on" : "off"));
+ drm_dbg_dp(dp->drm_dev, "HPD already %s\n", str_on_off(hpd));
return 0;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 022b3e815cf3..cca57e56c906 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -3,6 +3,7 @@
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
+#include <linux/string_choices.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>
@@ -25,7 +26,7 @@ static enum drm_connector_status msm_dp_bridge_detect(struct drm_bridge *bridge)
dp = to_dp_bridge(bridge)->msm_dp_display;
drm_dbg_dp(dp->drm_dev, "link_ready = %s\n",
- (dp->link_ready) ? "true" : "false");
+ str_true_false(dp->link_ready));
return (dp->link_ready) ? connector_status_connected :
connector_status_disconnected;
@@ -41,7 +42,7 @@ static int msm_dp_bridge_atomic_check(struct drm_bridge *bridge,
dp = to_dp_bridge(bridge)->msm_dp_display;
drm_dbg_dp(dp->drm_dev, "link_ready = %s\n",
- (dp->link_ready) ? "true" : "false");
+ str_true_false(dp->link_ready));
/*
* There is no protection in the DRM framework to check if the display
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 007311c21fda..4d75529c0e85 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -179,18 +179,18 @@ struct msm_dsi_host {
int irq;
};
-
static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
{
return readl(msm_host->ctrl_base + reg);
}
+
static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
{
writel(data, msm_host->ctrl_base + reg);
}
-static const struct msm_dsi_cfg_handler *dsi_get_config(
- struct msm_dsi_host *msm_host)
+static const struct msm_dsi_cfg_handler *
+dsi_get_config(struct msm_dsi_host *msm_host)
{
const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
struct device *dev = &msm_host->pdev->dev;
@@ -200,7 +200,8 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
ahb_clk = msm_clk_get(msm_host->pdev, "iface");
if (IS_ERR(ahb_clk)) {
- pr_err("%s: cannot get interface clock\n", __func__);
+ dev_err_probe(dev, PTR_ERR(ahb_clk), "%s: cannot get interface clock\n",
+ __func__);
goto exit;
}
@@ -208,13 +209,13 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
ret = clk_prepare_enable(ahb_clk);
if (ret) {
- pr_err("%s: unable to enable ahb_clk\n", __func__);
+ dev_err_probe(dev, ret, "%s: unable to enable ahb_clk\n", __func__);
goto runtime_put;
}
ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
if (ret) {
- pr_err("%s: Invalid version\n", __func__);
+ dev_err_probe(dev, ret, "%s: Invalid version\n", __func__);
goto disable_clks;
}
@@ -281,42 +282,31 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
msm_host->num_bus_clks = cfg->num_bus_clks;
ret = devm_clk_bulk_get(&pdev->dev, msm_host->num_bus_clks, msm_host->bus_clks);
- if (ret < 0) {
- dev_err(&pdev->dev, "Unable to get clocks, ret = %d\n", ret);
- goto exit;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Unable to get clocks\n");
/* get link and source clocks */
msm_host->byte_clk = msm_clk_get(pdev, "byte");
- if (IS_ERR(msm_host->byte_clk)) {
- ret = PTR_ERR(msm_host->byte_clk);
- pr_err("%s: can't find dsi_byte clock. ret=%d\n",
- __func__, ret);
- msm_host->byte_clk = NULL;
- goto exit;
- }
+ if (IS_ERR(msm_host->byte_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->byte_clk),
+ "%s: can't find dsi_byte clock\n",
+ __func__);
msm_host->pixel_clk = msm_clk_get(pdev, "pixel");
- if (IS_ERR(msm_host->pixel_clk)) {
- ret = PTR_ERR(msm_host->pixel_clk);
- pr_err("%s: can't find dsi_pixel clock. ret=%d\n",
- __func__, ret);
- msm_host->pixel_clk = NULL;
- goto exit;
- }
+ if (IS_ERR(msm_host->pixel_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->pixel_clk),
+ "%s: can't find dsi_pixel clock\n",
+ __func__);
msm_host->esc_clk = msm_clk_get(pdev, "core");
- if (IS_ERR(msm_host->esc_clk)) {
- ret = PTR_ERR(msm_host->esc_clk);
- pr_err("%s: can't find dsi_esc clock. ret=%d\n",
- __func__, ret);
- msm_host->esc_clk = NULL;
- goto exit;
- }
+ if (IS_ERR(msm_host->esc_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->esc_clk),
+ "%s: can't find dsi_esc clock\n",
+ __func__);
if (cfg_hnd->ops->clk_init_ver)
ret = cfg_hnd->ops->clk_init_ver(msm_host);
-exit:
+
return ret;
}
@@ -380,7 +370,6 @@ int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
return 0;
}
-
int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
int ret;
@@ -598,7 +587,6 @@ static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
DBG("pclk=%lu, bclk=%lu", msm_host->pixel_clk_rate,
msm_host->byte_clk_rate);
-
}
int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
@@ -687,8 +675,8 @@ static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
return NON_BURST_SYNCH_EVENT;
}
-static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
- const enum mipi_dsi_pixel_format mipi_fmt)
+static inline enum dsi_vid_dst_format
+dsi_get_vid_fmt(const enum mipi_dsi_pixel_format mipi_fmt)
{
switch (mipi_fmt) {
case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888;
@@ -699,8 +687,8 @@ static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
}
}
-static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
- const enum mipi_dsi_pixel_format mipi_fmt)
+static inline enum dsi_cmd_dst_format
+dsi_get_cmd_fmt(const enum mipi_dsi_pixel_format mipi_fmt)
{
switch (mipi_fmt) {
case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
@@ -846,7 +834,7 @@ static void dsi_ctrl_enable(struct msm_dsi_host *msm_host,
dsi_write(msm_host, REG_DSI_CPHY_MODE_CTRL, BIT(0));
}
-static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode, u32 hdisplay)
+static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode)
{
struct drm_dsc_config *dsc = msm_host->dsc;
u32 reg, reg_ctrl, reg_ctrl2;
@@ -858,7 +846,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
/* first calculate dsc parameters and then program
* compress mode registers
*/
- slice_per_intf = msm_dsc_get_slices_per_intf(dsc, hdisplay);
+ slice_per_intf = dsc->slice_count;
total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf;
bytes_per_pkt = dsc->slice_chunk_size; /* * slice_per_pkt; */
@@ -991,7 +979,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
if (msm_host->dsc)
- dsi_update_dsc_timing(msm_host, false, mode->hdisplay);
+ dsi_update_dsc_timing(msm_host, false);
dsi_write(msm_host, REG_DSI_ACTIVE_H,
DSI_ACTIVE_H_START(ha_start) |
@@ -1012,7 +1000,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
} else { /* command mode */
if (msm_host->dsc)
- dsi_update_dsc_timing(msm_host, true, mode->hdisplay);
+ dsi_update_dsc_timing(msm_host, true);
/* image data and 1 byte write_memory_start cmd */
if (!msm_host->dsc)
@@ -1292,14 +1280,15 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
{
u8 *data = msg->rx_buf;
+
if (data && (msg->rx_len >= 1)) {
*data = buf[1]; /* strip out dcs type */
return 1;
- } else {
- pr_err("%s: read data does not match with rx_buf len %zu\n",
- __func__, msg->rx_len);
- return -EINVAL;
}
+
+ pr_err("%s: read data does not match with rx_buf len %zu\n",
+ __func__, msg->rx_len);
+ return -EINVAL;
}
/*
@@ -1308,15 +1297,16 @@ static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
{
u8 *data = msg->rx_buf;
+
if (data && (msg->rx_len >= 2)) {
data[0] = buf[1]; /* strip out dcs type */
data[1] = buf[2];
return 2;
- } else {
- pr_err("%s: read data does not match with rx_buf len %zu\n",
- __func__, msg->rx_len);
- return -EINVAL;
}
+
+ pr_err("%s: read data does not match with rx_buf len %zu\n",
+ __func__, msg->rx_len);
+ return -EINVAL;
}
static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
@@ -1376,8 +1366,9 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
ret = -ETIMEDOUT;
else
ret = len;
- } else
+ } else {
ret = len;
+ }
return ret;
}
@@ -1445,11 +1436,12 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
return len;
}
- /* for video mode, do not send cmds more than
- * one pixel line, since it only transmit it
- * during BLLP.
- */
- /* TODO: if the command is sent in LP mode, the bit rate is only
+ /*
+ * For video mode, do not send cmds longer than
+ * one pixel line, since they are only transmitted
+ * during BLLP.
+ *
+ * TODO: if the command is sent in LP mode, the bit rate is only
* half of esc clk rate. In this case, if the video is already
* actively streaming, we need to check more carefully if the
* command can be fit into one BLLP.
@@ -1767,8 +1759,20 @@ static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc
return -EINVAL;
}
- if (dsc->bits_per_component != 8) {
- DRM_DEV_ERROR(&msm_host->pdev->dev, "DSI does not support bits_per_component != 8 yet\n");
+ switch (dsc->bits_per_component) {
+ case 8:
+ case 10:
+ case 12:
+ /*
+ * Only 8, 10, and 12 bpc are supported for DSC 1.1 block.
+ * If additional bpc values need to be supported, update
+ * this guard with the appropriate DSC version verification.
+ */
+ break;
+ default:
+ DRM_DEV_ERROR(&msm_host->pdev->dev,
+ "Unsupported bits_per_component value: %d\n",
+ dsc->bits_per_component);
return -EOPNOTSUPP;
}
@@ -1779,7 +1783,7 @@ static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc
drm_dsc_set_const_params(dsc);
drm_dsc_set_rc_buf_thresh(dsc);
- /* handle only bpp = bpc = 8, pre-SCR panels */
+ /* DPU supports only pre-SCR panels */
ret = drm_dsc_setup_rc_params(dsc, DRM_DSC_1_1_PRE_SCR);
if (ret) {
DRM_DEV_ERROR(&msm_host->pdev->dev, "could not find DSC RC parameters\n");
@@ -1827,8 +1831,15 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
__func__, ret);
goto err;
}
- if (!ret)
+ if (!ret) {
msm_dsi->te_source = devm_kstrdup(dev, te_source, GFP_KERNEL);
+ if (!msm_dsi->te_source) {
+ DRM_DEV_ERROR(dev, "%s: failed to allocate te_source\n",
+ __func__);
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
ret = 0;
if (of_property_present(np, "syscon-sfpb")) {
@@ -1874,39 +1885,35 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
int ret;
msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
- if (!msm_host) {
+ if (!msm_host)
return -ENOMEM;
- }
msm_host->pdev = pdev;
msm_dsi->host = &msm_host->base;
ret = dsi_host_parse_dt(msm_host);
- if (ret) {
- pr_err("%s: failed to parse dt\n", __func__);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "%s: failed to parse dt\n",
+ __func__);
msm_host->ctrl_base = msm_ioremap_size(pdev, "dsi_ctrl", &msm_host->ctrl_size);
- if (IS_ERR(msm_host->ctrl_base)) {
- pr_err("%s: unable to map Dsi ctrl base\n", __func__);
- return PTR_ERR(msm_host->ctrl_base);
- }
+ if (IS_ERR(msm_host->ctrl_base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(msm_host->ctrl_base),
+ "%s: unable to map Dsi ctrl base\n", __func__);
pm_runtime_enable(&pdev->dev);
msm_host->cfg_hnd = dsi_get_config(msm_host);
- if (!msm_host->cfg_hnd) {
- pr_err("%s: get config failed\n", __func__);
- return -EINVAL;
- }
+ if (!msm_host->cfg_hnd)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "%s: get config failed\n", __func__);
cfg = msm_host->cfg_hnd->cfg;
msm_host->id = dsi_host_get_id(msm_host);
- if (msm_host->id < 0) {
- pr_err("%s: unable to identify DSI host index\n", __func__);
- return msm_host->id;
- }
+ if (msm_host->id < 0)
+ return dev_err_probe(&pdev->dev, msm_host->id,
+ "%s: unable to identify DSI host index\n",
+ __func__);
/* fixup base address by io offset */
msm_host->ctrl_base += cfg->io_offset;
@@ -1918,42 +1925,32 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
return ret;
ret = dsi_clk_init(msm_host);
- if (ret) {
- pr_err("%s: unable to initialize dsi clks\n", __func__);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "%s: unable to initialize dsi clks\n", __func__);
msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
- if (!msm_host->rx_buf) {
- pr_err("%s: alloc rx temp buf failed\n", __func__);
+ if (!msm_host->rx_buf)
return -ENOMEM;
- }
ret = devm_pm_opp_set_clkname(&pdev->dev, "byte");
if (ret)
return ret;
/* OPP table is optional */
ret = devm_pm_opp_of_add_table(&pdev->dev);
- if (ret && ret != -ENODEV) {
- dev_err(&pdev->dev, "invalid OPP table in device tree\n");
- return ret;
- }
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(&pdev->dev, ret, "invalid OPP table in device tree\n");
msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (!msm_host->irq) {
- dev_err(&pdev->dev, "failed to get irq\n");
- return -EINVAL;
- }
+ if (!msm_host->irq)
+ return dev_err_probe(&pdev->dev, -EINVAL, "failed to get irq\n");
/* do not autoenable, will be enabled later */
ret = devm_request_irq(&pdev->dev, msm_host->irq, dsi_host_irq,
IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN,
"dsi_isr", msm_host);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
- msm_host->irq, ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "failed to request IRQ%u\n",
+ msm_host->irq);
init_completion(&msm_host->dma_comp);
init_completion(&msm_host->video_comp);
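
The dsi_host.c conversion above collapses the pr_err()-then-return pairs into single return dev_err_probe(...) statements. The userspace stand-in below mimics only the shape of that helper (log unless the error is a probe deferral, then hand the error code straight back) with a placeholder EPROBE_DEFER value; it is a sketch, not the kernel implementation.

/* Userspace stand-in for the "return dev_err_probe(...)" pattern used above. */
#include <stdarg.h>
#include <stdio.h>

#define EPROBE_DEFER_SKETCH 517	/* placeholder for the kernel's EPROBE_DEFER */

/* Log (unless the probe is merely deferred) and hand the error straight back,
 * so call sites collapse to a single "return dev_err_probe(...);" line. */
static int dev_err_probe_sketch(const char *dev, int err, const char *fmt, ...)
{
	va_list ap;

	if (err != -EPROBE_DEFER_SKETCH) {
		va_start(ap, fmt);
		fprintf(stderr, "%s: error %d: ", dev, err);
		vfprintf(stderr, fmt, ap);
		va_end(ap);
	}
	return err;
}

static int get_iface_clock(void)
{
	return -2;	/* pretend the clock lookup failed */
}

int main(void)
{
	int ret = get_iface_clock();

	if (ret < 0)
		return dev_err_probe_sketch("dsi_host", ret,
					    "cannot get interface clock\n");
	return 0;
}
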
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index a210b7c9e5ca..4fabb01345aa 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -74,17 +74,35 @@ static int dsi_mgr_setup_components(int id)
int ret;
if (!IS_BONDED_DSI()) {
+ /*
+ * Set the usecase before calling msm_dsi_host_register(), which would
+ * otherwise program the PLL source mux based on a default usecase.
+ */
+ msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
+ msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
+
ret = msm_dsi_host_register(msm_dsi->host);
if (ret)
return ret;
-
- msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
- msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
} else if (other_dsi) {
struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ?
msm_dsi : other_dsi;
struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ?
other_dsi : msm_dsi;
+
+ /*
+ * PLL0 is to drive both DSI link clocks in bonded DSI mode.
+ *
+ * Set the usecase before calling msm_dsi_host_register(), which would
+ * otherwise program the PLL source mux based on a default usecase.
+ */
+ msm_dsi_phy_set_usecase(clk_master_dsi->phy,
+ MSM_DSI_PHY_MASTER);
+ msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
+ MSM_DSI_PHY_SLAVE);
+ msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
+ msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy);
+
/* Register slave host first, so that slave DSI device
* has a chance to probe, and do not block the master
* DSI device's probe.
@@ -98,14 +116,6 @@ static int dsi_mgr_setup_components(int id)
ret = msm_dsi_host_register(master_link_dsi->host);
if (ret)
return ret;
-
- /* PLL0 is to drive both 2 DSI link clocks in bonded DSI mode. */
- msm_dsi_phy_set_usecase(clk_master_dsi->phy,
- MSM_DSI_PHY_MASTER);
- msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
- MSM_DSI_PHY_SLAVE);
- msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
- msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy);
}
return 0;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 8985818bb2e0..1925418d9999 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -6,6 +6,7 @@
#ifndef __DSI_PHY_H__
#define __DSI_PHY_H__
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
@@ -84,9 +85,7 @@ struct msm_dsi_dphy_timing {
u8 hs_halfbyte_en_ckln;
};
-#define DSI_BYTE_PLL_CLK 0
-#define DSI_PIXEL_PLL_CLK 1
-#define NUM_PROVIDED_CLKS 2
+#define NUM_PROVIDED_CLKS (DSI_PIXEL_PLL_CLK + 1)
#define DSI_LANE_MAX 5
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index 677c62571811..9812b4d69197 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -3,6 +3,7 @@
* Copyright (c) 2018, The Linux Foundation
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/iopoll.h>
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 2c3cbe0f2870..3a1c8ece6657 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -3,6 +3,7 @@
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 1383e3a4e050..90348a2af3e9 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -3,6 +3,7 @@
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index 5311ab7f3c70..f3643320ff2f 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -3,6 +3,7 @@
* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 798168180c1a..a92decbee5b5 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -3,6 +3,8 @@
* Copyright (c) 2018, The Linux Foundation
*/
+#include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/iopoll.h>
@@ -305,7 +307,7 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *confi
writel(pll->phy->cphy_mode ? 0x00 : 0x10,
base + REG_DSI_7nm_PHY_PLL_CMODE_1);
writel(config->pll_clock_inverters,
- base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS);
+ base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_1);
}
static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -572,11 +574,11 @@ static void dsi_7nm_pll_save_state(struct msm_dsi_phy *phy)
cached->pll_out_div &= 0x3;
cmn_clk_cfg0 = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
- cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
- cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
+ cached->bit_clk_div = FIELD_GET(DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK, cmn_clk_cfg0);
+ cached->pix_clk_div = FIELD_GET(DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK, cmn_clk_cfg0);
cmn_clk_cfg1 = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
- cached->pll_mux = cmn_clk_cfg1 & 0x3;
+ cached->pll_mux = FIELD_GET(DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL__MASK, cmn_clk_cfg1);
DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
pll_7nm->phy->id, cached->pll_out_div, cached->bit_clk_div,
@@ -598,7 +600,8 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
dsi_pll_cmn_clk_cfg0_write(pll_7nm,
DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(cached->bit_clk_div) |
DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(cached->pix_clk_div));
- dsi_pll_cmn_clk_cfg1_update(pll_7nm, 0x3, cached->pll_mux);
+ dsi_pll_cmn_clk_cfg1_update(pll_7nm, DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL__MASK,
+ cached->pll_mux);
ret = dsi_pll_7nm_vco_set_rate(phy->vco_hw,
pll_7nm->vco_current_rate,
@@ -736,11 +739,9 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
* don't register a pclk_mux clock and just use post_out_div instead
*/
if (pll_7nm->phy->cphy_mode) {
- u32 data;
-
- data = readl(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
- writel(data | 3, pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
-
+ dsi_pll_cmn_clk_cfg1_update(pll_7nm,
+ DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL__MASK,
+ DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL(3));
phy_pll_out_dsi_parent = pll_post_out_div;
} else {
snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_7nm->phy->id);
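
The save/restore hunks above replace open-coded mask-and-shift arithmetic with FIELD_GET() against named register masks; the field widths can be read off the removed lines (0xf, 0xf0 and 0x3). The sketch below reproduces that extraction in userspace with a simplified FIELD_GET macro, so the equivalence with the old "(reg & 0xf0) >> 4" form is visible; the mask names here are shortened stand-ins for the generated register definitions.

/* Userspace rendition of the FIELD_GET() extraction replacing the old shifts. */
#include <stdint.h>
#include <stdio.h>

#define DIV_CTRL_3_0_MASK 0x0fu	/* bit clock divider field (was "& 0xf")  */
#define DIV_CTRL_7_4_MASK 0xf0u	/* pixel clock divider field (was ">> 4") */
#define DSICLK_SEL_MASK   0x03u	/* PLL output mux select (was "& 0x3")    */

/* Simplified FIELD_GET(): shift the field down by the mask's lowest set bit.
 * Uses a GCC/Clang builtin; the kernel macro does the same job generically. */
#define FIELD_GET_SKETCH(mask, val) (((val) & (mask)) >> __builtin_ctz(mask))

int main(void)
{
	uint32_t cfg0 = 0x34, cfg1 = 0x02;	/* invented register snapshots */

	printf("bit_clk_div=%u pix_clk_div=%u pll_mux=%u\n",
	       FIELD_GET_SKETCH(DIV_CTRL_3_0_MASK, cfg0),
	       FIELD_GET_SKETCH(DIV_CTRL_7_4_MASK, cfg0),
	       FIELD_GET_SKETCH(DSICLK_SEL_MASK, cfg1));
	return 0;
}
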
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 37b3809c6bdd..248541ff4492 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -12,8 +12,8 @@
#include <drm/drm_bridge_connector.h>
#include <drm/drm_of.h>
+#include <drm/display/drm_hdmi_state_helper.h>
-#include <sound/hdmi-codec.h>
#include "hdmi.h"
void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -24,7 +24,7 @@ void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
spin_lock_irqsave(&hdmi->reg_lock, flags);
if (power_on) {
ctrl |= HDMI_CTRL_ENABLE;
- if (!hdmi->hdmi_mode) {
+ if (!hdmi->connector->display_info.is_hdmi) {
ctrl |= HDMI_CTRL_HDMI;
hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
ctrl &= ~HDMI_CTRL_HDMI;
@@ -165,8 +165,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
hdmi->dev = dev;
hdmi->encoder = encoder;
- hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
-
ret = msm_hdmi_bridge_init(hdmi);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to create HDMI bridge: %d\n", ret);
@@ -246,111 +244,6 @@ static const struct hdmi_platform_config hdmi_tx_8974_config = {
.hpd_freq = hpd_clk_freq_8x74,
};
-/*
- * HDMI audio codec callbacks
- */
-static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
-{
- struct hdmi *hdmi = dev_get_drvdata(dev);
- unsigned int chan;
- unsigned int channel_allocation = 0;
- unsigned int rate;
- unsigned int level_shift = 0; /* 0dB */
- bool down_mix = false;
-
- DRM_DEV_DEBUG(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
- params->sample_width, params->cea.channels);
-
- switch (params->cea.channels) {
- case 2:
- /* FR and FL speakers */
- channel_allocation = 0;
- chan = MSM_HDMI_AUDIO_CHANNEL_2;
- break;
- case 4:
- /* FC, LFE, FR and FL speakers */
- channel_allocation = 0x3;
- chan = MSM_HDMI_AUDIO_CHANNEL_4;
- break;
- case 6:
- /* RR, RL, FC, LFE, FR and FL speakers */
- channel_allocation = 0x0B;
- chan = MSM_HDMI_AUDIO_CHANNEL_6;
- break;
- case 8:
- /* FRC, FLC, RR, RL, FC, LFE, FR and FL speakers */
- channel_allocation = 0x1F;
- chan = MSM_HDMI_AUDIO_CHANNEL_8;
- break;
- default:
- return -EINVAL;
- }
-
- switch (params->sample_rate) {
- case 32000:
- rate = HDMI_SAMPLE_RATE_32KHZ;
- break;
- case 44100:
- rate = HDMI_SAMPLE_RATE_44_1KHZ;
- break;
- case 48000:
- rate = HDMI_SAMPLE_RATE_48KHZ;
- break;
- case 88200:
- rate = HDMI_SAMPLE_RATE_88_2KHZ;
- break;
- case 96000:
- rate = HDMI_SAMPLE_RATE_96KHZ;
- break;
- case 176400:
- rate = HDMI_SAMPLE_RATE_176_4KHZ;
- break;
- case 192000:
- rate = HDMI_SAMPLE_RATE_192KHZ;
- break;
- default:
- DRM_DEV_ERROR(dev, "rate[%d] not supported!\n",
- params->sample_rate);
- return -EINVAL;
- }
-
- msm_hdmi_audio_set_sample_rate(hdmi, rate);
- msm_hdmi_audio_info_setup(hdmi, 1, chan, channel_allocation,
- level_shift, down_mix);
-
- return 0;
-}
-
-static void msm_hdmi_audio_shutdown(struct device *dev, void *data)
-{
- struct hdmi *hdmi = dev_get_drvdata(dev);
-
- msm_hdmi_audio_info_setup(hdmi, 0, 0, 0, 0, 0);
-}
-
-static const struct hdmi_codec_ops msm_hdmi_audio_codec_ops = {
- .hw_params = msm_hdmi_audio_hw_params,
- .audio_shutdown = msm_hdmi_audio_shutdown,
-};
-
-static struct hdmi_codec_pdata codec_data = {
- .ops = &msm_hdmi_audio_codec_ops,
- .max_i2s_channels = 8,
- .i2s = 1,
-};
-
-static int msm_hdmi_register_audio_driver(struct hdmi *hdmi, struct device *dev)
-{
- hdmi->audio_pdev = platform_device_register_data(dev,
- HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &codec_data,
- sizeof(codec_data));
- return PTR_ERR_OR_ZERO(hdmi->audio_pdev);
-}
-
static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct msm_drm_private *priv = dev_get_drvdata(master);
@@ -362,12 +255,6 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
return err;
priv->hdmi = hdmi;
- err = msm_hdmi_register_audio_driver(hdmi, dev);
- if (err) {
- DRM_ERROR("Failed to attach an audio codec %d\n", err);
- hdmi->audio_pdev = NULL;
- }
-
return 0;
}
@@ -377,9 +264,6 @@ static void msm_hdmi_unbind(struct device *dev, struct device *master,
struct msm_drm_private *priv = dev_get_drvdata(master);
if (priv->hdmi) {
- if (priv->hdmi->audio_pdev)
- platform_device_unregister(priv->hdmi->audio_pdev);
-
if (priv->hdmi->bridge)
msm_hdmi_hpd_disable(priv->hdmi);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index a62d2aedfbb7..a5f481c39277 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -24,8 +24,8 @@ struct hdmi_platform_config;
struct hdmi_audio {
bool enabled;
- struct hdmi_audio_infoframe infoframe;
int rate;
+ int channels;
};
struct hdmi_hdcp_ctrl;
@@ -33,7 +33,6 @@ struct hdmi_hdcp_ctrl;
struct hdmi {
struct drm_device *dev;
struct platform_device *pdev;
- struct platform_device *audio_pdev;
const struct hdmi_platform_config *config;
@@ -67,8 +66,6 @@ struct hdmi {
/* the encoder we are hooked to (outside of hdmi block) */
struct drm_encoder *encoder;
- bool hdmi_mode; /* are we in hdmi mode? */
-
int irq;
struct workqueue_struct *workq;
@@ -207,26 +204,16 @@ static inline int msm_hdmi_pll_8998_init(struct platform_device *pdev)
/*
* audio:
*/
-/* Supported HDMI Audio channels and rates */
-#define MSM_HDMI_AUDIO_CHANNEL_2 0
-#define MSM_HDMI_AUDIO_CHANNEL_4 1
-#define MSM_HDMI_AUDIO_CHANNEL_6 2
-#define MSM_HDMI_AUDIO_CHANNEL_8 3
-
-#define HDMI_SAMPLE_RATE_32KHZ 0
-#define HDMI_SAMPLE_RATE_44_1KHZ 1
-#define HDMI_SAMPLE_RATE_48KHZ 2
-#define HDMI_SAMPLE_RATE_88_2KHZ 3
-#define HDMI_SAMPLE_RATE_96KHZ 4
-#define HDMI_SAMPLE_RATE_176_4KHZ 5
-#define HDMI_SAMPLE_RATE_192KHZ 6
+struct hdmi_codec_daifmt;
+struct hdmi_codec_params;
int msm_hdmi_audio_update(struct hdmi *hdmi);
-int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
- uint32_t num_of_channels, uint32_t channel_allocation,
- uint32_t level_shift, bool down_mix);
-void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate);
-
+int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params);
+void msm_hdmi_bridge_audio_shutdown(struct drm_connector *connector,
+ struct drm_bridge *bridge);
/*
* hdmi bridge:
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index 4c2058c4adc1..8bb975e82c17 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -4,11 +4,13 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <drm/display/drm_hdmi_state_helper.h>
+
#include <linux/hdmi.h>
-#include "hdmi.h"
-/* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */
-static int nchannels[] = { 2, 4, 6, 8 };
+#include <sound/hdmi-codec.h>
+
+#include "hdmi.h"
/* Supported HDMI Audio sample rates */
#define MSM_HDMI_SAMPLE_RATE_32KHZ 0
@@ -74,16 +76,17 @@ static const struct hdmi_msm_audio_arcs *get_arcs(unsigned long int pixclock)
int msm_hdmi_audio_update(struct hdmi *hdmi)
{
struct hdmi_audio *audio = &hdmi->audio;
- struct hdmi_audio_infoframe *info = &audio->infoframe;
const struct hdmi_msm_audio_arcs *arcs = NULL;
bool enabled = audio->enabled;
uint32_t acr_pkt_ctrl, vbi_pkt_ctrl, aud_pkt_ctrl;
- uint32_t infofrm_ctrl, audio_config;
+ uint32_t audio_config;
+
+ if (!hdmi->connector->display_info.is_hdmi)
+ return -EINVAL;
+
+ DBG("audio: enabled=%d, channels=%d, rate=%d",
+ audio->enabled, audio->channels, audio->rate);
- DBG("audio: enabled=%d, channels=%d, channel_allocation=0x%x, "
- "level_shift_value=%d, downmix_inhibit=%d, rate=%d",
- audio->enabled, info->channels, info->channel_allocation,
- info->level_shift_value, info->downmix_inhibit, audio->rate);
DBG("video: power_on=%d, pixclock=%lu", hdmi->power_on, hdmi->pixclock);
if (enabled && !(hdmi->power_on && hdmi->pixclock)) {
@@ -104,7 +107,6 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
acr_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_ACR_PKT_CTRL);
vbi_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
aud_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_AUDIO_PKT_CTRL1);
- infofrm_ctrl = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
audio_config = hdmi_read(hdmi, REG_HDMI_AUDIO_CFG);
/* Clear N/CTS selection bits */
@@ -113,7 +115,6 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
if (enabled) {
uint32_t n, cts, multiplier;
enum hdmi_acr_cts select;
- uint8_t buf[14];
n = arcs->lut[audio->rate].n;
cts = arcs->lut[audio->rate].cts;
@@ -155,20 +156,12 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
HDMI_ACR_1_N(n));
hdmi_write(hdmi, REG_HDMI_AUDIO_PKT_CTRL2,
- COND(info->channels != 2, HDMI_AUDIO_PKT_CTRL2_LAYOUT) |
+ COND(audio->channels != 2, HDMI_AUDIO_PKT_CTRL2_LAYOUT) |
HDMI_AUDIO_PKT_CTRL2_OVERRIDE);
acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_CONT;
acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SEND;
- /* configure infoframe: */
- hdmi_audio_infoframe_pack(info, buf, sizeof(buf));
- hdmi_write(hdmi, REG_HDMI_AUDIO_INFO0,
- (buf[3] << 0) | (buf[4] << 8) |
- (buf[5] << 16) | (buf[6] << 24));
- hdmi_write(hdmi, REG_HDMI_AUDIO_INFO1,
- (buf[7] << 0) | (buf[8] << 8));
-
hdmi_write(hdmi, REG_HDMI_GC, 0);
vbi_pkt_ctrl |= HDMI_VBI_PKT_CTRL_GC_ENABLE;
@@ -176,11 +169,6 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
aud_pkt_ctrl |= HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND;
- infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND;
- infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT;
- infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE;
- infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
-
audio_config &= ~HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK;
audio_config |= HDMI_AUDIO_CFG_FIFO_WATERMARK(4);
audio_config |= HDMI_AUDIO_CFG_ENGINE_ENABLE;
@@ -190,17 +178,12 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_ENABLE;
vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME;
aud_pkt_ctrl &= ~HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND;
- infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND;
- infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT;
- infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE;
- infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
audio_config &= ~HDMI_AUDIO_CFG_ENGINE_ENABLE;
}
hdmi_write(hdmi, REG_HDMI_ACR_PKT_CTRL, acr_pkt_ctrl);
hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_ctrl);
hdmi_write(hdmi, REG_HDMI_AUDIO_PKT_CTRL1, aud_pkt_ctrl);
- hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, infofrm_ctrl);
hdmi_write(hdmi, REG_HDMI_AUD_INT,
COND(enabled, HDMI_AUD_INT_AUD_FIFO_URUN_INT) |
@@ -214,41 +197,72 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
return 0;
}
-int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
- uint32_t num_of_channels, uint32_t channel_allocation,
- uint32_t level_shift, bool down_mix)
+int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
{
- struct hdmi_audio *audio;
-
- if (!hdmi)
- return -ENXIO;
-
- audio = &hdmi->audio;
-
- if (num_of_channels >= ARRAY_SIZE(nchannels))
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ unsigned int rate;
+ int ret;
+
+ drm_dbg_driver(bridge->dev, "%u Hz, %d bit, %d channels\n",
+ params->sample_rate,
+ params->sample_width,
+ params->cea.channels);
+
+ switch (params->sample_rate) {
+ case 32000:
+ rate = MSM_HDMI_SAMPLE_RATE_32KHZ;
+ break;
+ case 44100:
+ rate = MSM_HDMI_SAMPLE_RATE_44_1KHZ;
+ break;
+ case 48000:
+ rate = MSM_HDMI_SAMPLE_RATE_48KHZ;
+ break;
+ case 88200:
+ rate = MSM_HDMI_SAMPLE_RATE_88_2KHZ;
+ break;
+ case 96000:
+ rate = MSM_HDMI_SAMPLE_RATE_96KHZ;
+ break;
+ case 176400:
+ rate = MSM_HDMI_SAMPLE_RATE_176_4KHZ;
+ break;
+ case 192000:
+ rate = MSM_HDMI_SAMPLE_RATE_192KHZ;
+ break;
+ default:
+ drm_err(bridge->dev, "rate[%d] not supported!\n",
+ params->sample_rate);
return -EINVAL;
+ }
+
+ ret = drm_atomic_helper_connector_hdmi_update_audio_infoframe(connector,
+ &params->cea);
+ if (ret)
+ return ret;
- audio->enabled = enabled;
- audio->infoframe.channels = nchannels[num_of_channels];
- audio->infoframe.channel_allocation = channel_allocation;
- audio->infoframe.level_shift_value = level_shift;
- audio->infoframe.downmix_inhibit = down_mix;
+ hdmi->audio.rate = rate;
+ hdmi->audio.channels = params->cea.channels;
+ hdmi->audio.enabled = true;
return msm_hdmi_audio_update(hdmi);
}
-void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate)
+void msm_hdmi_bridge_audio_shutdown(struct drm_connector *connector,
+ struct drm_bridge *bridge)
{
- struct hdmi_audio *audio;
-
- if (!hdmi)
- return;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
- audio = &hdmi->audio;
+ drm_atomic_helper_connector_hdmi_clear_audio_infoframe(connector);
- if ((rate < 0) || (rate >= MSM_HDMI_SAMPLE_RATE_MAX))
- return;
+ hdmi->audio.rate = 0;
+ hdmi->audio.channels = 2;
+ hdmi->audio.enabled = false;
- audio->rate = rate;
msm_hdmi_audio_update(hdmi);
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 4a5b5112227f..1456354c8af4 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -7,6 +7,8 @@
#include <linux/delay.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_edid.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_hdmi_state_helper.h>
#include "msm_kms.h"
#include "hdmi.h"
@@ -67,24 +69,20 @@ static void power_off(struct drm_bridge *bridge)
}
#define AVI_IFRAME_LINE_NUMBER 1
+#define SPD_IFRAME_LINE_NUMBER 1
+#define VENSPEC_IFRAME_LINE_NUMBER 3
-static void msm_hdmi_config_avi_infoframe(struct hdmi *hdmi)
+static int msm_hdmi_config_avi_infoframe(struct hdmi *hdmi,
+ const u8 *buffer, size_t len)
{
- struct drm_crtc *crtc = hdmi->encoder->crtc;
- const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
- union hdmi_infoframe frame;
- u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
+ u32 buf[4] = {};
u32 val;
- int len;
+ int i;
- drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- hdmi->connector, mode);
-
- len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer));
- if (len < 0) {
+ if (len != HDMI_INFOFRAME_SIZE(AVI) || len - 3 > sizeof(buf)) {
DRM_DEV_ERROR(&hdmi->pdev->dev,
"failed to configure avi infoframe\n");
- return;
+ return -EINVAL;
}
/*
@@ -93,57 +91,245 @@ static void msm_hdmi_config_avi_infoframe(struct hdmi *hdmi)
* written to the LSB byte of AVI_INFO0 and the version is written to
* the third byte from the LSB of AVI_INFO3
*/
- hdmi_write(hdmi, REG_HDMI_AVI_INFO(0),
+ memcpy(buf, &buffer[3], len - 3);
+
+ buf[3] |= buffer[1] << 24;
+
+ for (i = 0; i < ARRAY_SIZE(buf); i++)
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(i), buf[i]);
+
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
+ val |= HDMI_INFOFRAME_CTRL0_AVI_SEND |
+ HDMI_INFOFRAME_CTRL0_AVI_CONT;
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, val);
+
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+ val &= ~HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK;
+ val |= HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE(AVI_IFRAME_LINE_NUMBER);
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val);
+
+ return 0;
+}
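For reference, with a little-endian CPU and a 17-byte packed AVI infoframe from
hdmi_infoframe_pack() (buffer[0..2] header, buffer[3] checksum, buffer[4..16]
payload), the memcpy() plus the explicit OR above work out to:

    /*
     *   AVI_INFO(0) = buffer[3]  | buffer[4]<<8  | buffer[5]<<16  | buffer[6]<<24
     *   AVI_INFO(1) = buffer[7]  | buffer[8]<<8  | buffer[9]<<16  | buffer[10]<<24
     *   AVI_INFO(2) = buffer[11] | buffer[12]<<8 | buffer[13]<<16 | buffer[14]<<24
     *   AVI_INFO(3) = buffer[15] | buffer[16]<<8 | buffer[1]<<24
     *
     * i.e. the checksum ends up in the LSB of AVI_INFO0 and the infoframe
     * version (buffer[1]) in the top byte of AVI_INFO3, matching the
     * open-coded register writes that this change removes.
     */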
+
+static int msm_hdmi_config_audio_infoframe(struct hdmi *hdmi,
+ const u8 *buffer, size_t len)
+{
+ u32 val;
+
+ if (len != HDMI_INFOFRAME_SIZE(AUDIO)) {
+ DRM_DEV_ERROR(&hdmi->pdev->dev,
+ "failed to configure audio infoframe\n");
+ return -EINVAL;
+ }
+
+ hdmi_write(hdmi, REG_HDMI_AUDIO_INFO0,
buffer[3] |
buffer[4] << 8 |
buffer[5] << 16 |
buffer[6] << 24);
- hdmi_write(hdmi, REG_HDMI_AVI_INFO(1),
+ hdmi_write(hdmi, REG_HDMI_AUDIO_INFO1,
buffer[7] |
buffer[8] << 8 |
buffer[9] << 16 |
buffer[10] << 24);
- hdmi_write(hdmi, REG_HDMI_AVI_INFO(2),
- buffer[11] |
- buffer[12] << 8 |
- buffer[13] << 16 |
- buffer[14] << 24);
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
+ val |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, val);
- hdmi_write(hdmi, REG_HDMI_AVI_INFO(3),
- buffer[15] |
- buffer[16] << 8 |
- buffer[1] << 24);
+ return 0;
+}
- hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0,
- HDMI_INFOFRAME_CTRL0_AVI_SEND |
- HDMI_INFOFRAME_CTRL0_AVI_CONT);
+static int msm_hdmi_config_spd_infoframe(struct hdmi *hdmi,
+ const u8 *buffer, size_t len)
+{
+ u32 buf[7] = {};
+ u32 val;
+ int i;
- val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
- val &= ~HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK;
- val |= HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE(AVI_IFRAME_LINE_NUMBER);
- hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val);
+ if (len != HDMI_INFOFRAME_SIZE(SPD) || len - 3 > sizeof(buf)) {
+ DRM_DEV_ERROR(&hdmi->pdev->dev,
+ "failed to configure SPD infoframe\n");
+ return -EINVAL;
+ }
+
+ /* checksum gets written together with the body of the frame */
+ hdmi_write(hdmi, REG_HDMI_GENERIC1_HDR,
+ buffer[0] |
+ buffer[1] << 8 |
+ buffer[2] << 16);
+
+ memcpy(buf, &buffer[3], len - 3);
+
+ for (i = 0; i < ARRAY_SIZE(buf); i++)
+ hdmi_write(hdmi, REG_HDMI_GENERIC1(i), buf[i]);
+
+ val = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL);
+ val |= HDMI_GEN_PKT_CTRL_GENERIC1_SEND |
+ HDMI_GEN_PKT_CTRL_GENERIC1_CONT |
+ HDMI_GEN_PKT_CTRL_GENERIC1_LINE(SPD_IFRAME_LINE_NUMBER);
+ hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, val);
+
+ return 0;
}
-static void msm_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
+static int msm_hdmi_config_hdmi_infoframe(struct hdmi *hdmi,
+ const u8 *buffer, size_t len)
+{
+ u32 buf[7] = {};
+ u32 val;
+ int i;
+
+ if (len < HDMI_INFOFRAME_HEADER_SIZE + HDMI_VENDOR_INFOFRAME_SIZE ||
+ len - 3 > sizeof(buf)) {
+ DRM_DEV_ERROR(&hdmi->pdev->dev,
+ "failed to configure HDMI infoframe\n");
+ return -EINVAL;
+ }
+
+ /* checksum gets written together with the body of the frame */
+ hdmi_write(hdmi, REG_HDMI_GENERIC0_HDR,
+ buffer[0] |
+ buffer[1] << 8 |
+ buffer[2] << 16);
+
+ memcpy(buf, &buffer[3], len - 3);
+
+ for (i = 0; i < ARRAY_SIZE(buf); i++)
+ hdmi_write(hdmi, REG_HDMI_GENERIC0(i), buf[i]);
+
+ val = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL);
+ val |= HDMI_GEN_PKT_CTRL_GENERIC0_SEND |
+ HDMI_GEN_PKT_CTRL_GENERIC0_CONT |
+ HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE |
+ HDMI_GEN_PKT_CTRL_GENERIC0_LINE(VENSPEC_IFRAME_LINE_NUMBER);
+ hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, val);
+
+ return 0;
+}
+
+static int msm_hdmi_bridge_clear_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ u32 val;
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
+ val &= ~(HDMI_INFOFRAME_CTRL0_AVI_SEND |
+ HDMI_INFOFRAME_CTRL0_AVI_CONT);
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, val);
+
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+ val &= ~HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK;
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val);
+
+ break;
+
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
+ val &= ~(HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE |
+ HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE);
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, val);
+
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+ val &= ~HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE__MASK;
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val);
+
+ break;
+
+ case HDMI_INFOFRAME_TYPE_SPD:
+ val = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL);
+ val &= ~(HDMI_GEN_PKT_CTRL_GENERIC1_SEND |
+ HDMI_GEN_PKT_CTRL_GENERIC1_CONT |
+ HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK);
+ hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, val);
+
+ break;
+
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ val = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL);
+ val &= ~(HDMI_GEN_PKT_CTRL_GENERIC0_SEND |
+ HDMI_GEN_PKT_CTRL_GENERIC0_CONT |
+ HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE |
+ HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK);
+ hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, val);
+
+ break;
+
+ default:
+ drm_dbg_driver(hdmi_bridge->base.dev, "Unsupported infoframe type %x\n", type);
+ }
+
+ return 0;
+}
+
+static int msm_hdmi_bridge_write_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+
+ msm_hdmi_bridge_clear_infoframe(bridge, type);
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ return msm_hdmi_config_avi_infoframe(hdmi, buffer, len);
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ return msm_hdmi_config_audio_infoframe(hdmi, buffer, len);
+ case HDMI_INFOFRAME_TYPE_SPD:
+ return msm_hdmi_config_spd_infoframe(hdmi, buffer, len);
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return msm_hdmi_config_hdmi_infoframe(hdmi, buffer, len);
+ default:
+ drm_dbg_driver(hdmi_bridge->base.dev, "Unsupported infoframe type %x\n", type);
+ return 0;
+ }
+}
+
+static void msm_hdmi_set_timings(struct hdmi *hdmi,
+ const struct drm_display_mode *mode);
+
+static void msm_hdmi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
struct hdmi_phy *phy = hdmi->phy;
+ struct drm_encoder *encoder = bridge->encoder;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
DBG("power up");
+ connector = drm_atomic_get_new_connector_for_encoder(state, encoder);
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+
+ hdmi->pixclock = conn_state->hdmi.tmds_char_rate;
+
+ msm_hdmi_set_timings(hdmi, &crtc_state->adjusted_mode);
+
if (!hdmi->power_on) {
msm_hdmi_phy_resource_enable(phy);
msm_hdmi_power_on(bridge);
hdmi->power_on = true;
- if (hdmi->hdmi_mode) {
- msm_hdmi_config_avi_infoframe(hdmi);
+ if (connector->display_info.is_hdmi)
msm_hdmi_audio_update(hdmi);
- }
}
+ drm_atomic_helper_connector_hdmi_update_infoframes(connector, state);
+
msm_hdmi_phy_powerup(phy, hdmi->pixclock);
msm_hdmi_set_mode(hdmi, true);
@@ -152,7 +338,8 @@ static void msm_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
msm_hdmi_hdcp_on(hdmi->hdcp_ctrl);
}
-static void msm_hdmi_bridge_post_disable(struct drm_bridge *bridge)
+static void msm_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
@@ -169,25 +356,18 @@ static void msm_hdmi_bridge_post_disable(struct drm_bridge *bridge)
if (hdmi->power_on) {
power_off(bridge);
hdmi->power_on = false;
- if (hdmi->hdmi_mode)
+ if (hdmi->connector->display_info.is_hdmi)
msm_hdmi_audio_update(hdmi);
msm_hdmi_phy_resource_disable(phy);
}
}
-static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode)
+static void msm_hdmi_set_timings(struct hdmi *hdmi,
+ const struct drm_display_mode *mode)
{
- struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
- struct hdmi *hdmi = hdmi_bridge->hdmi;
int hstart, hend, vstart, vend;
uint32_t frame_ctrl;
- mode = adjusted_mode;
-
- hdmi->pixclock = mode->clock * 1000;
-
hstart = mode->htotal - mode->hsync_start;
hend = mode->htotal - mode->hsync_start + mode->hdisplay;
@@ -232,7 +412,7 @@ static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge,
DBG("frame_ctrl=%08x", frame_ctrl);
hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
- if (hdmi->hdmi_mode)
+ if (hdmi->connector->display_info.is_hdmi)
msm_hdmi_audio_update(hdmi);
}
@@ -251,32 +431,19 @@ static const struct drm_edid *msm_hdmi_bridge_edid_read(struct drm_bridge *bridg
hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
- if (drm_edid) {
- /*
- * FIXME: This should use connector->display_info.is_hdmi from a
- * path that has read the EDID and called
- * drm_edid_connector_update().
- */
- const struct edid *edid = drm_edid_raw(drm_edid);
-
- hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
- }
-
return drm_edid;
}
-static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
- const struct drm_display_info *info,
- const struct drm_display_mode *mode)
+static enum drm_mode_status msm_hdmi_bridge_tmds_char_rate_valid(const struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct msm_drm_private *priv = bridge->dev->dev_private;
struct msm_kms *kms = priv->kms;
- long actual, requested;
-
- requested = 1000 * mode->clock;
+ long actual;
/* for mdp5/apq8074, we manage our own pixel clk (as opposed to
* mdp4/dtv stuff where pixel clk is assigned to mdp/encoder
@@ -284,27 +451,34 @@ static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge
*/
if (kms->funcs->round_pixclk)
actual = kms->funcs->round_pixclk(kms,
- requested, hdmi_bridge->hdmi->encoder);
+ tmds_rate,
+ hdmi_bridge->hdmi->encoder);
else if (config->pwr_clk_cnt > 0)
- actual = clk_round_rate(hdmi->pwr_clks[0], requested);
+ actual = clk_round_rate(hdmi->pwr_clks[0], tmds_rate);
else
- actual = requested;
+ actual = tmds_rate;
- DBG("requested=%ld, actual=%ld", requested, actual);
+ DBG("requested=%lld, actual=%ld", tmds_rate, actual);
- if (actual != requested)
+ if (actual != tmds_rate)
return MODE_CLOCK_RANGE;
return 0;
}
static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = {
- .pre_enable = msm_hdmi_bridge_pre_enable,
- .post_disable = msm_hdmi_bridge_post_disable,
- .mode_set = msm_hdmi_bridge_mode_set,
- .mode_valid = msm_hdmi_bridge_mode_valid,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_pre_enable = msm_hdmi_bridge_atomic_pre_enable,
+ .atomic_post_disable = msm_hdmi_bridge_atomic_post_disable,
.edid_read = msm_hdmi_bridge_edid_read,
.detect = msm_hdmi_bridge_detect,
+ .hdmi_tmds_char_rate_valid = msm_hdmi_bridge_tmds_char_rate_valid,
+ .hdmi_clear_infoframe = msm_hdmi_bridge_clear_infoframe,
+ .hdmi_write_infoframe = msm_hdmi_bridge_write_infoframe,
+ .hdmi_audio_prepare = msm_hdmi_bridge_audio_prepare,
+ .hdmi_audio_shutdown = msm_hdmi_bridge_audio_shutdown,
};
static void
@@ -336,9 +510,15 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi)
bridge->funcs = &msm_hdmi_bridge_funcs;
bridge->ddc = hdmi->i2c;
bridge->type = DRM_MODE_CONNECTOR_HDMIA;
+ bridge->vendor = "Qualcomm";
+ bridge->product = "Snapdragon";
bridge->ops = DRM_BRIDGE_OP_HPD |
DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_HDMI |
DRM_BRIDGE_OP_EDID;
+ bridge->hdmi_audio_max_i2s_playback_channels = 8;
+ bridge->hdmi_audio_dev = &hdmi->pdev->dev;
+ bridge->hdmi_audio_dai_port = -1;
ret = devm_drm_bridge_add(hdmi->dev->dev, bridge);
if (ret)
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index a7a2384044ff..87a91148a731 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -183,10 +183,16 @@ static unsigned get_crtc_mask(struct drm_atomic_state *state)
int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc *crtc;
- int i;
+ int i, ret = 0;
+ /*
+ * FIXME: stop setting allow_modeset and move this check to the DPU
+ * driver.
+ */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if ((old_crtc_state->ctm && !new_crtc_state->ctm) ||
@@ -196,6 +202,11 @@ int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
}
}
+ if (kms && kms->funcs && kms->funcs->check_mode_changed)
+ ret = kms->funcs->check_mode_changed(kms, state);
+ if (ret)
+ return ret;
+
return drm_atomic_helper_check(dev, state);
}
@@ -221,6 +232,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
kms->funcs->wait_flush(kms, crtc_mask);
trace_msm_atomic_wait_flush_finish(crtc_mask);
+ atomic_set(&kms->fault_snapshot_capture, 0);
+
/*
* Now that there is no in-progress flush, prepare the
* current update:
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index ff7a7a9f7b0d..c3588dc9e537 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -894,6 +894,7 @@ static const struct drm_driver msm_driver = {
DRIVER_RENDER |
DRIVER_ATOMIC |
DRIVER_MODESET |
+ DRIVER_SYNCOBJ_TIMELINE |
DRIVER_SYNCOBJ,
.open = msm_open,
.postclose = msm_postclose,
diff --git a/drivers/gpu/drm/msm/msm_dsc_helper.h b/drivers/gpu/drm/msm/msm_dsc_helper.h
index b9049fe1e279..63f95523b2cb 100644
--- a/drivers/gpu/drm/msm/msm_dsc_helper.h
+++ b/drivers/gpu/drm/msm/msm_dsc_helper.h
@@ -13,17 +13,6 @@
#include <drm/display/drm_dsc_helper.h>
/**
- * msm_dsc_get_slices_per_intf() - calculate number of slices per interface
- * @dsc: Pointer to drm dsc config struct
- * @intf_width: interface width in pixels
- * Returns: Integer representing the number of slices for the given interface
- */
-static inline u32 msm_dsc_get_slices_per_intf(const struct drm_dsc_config *dsc, u32 intf_width)
-{
- return DIV_ROUND_UP(intf_width, dsc->slice_width);
-}
-
-/**
* msm_dsc_get_bytes_per_line() - calculate bytes per line
* @dsc: Pointer to drm dsc config struct
* Returns: Integer value representing bytes per line. DSI and DP need
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index dee470403036..3e9aa2cc38ef 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -509,7 +509,7 @@ static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
}
if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
- ret = -SUBMIT_ERROR(EINVAL, submit, "invalid syncobj flags: %x", syncobj_desc.flags);
+ ret = SUBMIT_ERROR(EINVAL, submit, "invalid syncobj flags: %x", syncobj_desc.flags);
break;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 8557998e0c92..c380d9d9f5af 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -281,6 +281,15 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
if (submit) {
int i;
+ if (state->fault_info.ttbr0) {
+ struct msm_gpu_fault_info *info = &state->fault_info;
+ struct msm_mmu *mmu = submit->aspace->mmu;
+
+ msm_iommu_pagetable_params(mmu, &info->pgtbl_ttbr0,
+ &info->asid);
+ msm_iommu_pagetable_walk(mmu, info->iova, info->ptes);
+ }
+
state->bos = kcalloc(submit->nr_bos,
sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 7cabc8480d7c..e25009150579 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -101,6 +101,14 @@ struct msm_gpu_fault_info {
int flags;
const char *type;
const char *block;
+
+ /* Information about what we think/expect the current SMMU state to be;
+ * for example, pgtbl_ttbr0 should match the TTBR0 value that was read
+ * back from the SMMU registers when the fault occurred.
+ */
+ phys_addr_t pgtbl_ttbr0;
+ u64 ptes[4];
+ int asid;
};
/**
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 2a94e82316f9..fd73dcd3f30e 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -195,6 +195,28 @@ struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu)
return &iommu->domain->geometry;
}
+int
+msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[4])
+{
+ struct msm_iommu_pagetable *pagetable;
+ struct arm_lpae_io_pgtable_walk_data wd = {};
+
+ if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
+ return -EINVAL;
+
+ pagetable = to_pagetable(mmu);
+
+ if (!pagetable->pgtbl_ops->pgtable_walk)
+ return -EINVAL;
+
+ pagetable->pgtbl_ops->pgtable_walk(pagetable->pgtbl_ops, iova, &wd);
+
+ for (int i = 0; i < ARRAY_SIZE(wd.ptes); i++)
+ ptes[i] = wd.ptes[i];
+
+ return 0;
+}
+
static const struct msm_mmu_funcs pagetable_funcs = {
.map = msm_iommu_pagetable_map,
.unmap = msm_iommu_pagetable_unmap,
@@ -243,7 +265,7 @@ static const struct iommu_flush_ops tlb_ops = {
.tlb_add_page = msm_iommu_tlb_add_page,
};
-static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
+static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *arg);
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
@@ -319,7 +341,7 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
return &pagetable->base;
}
-static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
+static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *arg)
{
struct msm_iommu *iommu = arg;
@@ -343,6 +365,17 @@ static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
return 0;
}
+static int msm_disp_fault_handler(struct iommu_domain *domain, struct device *dev,
+ unsigned long iova, int flags, void *arg)
+{
+ struct msm_iommu *iommu = arg;
+
+ if (iommu->base.handler)
+ return iommu->base.handler(iommu->base.arg, iova, flags, NULL);
+
+ return -ENOSYS;
+}
+
static void msm_iommu_resume_translation(struct msm_mmu *mmu)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev);
@@ -437,6 +470,21 @@ struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
return &iommu->base;
}
+struct msm_mmu *msm_iommu_disp_new(struct device *dev, unsigned long quirks)
+{
+ struct msm_iommu *iommu;
+ struct msm_mmu *mmu;
+
+ mmu = msm_iommu_new(dev, quirks);
+ if (IS_ERR_OR_NULL(mmu))
+ return mmu;
+
+ iommu = to_msm_iommu(mmu);
+ iommu_set_fault_handler(iommu->domain, msm_disp_fault_handler, iommu);
+
+ return mmu;
+}
+
struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
@@ -448,7 +496,7 @@ struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsig
return mmu;
iommu = to_msm_iommu(mmu);
- iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu);
+ iommu_set_fault_handler(iommu->domain, msm_gpu_fault_handler, iommu);
/* Enable stall on iommu fault: */
if (adreno_smmu->set_stall)
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index 38965e12a6bf..35d5397e73b4 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -164,12 +164,26 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc)
vblank_ctrl_queue_work(priv, crtc, false);
}
+static int msm_kms_fault_handler(void *arg, unsigned long iova, int flags, void *data)
+{
+ struct msm_kms *kms = arg;
+
+ if (atomic_read(&kms->fault_snapshot_capture) == 0) {
+ msm_disp_snapshot_state(kms->dev);
+ atomic_inc(&kms->fault_snapshot_capture);
+ }
+
+ return -ENOSYS;
+}
+
struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
{
struct msm_gem_address_space *aspace;
struct msm_mmu *mmu;
struct device *mdp_dev = dev->dev;
struct device *mdss_dev = mdp_dev->parent;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
struct device *iommu_dev;
/*
@@ -181,7 +195,7 @@ struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
else
iommu_dev = mdss_dev;
- mmu = msm_iommu_new(iommu_dev, 0);
+ mmu = msm_iommu_disp_new(iommu_dev, 0);
if (IS_ERR(mmu))
return ERR_CAST(mmu);
@@ -195,8 +209,11 @@ struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
if (IS_ERR(aspace)) {
dev_err(mdp_dev, "aspace create, error %pe\n", aspace);
mmu->funcs->destroy(mmu);
+ return aspace;
}
+ msm_mmu_set_fault_handler(aspace->mmu, kms, msm_kms_fault_handler);
+
return aspace;
}
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index e60162744c66..43b58d052ee6 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -60,6 +60,13 @@ struct msm_kms_funcs {
void (*disable_commit)(struct msm_kms *kms);
/**
+ * @check_mode_changed:
+ *
+ * Verify if the commit requires a full modeset on any of the CRTCs.
+ */
+ int (*check_mode_changed)(struct msm_kms *kms, struct drm_atomic_state *state);
+
+ /**
* Prepare for atomic commit. This is called after any previous
* (async or otherwise) commit has completed.
*/
@@ -128,6 +135,9 @@ struct msm_kms {
int irq;
bool irq_requested;
+ /* rate limit the snapshot capture to once per attach */
+ atomic_t fault_snapshot_capture;
+
/* mapper-id used to request GEM buffer mapped for scanout: */
struct msm_gem_address_space *aspace;
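As a rough illustration (not part of this change), a KMS backend would provide
the new check_mode_changed hook through its msm_kms_funcs table, and
msm_atomic_check() invokes it before drm_atomic_helper_check(); the function
name and the condition helper below are hypothetical placeholders:

    /* Hypothetical backend implementation of the new hook. */
    static int example_kms_check_mode_changed(struct msm_kms *kms,
                                              struct drm_atomic_state *state)
    {
            struct drm_crtc_state *new_crtc_state;
            struct drm_crtc *crtc;
            int i;

            for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                    /* force a full modeset when a driver-specific condition
                     * (placeholder helper below) is detected on this CRTC
                     */
                    if (example_needs_full_modeset(kms, new_crtc_state))
                            new_crtc_state->mode_changed = true;
            }

            return 0;
    }
    /* and in the backend's funcs table: .check_mode_changed = example_kms_check_mode_changed, */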
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 88af4f490881..daf91529e02b 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -42,6 +42,7 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks);
struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks);
+struct msm_mmu *msm_iommu_disp_new(struct device *dev, unsigned long quirks);
static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
int (*handler)(void *arg, unsigned long iova, int flags, void *data))
@@ -53,7 +54,8 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent);
int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
- int *asid);
+ int *asid);
+int msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[4]);
struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu);
#endif /* __MSM_MMU_H__ */
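A hypothetical consumer of the new walk helper (the name dump_fault_ptes and
the pr_info() output are illustrative only; the real user is the crashstate
capture in msm_gpu.c above) might look like:

    static void dump_fault_ptes(struct msm_mmu *mmu, unsigned long iova)
    {
            uint64_t ptes[4];
            int i;

            /* fills ptes[] only for MSM_MMU_IOMMU_PAGETABLE MMUs whose
             * io-pgtable ops implement pgtable_walk; otherwise -EINVAL
             */
            if (msm_iommu_pagetable_walk(mmu, iova, ptes))
                    return;

            for (i = 0; i < ARRAY_SIZE(ptes); i++)
                    pr_info("PTE[%d]: %016llx\n", i,
                            (unsigned long long)ptes[i]);
    }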
diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
index 35f7f40e405b..d2c8c46bb041 100644
--- a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
@@ -17,6 +17,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<bitfield name="CLK_EN" pos="5" type="boolean"/>
<bitfield name="CLK_EN_SEL" pos="4" type="boolean"/>
<bitfield name="BITCLK_SEL" low="2" high="3" type="uint"/>
+ <bitfield name="DSICLK_SEL" low="0" high="1" type="uint"/>
</reg32>
<reg32 offset="0x00018" name="GLBL_CTRL"/>
<reg32 offset="0x0001c" name="RBUF_CTRL"/>
diff --git a/drivers/gpu/drm/msm/registers/display/hdmi.xml b/drivers/gpu/drm/msm/registers/display/hdmi.xml
index 1cf1b14fbd91..0ebb96297dae 100644
--- a/drivers/gpu/drm/msm/registers/display/hdmi.xml
+++ b/drivers/gpu/drm/msm/registers/display/hdmi.xml
@@ -131,7 +131,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
-->
<bitfield name="GENERIC0_SEND" pos="0" type="boolean"/>
<bitfield name="GENERIC0_CONT" pos="1" type="boolean"/>
- <bitfield name="GENERIC0_UPDATE" low="2" high="3" type="uint"/> <!-- ??? -->
+ <bitfield name="GENERIC0_UPDATE" pos="2" type="boolean"/>
<bitfield name="GENERIC1_SEND" pos="4" type="boolean"/>
<bitfield name="GENERIC1_CONT" pos="5" type="boolean"/>
<bitfield name="GENERIC0_LINE" low="16" high="21" type="uint"/>
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index ac76c0787010..7b3e979c51ec 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -4,6 +4,7 @@ config DRM_NOUVEAU
depends on DRM && PCI && MMU
select IOMMU_API
select FW_LOADER
+ select FW_CACHE if PM_SLEEP
select DRM_CLIENT_SELECTION
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDMI_HELPER
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 6fb9719d721f..1b10c6c12f46 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -775,7 +775,6 @@ nouveau_connector_force(struct drm_connector *connector)
if (!nv_encoder) {
NV_ERROR(drm, "can't find encoder to force %s on!\n",
connector->name);
- connector->status = connector_status_disconnected;
return;
}
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index c75302ca3427..f56e77e7f6d0 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -21,7 +21,7 @@
*
*/
-#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#if !defined(_GPU_SCHED_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _GPU_SCHED_TRACE_H_
#include <linux/stringify.h>
@@ -106,7 +106,7 @@ TRACE_EVENT(drm_sched_job_wait_dep,
__entry->seqno)
);
-#endif
+#endif /* _GPU_SCHED_TRACE_H_ */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/gpu/drm/tiny/appletbdrm.c b/drivers/gpu/drm/tiny/appletbdrm.c
index 394c8f9bd41a..4370ba22dd88 100644
--- a/drivers/gpu/drm/tiny/appletbdrm.c
+++ b/drivers/gpu/drm/tiny/appletbdrm.c
@@ -805,7 +805,6 @@ static void appletbdrm_disconnect(struct usb_interface *intf)
struct appletbdrm_device *adev = usb_get_intfdata(intf);
struct drm_device *drm = &adev->drm;
- put_device(adev->dmadev);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
}
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index c67e1f906785..8706763af8fb 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -335,8 +335,6 @@ static void bochs_hw_setmode(struct bochs_device *bochs, struct drm_display_mode
bochs->xres, bochs->yres, bochs->bpp,
bochs->yres_virtual);
- bochs_hw_blank(bochs, false);
-
bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE, 0);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP, bochs->bpp);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES, bochs->xres);
@@ -506,6 +504,9 @@ static int bochs_crtc_helper_atomic_check(struct drm_crtc *crtc,
static void bochs_crtc_helper_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ struct bochs_device *bochs = to_bochs_device(crtc->dev);
+
+ bochs_hw_blank(bochs, false);
}
static void bochs_crtc_helper_atomic_disable(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index b20ac1705726..fa269d279e25 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -67,7 +67,7 @@ static u16 lerp_u16(u16 a, u16 b, s64 t)
s64 delta = drm_fixp_mul(b_fp - a_fp, t);
- return drm_fixp2int(a_fp + delta);
+ return drm_fixp2int_round(a_fp + delta);
}
static s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value)
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 99219c16e8aa..7d7995196702 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -39,6 +39,7 @@ config DRM_XE
select DRM_TTM_HELPER
select DRM_EXEC
select DRM_GPUVM
+ select DRM_GPUSVM if !UML && DEVICE_PRIVATE
select DRM_SCHED
select MMU_NOTIFIER
select WANT_DEV_COREDUMP
@@ -73,6 +74,15 @@ config DRM_XE_DP_TUNNEL
If in doubt say "Y".
+config DRM_XE_DEVMEM_MIRROR
+ bool "Enable device memory mirror"
+ depends on DRM_XE
+ select GET_FREE_REGION
+ default y
+ help
+ Disable this option only if you want to build without device memory
+ mirror support. Disabling it reduces the KMD memory footprint.
+
config DRM_XE_FORCE_PROBE
string "Force probe xe for selected Intel hardware IDs"
depends on DRM_XE
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 81b8914b877c..9699b08585f7 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -33,6 +33,7 @@ xe-y += xe_bb.o \
xe_device_sysfs.o \
xe_dma_buf.o \
xe_drm_client.o \
+ xe_eu_stall.o \
xe_exec.o \
xe_exec_queue.o \
xe_execlist.o \
@@ -60,6 +61,7 @@ xe-y += xe_bb.o \
xe_guc_capture.o \
xe_guc_ct.o \
xe_guc_db_mgr.o \
+ xe_guc_engine_activity.o \
xe_guc_hwconfig.o \
xe_guc_id_mgr.o \
xe_guc_klv_helpers.o \
@@ -123,6 +125,7 @@ xe-y += xe_bb.o \
xe_wopcm.o
xe-$(CONFIG_HMM_MIRROR) += xe_hmm.o
+xe-$(CONFIG_DRM_GPUSVM) += xe_svm.o
# graphics hardware monitoring (HWMON) support
xe-$(CONFIG_HWMON) += xe_hwmon.o
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index fee385532fb0..ec516e838ee8 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -140,6 +140,7 @@ enum xe_guc_action {
XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
XE_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
XE_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
+ XE_GUC_ACTION_SET_DEVICE_ENGINE_ACTIVITY_BUFFER = 0x550C,
XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR = 0x6000,
XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC = 0x6002,
XE_GUC_ACTION_PAGE_FAULT_RES_DESC = 0x6003,
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
index 85abe4f09ae2..b28c8fa061f7 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
@@ -174,6 +174,9 @@ struct slpc_task_state_data {
};
} __packed;
+#define SLPC_CTX_FREQ_REQ_IS_COMPUTE REG_BIT(28)
+#define SLPC_OPTIMIZED_STRATEGY_COMPUTE REG_BIT(0)
+
struct slpc_shared_data_header {
/* Total size in bytes of this shared buffer. */
u32 size;
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
new file mode 100644
index 000000000000..8a048980ea38
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __I915_GEM_OBJECT_H__
+#define __I915_GEM_OBJECT_H__
+
+struct dma_fence;
+struct i915_sched_attr;
+
+static inline void i915_gem_fence_wait_priority(struct dma_fence *fence,
+ const struct i915_sched_attr *attr)
+{
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index 84b0991b35b3..dfec5108d2c3 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -95,14 +95,6 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
#define HAS_128_BYTE_Y_TILING(xe) (xe || 1)
-#define I915_PRIORITY_DISPLAY 0
-struct i915_sched_attr {
- int priority;
-};
-#define i915_gem_fence_wait_priority(fence, attr) do { (void) attr; } while (0)
-
-#define FORCEWAKE_ALL XE_FORCEWAKE_ALL
-
#ifdef CONFIG_ARM64
/*
* arm64 indirectly includes linux/rtc.h,
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_gtt_view_types.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_gtt_view_types.h
new file mode 100644
index 000000000000..b261910cd6f9
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_gtt_view_types.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#include "../../i915/i915_gtt_view_types.h"
+
+/* Partial view not supported in xe, fail build if used. */
+#define I915_GTT_VIEW_PARTIAL
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h
new file mode 100644
index 000000000000..c11130440d31
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __I915_SCHEDULER_TYPES_H__
+#define __I915_SCHEDULER_TYPES_H__
+
+#define I915_PRIORITY_DISPLAY 0
+
+struct i915_sched_attr {
+ int priority;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma_types.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma_types.h
deleted file mode 100644
index e7aaf50f5485..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma_types.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include <linux/types.h>
-#include <linux/build_bug.h>
-
-/* XX: Figure out how to handle this vma mapping in xe */
-struct intel_remapped_plane_info {
- /* in gtt pages */
- u32 offset:31;
- u32 linear:1;
- union {
- /* in gtt pages for !linear */
- struct {
- u16 width;
- u16 height;
- u16 src_stride;
- u16 dst_stride;
- };
-
- /* in gtt pages for linear */
- u32 size;
- };
-} __packed;
-
-struct intel_remapped_info {
- struct intel_remapped_plane_info plane[4];
- /* in gtt pages */
- u32 plane_alignment;
-} __packed;
-
-struct intel_rotation_info {
- struct intel_remapped_plane_info plane[2];
-} __packed;
-
-enum i915_gtt_view_type {
- I915_GTT_VIEW_NORMAL = 0,
- I915_GTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
- I915_GTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
-};
-
-static inline void assert_i915_gem_gtt_types(void)
-{
- BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 2 * sizeof(u32) + 8 * sizeof(u16));
- BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 5 * sizeof(u32) + 16 * sizeof(u16));
-
- /* Check that rotation/remapped shares offsets for simplicity */
- BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
- offsetof(struct intel_rotation_info, plane[0]));
- BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) !=
- offsetofend(struct intel_rotation_info, plane[1]));
-
- /* As we encode the size of each branch inside the union into its type,
- * we have to be careful that each branch has a unique size.
- */
- switch ((enum i915_gtt_view_type)0) {
- case I915_GTT_VIEW_NORMAL:
- case I915_GTT_VIEW_ROTATED:
- case I915_GTT_VIEW_REMAPPED:
- /* gcc complains if these are identical cases */
- break;
- }
-}
-
-struct i915_gtt_view {
- enum i915_gtt_view_type type;
- union {
- /* Members need to contain no holes/padding */
- struct intel_rotation_info rotated;
- struct intel_remapped_info remapped;
- };
-};
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
index 4fc3e535de91..0c1e88e36a1e 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
@@ -10,6 +10,8 @@
#include "xe_device_types.h"
#include "xe_mmio.h"
+#define FORCEWAKE_ALL XE_FORCEWAKE_ALL
+
static inline struct intel_uncore *to_intel_uncore(struct drm_device *drm)
{
return &to_xe_device(drm)->uncore;
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h b/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h
index d2eb8e1f6c4b..97fd0ddf0b3a 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h
@@ -12,11 +12,8 @@
#include "xe_pxp.h"
struct drm_gem_object;
-struct xe_pxp;
-static inline int intel_pxp_key_check(struct xe_pxp *pxp,
- struct drm_gem_object *obj,
- bool assign)
+static inline int intel_pxp_key_check(struct drm_gem_object *obj, bool assign)
{
/*
* The assign variable is used in i915 to assign the key to the BO at
@@ -26,7 +23,7 @@ static inline int intel_pxp_key_check(struct xe_pxp *pxp,
if (assign)
return -EINVAL;
- return xe_pxp_obj_key_check(pxp, obj);
+ return xe_pxp_obj_key_check(obj);
}
#endif
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index ca95fcd098ec..3a1e505ff182 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -45,7 +45,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
NULL, size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
XE_BO_FLAG_STOLEN |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_GGTT | XE_BO_FLAG_PINNED);
if (!IS_ERR(obj))
drm_info(&xe->drm, "Allocated fbdev into stolen\n");
else
@@ -56,7 +56,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_GGTT | XE_BO_FLAG_PINNED);
}
if (IS_ERR(obj)) {
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index 02a413a07382..0b0aca7a25af 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -8,6 +8,8 @@
#include <linux/fb.h>
+#include <drm/drm_client.h>
+#include <drm/drm_client_event.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
@@ -67,6 +69,10 @@ void xe_display_driver_set_hooks(struct drm_driver *driver)
if (!xe_modparam.probe_display)
return;
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ driver->fbdev_probe = intel_fbdev_driver_fbdev_probe;
+#endif
+
driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
}
@@ -170,6 +176,7 @@ static void xe_display_fini(void *arg)
intel_hpd_poll_fini(xe);
intel_hdcp_component_fini(display);
intel_audio_deinit(display);
+ intel_display_driver_remove(display);
}
int xe_display_init(struct xe_device *xe)
@@ -184,7 +191,7 @@ int xe_display_init(struct xe_device *xe)
if (err)
return err;
- return xe_device_add_action_or_reset(xe, xe_display_fini, xe);
+ return devm_add_action_or_reset(xe->drm.dev, xe_display_fini, xe);
}
void xe_display_register(struct xe_device *xe)
@@ -209,16 +216,6 @@ void xe_display_unregister(struct xe_device *xe)
intel_display_driver_unregister(display);
}
-void xe_display_driver_remove(struct xe_device *xe)
-{
- struct intel_display *display = &xe->display;
-
- if (!xe->info.probe_display)
- return;
-
- intel_display_driver_remove(display);
-}
-
/* IRQ-related functions */
void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
@@ -348,7 +345,7 @@ void xe_display_pm_suspend(struct xe_device *xe)
* properly.
*/
intel_power_domains_disable(display);
- intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
+ drm_client_dev_suspend(&xe->drm, false);
if (has_display(xe)) {
drm_kms_helper_poll_disable(&xe->drm);
@@ -378,7 +375,8 @@ void xe_display_pm_shutdown(struct xe_device *xe)
return;
intel_power_domains_disable(display);
- intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
+ drm_client_dev_suspend(&xe->drm, false);
+
if (has_display(xe)) {
drm_kms_helper_poll_disable(&xe->drm);
intel_display_driver_disable_user_access(display);
@@ -497,7 +495,7 @@ void xe_display_pm_resume(struct xe_device *xe)
intel_opregion_resume(display);
- intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false);
+ drm_client_dev_resume(&xe->drm, false);
intel_power_domains_enable(display);
}
diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h
index 685dc74402fb..46e14f8dee28 100644
--- a/drivers/gpu/drm/xe/display/xe_display.h
+++ b/drivers/gpu/drm/xe/display/xe_display.h
@@ -14,7 +14,6 @@ struct drm_driver;
bool xe_display_driver_probe_defer(struct pci_dev *pdev);
void xe_display_driver_set_hooks(struct drm_driver *driver);
-void xe_display_driver_remove(struct xe_device *xe);
int xe_display_create(struct xe_device *xe);
diff --git a/drivers/gpu/drm/xe/display/xe_display_rps.c b/drivers/gpu/drm/xe/display/xe_display_rps.c
index ab21c581c192..fa616f9688a5 100644
--- a/drivers/gpu/drm/xe/display/xe_display_rps.c
+++ b/drivers/gpu/drm/xe/display/xe_display_rps.c
@@ -10,7 +10,7 @@ void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
{
}
-void intel_display_rps_mark_interactive(struct drm_i915_private *i915,
+void intel_display_rps_mark_interactive(struct intel_display *display,
struct intel_atomic_state *state,
bool interactive)
{
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index 11a6b996d739..d918ae1c8061 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -5,6 +5,7 @@
#include <drm/ttm/ttm_bo.h>
+#include "i915_vma.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
@@ -81,7 +82,7 @@ write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs,
static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
const struct i915_gtt_view *view,
struct i915_vma *vma,
- u64 physical_alignment)
+ unsigned int alignment)
{
struct xe_device *xe = to_xe_device(fb->base.dev);
struct xe_tile *tile0 = xe_device_get_root_tile(xe);
@@ -107,7 +108,7 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
XE_BO_FLAG_VRAM0 |
XE_BO_FLAG_GGTT |
XE_BO_FLAG_PAGETABLE,
- physical_alignment);
+ alignment);
else
dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
dpt_size, ~0ull,
@@ -115,7 +116,7 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
XE_BO_FLAG_STOLEN |
XE_BO_FLAG_GGTT |
XE_BO_FLAG_PAGETABLE,
- physical_alignment);
+ alignment);
if (IS_ERR(dpt))
dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
dpt_size, ~0ull,
@@ -123,7 +124,7 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_GGTT |
XE_BO_FLAG_PAGETABLE,
- physical_alignment);
+ alignment);
if (IS_ERR(dpt))
return PTR_ERR(dpt);
@@ -193,7 +194,7 @@ write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo
static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
const struct i915_gtt_view *view,
struct i915_vma *vma,
- u64 physical_alignment)
+ unsigned int alignment)
{
struct drm_gem_object *obj = intel_fb_bo(&fb->base);
struct xe_bo *bo = gem_to_xe_bo(obj);
@@ -276,7 +277,7 @@ out:
static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
const struct i915_gtt_view *view,
- u64 physical_alignment)
+ unsigned int alignment)
{
struct drm_device *dev = fb->base.dev;
struct xe_device *xe = to_xe_device(dev);
@@ -326,9 +327,9 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
vma->bo = bo;
if (intel_fb_uses_dpt(&fb->base))
- ret = __xe_pin_fb_vma_dpt(fb, view, vma, physical_alignment);
+ ret = __xe_pin_fb_vma_dpt(fb, view, vma, alignment);
else
- ret = __xe_pin_fb_vma_ggtt(fb, view, vma, physical_alignment);
+ ret = __xe_pin_fb_vma_ggtt(fb, view, vma, alignment);
if (ret)
goto err_unpin;
@@ -421,7 +422,7 @@ int intel_plane_pin_fb(struct intel_plane_state *new_plane_state,
struct i915_vma *vma;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
- u64 phys_alignment = plane->min_alignment(plane, fb, 0);
+ unsigned int alignment = plane->min_alignment(plane, fb, 0);
if (reuse_vma(new_plane_state, old_plane_state))
return 0;
@@ -429,7 +430,7 @@ int intel_plane_pin_fb(struct intel_plane_state *new_plane_state,
/* We reject creating !SCANOUT fb's, so this is weird.. */
drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT));
- vma = __xe_pin_fb_vma(intel_fb, &new_plane_state->view.gtt, phys_alignment);
+ vma = __xe_pin_fb_vma(intel_fb, &new_plane_state->view.gtt, alignment);
if (IS_ERR(vma))
return PTR_ERR(vma);
diff --git a/drivers/gpu/drm/xe/display/xe_tdf.c b/drivers/gpu/drm/xe/display/xe_tdf.c
index 2c0d4e144e09..2a7fccbeb1d5 100644
--- a/drivers/gpu/drm/xe/display/xe_tdf.c
+++ b/drivers/gpu/drm/xe/display/xe_tdf.c
@@ -7,7 +7,9 @@
#include "intel_display_types.h"
#include "intel_tdf.h"
-void intel_td_flush(struct drm_i915_private *i915)
+void intel_td_flush(struct intel_display *display)
{
- xe_device_td_flush(i915);
+ struct xe_device *xe = to_xe_device(display->drm);
+
+ xe_device_td_flush(xe);
}
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index c8fd3d5ca502..4f372dc2cb89 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -53,7 +53,6 @@
#define RING_CTL(base) XE_REG((base) + 0x3c)
#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
-#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
#define RING_START_UDW(base) XE_REG((base) + 0x48)
diff --git a/drivers/gpu/drm/xe/regs/xe_eu_stall_regs.h b/drivers/gpu/drm/xe/regs/xe_eu_stall_regs.h
new file mode 100644
index 000000000000..c53f57fdde65
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_eu_stall_regs.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_EU_STALL_REGS_H_
+#define _XE_EU_STALL_REGS_H_
+
+#include "regs/xe_reg_defs.h"
+
+#define XEHPC_EUSTALL_BASE XE_REG_MCR(0xe520)
+#define XEHPC_EUSTALL_BASE_BUF_ADDR REG_GENMASK(31, 6)
+#define XEHPC_EUSTALL_BASE_XECORE_BUF_SZ REG_GENMASK(5, 3)
+#define XEHPC_EUSTALL_BASE_ENABLE_SAMPLING REG_BIT(1)
+
+#define XEHPC_EUSTALL_BASE_UPPER XE_REG_MCR(0xe524)
+
+#define XEHPC_EUSTALL_REPORT XE_REG_MCR(0xe528, XE_REG_OPTION_MASKED)
+#define XEHPC_EUSTALL_REPORT_WRITE_PTR_MASK REG_GENMASK(15, 2)
+#define XEHPC_EUSTALL_REPORT_OVERFLOW_DROP REG_BIT(1)
+
+#define XEHPC_EUSTALL_REPORT1 XE_REG_MCR(0xe52c, XE_REG_OPTION_MASKED)
+#define XEHPC_EUSTALL_REPORT1_READ_PTR_MASK REG_GENMASK(15, 2)
+
+#define XEHPC_EUSTALL_CTRL XE_REG_MCR(0xe53c, XE_REG_OPTION_MASKED)
+#define EUSTALL_MOCS REG_GENMASK(9, 3)
+#define EUSTALL_SAMPLE_RATE REG_GENMASK(2, 0)
+
+#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 096859072396..da1f198ac107 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -358,14 +358,18 @@
#define RENDER_AWAKE_STATUS REG_BIT(1)
#define MEDIA_SLICE0_AWAKE_STATUS REG_BIT(0)
+#define MISC_STATUS_0 XE_REG(0xa500)
+
#define FORCEWAKE_MEDIA_VDBOX(n) XE_REG(0xa540 + (n) * 4)
#define FORCEWAKE_MEDIA_VEBOX(n) XE_REG(0xa560 + (n) * 4)
#define FORCEWAKE_GSC XE_REG(0xa618)
+#define XELP_GARBCNTL XE_REG(0xb004)
+#define XELP_BUS_HASH_CTL_BIT_EXC REG_BIT(7)
+
#define XEHPC_LNCFMISCCFGREG0 XE_REG_MCR(0xb01c, XE_REG_OPTION_MASKED)
#define XEHPC_OVRLSCCC REG_BIT(0)
-/* L3 Cache Control */
#define LNCFCMOCS_REG_COUNT 32
#define XELP_LNCFCMOCS(i) XE_REG(0xb020 + (i) * 4)
#define XEHP_LNCFCMOCS(i) XE_REG_MCR(0xb020 + (i) * 4)
@@ -478,6 +482,7 @@
#define TDL_TSL_CHICKEN XE_REG_MCR(0xe4c4, XE_REG_OPTION_MASKED)
#define STK_ID_RESTRICT REG_BIT(12)
#define SLM_WMTP_RESTORE REG_BIT(11)
+#define RES_CHK_SPR_DIS REG_BIT(6)
#define ROW_CHICKEN XE_REG_MCR(0xe4f0, XE_REG_OPTION_MASKED)
#define UGM_BACKUP_MODE REG_BIT(13)
diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h
index 6cf282618836..3abb17d2ca33 100644
--- a/drivers/gpu/drm/xe/regs/xe_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_regs.h
@@ -7,10 +7,6 @@
#include "regs/xe_reg_defs.h"
-#define TIMESTAMP_OVERRIDE XE_REG(0x44074)
-#define TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK REG_GENMASK(15, 12)
-#define TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK REG_GENMASK(9, 0)
-
#define GU_CNTL_PROTECTED XE_REG(0x10100C)
#define DRIVERINT_FLR_DIS REG_BIT(31)
diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c
index 67404863087e..1d3e2e50c355 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci.c
@@ -21,15 +21,15 @@
*/
void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn)
{
- const struct xe_graphics_desc *ip, *last = NULL;
+ const struct xe_graphics_desc *desc, *last = NULL;
- for (int i = 0; i < ARRAY_SIZE(graphics_ip_map); i++) {
- ip = graphics_ip_map[i].ip;
- if (ip == last)
+ for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) {
+ desc = graphics_ips[i].desc;
+ if (desc == last)
continue;
- xe_fn(ip);
- last = ip;
+ xe_fn(desc);
+ last = desc;
}
}
EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_graphics_ip);
@@ -43,15 +43,15 @@ EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_graphics_ip);
*/
void xe_call_for_each_media_ip(xe_media_fn xe_fn)
{
- const struct xe_media_desc *ip, *last = NULL;
+ const struct xe_media_desc *desc, *last = NULL;
- for (int i = 0; i < ARRAY_SIZE(media_ip_map); i++) {
- ip = media_ip_map[i].ip;
- if (ip == last)
+ for (int i = 0; i < ARRAY_SIZE(media_ips); i++) {
+ desc = media_ips[i].desc;
+ if (desc == last)
continue;
- xe_fn(ip);
- last = ip;
+ xe_fn(desc);
+ last = desc;
}
}
EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_media_ip);
@@ -110,7 +110,7 @@ done:
kunit_activate_static_stub(test, read_gmdid, fake_read_gmdid);
xe_info_init_early(xe, desc, subplatform_desc);
- xe_info_init(xe, desc->graphics, desc->media);
+ xe_info_init(xe, desc);
return 0;
}
diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
index 36a3b5420fef..b0254b014fe4 100644
--- a/drivers/gpu/drm/xe/tests/xe_rtp_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
@@ -320,7 +320,7 @@ static void xe_rtp_process_to_sr_tests(struct kunit *test)
count_rtp_entries++;
xe_rtp_process_ctx_enable_active_tracking(&ctx, &active, count_rtp_entries);
- xe_rtp_process_to_sr(&ctx, param->entries, reg_sr);
+ xe_rtp_process_to_sr(&ctx, param->entries, count_rtp_entries, reg_sr);
xa_for_each(&reg_sr->xa, idx, sre) {
if (idx == param->expected_reg.addr)
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index c21e6bca3141..64f9c936eea0 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -281,6 +281,8 @@ int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
static void xe_evict_flags(struct ttm_buffer_object *tbo,
struct ttm_placement *placement)
{
+ struct xe_bo *bo;
+
if (!xe_bo_is_xe_bo(tbo)) {
/* Don't handle scatter gather BOs */
if (tbo->type == ttm_bo_type_sg) {
@@ -292,6 +294,12 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
return;
}
+ bo = ttm_to_xe_bo(tbo);
+ if (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) {
+ *placement = sys_placement;
+ return;
+ }
+
/*
* For xe, sg bos that are evicted to system just triggers a
* rebind of the sg list upon subsequent validation to XE_PL_TT.
@@ -789,6 +797,20 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
goto out;
}
+ if (!move_lacks_source && (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) &&
+ new_mem->mem_type == XE_PL_SYSTEM) {
+ ret = xe_svm_bo_evict(bo);
+ if (!ret) {
+ drm_dbg(&xe->drm, "Evict system allocator BO success\n");
+ ttm_bo_move_null(ttm_bo, new_mem);
+ } else {
+ drm_dbg(&xe->drm, "Evict system allocator BO failed=%pe\n",
+ ERR_PTR(ret));
+ }
+
+ goto out;
+ }
+
if (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT && !handle_system_ccs) {
ttm_bo_move_null(ttm_bo, new_mem);
goto out;
@@ -2441,6 +2463,7 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
struct xe_file *xef = to_xe_file(file);
struct drm_xe_gem_create *args = data;
struct xe_vm *vm = NULL;
+ ktime_t end = 0;
struct xe_bo *bo;
unsigned int bo_flags;
u32 handle;
@@ -2512,6 +2535,10 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
vm = xe_vm_lookup(xef, args->vm_id);
if (XE_IOCTL_DBG(xe, !vm))
return -ENOENT;
+ }
+
+retry:
+ if (vm) {
err = xe_vm_lock(vm, true);
if (err)
goto out_vm;
@@ -2525,6 +2552,8 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
+ if (xe_vm_validate_should_retry(NULL, err, &end))
+ goto retry;
goto out_vm;
}
@@ -2821,6 +2850,31 @@ void xe_bo_put_commit(struct llist_head *deferred)
drm_gem_object_free(&bo->ttm.base.refcount);
}
+static void xe_bo_dev_work_func(struct work_struct *work)
+{
+ struct xe_bo_dev *bo_dev = container_of(work, typeof(*bo_dev), async_free);
+
+ xe_bo_put_commit(&bo_dev->async_list);
+}
+
+/**
+ * xe_bo_dev_init() - Initialize BO dev to manage async BO freeing
+ * @bo_dev: The BO dev structure
+ */
+void xe_bo_dev_init(struct xe_bo_dev *bo_dev)
+{
+ INIT_WORK(&bo_dev->async_free, xe_bo_dev_work_func);
+}
+
+/**
+ * xe_bo_dev_fini() - Finalize BO dev managing async BO freeing
+ * @bo_dev: The BO dev structure
+ */
+void xe_bo_dev_fini(struct xe_bo_dev *bo_dev)
+{
+ flush_work(&bo_dev->async_free);
+}
+
void xe_bo_put(struct xe_bo *bo)
{
struct xe_tile *tile;
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 2a7240c45511..bda3fdd408da 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -47,6 +47,7 @@
XE_BO_FLAG_GGTT1 | \
XE_BO_FLAG_GGTT2 | \
XE_BO_FLAG_GGTT3)
+#define XE_BO_FLAG_CPU_ADDR_MIRROR BIT(22)
/* this one is trigger internally only */
#define XE_BO_FLAG_INTERNAL_TEST BIT(30)
@@ -345,6 +346,25 @@ xe_bo_put_deferred(struct xe_bo *bo, struct llist_head *deferred)
void xe_bo_put_commit(struct llist_head *deferred);
+/**
+ * xe_bo_put_async() - Put BO async
+ * @bo: The bo to put.
+ *
+ * Put the BO asynchronously; the final put is deferred to a worker so it
+ * runs outside IRQ context.
+ */
+static inline void
+xe_bo_put_async(struct xe_bo *bo)
+{
+ struct xe_bo_dev *bo_device = &xe_bo_device(bo)->bo_device;
+
+ if (xe_bo_put_deferred(bo, &bo_device->async_list))
+ schedule_work(&bo_device->async_free);
+}
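For context, a hedged sketch of the intended call pattern (struct example_job, its fence callback and the surrounding driver glue are hypothetical and not part of this patch); a completion callback running in atomic context drops its BO reference and lets the worker do the final free:

#include <linux/dma-fence.h>
#include "xe_bo.h"

struct example_job {
	struct dma_fence_cb cb;
	struct xe_bo *bo;
};

/* A dma_fence callback runs in atomic (often IRQ) context, so the final
 * BO free is handed off to xe_bo_dev.async_free instead of running here. */
static void example_job_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct example_job *job = container_of(cb, struct example_job, cb);

	xe_bo_put_async(job->bo);
}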
+
+void xe_bo_dev_init(struct xe_bo_dev *bo_device);
+
+void xe_bo_dev_fini(struct xe_bo_dev *bo_device);
+
struct sg_table *xe_bo_sg(struct xe_bo *bo);
/*
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 60c522866500..15a92e3d4898 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -8,6 +8,7 @@
#include <linux/iosys-map.h>
+#include <drm/drm_gpusvm.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
@@ -80,6 +81,9 @@ struct xe_bo {
*/
u16 cpu_caching;
+ /** @devmem_allocation: SVM device memory allocation */
+ struct drm_gpusvm_devmem devmem_allocation;
+
/** @vram_userfault_link: Link into @mem_access.vram_userfault.list */
struct list_head vram_userfault_link;
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 39fe485d2085..81b9d9bb3f57 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -237,7 +237,7 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
/*
* NB: Despite passing a GFP_ flags parameter here, more allocations are done
- * internally using GFP_KERNEL expliictly. Hence this call must be in the worker
+ * internally using GFP_KERNEL explicitly. Hence this call must be in the worker
* thread and not in the initial capture call.
*/
dev_coredumpm_timeout(gt_to_xe(ss->gt)->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL,
@@ -423,11 +423,11 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffi
if (size & 3)
drm_printf(p, "Size not word aligned: %zu", size);
if (offset & 3)
- drm_printf(p, "Offset not word aligned: %zu", size);
+ drm_printf(p, "Offset not word aligned: %zu", offset);
line_buff = kzalloc(DMESG_MAX_LINE_LEN, GFP_KERNEL);
- if (IS_ERR_OR_NULL(line_buff)) {
- drm_printf(p, "Failed to allocate line buffer: %pe", line_buff);
+ if (!line_buff) {
+ drm_printf(p, "Failed to allocate line buffer\n");
return;
}
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 68ef12b57344..5d79b439dd62 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -54,7 +54,6 @@
#include "xe_query.h"
#include "xe_shrinker.h"
#include "xe_sriov.h"
-#include "xe_survivability_mode.h"
#include "xe_tile.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_sys_mgr.h"
@@ -66,12 +65,6 @@
#include <generated/xe_wa_oob.h>
-struct xe_device_remove_action {
- struct list_head node;
- void (*action)(void *);
- void *data;
-};
-
static int xe_file_open(struct drm_device *dev, struct drm_file *file)
{
struct xe_device *xe = to_xe_device(dev);
@@ -395,6 +388,8 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy)
{
struct xe_device *xe = to_xe_device(dev);
+ xe_bo_dev_fini(&xe->bo_device);
+
if (xe->preempt_fence_wq)
destroy_workqueue(xe->preempt_fence_wq);
@@ -435,6 +430,7 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
if (WARN_ON(err))
goto err;
+ xe_bo_dev_init(&xe->bo_device);
err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL);
if (err)
goto err;
@@ -675,7 +671,7 @@ static int wait_for_lmem_ready(struct xe_device *xe)
}
ALLOW_ERROR_INJECTION(wait_for_lmem_ready, ERRNO); /* See xe_pci_probe() */
-static void update_device_info(struct xe_device *xe)
+static void sriov_update_device_info(struct xe_device *xe)
{
/* disable features that are not available/applicable to VFs */
if (IS_SRIOV_VF(xe)) {
@@ -706,15 +702,11 @@ int xe_device_probe_early(struct xe_device *xe)
xe_sriov_probe_early(xe);
- update_device_info(xe);
+ sriov_update_device_info(xe);
err = xe_pcode_probe_early(xe);
- if (err) {
- if (xe_survivability_mode_required(xe))
- xe_survivability_mode_init(xe);
-
+ if (err)
return err;
- }
err = wait_for_lmem_ready(xe);
if (err)
@@ -760,9 +752,6 @@ int xe_device_probe(struct xe_device *xe)
int err;
u8 id;
- xe->probing = true;
- INIT_LIST_HEAD(&xe->remove_action_list);
-
xe_pat_init_early(xe);
err = xe_sriov_init(xe);
@@ -770,6 +759,7 @@ int xe_device_probe(struct xe_device *xe)
return err;
xe->info.mem_region_mask = 1;
+
err = xe_set_dma_info(xe);
if (err)
return err;
@@ -778,7 +768,9 @@ int xe_device_probe(struct xe_device *xe)
if (err)
return err;
- xe_ttm_sys_mgr_init(xe);
+ err = xe_ttm_sys_mgr_init(xe);
+ if (err)
+ return err;
for_each_gt(gt, xe, id) {
err = xe_gt_init_early(gt);
@@ -873,7 +865,9 @@ int xe_device_probe(struct xe_device *xe)
return err;
}
- xe_heci_gsc_init(xe);
+ err = xe_heci_gsc_init(xe);
+ if (err)
+ return err;
err = xe_oa_init(xe);
if (err)
@@ -885,11 +879,11 @@ int xe_device_probe(struct xe_device *xe)
err = xe_pxp_init(xe);
if (err)
- goto err_remove_display;
+ return err;
err = drm_dev_register(&xe->drm, 0);
if (err)
- goto err_remove_display;
+ return err;
xe_display_register(xe);
@@ -912,84 +906,19 @@ int xe_device_probe(struct xe_device *xe)
xe_vsec_init(xe);
- xe->probing = false;
-
return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe);
err_unregister_display:
xe_display_unregister(xe);
-err_remove_display:
- xe_display_driver_remove(xe);
return err;
}
-/**
- * xe_device_call_remove_actions - Call the remove actions
- * @xe: xe device instance
- *
- * This is only to be used by xe_pci and xe_device to call the remove actions
- * while removing the driver or handling probe failures.
- */
-void xe_device_call_remove_actions(struct xe_device *xe)
-{
- struct xe_device_remove_action *ra, *tmp;
-
- list_for_each_entry_safe(ra, tmp, &xe->remove_action_list, node) {
- ra->action(ra->data);
- list_del(&ra->node);
- kfree(ra);
- }
-
- xe->probing = false;
-}
-
-/**
- * xe_device_add_action_or_reset - Add an action to run on driver removal
- * @xe: xe device instance
- * @action: Function that should be called on device remove
- * @data: Pointer to data passed to @action implementation
- *
- * This adds a custom action to the list of remove callbacks executed on device
- * remove, before any dev or drm managed resources are removed. This is only
- * needed if the action leads to component_del()/component_master_del() since
- * that is not compatible with devres cleanup.
- *
- * Returns: 0 on success or a negative error code on failure, in which case
- * @action is already called.
- */
-int xe_device_add_action_or_reset(struct xe_device *xe,
- void (*action)(void *), void *data)
-{
- struct xe_device_remove_action *ra;
-
- drm_WARN_ON(&xe->drm, !xe->probing);
-
- ra = kmalloc(sizeof(*ra), GFP_KERNEL);
- if (!ra) {
- action(data);
- return -ENOMEM;
- }
-
- INIT_LIST_HEAD(&ra->node);
- ra->action = action;
- ra->data = data;
- list_add(&ra->node, &xe->remove_action_list);
-
- return 0;
-}
-
void xe_device_remove(struct xe_device *xe)
{
xe_display_unregister(xe);
drm_dev_unplug(&xe->drm);
-
- xe_display_driver_remove(xe);
-
- xe_heci_gsc_fini(xe);
-
- xe_device_call_remove_actions(xe);
}
void xe_device_shutdown(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 079dad32a6f5..0bc3bc8e6803 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -45,9 +45,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
const struct pci_device_id *ent);
int xe_device_probe_early(struct xe_device *xe);
int xe_device_probe(struct xe_device *xe);
-int xe_device_add_action_or_reset(struct xe_device *xe,
- void (*action)(void *), void *data);
-void xe_device_call_remove_actions(struct xe_device *xe);
void xe_device_remove(struct xe_device *xe);
void xe_device_shutdown(struct xe_device *xe);
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c
index 7375937934fa..7efbd4c52791 100644
--- a/drivers/gpu/drm/xe/xe_device_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.c
@@ -32,9 +32,6 @@ vram_d3cold_threshold_show(struct device *dev,
struct xe_device *xe = pdev_to_xe_device(pdev);
int ret;
- if (!xe)
- return -EINVAL;
-
xe_pm_runtime_get(xe);
ret = sysfs_emit(buf, "%d\n", xe->d3cold.vram_threshold);
xe_pm_runtime_put(xe);
@@ -51,9 +48,6 @@ vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr,
u32 vram_d3cold_threshold;
int ret;
- if (!xe)
- return -EINVAL;
-
ret = kstrtou32(buff, 0, &vram_d3cold_threshold);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index e312595dda7e..72ef0b6fc425 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -10,6 +10,7 @@
#include <drm/drm_device.h>
#include <drm/drm_file.h>
+#include <drm/drm_pagemap.h>
#include <drm/ttm/ttm_device.h>
#include "xe_devcoredump_types.h"
@@ -106,6 +107,19 @@ struct xe_vram_region {
resource_size_t actual_physical_size;
/** @mapping: pointer to VRAM mappable space */
void __iomem *mapping;
+ /** @pagemap: Used to remap device memory as ZONE_DEVICE */
+ struct dev_pagemap pagemap;
+ /**
+ * @dpagemap: The struct drm_pagemap of the ZONE_DEVICE memory
+ * pages of this tile.
+ */
+ struct drm_pagemap dpagemap;
+ /**
+ * @hpa_base: base host physical address
+ *
+ * This is generated when remapping device memory as ZONE_DEVICE.
+ */
+ resource_size_t hpa_base;
/** @ttm: VRAM TTM manager */
struct xe_ttm_vram_mgr ttm;
};
@@ -431,20 +445,6 @@ struct xe_device {
struct xe_tile tiles[XE_MAX_TILES_PER_DEVICE];
/**
- * @remove_action_list: list of actions to execute on device remove.
- * Use xe_device_add_remove_action() for that. Actions can only be added
- * during probe and are executed during the call from PCI subsystem to
- * remove the driver from the device.
- */
- struct list_head remove_action_list;
-
- /**
- * @probing: cover the section in which @remove_action_list can be used
- * to post cleaning actions
- */
- bool probing;
-
- /**
* @mem_access: keep track of memory access in the device, possibly
* triggering additional actions when they occur.
*/
@@ -541,6 +541,14 @@ struct xe_device {
int mode;
} wedged;
+ /** @bo_device: Struct to control async free of BOs */
+ struct xe_bo_dev {
+ /** @bo_device.async_free: Free worker */
+ struct work_struct async_free;
+ /** @bo_device.async_list: List of BOs to be freed */
+ struct llist_head async_list;
+ } bo_device;
+
/** @pmu: performance monitoring unit */
struct xe_pmu pmu;
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c
new file mode 100644
index 000000000000..88a92baf5c95
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_eu_stall.c
@@ -0,0 +1,960 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/anon_inodes.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/types.h>
+
+#include <drm/drm_drv.h>
+#include <generated/xe_wa_oob.h>
+#include <uapi/drm/xe_drm.h>
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_eu_stall.h"
+#include "xe_force_wake.h"
+#include "xe_gt_mcr.h"
+#include "xe_gt_printk.h"
+#include "xe_gt_topology.h"
+#include "xe_macros.h"
+#include "xe_observation.h"
+#include "xe_pm.h"
+#include "xe_trace.h"
+#include "xe_wa.h"
+
+#include "regs/xe_eu_stall_regs.h"
+#include "regs/xe_gt_regs.h"
+
+#define POLL_PERIOD_MS 5
+
+static size_t per_xecore_buf_size = SZ_512K;
+
+struct per_xecore_buf {
+ /* Buffer vaddr */
+ u8 *vaddr;
+ /* Write pointer */
+ u32 write;
+ /* Read pointer */
+ u32 read;
+};
+
+struct xe_eu_stall_data_stream {
+ bool pollin;
+ bool enabled;
+ int wait_num_reports;
+ int sampling_rate_mult;
+ wait_queue_head_t poll_wq;
+ size_t data_record_size;
+ size_t per_xecore_buf_size;
+
+ struct xe_gt *gt;
+ struct xe_bo *bo;
+ struct per_xecore_buf *xecore_buf;
+ struct {
+ bool reported_to_user;
+ xe_dss_mask_t mask;
+ } data_drop;
+ struct delayed_work buf_poll_work;
+};
+
+struct xe_eu_stall_gt {
+ /* Lock to protect stream */
+ struct mutex stream_lock;
+ /* EU stall data stream */
+ struct xe_eu_stall_data_stream *stream;
+ /* Workqueue to schedule buffer pointers polling work */
+ struct workqueue_struct *buf_ptr_poll_wq;
+};
+
+/**
+ * struct eu_stall_open_properties - EU stall sampling properties received
+ * from user space at open.
+ * @sampling_rate_mult: EU stall sampling rate multiplier.
+ * HW will sample every (sampling_rate_mult x 251) cycles.
+ * @wait_num_reports: Minimum number of EU stall data reports to unblock poll().
+ * @gt: GT on which EU stall data will be captured.
+ */
+struct eu_stall_open_properties {
+ int sampling_rate_mult;
+ int wait_num_reports;
+ struct xe_gt *gt;
+};
+
+/*
+ * EU stall data format for PVC
+ */
+struct xe_eu_stall_data_pvc {
+ __u64 ip_addr:29; /* Bits 0 to 28 */
+ __u64 active_count:8; /* Bits 29 to 36 */
+ __u64 other_count:8; /* Bits 37 to 44 */
+ __u64 control_count:8; /* Bits 45 to 52 */
+ __u64 pipestall_count:8; /* Bits 53 to 60 */
+ __u64 send_count:8; /* Bits 61 to 68 */
+ __u64 dist_acc_count:8; /* Bits 69 to 76 */
+ __u64 sbid_count:8; /* Bits 77 to 84 */
+ __u64 sync_count:8; /* Bits 85 to 92 */
+ __u64 inst_fetch_count:8; /* Bits 93 to 100 */
+ __u64 unused_bits:27;
+ __u64 unused[6];
+} __packed;
+
+/*
+ * EU stall data format for Xe2 arch GPUs (LNL, BMG).
+ */
+struct xe_eu_stall_data_xe2 {
+ __u64 ip_addr:29; /* Bits 0 to 28 */
+ __u64 tdr_count:8; /* Bits 29 to 36 */
+ __u64 other_count:8; /* Bits 37 to 44 */
+ __u64 control_count:8; /* Bits 45 to 52 */
+ __u64 pipestall_count:8; /* Bits 53 to 60 */
+ __u64 send_count:8; /* Bits 61 to 68 */
+ __u64 dist_acc_count:8; /* Bits 69 to 76 */
+ __u64 sbid_count:8; /* Bits 77 to 84 */
+ __u64 sync_count:8; /* Bits 85 to 92 */
+ __u64 inst_fetch_count:8; /* Bits 93 to 100 */
+ __u64 active_count:8; /* Bits 101 to 108 */
+ __u64 ex_id:3; /* Bits 109 to 111 */
+ __u64 end_flag:1; /* Bit 112 */
+ __u64 unused_bits:15;
+ __u64 unused[6];
+} __packed;
+
+const u64 eu_stall_sampling_rates[] = {251, 251 * 2, 251 * 3, 251 * 4, 251 * 5, 251 * 6, 251 * 7};
+
+/**
+ * xe_eu_stall_get_sampling_rates - get EU stall sampling rates information.
+ *
+ * @num_rates: Pointer to a u32 to return the number of sampling rates.
+ * @rates: double u64 pointer that is set to the array of sampling rates.
+ *
+ * Stores the number of sampling rates and pointer to the array of
+ * sampling rates in the input pointers.
+ *
+ * Returns: Size of the EU stall sampling rates array.
+ */
+size_t xe_eu_stall_get_sampling_rates(u32 *num_rates, const u64 **rates)
+{
+ *num_rates = ARRAY_SIZE(eu_stall_sampling_rates);
+ *rates = eu_stall_sampling_rates;
+
+ return sizeof(eu_stall_sampling_rates);
+}
+
+/**
+ * xe_eu_stall_get_per_xecore_buf_size - get per XeCore buffer size.
+ *
+ * Returns: The per XeCore buffer size used to allocate the per GT
+ * EU stall data buffer.
+ */
+size_t xe_eu_stall_get_per_xecore_buf_size(void)
+{
+ return per_xecore_buf_size;
+}
+
+/**
+ * xe_eu_stall_data_record_size - get EU stall data record size.
+ *
+ * @xe: Pointer to a Xe device.
+ *
+ * Returns: EU stall data record size.
+ */
+size_t xe_eu_stall_data_record_size(struct xe_device *xe)
+{
+ size_t record_size = 0;
+
+ if (xe->info.platform == XE_PVC)
+ record_size = sizeof(struct xe_eu_stall_data_pvc);
+ else if (GRAPHICS_VER(xe) >= 20)
+ record_size = sizeof(struct xe_eu_stall_data_xe2);
+
+ xe_assert(xe, is_power_of_2(record_size));
+
+ return record_size;
+}
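A quick size check (editorial note, not in the patch) of why a plain shift converts bytes to records in num_data_rows() below:

/*
 * PVC record:  29 + 9 * 8 + 27 bit-fields          = 128 bits = 16 B, + 6 * 8 B unused = 64 B
 * Xe2 record:  29 + 10 * 8 + 3 + 1 + 15 bit-fields = 128 bits = 16 B, + 6 * 8 B unused = 64 B
 * Both layouts are 64 bytes, so data_size >> 6 yields the record count.
 */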
+
+/**
+ * num_data_rows - Return the number of EU stall data rows of 64B each
+ * for a given data size.
+ *
+ * @data_size: EU stall data size
+ */
+static u32 num_data_rows(u32 data_size)
+{
+ return data_size >> 6;
+}
+
+static void xe_eu_stall_fini(void *arg)
+{
+ struct xe_gt *gt = arg;
+
+ destroy_workqueue(gt->eu_stall->buf_ptr_poll_wq);
+ mutex_destroy(&gt->eu_stall->stream_lock);
+ kfree(gt->eu_stall);
+}
+
+/**
+ * xe_eu_stall_init() - Allocate and initialize GT level EU stall data
+ * structure xe_eu_stall_gt within struct xe_gt.
+ *
+ * @gt: GT being initialized.
+ *
+ * Returns: zero on success or a negative error code.
+ */
+int xe_eu_stall_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int ret;
+
+ gt->eu_stall = kzalloc(sizeof(*gt->eu_stall), GFP_KERNEL);
+ if (!gt->eu_stall) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ mutex_init(&gt->eu_stall->stream_lock);
+
+ gt->eu_stall->buf_ptr_poll_wq = alloc_ordered_workqueue("xe_eu_stall", 0);
+ if (!gt->eu_stall->buf_ptr_poll_wq) {
+ ret = -ENOMEM;
+ goto exit_free;
+ }
+
+ ret = devm_add_action_or_reset(xe->drm.dev, xe_eu_stall_fini, gt);
+ if (ret)
+ goto exit_destroy;
+
+ return 0;
+exit_destroy:
+ destroy_workqueue(gt->eu_stall->buf_ptr_poll_wq);
+exit_free:
+ mutex_destroy(&gt->eu_stall->stream_lock);
+ kfree(gt->eu_stall);
+exit:
+ return ret;
+}
+
+static int set_prop_eu_stall_sampling_rate(struct xe_device *xe, u64 value,
+ struct eu_stall_open_properties *props)
+{
+ value = div_u64(value, 251);
+ if (value == 0 || value > 7) {
+ drm_dbg(&xe->drm, "Invalid EU stall sampling rate %llu\n", value);
+ return -EINVAL;
+ }
+ props->sampling_rate_mult = value;
+ return 0;
+}
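Worked mapping of the property value (editorial illustration): the value is supplied in hardware cycles and reduced to a multiplier of the fixed 251-cycle base; anything outside 1..7 after the truncating division is rejected:

/*
 * value =  251 -> sampling_rate_mult = 1   (sample every 251 cycles, fastest)
 * value = 1004 -> sampling_rate_mult = 4   (the default set at open)
 * value = 1757 -> sampling_rate_mult = 7   (slowest supported rate)
 * value = 2008 -> div_u64() yields 8       -> -EINVAL
 */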
+
+static int set_prop_eu_stall_wait_num_reports(struct xe_device *xe, u64 value,
+ struct eu_stall_open_properties *props)
+{
+ props->wait_num_reports = value;
+
+ return 0;
+}
+
+static int set_prop_eu_stall_gt_id(struct xe_device *xe, u64 value,
+ struct eu_stall_open_properties *props)
+{
+ if (value >= xe->info.gt_count) {
+ drm_dbg(&xe->drm, "Invalid GT ID %llu for EU stall sampling\n", value);
+ return -EINVAL;
+ }
+ props->gt = xe_device_get_gt(xe, value);
+ return 0;
+}
+
+typedef int (*set_eu_stall_property_fn)(struct xe_device *xe, u64 value,
+ struct eu_stall_open_properties *props);
+
+static const set_eu_stall_property_fn xe_set_eu_stall_property_funcs[] = {
+ [DRM_XE_EU_STALL_PROP_SAMPLE_RATE] = set_prop_eu_stall_sampling_rate,
+ [DRM_XE_EU_STALL_PROP_WAIT_NUM_REPORTS] = set_prop_eu_stall_wait_num_reports,
+ [DRM_XE_EU_STALL_PROP_GT_ID] = set_prop_eu_stall_gt_id,
+};
+
+static int xe_eu_stall_user_ext_set_property(struct xe_device *xe, u64 extension,
+ struct eu_stall_open_properties *props)
+{
+ u64 __user *address = u64_to_user_ptr(extension);
+ struct drm_xe_ext_set_property ext;
+ int err;
+ u32 idx;
+
+ err = __copy_from_user(&ext, address, sizeof(ext));
+ if (XE_IOCTL_DBG(xe, err))
+ return -EFAULT;
+
+ if (XE_IOCTL_DBG(xe, ext.property >= ARRAY_SIZE(xe_set_eu_stall_property_funcs)) ||
+ XE_IOCTL_DBG(xe, ext.pad))
+ return -EINVAL;
+
+ idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_set_eu_stall_property_funcs));
+ return xe_set_eu_stall_property_funcs[idx](xe, ext.value, props);
+}
+
+typedef int (*xe_eu_stall_user_extension_fn)(struct xe_device *xe, u64 extension,
+ struct eu_stall_open_properties *props);
+static const xe_eu_stall_user_extension_fn xe_eu_stall_user_extension_funcs[] = {
+ [DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY] = xe_eu_stall_user_ext_set_property,
+};
+
+#define MAX_USER_EXTENSIONS 5
+static int xe_eu_stall_user_extensions(struct xe_device *xe, u64 extension,
+ int ext_number, struct eu_stall_open_properties *props)
+{
+ u64 __user *address = u64_to_user_ptr(extension);
+ struct drm_xe_user_extension ext;
+ int err;
+ u32 idx;
+
+ if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
+ return -E2BIG;
+
+ err = __copy_from_user(&ext, address, sizeof(ext));
+ if (XE_IOCTL_DBG(xe, err))
+ return -EFAULT;
+
+ if (XE_IOCTL_DBG(xe, ext.pad) ||
+ XE_IOCTL_DBG(xe, ext.name >= ARRAY_SIZE(xe_eu_stall_user_extension_funcs)))
+ return -EINVAL;
+
+ idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_eu_stall_user_extension_funcs));
+ err = xe_eu_stall_user_extension_funcs[idx](xe, extension, props);
+ if (XE_IOCTL_DBG(xe, err))
+ return err;
+
+ if (ext.next_extension)
+ return xe_eu_stall_user_extensions(xe, ext.next_extension, ++ext_number, props);
+
+ return 0;
+}
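A hedged userspace sketch of how such an extension chain is built (uapi struct and property names as referenced above; buffer lifetimes and the actual open ioctl plumbing are omitted):

#include <stdint.h>
#include <drm/xe_drm.h>

static struct drm_xe_ext_set_property gt_prop, rate_prop;

static uint64_t build_eu_stall_props(void)
{
	gt_prop.base.name = DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY;
	gt_prop.property = DRM_XE_EU_STALL_PROP_GT_ID;
	gt_prop.value = 0;					/* sample on GT 0 */

	rate_prop.base.name = DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY;
	rate_prop.base.next_extension = (uintptr_t)&gt_prop;	/* chain via next_extension */
	rate_prop.property = DRM_XE_EU_STALL_PROP_SAMPLE_RATE;
	rate_prop.value = 251 * 4;				/* multiplier 4 */

	/* The head of the chain is the u64 "data" that xe_eu_stall_user_extensions()
	 * walks, at most MAX_USER_EXTENSIONS entries deep. */
	return (uintptr_t)&rate_prop;
}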
+
+/**
+ * buf_data_size - Calculate the number of bytes in a circular buffer
+ * given the read and write pointers and the size of
+ * the buffer.
+ *
+ * @buf_size: Size of the circular buffer
+ * @read_ptr: Read pointer with an additional overflow bit
+ * @write_ptr: Write pointer with an additional overflow bit
+ *
+ * Since the read and write pointers have an additional overflow bit,
+ * this function calculates the offsets from the pointers and uses the
+ * offsets to calculate the data size in the buffer.
+ *
+ * Returns: number of bytes of data in the buffer
+ */
+static u32 buf_data_size(size_t buf_size, u32 read_ptr, u32 write_ptr)
+{
+ u32 read_offset, write_offset, size = 0;
+
+ if (read_ptr == write_ptr)
+ goto exit;
+
+ read_offset = read_ptr & (buf_size - 1);
+ write_offset = write_ptr & (buf_size - 1);
+
+ if (write_offset > read_offset)
+ size = write_offset - read_offset;
+ else
+ size = buf_size - read_offset + write_offset;
+exit:
+ return size;
+}
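Worked example of the pointer arithmetic above (editorial; values hypothetical), for a 256 KiB buffer where offsets are 18 bits and the pointers carry one extra overflow bit:

/*
 * read_ptr  = 0x7ff00 -> read_offset  = 0x3ff00  (overflow bit set)
 * write_ptr = 0x00100 -> write_offset = 0x00100  (overflow bit clear)
 * size      = 0x40000 - 0x3ff00 + 0x00100 = 0x200 bytes
 *
 * Equal pointers mean an empty buffer; equal offsets with differing overflow
 * bits mean a completely full one (size == buf_size).
 */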
+
+/**
+ * eu_stall_data_buf_poll - Poll for EU stall data in the buffer.
+ *
+ * @stream: xe EU stall data stream instance
+ *
+ * Returns: true if the EU stall buffer contains at least the minimum amount of
+ * stall data specified by the event report count, else false.
+ */
+static bool eu_stall_data_buf_poll(struct xe_eu_stall_data_stream *stream)
+{
+ u32 read_ptr, write_ptr_reg, write_ptr, total_data = 0;
+ u32 buf_size = stream->per_xecore_buf_size;
+ struct per_xecore_buf *xecore_buf;
+ struct xe_gt *gt = stream->gt;
+ bool min_data_present = false;
+ u16 group, instance;
+ unsigned int xecore;
+
+ mutex_lock(&gt->eu_stall->stream_lock);
+ for_each_dss_steering(xecore, gt, group, instance) {
+ xecore_buf = &stream->xecore_buf[xecore];
+ read_ptr = xecore_buf->read;
+ write_ptr_reg = xe_gt_mcr_unicast_read(gt, XEHPC_EUSTALL_REPORT,
+ group, instance);
+ write_ptr = REG_FIELD_GET(XEHPC_EUSTALL_REPORT_WRITE_PTR_MASK, write_ptr_reg);
+ write_ptr <<= 6;
+ write_ptr &= ((buf_size << 1) - 1);
+ if (!min_data_present) {
+ total_data += buf_data_size(buf_size, read_ptr, write_ptr);
+ if (num_data_rows(total_data) >= stream->wait_num_reports)
+ min_data_present = true;
+ }
+ if (write_ptr_reg & XEHPC_EUSTALL_REPORT_OVERFLOW_DROP)
+ set_bit(xecore, stream->data_drop.mask);
+ xecore_buf->write = write_ptr;
+ }
+ mutex_unlock(&gt->eu_stall->stream_lock);
+
+ return min_data_present;
+}
+
+static void clear_dropped_eviction_line_bit(struct xe_gt *gt, u16 group, u16 instance)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 write_ptr_reg;
+
+ /* On PVC, the overflow bit has to be cleared by writing 1 to it.
+ * On Xe2 and later GPUs, the bit has to be cleared by writing 0 to it.
+ */
+ if (GRAPHICS_VER(xe) >= 20)
+ write_ptr_reg = _MASKED_BIT_DISABLE(XEHPC_EUSTALL_REPORT_OVERFLOW_DROP);
+ else
+ write_ptr_reg = _MASKED_BIT_ENABLE(XEHPC_EUSTALL_REPORT_OVERFLOW_DROP);
+
+ xe_gt_mcr_unicast_write(gt, XEHPC_EUSTALL_REPORT, write_ptr_reg, group, instance);
+}
+
+static int xe_eu_stall_data_buf_read(struct xe_eu_stall_data_stream *stream,
+ char __user *buf, size_t count,
+ size_t *total_data_size, struct xe_gt *gt,
+ u16 group, u16 instance, unsigned int xecore)
+{
+ size_t read_data_size, copy_size, buf_size;
+ u32 read_ptr_reg, read_ptr, write_ptr;
+ u8 *xecore_start_vaddr, *read_vaddr;
+ struct per_xecore_buf *xecore_buf;
+ u32 read_offset, write_offset;
+
+ /* Hardware increments the read and write pointers such that they can
+ * overflow into one additional bit. For example, a 256KB size buffer
+ * offset pointer needs 18 bits. But HW uses 19 bits for the read and
+ * write pointers. This technique avoids wasting a slot in the buffer.
+ * Read and write offsets are calculated from the pointers in order to
+ * check if the write pointer has wrapped around the array.
+ */
+ xecore_buf = &stream->xecore_buf[xecore];
+ xecore_start_vaddr = xecore_buf->vaddr;
+ read_ptr = xecore_buf->read;
+ write_ptr = xecore_buf->write;
+ buf_size = stream->per_xecore_buf_size;
+
+ read_data_size = buf_data_size(buf_size, read_ptr, write_ptr);
+ /* Read only the data that the user space buffer can accommodate */
+ read_data_size = min_t(size_t, count - *total_data_size, read_data_size);
+ if (read_data_size == 0)
+ goto exit_drop;
+
+ read_offset = read_ptr & (buf_size - 1);
+ write_offset = write_ptr & (buf_size - 1);
+ read_vaddr = xecore_start_vaddr + read_offset;
+
+ if (write_offset > read_offset) {
+ if (copy_to_user(buf + *total_data_size, read_vaddr, read_data_size))
+ return -EFAULT;
+ } else {
+ if (read_data_size >= buf_size - read_offset)
+ copy_size = buf_size - read_offset;
+ else
+ copy_size = read_data_size;
+ if (copy_to_user(buf + *total_data_size, read_vaddr, copy_size))
+ return -EFAULT;
+ if (copy_to_user(buf + *total_data_size + copy_size,
+ xecore_start_vaddr, read_data_size - copy_size))
+ return -EFAULT;
+ }
+
+ *total_data_size += read_data_size;
+ read_ptr += read_data_size;
+
+ /* Read pointer can overflow into one additional bit */
+ read_ptr &= (buf_size << 1) - 1;
+ read_ptr_reg = REG_FIELD_PREP(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, (read_ptr >> 6));
+ read_ptr_reg = _MASKED_FIELD(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, read_ptr_reg);
+ xe_gt_mcr_unicast_write(gt, XEHPC_EUSTALL_REPORT1, read_ptr_reg, group, instance);
+ xecore_buf->read = read_ptr;
+ trace_xe_eu_stall_data_read(group, instance, read_ptr, write_ptr,
+ read_data_size, *total_data_size);
+exit_drop:
+ /* Clear drop bit (if set) after any data was read or if the buffer was empty.
+ * The drop bit can be set even when the buffer is empty, since the buffer may
+ * have been emptied by a previous read() during which the drop bit was also set.
+ */
+ if (test_bit(xecore, stream->data_drop.mask)) {
+ clear_dropped_eviction_line_bit(gt, group, instance);
+ clear_bit(xecore, stream->data_drop.mask);
+ }
+ return 0;
+}
+
+/**
+ * xe_eu_stall_stream_read_locked - copy EU stall counters data from the
+ * per xecore buffers to the userspace buffer
+ * @stream: A stream opened for EU stall count metrics
+ * @file: An xe EU stall data stream file
+ * @buf: destination buffer given by userspace
+ * @count: the number of bytes userspace wants to read
+ *
+ * Returns: Number of bytes copied or a negative error code.
+ * If we've successfully copied any data then reporting that takes
+ * precedence over any internal error status, so the data isn't lost.
+ */
+static ssize_t xe_eu_stall_stream_read_locked(struct xe_eu_stall_data_stream *stream,
+ struct file *file, char __user *buf,
+ size_t count)
+{
+ struct xe_gt *gt = stream->gt;
+ size_t total_size = 0;
+ u16 group, instance;
+ unsigned int xecore;
+ int ret = 0;
+
+ if (bitmap_weight(stream->data_drop.mask, XE_MAX_DSS_FUSE_BITS)) {
+ if (!stream->data_drop.reported_to_user) {
+ stream->data_drop.reported_to_user = true;
+ xe_gt_dbg(gt, "EU stall data dropped in XeCores: %*pb\n",
+ XE_MAX_DSS_FUSE_BITS, stream->data_drop.mask);
+ return -EIO;
+ }
+ stream->data_drop.reported_to_user = false;
+ }
+
+ for_each_dss_steering(xecore, gt, group, instance) {
+ ret = xe_eu_stall_data_buf_read(stream, buf, count, &total_size,
+ gt, group, instance, xecore);
+ if (ret || count == total_size)
+ break;
+ }
+ return total_size ?: (ret ?: -EAGAIN);
+}
+
+/*
+ * Userspace must enable the EU stall stream with DRM_XE_OBSERVATION_IOCTL_ENABLE
+ * before calling read().
+ *
+ * Returns: The number of bytes copied or a negative error code on failure.
+ * -EIO if HW drops any EU stall data when the buffer is full.
+ */
+static ssize_t xe_eu_stall_stream_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct xe_eu_stall_data_stream *stream = file->private_data;
+ struct xe_gt *gt = stream->gt;
+ ssize_t ret, aligned_count;
+
+ aligned_count = ALIGN_DOWN(count, stream->data_record_size);
+ if (aligned_count == 0)
+ return -EINVAL;
+
+ if (!stream->enabled) {
+ xe_gt_dbg(gt, "EU stall data stream not enabled to read\n");
+ return -EINVAL;
+ }
+
+ if (!(file->f_flags & O_NONBLOCK)) {
+ do {
+ ret = wait_event_interruptible(stream->poll_wq, stream->pollin);
+ if (ret)
+ return -EINTR;
+
+ mutex_lock(&gt->eu_stall->stream_lock);
+ ret = xe_eu_stall_stream_read_locked(stream, file, buf, aligned_count);
+ mutex_unlock(&gt->eu_stall->stream_lock);
+ } while (ret == -EAGAIN);
+ } else {
+ mutex_lock(&gt->eu_stall->stream_lock);
+ ret = xe_eu_stall_stream_read_locked(stream, file, buf, aligned_count);
+ mutex_unlock(&gt->eu_stall->stream_lock);
+ }
+
+ /*
+ * This may not work correctly if the user buffer is very small.
+ * We don't want to block the next read() when there is data in the buffer
+ * now that couldn't be accommodated in the small user buffer.
+ */
+ stream->pollin = false;
+
+ return ret;
+}
+
+static void xe_eu_stall_stream_free(struct xe_eu_stall_data_stream *stream)
+{
+ struct xe_gt *gt = stream->gt;
+
+ gt->eu_stall->stream = NULL;
+ kfree(stream);
+}
+
+static void xe_eu_stall_data_buf_destroy(struct xe_eu_stall_data_stream *stream)
+{
+ xe_bo_unpin_map_no_vm(stream->bo);
+ kfree(stream->xecore_buf);
+}
+
+static int xe_eu_stall_data_buf_alloc(struct xe_eu_stall_data_stream *stream,
+ u16 last_xecore)
+{
+ struct xe_tile *tile = stream->gt->tile;
+ struct xe_bo *bo;
+ u32 size;
+
+ stream->xecore_buf = kcalloc(last_xecore, sizeof(*stream->xecore_buf), GFP_KERNEL);
+ if (!stream->xecore_buf)
+ return -ENOMEM;
+
+ size = stream->per_xecore_buf_size * last_xecore;
+
+ bo = xe_bo_create_pin_map_at_aligned(tile->xe, tile, NULL,
+ size, ~0ull, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT, SZ_64);
+ if (IS_ERR(bo)) {
+ kfree(stream->xecore_buf);
+ return PTR_ERR(bo);
+ }
+
+ XE_WARN_ON(!IS_ALIGNED(xe_bo_ggtt_addr(bo), SZ_64));
+ stream->bo = bo;
+
+ return 0;
+}
+
+static int xe_eu_stall_stream_enable(struct xe_eu_stall_data_stream *stream)
+{
+ u32 write_ptr_reg, write_ptr, read_ptr_reg, reg_value;
+ struct per_xecore_buf *xecore_buf;
+ struct xe_gt *gt = stream->gt;
+ u16 group, instance;
+ unsigned int fw_ref;
+ int xecore;
+
+ /* Take runtime pm ref and forcewake to disable RC6 */
+ xe_pm_runtime_get(gt_to_xe(gt));
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_RENDER);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_RENDER)) {
+ xe_gt_err(gt, "Failed to get RENDER forcewake\n");
+ xe_pm_runtime_put(gt_to_xe(gt));
+ return -ETIMEDOUT;
+ }
+
+ if (XE_WA(gt, 22016596838))
+ xe_gt_mcr_multicast_write(gt, ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DISABLE_DOP_GATING));
+
+ for_each_dss_steering(xecore, gt, group, instance) {
+ write_ptr_reg = xe_gt_mcr_unicast_read(gt, XEHPC_EUSTALL_REPORT, group, instance);
+ /* Clear any drop bits set and not cleared in the previous session. */
+ if (write_ptr_reg & XEHPC_EUSTALL_REPORT_OVERFLOW_DROP)
+ clear_dropped_eviction_line_bit(gt, group, instance);
+ write_ptr = REG_FIELD_GET(XEHPC_EUSTALL_REPORT_WRITE_PTR_MASK, write_ptr_reg);
+ read_ptr_reg = REG_FIELD_PREP(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, write_ptr);
+ read_ptr_reg = _MASKED_FIELD(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, read_ptr_reg);
+ /* Initialize the read pointer to the write pointer */
+ xe_gt_mcr_unicast_write(gt, XEHPC_EUSTALL_REPORT1, read_ptr_reg, group, instance);
+ write_ptr <<= 6;
+ write_ptr &= (stream->per_xecore_buf_size << 1) - 1;
+ xecore_buf = &stream->xecore_buf[xecore];
+ xecore_buf->write = write_ptr;
+ xecore_buf->read = write_ptr;
+ }
+ stream->data_drop.reported_to_user = false;
+ bitmap_zero(stream->data_drop.mask, XE_MAX_DSS_FUSE_BITS);
+
+ reg_value = _MASKED_FIELD(EUSTALL_MOCS | EUSTALL_SAMPLE_RATE,
+ REG_FIELD_PREP(EUSTALL_MOCS, gt->mocs.uc_index << 1) |
+ REG_FIELD_PREP(EUSTALL_SAMPLE_RATE,
+ stream->sampling_rate_mult));
+ xe_gt_mcr_multicast_write(gt, XEHPC_EUSTALL_CTRL, reg_value);
+ /* GGTT addresses can never be > 32 bits */
+ xe_gt_mcr_multicast_write(gt, XEHPC_EUSTALL_BASE_UPPER, 0);
+ reg_value = xe_bo_ggtt_addr(stream->bo);
+ reg_value |= REG_FIELD_PREP(XEHPC_EUSTALL_BASE_XECORE_BUF_SZ,
+ stream->per_xecore_buf_size / SZ_256K);
+ reg_value |= XEHPC_EUSTALL_BASE_ENABLE_SAMPLING;
+ xe_gt_mcr_multicast_write(gt, XEHPC_EUSTALL_BASE, reg_value);
+
+ return 0;
+}
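Illustrative composition of the base register written above (GGTT address hypothetical), for the default 512 KiB per-XeCore buffer:

/*
 * buffer GGTT address                = 0x01000000  (64-byte aligned)
 * XEHPC_EUSTALL_BASE_XECORE_BUF_SZ   = 512K / 256K = 2 -> bits [5:3] = 0x10
 * XEHPC_EUSTALL_BASE_ENABLE_SAMPLING = bit [1]         = 0x02
 * XEHPC_EUSTALL_BASE                 = 0x01000000 | 0x10 | 0x02 = 0x01000012
 * The 64-byte alignment guarantees the address never collides with the low
 * control bits.
 */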
+
+static void eu_stall_data_buf_poll_work_fn(struct work_struct *work)
+{
+ struct xe_eu_stall_data_stream *stream =
+ container_of(work, typeof(*stream), buf_poll_work.work);
+ struct xe_gt *gt = stream->gt;
+
+ if (eu_stall_data_buf_poll(stream)) {
+ stream->pollin = true;
+ wake_up(&stream->poll_wq);
+ }
+ queue_delayed_work(gt->eu_stall->buf_ptr_poll_wq,
+ &stream->buf_poll_work,
+ msecs_to_jiffies(POLL_PERIOD_MS));
+}
+
+static int xe_eu_stall_stream_init(struct xe_eu_stall_data_stream *stream,
+ struct eu_stall_open_properties *props)
+{
+ unsigned int max_wait_num_reports, xecore, last_xecore, num_xecores;
+ struct per_xecore_buf *xecore_buf;
+ struct xe_gt *gt = stream->gt;
+ xe_dss_mask_t all_xecores;
+ u16 group, instance;
+ u32 vaddr_offset;
+ int ret;
+
+ bitmap_or(all_xecores, gt->fuse_topo.g_dss_mask, gt->fuse_topo.c_dss_mask,
+ XE_MAX_DSS_FUSE_BITS);
+ num_xecores = bitmap_weight(all_xecores, XE_MAX_DSS_FUSE_BITS);
+ last_xecore = xe_gt_topology_mask_last_dss(all_xecores) + 1;
+
+ max_wait_num_reports = num_data_rows(per_xecore_buf_size * num_xecores);
+ if (props->wait_num_reports == 0 || props->wait_num_reports > max_wait_num_reports) {
+ xe_gt_dbg(gt, "Invalid EU stall event report count %u\n",
+ props->wait_num_reports);
+ xe_gt_dbg(gt, "Minimum event report count is 1, maximum is %u\n",
+ max_wait_num_reports);
+ return -EINVAL;
+ }
+
+ init_waitqueue_head(&stream->poll_wq);
+ INIT_DELAYED_WORK(&stream->buf_poll_work, eu_stall_data_buf_poll_work_fn);
+ stream->per_xecore_buf_size = per_xecore_buf_size;
+ stream->sampling_rate_mult = props->sampling_rate_mult;
+ stream->wait_num_reports = props->wait_num_reports;
+ stream->data_record_size = xe_eu_stall_data_record_size(gt_to_xe(gt));
+
+ ret = xe_eu_stall_data_buf_alloc(stream, last_xecore);
+ if (ret)
+ return ret;
+
+ for_each_dss_steering(xecore, gt, group, instance) {
+ xecore_buf = &stream->xecore_buf[xecore];
+ vaddr_offset = xecore * stream->per_xecore_buf_size;
+ xecore_buf->vaddr = stream->bo->vmap.vaddr + vaddr_offset;
+ }
+ return 0;
+}
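Buffer layout sketch (editorial; counts hypothetical): with the default 512 KiB per-XeCore size and last_xecore == 4, one 2 MiB GGTT-mapped BO is sliced as:

/*
 * xecore 0 -> vmap.vaddr + 0x000000      xecore 2 -> vmap.vaddr + 0x100000
 * xecore 1 -> vmap.vaddr + 0x080000      xecore 3 -> vmap.vaddr + 0x180000
 * Fused-off XeCores still occupy their slot, which keeps the per-XeCore offset
 * a simple multiply by the buffer size.
 */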
+
+static __poll_t xe_eu_stall_stream_poll_locked(struct xe_eu_stall_data_stream *stream,
+ struct file *file, poll_table *wait)
+{
+ __poll_t events = 0;
+
+ poll_wait(file, &stream->poll_wq, wait);
+
+ if (stream->pollin)
+ events |= EPOLLIN;
+
+ return events;
+}
+
+static __poll_t xe_eu_stall_stream_poll(struct file *file, poll_table *wait)
+{
+ struct xe_eu_stall_data_stream *stream = file->private_data;
+ struct xe_gt *gt = stream->gt;
+ __poll_t ret;
+
+ mutex_lock(&gt->eu_stall->stream_lock);
+ ret = xe_eu_stall_stream_poll_locked(stream, file, wait);
+ mutex_unlock(&gt->eu_stall->stream_lock);
+
+ return ret;
+}
+
+static int xe_eu_stall_enable_locked(struct xe_eu_stall_data_stream *stream)
+{
+ struct xe_gt *gt = stream->gt;
+ int ret = 0;
+
+ if (stream->enabled)
+ return ret;
+
+ stream->enabled = true;
+
+ ret = xe_eu_stall_stream_enable(stream);
+
+ queue_delayed_work(gt->eu_stall->buf_ptr_poll_wq,
+ &stream->buf_poll_work,
+ msecs_to_jiffies(POLL_PERIOD_MS));
+ return ret;
+}
+
+static int xe_eu_stall_disable_locked(struct xe_eu_stall_data_stream *stream)
+{
+ struct xe_gt *gt = stream->gt;
+
+ if (!stream->enabled)
+ return 0;
+
+ stream->enabled = false;
+
+ xe_gt_mcr_multicast_write(gt, XEHPC_EUSTALL_BASE, 0);
+
+ cancel_delayed_work_sync(&stream->buf_poll_work);
+
+ if (XE_WA(gt, 22016596838))
+ xe_gt_mcr_multicast_write(gt, ROW_CHICKEN2,
+ _MASKED_BIT_DISABLE(DISABLE_DOP_GATING));
+
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_RENDER);
+ xe_pm_runtime_put(gt_to_xe(gt));
+
+ return 0;
+}
+
+static long xe_eu_stall_stream_ioctl_locked(struct xe_eu_stall_data_stream *stream,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case DRM_XE_OBSERVATION_IOCTL_ENABLE:
+ return xe_eu_stall_enable_locked(stream);
+ case DRM_XE_OBSERVATION_IOCTL_DISABLE:
+ return xe_eu_stall_disable_locked(stream);
+ }
+
+ return -EINVAL;
+}
+
+static long xe_eu_stall_stream_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct xe_eu_stall_data_stream *stream = file->private_data;
+ struct xe_gt *gt = stream->gt;
+ long ret;
+
+ mutex_lock(&gt->eu_stall->stream_lock);
+ ret = xe_eu_stall_stream_ioctl_locked(stream, cmd, arg);
+ mutex_unlock(&gt->eu_stall->stream_lock);
+
+ return ret;
+}
+
+static int xe_eu_stall_stream_close(struct inode *inode, struct file *file)
+{
+ struct xe_eu_stall_data_stream *stream = file->private_data;
+ struct xe_gt *gt = stream->gt;
+
+ drm_dev_put(&gt->tile->xe->drm);
+
+ mutex_lock(&gt->eu_stall->stream_lock);
+ xe_eu_stall_disable_locked(stream);
+ xe_eu_stall_data_buf_destroy(stream);
+ xe_eu_stall_stream_free(stream);
+ mutex_unlock(&gt->eu_stall->stream_lock);
+
+ return 0;
+}
+
+static const struct file_operations fops_eu_stall = {
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+ .release = xe_eu_stall_stream_close,
+ .poll = xe_eu_stall_stream_poll,
+ .read = xe_eu_stall_stream_read,
+ .unlocked_ioctl = xe_eu_stall_stream_ioctl,
+ .compat_ioctl = xe_eu_stall_stream_ioctl,
+};
+
+static int xe_eu_stall_stream_open_locked(struct drm_device *dev,
+ struct eu_stall_open_properties *props,
+ struct drm_file *file)
+{
+ struct xe_eu_stall_data_stream *stream;
+ struct xe_gt *gt = props->gt;
+ unsigned long f_flags = 0;
+ int ret, stream_fd;
+
+ /* Only one session can be active at any time */
+ if (gt->eu_stall->stream) {
+ xe_gt_dbg(gt, "EU stall sampling session already active\n");
+ return -EBUSY;
+ }
+
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (!stream)
+ return -ENOMEM;
+
+ gt->eu_stall->stream = stream;
+ stream->gt = gt;
+
+ ret = xe_eu_stall_stream_init(stream, props);
+ if (ret) {
+ xe_gt_dbg(gt, "EU stall stream init failed : %d\n", ret);
+ goto err_free;
+ }
+
+ stream_fd = anon_inode_getfd("[xe_eu_stall]", &fops_eu_stall, stream, f_flags);
+ if (stream_fd < 0) {
+ ret = stream_fd;
+ xe_gt_dbg(gt, "EU stall inode get fd failed : %d\n", ret);
+ goto err_destroy;
+ }
+
+ /* Take a reference on the driver that will be kept with stream_fd
+ * until its release.
+ */
+ drm_dev_get(&gt->tile->xe->drm);
+
+ return stream_fd;
+
+err_destroy:
+ xe_eu_stall_data_buf_destroy(stream);
+err_free:
+ xe_eu_stall_stream_free(stream);
+ return ret;
+}
+
+/**
+ * xe_eu_stall_stream_open - Open a xe EU stall data stream fd
+ *
+ * @dev: DRM device pointer
+ * @data: pointer to first struct @drm_xe_ext_set_property in
+ * the chain of input properties from the user space.
+ * @file: DRM file pointer
+ *
+ * This function opens an EU stall data stream with input properties from
+ * the user space.
+ *
+ * Returns: EU stall data stream fd on success or a negative error code.
+ */
+int xe_eu_stall_stream_open(struct drm_device *dev, u64 data, struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct eu_stall_open_properties props = {};
+ int ret;
+
+ if (!xe_eu_stall_supported_on_platform(xe)) {
+ drm_dbg(&xe->drm, "EU stall monitoring is not supported on this platform\n");
+ return -ENODEV;
+ }
+
+ if (xe_observation_paranoid && !perfmon_capable()) {
+ drm_dbg(&xe->drm, "Insufficient privileges for EU stall monitoring\n");
+ return -EACCES;
+ }
+
+ /* Initialize and set default values */
+ props.wait_num_reports = 1;
+ props.sampling_rate_mult = 4;
+
+ ret = xe_eu_stall_user_extensions(xe, data, 0, &props);
+ if (ret)
+ return ret;
+
+ if (!props.gt) {
+ drm_dbg(&xe->drm, "GT ID not provided for EU stall sampling\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&props.gt->eu_stall->stream_lock);
+ ret = xe_eu_stall_stream_open_locked(dev, &props, file);
+ mutex_unlock(&props.gt->eu_stall->stream_lock);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.h b/drivers/gpu/drm/xe/xe_eu_stall.h
new file mode 100644
index 000000000000..ed9d0f233566
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_eu_stall.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __XE_EU_STALL_H__
+#define __XE_EU_STALL_H__
+
+#include "xe_gt_types.h"
+
+size_t xe_eu_stall_get_per_xecore_buf_size(void);
+size_t xe_eu_stall_data_record_size(struct xe_device *xe);
+size_t xe_eu_stall_get_sampling_rates(u32 *num_rates, const u64 **rates);
+
+int xe_eu_stall_init(struct xe_gt *gt);
+int xe_eu_stall_stream_open(struct drm_device *dev,
+ u64 data,
+ struct drm_file *file);
+
+static inline bool xe_eu_stall_supported_on_platform(struct xe_device *xe)
+{
+ return xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20;
+}
+#endif
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 23a9f519ce1c..606922d9dd73 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -203,6 +203,7 @@ err_post_alloc:
__xe_exec_queue_free(q);
return ERR_PTR(err);
}
+ALLOW_ERROR_INJECTION(xe_exec_queue_create, ERRNO);
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
struct xe_vm *vm,
@@ -604,11 +605,12 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
struct xe_tile *tile;
struct xe_exec_queue *q = NULL;
u32 logical_mask;
+ u32 flags = 0;
u32 id;
u32 len;
int err;
- if (XE_IOCTL_DBG(xe, args->flags) ||
+ if (XE_IOCTL_DBG(xe, args->flags & ~DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT) ||
XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
@@ -625,6 +627,9 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
return -EINVAL;
+ if (args->flags & DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT)
+ flags |= EXEC_QUEUE_FLAG_LOW_LATENCY;
+
if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
if (XE_IOCTL_DBG(xe, args->width != 1) ||
XE_IOCTL_DBG(xe, args->num_placements != 1) ||
@@ -633,8 +638,8 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
for_each_tile(tile, xe, id) {
struct xe_exec_queue *new;
- u32 flags = EXEC_QUEUE_FLAG_VM;
+ flags |= EXEC_QUEUE_FLAG_VM;
if (id)
flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;
@@ -680,7 +685,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
}
q = xe_exec_queue_create(xe, vm, logical_mask,
- args->width, hwe, 0,
+ args->width, hwe, flags,
args->extensions);
up_read(&vm->lock);
xe_vm_put(vm);
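A hedged userspace fragment showing how the new flag would be requested at queue creation (fd, vm_id and the engine instance are assumed to exist; error handling omitted):

struct drm_xe_engine_class_instance eci = {
	.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
};
struct drm_xe_exec_queue_create create = {
	.vm_id = vm_id,
	.width = 1,
	.num_placements = 1,
	.instances = (uintptr_t)&eci,
	.flags = DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT,	/* the new hint */
};
int ret = ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);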
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 6eb7ff091534..cc1cffb5c87f 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -85,6 +85,8 @@ struct xe_exec_queue {
#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD BIT(3)
/* kernel exec_queue only, set priority to highest level */
#define EXEC_QUEUE_FLAG_HIGH_PRIORITY BIT(4)
+/* flag to indicate low latency hint to guc */
+#define EXEC_QUEUE_FLAG_LOW_LATENCY BIT(5)
/**
* @flags: flags for this exec queue, should statically setup aside from ban
diff --git a/drivers/gpu/drm/xe/xe_gen_wa_oob.c b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
index 904cf47925aa..ed9183599e31 100644
--- a/drivers/gpu/drm/xe/xe_gen_wa_oob.c
+++ b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
@@ -28,10 +28,10 @@
"\n" \
"#endif\n"
-static void print_usage(FILE *f)
+static void print_usage(FILE *f, const char *progname)
{
fprintf(f, "usage: %s <input-rule-file> <generated-c-source-file> <generated-c-header-file>\n",
- program_invocation_short_name);
+ progname);
}
static void print_parse_error(const char *err_msg, const char *line,
@@ -144,7 +144,7 @@ int main(int argc, const char *argv[])
if (argc < 3) {
fprintf(stderr, "ERROR: wrong arguments\n");
- print_usage(stderr);
+ print_usage(stderr, argv[0]);
return 1;
}
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
index 31c90577faf0..8cf70b228ff3 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.c
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
@@ -490,7 +490,7 @@ int xe_gsc_proxy_init(struct xe_gsc *gsc)
gsc->proxy.component_added = true;
- return xe_device_add_action_or_reset(xe, xe_gsc_proxy_remove, gsc);
+ return devm_add_action_or_reset(xe->drm.dev, xe_gsc_proxy_remove, gsc);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 650a0ee56e97..10a9e3c72b36 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -19,6 +19,7 @@
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_device.h"
+#include "xe_eu_stall.h"
#include "xe_exec_queue.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
@@ -361,9 +362,11 @@ int xe_gt_init_early(struct xe_gt *gt)
if (err)
return err;
- xe_wa_process_gt(gt);
+ err = xe_tuning_init(gt);
+ if (err)
+ return err;
+
xe_wa_process_oob(gt);
- xe_tuning_process_gt(gt);
xe_force_wake_init_gt(gt, gt_to_fw(gt));
spin_lock_init(&gt->global_invl_lock);
@@ -450,6 +453,8 @@ static int all_fw_domain_init(struct xe_gt *gt)
}
xe_gt_mcr_set_implicit_defaults(gt);
+ xe_wa_process_gt(gt);
+ xe_tuning_process_gt(gt);
xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
err = xe_gt_clock_init(gt);
@@ -613,6 +618,10 @@ int xe_gt_init(struct xe_gt *gt)
xe_gt_record_user_engines(gt);
+ err = xe_eu_stall_init(gt);
+ if (err)
+ return err;
+
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_clock.c b/drivers/gpu/drm/xe/xe_gt_clock.c
index cc2ae159298e..2a958c92d8ea 100644
--- a/drivers/gpu/drm/xe/xe_gt_clock.c
+++ b/drivers/gpu/drm/xe/xe_gt_clock.c
@@ -12,25 +12,10 @@
#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_macros.h"
#include "xe_mmio.h"
-static u32 read_reference_ts_freq(struct xe_gt *gt)
-{
- u32 ts_override = xe_mmio_read32(&gt->mmio, TIMESTAMP_OVERRIDE);
- u32 base_freq, frac_freq;
-
- base_freq = REG_FIELD_GET(TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK,
- ts_override) + 1;
- base_freq *= 1000000;
-
- frac_freq = REG_FIELD_GET(TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK,
- ts_override);
- frac_freq = 1000000 / (frac_freq + 1);
-
- return base_freq + frac_freq;
-}
-
static u32 get_crystal_clock_freq(u32 rpm_config_reg)
{
const u32 f19_2_mhz = 19200000;
@@ -57,26 +42,30 @@ static u32 get_crystal_clock_freq(u32 rpm_config_reg)
int xe_gt_clock_init(struct xe_gt *gt)
{
- u32 ctc_reg = xe_mmio_read32(&gt->mmio, CTC_MODE);
+ u32 c0 = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);
u32 freq = 0;
- /* Assuming gen11+ so assert this assumption is correct */
- xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);
-
- if (ctc_reg & CTC_SOURCE_DIVIDE_LOGIC) {
- freq = read_reference_ts_freq(gt);
- } else {
- u32 c0 = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);
-
- freq = get_crystal_clock_freq(c0);
-
- /*
- * Now figure out how the command stream's timestamp
- * register increments from this frequency (it might
- * increment only every few clock cycle).
- */
- freq >>= 3 - REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, c0);
- }
+ /*
+ * CTC_MODE[0] = 1 is definitely not supported for Xe2 and later
+ * platforms. In theory it could be a valid setting for pre-Xe2
+ * platforms, but there's no documentation on how to properly handle
+ * this case. Reading TIMESTAMP_OVERRIDE, as the driver attempted in
+ * the past, has been confirmed as incorrect by the hardware architects.
+ *
+ * For now just warn if we ever encounter hardware in the wild that
+ * has this setting and move on as if it hadn't been set.
+ */
+ if (xe_mmio_read32(&gt->mmio, CTC_MODE) & CTC_SOURCE_DIVIDE_LOGIC)
+ xe_gt_warn(gt, "CTC_MODE[0] is set; this is unexpected and undocumented\n");
+
+ freq = get_crystal_clock_freq(c0);
+
+ /*
+ * Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+ * increment only every few clock cycles).
+ */
+ freq >>= 3 - REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, c0);
gt->info.reference_clock = freq;
return 0;
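
The reference clock is now derived purely from RPM_CONFIG0: take the crystal frequency and shift it down by (3 - CTC shift parameter). A minimal standalone sketch of that arithmetic, with made-up field values rather than real register decoding:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the real values come from decoding RPM_CONFIG0. */
static uint32_t derive_reference_clock(uint32_t crystal_hz, uint32_t ctc_shift)
{
	/* Timestamps tick at crystal / 2^(3 - shift), as in xe_gt_clock_init(). */
	return crystal_hz >> (3 - ctc_shift);
}

int main(void)
{
	/* A 19.2 MHz crystal with shift parameter 0 gives 2.4 MHz timestamps. */
	printf("%u Hz\n", derive_reference_clock(19200000, 0));
	return 0;
}
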
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index e7792858b1e4..2d63a69cbfa3 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -30,6 +30,7 @@
#include "xe_reg_sr.h"
#include "xe_reg_whitelist.h"
#include "xe_sriov.h"
+#include "xe_tuning.h"
#include "xe_uc_debugfs.h"
#include "xe_wa.h"
@@ -217,6 +218,15 @@ static int workarounds(struct xe_gt *gt, struct drm_printer *p)
return 0;
}
+static int tunings(struct xe_gt *gt, struct drm_printer *p)
+{
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_tuning_dump(gt, p);
+ xe_pm_runtime_put(gt_to_xe(gt));
+
+ return 0;
+}
+
static int pat(struct xe_gt *gt, struct drm_printer *p)
{
xe_pm_runtime_get(gt_to_xe(gt));
@@ -300,6 +310,7 @@ static const struct drm_info_list debugfs_list[] = {
{"powergate_info", .show = xe_gt_debugfs_simple_show, .data = powergate_info},
{"register-save-restore", .show = xe_gt_debugfs_simple_show, .data = register_save_restore},
{"workarounds", .show = xe_gt_debugfs_simple_show, .data = workarounds},
+ {"tunings", .show = xe_gt_debugfs_simple_show, .data = tunings},
{"pat", .show = xe_gt_debugfs_simple_show, .data = pat},
{"mocs", .show = xe_gt_debugfs_simple_show, .data = mocs},
{"default_lrc_rcs", .show = xe_gt_debugfs_simple_show, .data = rcs_default_lrc},
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 46701ca11ce0..c5ad9a0a89c2 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -19,6 +19,7 @@
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_migrate.h"
+#include "xe_svm.h"
#include "xe_trace_bo.h"
#include "xe_vm.h"
@@ -125,8 +126,8 @@ static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
return 0;
}
-static int handle_vma_pagefault(struct xe_gt *gt, struct pagefault *pf,
- struct xe_vma *vma)
+static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma,
+ bool atomic)
{
struct xe_vm *vm = xe_vma_vm(vma);
struct xe_tile *tile = gt_to_tile(gt);
@@ -134,13 +135,13 @@ static int handle_vma_pagefault(struct xe_gt *gt, struct pagefault *pf,
struct dma_fence *fence;
ktime_t end = 0;
int err;
- bool atomic;
+
+ lockdep_assert_held_write(&vm->lock);
xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT, 1);
- xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_BYTES, xe_vma_size(vma));
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_KB, xe_vma_size(vma) / 1024);
trace_xe_vma_pagefault(vma);
- atomic = access_is_atomic(pf->access_type);
/* Check if VMA is valid */
if (vma_is_valid(tile, vma) && !atomic)
@@ -210,6 +211,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
struct xe_vm *vm;
struct xe_vma *vma = NULL;
int err;
+ bool atomic;
/* SW isn't expected to handle TRTT faults */
if (pf->trva_fault)
@@ -235,7 +237,13 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
goto unlock_vm;
}
- err = handle_vma_pagefault(gt, pf, vma);
+ atomic = access_is_atomic(pf->access_type);
+
+ if (xe_vma_is_cpu_addr_mirror(vma))
+ err = xe_svm_handle_pagefault(vm, vma, gt_to_tile(gt),
+ pf->page_addr, atomic);
+ else
+ err = handle_vma_pagefault(gt, vma, atomic);
unlock_vm:
if (!err)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
index 6b5f849a0722..4efde5f46b43 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
@@ -114,7 +114,6 @@ static const struct xe_reg tgl_runtime_regs[] = {
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
- TIMESTAMP_OVERRIDE, /* _MMIO(0x44074) */
};
static const struct xe_reg ats_m_runtime_regs[] = {
@@ -127,7 +126,6 @@ static const struct xe_reg ats_m_runtime_regs[] = {
XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
- TIMESTAMP_OVERRIDE, /* _MMIO(0x44074) */
};
static const struct xe_reg pvc_runtime_regs[] = {
@@ -140,7 +138,6 @@ static const struct xe_reg pvc_runtime_regs[] = {
XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
CTC_MODE, /* _MMIO(0xA26C) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
- TIMESTAMP_OVERRIDE, /* _MMIO(0x44074) */
};
static const struct xe_reg ver_1270_runtime_regs[] = {
@@ -155,7 +152,6 @@ static const struct xe_reg ver_1270_runtime_regs[] = {
XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
- TIMESTAMP_OVERRIDE, /* _MMIO(0x44074) */
};
static const struct xe_reg ver_2000_runtime_regs[] = {
@@ -173,7 +169,6 @@ static const struct xe_reg ver_2000_runtime_regs[] = {
XE2_GT_GEOMETRY_DSS_2, /* _MMIO(0x9154) */
CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
- TIMESTAMP_OVERRIDE, /* _MMIO(0x44074) */
};
static const struct xe_reg ver_3000_runtime_regs[] = {
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 4831549da319..a439261bf4d7 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -47,12 +47,19 @@ static int guc_action_vf_reset(struct xe_guc *guc)
return ret > 0 ? -EPROTO : ret;
}
+#define GUC_RESET_VF_STATE_RETRY_MAX 10
static int vf_reset_guc_state(struct xe_gt *gt)
{
+ unsigned int retry = GUC_RESET_VF_STATE_RETRY_MAX;
struct xe_guc *guc = &gt->uc.guc;
int err;
- err = guc_action_vf_reset(guc);
+ do {
+ err = guc_action_vf_reset(guc);
+ if (!err || err != -ETIMEDOUT)
+ break;
+ } while (--retry);
+
if (unlikely(err))
xe_gt_sriov_err(gt, "Failed to reset GuC state (%pe)\n", ERR_PTR(err));
return err;
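
The VF reset above now retries the GuC action a bounded number of times, but only while the failure is -ETIMEDOUT; success or any other error ends the loop immediately. A small userspace sketch of the same bounded-retry shape, with the H2G action stubbed out purely for illustration:

#include <errno.h>
#include <stdio.h>

#define RETRY_MAX 10

/* Stub standing in for the real guc_action_vf_reset(); illustration only. */
static int fake_guc_action_vf_reset(void)
{
	static int calls;

	return ++calls < 3 ? -ETIMEDOUT : 0;	/* succeed on the third attempt */
}

int main(void)
{
	unsigned int retry = RETRY_MAX;
	int err;

	do {
		err = fake_guc_action_vf_reset();
		if (!err || err != -ETIMEDOUT)
			break;	/* success, or an error we do not retry */
	} while (--retry);

	printf("err=%d, retries left=%u\n", err, retry);
	return 0;
}
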
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c
index 2e9879ea4674..6155ea354432 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.c
+++ b/drivers/gpu/drm/xe/xe_gt_stats.c
@@ -23,13 +23,13 @@ void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr)
if (id >= __XE_GT_STATS_NUM_IDS)
return;
- atomic_add(incr, &gt->stats.counters[id]);
+ atomic64_add(incr, &gt->stats.counters[id]);
}
static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = {
"tlb_inval_count",
"vma_pagefault_count",
- "vma_pagefault_bytes",
+ "vma_pagefault_kb",
};
/**
@@ -44,8 +44,8 @@ int xe_gt_stats_print_info(struct xe_gt *gt, struct drm_printer *p)
enum xe_gt_stats_id id;
for (id = 0; id < __XE_GT_STATS_NUM_IDS; ++id)
- drm_printf(p, "%s: %d\n", stat_description[id],
- atomic_read(&gt->stats.counters[id]));
+ drm_printf(p, "%s: %lld\n", stat_description[id],
+ atomic64_read(&gt->stats.counters[id]));
return 0;
}
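
The move to atomic64_t plus KiB accounting is about headroom: a 32-bit byte counter wraps after 4 GiB of faulted VMA space, which a busy fault-heavy workload can exceed quickly. A quick standalone illustration (fault sizes and counts are made-up numbers):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vma_size = 2ULL << 20;	/* each fault covers 2 MiB, for example */
	uint64_t faults = 3000;		/* roughly 6 GiB faulted in total */

	uint32_t bytes_32 = (uint32_t)(vma_size * faults);	/* old scheme: wraps */
	uint64_t kb_64 = (vma_size / 1024) * faults;		/* new scheme */

	printf("32-bit byte counter: %u (wrapped)\n", bytes_32);
	printf("64-bit KiB counter:  %llu\n", (unsigned long long)kb_64);
	return 0;
}
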
diff --git a/drivers/gpu/drm/xe/xe_gt_stats_types.h b/drivers/gpu/drm/xe/xe_gt_stats_types.h
index b072bd80c4b9..d556771f99d6 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_stats_types.h
@@ -9,7 +9,7 @@
enum xe_gt_stats_id {
XE_GT_STATS_ID_TLB_INVAL,
XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT,
- XE_GT_STATS_ID_VMA_PAGEFAULT_BYTES,
+ XE_GT_STATS_ID_VMA_PAGEFAULT_KB,
/* must be the last entry */
__XE_GT_STATS_NUM_IDS,
};
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 0a93831c0a02..03072e094991 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -411,6 +411,28 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
}
/**
+ * xe_gt_tlb_invalidation_vm - Issue a TLB invalidation on this GT for a VM
+ * @gt: graphics tile
+ * @vm: VM to invalidate
+ *
+ * Invalidate entire VM's address space
+ */
+void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm)
+{
+ struct xe_gt_tlb_invalidation_fence fence;
+ u64 range = 1ull << vm->xe->info.va_bits;
+ int ret;
+
+ xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
+
+ ret = xe_gt_tlb_invalidation_range(gt, &fence, 0, range, vm->usm.asid);
+ if (ret < 0)
+ return;
+
+ xe_gt_tlb_invalidation_fence_wait(&fence);
+}
+
+/**
* xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
* @gt: GT structure
* @fence: invalidation fence which will be signal on TLB invalidation
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index 672acfcdf0d7..abe9b03d543e 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -12,6 +12,7 @@
struct xe_gt;
struct xe_guc;
+struct xe_vm;
struct xe_vma;
int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt);
@@ -21,6 +22,7 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
struct xe_vma *vma);
+void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm);
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
u64 start, u64 end, u32 asid);
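
The new xe_gt_tlb_invalidation_vm() issues a blocking invalidation over the VM's whole address range (1 << va_bits) using the VM's USM ASID. A hedged sketch of how a caller might use it across all GTs of a device; for_each_gt() is assumed to be the usual xe GT iterator and error handling is omitted:

/*
 * Hedged sketch of a caller (not from this patch): flush a VM's mappings
 * from every GT after tearing its page tables down wholesale.
 */
static void invalidate_vm_on_all_gts(struct xe_device *xe, struct xe_vm *vm)
{
	struct xe_gt *gt;
	u8 id;

	for_each_gt(gt, xe, id)
		xe_gt_tlb_invalidation_vm(gt, vm);
}
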
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.h b/drivers/gpu/drm/xe/xe_gt_topology.h
index 746b325bbf6e..a72d26ba0653 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.h
+++ b/drivers/gpu/drm/xe/xe_gt_topology.h
@@ -25,6 +25,19 @@ void xe_gt_topology_init(struct xe_gt *gt);
void xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p);
+/**
+ * xe_gt_topology_mask_last_dss() - Returns the index of the last DSS in a mask.
+ * @mask: Input DSS mask
+ *
+ * Return: Index of the last DSS in the input DSS mask,
+ * XE_MAX_DSS_FUSE_BITS if DSS mask is empty.
+ */
+static inline unsigned int
+xe_gt_topology_mask_last_dss(const xe_dss_mask_t mask)
+{
+ return find_last_bit(mask, XE_MAX_DSS_FUSE_BITS);
+}
+
unsigned int
xe_dss_mask_group_ffs(const xe_dss_mask_t mask, int groupsize, int groupnum);
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 6e66bf0e8b3f..e3cfb026ac88 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -139,7 +139,7 @@ struct xe_gt {
/** @stats: GT stats */
struct {
/** @stats.counters: counters for various GT stats */
- atomic_t counters[__XE_GT_STATS_NUM_IDS];
+ atomic64_t counters[__XE_GT_STATS_NUM_IDS];
} stats;
#endif
@@ -413,6 +413,16 @@ struct xe_gt {
bool oob_initialized;
} wa_active;
+ /** @tuning_active: keep track of active tunings */
+ struct {
+ /** @tuning_active.gt: bitmap with active GT tunings */
+ unsigned long *gt;
+ /** @tuning_active.engine: bitmap with active engine tunings */
+ unsigned long *engine;
+ /** @tuning_active.lrc: bitmap with active LRC tunings */
+ unsigned long *lrc;
+ } tuning_active;
+
/** @user_engines: engines present in GT and available to userspace */
struct {
/**
@@ -430,6 +440,9 @@ struct xe_gt {
/** @oa: oa observation subsystem per gt info */
struct xe_oa_gt oa;
+
+ /** @eu_stall: EU stall counters subsystem per gt info */
+ struct xe_eu_stall_gt *eu_stall;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 1619c0a52db9..bc5714a5b36b 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -27,6 +27,7 @@
#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
+#include "xe_guc_engine_activity.h"
#include "xe_guc_hwconfig.h"
#include "xe_guc_log.h"
#include "xe_guc_pc.h"
@@ -744,6 +745,10 @@ int xe_guc_init_post_hwconfig(struct xe_guc *guc)
if (ret)
return ret;
+ ret = xe_guc_engine_activity_init(guc);
+ if (ret)
+ return ret;
+
ret = xe_guc_buf_cache_init(&guc->buf);
if (ret)
return ret;
@@ -1491,14 +1496,6 @@ void xe_guc_stop(struct xe_guc *guc)
int xe_guc_start(struct xe_guc *guc)
{
- if (!IS_SRIOV_VF(guc_to_xe(guc))) {
- int err;
-
- err = xe_guc_pc_start(&guc->pc);
- xe_gt_WARN(guc_to_gt(guc), err, "Failed to start GuC PC: %pe\n",
- ERR_PTR(err));
- }
-
return xe_guc_submit_start(guc);
}
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index fab259adc380..e7c9e095a19f 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -342,7 +342,7 @@ static void guc_waklv_init(struct xe_guc_ads *ads)
offset = guc_ads_waklv_offset(ads);
remain = guc_ads_waklv_size(ads);
- if (XE_WA(gt, 14019882105))
+ if (XE_WA(gt, 14019882105) || XE_WA(gt, 16021333562))
guc_waklv_enable_simple(ads,
GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED,
&offset, &remain);
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.c b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
new file mode 100644
index 000000000000..2a457dcf31d5
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "abi/guc_actions_abi.h"
+#include "regs/xe_gt_regs.h"
+
+#include "xe_bo.h"
+#include "xe_force_wake.h"
+#include "xe_gt_printk.h"
+#include "xe_guc.h"
+#include "xe_guc_engine_activity.h"
+#include "xe_guc_ct.h"
+#include "xe_hw_engine.h"
+#include "xe_map.h"
+#include "xe_mmio.h"
+#include "xe_trace_guc.h"
+
+#define TOTAL_QUANTA 0x8000
+
+static struct iosys_map engine_activity_map(struct xe_guc *guc, struct xe_hw_engine *hwe)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
+ u16 guc_class = xe_engine_class_to_guc_class(hwe->class);
+ size_t offset;
+
+ offset = offsetof(struct guc_engine_activity_data,
+ engine_activity[guc_class][hwe->logical_instance]);
+
+ return IOSYS_MAP_INIT_OFFSET(&buffer->activity_bo->vmap, offset);
+}
+
+static struct iosys_map engine_metadata_map(struct xe_guc *guc)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
+
+ return buffer->metadata_bo->vmap;
+}
+
+static int allocate_engine_activity_group(struct xe_guc *guc)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct xe_device *xe = guc_to_xe(guc);
+ u32 num_activity_group = 1; /* Will be modified for VF */
+
+ engine_activity->eag = drmm_kcalloc(&xe->drm, num_activity_group,
+ sizeof(struct engine_activity_group), GFP_KERNEL);
+
+ if (!engine_activity->eag)
+ return -ENOMEM;
+
+ engine_activity->num_activity_group = num_activity_group;
+
+ return 0;
+}
+
+static int allocate_engine_activity_buffers(struct xe_guc *guc,
+ struct engine_activity_buffer *buffer)
+{
+ u32 metadata_size = sizeof(struct guc_engine_activity_metadata);
+ u32 size = sizeof(struct guc_engine_activity_data);
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_bo *bo, *metadata_bo;
+
+ metadata_bo = xe_bo_create_pin_map(gt_to_xe(gt), tile, NULL, PAGE_ALIGN(metadata_size),
+ ttm_bo_type_kernel, XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE);
+
+ if (IS_ERR(metadata_bo))
+ return PTR_ERR(metadata_bo);
+
+ bo = xe_bo_create_pin_map(gt_to_xe(gt), tile, NULL, PAGE_ALIGN(size),
+ ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE);
+
+ if (IS_ERR(bo)) {
+ xe_bo_unpin_map_no_vm(metadata_bo);
+ return PTR_ERR(bo);
+ }
+
+ buffer->metadata_bo = metadata_bo;
+ buffer->activity_bo = bo;
+ return 0;
+}
+
+static void free_engine_activity_buffers(struct engine_activity_buffer *buffer)
+{
+ xe_bo_unpin_map_no_vm(buffer->metadata_bo);
+ xe_bo_unpin_map_no_vm(buffer->activity_bo);
+}
+
+static bool is_engine_activity_supported(struct xe_guc *guc)
+{
+ struct xe_uc_fw_version *version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
+ struct xe_uc_fw_version required = { 1, 14, 1 };
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ if (IS_SRIOV_VF(gt_to_xe(gt))) {
+ xe_gt_info(gt, "engine activity stats not supported on VFs\n");
+ return false;
+ }
+
+ /* engine activity stats are supported from GuC interface version (1.14.1) */
+ if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER_STRUCT(required)) {
+ xe_gt_info(gt,
+ "engine activity stats unsupported in GuC interface v%u.%u.%u, need v%u.%u.%u or higher\n",
+ version->major, version->minor, version->patch, required.major,
+ required.minor, required.patch);
+ return false;
+ }
+
+ return true;
+}
+
+static struct engine_activity *hw_engine_to_engine_activity(struct xe_hw_engine *hwe)
+{
+ struct xe_guc *guc = &hwe->gt->uc.guc;
+ struct engine_activity_group *eag = &guc->engine_activity.eag[0];
+ u16 guc_class = xe_engine_class_to_guc_class(hwe->class);
+
+ return &eag->engine[guc_class][hwe->logical_instance];
+}
+
+static u64 cpu_ns_to_guc_tsc_tick(ktime_t ns, u32 freq)
+{
+ return mul_u64_u32_div(ns, freq, NSEC_PER_SEC);
+}
+
+#define read_engine_activity_record(xe_, map_, field_) \
+ xe_map_rd_field(xe_, map_, 0, struct guc_engine_activity, field_)
+
+#define read_metadata_record(xe_, map_, field_) \
+ xe_map_rd_field(xe_, map_, 0, struct guc_engine_activity_metadata, field_)
+
+static u64 get_engine_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+{
+ struct engine_activity *ea = hw_engine_to_engine_activity(hwe);
+ struct guc_engine_activity *cached_activity = &ea->activity;
+ struct guc_engine_activity_metadata *cached_metadata = &ea->metadata;
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct iosys_map activity_map, metadata_map;
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 last_update_tick, global_change_num;
+ u64 active_ticks, gpm_ts;
+ u16 change_num;
+
+ activity_map = engine_activity_map(guc, hwe);
+ metadata_map = engine_metadata_map(guc);
+ global_change_num = read_metadata_record(xe, &metadata_map, global_change_num);
+
+ /* GuC has not initialized activity data yet, return 0 */
+ if (!global_change_num)
+ goto update;
+
+ if (global_change_num == cached_metadata->global_change_num)
+ goto update;
+
+ cached_metadata->global_change_num = global_change_num;
+ change_num = read_engine_activity_record(xe, &activity_map, change_num);
+
+ if (!change_num || change_num == cached_activity->change_num)
+ goto update;
+
+ /* read engine activity values */
+ last_update_tick = read_engine_activity_record(xe, &activity_map, last_update_tick);
+ active_ticks = read_engine_activity_record(xe, &activity_map, active_ticks);
+
+ /* activity calculations */
+ ea->running = !!last_update_tick;
+ ea->total += active_ticks - cached_activity->active_ticks;
+ ea->active = 0;
+
+ /* cache the counter */
+ cached_activity->change_num = change_num;
+ cached_activity->last_update_tick = last_update_tick;
+ cached_activity->active_ticks = active_ticks;
+
+update:
+ if (ea->running) {
+ gpm_ts = xe_mmio_read64_2x32(&gt->mmio, MISC_STATUS_0) >>
+ engine_activity->gpm_timestamp_shift;
+ ea->active = lower_32_bits(gpm_ts) - cached_activity->last_update_tick;
+ }
+
+ trace_xe_guc_engine_activity(xe, ea, hwe->name, hwe->instance);
+
+ return ea->total + ea->active;
+}
+
+static u64 get_engine_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+{
+ struct engine_activity *ea = hw_engine_to_engine_activity(hwe);
+ struct guc_engine_activity_metadata *cached_metadata = &ea->metadata;
+ struct guc_engine_activity *cached_activity = &ea->activity;
+ struct iosys_map activity_map, metadata_map;
+ struct xe_device *xe = guc_to_xe(guc);
+ ktime_t now, cpu_delta;
+ u64 numerator;
+ u16 quanta_ratio;
+
+ activity_map = engine_activity_map(guc, hwe);
+ metadata_map = engine_metadata_map(guc);
+
+ if (!cached_metadata->guc_tsc_frequency_hz)
+ cached_metadata->guc_tsc_frequency_hz = read_metadata_record(xe, &metadata_map,
+ guc_tsc_frequency_hz);
+
+ quanta_ratio = read_engine_activity_record(xe, &activity_map, quanta_ratio);
+ cached_activity->quanta_ratio = quanta_ratio;
+
+ /* Total ticks calculations */
+ now = ktime_get();
+ cpu_delta = now - ea->last_cpu_ts;
+ ea->last_cpu_ts = now;
+ numerator = (ea->quanta_remainder_ns + cpu_delta) * cached_activity->quanta_ratio;
+ ea->quanta_ns += numerator / TOTAL_QUANTA;
+ ea->quanta_remainder_ns = numerator % TOTAL_QUANTA;
+ ea->quanta = cpu_ns_to_guc_tsc_tick(ea->quanta_ns, cached_metadata->guc_tsc_frequency_hz);
+
+ trace_xe_guc_engine_activity(xe, ea, hwe->name, hwe->instance);
+
+ return ea->quanta;
+}
+
+static int enable_engine_activity_stats(struct xe_guc *guc)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
+ u32 action[] = {
+ XE_GUC_ACTION_SET_DEVICE_ENGINE_ACTIVITY_BUFFER,
+ xe_bo_ggtt_addr(buffer->metadata_bo),
+ 0,
+ xe_bo_ggtt_addr(buffer->activity_bo),
+ 0,
+ };
+
+ /* Blocking here to ensure the buffers are ready before reading them */
+ return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+static void engine_activity_set_cpu_ts(struct xe_guc *guc)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_group *eag = &engine_activity->eag[0];
+ int i, j;
+
+ for (i = 0; i < GUC_MAX_ENGINE_CLASSES; i++)
+ for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; j++)
+ eag->engine[i][j].last_cpu_ts = ktime_get();
+}
+
+static u32 gpm_timestamp_shift(struct xe_gt *gt)
+{
+ u32 reg;
+
+ reg = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);
+
+ return 3 - REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, reg);
+}
+
+/**
+ * xe_guc_engine_activity_active_ticks - Get engine active ticks
+ * @guc: The GuC object
+ * @hwe: The hw_engine object
+ *
+ * Return: accumulated ticks @hwe was active since engine activity stats were enabled.
+ */
+u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+{
+ if (!xe_guc_engine_activity_supported(guc))
+ return 0;
+
+ return get_engine_active_ticks(guc, hwe);
+}
+
+/**
+ * xe_guc_engine_activity_total_ticks - Get engine total ticks
+ * @guc: The GuC object
+ * @hwe: The hw_engine object
+ *
+ * Return: accumulated quanta of ticks allocated for the engine
+ */
+u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+{
+ if (!xe_guc_engine_activity_supported(guc))
+ return 0;
+
+ return get_engine_total_ticks(guc, hwe);
+}
+
+/**
+ * xe_guc_engine_activity_supported - Check support for engine activity stats
+ * @guc: The GuC object
+ *
+ * Engine activity stats are supported from GuC interface version (1.14.1)
+ *
+ * Return: true if engine activity stats supported, false otherwise
+ */
+bool xe_guc_engine_activity_supported(struct xe_guc *guc)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+
+ return engine_activity->supported;
+}
+
+/**
+ * xe_guc_engine_activity_enable_stats - Enable engine activity stats
+ * @guc: The GuC object
+ *
+ * Enable engine activity stats and set initial timestamps
+ */
+void xe_guc_engine_activity_enable_stats(struct xe_guc *guc)
+{
+ int ret;
+
+ if (!xe_guc_engine_activity_supported(guc))
+ return;
+
+ ret = enable_engine_activity_stats(guc);
+ if (ret)
+ xe_gt_err(guc_to_gt(guc), "failed to enable activity stats %d\n", ret);
+ else
+ engine_activity_set_cpu_ts(guc);
+}
+
+static void engine_activity_fini(void *arg)
+{
+ struct xe_guc_engine_activity *engine_activity = arg;
+ struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
+
+ free_engine_activity_buffers(buffer);
+}
+
+/**
+ * xe_guc_engine_activity_init - Initialize the engine activity data
+ * @guc: The GuC object
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int xe_guc_engine_activity_init(struct xe_guc *guc)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct xe_gt *gt = guc_to_gt(guc);
+ int ret;
+
+ engine_activity->supported = is_engine_activity_supported(guc);
+ if (!engine_activity->supported)
+ return 0;
+
+ ret = allocate_engine_activity_group(guc);
+ if (ret) {
+ xe_gt_err(gt, "failed to allocate engine activity group (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ ret = allocate_engine_activity_buffers(guc, &engine_activity->device_buffer);
+ if (ret) {
+ xe_gt_err(gt, "failed to allocate engine activity buffers (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ engine_activity->gpm_timestamp_shift = gpm_timestamp_shift(gt);
+
+ return devm_add_action_or_reset(gt_to_xe(gt)->drm.dev, engine_activity_fini,
+ engine_activity);
+}
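
The two exported counters, active ticks and total allotted quanta ticks, are both monotonic, so a consumer can derive a utilization figure by sampling them twice and taking the deltas. A hedged sketch of such a consumer (not part of this patch; the sampling caller and interval are assumptions):

/*
 * Hedged sketch of a consumer (not part of this patch): sample the two
 * counters twice and turn the deltas into a busyness percentage.
 */
static u32 engine_busy_percent(struct xe_guc *guc, struct xe_hw_engine *hwe,
			       u64 *prev_active, u64 *prev_total)
{
	u64 active = xe_guc_engine_activity_active_ticks(guc, hwe);
	u64 total = xe_guc_engine_activity_total_ticks(guc, hwe);
	u64 d_active = active - *prev_active;
	u64 d_total = total - *prev_total;

	*prev_active = active;
	*prev_total = total;

	return d_total ? div64_u64(d_active * 100, d_total) : 0;
}
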
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.h b/drivers/gpu/drm/xe/xe_guc_engine_activity.h
new file mode 100644
index 000000000000..a042d4cb404c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_GUC_ENGINE_ACTIVITY_H_
+#define _XE_GUC_ENGINE_ACTIVITY_H_
+
+#include <linux/types.h>
+
+struct xe_hw_engine;
+struct xe_guc;
+
+int xe_guc_engine_activity_init(struct xe_guc *guc);
+bool xe_guc_engine_activity_supported(struct xe_guc *guc);
+void xe_guc_engine_activity_enable_stats(struct xe_guc *guc);
+u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe);
+u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe);
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h b/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h
new file mode 100644
index 000000000000..5cdd034b6b70
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_GUC_ENGINE_ACTIVITY_TYPES_H_
+#define _XE_GUC_ENGINE_ACTIVITY_TYPES_H_
+
+#include <linux/types.h>
+
+#include "xe_guc_fwif.h"
+/**
+ * struct engine_activity - Engine specific activity data
+ *
+ * Contains engine specific activity data and snapshot of the
+ * structures from GuC
+ */
+struct engine_activity {
+ /** @active: current activity */
+ u64 active;
+
+ /** @last_cpu_ts: cpu timestamp in nsec of previous sample */
+ u64 last_cpu_ts;
+
+ /** @quanta: total quanta used on HW */
+ u64 quanta;
+
+ /** @quanta_ns: total quanta_ns used on HW */
+ u64 quanta_ns;
+
+ /**
+ * @quanta_remainder_ns: remainder when the CPU time is scaled as
+ * per the quanta_ratio. This remainder is used in subsequent
+ * quanta calculations.
+ */
+ u64 quanta_remainder_ns;
+
+ /** @total: total engine activity */
+ u64 total;
+
+ /** @running: true if engine is running some work */
+ bool running;
+
+ /** @metadata: snapshot of engine activity metadata */
+ struct guc_engine_activity_metadata metadata;
+
+ /** @activity: snapshot of engine activity counter */
+ struct guc_engine_activity activity;
+};
+
+/**
+ * struct engine_activity_group - Activity data for all engines
+ */
+struct engine_activity_group {
+ /** @engine: engine specific activity data */
+ struct engine_activity engine[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
+};
+
+/**
+ * struct engine_activity_buffer - engine activity buffers
+ *
+ * This contains the buffers allocated for metadata and activity data
+ */
+struct engine_activity_buffer {
+ /** @activity_bo: object allocated to hold activity data */
+ struct xe_bo *activity_bo;
+
+ /** @metadata_bo: object allocated to hold activity metadata */
+ struct xe_bo *metadata_bo;
+};
+
+/**
+ * struct xe_guc_engine_activity - Data used by engine activity implementation
+ */
+struct xe_guc_engine_activity {
+ /** @gpm_timestamp_shift: Right shift value for the gpm timestamp */
+ u32 gpm_timestamp_shift;
+
+ /** @num_activity_group: number of activity groups */
+ u32 num_activity_group;
+
+ /** @supported: indicates support for engine activity stats */
+ bool supported;
+
+ /** @eag: holds the device level engine activity data */
+ struct engine_activity_group *eag;
+
+ /** @device_buffer: buffer object for global engine activity */
+ struct engine_activity_buffer device_buffer;
+};
+#endif
+
diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h
index 057153f89b30..6f57578b07cb 100644
--- a/drivers/gpu/drm/xe/xe_guc_fwif.h
+++ b/drivers/gpu/drm/xe/xe_guc_fwif.h
@@ -208,6 +208,25 @@ struct guc_engine_usage {
struct guc_engine_usage_record engines[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
} __packed;
+/* Engine Activity stats */
+struct guc_engine_activity {
+ u16 change_num;
+ u16 quanta_ratio;
+ u32 last_update_tick;
+ u64 active_ticks;
+} __packed;
+
+struct guc_engine_activity_data {
+ struct guc_engine_activity engine_activity[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
+} __packed;
+
+struct guc_engine_activity_metadata {
+ u32 guc_tsc_frequency_hz;
+ u32 lag_latency_usec;
+ u32 global_change_num;
+ u32 reserved;
+} __packed;
+
/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
enum xe_guc_recv_message {
XE_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1),
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 02409eedb914..25040efa043f 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -995,6 +995,17 @@ out:
return ret;
}
+static int pc_action_set_strategy(struct xe_guc_pc *pc, u32 val)
+{
+ int ret = 0;
+
+ ret = pc_action_set_param(pc,
+ SLPC_PARAM_STRATEGIES,
+ val);
+
+ return ret;
+}
+
/**
* xe_guc_pc_start - Start GuC's Power Conservation component
* @pc: Xe_GuC_PC instance
@@ -1054,6 +1065,11 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
}
ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL);
+ if (ret)
+ goto out;
+
+ /* Enable SLPC Optimized Strategy for compute */
+ ret = pc_action_set_strategy(pc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
out:
xe_force_wake_put(gt_to_fw(gt), fw_ref);
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 913c74d6e2ae..b95934055f72 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -15,6 +15,7 @@
#include <drm/drm_managed.h>
#include "abi/guc_actions_abi.h"
+#include "abi/guc_actions_slpc_abi.h"
#include "abi/guc_klvs_abi.h"
#include "regs/xe_lrc_layout.h"
#include "xe_assert.h"
@@ -400,6 +401,7 @@ static void __guc_exec_queue_policy_add_##func(struct exec_queue_policy *policy,
MAKE_EXEC_QUEUE_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
MAKE_EXEC_QUEUE_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
MAKE_EXEC_QUEUE_POLICY_ADD(priority, SCHEDULING_PRIORITY)
+MAKE_EXEC_QUEUE_POLICY_ADD(slpc_exec_queue_freq_req, SLPM_GT_FREQUENCY)
#undef MAKE_EXEC_QUEUE_POLICY_ADD
static const int xe_exec_queue_prio_to_guc[] = {
@@ -414,14 +416,20 @@ static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
struct exec_queue_policy policy;
enum xe_exec_queue_priority prio = q->sched_props.priority;
u32 timeslice_us = q->sched_props.timeslice_us;
+ u32 slpc_exec_queue_freq_req = 0;
u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;
xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
+ if (q->flags & EXEC_QUEUE_FLAG_LOW_LATENCY)
+ slpc_exec_queue_freq_req |= SLPC_CTX_FREQ_REQ_IS_COMPUTE;
+
__guc_exec_queue_policy_start_klv(&policy, q->guc->id);
__guc_exec_queue_policy_add_priority(&policy, xe_exec_queue_prio_to_guc[prio]);
__guc_exec_queue_policy_add_execution_quantum(&policy, timeslice_us);
__guc_exec_queue_policy_add_preemption_timeout(&policy, preempt_timeout_us);
+ __guc_exec_queue_policy_add_slpc_exec_queue_freq_req(&policy,
+ slpc_exec_queue_freq_req);
xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g,
__guc_exec_queue_policy_action_size(&policy), 0, 0);
@@ -1248,6 +1256,8 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr);
+ /* Confirm no work left behind accessing device structures */
+ cancel_delayed_work_sync(&ge->sched.base.work_tdr);
release_guc_id(guc, q);
xe_sched_entity_fini(&ge->entity);
xe_sched_fini(&ge->sched);
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index 573aa6308380..63bac64429a5 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -13,6 +13,7 @@
#include "xe_guc_ads_types.h"
#include "xe_guc_buf_types.h"
#include "xe_guc_ct_types.h"
+#include "xe_guc_engine_activity_types.h"
#include "xe_guc_fwif.h"
#include "xe_guc_log_types.h"
#include "xe_guc_pc_types.h"
@@ -103,6 +104,9 @@ struct xe_guc {
/** @relay: GuC Relay Communication used in SR-IOV */
struct xe_guc_relay relay;
+ /** @engine_activity: Device specific engine activity */
+ struct xe_guc_engine_activity engine_activity;
+
/**
* @notify_reg: Register which is written to notify GuC of H2G messages
*/
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c
index 06dc78d3a812..27d11e06a82b 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.c
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.c
@@ -89,12 +89,9 @@ static void heci_gsc_release_dev(struct device *dev)
kfree(adev);
}
-void xe_heci_gsc_fini(struct xe_device *xe)
+static void xe_heci_gsc_fini(void *arg)
{
- struct xe_heci_gsc *heci_gsc = &xe->heci_gsc;
-
- if (!xe->info.has_heci_gscfi && !xe->info.has_heci_cscfi)
- return;
+ struct xe_heci_gsc *heci_gsc = arg;
if (heci_gsc->adev) {
struct auxiliary_device *aux_dev = &heci_gsc->adev->aux_dev;
@@ -106,6 +103,7 @@ void xe_heci_gsc_fini(struct xe_device *xe)
if (heci_gsc->irq >= 0)
irq_free_desc(heci_gsc->irq);
+
heci_gsc->irq = -1;
}
@@ -172,14 +170,14 @@ static int heci_gsc_add_device(struct xe_device *xe, const struct heci_gsc_def *
return ret;
}
-void xe_heci_gsc_init(struct xe_device *xe)
+int xe_heci_gsc_init(struct xe_device *xe)
{
struct xe_heci_gsc *heci_gsc = &xe->heci_gsc;
- const struct heci_gsc_def *def;
+ const struct heci_gsc_def *def = NULL;
int ret;
if (!xe->info.has_heci_gscfi && !xe->info.has_heci_cscfi)
- return;
+ return 0;
heci_gsc->irq = -1;
@@ -191,29 +189,24 @@ void xe_heci_gsc_init(struct xe_device *xe)
def = &heci_gsc_def_dg2;
} else if (xe->info.platform == XE_DG1) {
def = &heci_gsc_def_dg1;
- } else {
- drm_warn_once(&xe->drm, "Unknown platform\n");
- return;
}
- if (!def->name) {
- drm_warn_once(&xe->drm, "HECI is not implemented!\n");
- return;
+ if (!def || !def->name) {
+ drm_warn(&xe->drm, "HECI is not implemented!\n");
+ return 0;
}
- if (!def->use_polling && !xe_survivability_mode_enabled(xe)) {
+ ret = devm_add_action_or_reset(xe->drm.dev, xe_heci_gsc_fini, heci_gsc);
+ if (ret)
+ return ret;
+
+ if (!def->use_polling && !xe_survivability_mode_is_enabled(xe)) {
ret = heci_gsc_irq_setup(xe);
if (ret)
- goto fail;
+ return ret;
}
- ret = heci_gsc_add_device(xe, def);
- if (ret)
- goto fail;
-
- return;
-fail:
- xe_heci_gsc_fini(xe);
+ return heci_gsc_add_device(xe, def);
}
void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir)
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.h b/drivers/gpu/drm/xe/xe_heci_gsc.h
index 48b3b1838045..745eb6783942 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.h
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.h
@@ -33,8 +33,7 @@ struct xe_heci_gsc {
int irq;
};
-void xe_heci_gsc_init(struct xe_device *xe);
-void xe_heci_gsc_fini(struct xe_device *xe);
+int xe_heci_gsc_init(struct xe_device *xe);
void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir);
void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir);
diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
index 089834467880..392102515f3d 100644
--- a/drivers/gpu/drm/xe/xe_hmm.c
+++ b/drivers/gpu/drm/xe/xe_hmm.c
@@ -19,11 +19,10 @@ static u64 xe_npages_in_range(unsigned long start, unsigned long end)
return (end - start) >> PAGE_SHIFT;
}
-/*
+/**
* xe_mark_range_accessed() - mark a range is accessed, so core mm
* have such information for memory eviction or write back to
* hard disk
- *
* @range: the range to mark
* @write: if write to this range, we mark pages in this range
* as dirty
@@ -43,15 +42,51 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write)
}
}
-/*
+static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st,
+ struct hmm_range *range, struct rw_semaphore *notifier_sem)
+{
+ unsigned long i, npages, hmm_pfn;
+ unsigned long num_chunks = 0;
+ int ret;
+
+ /* HMM docs say this is needed. */
+ ret = down_read_interruptible(notifier_sem);
+ if (ret)
+ return ret;
+
+ if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
+ up_read(notifier_sem);
+ return -EAGAIN;
+ }
+
+ npages = xe_npages_in_range(range->start, range->end);
+ for (i = 0; i < npages;) {
+ unsigned long len;
+
+ hmm_pfn = range->hmm_pfns[i];
+ xe_assert(xe, hmm_pfn & HMM_PFN_VALID);
+
+ len = 1UL << hmm_pfn_to_map_order(hmm_pfn);
+
+ /* If order > 0 the page may extend beyond range->start */
+ len -= (hmm_pfn & ~HMM_PFN_FLAGS) & (len - 1);
+ i += len;
+ num_chunks++;
+ }
+ up_read(notifier_sem);
+
+ return sg_alloc_table(st, num_chunks, GFP_KERNEL);
+}
+
+/**
* xe_build_sg() - build a scatter gather table for all the physical pages/pfn
* in a hmm_range. dma-map pages if necessary. dma-address is save in sg table
* and will be used to program GPU page table later.
- *
* @xe: the xe device who will access the dma-address in sg table
* @range: the hmm range that we build the sg table from. range->hmm_pfns[]
* has the pfn numbers of pages that back up this hmm address range.
* @st: pointer to the sg table.
+ * @notifier_sem: The xe notifier lock.
* @write: whether we write to this range. This decides dma map direction
* for system pages. If write we map it bi-diretional; otherwise
* DMA_TO_DEVICE
@@ -78,43 +113,84 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write)
* Returns 0 if successful; -ENOMEM if fails to allocate memory
*/
static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
- struct sg_table *st, bool write)
+ struct sg_table *st,
+ struct rw_semaphore *notifier_sem,
+ bool write)
{
+ unsigned long npages = xe_npages_in_range(range->start, range->end);
struct device *dev = xe->drm.dev;
- struct page **pages;
- u64 i, npages;
- int ret;
+ struct scatterlist *sgl;
+ struct page *page;
+ unsigned long i, j;
- npages = xe_npages_in_range(range->start, range->end);
- pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
+ lockdep_assert_held(notifier_sem);
- for (i = 0; i < npages; i++) {
- pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
- xe_assert(xe, !is_device_private_page(pages[i]));
+ i = 0;
+ for_each_sg(st->sgl, sgl, st->nents, j) {
+ unsigned long hmm_pfn, size;
+
+ hmm_pfn = range->hmm_pfns[i];
+ page = hmm_pfn_to_page(hmm_pfn);
+ xe_assert(xe, !is_device_private_page(page));
+
+ size = 1UL << hmm_pfn_to_map_order(hmm_pfn);
+ size -= page_to_pfn(page) & (size - 1);
+ i += size;
+
+ if (unlikely(j == st->nents - 1)) {
+ if (i > npages)
+ size -= (i - npages);
+ sg_mark_end(sgl);
+ }
+ sg_set_page(sgl, page, size << PAGE_SHIFT, 0);
}
+ xe_assert(xe, i == npages);
- ret = sg_alloc_table_from_pages_segment(st, pages, npages, 0, npages << PAGE_SHIFT,
- xe_sg_segment_size(dev), GFP_KERNEL);
- if (ret)
- goto free_pages;
+ return dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
+}
+
+static void xe_hmm_userptr_set_mapped(struct xe_userptr_vma *uvma)
+{
+ struct xe_userptr *userptr = &uvma->userptr;
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+
+ lockdep_assert_held_write(&vm->lock);
+ lockdep_assert_held(&vm->userptr.notifier_lock);
+
+ mutex_lock(&userptr->unmap_mutex);
+ xe_assert(vm->xe, !userptr->mapped);
+ userptr->mapped = true;
+ mutex_unlock(&userptr->unmap_mutex);
+}
+
+void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma)
+{
+ struct xe_userptr *userptr = &uvma->userptr;
+ struct xe_vma *vma = &uvma->vma;
+ bool write = !xe_vma_read_only(vma);
+ struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_device *xe = vm->xe;
- ret = dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
- if (ret) {
- sg_free_table(st);
- st = NULL;
+ if (!lockdep_is_held_type(&vm->userptr.notifier_lock, 0) &&
+ !lockdep_is_held_type(&vm->lock, 0) &&
+ !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
+ /* Don't unmap in exec critical section. */
+ xe_vm_assert_held(vm);
+ /* Don't unmap while mapping the sg. */
+ lockdep_assert_held(&vm->lock);
}
-free_pages:
- kvfree(pages);
- return ret;
+ mutex_lock(&userptr->unmap_mutex);
+ if (userptr->sg && userptr->mapped)
+ dma_unmap_sgtable(xe->drm.dev, userptr->sg,
+ write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
+ userptr->mapped = false;
+ mutex_unlock(&userptr->unmap_mutex);
}
-/*
+/**
* xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr
- *
* @uvma: the userptr vma which hold the scatter gather table
*
* With function xe_userptr_populate_range, we allocate storage of
@@ -124,16 +200,9 @@ free_pages:
void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
{
struct xe_userptr *userptr = &uvma->userptr;
- struct xe_vma *vma = &uvma->vma;
- bool write = !xe_vma_read_only(vma);
- struct xe_vm *vm = xe_vma_vm(vma);
- struct xe_device *xe = vm->xe;
- struct device *dev = xe->drm.dev;
-
- xe_assert(xe, userptr->sg);
- dma_unmap_sgtable(dev, userptr->sg,
- write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
+ xe_assert(xe_vma_vm(&uvma->vma)->xe, userptr->sg);
+ xe_hmm_userptr_unmap(uvma);
sg_free_table(userptr->sg);
userptr->sg = NULL;
}
@@ -166,13 +235,20 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
{
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
- unsigned long *pfns, flags = HMM_PFN_REQ_FAULT;
+ unsigned long *pfns;
struct xe_userptr *userptr;
struct xe_vma *vma = &uvma->vma;
u64 userptr_start = xe_vma_userptr(vma);
u64 userptr_end = userptr_start + xe_vma_size(vma);
struct xe_vm *vm = xe_vma_vm(vma);
- struct hmm_range hmm_range;
+ struct hmm_range hmm_range = {
+ .pfn_flags_mask = 0, /* ignore pfns */
+ .default_flags = HMM_PFN_REQ_FAULT,
+ .start = userptr_start,
+ .end = userptr_end,
+ .notifier = &uvma->userptr.notifier,
+ .dev_private_owner = vm->xe,
+ };
bool write = !xe_vma_read_only(vma);
unsigned long notifier_seq;
u64 npages;
@@ -199,19 +275,14 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
return -ENOMEM;
if (write)
- flags |= HMM_PFN_REQ_WRITE;
+ hmm_range.default_flags |= HMM_PFN_REQ_WRITE;
if (!mmget_not_zero(userptr->notifier.mm)) {
ret = -EFAULT;
goto free_pfns;
}
- hmm_range.default_flags = flags;
hmm_range.hmm_pfns = pfns;
- hmm_range.notifier = &userptr->notifier;
- hmm_range.start = userptr_start;
- hmm_range.end = userptr_end;
- hmm_range.dev_private_owner = vm->xe;
while (true) {
hmm_range.notifier_seq = mmu_interval_read_begin(&userptr->notifier);
@@ -238,16 +309,37 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
if (ret)
goto free_pfns;
- ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt, write);
+ ret = xe_alloc_sg(vm->xe, &userptr->sgt, &hmm_range, &vm->userptr.notifier_lock);
if (ret)
goto free_pfns;
+ ret = down_read_interruptible(&vm->userptr.notifier_lock);
+ if (ret)
+ goto free_st;
+
+ if (mmu_interval_read_retry(hmm_range.notifier, hmm_range.notifier_seq)) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt,
+ &vm->userptr.notifier_lock, write);
+ if (ret)
+ goto out_unlock;
+
xe_mark_range_accessed(&hmm_range, write);
userptr->sg = &userptr->sgt;
+ xe_hmm_userptr_set_mapped(uvma);
userptr->notifier_seq = hmm_range.notifier_seq;
+ up_read(&vm->userptr.notifier_lock);
+ kvfree(pfns);
+ return 0;
+out_unlock:
+ up_read(&vm->userptr.notifier_lock);
+free_st:
+ sg_free_table(&userptr->sgt);
free_pfns:
kvfree(pfns);
return ret;
}
-
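
xe_alloc_sg() counts one scatterlist chunk per HMM entry, using hmm_pfn_to_map_order() to skip ahead by the folio size and shrinking the first chunk when the pfn starts partway into a higher-order folio. A standalone toy model of that counting loop, with invented pfn/order values and the HMM flag bits ignored:

#include <stdio.h>

/*
 * Toy model of the chunk counting in xe_alloc_sg(): pfns[] are the page
 * frame numbers HMM returned at the start of each mapping and order[]
 * their map order. Values are invented; HMM flag bits are ignored here.
 */
int main(void)
{
	unsigned long pfns[] = { 0x100e, 0x2000 };
	unsigned int order[] = { 4, 4 };	/* 16-page folios */
	unsigned long npages = 16, i = 0, chunks = 0;
	unsigned int idx = 0;

	while (i < npages) {
		unsigned long len = 1UL << order[idx];

		/* A higher-order folio may begin before pfns[idx]: only the
		 * tail starting at pfns[idx] belongs to this chunk. */
		len -= pfns[idx] & (len - 1);
		i += len;
		chunks++;
		idx++;
	}
	printf("%lu sg chunks for %lu pages\n", chunks, npages);
	return 0;
}
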
diff --git a/drivers/gpu/drm/xe/xe_hmm.h b/drivers/gpu/drm/xe/xe_hmm.h
index 909dc2bdcd97..0ea98d8e7bbc 100644
--- a/drivers/gpu/drm/xe/xe_hmm.h
+++ b/drivers/gpu/drm/xe/xe_hmm.h
@@ -3,9 +3,16 @@
* Copyright © 2024 Intel Corporation
*/
+#ifndef _XE_HMM_H_
+#define _XE_HMM_H_
+
#include <linux/types.h>
struct xe_userptr_vma;
int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked);
+
void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma);
+
+void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma);
+#endif
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index fc447751fe78..223b95de388c 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -400,10 +400,9 @@ xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
PREEMPT_GPGPU_THREAD_GROUP_LEVEL)),
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE)
},
- {}
};
- xe_rtp_process_to_sr(&ctx, lrc_setup, &hwe->reg_lrc);
+ xe_rtp_process_to_sr(&ctx, lrc_setup, ARRAY_SIZE(lrc_setup), &hwe->reg_lrc);
}
static void
@@ -459,10 +458,9 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
- {}
};
- xe_rtp_process_to_sr(&ctx, engine_entries, &hwe->reg_sr);
+ xe_rtp_process_to_sr(&ctx, engine_entries, ARRAY_SIZE(engine_entries), &hwe->reg_sr);
}
static const struct engine_info *find_engine_info(enum xe_engine_class class, int instance)
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c
index 82750520a90a..2d68c5b5262a 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_group.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c
@@ -178,6 +178,7 @@ err_suspend:
up_write(&group->mode_sem);
return err;
}
+ALLOW_ERROR_INJECTION(xe_hw_engine_group_add_exec_queue, ERRNO);
/**
* xe_hw_engine_group_del_exec_queue() - Delete an exec queue from a hw engine group
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 278bc96cf593..df4282c71bf0 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -1544,6 +1544,181 @@ void xe_migrate_wait(struct xe_migrate *m)
dma_fence_wait(m->fence, false);
}
+static u32 pte_update_cmd_size(u64 size)
+{
+ u32 num_dword;
+ u64 entries = DIV_ROUND_UP(size, XE_PAGE_SIZE);
+
+ XE_WARN_ON(size > MAX_PREEMPTDISABLE_TRANSFER);
+ /*
+ * An MI_STORE_DATA_IMM command is used to update the page table. Each
+ * instruction can update at most 0x1ff PTE entries. To update
+ * n (n <= 0x1ff) PTE entries, we need:
+ * 1 dword for the MI_STORE_DATA_IMM command header (opcode etc.)
+ * 2 dwords for the page table's physical location
+ * 2*n dwords for the PTE values to fill (each PTE entry is 2 dwords)
+ */
+ num_dword = (1 + 2) * DIV_ROUND_UP(entries, 0x1ff);
+ num_dword += entries * 2;
+
+ return num_dword;
+}
+
+static void build_pt_update_batch_sram(struct xe_migrate *m,
+ struct xe_bb *bb, u32 pt_offset,
+ dma_addr_t *sram_addr, u32 size)
+{
+ u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
+ u32 ptes;
+ int i = 0;
+
+ ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
+ while (ptes) {
+ u32 chunk = min(0x1ffU, ptes);
+
+ bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
+ bb->cs[bb->len++] = pt_offset;
+ bb->cs[bb->len++] = 0;
+
+ pt_offset += chunk * 8;
+ ptes -= chunk;
+
+ while (chunk--) {
+ u64 addr = sram_addr[i++] & PAGE_MASK;
+
+ xe_tile_assert(m->tile, addr);
+ addr = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
+ addr, pat_index,
+ 0, false, 0);
+ bb->cs[bb->len++] = lower_32_bits(addr);
+ bb->cs[bb->len++] = upper_32_bits(addr);
+ }
+ }
+}
+
+enum xe_migrate_copy_dir {
+ XE_MIGRATE_COPY_TO_VRAM,
+ XE_MIGRATE_COPY_TO_SRAM,
+};
+
+static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
+ unsigned long npages,
+ dma_addr_t *sram_addr, u64 vram_addr,
+ const enum xe_migrate_copy_dir dir)
+{
+ struct xe_gt *gt = m->tile->primary_gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct dma_fence *fence = NULL;
+ u32 batch_size = 2;
+ u64 src_L0_ofs, dst_L0_ofs;
+ u64 round_update_size;
+ struct xe_sched_job *job;
+ struct xe_bb *bb;
+ u32 update_idx, pt_slot = 0;
+ int err;
+
+ if (npages * PAGE_SIZE > MAX_PREEMPTDISABLE_TRANSFER)
+ return ERR_PTR(-EINVAL);
+
+ round_update_size = npages * PAGE_SIZE;
+ batch_size += pte_update_cmd_size(round_update_size);
+ batch_size += EMIT_COPY_DW;
+
+ bb = xe_bb_new(gt, batch_size, true);
+ if (IS_ERR(bb)) {
+ err = PTR_ERR(bb);
+ return ERR_PTR(err);
+ }
+
+ build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
+ sram_addr, round_update_size);
+
+ if (dir == XE_MIGRATE_COPY_TO_VRAM) {
+ src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0);
+ dst_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
+
+ } else {
+ src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
+ dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0);
+ }
+
+ bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
+ update_idx = bb->len;
+
+ emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, round_update_size,
+ XE_PAGE_SIZE);
+
+ job = xe_bb_create_migration_job(m->q, bb,
+ xe_migrate_batch_base(m, true),
+ update_idx);
+ if (IS_ERR(job)) {
+ err = PTR_ERR(job);
+ goto err;
+ }
+
+ xe_sched_job_add_migrate_flush(job, 0);
+
+ mutex_lock(&m->job_mutex);
+ xe_sched_job_arm(job);
+ fence = dma_fence_get(&job->drm.s_fence->finished);
+ xe_sched_job_push(job);
+
+ dma_fence_put(m->fence);
+ m->fence = dma_fence_get(fence);
+ mutex_unlock(&m->job_mutex);
+
+ xe_bb_free(bb, fence);
+
+ return fence;
+
+err:
+ xe_bb_free(bb, NULL);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * xe_migrate_to_vram() - Migrate to VRAM
+ * @m: The migration context.
+ * @npages: Number of pages to migrate.
+ * @src_addr: Array of dma addresses (source of migrate)
+ * @dst_addr: Device physical address of VRAM (destination of migrate)
+ *
+ * Copy from an array of dma addresses to a VRAM device physical address
+ *
+ * Return: dma fence for migrate to signal completion on success, ERR_PTR on
+ * failure
+ */
+struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
+ unsigned long npages,
+ dma_addr_t *src_addr,
+ u64 dst_addr)
+{
+ return xe_migrate_vram(m, npages, src_addr, dst_addr,
+ XE_MIGRATE_COPY_TO_VRAM);
+}
+
+/**
+ * xe_migrate_from_vram() - Migrate from VRAM
+ * @m: The migration context.
+ * @npages: Number of pages to migrate.
+ * @src_addr: Device physical address of VRAM (source of migrate)
+ * @dst_addr: Array of dma addresses (destination of migrate)
+ *
+ * Copy from a VRAM device physical address to an array of dma addresses
+ *
+ * Return: dma fence for migrate to signal completion on success, ERR_PTR on
+ * failure
+ */
+struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
+ unsigned long npages,
+ u64 src_addr,
+ dma_addr_t *dst_addr)
+{
+ return xe_migrate_vram(m, npages, dst_addr, src_addr,
+ XE_MIGRATE_COPY_TO_SRAM);
+}
+
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_migrate.c"
#endif
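
xe_migrate_to_vram()/xe_migrate_from_vram() return a dma-fence for the copy job rather than waiting themselves. A hedged sketch of intended use, not a caller taken from this patch, copying already dma-mapped system pages into VRAM and waiting synchronously:

/*
 * Hedged sketch of intended use (not a caller from this patch): copy
 * npages of already dma-mapped system memory into a VRAM region and
 * wait for the copy job to finish.
 */
static int copy_pages_to_vram(struct xe_migrate *m, unsigned long npages,
			      dma_addr_t *sram_addr, u64 vram_addr)
{
	struct dma_fence *fence;

	fence = xe_migrate_to_vram(m, npages, sram_addr, vram_addr);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return 0;
}
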
diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
index 0109866e398a..6ff9a963425c 100644
--- a/drivers/gpu/drm/xe/xe_migrate.h
+++ b/drivers/gpu/drm/xe/xe_migrate.h
@@ -95,6 +95,16 @@ struct xe_migrate_pt_update {
struct xe_migrate *xe_migrate_init(struct xe_tile *tile);
+struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
+ unsigned long npages,
+ dma_addr_t *src_addr,
+ u64 dst_addr);
+
+struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
+ unsigned long npages,
+ u64 src_addr,
+ dma_addr_t *dst_addr);
+
struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct xe_bo *src_bo,
struct xe_bo *dst_bo,
diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c
index 54d199b5cfb2..31dade91a089 100644
--- a/drivers/gpu/drm/xe/xe_mocs.c
+++ b/drivers/gpu/drm/xe/xe_mocs.c
@@ -781,7 +781,9 @@ void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p)
flags = get_mocs_settings(xe, &table);
xe_pm_runtime_get_noresume(xe);
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ fw_ref = xe_force_wake_get(gt_to_fw(gt),
+ flags & HAS_LNCF_MOCS ?
+ XE_FORCEWAKE_ALL : XE_FW_GT);
if (!fw_ref)
goto err_fw;
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index 7185a2cdf6e3..9f4632e39a1a 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -22,9 +22,16 @@ struct xe_modparam xe_modparam = {
.guc_log_level = 3,
.force_probe = CONFIG_DRM_XE_FORCE_PROBE,
.wedged_mode = 1,
+ .svm_notifier_size = 512,
/* the rest are 0 by default */
};
+module_param_named(svm_notifier_size, xe_modparam.svm_notifier_size, uint, 0600);
+MODULE_PARM_DESC(svm_notifier_size, "Set the svm notifier size (in MiB), must be a power of 2");
+
+module_param_named(always_migrate_to_vram, xe_modparam.always_migrate_to_vram, bool, 0444);
+MODULE_PARM_DESC(always_migrate_to_vram, "Always migrate to VRAM on GPU fault");
+
module_param_named_unsafe(force_execlist, xe_modparam.force_execlist, bool, 0444);
MODULE_PARM_DESC(force_execlist, "Force Execlist submission");
diff --git a/drivers/gpu/drm/xe/xe_module.h b/drivers/gpu/drm/xe/xe_module.h
index 161a5e6f717f..84339e509c80 100644
--- a/drivers/gpu/drm/xe/xe_module.h
+++ b/drivers/gpu/drm/xe/xe_module.h
@@ -12,6 +12,7 @@
struct xe_modparam {
bool force_execlist;
bool probe_display;
+ bool always_migrate_to_vram;
u32 force_vram_bar_size;
int guc_log_level;
char *guc_firmware_path;
@@ -22,6 +23,7 @@ struct xe_modparam {
unsigned int max_vfs;
#endif
int wedged_mode;
+ u32 svm_notifier_size;
};
extern struct xe_modparam xe_modparam;
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 2c5a24a13e87..6f185632da14 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -12,6 +12,8 @@
#include <drm/drm_managed.h>
#include <uapi/drm/xe_drm.h>
+#include <generated/xe_wa_oob.h>
+
#include "abi/guc_actions_slpc_abi.h"
#include "instructions/xe_mi_commands.h"
#include "regs/xe_engine_regs.h"
@@ -35,6 +37,7 @@
#include "xe_sched_job.h"
#include "xe_sriov.h"
#include "xe_sync.h"
+#include "xe_wa.h"
#define DEFAULT_POLL_FREQUENCY_HZ 200
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)
@@ -812,11 +815,8 @@ static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
struct xe_mmio *mmio = &stream->gt->mmio;
u32 sqcnt1;
- /*
- * Wa_1508761755:xehpsdv, dg2
- * Enable thread stall DOP gating and EU DOP gating.
- */
- if (stream->oa->xe->info.platform == XE_DG2) {
+ /* Enable thread stall DOP gating and EU DOP gating. */
+ if (XE_WA(stream->gt, 1508761755)) {
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN,
_MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2,
@@ -1065,11 +1065,10 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
int ret;
/*
- * Wa_1508761755:xehpsdv, dg2
* EU NOA signals behave incorrectly if EU clock gating is enabled.
* Disable thread stall DOP gating and EU DOP gating.
*/
- if (stream->oa->xe->info.platform == XE_DG2) {
+ if (XE_WA(stream->gt, 1508761755)) {
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN,
_MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2,
@@ -1690,7 +1689,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
stream->oa_buffer.format = &stream->oa->oa_formats[param->oa_format];
stream->sample = param->sample;
- stream->periodic = param->period_exponent > 0;
+ stream->periodic = param->period_exponent >= 0;
stream->period_exponent = param->period_exponent;
stream->no_preempt = param->no_preempt;
stream->wait_num_reports = param->wait_num_reports;
@@ -1720,12 +1719,10 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
}
/*
- * Wa_1509372804:pvc
- *
* GuC reset of engines causes OA to lose configuration
* state. Prevent this by overriding GUCRC mode.
*/
- if (stream->oa->xe->info.platform == XE_PVC) {
+ if (XE_WA(stream->gt, 1509372804)) {
ret = xe_guc_pc_override_gucrc_mode(&gt->uc.guc.pc,
SLPC_GUCRC_MODE_GUCRC_NO_RC6);
if (ret)
@@ -1857,23 +1854,14 @@ u32 xe_oa_timestamp_frequency(struct xe_gt *gt)
{
u32 reg, shift;
- /*
- * Wa_18013179988:dg2
- * Wa_14015568240:pvc
- * Wa_14015846243:mtl
- */
- switch (gt_to_xe(gt)->info.platform) {
- case XE_DG2:
- case XE_PVC:
- case XE_METEORLAKE:
+ if (XE_WA(gt, 18013179988) || XE_WA(gt, 14015568240)) {
xe_pm_runtime_get(gt_to_xe(gt));
reg = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);
xe_pm_runtime_put(gt_to_xe(gt));
shift = REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, reg);
return gt->info.reference_clock << (3 - shift);
-
- default:
+ } else {
return gt->info.reference_clock;
}
}
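A worked sketch of the formula this hunk keeps (values are illustrative, not from the patch): the CTC shift read from RPM_CONFIG0 scales the reference clock into the OA timestamp frequency.

    /* Hypothetical example: with a 19.2 MHz reference clock and shift == 1,
     * the OA timestamp frequency is 19200000 << (3 - 1) = 76.8 MHz;
     * shift == 3 leaves the reference clock unchanged. */
    static u32 oa_timestamp_freq_example(u32 reference_clock, u32 shift)
    {
            return reference_clock << (3 - shift);
    }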
@@ -1971,6 +1959,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
}
param.xef = xef;
+ param.period_exponent = -1;
ret = xe_oa_user_extensions(oa, XE_OA_USER_EXTN_FROM_OPEN, data, 0, &param);
if (ret)
return ret;
@@ -2025,7 +2014,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
goto err_exec_q;
}
- if (param.period_exponent > 0) {
+ if (param.period_exponent >= 0) {
u64 oa_period, oa_freq_hz;
/* Requesting samples from OAG buffer is a privileged operation */
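For context on why the checks change from "> 0" to ">= 0" (with -1 now used as the "not requested" sentinel): a hedged sketch of the usual OA convention in which each exponent increment doubles the sampling period, so exponent 0 is a legitimate request for the shortest period. The exact nanosecond conversion used by the driver is not reproduced here.

    /* Hypothetical illustration only: the period doubles per exponent step,
     * so exponent 0 selects the shortest valid sampling period. */
    static u64 oa_period_for_exponent(u64 shortest_period_ns, int exponent)
    {
            return shortest_period_ns << exponent;
    }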
diff --git a/drivers/gpu/drm/xe/xe_observation.c b/drivers/gpu/drm/xe/xe_observation.c
index 57cf01efc07f..e3f9b546207e 100644
--- a/drivers/gpu/drm/xe/xe_observation.c
+++ b/drivers/gpu/drm/xe/xe_observation.c
@@ -8,6 +8,7 @@
#include <uapi/drm/xe_drm.h>
+#include "xe_eu_stall.h"
#include "xe_oa.h"
#include "xe_observation.h"
@@ -29,6 +30,17 @@ static int xe_oa_ioctl(struct drm_device *dev, struct drm_xe_observation_param *
}
}
+static int xe_eu_stall_ioctl(struct drm_device *dev, struct drm_xe_observation_param *arg,
+ struct drm_file *file)
+{
+ switch (arg->observation_op) {
+ case DRM_XE_OBSERVATION_OP_STREAM_OPEN:
+ return xe_eu_stall_stream_open(dev, arg->param, file);
+ default:
+ return -EINVAL;
+ }
+}
+
/**
* xe_observation_ioctl - The top level observation layer ioctl
* @dev: @drm_device
@@ -51,6 +63,8 @@ int xe_observation_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
switch (arg->observation_type) {
case DRM_XE_OBSERVATION_TYPE_OA:
return xe_oa_ioctl(dev, arg, file);
+ case DRM_XE_OBSERVATION_TYPE_EU_STALL:
+ return xe_eu_stall_ioctl(dev, arg, file);
default:
return -EINVAL;
}
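A hedged user-space sketch of how the new branch could be reached. struct drm_xe_observation_param, DRM_IOCTL_XE_OBSERVATION and DRM_XE_OBSERVATION_OP_STREAM_OPEN come from uapi/drm/xe_drm.h; the EU-stall property chain passed via .param and the helper name are assumptions, not spelled out by this hunk.

    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    /* Hypothetical sketch: open an EU stall stream through the observation
     * ioctl. On success the ioctl returns a new stream fd. */
    static int open_eu_stall_stream(int drm_fd, __u64 properties_ptr)
    {
            struct drm_xe_observation_param param = {
                    .observation_type = DRM_XE_OBSERVATION_TYPE_EU_STALL,
                    .observation_op = DRM_XE_OBSERVATION_OP_STREAM_OPEN,
                    .param = properties_ptr, /* EU-stall open properties (assumed) */
            };

            return ioctl(drm_fd, DRM_IOCTL_XE_OBSERVATION, &param);
    }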
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index f8417f4d8ce6..da9679c8cf26 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -46,9 +46,9 @@ struct xe_subplatform_desc {
struct xe_device_desc {
/* Should only ever be set for platforms without GMD_ID */
- const struct xe_graphics_desc *graphics;
+ const struct xe_ip *pre_gmdid_graphics_ip;
/* Should only ever be set for platforms without GMD_ID */
- const struct xe_media_desc *media;
+ const struct xe_ip *pre_gmdid_media_ip;
const char *platform_name;
const struct xe_subplatform_desc *subplatforms;
@@ -82,21 +82,6 @@ __diag_ignore_all("-Woverride-init", "Allow field overrides in table");
#define NOP(x) x
static const struct xe_graphics_desc graphics_xelp = {
- .name = "Xe_LP",
- .ver = 12,
- .rel = 0,
-
- .hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),
-
- .va_bits = 48,
- .vm_max_level = 3,
-};
-
-static const struct xe_graphics_desc graphics_xelpp = {
- .name = "Xe_LP+",
- .ver = 12,
- .rel = 10,
-
.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),
.va_bits = 48,
@@ -109,10 +94,6 @@ static const struct xe_graphics_desc graphics_xelpp = {
.vm_max_level = 3
static const struct xe_graphics_desc graphics_xehpg = {
- .name = "Xe_HPG",
- .ver = 12,
- .rel = 55,
-
.hw_engine_mask =
BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
@@ -125,10 +106,6 @@ static const struct xe_graphics_desc graphics_xehpg = {
};
static const struct xe_graphics_desc graphics_xehpc = {
- .name = "Xe_HPC",
- .ver = 12,
- .rel = 60,
-
.hw_engine_mask =
BIT(XE_HW_ENGINE_BCS0) | BIT(XE_HW_ENGINE_BCS1) |
BIT(XE_HW_ENGINE_BCS2) | BIT(XE_HW_ENGINE_BCS3) |
@@ -149,7 +126,6 @@ static const struct xe_graphics_desc graphics_xehpc = {
};
static const struct xe_graphics_desc graphics_xelpg = {
- .name = "Xe_LPG",
.hw_engine_mask =
BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
BIT(XE_HW_ENGINE_CCS0),
@@ -172,50 +148,54 @@ static const struct xe_graphics_desc graphics_xelpg = {
GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)
static const struct xe_graphics_desc graphics_xe2 = {
- .name = "Xe2_LPG / Xe2_HPG / Xe3_LPG",
-
XE2_GFX_FEATURES,
};
static const struct xe_media_desc media_xem = {
- .name = "Xe_M",
- .ver = 12,
- .rel = 0,
-
- .hw_engine_mask =
- GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
- GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
-};
-
-static const struct xe_media_desc media_xehpm = {
- .name = "Xe_HPM",
- .ver = 12,
- .rel = 55,
-
.hw_engine_mask =
GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};
static const struct xe_media_desc media_xelpmp = {
- .name = "Xe_LPM+",
.hw_engine_mask =
GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
BIT(XE_HW_ENGINE_GSCCS0)
};
-static const struct xe_media_desc media_xe2 = {
- .name = "Xe2_LPM / Xe2_HPM / Xe3_LPM",
- .hw_engine_mask =
- GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
- GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
- BIT(XE_HW_ENGINE_GSCCS0)
+/* Pre-GMDID Graphics IPs */
+static const struct xe_ip graphics_ip_xelp = { 1200, "Xe_LP", &graphics_xelp };
+static const struct xe_ip graphics_ip_xelpp = { 1210, "Xe_LP+", &graphics_xelp };
+static const struct xe_ip graphics_ip_xehpg = { 1255, "Xe_HPG", &graphics_xehpg };
+static const struct xe_ip graphics_ip_xehpc = { 1260, "Xe_HPC", &graphics_xehpc };
+
+/* GMDID-based Graphics IPs */
+static const struct xe_ip graphics_ips[] = {
+ { 1270, "Xe_LPG", &graphics_xelpg },
+ { 1271, "Xe_LPG", &graphics_xelpg },
+ { 1274, "Xe_LPG+", &graphics_xelpg },
+ { 2001, "Xe2_HPG", &graphics_xe2 },
+ { 2004, "Xe2_LPG", &graphics_xe2 },
+ { 3000, "Xe3_LPG", &graphics_xe2 },
+ { 3001, "Xe3_LPG", &graphics_xe2 },
+};
+
+/* Pre-GMDID Media IPs */
+static const struct xe_ip media_ip_xem = { 1200, "Xe_M", &media_xem };
+static const struct xe_ip media_ip_xehpm = { 1255, "Xe_HPM", &media_xem };
+
+/* GMDID-based Media IPs */
+static const struct xe_ip media_ips[] = {
+ { 1300, "Xe_LPM+", &media_xelpmp },
+ { 1301, "Xe2_HPM", &media_xelpmp },
+ { 2000, "Xe2_LPM", &media_xelpmp },
+ { 3000, "Xe3_LPM", &media_xelpmp },
};
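The verx100 values in these tables encode major * 100 + minor (e.g. 1271 is IP version 12.71, 2004 is 20.04). A small hedged sketch of the decode that matches the "%u.%02u" log messages later in this patch; the helper itself is illustrative and not part of the change.

    /* Hypothetical helper: split a verx100 value into the major.minor form
     * used in the driver's log messages. */
    static inline void xe_ip_ver_decode(unsigned int verx100,
                                        unsigned int *major, unsigned int *minor)
    {
            *major = verx100 / 100;
            *minor = verx100 % 100;
    }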
static const struct xe_device_desc tgl_desc = {
- .graphics = &graphics_xelp,
- .media = &media_xem,
+ .pre_gmdid_graphics_ip = &graphics_ip_xelp,
+ .pre_gmdid_media_ip = &media_ip_xem,
PLATFORM(TIGERLAKE),
.dma_mask_size = 39,
.has_display = true,
@@ -224,8 +204,8 @@ static const struct xe_device_desc tgl_desc = {
};
static const struct xe_device_desc rkl_desc = {
- .graphics = &graphics_xelp,
- .media = &media_xem,
+ .pre_gmdid_graphics_ip = &graphics_ip_xelp,
+ .pre_gmdid_media_ip = &media_ip_xem,
PLATFORM(ROCKETLAKE),
.dma_mask_size = 39,
.has_display = true,
@@ -236,8 +216,8 @@ static const struct xe_device_desc rkl_desc = {
static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 };
static const struct xe_device_desc adl_s_desc = {
- .graphics = &graphics_xelp,
- .media = &media_xem,
+ .pre_gmdid_graphics_ip = &graphics_ip_xelp,
+ .pre_gmdid_media_ip = &media_ip_xem,
PLATFORM(ALDERLAKE_S),
.dma_mask_size = 39,
.has_display = true,
@@ -252,8 +232,8 @@ static const struct xe_device_desc adl_s_desc = {
static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 };
static const struct xe_device_desc adl_p_desc = {
- .graphics = &graphics_xelp,
- .media = &media_xem,
+ .pre_gmdid_graphics_ip = &graphics_ip_xelp,
+ .pre_gmdid_media_ip = &media_ip_xem,
PLATFORM(ALDERLAKE_P),
.dma_mask_size = 39,
.has_display = true,
@@ -266,8 +246,8 @@ static const struct xe_device_desc adl_p_desc = {
};
static const struct xe_device_desc adl_n_desc = {
- .graphics = &graphics_xelp,
- .media = &media_xem,
+ .pre_gmdid_graphics_ip = &graphics_ip_xelp,
+ .pre_gmdid_media_ip = &media_ip_xem,
PLATFORM(ALDERLAKE_N),
.dma_mask_size = 39,
.has_display = true,
@@ -279,8 +259,8 @@ static const struct xe_device_desc adl_n_desc = {
.is_dgfx = 1
static const struct xe_device_desc dg1_desc = {
- .graphics = &graphics_xelpp,
- .media = &media_xem,
+ .pre_gmdid_graphics_ip = &graphics_ip_xelpp,
+ .pre_gmdid_media_ip = &media_ip_xem,
DGFX_FEATURES,
PLATFORM(DG1),
.dma_mask_size = 39,
@@ -305,8 +285,8 @@ static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };
}
static const struct xe_device_desc ats_m_desc = {
- .graphics = &graphics_xehpg,
- .media = &media_xehpm,
+ .pre_gmdid_graphics_ip = &graphics_ip_xehpg,
+ .pre_gmdid_media_ip = &media_ip_xehpm,
.dma_mask_size = 46,
.require_force_probe = true,
@@ -315,8 +295,8 @@ static const struct xe_device_desc ats_m_desc = {
};
static const struct xe_device_desc dg2_desc = {
- .graphics = &graphics_xehpg,
- .media = &media_xehpm,
+ .pre_gmdid_graphics_ip = &graphics_ip_xehpg,
+ .pre_gmdid_media_ip = &media_ip_xehpm,
.dma_mask_size = 46,
.require_force_probe = true,
@@ -325,7 +305,7 @@ static const struct xe_device_desc dg2_desc = {
};
static const __maybe_unused struct xe_device_desc pvc_desc = {
- .graphics = &graphics_xehpc,
+ .pre_gmdid_graphics_ip = &graphics_ip_xehpc,
DGFX_FEATURES,
PLATFORM(PVC),
.dma_mask_size = 52,
@@ -370,25 +350,6 @@ static const struct xe_device_desc ptl_desc = {
#undef PLATFORM
__diag_pop();
-/* Map of GMD_ID values to graphics IP */
-static const struct gmdid_map graphics_ip_map[] = {
- { 1270, &graphics_xelpg },
- { 1271, &graphics_xelpg },
- { 1274, &graphics_xelpg }, /* Xe_LPG+ */
- { 2001, &graphics_xe2 },
- { 2004, &graphics_xe2 },
- { 3000, &graphics_xe2 },
- { 3001, &graphics_xe2 },
-};
-
-/* Map of GMD_ID values to media IP */
-static const struct gmdid_map media_ip_map[] = {
- { 1300, &media_xelpmp },
- { 1301, &media_xe2 },
- { 2000, &media_xe2 },
- { 3000, &media_xe2 },
-};
-
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
@@ -549,66 +510,49 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver,
}
/*
- * Pre-GMD_ID platform: device descriptor already points to the appropriate
- * graphics descriptor. Simply forward the description and calculate the version
- * appropriately. "graphics" should be present in all such platforms, while
- * media is optional.
- */
-static void handle_pre_gmdid(struct xe_device *xe,
- const struct xe_graphics_desc *graphics,
- const struct xe_media_desc *media)
-{
- xe->info.graphics_verx100 = graphics->ver * 100 + graphics->rel;
-
- if (media)
- xe->info.media_verx100 = media->ver * 100 + media->rel;
-
-}
-
-/*
- * GMD_ID platform: read IP version from hardware and select graphics descriptor
+ * Read IP version from hardware and select graphics/media IP descriptors
* based on the result.
*/
static void handle_gmdid(struct xe_device *xe,
- const struct xe_graphics_desc **graphics,
- const struct xe_media_desc **media,
+ const struct xe_ip **graphics_ip,
+ const struct xe_ip **media_ip,
u32 *graphics_revid,
u32 *media_revid)
{
u32 ver;
+ *graphics_ip = NULL;
+ *media_ip = NULL;
+
read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);
- for (int i = 0; i < ARRAY_SIZE(graphics_ip_map); i++) {
- if (ver == graphics_ip_map[i].ver) {
- xe->info.graphics_verx100 = ver;
- *graphics = graphics_ip_map[i].ip;
+ for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) {
+ if (ver == graphics_ips[i].verx100) {
+ *graphics_ip = &graphics_ips[i];
break;
}
}
- if (!xe->info.graphics_verx100) {
+ if (!*graphics_ip) {
drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
ver / 100, ver % 100);
}
read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);
-
/* Media may legitimately be fused off / not present */
if (ver == 0)
return;
- for (int i = 0; i < ARRAY_SIZE(media_ip_map); i++) {
- if (ver == media_ip_map[i].ver) {
- xe->info.media_verx100 = ver;
- *media = media_ip_map[i].ip;
+ for (int i = 0; i < ARRAY_SIZE(media_ips); i++) {
+ if (ver == media_ips[i].verx100) {
+ *media_ip = &media_ips[i];
break;
}
}
- if (!xe->info.media_verx100) {
+ if (!*media_ip) {
drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
ver / 100, ver % 100);
}
@@ -659,26 +603,31 @@ static int xe_info_init_early(struct xe_device *xe,
* present in device info.
*/
static int xe_info_init(struct xe_device *xe,
- const struct xe_graphics_desc *graphics_desc,
- const struct xe_media_desc *media_desc)
+ const struct xe_device_desc *desc)
{
u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
+ const struct xe_ip *graphics_ip;
+ const struct xe_ip *media_ip;
+ const struct xe_graphics_desc *graphics_desc;
+ const struct xe_media_desc *media_desc;
struct xe_tile *tile;
struct xe_gt *gt;
u8 id;
/*
* If this platform supports GMD_ID, we'll detect the proper IP
- * descriptor to use from hardware registers. desc->graphics will only
- * ever be set at this point for platforms before GMD_ID. In that case
- * the IP descriptions and versions are simply derived from that.
+ * descriptor to use from hardware registers.
+ * desc->pre_gmdid_graphics_ip will only ever be set at this point for
+ * platforms before GMD_ID. In that case the IP descriptions and
+ * versions are simply derived from that.
*/
- if (graphics_desc) {
- handle_pre_gmdid(xe, graphics_desc, media_desc);
+ if (desc->pre_gmdid_graphics_ip) {
+ graphics_ip = desc->pre_gmdid_graphics_ip;
+ media_ip = desc->pre_gmdid_media_ip;
xe->info.step = xe_step_pre_gmdid_get(xe);
} else {
- xe_assert(xe, !media_desc);
- handle_gmdid(xe, &graphics_desc, &media_desc,
+ xe_assert(xe, !desc->pre_gmdid_media_ip);
+ handle_gmdid(xe, &graphics_ip, &media_ip,
&graphics_gmdid_revid, &media_gmdid_revid);
xe->info.step = xe_step_gmdid_get(xe,
graphics_gmdid_revid,
@@ -690,11 +639,21 @@ static int xe_info_init(struct xe_device *xe,
* error and we should abort driver load. Failing to detect media
* IP is non-fatal; we'll just proceed without enabling media support.
*/
- if (!graphics_desc)
+ if (!graphics_ip)
return -ENODEV;
- xe->info.graphics_name = graphics_desc->name;
- xe->info.media_name = media_desc ? media_desc->name : "none";
+ xe->info.graphics_verx100 = graphics_ip->verx100;
+ xe->info.graphics_name = graphics_ip->name;
+ graphics_desc = graphics_ip->desc;
+
+ if (media_ip) {
+ xe->info.media_verx100 = media_ip->verx100;
+ xe->info.media_name = media_ip->name;
+ media_desc = media_ip->desc;
+ } else {
+ xe->info.media_name = "none";
+ media_desc = NULL;
+ }
xe->info.vram_flags = graphics_desc->vram_flags;
xe->info.va_bits = graphics_desc->va_bits;
@@ -765,21 +724,16 @@ static int xe_info_init(struct xe_device *xe,
static void xe_pci_remove(struct pci_dev *pdev)
{
- struct xe_device *xe;
-
- xe = pdev_to_xe_device(pdev);
- if (!xe) /* driver load aborted, nothing to cleanup */
- return;
+ struct xe_device *xe = pdev_to_xe_device(pdev);
if (IS_SRIOV_PF(xe))
xe_pci_sriov_configure(pdev, 0);
- if (xe_survivability_mode_enabled(xe))
- return xe_survivability_mode_remove(xe);
+ if (xe_survivability_mode_is_enabled(xe))
+ return;
xe_device_remove(xe);
xe_pm_runtime_fini(xe);
- pci_set_drvdata(pdev, NULL);
}
/*
@@ -851,19 +805,20 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = xe_device_probe_early(xe);
/*
- * In Boot Survivability mode, no drm card is exposed
- * and driver is loaded with bare minimum to allow
- * for firmware to be flashed through mei. Return
- * success if survivability mode is enabled.
+ * In Boot Survivability mode, no drm card is exposed and the driver is
+ * loaded with the bare minimum needed to allow firmware to be flashed
+ * through mei. If early probe fails, check whether HW flags survivability
+ * mode as required; if so, enable it and return success.
*/
if (err) {
- if (xe_survivability_mode_enabled(xe))
+ if (xe_survivability_mode_required(xe) &&
+ xe_survivability_mode_enable(xe))
return 0;
return err;
}
- err = xe_info_init(xe, desc->graphics, desc->media);
+ err = xe_info_init(xe, desc);
if (err)
return err;
@@ -900,10 +855,8 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
err = xe_device_probe(xe);
- if (err) {
- xe_device_call_remove_actions(xe);
+ if (err)
return err;
- }
err = xe_pm_init(xe);
if (err)
@@ -953,7 +906,7 @@ static int xe_pci_suspend(struct device *dev)
struct xe_device *xe = pdev_to_xe_device(pdev);
int err;
- if (xe_survivability_mode_enabled(xe))
+ if (xe_survivability_mode_is_enabled(xe))
return -EBUSY;
err = xe_pm_suspend(xe);
diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c
index aaceee748287..09ee8a06fe2e 100644
--- a/drivers/gpu/drm/xe/xe_pci_sriov.c
+++ b/drivers/gpu/drm/xe/xe_pci_sriov.c
@@ -62,6 +62,55 @@ static void pf_reset_vfs(struct xe_device *xe, unsigned int num_vfs)
xe_gt_sriov_pf_control_trigger_flr(gt, n);
}
+static struct pci_dev *xe_pci_pf_get_vf_dev(struct xe_device *xe, unsigned int vf_id)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ /* caller must use pci_dev_put() */
+ return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+ pdev->bus->number,
+ pci_iov_virtfn_devfn(pdev, vf_id));
+}
+
+static void pf_link_vfs(struct xe_device *xe, int num_vfs)
+{
+ struct pci_dev *pdev_pf = to_pci_dev(xe->drm.dev);
+ struct device_link *link;
+ struct pci_dev *pdev_vf;
+ unsigned int n;
+
+ /*
+ * When both PF and VF devices are enabled on the host, they resume in
+ * parallel during system resume.
+ *
+ * But the PF has to finish provisioning the VFs first so that any VF can
+ * resume successfully.
+ *
+ * Create a parent-child device link between PF and VF devices that will
+ * enforce correct resume order.
+ */
+ for (n = 1; n <= num_vfs; n++) {
+ pdev_vf = xe_pci_pf_get_vf_dev(xe, n - 1);
+
+ /* unlikely, something weird is happening, abort */
+ if (!pdev_vf) {
+ xe_sriov_err(xe, "Cannot find VF%u device, aborting link%s creation!\n",
+ n, str_plural(num_vfs));
+ break;
+ }
+
+ link = device_link_add(&pdev_vf->dev, &pdev_pf->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ /* unlikely and harmless, continue with other VFs */
+ if (!link)
+ xe_sriov_notice(xe, "Failed linking VF%u\n", n);
+
+ pci_dev_put(pdev_vf);
+ }
+}
+
static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -92,6 +141,8 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
if (err < 0)
goto failed;
+ pf_link_vfs(xe, num_vfs);
+
xe_sriov_info(xe, "Enabled %u of %u VF%s\n",
num_vfs, total_vfs, str_plural(total_vfs));
return num_vfs;
diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h
index b96423844952..e9b9bbc138d3 100644
--- a/drivers/gpu/drm/xe/xe_pci_types.h
+++ b/drivers/gpu/drm/xe/xe_pci_types.h
@@ -9,10 +9,6 @@
#include <linux/types.h>
struct xe_graphics_desc {
- const char *name;
- u8 ver;
- u8 rel;
-
u8 va_bits;
u8 vm_max_level;
u8 vram_flags;
@@ -28,18 +24,15 @@ struct xe_graphics_desc {
};
struct xe_media_desc {
- const char *name;
- u8 ver;
- u8 rel;
-
u64 hw_engine_mask; /* hardware engines provided by media IP */
u8 has_indirect_ring_state:1;
};
-struct gmdid_map {
- unsigned int ver;
- const void *ip;
+struct xe_ip {
+ unsigned int verx100;
+ const char *name;
+ const void *desc;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_pmu.c b/drivers/gpu/drm/xe/xe_pmu.c
index 3910a82328ee..4f62a6e515d6 100644
--- a/drivers/gpu/drm/xe/xe_pmu.c
+++ b/drivers/gpu/drm/xe/xe_pmu.c
@@ -7,16 +7,18 @@
#include <linux/device.h>
#include "xe_device.h"
+#include "xe_force_wake.h"
#include "xe_gt_idle.h"
+#include "xe_guc_engine_activity.h"
+#include "xe_hw_engine.h"
#include "xe_pm.h"
#include "xe_pmu.h"
/**
* DOC: Xe PMU (Performance Monitoring Unit)
*
- * Expose events/counters like GT-C6 residency and GT frequency to user land via
- * the perf interface. Events are per device. The GT can be selected with an
- * extra config sub-field (bits 60-63).
+ * Expose events/counters like GT-C6 residency, GT frequency and per-class-engine
+ * activity to user land via the perf interface. Events are per device.
*
* All events are listed in sysfs:
*
@@ -24,7 +26,18 @@
* $ ls /sys/bus/event_source/devices/xe_0000_00_02.0/events/
* $ ls /sys/bus/event_source/devices/xe_0000_00_02.0/format/
*
- * The format directory has info regarding the configs that can be used.
+ * The following format parameters are available to read events,
+ * but only a few are valid for each event:
+ *
+ * gt[60:63] Selects gt for the event
+ * engine_class[20:27] Selects the engine class for the event
+ * engine_instance[12:19] Selects the engine-instance for the event
+ *
+ * For engine-specific events (engine-*), the gt, engine_class and engine_instance
+ * parameters must be set to the values reported by DRM_XE_DEVICE_QUERY_ENGINES.
+ *
+ * For gt-specific events (gt-*), the gt parameter must be passed. All other parameters will be 0.
+ *
* The standard perf tool can be used to grep for a certain event as well.
* Example:
*
@@ -35,20 +48,34 @@
* $ perf stat -e <event_name,gt=> -I <interval>
*/
-#define XE_PMU_EVENT_GT_MASK GENMASK_ULL(63, 60)
-#define XE_PMU_EVENT_ID_MASK GENMASK_ULL(11, 0)
+#define XE_PMU_EVENT_GT_MASK GENMASK_ULL(63, 60)
+#define XE_PMU_EVENT_ENGINE_CLASS_MASK GENMASK_ULL(27, 20)
+#define XE_PMU_EVENT_ENGINE_INSTANCE_MASK GENMASK_ULL(19, 12)
+#define XE_PMU_EVENT_ID_MASK GENMASK_ULL(11, 0)
static unsigned int config_to_event_id(u64 config)
{
return FIELD_GET(XE_PMU_EVENT_ID_MASK, config);
}
+static unsigned int config_to_engine_class(u64 config)
+{
+ return FIELD_GET(XE_PMU_EVENT_ENGINE_CLASS_MASK, config);
+}
+
+static unsigned int config_to_engine_instance(u64 config)
+{
+ return FIELD_GET(XE_PMU_EVENT_ENGINE_INSTANCE_MASK, config);
+}
+
static unsigned int config_to_gt_id(u64 config)
{
return FIELD_GET(XE_PMU_EVENT_GT_MASK, config);
}
-#define XE_PMU_EVENT_GT_C6_RESIDENCY 0x01
+#define XE_PMU_EVENT_GT_C6_RESIDENCY 0x01
+#define XE_PMU_EVENT_ENGINE_ACTIVE_TICKS 0x02
+#define XE_PMU_EVENT_ENGINE_TOTAL_TICKS 0x03
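A hedged user-space sketch of how a perf config value could be packed from these fields; the bit positions and event ids are the ones defined above, while the helper name and everything else is illustrative.

    #include <stdint.h>

    /* Hypothetical sketch: pack a config value using the field layout above
     * (gt in bits 60-63, engine_class in 20-27, engine_instance in 12-19,
     * event id in 0-11). */
    static uint64_t xe_pmu_pack_config(uint64_t gt, uint64_t engine_class,
                                       uint64_t engine_instance, uint64_t event_id)
    {
            return (gt << 60) | (engine_class << 20) |
                   (engine_instance << 12) | event_id;
    }

    /* e.g. xe_pmu_pack_config(0, 0, 0, 0x02) requests engine-active-ticks
     * for gt 0, engine class 0, instance 0. */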
static struct xe_gt *event_to_gt(struct perf_event *event)
{
@@ -58,6 +85,59 @@ static struct xe_gt *event_to_gt(struct perf_event *event)
return xe_device_get_gt(xe, gt);
}
+static struct xe_hw_engine *event_to_hwe(struct perf_event *event)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ struct drm_xe_engine_class_instance eci;
+ u64 config = event->attr.config;
+ struct xe_hw_engine *hwe;
+
+ eci.engine_class = config_to_engine_class(config);
+ eci.engine_instance = config_to_engine_instance(config);
+ eci.gt_id = config_to_gt_id(config);
+
+ hwe = xe_hw_engine_lookup(xe, eci);
+ if (!hwe || xe_hw_engine_is_reserved(hwe))
+ return NULL;
+
+ return hwe;
+}
+
+static bool is_engine_event(u64 config)
+{
+ unsigned int event_id = config_to_event_id(config);
+
+ return (event_id == XE_PMU_EVENT_ENGINE_TOTAL_TICKS ||
+ event_id == XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
+}
+
+static bool event_gt_forcewake(struct perf_event *event)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ u64 config = event->attr.config;
+ struct xe_gt *gt;
+ unsigned int *fw_ref;
+
+ if (!is_engine_event(config))
+ return true;
+
+ gt = xe_device_get_gt(xe, config_to_gt_id(config));
+
+ fw_ref = kzalloc(sizeof(*fw_ref), GFP_KERNEL);
+ if (!fw_ref)
+ return false;
+
+ *fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!*fw_ref) {
+ kfree(fw_ref);
+ return false;
+ }
+
+ event->pmu_private = fw_ref;
+
+ return true;
+}
+
static bool event_supported(struct xe_pmu *pmu, unsigned int gt,
unsigned int id)
{
@@ -68,9 +148,47 @@ static bool event_supported(struct xe_pmu *pmu, unsigned int gt,
pmu->supported_events & BIT_ULL(id);
}
+static bool event_param_valid(struct perf_event *event)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ unsigned int engine_class, engine_instance;
+ u64 config = event->attr.config;
+ struct xe_gt *gt;
+
+ gt = xe_device_get_gt(xe, config_to_gt_id(config));
+ if (!gt)
+ return false;
+
+ engine_class = config_to_engine_class(config);
+ engine_instance = config_to_engine_instance(config);
+
+ switch (config_to_event_id(config)) {
+ case XE_PMU_EVENT_GT_C6_RESIDENCY:
+ if (engine_class || engine_instance)
+ return false;
+ break;
+ case XE_PMU_EVENT_ENGINE_ACTIVE_TICKS:
+ case XE_PMU_EVENT_ENGINE_TOTAL_TICKS:
+ if (!event_to_hwe(event))
+ return false;
+ break;
+ }
+
+ return true;
+}
+
static void xe_pmu_event_destroy(struct perf_event *event)
{
struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ struct xe_gt *gt;
+ unsigned int *fw_ref = event->pmu_private;
+
+ if (fw_ref) {
+ gt = xe_device_get_gt(xe, config_to_gt_id(event->attr.config));
+ xe_force_wake_put(gt_to_fw(gt), *fw_ref);
+ kfree(fw_ref);
+ event->pmu_private = NULL;
+ }
drm_WARN_ON(&xe->drm, event->parent);
xe_pm_runtime_put(xe);
@@ -104,15 +222,37 @@ static int xe_pmu_event_init(struct perf_event *event)
if (has_branch_stack(event))
return -EOPNOTSUPP;
+ if (!event_param_valid(event))
+ return -ENOENT;
+
if (!event->parent) {
drm_dev_get(&xe->drm);
xe_pm_runtime_get(xe);
+ if (!event_gt_forcewake(event)) {
+ xe_pm_runtime_put(xe);
+ drm_dev_put(&xe->drm);
+ return -EINVAL;
+ }
event->destroy = xe_pmu_event_destroy;
}
return 0;
}
+static u64 read_engine_events(struct xe_gt *gt, struct perf_event *event)
+{
+ struct xe_hw_engine *hwe;
+ u64 val = 0;
+
+ hwe = event_to_hwe(event);
+ if (config_to_event_id(event->attr.config) == XE_PMU_EVENT_ENGINE_ACTIVE_TICKS)
+ val = xe_guc_engine_activity_active_ticks(&gt->uc.guc, hwe);
+ else
+ val = xe_guc_engine_activity_total_ticks(&gt->uc.guc, hwe);
+
+ return val;
+}
+
static u64 __xe_pmu_event_read(struct perf_event *event)
{
struct xe_gt *gt = event_to_gt(event);
@@ -123,6 +263,9 @@ static u64 __xe_pmu_event_read(struct perf_event *event)
switch (config_to_event_id(event->attr.config)) {
case XE_PMU_EVENT_GT_C6_RESIDENCY:
return xe_gt_idle_residency_msec(&gt->gtidle);
+ case XE_PMU_EVENT_ENGINE_ACTIVE_TICKS:
+ case XE_PMU_EVENT_ENGINE_TOTAL_TICKS:
+ return read_engine_events(gt, event);
}
return 0;
@@ -207,11 +350,15 @@ static void xe_pmu_event_del(struct perf_event *event, int flags)
xe_pmu_event_stop(event, PERF_EF_UPDATE);
}
-PMU_FORMAT_ATTR(gt, "config:60-63");
-PMU_FORMAT_ATTR(event, "config:0-11");
+PMU_FORMAT_ATTR(gt, "config:60-63");
+PMU_FORMAT_ATTR(engine_class, "config:20-27");
+PMU_FORMAT_ATTR(engine_instance, "config:12-19");
+PMU_FORMAT_ATTR(event, "config:0-11");
static struct attribute *pmu_format_attrs[] = {
&format_attr_event.attr,
+ &format_attr_engine_class.attr,
+ &format_attr_engine_instance.attr,
&format_attr_gt.attr,
NULL,
};
@@ -270,6 +417,8 @@ static ssize_t event_attr_show(struct device *dev,
XE_EVENT_ATTR_GROUP(v_, id_, &pmu_event_ ##v_.attr.attr)
XE_EVENT_ATTR_SIMPLE(gt-c6-residency, gt_c6_residency, XE_PMU_EVENT_GT_C6_RESIDENCY, "ms");
+XE_EVENT_ATTR_NOUNIT(engine-active-ticks, engine_active_ticks, XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
+XE_EVENT_ATTR_NOUNIT(engine-total-ticks, engine_total_ticks, XE_PMU_EVENT_ENGINE_TOTAL_TICKS);
static struct attribute *pmu_empty_event_attrs[] = {
/* Empty - all events are added as groups with .attr_update() */
@@ -283,15 +432,23 @@ static const struct attribute_group pmu_events_attr_group = {
static const struct attribute_group *pmu_events_attr_update[] = {
&pmu_group_gt_c6_residency,
+ &pmu_group_engine_active_ticks,
+ &pmu_group_engine_total_ticks,
NULL,
};
static void set_supported_events(struct xe_pmu *pmu)
{
struct xe_device *xe = container_of(pmu, typeof(*xe), pmu);
+ struct xe_gt *gt = xe_device_get_gt(xe, 0);
if (!xe->info.skip_guc_pc)
pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_C6_RESIDENCY);
+
+ if (xe_guc_engine_activity_supported(&gt->uc.guc)) {
+ pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
+ pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_ENGINE_TOTAL_TICKS);
+ }
}
/**
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 1ddcc7e79a93..ffaf0d02dc7d 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -20,6 +20,7 @@
#include "xe_res_cursor.h"
#include "xe_sched_job.h"
#include "xe_sync.h"
+#include "xe_svm.h"
#include "xe_trace.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"
@@ -28,6 +29,8 @@ struct xe_pt_dir {
struct xe_pt pt;
/** @children: Array of page-table child nodes */
struct xe_ptw *children[XE_PDES];
+ /** @staging: Array of page-table staging nodes */
+ struct xe_ptw *staging[XE_PDES];
};
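For orientation, a hedged one-liner showing the relationship the new array implies: pending page-table updates are built in @staging and only copied into @children once the operation commits (see xe_pt_commit() further down in this patch). The helper name is illustrative, not part of the change.

    /* Hypothetical sketch: publishing one staged entry at commit time. */
    static void xe_pt_dir_publish_entry(struct xe_pt_dir *pt_dir, unsigned int idx)
    {
            pt_dir->children[idx] = pt_dir->staging[idx];
    }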
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
@@ -48,9 +51,10 @@ static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt)
return container_of(pt, struct xe_pt_dir, pt);
}
-static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index)
+static struct xe_pt *
+xe_pt_entry_staging(struct xe_pt_dir *pt_dir, unsigned int index)
{
- return container_of(pt_dir->children[index], struct xe_pt, base);
+ return container_of(pt_dir->staging[index], struct xe_pt, base);
}
static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
@@ -125,6 +129,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
}
pt->bo = bo;
pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL;
+ pt->base.staging = level ? as_xe_pt_dir(pt)->staging : NULL;
if (vm->xef)
xe_drm_client_add_bo(vm->xef->client, pt->bo);
@@ -206,8 +211,8 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
for (i = 0; i < XE_PDES; i++) {
- if (xe_pt_entry(pt_dir, i))
- xe_pt_destroy(xe_pt_entry(pt_dir, i), flags,
+ if (xe_pt_entry_staging(pt_dir, i))
+ xe_pt_destroy(xe_pt_entry_staging(pt_dir, i), flags,
deferred);
}
}
@@ -215,6 +220,20 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
}
/**
+ * xe_pt_clear() - Clear a page-table.
+ * @xe: xe device.
+ * @pt: The page-table.
+ *
+ * Clears the page-table by setting its contents to zero.
+ */
+void xe_pt_clear(struct xe_device *xe, struct xe_pt *pt)
+{
+ struct iosys_map *map = &pt->bo->vmap;
+
+ xe_map_memset(xe, map, 0, 0, SZ_4K);
+}
+
+/**
* DOC: Pagetable building
*
* Below we use the term "page-table" for both page-directories, containing
@@ -376,8 +395,10 @@ xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent,
/* Continue building a non-connected subtree. */
struct iosys_map *map = &parent->bo->vmap;
- if (unlikely(xe_child))
+ if (unlikely(xe_child)) {
parent->base.children[offset] = &xe_child->base;
+ parent->base.staging[offset] = &xe_child->base;
+ }
xe_pt_write(xe_walk->vm->xe, map, offset, pte);
parent->num_live++;
@@ -587,6 +608,7 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
* range.
* @tile: The tile we're building for.
* @vma: The vma indicating the address range.
+ * @range: The SVM range to bind, or NULL to bind the whole @vma.
* @entries: Storage for the update entries used for connecting the tree to
* the main tree at commit time.
* @num_entries: On output contains the number of @entries used.
@@ -602,6 +624,7 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
*/
static int
xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
+ struct xe_svm_range *range,
struct xe_vm_pgtable_update *entries, u32 *num_entries)
{
struct xe_device *xe = tile_to_xe(tile);
@@ -614,18 +637,48 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
.ops = &xe_pt_stage_bind_ops,
.shifts = xe_normal_pt_shifts,
.max_level = XE_PT_HIGHEST_LEVEL,
+ .staging = true,
},
.vm = xe_vma_vm(vma),
.tile = tile,
.curs = &curs,
- .va_curs_start = xe_vma_start(vma),
+ .va_curs_start = range ? range->base.itree.start :
+ xe_vma_start(vma),
.vma = vma,
.wupd.entries = entries,
- .needs_64K = (xe_vma_vm(vma)->flags & XE_VM_FLAG_64K) && is_devmem,
};
struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
int ret;
+ if (range) {
+ /* Move this entire thing to xe_svm.c? */
+ xe_svm_notifier_lock(xe_vma_vm(vma));
+ if (!xe_svm_range_pages_valid(range)) {
+ xe_svm_range_debug(range, "BIND PREPARE - RETRY");
+ xe_svm_notifier_unlock(xe_vma_vm(vma));
+ return -EAGAIN;
+ }
+ if (xe_svm_range_has_dma_mapping(range)) {
+ xe_res_first_dma(range->base.dma_addr, 0,
+ range->base.itree.last + 1 - range->base.itree.start,
+ &curs);
+ is_devmem = xe_res_is_vram(&curs);
+ if (is_devmem)
+ xe_svm_range_debug(range, "BIND PREPARE - DMA VRAM");
+ else
+ xe_svm_range_debug(range, "BIND PREPARE - DMA");
+ } else {
+ xe_assert(xe, false);
+ }
+ /*
+ * Note that once the lock is dropped the resource cursor's dma addresses
+ * may become stale, but the bind will be aborted anyway at commit time.
+ */
+ xe_svm_notifier_unlock(xe_vma_vm(vma));
+ }
+
+ xe_walk.needs_64K = (xe_vma_vm(vma)->flags & XE_VM_FLAG_64K) && is_devmem;
+
/**
* Default atomic expectations for different allocation scenarios are as follows:
*
@@ -647,7 +700,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
* gets migrated to LMEM, bind such allocations with
* device atomics enabled.
*/
- else if (is_devmem && !xe_bo_has_single_placement(bo))
+ else if (is_devmem)
xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
} else {
xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
@@ -663,15 +716,16 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
if (is_devmem) {
xe_walk.default_pte |= XE_PPGTT_PTE_DM;
- xe_walk.dma_offset = vram_region_gpu_offset(bo->ttm.resource);
+ xe_walk.dma_offset = bo ? vram_region_gpu_offset(bo->ttm.resource) : 0;
}
if (!xe_vma_has_no_bo(vma) && xe_bo_is_stolen(bo))
xe_walk.dma_offset = xe_ttm_stolen_gpu_offset(xe_bo_device(bo));
- xe_bo_assert_held(bo);
+ if (!range)
+ xe_bo_assert_held(bo);
- if (!xe_vma_is_null(vma)) {
+ if (!xe_vma_is_null(vma) && !range) {
if (xe_vma_is_userptr(vma))
xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0,
xe_vma_size(vma), &curs);
@@ -681,12 +735,14 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
else
xe_res_first_sg(xe_bo_sg(bo), xe_vma_bo_offset(vma),
xe_vma_size(vma), &curs);
- } else {
+ } else if (!range) {
curs.size = xe_vma_size(vma);
}
- ret = xe_pt_walk_range(&pt->base, pt->level, xe_vma_start(vma),
- xe_vma_end(vma), &xe_walk.base);
+ ret = xe_pt_walk_range(&pt->base, pt->level,
+ range ? range->base.itree.start : xe_vma_start(vma),
+ range ? range->base.itree.last + 1 : xe_vma_end(vma),
+ &xe_walk.base);
*num_entries = xe_walk.wupd.num_used_entries;
return ret;
@@ -830,6 +886,46 @@ bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma)
return xe_walk.needs_invalidate;
}
+/**
+ * xe_pt_zap_ptes_range() - Zap (zero) gpu ptes of a SVM range
+ * @tile: The tile we're zapping for.
+ * @vm: The VM we're zapping for.
+ * @range: The SVM range we're zapping for.
+ *
+ * SVM invalidation needs to be able to zap the gpu ptes of a given address
+ * range. In order to be able to do that, this function needs access to the
+ * shared page-table entries so it can either clear the leaf PTEs or
+ * clear the pointers to lower-level page-tables. The caller is required
+ * to hold the SVM notifier lock.
+ *
+ * Return: Whether ptes were actually updated and a TLB invalidation is
+ * required.
+ */
+bool xe_pt_zap_ptes_range(struct xe_tile *tile, struct xe_vm *vm,
+ struct xe_svm_range *range)
+{
+ struct xe_pt_zap_ptes_walk xe_walk = {
+ .base = {
+ .ops = &xe_pt_zap_ptes_ops,
+ .shifts = xe_normal_pt_shifts,
+ .max_level = XE_PT_HIGHEST_LEVEL,
+ },
+ .tile = tile,
+ };
+ struct xe_pt *pt = vm->pt_root[tile->id];
+ u8 pt_mask = (range->tile_present & ~range->tile_invalidated);
+
+ xe_svm_assert_in_notifier(vm);
+
+ if (!(pt_mask & BIT(tile->id)))
+ return false;
+
+ (void)xe_pt_walk_shared(&pt->base, pt->level, range->base.itree.start,
+ range->base.itree.last + 1, &xe_walk.base);
+
+ return xe_walk.needs_invalidate;
+}
+
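A hedged sketch of the call pattern implied by the kernel-doc above: the caller holds the SVM notifier lock, zaps the range, and only needs a TLB invalidation when the helper reports that PTEs actually changed. The wrapper name and the deferral of the invalidation to the caller are assumptions, not taken from this patch.

    /* Hypothetical caller sketch for the new helper. */
    static bool svm_range_zap(struct xe_tile *tile, struct xe_vm *vm,
                              struct xe_svm_range *range)
    {
            xe_svm_assert_in_notifier(vm); /* notifier lock must be held */

            /* true => PTEs changed, caller must issue a TLB invalidation */
            return xe_pt_zap_ptes_range(tile, vm, range);
    }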
static void
xe_vm_populate_pgtable(struct xe_migrate_pt_update *pt_update, struct xe_tile *tile,
struct iosys_map *map, void *data,
@@ -873,18 +969,38 @@ static void xe_pt_cancel_bind(struct xe_vma *vma,
}
}
-static void xe_pt_commit_locks_assert(struct xe_vma *vma)
+#define XE_INVALID_VMA ((struct xe_vma *)(0xdeaddeadull))
+
+static void xe_pt_commit_prepare_locks_assert(struct xe_vma *vma)
{
- struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_vm *vm;
+ if (vma == XE_INVALID_VMA)
+ return;
+
+ vm = xe_vma_vm(vma);
lockdep_assert_held(&vm->lock);
- if (!xe_vma_is_userptr(vma) && !xe_vma_is_null(vma))
+ if (!xe_vma_has_no_bo(vma))
dma_resv_assert_held(xe_vma_bo(vma)->ttm.base.resv);
xe_vm_assert_held(vm);
}
+static void xe_pt_commit_locks_assert(struct xe_vma *vma)
+{
+ struct xe_vm *vm;
+
+ if (vma == XE_INVALID_VMA)
+ return;
+
+ vm = xe_vma_vm(vma);
+ xe_pt_commit_prepare_locks_assert(vma);
+
+ if (xe_vma_is_userptr(vma))
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
+}
+
static void xe_pt_commit(struct xe_vma *vma,
struct xe_vm_pgtable_update *entries,
u32 num_entries, struct llist_head *deferred)
@@ -895,14 +1011,19 @@ static void xe_pt_commit(struct xe_vma *vma,
for (i = 0; i < num_entries; i++) {
struct xe_pt *pt = entries[i].pt;
+ struct xe_pt_dir *pt_dir;
if (!pt->level)
continue;
+ pt_dir = as_xe_pt_dir(pt);
for (j = 0; j < entries[i].qwords; j++) {
struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
+ int j_ = j + entries[i].ofs;
- xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred);
+ pt_dir->children[j_] = pt_dir->staging[j_];
+ xe_pt_destroy(oldpte, (vma == XE_INVALID_VMA) ? 0 :
+ xe_vma_vm(vma)->flags, deferred);
}
}
}
@@ -913,7 +1034,7 @@ static void xe_pt_abort_bind(struct xe_vma *vma,
{
int i, j;
- xe_pt_commit_locks_assert(vma);
+ xe_pt_commit_prepare_locks_assert(vma);
for (i = num_entries - 1; i >= 0; --i) {
struct xe_pt *pt = entries[i].pt;
@@ -928,10 +1049,10 @@ static void xe_pt_abort_bind(struct xe_vma *vma,
pt_dir = as_xe_pt_dir(pt);
for (j = 0; j < entries[i].qwords; j++) {
u32 j_ = j + entries[i].ofs;
- struct xe_pt *newpte = xe_pt_entry(pt_dir, j_);
+ struct xe_pt *newpte = xe_pt_entry_staging(pt_dir, j_);
struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
- pt_dir->children[j_] = oldpte ? &oldpte->base : 0;
+ pt_dir->staging[j_] = oldpte ? &oldpte->base : 0;
xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL);
}
}
@@ -943,7 +1064,7 @@ static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
{
u32 i, j;
- xe_pt_commit_locks_assert(vma);
+ xe_pt_commit_prepare_locks_assert(vma);
for (i = 0; i < num_entries; i++) {
struct xe_pt *pt = entries[i].pt;
@@ -961,10 +1082,10 @@ static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
struct xe_pt *newpte = entries[i].pt_entries[j].pt;
struct xe_pt *oldpte = NULL;
- if (xe_pt_entry(pt_dir, j_))
- oldpte = xe_pt_entry(pt_dir, j_);
+ if (xe_pt_entry_staging(pt_dir, j_))
+ oldpte = xe_pt_entry_staging(pt_dir, j_);
- pt_dir->children[j_] = &newpte->base;
+ pt_dir->staging[j_] = &newpte->base;
entries[i].pt_entries[j].pt = oldpte;
}
}
@@ -981,12 +1102,13 @@ static void xe_pt_free_bind(struct xe_vm_pgtable_update *entries,
static int
xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
+ struct xe_svm_range *range,
struct xe_vm_pgtable_update *entries, u32 *num_entries)
{
int err;
*num_entries = 0;
- err = xe_pt_stage_bind(tile, vma, entries, num_entries);
+ err = xe_pt_stage_bind(tile, vma, range, entries, num_entries);
if (!err)
xe_tile_assert(tile, *num_entries);
@@ -1069,6 +1191,11 @@ static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op,
{
int err = 0;
+ /*
+ * No need to check for is_cpu_addr_mirror here as vma_add_deps() is a
+ * NOP if the VMA is a CPU address mirror.
+ */
+
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
if (!op->map.immediate && xe_vm_in_fault_mode(vm))
@@ -1087,6 +1214,8 @@ static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op,
case DRM_GPUVA_OP_PREFETCH:
err = vma_add_deps(gpuva_to_vma(op->base.prefetch.va), job);
break;
+ case DRM_GPUVA_OP_DRIVER:
+ break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
@@ -1213,42 +1342,22 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
return 0;
uvma = to_userptr_vma(vma);
- notifier_seq = uvma->userptr.notifier_seq;
+ if (xe_pt_userptr_inject_eagain(uvma))
+ xe_vma_userptr_force_invalidate(uvma);
- if (uvma->userptr.initial_bind && !xe_vm_in_fault_mode(vm))
- return 0;
+ notifier_seq = uvma->userptr.notifier_seq;
if (!mmu_interval_read_retry(&uvma->userptr.notifier,
- notifier_seq) &&
- !xe_pt_userptr_inject_eagain(uvma))
+ notifier_seq))
return 0;
- if (xe_vm_in_fault_mode(vm)) {
+ if (xe_vm_in_fault_mode(vm))
return -EAGAIN;
- } else {
- spin_lock(&vm->userptr.invalidated_lock);
- list_move_tail(&uvma->userptr.invalidate_link,
- &vm->userptr.invalidated);
- spin_unlock(&vm->userptr.invalidated_lock);
-
- if (xe_vm_in_preempt_fence_mode(vm)) {
- struct dma_resv_iter cursor;
- struct dma_fence *fence;
- long err;
-
- dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP);
- dma_resv_for_each_fence_unlocked(&cursor, fence)
- dma_fence_enable_sw_signaling(fence);
- dma_resv_iter_end(&cursor);
-
- err = dma_resv_wait_timeout(xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP,
- false, MAX_SCHEDULE_TIMEOUT);
- XE_WARN_ON(err <= 0);
- }
- }
+ /*
+ * Just continue the operation since exec or rebind worker
+ * will take care of rebinding.
+ */
return 0;
}
@@ -1311,6 +1420,40 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
return err;
}
+static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
+{
+ struct xe_vm *vm = pt_update->vops->vm;
+ struct xe_vma_ops *vops = pt_update->vops;
+ struct xe_vma_op *op;
+ int err;
+
+ err = xe_pt_pre_commit(pt_update);
+ if (err)
+ return err;
+
+ xe_svm_notifier_lock(vm);
+
+ list_for_each_entry(op, &vops->list, link) {
+ struct xe_svm_range *range = op->map_range.range;
+
+ if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
+ continue;
+
+ xe_svm_range_debug(range, "PRE-COMMIT");
+
+ xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
+ xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
+
+ if (!xe_svm_range_pages_valid(range)) {
+ xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
+ xe_svm_notifier_unlock(vm);
+ return -EAGAIN;
+ }
+ }
+
+ return 0;
+}
+
struct invalidation_fence {
struct xe_gt_tlb_invalidation_fence base;
struct xe_gt *gt;
@@ -1496,7 +1639,9 @@ static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = {
* xe_pt_stage_unbind() - Build page-table update structures for an unbind
* operation
* @tile: The tile we're unbinding for.
+ * @vm: The vm we're unbinding from.
 * @vma: The vma we're unbinding.
+ * @range: The SVM range we're unbinding, or NULL when unbinding @vma.
* @entries: Caller-provided storage for the update structures.
*
* Builds page-table update structures for an unbind operation. The function
@@ -1506,24 +1651,30 @@ static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = {
*
* Return: The number of entries used.
*/
-static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma,
+static unsigned int xe_pt_stage_unbind(struct xe_tile *tile,
+ struct xe_vm *vm,
+ struct xe_vma *vma,
+ struct xe_svm_range *range,
struct xe_vm_pgtable_update *entries)
{
+ u64 start = range ? range->base.itree.start : xe_vma_start(vma);
+ u64 end = range ? range->base.itree.last + 1 : xe_vma_end(vma);
struct xe_pt_stage_unbind_walk xe_walk = {
.base = {
.ops = &xe_pt_stage_unbind_ops,
.shifts = xe_normal_pt_shifts,
.max_level = XE_PT_HIGHEST_LEVEL,
+ .staging = true,
},
.tile = tile,
- .modified_start = xe_vma_start(vma),
- .modified_end = xe_vma_end(vma),
+ .modified_start = start,
+ .modified_end = end,
.wupd.entries = entries,
};
- struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
+ struct xe_pt *pt = vm->pt_root[tile->id];
- (void)xe_pt_walk_shared(&pt->base, pt->level, xe_vma_start(vma),
- xe_vma_end(vma), &xe_walk.base);
+ (void)xe_pt_walk_shared(&pt->base, pt->level, start, end,
+ &xe_walk.base);
return xe_walk.wupd.num_used_entries;
}
@@ -1555,7 +1706,7 @@ static void xe_pt_abort_unbind(struct xe_vma *vma,
{
int i, j;
- xe_pt_commit_locks_assert(vma);
+ xe_pt_commit_prepare_locks_assert(vma);
for (i = num_entries - 1; i >= 0; --i) {
struct xe_vm_pgtable_update *entry = &entries[i];
@@ -1568,7 +1719,7 @@ static void xe_pt_abort_unbind(struct xe_vma *vma,
continue;
for (j = entry->ofs; j < entry->ofs + entry->qwords; j++)
- pt_dir->children[j] =
+ pt_dir->staging[j] =
entries[i].pt_entries[j - entry->ofs].pt ?
&entries[i].pt_entries[j - entry->ofs].pt->base : NULL;
}
@@ -1581,7 +1732,7 @@ xe_pt_commit_prepare_unbind(struct xe_vma *vma,
{
int i, j;
- xe_pt_commit_locks_assert(vma);
+ xe_pt_commit_prepare_locks_assert(vma);
for (i = 0; i < num_entries; ++i) {
struct xe_vm_pgtable_update *entry = &entries[i];
@@ -1595,20 +1746,20 @@ xe_pt_commit_prepare_unbind(struct xe_vma *vma,
pt_dir = as_xe_pt_dir(pt);
for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) {
entry->pt_entries[j - entry->ofs].pt =
- xe_pt_entry(pt_dir, j);
- pt_dir->children[j] = NULL;
+ xe_pt_entry_staging(pt_dir, j);
+ pt_dir->staging[j] = NULL;
}
}
}
static void
xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops,
- struct xe_vma *vma)
+ u64 start, u64 end)
{
+ u64 last;
u32 current_op = pt_update_ops->current_op;
struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
int i, level = 0;
- u64 start, last;
for (i = 0; i < pt_op->num_entries; i++) {
const struct xe_vm_pgtable_update *entry = &pt_op->entries[i];
@@ -1618,8 +1769,8 @@ xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops,
}
/* Greedy (non-optimal) calculation but simple */
- start = ALIGN_DOWN(xe_vma_start(vma), 0x1ull << xe_pt_shift(level));
- last = ALIGN(xe_vma_end(vma), 0x1ull << xe_pt_shift(level)) - 1;
+ start = ALIGN_DOWN(start, 0x1ull << xe_pt_shift(level));
+ last = ALIGN(end, 0x1ull << xe_pt_shift(level)) - 1;
if (start < pt_update_ops->start)
pt_update_ops->start = start;
@@ -1646,6 +1797,7 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
int err;
+ xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
xe_bo_assert_held(xe_vma_bo(vma));
vm_dbg(&xe_vma_vm(vma)->xe->drm,
@@ -1660,7 +1812,7 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
if (err)
return err;
- err = xe_pt_prepare_bind(tile, vma, pt_op->entries,
+ err = xe_pt_prepare_bind(tile, vma, NULL, pt_op->entries,
&pt_op->num_entries);
if (!err) {
xe_tile_assert(tile, pt_op->num_entries <=
@@ -1668,7 +1820,9 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
pt_op->num_entries, true);
- xe_pt_update_ops_rfence_interval(pt_update_ops, vma);
+ xe_pt_update_ops_rfence_interval(pt_update_ops,
+ xe_vma_start(vma),
+ xe_vma_end(vma));
++pt_update_ops->current_op;
pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
@@ -1702,6 +1856,48 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
return err;
}
+static int bind_range_prepare(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma, struct xe_svm_range *range)
+{
+ u32 current_op = pt_update_ops->current_op;
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
+ int err;
+
+ xe_tile_assert(tile, xe_vma_is_cpu_addr_mirror(vma));
+
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "Preparing bind, with range [%lx...%lx)\n",
+ range->base.itree.start, range->base.itree.last);
+
+ pt_op->vma = NULL;
+ pt_op->bind = true;
+ pt_op->rebind = BIT(tile->id) & range->tile_present;
+
+ err = xe_pt_prepare_bind(tile, vma, range, pt_op->entries,
+ &pt_op->num_entries);
+ if (!err) {
+ xe_tile_assert(tile, pt_op->num_entries <=
+ ARRAY_SIZE(pt_op->entries));
+ xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
+ pt_op->num_entries, true);
+
+ xe_pt_update_ops_rfence_interval(pt_update_ops,
+ range->base.itree.start,
+ range->base.itree.last + 1);
+ ++pt_update_ops->current_op;
+ pt_update_ops->needs_svm_lock = true;
+
+ pt_op->vma = vma;
+ xe_pt_commit_prepare_bind(vma, pt_op->entries,
+ pt_op->num_entries, pt_op->rebind);
+ } else {
+ xe_pt_cancel_bind(vma, pt_op->entries, pt_op->num_entries);
+ }
+
+ return err;
+}
+
static int unbind_op_prepare(struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
struct xe_vma *vma)
@@ -1713,19 +1909,13 @@ static int unbind_op_prepare(struct xe_tile *tile,
if (!((vma->tile_present | vma->tile_staged) & BIT(tile->id)))
return 0;
+ xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
xe_bo_assert_held(xe_vma_bo(vma));
vm_dbg(&xe_vma_vm(vma)->xe->drm,
"Preparing unbind, with range [%llx...%llx)\n",
xe_vma_start(vma), xe_vma_end(vma) - 1);
- /*
- * Wait for invalidation to complete. Can corrupt internal page table
- * state if an invalidation is running while preparing an unbind.
- */
- if (xe_vma_is_userptr(vma) && xe_vm_in_fault_mode(xe_vma_vm(vma)))
- mmu_interval_read_begin(&to_userptr_vma(vma)->userptr.notifier);
-
pt_op->vma = vma;
pt_op->bind = false;
pt_op->rebind = false;
@@ -1734,11 +1924,13 @@ static int unbind_op_prepare(struct xe_tile *tile,
if (err)
return err;
- pt_op->num_entries = xe_pt_stage_unbind(tile, vma, pt_op->entries);
+ pt_op->num_entries = xe_pt_stage_unbind(tile, xe_vma_vm(vma),
+ vma, NULL, pt_op->entries);
xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
pt_op->num_entries, false);
- xe_pt_update_ops_rfence_interval(pt_update_ops, vma);
+ xe_pt_update_ops_rfence_interval(pt_update_ops, xe_vma_start(vma),
+ xe_vma_end(vma));
++pt_update_ops->current_op;
pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
pt_update_ops->needs_invalidation = true;
@@ -1748,6 +1940,42 @@ static int unbind_op_prepare(struct xe_tile *tile,
return 0;
}
+static int unbind_range_prepare(struct xe_vm *vm,
+ struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_svm_range *range)
+{
+ u32 current_op = pt_update_ops->current_op;
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
+
+ if (!(range->tile_present & BIT(tile->id)))
+ return 0;
+
+ vm_dbg(&vm->xe->drm,
+ "Preparing unbind, with range [%lx...%lx)\n",
+ range->base.itree.start, range->base.itree.last);
+
+ pt_op->vma = XE_INVALID_VMA;
+ pt_op->bind = false;
+ pt_op->rebind = false;
+
+ pt_op->num_entries = xe_pt_stage_unbind(tile, vm, NULL, range,
+ pt_op->entries);
+
+ xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
+ pt_op->num_entries, false);
+ xe_pt_update_ops_rfence_interval(pt_update_ops, range->base.itree.start,
+ range->base.itree.last + 1);
+ ++pt_update_ops->current_op;
+ pt_update_ops->needs_svm_lock = true;
+ pt_update_ops->needs_invalidation = true;
+
+ xe_pt_commit_prepare_unbind(XE_INVALID_VMA, pt_op->entries,
+ pt_op->num_entries);
+
+ return 0;
+}
+
static int op_prepare(struct xe_vm *vm,
struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
@@ -1759,15 +1987,21 @@ static int op_prepare(struct xe_vm *vm,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
- if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+ if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) ||
+ op->map.is_cpu_addr_mirror)
break;
err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma);
pt_update_ops->wait_vm_kernel = true;
break;
case DRM_GPUVA_OP_REMAP:
- err = unbind_op_prepare(tile, pt_update_ops,
- gpuva_to_vma(op->base.remap.unmap->va));
+ {
+ struct xe_vma *old = gpuva_to_vma(op->base.remap.unmap->va);
+
+ if (xe_vma_is_cpu_addr_mirror(old))
+ break;
+
+ err = unbind_op_prepare(tile, pt_update_ops, old);
if (!err && op->remap.prev) {
err = bind_op_prepare(vm, tile, pt_update_ops,
@@ -1780,15 +2014,40 @@ static int op_prepare(struct xe_vm *vm,
pt_update_ops->wait_vm_bookkeep = true;
}
break;
+ }
case DRM_GPUVA_OP_UNMAP:
- err = unbind_op_prepare(tile, pt_update_ops,
- gpuva_to_vma(op->base.unmap.va));
+ {
+ struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
+
+ if (xe_vma_is_cpu_addr_mirror(vma))
+ break;
+
+ err = unbind_op_prepare(tile, pt_update_ops, vma);
break;
+ }
case DRM_GPUVA_OP_PREFETCH:
- err = bind_op_prepare(vm, tile, pt_update_ops,
- gpuva_to_vma(op->base.prefetch.va));
+ {
+ struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+
+ if (xe_vma_is_cpu_addr_mirror(vma))
+ break;
+
+ err = bind_op_prepare(vm, tile, pt_update_ops, vma);
pt_update_ops->wait_vm_kernel = true;
break;
+ }
+ case DRM_GPUVA_OP_DRIVER:
+ if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
+ xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
+
+ err = bind_range_prepare(vm, tile, pt_update_ops,
+ op->map_range.vma,
+ op->map_range.range);
+ } else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) {
+ err = unbind_range_prepare(vm, tile, pt_update_ops,
+ op->unmap_range.range);
+ }
+ break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
@@ -1858,6 +2117,8 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vma *vma, struct dma_fence *fence,
struct dma_fence *fence2)
{
+ xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
+
if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
pt_update_ops->wait_vm_bookkeep ?
@@ -1891,6 +2152,8 @@ static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vma *vma, struct dma_fence *fence,
struct dma_fence *fence2)
{
+ xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
+
if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
pt_update_ops->wait_vm_bookkeep ?
@@ -1925,16 +2188,21 @@ static void op_commit(struct xe_vm *vm,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
- if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+ if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) ||
+ op->map.is_cpu_addr_mirror)
break;
bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
fence2);
break;
case DRM_GPUVA_OP_REMAP:
- unbind_op_commit(vm, tile, pt_update_ops,
- gpuva_to_vma(op->base.remap.unmap->va), fence,
- fence2);
+ {
+ struct xe_vma *old = gpuva_to_vma(op->base.remap.unmap->va);
+
+ if (xe_vma_is_cpu_addr_mirror(old))
+ break;
+
+ unbind_op_commit(vm, tile, pt_update_ops, old, fence, fence2);
if (op->remap.prev)
bind_op_commit(vm, tile, pt_update_ops, op->remap.prev,
@@ -1943,14 +2211,35 @@ static void op_commit(struct xe_vm *vm,
bind_op_commit(vm, tile, pt_update_ops, op->remap.next,
fence, fence2);
break;
+ }
case DRM_GPUVA_OP_UNMAP:
- unbind_op_commit(vm, tile, pt_update_ops,
- gpuva_to_vma(op->base.unmap.va), fence, fence2);
+ {
+ struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
+
+ if (!xe_vma_is_cpu_addr_mirror(vma))
+ unbind_op_commit(vm, tile, pt_update_ops, vma, fence,
+ fence2);
break;
+ }
case DRM_GPUVA_OP_PREFETCH:
- bind_op_commit(vm, tile, pt_update_ops,
- gpuva_to_vma(op->base.prefetch.va), fence, fence2);
+ {
+ struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+
+ if (!xe_vma_is_cpu_addr_mirror(vma))
+ bind_op_commit(vm, tile, pt_update_ops, vma, fence,
+ fence2);
+ break;
+ }
+ case DRM_GPUVA_OP_DRIVER:
+ {
+ if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
+ op->map_range.range->tile_present |= BIT(tile->id);
+ op->map_range.range->tile_invalidated &= ~BIT(tile->id);
+ } else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) {
+ op->unmap_range.range->tile_present &= ~BIT(tile->id);
+ }
break;
+ }
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
@@ -1968,6 +2257,12 @@ static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
.pre_commit = xe_pt_userptr_pre_commit,
};
+static const struct xe_migrate_pt_update_ops svm_migrate_ops = {
+ .populate = xe_vm_populate_pgtable,
+ .clear = xe_migrate_clear_pgtable_callback,
+ .pre_commit = xe_pt_svm_pre_commit,
+};
+
/**
* xe_pt_update_ops_run() - Run PT update operations
* @tile: Tile of PT update operations
@@ -1993,7 +2288,9 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
struct xe_vma_op *op;
int err = 0, i;
struct xe_migrate_pt_update update = {
- .ops = pt_update_ops->needs_userptr_lock ?
+ .ops = pt_update_ops->needs_svm_lock ?
+ &svm_migrate_ops :
+ pt_update_ops->needs_userptr_lock ?
&userptr_migrate_ops :
&migrate_ops,
.vops = vops,
@@ -2114,6 +2411,8 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
&ifence->base.base, &mfence->base.base);
}
+ if (pt_update_ops->needs_svm_lock)
+ xe_svm_notifier_unlock(vm);
if (pt_update_ops->needs_userptr_lock)
up_read(&vm->userptr.notifier_lock);
diff --git a/drivers/gpu/drm/xe/xe_pt.h b/drivers/gpu/drm/xe/xe_pt.h
index 9ab386431cad..5ecf003d513c 100644
--- a/drivers/gpu/drm/xe/xe_pt.h
+++ b/drivers/gpu/drm/xe/xe_pt.h
@@ -13,6 +13,7 @@ struct dma_fence;
struct xe_bo;
struct xe_device;
struct xe_exec_queue;
+struct xe_svm_range;
struct xe_sync_entry;
struct xe_tile;
struct xe_vm;
@@ -35,6 +36,8 @@ void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred);
+void xe_pt_clear(struct xe_device *xe, struct xe_pt *pt);
+
int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops);
struct dma_fence *xe_pt_update_ops_run(struct xe_tile *tile,
struct xe_vma_ops *vops);
@@ -42,5 +45,7 @@ void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops);
void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops);
bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma);
+bool xe_pt_zap_ptes_range(struct xe_tile *tile, struct xe_vm *vm,
+ struct xe_svm_range *range);
#endif
diff --git a/drivers/gpu/drm/xe/xe_pt_types.h b/drivers/gpu/drm/xe/xe_pt_types.h
index 384cc04de719..69eab6f37cfe 100644
--- a/drivers/gpu/drm/xe/xe_pt_types.h
+++ b/drivers/gpu/drm/xe/xe_pt_types.h
@@ -104,6 +104,8 @@ struct xe_vm_pgtable_update_ops {
u32 num_ops;
/** @current_op: current operations */
u32 current_op;
+ /** @needs_svm_lock: Needs SVM lock */
+ bool needs_svm_lock;
/** @needs_userptr_lock: Needs userptr lock */
bool needs_userptr_lock;
/** @needs_invalidation: Needs invalidation */
diff --git a/drivers/gpu/drm/xe/xe_pt_walk.c b/drivers/gpu/drm/xe/xe_pt_walk.c
index b8b3d2aea492..be602a763ff3 100644
--- a/drivers/gpu/drm/xe/xe_pt_walk.c
+++ b/drivers/gpu/drm/xe/xe_pt_walk.c
@@ -74,7 +74,8 @@ int xe_pt_walk_range(struct xe_ptw *parent, unsigned int level,
u64 addr, u64 end, struct xe_pt_walk *walk)
{
pgoff_t offset = xe_pt_offset(addr, level, walk);
- struct xe_ptw **entries = parent->children ? parent->children : NULL;
+ struct xe_ptw **entries = walk->staging ? (parent->staging ?: NULL) :
+ (parent->children ?: NULL);
const struct xe_pt_walk_ops *ops = walk->ops;
enum page_walk_action action;
struct xe_ptw *child;
diff --git a/drivers/gpu/drm/xe/xe_pt_walk.h b/drivers/gpu/drm/xe/xe_pt_walk.h
index 5ecc4d2f0f65..5c02c244f7de 100644
--- a/drivers/gpu/drm/xe/xe_pt_walk.h
+++ b/drivers/gpu/drm/xe/xe_pt_walk.h
@@ -11,12 +11,14 @@
/**
* struct xe_ptw - base class for driver pagetable subclassing.
* @children: Pointer to an array of children if any.
+ * @staging: Pointer to an array of staging entries, if any.
*
* Drivers could subclass this, and if it's a page-directory, typically
* embed an array of xe_ptw pointers.
*/
struct xe_ptw {
struct xe_ptw **children;
+ struct xe_ptw **staging;
};
/**
@@ -41,6 +43,8 @@ struct xe_pt_walk {
* as shared pagetables.
*/
bool shared_pt_mode;
+ /** @staging: Walk staging PT structure */
+ bool staging;
};
/**
diff --git a/drivers/gpu/drm/xe/xe_pxp.c b/drivers/gpu/drm/xe/xe_pxp.c
index 3cd3f83e86b0..454ea7dc08ac 100644
--- a/drivers/gpu/drm/xe/xe_pxp.c
+++ b/drivers/gpu/drm/xe/xe_pxp.c
@@ -132,14 +132,6 @@ static int pxp_wait_for_session_state(struct xe_pxp *pxp, u32 id, bool in_play)
static void pxp_invalidate_queues(struct xe_pxp *pxp);
-static void pxp_invalidate_state(struct xe_pxp *pxp)
-{
- pxp_invalidate_queues(pxp);
-
- if (pxp->status == XE_PXP_ACTIVE)
- pxp->key_instance++;
-}
-
static int pxp_terminate_hw(struct xe_pxp *pxp)
{
struct xe_gt *gt = pxp->gt;
@@ -193,7 +185,8 @@ static void pxp_terminate(struct xe_pxp *pxp)
mutex_lock(&pxp->mutex);
- pxp_invalidate_state(pxp);
+ if (pxp->status == XE_PXP_ACTIVE)
+ pxp->key_instance++;
/*
* we'll mark the status as needing termination on resume, so no need to
@@ -220,6 +213,8 @@ static void pxp_terminate(struct xe_pxp *pxp)
mutex_unlock(&pxp->mutex);
+ pxp_invalidate_queues(pxp);
+
ret = pxp_terminate_hw(pxp);
if (ret) {
drm_err(&xe->drm, "PXP termination failed: %pe\n", ERR_PTR(ret));
@@ -665,23 +660,15 @@ out:
return ret;
}
-/**
- * xe_pxp_exec_queue_remove - remove a queue from the PXP list
- * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
- * @q: the queue to remove from the list
- *
- * If PXP is enabled and the exec_queue is in the list, the queue will be
- * removed from the list and its PM reference will be released. It is safe to
- * call this function multiple times for the same queue.
- */
-void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q)
+static void __pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q, bool lock)
{
bool need_pm_put = false;
if (!xe_pxp_is_enabled(pxp))
return;
- spin_lock_irq(&pxp->queues.lock);
+ if (lock)
+ spin_lock_irq(&pxp->queues.lock);
if (!list_empty(&q->pxp.link)) {
list_del_init(&q->pxp.link);
@@ -690,36 +677,54 @@ void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q)
q->pxp.type = DRM_XE_PXP_TYPE_NONE;
- spin_unlock_irq(&pxp->queues.lock);
+ if (lock)
+ spin_unlock_irq(&pxp->queues.lock);
if (need_pm_put)
xe_pm_runtime_put(pxp->xe);
}
+/**
+ * xe_pxp_exec_queue_remove - remove a queue from the PXP list
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ * @q: the queue to remove from the list
+ *
+ * If PXP is enabled and the exec_queue is in the list, the queue will be
+ * removed from the list and its PM reference will be released. It is safe to
+ * call this function multiple times for the same queue.
+ */
+void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q)
+{
+ __pxp_exec_queue_remove(pxp, q, true);
+}
+
static void pxp_invalidate_queues(struct xe_pxp *pxp)
{
struct xe_exec_queue *tmp, *q;
+ LIST_HEAD(to_clean);
spin_lock_irq(&pxp->queues.lock);
- /*
- * Removing a queue from the PXP list requires a put of the RPM ref that
- * the queue holds to keep the PXP session alive, which can't be done
- * under spinlock. Since it is safe to kill a queue multiple times, we
- * can leave the invalid queue in the list for now and postpone the
- * removal and associated RPM put to when the queue is destroyed.
- */
- list_for_each_entry(tmp, &pxp->queues.list, pxp.link) {
- q = xe_exec_queue_get_unless_zero(tmp);
-
+ list_for_each_entry_safe(q, tmp, &pxp->queues.list, pxp.link) {
+ q = xe_exec_queue_get_unless_zero(q);
if (!q)
continue;
+ list_move_tail(&q->pxp.link, &to_clean);
+ }
+ spin_unlock_irq(&pxp->queues.lock);
+
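+	/* Kill and remove the queues outside the spinlock, since the RPM put in the removal path can sleep */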
+ list_for_each_entry_safe(q, tmp, &to_clean, pxp.link) {
xe_exec_queue_kill(q);
+
+ /*
+ * We hold a ref to the queue so there is no risk of racing with
+ * the calls to exec_queue_remove coming from exec_queue_destroy.
+ */
+ __pxp_exec_queue_remove(pxp, q, false);
+
xe_exec_queue_put(q);
}
-
- spin_unlock_irq(&pxp->queues.lock);
}
/**
@@ -791,7 +796,6 @@ int xe_pxp_bo_key_check(struct xe_pxp *pxp, struct xe_bo *bo)
/**
* xe_pxp_obj_key_check - check if the key used by a drm_gem_obj is valid
- * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
* @obj: the drm_gem_obj we want to check
*
* Checks whether a drm_gem_obj was encrypted with the current key or an
@@ -800,9 +804,13 @@ int xe_pxp_bo_key_check(struct xe_pxp *pxp, struct xe_bo *bo)
* Returns: 0 if the key is valid, -ENODEV if PXP is disabled, -EINVAL if the
* obj is not using PXP, -ENOEXEC if the key is not valid.
*/
-int xe_pxp_obj_key_check(struct xe_pxp *pxp, struct drm_gem_object *obj)
+int xe_pxp_obj_key_check(struct drm_gem_object *obj)
{
- return xe_pxp_bo_key_check(pxp, gem_to_xe_bo(obj));
+ struct xe_bo *bo = gem_to_xe_bo(obj);
+ struct xe_device *xe = xe_bo_device(bo);
+ struct xe_pxp *pxp = xe->pxp;
+
+ return xe_pxp_bo_key_check(pxp, bo);
}
/**
@@ -816,6 +824,7 @@ int xe_pxp_obj_key_check(struct xe_pxp *pxp, struct drm_gem_object *obj)
*/
int xe_pxp_pm_suspend(struct xe_pxp *pxp)
{
+ bool needs_queue_inval = false;
int ret = 0;
if (!xe_pxp_is_enabled(pxp))
@@ -848,7 +857,8 @@ wait_for_activation:
break;
fallthrough;
case XE_PXP_ACTIVE:
- pxp_invalidate_state(pxp);
+ pxp->key_instance++;
+ needs_queue_inval = true;
break;
default:
drm_err(&pxp->xe->drm, "unexpected state during PXP suspend: %u",
@@ -865,6 +875,9 @@ wait_for_activation:
mutex_unlock(&pxp->mutex);
+ if (needs_queue_inval)
+ pxp_invalidate_queues(pxp);
+
/*
* if there is a termination in progress, wait for it.
* We need to wait outside the lock because the completion is done from
diff --git a/drivers/gpu/drm/xe/xe_pxp.h b/drivers/gpu/drm/xe/xe_pxp.h
index 546b156d63aa..71a23280b900 100644
--- a/drivers/gpu/drm/xe/xe_pxp.h
+++ b/drivers/gpu/drm/xe/xe_pxp.h
@@ -30,6 +30,6 @@ void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q);
int xe_pxp_key_assign(struct xe_pxp *pxp, struct xe_bo *bo);
int xe_pxp_bo_key_check(struct xe_pxp *pxp, struct xe_bo *bo);
-int xe_pxp_obj_key_check(struct xe_pxp *pxp, struct drm_gem_object *obj);
+int xe_pxp_obj_key_check(struct drm_gem_object *obj);
#endif /* __XE_PXP_H__ */
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index ebfae746f861..5e65830dad25 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -16,6 +16,7 @@
#include "regs/xe_gt_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
+#include "xe_eu_stall.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
@@ -337,8 +338,13 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
xe->info.devid | (xe->info.revid << 16);
if (xe_device_get_root_tile(xe)->mem.vram.usable_size)
- config->info[DRM_XE_QUERY_CONFIG_FLAGS] =
+ config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
+ if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_GPUSVM))
+ config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
+ DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR;
+ config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
+ DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY;
config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT] =
xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
config->info[DRM_XE_QUERY_CONFIG_VA_BITS] = xe->info.va_bits;
@@ -729,6 +735,47 @@ static int query_pxp_status(struct xe_device *xe, struct drm_xe_device_query *qu
return 0;
}
+static int query_eu_stall(struct xe_device *xe,
+ struct drm_xe_device_query *query)
+{
+ void __user *query_ptr = u64_to_user_ptr(query->data);
+ struct drm_xe_query_eu_stall *info;
+ size_t size, array_size;
+ const u64 *rates;
+ u32 num_rates;
+ int ret;
+
+ if (!xe_eu_stall_supported_on_platform(xe)) {
+ drm_dbg(&xe->drm, "EU stall monitoring is not supported on this platform\n");
+ return -ENODEV;
+ }
+
+ array_size = xe_eu_stall_get_sampling_rates(&num_rates, &rates);
+ size = sizeof(struct drm_xe_query_eu_stall) + array_size;
+
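+	/* Two-call convention: a zero query->size only reports the required size */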
+ if (query->size == 0) {
+ query->size = size;
+ return 0;
+ } else if (XE_IOCTL_DBG(xe, query->size != size)) {
+ return -EINVAL;
+ }
+
+ info = kzalloc(size, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->num_sampling_rates = num_rates;
+ info->capabilities = DRM_XE_EU_STALL_CAPS_BASE;
+ info->record_size = xe_eu_stall_data_record_size(xe);
+ info->per_xecore_buf_size = xe_eu_stall_get_per_xecore_buf_size();
+ memcpy(info->sampling_rates, rates, array_size);
+
+ ret = copy_to_user(query_ptr, info, size);
+ kfree(info);
+
+ return ret ? -EFAULT : 0;
+}
+
static int (* const xe_query_funcs[])(struct xe_device *xe,
struct drm_xe_device_query *query) = {
query_engines,
@@ -741,6 +788,7 @@ static int (* const xe_query_funcs[])(struct xe_device *xe,
query_uc_fw_version,
query_oa_units,
query_pxp_status,
+ query_eu_stall,
};
int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
diff --git a/drivers/gpu/drm/xe/xe_reg_whitelist.c b/drivers/gpu/drm/xe/xe_reg_whitelist.c
index edab5d4e3ba5..23f6c81d9994 100644
--- a/drivers/gpu/drm/xe/xe_reg_whitelist.c
+++ b/drivers/gpu/drm/xe/xe_reg_whitelist.c
@@ -88,7 +88,6 @@ static const struct xe_rtp_entry_sr register_whitelist[] = {
RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4))
},
- {}
};
static void whitelist_apply_to_hwe(struct xe_hw_engine *hwe)
@@ -137,7 +136,8 @@ void xe_reg_whitelist_process_engine(struct xe_hw_engine *hwe)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
- xe_rtp_process_to_sr(&ctx, register_whitelist, &hwe->reg_whitelist);
+ xe_rtp_process_to_sr(&ctx, register_whitelist, ARRAY_SIZE(register_whitelist),
+ &hwe->reg_whitelist);
whitelist_apply_to_hwe(hwe);
}
diff --git a/drivers/gpu/drm/xe/xe_res_cursor.h b/drivers/gpu/drm/xe/xe_res_cursor.h
index dca374b6521c..d1a403cfb628 100644
--- a/drivers/gpu/drm/xe/xe_res_cursor.h
+++ b/drivers/gpu/drm/xe/xe_res_cursor.h
@@ -26,6 +26,7 @@
#include <linux/scatterlist.h>
+#include <drm/drm_pagemap.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_resource.h>
@@ -34,17 +35,38 @@
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_macros.h"
+#include "xe_svm.h"
#include "xe_ttm_vram_mgr.h"
-/* state back for walking over vram_mgr, stolen_mgr, and gtt_mgr allocations */
+/**
+ * struct xe_res_cursor - state for walking over DMA mappings, vram_mgr,
+ * stolen_mgr, and gtt_mgr allocations
+ */
struct xe_res_cursor {
+ /** @start: Start of cursor */
u64 start;
+ /** @size: Size of the current segment. */
u64 size;
+ /** @remaining: Remaining bytes in cursor */
u64 remaining;
+ /** @node: Opaque pointer to the current node of the cursor */
void *node;
+ /** @mem_type: Memory type */
u32 mem_type;
+ /** @sgl: Scatterlist for cursor */
struct scatterlist *sgl;
+ /** @dma_addr: Current element in a struct drm_pagemap_device_addr array */
+ const struct drm_pagemap_device_addr *dma_addr;
+ /** @mm: Buddy allocator for VRAM cursor */
struct drm_buddy *mm;
+ /**
+ * @dma_start: DMA start address for the current segment.
+ * This may be different to @dma_addr.addr since elements in
+ * the array may be coalesced to a single segment.
+ */
+ u64 dma_start;
+ /** @dma_seg_size: Size of the current DMA segment. */
+ u64 dma_seg_size;
};
static struct drm_buddy *xe_res_get_buddy(struct ttm_resource *res)
@@ -70,6 +92,7 @@ static inline void xe_res_first(struct ttm_resource *res,
struct xe_res_cursor *cur)
{
cur->sgl = NULL;
+ cur->dma_addr = NULL;
if (!res)
goto fallback;
@@ -142,6 +165,36 @@ static inline void __xe_res_sg_next(struct xe_res_cursor *cur)
}
/**
+ * __xe_res_dma_next() - Advance the cursor when end-of-segment is reached
+ * @cur: The cursor
+ */
+static inline void __xe_res_dma_next(struct xe_res_cursor *cur)
+{
+ const struct drm_pagemap_device_addr *addr = cur->dma_addr;
+ u64 start = cur->start;
+
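+	/* Walk forward to the array element that contains @start */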
+ while (start >= cur->dma_seg_size) {
+ start -= cur->dma_seg_size;
+ addr++;
+ cur->dma_seg_size = PAGE_SIZE << addr->order;
+ }
+ cur->dma_start = addr->addr;
+
+	/* Coalesce contiguous array elements into a single segment */
+ while (cur->dma_seg_size - start < cur->remaining) {
+ if (cur->dma_start + cur->dma_seg_size != addr[1].addr ||
+ addr->proto != addr[1].proto)
+ break;
+ addr++;
+ cur->dma_seg_size += PAGE_SIZE << addr->order;
+ }
+
+ cur->dma_addr = addr;
+ cur->start = start;
+ cur->size = cur->dma_seg_size - start;
+}
+
+/**
* xe_res_first_sg - initialize a xe_res_cursor with a scatter gather table
*
* @sg: scatter gather table to walk
@@ -160,12 +213,43 @@ static inline void xe_res_first_sg(const struct sg_table *sg,
cur->start = start;
cur->remaining = size;
cur->size = 0;
+ cur->dma_addr = NULL;
cur->sgl = sg->sgl;
cur->mem_type = XE_PL_TT;
__xe_res_sg_next(cur);
}
/**
+ * xe_res_first_dma - initialize a xe_res_cursor with dma_addr array
+ *
+ * @dma_addr: struct drm_pagemap_device_addr array to walk
+ * @start: Start of the range
+ * @size: Size of the range
+ * @cur: cursor object to initialize
+ *
+ * Start walking over the range of allocations starting at @start and spanning @size bytes.
+ */
+static inline void xe_res_first_dma(const struct drm_pagemap_device_addr *dma_addr,
+ u64 start, u64 size,
+ struct xe_res_cursor *cur)
+{
+ XE_WARN_ON(!dma_addr);
+ XE_WARN_ON(!IS_ALIGNED(start, PAGE_SIZE) ||
+ !IS_ALIGNED(size, PAGE_SIZE));
+
+ cur->node = NULL;
+ cur->start = start;
+ cur->remaining = size;
+ cur->dma_seg_size = PAGE_SIZE << dma_addr->order;
+ cur->dma_start = 0;
+ cur->size = 0;
+ cur->dma_addr = dma_addr;
+ __xe_res_dma_next(cur);
+ cur->sgl = NULL;
+ cur->mem_type = XE_PL_TT;
+}
+
+/**
* xe_res_next - advance the cursor
*
* @cur: the cursor to advance
@@ -191,6 +275,12 @@ static inline void xe_res_next(struct xe_res_cursor *cur, u64 size)
return;
}
+ if (cur->dma_addr) {
+ cur->start += size;
+ __xe_res_dma_next(cur);
+ return;
+ }
+
if (cur->sgl) {
cur->start += size;
__xe_res_sg_next(cur);
@@ -232,6 +322,35 @@ static inline void xe_res_next(struct xe_res_cursor *cur, u64 size)
*/
static inline u64 xe_res_dma(const struct xe_res_cursor *cur)
{
- return cur->sgl ? sg_dma_address(cur->sgl) + cur->start : cur->start;
+ if (cur->dma_addr)
+ return cur->dma_start + cur->start;
+ else if (cur->sgl)
+ return sg_dma_address(cur->sgl) + cur->start;
+ else
+ return cur->start;
+}
+
+/**
+ * xe_res_is_vram() - Whether the cursor's current DMA address points to
+ * same-device VRAM
+ * @cur: The cursor.
+ *
+ * Return: true iff the address returned by xe_res_dma() points to internal vram.
+ */
+static inline bool xe_res_is_vram(const struct xe_res_cursor *cur)
+{
+ if (cur->dma_addr)
+ return cur->dma_addr->proto == XE_INTERCONNECT_VRAM;
+
+ switch (cur->mem_type) {
+ case XE_PL_STOLEN:
+ case XE_PL_VRAM0:
+ case XE_PL_VRAM1:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
}
#endif
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index 0c230ee53bba..917fc16de866 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -90,11 +90,10 @@ static int emit_flush_dw(u32 *dw, int i)
return i;
}
-static int emit_flush_imm_ggtt(u32 addr, u32 value, bool invalidate_tlb,
- u32 *dw, int i)
+static int emit_flush_imm_ggtt(u32 addr, u32 value, u32 flags, u32 *dw, int i)
{
dw[i++] = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW |
- (invalidate_tlb ? MI_INVALIDATE_TLB : 0);
+ flags;
dw[i++] = addr | MI_FLUSH_DW_USE_GTT;
dw[i++] = 0;
dw[i++] = value;
@@ -111,16 +110,13 @@ static int emit_bb_start(u64 batch_addr, u32 ppgtt_flag, u32 *dw, int i)
return i;
}
-static int emit_flush_invalidate(u32 flag, u32 *dw, int i)
+static int emit_flush_invalidate(u32 *dw, int i)
{
- dw[i] = MI_FLUSH_DW;
- dw[i] |= flag;
- dw[i++] |= MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW |
- MI_FLUSH_DW_STORE_INDEX;
-
- dw[i++] = LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
+ dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
+ MI_FLUSH_IMM_DW | MI_FLUSH_DW_STORE_INDEX;
+ dw[i++] = LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR;
+ dw[i++] = 0;
dw[i++] = 0;
- dw[i++] = ~0U;
return i;
}
@@ -177,6 +173,10 @@ static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
u32 flags;
+ if (XE_WA(gt, 14016712196))
+ i = emit_pipe_control(dw, i, 0, PIPE_CONTROL_DEPTH_CACHE_FLUSH,
+ LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0);
+
flags = (PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_TILE_CACHE_FLUSH |
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
@@ -253,7 +253,7 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc
if (job->ring_ops_flush_tlb) {
dw[i++] = preparser_disable(true);
i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
- seqno, true, dw, i);
+ seqno, MI_INVALIDATE_TLB, dw, i);
dw[i++] = preparser_disable(false);
} else {
i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
@@ -269,7 +269,7 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc
dw, i);
}
- i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);
+ i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, 0, dw, i);
i = emit_user_interrupt(dw, i);
@@ -315,7 +315,7 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
if (job->ring_ops_flush_tlb)
i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
- seqno, true, dw, i);
+ seqno, MI_INVALIDATE_TLB, dw, i);
dw[i++] = preparser_disable(false);
@@ -332,7 +332,7 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
dw, i);
}
- i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);
+ i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, 0, dw, i);
i = emit_user_interrupt(dw, i);
@@ -409,7 +409,7 @@ static void emit_migration_job_gen12(struct xe_sched_job *job,
if (!IS_SRIOV_VF(gt_to_xe(job->q->gt))) {
/* XXX: Do we need this? Leaving for now. */
dw[i++] = preparser_disable(true);
- i = emit_flush_invalidate(0, dw, i);
+ i = emit_flush_invalidate(dw, i);
dw[i++] = preparser_disable(false);
}
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index 7a1c78fdfc92..13bb62d3e615 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -237,6 +237,7 @@ static void rtp_mark_active(struct xe_device *xe,
* the save-restore argument.
* @ctx: The context for processing the table, with one of device, gt or hwe
* @entries: Table with RTP definitions
+ * @n_entries: Number of entries to process, usually ARRAY_SIZE(entries)
* @sr: Save-restore struct where matching rules execute the action. This can be
 * viewed as the "coalesced view" of the multiple tables. The bits for each
* register set are expected not to collide with previously added entries
@@ -247,6 +248,7 @@ static void rtp_mark_active(struct xe_device *xe,
*/
void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
const struct xe_rtp_entry_sr *entries,
+ size_t n_entries,
struct xe_reg_sr *sr)
{
const struct xe_rtp_entry_sr *entry;
@@ -259,7 +261,9 @@ void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
if (IS_SRIOV_VF(xe))
return;
- for (entry = entries; entry && entry->name; entry++) {
+ xe_assert(xe, entries);
+
+ for (entry = entries; entry - entries < n_entries; entry++) {
bool match = false;
if (entry->flags & XE_RTP_ENTRY_FLAG_FOREACH_ENGINE) {
diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h
index 38b9f13bba5e..4fe736a11c42 100644
--- a/drivers/gpu/drm/xe/xe_rtp.h
+++ b/drivers/gpu/drm/xe/xe_rtp.h
@@ -430,7 +430,7 @@ void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx,
void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
const struct xe_rtp_entry_sr *entries,
- struct xe_reg_sr *sr);
+ size_t n_entries, struct xe_reg_sr *sr);
void xe_rtp_process(struct xe_rtp_process_ctx *ctx,
const struct xe_rtp_entry *entries);
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.c b/drivers/gpu/drm/xe/xe_survivability_mode.c
index 02b4eadf8407..d939ce70e6fa 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode.c
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.c
@@ -127,40 +127,55 @@ static ssize_t survivability_mode_show(struct device *dev,
static DEVICE_ATTR_ADMIN_RO(survivability_mode);
-static void enable_survivability_mode(struct pci_dev *pdev)
+static void xe_survivability_mode_fini(void *arg)
+{
+ struct xe_device *xe = arg;
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ struct device *dev = &pdev->dev;
+
+ sysfs_remove_file(&dev->kobj, &dev_attr_survivability_mode.attr);
+}
+
+static int enable_survivability_mode(struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
struct xe_device *xe = pdev_to_xe_device(pdev);
struct xe_survivability *survivability = &xe->survivability;
int ret = 0;
- /* set survivability mode */
- survivability->mode = true;
- dev_info(dev, "In Survivability Mode\n");
-
/* create survivability mode sysfs */
ret = sysfs_create_file(&dev->kobj, &dev_attr_survivability_mode.attr);
if (ret) {
dev_warn(dev, "Failed to create survivability sysfs files\n");
- return;
+ return ret;
}
- xe_heci_gsc_init(xe);
+ ret = devm_add_action_or_reset(xe->drm.dev,
+ xe_survivability_mode_fini, xe);
+ if (ret)
+ return ret;
+
+ ret = xe_heci_gsc_init(xe);
+ if (ret)
+ return ret;
xe_vsec_init(xe);
+
+ survivability->mode = true;
+ dev_err(dev, "In Survivability Mode\n");
+
+ return 0;
}
/**
- * xe_survivability_mode_enabled - check if survivability mode is enabled
+ * xe_survivability_mode_is_enabled - check if survivability mode is enabled
* @xe: xe device instance
*
* Returns true if in survivability mode, false otherwise
*/
-bool xe_survivability_mode_enabled(struct xe_device *xe)
+bool xe_survivability_mode_is_enabled(struct xe_device *xe)
{
- struct xe_survivability *survivability = &xe->survivability;
-
- return survivability->mode;
+ return xe->survivability.mode;
}
/**
@@ -183,35 +198,19 @@ bool xe_survivability_mode_required(struct xe_device *xe)
data = xe_mmio_read32(mmio, PCODE_SCRATCH(0));
survivability->boot_status = REG_FIELD_GET(BOOT_STATUS, data);
- return (survivability->boot_status == NON_CRITICAL_FAILURE ||
- survivability->boot_status == CRITICAL_FAILURE);
+ return survivability->boot_status == NON_CRITICAL_FAILURE ||
+ survivability->boot_status == CRITICAL_FAILURE;
}
/**
- * xe_survivability_mode_remove - remove survivability mode
+ * xe_survivability_mode_enable - Initialize and enable the survivability mode
* @xe: xe device instance
*
- * clean up sysfs entries of survivability mode
- */
-void xe_survivability_mode_remove(struct xe_device *xe)
-{
- struct xe_survivability *survivability = &xe->survivability;
- struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
- struct device *dev = &pdev->dev;
-
- sysfs_remove_file(&dev->kobj, &dev_attr_survivability_mode.attr);
- xe_heci_gsc_fini(xe);
- kfree(survivability->info);
- pci_set_drvdata(pdev, NULL);
-}
-
-/**
- * xe_survivability_mode_init - Initialize the survivability mode
- * @xe: xe device instance
+ * Initialize survivability information and enable survivability mode
*
- * Initializes survivability information and enables survivability mode
+ * Return: 0 for success, negative error code otherwise.
*/
-void xe_survivability_mode_init(struct xe_device *xe)
+int xe_survivability_mode_enable(struct xe_device *xe)
{
struct xe_survivability *survivability = &xe->survivability;
struct xe_survivability_info *info;
@@ -219,9 +218,10 @@ void xe_survivability_mode_init(struct xe_device *xe)
survivability->size = MAX_SCRATCH_MMIO;
- info = kcalloc(survivability->size, sizeof(*info), GFP_KERNEL);
+ info = devm_kcalloc(xe->drm.dev, survivability->size, sizeof(*info),
+ GFP_KERNEL);
if (!info)
- return;
+ return -ENOMEM;
survivability->info = info;
@@ -230,9 +230,8 @@ void xe_survivability_mode_init(struct xe_device *xe)
/* Only log debug information and exit if it is a critical failure */
if (survivability->boot_status == CRITICAL_FAILURE) {
log_survivability_info(pdev);
- kfree(survivability->info);
- return;
+ return -ENXIO;
}
- enable_survivability_mode(pdev);
+ return enable_survivability_mode(pdev);
}
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.h b/drivers/gpu/drm/xe/xe_survivability_mode.h
index f530507a22c6..f4df5f9025ce 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode.h
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.h
@@ -10,9 +10,8 @@
struct xe_device;
-void xe_survivability_mode_init(struct xe_device *xe);
-void xe_survivability_mode_remove(struct xe_device *xe);
-bool xe_survivability_mode_enabled(struct xe_device *xe);
+int xe_survivability_mode_enable(struct xe_device *xe);
+bool xe_survivability_mode_is_enabled(struct xe_device *xe);
bool xe_survivability_mode_required(struct xe_device *xe);
#endif /* _XE_SURVIVABILITY_MODE_H_ */
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
new file mode 100644
index 000000000000..516898e99b26
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -0,0 +1,946 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include "xe_bo.h"
+#include "xe_gt_tlb_invalidation.h"
+#include "xe_migrate.h"
+#include "xe_module.h"
+#include "xe_pt.h"
+#include "xe_svm.h"
+#include "xe_ttm_vram_mgr.h"
+#include "xe_vm.h"
+#include "xe_vm_types.h"
+
+static bool xe_svm_range_in_vram(struct xe_svm_range *range)
+{
+ /* Not reliable without notifier lock */
+ return range->base.flags.has_devmem_pages;
+}
+
+static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
+{
+ /* Not reliable without notifier lock */
+ return xe_svm_range_in_vram(range) && range->tile_present;
+}
+
+static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
+{
+ return container_of(gpusvm, struct xe_vm, svm.gpusvm);
+}
+
+static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
+{
+ return gpusvm_to_vm(r->gpusvm);
+}
+
+static unsigned long xe_svm_range_start(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_start(&range->base);
+}
+
+static unsigned long xe_svm_range_end(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_end(&range->base);
+}
+
+static unsigned long xe_svm_range_size(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_size(&range->base);
+}
+
+#define range_debug(r__, operation__) \
+ vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \
+ "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
+ "start=0x%014lx, end=0x%014lx, size=%lu", \
+ (operation__), range_to_vm(&(r__)->base)->usm.asid, \
+ (r__)->base.gpusvm, \
+ xe_svm_range_in_vram((r__)) ? 1 : 0, \
+ xe_svm_range_has_vram_binding((r__)) ? 1 : 0, \
+ (r__)->base.notifier_seq, \
+ xe_svm_range_start((r__)), xe_svm_range_end((r__)), \
+ xe_svm_range_size((r__)))
+
+void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
+{
+ range_debug(range, operation);
+}
+
+static void *xe_svm_devm_owner(struct xe_device *xe)
+{
+ return xe;
+}
+
+static struct drm_gpusvm_range *
+xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
+{
+ struct xe_svm_range *range;
+
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+ if (!range)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&range->garbage_collector_link);
+ xe_vm_get(gpusvm_to_vm(gpusvm));
+
+ return &range->base;
+}
+
+static void xe_svm_range_free(struct drm_gpusvm_range *range)
+{
+ xe_vm_put(range_to_vm(range));
+ kfree(range);
+}
+
+static struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
+{
+ return container_of(r, struct xe_svm_range, base);
+}
+
+static void
+xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
+ const struct mmu_notifier_range *mmu_range)
+{
+ struct xe_device *xe = vm->xe;
+
+ range_debug(range, "GARBAGE COLLECTOR ADD");
+
+ drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
+
+ spin_lock(&vm->svm.garbage_collector.lock);
+ if (list_empty(&range->garbage_collector_link))
+ list_add_tail(&range->garbage_collector_link,
+ &vm->svm.garbage_collector.range_list);
+ spin_unlock(&vm->svm.garbage_collector.lock);
+
+ queue_work(xe_device_get_root_tile(xe)->primary_gt->usm.pf_wq,
+ &vm->svm.garbage_collector.work);
+}
+
+static u8
+xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
+ const struct mmu_notifier_range *mmu_range,
+ u64 *adj_start, u64 *adj_end)
+{
+ struct xe_svm_range *range = to_xe_range(r);
+ struct xe_device *xe = vm->xe;
+ struct xe_tile *tile;
+ u8 tile_mask = 0;
+ u8 id;
+
+ xe_svm_assert_in_notifier(vm);
+
+ range_debug(range, "NOTIFIER");
+
+ /* Skip if already unmapped or if no bindings exist */
+ if (range->base.flags.unmapped || !range->tile_present)
+ return 0;
+
+ range_debug(range, "NOTIFIER - EXECUTE");
+
+ /* Adjust invalidation to range boundaries */
+ *adj_start = min(xe_svm_range_start(range), mmu_range->start);
+ *adj_end = max(xe_svm_range_end(range), mmu_range->end);
+
+ /*
+ * XXX: Ideally would zap PTEs in one shot in xe_svm_invalidate but the
+ * invalidation code can't correctly cope with sparse ranges or
+ * invalidations spanning multiple ranges.
+ */
+ for_each_tile(tile, xe, id)
+ if (xe_pt_zap_ptes_range(tile, vm, range)) {
+ tile_mask |= BIT(id);
+ range->tile_invalidated |= BIT(id);
+ }
+
+ return tile_mask;
+}
+
+static void
+xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
+ const struct mmu_notifier_range *mmu_range)
+{
+ struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
+
+ xe_svm_assert_in_notifier(vm);
+
+ drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
+ if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP)
+ xe_svm_garbage_collector_add_range(vm, to_xe_range(r),
+ mmu_range);
+}
+
+static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier,
+ const struct mmu_notifier_range *mmu_range)
+{
+ struct xe_vm *vm = gpusvm_to_vm(gpusvm);
+ struct xe_device *xe = vm->xe;
+ struct xe_tile *tile;
+ struct drm_gpusvm_range *r, *first;
+ struct xe_gt_tlb_invalidation_fence
+ fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
+ u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
+ u8 tile_mask = 0;
+ u8 id;
+ u32 fence_id = 0;
+ long err;
+
+ xe_svm_assert_in_notifier(vm);
+
+ vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
+ "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d",
+ vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq,
+ mmu_range->start, mmu_range->end, mmu_range->event);
+
+ /* Adjust invalidation to notifier boundaries */
+ adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
+ adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);
+
+ first = drm_gpusvm_range_find(notifier, adj_start, adj_end);
+ if (!first)
+ return;
+
+ /*
+ * PTs may be getting destroyed, so it is not safe to touch them, but the
+ * PTs should already be invalidated at this point. Regardless, we still
+ * need to ensure any DMA mappings are unmapped here.
+ */
+ if (xe_vm_is_closed(vm))
+ goto range_notifier_event_end;
+
+ /*
+ * XXX: Less than ideal to always wait on VM's resv slots if an
+ * invalidation is not required. Could walk the range list twice to
+ * figure out if an invalidation is needed, but that is also not ideal.
+ */
+ err = dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ XE_WARN_ON(err <= 0);
+
+ r = first;
+ drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
+ tile_mask |= xe_svm_range_notifier_event_begin(vm, r, mmu_range,
+ &adj_start,
+ &adj_end);
+ if (!tile_mask)
+ goto range_notifier_event_end;
+
+ xe_device_wmb(xe);
+
+ for_each_tile(tile, xe, id) {
+ if (tile_mask & BIT(id)) {
+ int err;
+
+ xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
+ &fence[fence_id], true);
+
+ err = xe_gt_tlb_invalidation_range(tile->primary_gt,
+ &fence[fence_id],
+ adj_start,
+ adj_end,
+ vm->usm.asid);
+ if (WARN_ON_ONCE(err < 0))
+ goto wait;
+ ++fence_id;
+
+ if (!tile->media_gt)
+ continue;
+
+ xe_gt_tlb_invalidation_fence_init(tile->media_gt,
+ &fence[fence_id], true);
+
+ err = xe_gt_tlb_invalidation_range(tile->media_gt,
+ &fence[fence_id],
+ adj_start,
+ adj_end,
+ vm->usm.asid);
+ if (WARN_ON_ONCE(err < 0))
+ goto wait;
+ ++fence_id;
+ }
+ }
+
+wait:
+ for (id = 0; id < fence_id; ++id)
+ xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+
+range_notifier_event_end:
+ r = first;
+ drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
+ xe_svm_range_notifier_event_end(vm, r, mmu_range);
+}
+
+static int __xe_svm_garbage_collector(struct xe_vm *vm,
+ struct xe_svm_range *range)
+{
+ struct dma_fence *fence;
+
+ range_debug(range, "GARBAGE COLLECTOR");
+
+ xe_vm_lock(vm, false);
+ fence = xe_vm_range_unbind(vm, range);
+ xe_vm_unlock(vm);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+ dma_fence_put(fence);
+
+ drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
+
+ return 0;
+}
+
+static int xe_svm_garbage_collector(struct xe_vm *vm)
+{
+ struct xe_svm_range *range;
+ int err;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ if (xe_vm_is_closed_or_banned(vm))
+ return -ENOENT;
+
+ spin_lock(&vm->svm.garbage_collector.lock);
+ for (;;) {
+ range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
+ typeof(*range),
+ garbage_collector_link);
+ if (!range)
+ break;
+
+ list_del(&range->garbage_collector_link);
+ spin_unlock(&vm->svm.garbage_collector.lock);
+
+ err = __xe_svm_garbage_collector(vm, range);
+ if (err) {
+ drm_warn(&vm->xe->drm,
+ "Garbage collection failed: %pe\n",
+ ERR_PTR(err));
+ xe_vm_kill(vm, true);
+ return err;
+ }
+
+ spin_lock(&vm->svm.garbage_collector.lock);
+ }
+ spin_unlock(&vm->svm.garbage_collector.lock);
+
+ return 0;
+}
+
+static void xe_svm_garbage_collector_work_func(struct work_struct *w)
+{
+ struct xe_vm *vm = container_of(w, struct xe_vm,
+ svm.garbage_collector.work);
+
+ down_write(&vm->lock);
+ xe_svm_garbage_collector(vm);
+ up_write(&vm->lock);
+}
+
+static struct xe_vram_region *page_to_vr(struct page *page)
+{
+ return container_of(page->pgmap, struct xe_vram_region, pagemap);
+}
+
+static struct xe_tile *vr_to_tile(struct xe_vram_region *vr)
+{
+ return container_of(vr, struct xe_tile, mem.vram);
+}
+
+static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
+ struct page *page)
+{
+ u64 dpa;
+ struct xe_tile *tile = vr_to_tile(vr);
+ u64 pfn = page_to_pfn(page);
+ u64 offset;
+
+ xe_tile_assert(tile, is_device_private_page(page));
+ xe_tile_assert(tile, (pfn << PAGE_SHIFT) >= vr->hpa_base);
+
+ offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
+ dpa = vr->dpa_base + offset;
+
+ return dpa;
+}
+
+enum xe_svm_copy_dir {
+ XE_SVM_COPY_TO_VRAM,
+ XE_SVM_COPY_TO_SRAM,
+};
+
+static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
+ unsigned long npages, const enum xe_svm_copy_dir dir)
+{
+ struct xe_vram_region *vr = NULL;
+ struct xe_tile *tile;
+ struct dma_fence *fence = NULL;
+ unsigned long i;
+#define XE_VRAM_ADDR_INVALID ~0x0ull
+ u64 vram_addr = XE_VRAM_ADDR_INVALID;
+ int err = 0, pos = 0;
+ bool sram = dir == XE_SVM_COPY_TO_SRAM;
+
+ /*
+ * This flow is complex: it locates physically contiguous device pages,
+ * derives the starting physical address, and performs a single GPU copy
+ * for every 8M chunk in a DMA address array. Both device pages and
+ * DMA addresses may be sparsely populated. If either is NULL, a copy is
+ * triggered based on the current search state. The last GPU copy is
+ * waited on to ensure all copies are complete.
+ */
+
+ for (i = 0; i < npages; ++i) {
+ struct page *spage = pages[i];
+ struct dma_fence *__fence;
+ u64 __vram_addr;
+ bool match = false, chunk, last;
+
+#define XE_MIGRATE_CHUNK_SIZE SZ_8M
+ chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
+ last = (i + 1) == npages;
+
+ /* No CPU page and no device pages queued to copy */
+ if (!dma_addr[i] && vram_addr == XE_VRAM_ADDR_INVALID)
+ continue;
+
+ if (!vr && spage) {
+ vr = page_to_vr(spage);
+ tile = vr_to_tile(vr);
+ }
+ XE_WARN_ON(spage && page_to_vr(spage) != vr);
+
+ /*
+ * CPU page and device page valid, capture physical address on
+ * first device page, check if physically contiguous on subsequent
+ * device pages.
+ */
+ if (dma_addr[i] && spage) {
+ __vram_addr = xe_vram_region_page_to_dpa(vr, spage);
+ if (vram_addr == XE_VRAM_ADDR_INVALID) {
+ vram_addr = __vram_addr;
+ pos = i;
+ }
+
+ match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
+ }
+
+ /*
+ * Mismatched physical address, 8M copy chunk, or last page -
+ * trigger a copy.
+ */
+ if (!match || chunk || last) {
+ /*
+ * Extra page for first copy if last page and matching
+ * physical address.
+ */
+ int incr = (match && last) ? 1 : 0;
+
+ if (vram_addr != XE_VRAM_ADDR_INVALID) {
+ if (sram) {
+ vm_dbg(&tile->xe->drm,
+ "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
+ vram_addr, (u64)dma_addr[pos], i - pos + incr);
+ __fence = xe_migrate_from_vram(tile->migrate,
+ i - pos + incr,
+ vram_addr,
+ dma_addr + pos);
+ } else {
+ vm_dbg(&tile->xe->drm,
+ "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
+ (u64)dma_addr[pos], vram_addr, i - pos + incr);
+ __fence = xe_migrate_to_vram(tile->migrate,
+ i - pos + incr,
+ dma_addr + pos,
+ vram_addr);
+ }
+ if (IS_ERR(__fence)) {
+ err = PTR_ERR(__fence);
+ goto err_out;
+ }
+
+ dma_fence_put(fence);
+ fence = __fence;
+ }
+
+ /* Setup physical address of next device page */
+ if (dma_addr[i] && spage) {
+ vram_addr = __vram_addr;
+ pos = i;
+ } else {
+ vram_addr = XE_VRAM_ADDR_INVALID;
+ }
+
+ /* Extra mismatched device page, copy it */
+ if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
+ if (sram) {
+ vm_dbg(&tile->xe->drm,
+ "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
+ vram_addr, (u64)dma_addr[pos], 1);
+ __fence = xe_migrate_from_vram(tile->migrate, 1,
+ vram_addr,
+ dma_addr + pos);
+ } else {
+ vm_dbg(&tile->xe->drm,
+ "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
+ (u64)dma_addr[pos], vram_addr, 1);
+ __fence = xe_migrate_to_vram(tile->migrate, 1,
+ dma_addr + pos,
+ vram_addr);
+ }
+ if (IS_ERR(__fence)) {
+ err = PTR_ERR(__fence);
+ goto err_out;
+ }
+
+ dma_fence_put(fence);
+ fence = __fence;
+ }
+ }
+ }
+
+err_out:
+ /* Wait for all copies to complete */
+ if (fence) {
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ }
+
+ return err;
+#undef XE_MIGRATE_CHUNK_SIZE
+#undef XE_VRAM_ADDR_INVALID
+}
+
+static int xe_svm_copy_to_devmem(struct page **pages, dma_addr_t *dma_addr,
+ unsigned long npages)
+{
+ return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_VRAM);
+}
+
+static int xe_svm_copy_to_ram(struct page **pages, dma_addr_t *dma_addr,
+ unsigned long npages)
+{
+ return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_SRAM);
+}
+
+static struct xe_bo *to_xe_bo(struct drm_gpusvm_devmem *devmem_allocation)
+{
+ return container_of(devmem_allocation, struct xe_bo, devmem_allocation);
+}
+
+static void xe_svm_devmem_release(struct drm_gpusvm_devmem *devmem_allocation)
+{
+ struct xe_bo *bo = to_xe_bo(devmem_allocation);
+
+ xe_bo_put_async(bo);
+}
+
+static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
+{
+ return PHYS_PFN(offset + vr->hpa_base);
+}
+
+static struct drm_buddy *tile_to_buddy(struct xe_tile *tile)
+{
+ return &tile->mem.vram.ttm.mm;
+}
+
+static int xe_svm_populate_devmem_pfn(struct drm_gpusvm_devmem *devmem_allocation,
+ unsigned long npages, unsigned long *pfn)
+{
+ struct xe_bo *bo = to_xe_bo(devmem_allocation);
+ struct ttm_resource *res = bo->ttm.resource;
+ struct list_head *blocks = &to_xe_ttm_vram_mgr_resource(res)->blocks;
+ struct drm_buddy_block *block;
+ int j = 0;
+
+ list_for_each_entry(block, blocks, link) {
+ struct xe_vram_region *vr = block->private;
+ struct xe_tile *tile = vr_to_tile(vr);
+ struct drm_buddy *buddy = tile_to_buddy(tile);
+ u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
+ int i;
+
+ for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i)
+ pfn[j++] = block_pfn + i;
+ }
+
+ return 0;
+}
+
+static const struct drm_gpusvm_devmem_ops gpusvm_devmem_ops = {
+ .devmem_release = xe_svm_devmem_release,
+ .populate_devmem_pfn = xe_svm_populate_devmem_pfn,
+ .copy_to_devmem = xe_svm_copy_to_devmem,
+ .copy_to_ram = xe_svm_copy_to_ram,
+};
+
+static const struct drm_gpusvm_ops gpusvm_ops = {
+ .range_alloc = xe_svm_range_alloc,
+ .range_free = xe_svm_range_free,
+ .invalidate = xe_svm_invalidate,
+};
+
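+/* SVM fault chunk sizes, ordered from largest to smallest */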
+static const unsigned long fault_chunk_sizes[] = {
+ SZ_2M,
+ SZ_64K,
+ SZ_4K,
+};
+
+/**
+ * xe_svm_init() - SVM initialize
+ * @vm: The VM.
+ *
+ * Initialize SVM state which is embedded within the VM.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_svm_init(struct xe_vm *vm)
+{
+ int err;
+
+ spin_lock_init(&vm->svm.garbage_collector.lock);
+ INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
+ INIT_WORK(&vm->svm.garbage_collector.work,
+ xe_svm_garbage_collector_work_func);
+
+ err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
+ current->mm, xe_svm_devm_owner(vm->xe), 0,
+ vm->size, xe_modparam.svm_notifier_size * SZ_1M,
+ &gpusvm_ops, fault_chunk_sizes,
+ ARRAY_SIZE(fault_chunk_sizes));
+ if (err)
+ return err;
+
+ drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
+
+ return 0;
+}
+
+/**
+ * xe_svm_close() - SVM close
+ * @vm: The VM.
+ *
+ * Close SVM state (i.e., stop and flush all SVM actions).
+ */
+void xe_svm_close(struct xe_vm *vm)
+{
+ xe_assert(vm->xe, xe_vm_is_closed(vm));
+ flush_work(&vm->svm.garbage_collector.work);
+}
+
+/**
+ * xe_svm_fini() - SVM finalize
+ * @vm: The VM.
+ *
+ * Finalize SVM state which is embedded within the VM.
+ */
+void xe_svm_fini(struct xe_vm *vm)
+{
+ xe_assert(vm->xe, xe_vm_is_closed(vm));
+
+ drm_gpusvm_fini(&vm->svm.gpusvm);
+}
+
+static bool xe_svm_range_is_valid(struct xe_svm_range *range,
+ struct xe_tile *tile)
+{
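+	/* A range is valid on a tile if a binding is present and has not since been invalidated */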
+ return (range->tile_present & ~range->tile_invalidated) & BIT(tile->id);
+}
+
+static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
+{
+ return &tile->mem.vram;
+}
+
+static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ struct mm_struct *mm = vm->svm.gpusvm.mm;
+ struct xe_vram_region *vr = tile_to_vr(tile);
+ struct drm_buddy_block *block;
+ struct list_head *blocks;
+ struct xe_bo *bo;
+ ktime_t end = 0;
+ int err;
+
+ range_debug(range, "ALLOCATE VRAM");
+
+ if (!mmget_not_zero(mm))
+ return -EFAULT;
+ mmap_read_lock(mm);
+
+retry:
+ bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL,
+ xe_svm_range_size(range),
+ ttm_bo_type_device,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_CPU_ADDR_MIRROR);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ if (xe_vm_validate_should_retry(NULL, err, &end))
+ goto retry;
+ goto unlock;
+ }
+
+ drm_gpusvm_devmem_init(&bo->devmem_allocation,
+ vm->xe->drm.dev, mm,
+ &gpusvm_devmem_ops,
+ &tile->mem.vram.dpagemap,
+ xe_svm_range_size(range));
+
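+	/* Tag each buddy block with its VRAM region so PFNs can later be derived from block offsets */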
+ blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
+ list_for_each_entry(block, blocks, link)
+ block->private = vr;
+
+ err = drm_gpusvm_migrate_to_devmem(&vm->svm.gpusvm, &range->base,
+ &bo->devmem_allocation, ctx);
+ xe_bo_unlock(bo);
+ if (err)
+ xe_bo_put(bo); /* Creation ref */
+
+unlock:
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ return err;
+}
+
+/**
+ * xe_svm_handle_pagefault() - SVM handle page fault
+ * @vm: The VM.
+ * @vma: The CPU address mirror VMA.
+ * @tile: The tile upon which the fault occurred.
+ * @fault_addr: The GPU fault address.
+ * @atomic: The fault atomic access bit.
+ *
+ * Create GPU bindings for a SVM page fault. Optionally migrate to device
+ * memory.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_tile *tile, u64 fault_addr,
+ bool atomic)
+{
+ struct drm_gpusvm_ctx ctx = {
+ .read_only = xe_vma_read_only(vma),
+ .devmem_possible = IS_DGFX(vm->xe) &&
+ IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
+ .check_pages_threshold = IS_DGFX(vm->xe) &&
+ IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? SZ_64K : 0,
+ };
+ struct xe_svm_range *range;
+ struct drm_gpusvm_range *r;
+ struct drm_exec exec;
+ struct dma_fence *fence;
+ ktime_t end = 0;
+ int err;
+
+ lockdep_assert_held_write(&vm->lock);
+ xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
+
+retry:
+ /* Always process UNMAPs first so the view of SVM ranges is current */
+ err = xe_svm_garbage_collector(vm);
+ if (err)
+ return err;
+
+ r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, fault_addr,
+ xe_vma_start(vma), xe_vma_end(vma),
+ &ctx);
+ if (IS_ERR(r))
+ return PTR_ERR(r);
+
+ range = to_xe_range(r);
+ if (xe_svm_range_is_valid(range, tile))
+ return 0;
+
+ range_debug(range, "PAGE FAULT");
+
+ /* XXX: Add migration policy, for now migrate range once */
+ if (!range->skip_migrate && range->base.flags.migrate_devmem &&
+ xe_svm_range_size(range) >= SZ_64K) {
+ range->skip_migrate = true;
+
+ err = xe_svm_alloc_vram(vm, tile, range, &ctx);
+ if (err) {
+ drm_dbg(&vm->xe->drm,
+ "VRAM allocation failed, falling back to "
+ "retrying fault, asid=%u, errno=%pe\n",
+ vm->usm.asid, ERR_PTR(err));
+ goto retry;
+ }
+ }
+
+ range_debug(range, "GET PAGES");
+ err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx);
+ /* Corner case where CPU mappings have changed */
+ if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
+ if (err == -EOPNOTSUPP) {
+ range_debug(range, "PAGE FAULT - EVICT PAGES");
+ drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
+ }
+ drm_dbg(&vm->xe->drm,
+ "Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
+ vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
+ range_debug(range, "PAGE FAULT - RETRY PAGES");
+ goto retry;
+ }
+ if (err) {
+ range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
+ goto err_out;
+ }
+
+ range_debug(range, "PAGE FAULT - BIND");
+
+retry_bind:
+ drm_exec_init(&exec, 0, 0);
+ drm_exec_until_all_locked(&exec) {
+ err = drm_exec_lock_obj(&exec, vm->gpuvm.r_obj);
+ drm_exec_retry_on_contention(&exec);
+ if (err) {
+ drm_exec_fini(&exec);
+ goto err_out;
+ }
+
+ fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
+ if (IS_ERR(fence)) {
+ drm_exec_fini(&exec);
+ err = PTR_ERR(fence);
+ if (err == -EAGAIN) {
+ range_debug(range, "PAGE FAULT - RETRY BIND");
+ goto retry;
+ }
+ if (xe_vm_validate_should_retry(&exec, err, &end))
+ goto retry_bind;
+ goto err_out;
+ }
+ }
+ drm_exec_fini(&exec);
+
+ if (xe_modparam.always_migrate_to_vram)
+ range->skip_migrate = false;
+
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+
+err_out:
+
+ return err;
+}
+
+/**
+ * xe_svm_has_mapping() - SVM has mappings
+ * @vm: The VM.
+ * @start: Start address.
+ * @end: End address.
+ *
+ * Check if an address range has SVM mappings.
+ *
+ * Return: True if address range has a SVM mapping, False otherwise
+ */
+bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
+{
+ return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
+}
+
+/**
+ * xe_svm_bo_evict() - SVM evict BO to system memory
+ * @bo: BO to evict
+ *
+ * SVM evict BO to system memory. GPU SVM layer ensures all device pages
+ * are evicted before returning.
+ *
+ * Return: 0 on success, standard error code otherwise
+ */
+int xe_svm_bo_evict(struct xe_bo *bo)
+{
+ return drm_gpusvm_evict_to_ram(&bo->devmem_allocation);
+}
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
+static struct drm_pagemap_device_addr
+xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
+ struct device *dev,
+ struct page *page,
+ unsigned int order,
+ enum dma_data_direction dir)
+{
+ struct device *pgmap_dev = dpagemap->dev;
+ enum drm_interconnect_protocol prot;
+ dma_addr_t addr;
+
+ if (pgmap_dev == dev) {
+ addr = xe_vram_region_page_to_dpa(page_to_vr(page), page);
+ prot = XE_INTERCONNECT_VRAM;
+ } else {
+ addr = DMA_MAPPING_ERROR;
+ prot = 0;
+ }
+
+ return drm_pagemap_device_addr_encode(addr, prot, order, dir);
+}
+
+static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
+ .device_map = xe_drm_pagemap_device_map,
+};
+
+/**
+ * xe_devm_add() - Remap and provide memmap backing for device memory
+ * @tile: tile that the memory region belongs to
+ * @vr: vram memory region to remap
+ *
+ * This remaps device memory to the host physical address space and creates
+ * struct pages to back the device memory.
+ *
+ * Return: 0 on success, standard error code otherwise
+ */
+int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct device *dev = &to_pci_dev(xe->drm.dev)->dev;
+ struct resource *res;
+ void *addr;
+ int ret;
+
+ res = devm_request_free_mem_region(dev, &iomem_resource,
+ vr->usable_size);
+ if (IS_ERR(res)) {
+ ret = PTR_ERR(res);
+ return ret;
+ }
+
+ vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
+ vr->pagemap.range.start = res->start;
+ vr->pagemap.range.end = res->end;
+ vr->pagemap.nr_range = 1;
+ vr->pagemap.ops = drm_gpusvm_pagemap_ops_get();
+ vr->pagemap.owner = xe_svm_devm_owner(xe);
+ addr = devm_memremap_pages(dev, &vr->pagemap);
+
+ vr->dpagemap.dev = dev;
+ vr->dpagemap.ops = &xe_drm_pagemap_ops;
+
+ if (IS_ERR(addr)) {
+ devm_release_mem_region(dev, res->start, resource_size(res));
+ ret = PTR_ERR(addr);
+ drm_err(&xe->drm, "Failed to remap tile %d memory, errno %pe\n",
+ tile->id, ERR_PTR(ret));
+ return ret;
+ }
+ vr->hpa_base = res->start;
+
+ drm_dbg(&xe->drm, "Added tile %d memory [%llx-%llx] to devm, remapped to %pr\n",
+ tile->id, vr->io_start, vr->io_start + vr->usable_size, res);
+ return 0;
+}
+#else
+int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
+{
+ return 0;
+}
+#endif
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
new file mode 100644
index 000000000000..e059590e5076
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_SVM_H_
+#define _XE_SVM_H_
+
+#include <drm/drm_pagemap.h>
+#include <drm/drm_gpusvm.h>
+
+#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER
+
+struct xe_bo;
+struct xe_vram_region;
+struct xe_tile;
+struct xe_vm;
+struct xe_vma;
+
+/** struct xe_svm_range - SVM range */
+struct xe_svm_range {
+ /** @base: base drm_gpusvm_range */
+ struct drm_gpusvm_range base;
+ /**
+ * @garbage_collector_link: Link into VM's garbage collect SVM range
+ * list. Protected by VM's garbage collect lock.
+ */
+ struct list_head garbage_collector_link;
+ /**
+ * @tile_present: Mask of tiles on which a binding is present for this range.
+ * Protected by GPU SVM notifier lock.
+ */
+ u8 tile_present;
+ /**
+ * @tile_invalidated: Mask of tiles on which the binding has been invalidated
+ * for this range. Protected by GPU SVM notifier lock.
+ */
+ u8 tile_invalidated;
+ /**
+ * @skip_migrate: Skip migration to VRAM, protected by GPU fault handler
+ * locking.
+ */
+ u8 skip_migrate :1;
+};
+
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
+/**
+ * xe_svm_range_pages_valid() - SVM range pages valid
+ * @range: SVM range
+ *
+ * Return: True if SVM range pages are valid, False otherwise
+ */
+static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
+}
+
+int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);
+
+int xe_svm_init(struct xe_vm *vm);
+
+void xe_svm_fini(struct xe_vm *vm);
+
+void xe_svm_close(struct xe_vm *vm);
+
+int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_tile *tile, u64 fault_addr,
+ bool atomic);
+
+bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
+
+int xe_svm_bo_evict(struct xe_bo *bo);
+
+void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
+#else
+static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
+{
+ return false;
+}
+
+static inline
+int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
+{
+ return 0;
+}
+
+static inline
+int xe_svm_init(struct xe_vm *vm)
+{
+ return 0;
+}
+
+static inline
+void xe_svm_fini(struct xe_vm *vm)
+{
+}
+
+static inline
+void xe_svm_close(struct xe_vm *vm)
+{
+}
+
+static inline
+int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_tile *tile, u64 fault_addr,
+ bool atomic)
+{
+ return 0;
+}
+
+static inline
+bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
+{
+ return false;
+}
+
+static inline
+int xe_svm_bo_evict(struct xe_bo *bo)
+{
+ return 0;
+}
+
+static inline
+void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
+{
+}
+#endif
+
+/**
+ * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
+ * @range: SVM range
+ *
+ * Return: True if SVM range has a DMA mapping, False otherwise
+ */
+static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
+{
+ lockdep_assert_held(&range->base.gpusvm->notifier_lock);
+ return range->base.flags.has_dma_mapping;
+}
+
+#define xe_svm_assert_in_notifier(vm__) \
+ lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
+
+#define xe_svm_notifier_lock(vm__) \
+ drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)
+
+#define xe_svm_notifier_unlock(vm__) \
+ drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index d29658ff4dd4..0771acbbf367 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -13,6 +13,7 @@
#include "xe_migrate.h"
#include "xe_pcode.h"
#include "xe_sa.h"
+#include "xe_svm.h"
#include "xe_tile.h"
#include "xe_tile_sysfs.h"
#include "xe_ttm_vram_mgr.h"
@@ -160,6 +161,7 @@ static int tile_ttm_mgr_init(struct xe_tile *tile)
*/
int xe_tile_init_noalloc(struct xe_tile *tile)
{
+ struct xe_device *xe = tile_to_xe(tile);
int err;
err = tile_ttm_mgr_init(tile);
@@ -168,6 +170,9 @@ int xe_tile_init_noalloc(struct xe_tile *tile)
xe_wa_apply_tile_workarounds(tile);
+ if (xe->info.has_usm && IS_DGFX(xe))
+ xe_devm_add(tile, &tile->mem.vram);
+
return xe_tile_sysfs_init(tile);
}
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index d5281de04d54..b4a3577df70c 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -427,6 +427,36 @@ DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get_ioctl,
TP_ARGS(xe, caller)
);
+TRACE_EVENT(xe_eu_stall_data_read,
+ TP_PROTO(u8 slice, u8 subslice,
+ u32 read_ptr, u32 write_ptr,
+ size_t read_size, size_t total_size),
+ TP_ARGS(slice, subslice,
+ read_ptr, write_ptr,
+ read_size, total_size),
+
+ TP_STRUCT__entry(__field(u8, slice)
+ __field(u8, subslice)
+ __field(u32, read_ptr)
+ __field(u32, write_ptr)
+ __field(size_t, read_size)
+ __field(size_t, total_size)
+ ),
+
+ TP_fast_assign(__entry->slice = slice;
+ __entry->subslice = subslice;
+ __entry->read_ptr = read_ptr;
+ __entry->write_ptr = write_ptr;
+ __entry->read_size = read_size;
+ __entry->total_size = total_size;
+ ),
+
+ TP_printk("slice: %u subslice: %u read ptr: 0x%x write ptr: 0x%x read size: %zu total read size: %zu",
+ __entry->slice, __entry->subslice,
+ __entry->read_ptr, __entry->write_ptr,
+ __entry->read_size, __entry->total_size)
+);
+
#endif
/* This part must be outside protection */
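TRACE_EVENT() generates a trace_xe_eu_stall_data_read() helper matching TP_PROTO above; a hedged sketch of the call the EU-stall read path would make (the variable names are placeholders):

	trace_xe_eu_stall_data_read(slice, subslice, read_ptr, write_ptr,
				    read_size, total_size);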
diff --git a/drivers/gpu/drm/xe/xe_trace_guc.h b/drivers/gpu/drm/xe/xe_trace_guc.h
index 23abdd55dc62..78949db9cfce 100644
--- a/drivers/gpu/drm/xe/xe_trace_guc.h
+++ b/drivers/gpu/drm/xe/xe_trace_guc.h
@@ -14,6 +14,7 @@
#include "xe_device_types.h"
#include "xe_guc_exec_queue_types.h"
+#include "xe_guc_engine_activity_types.h"
#define __dev_name_xe(xe) dev_name((xe)->drm.dev)
@@ -100,6 +101,54 @@ DEFINE_EVENT_PRINT(xe_guc_ctb, xe_guc_ctb_g2h,
);
+TRACE_EVENT(xe_guc_engine_activity,
+ TP_PROTO(struct xe_device *xe, struct engine_activity *ea, const char *name,
+ u16 instance),
+ TP_ARGS(xe, ea, name, instance),
+
+ TP_STRUCT__entry(
+ __string(dev, __dev_name_xe(xe))
+ __string(name, name)
+ __field(u32, global_change_num)
+ __field(u32, guc_tsc_frequency_hz)
+ __field(u32, lag_latency_usec)
+ __field(u16, instance)
+ __field(u16, change_num)
+ __field(u16, quanta_ratio)
+ __field(u32, last_update_tick)
+ __field(u64, active_ticks)
+ __field(u64, active)
+ __field(u64, total)
+ __field(u64, quanta)
+ __field(u64, last_cpu_ts)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev);
+ __assign_str(name);
+ __entry->global_change_num = ea->metadata.global_change_num;
+ __entry->guc_tsc_frequency_hz = ea->metadata.guc_tsc_frequency_hz;
+ __entry->lag_latency_usec = ea->metadata.lag_latency_usec;
+ __entry->instance = instance;
+ __entry->change_num = ea->activity.change_num;
+ __entry->quanta_ratio = ea->activity.quanta_ratio;
+ __entry->last_update_tick = ea->activity.last_update_tick;
+ __entry->active_ticks = ea->activity.active_ticks;
+ __entry->active = ea->active;
+ __entry->total = ea->total;
+ __entry->quanta = ea->quanta;
+ __entry->last_cpu_ts = ea->last_cpu_ts;
+ ),
+
+ TP_printk("dev=%s engine %s:%d Active=%llu, quanta=%llu, last_cpu_ts=%llu\n"
+ "Activity metadata: global_change_num=%u, guc_tsc_frequency_hz=%u lag_latency_usec=%u\n"
+ "Activity data: change_num=%u, quanta_ratio=0x%x, last_update_tick=%u, active_ticks=%llu\n",
+ __get_str(dev), __get_str(name), __entry->instance,
+ (__entry->active + __entry->total), __entry->quanta, __entry->last_cpu_ts,
+ __entry->global_change_num, __entry->guc_tsc_frequency_hz,
+ __entry->lag_latency_usec, __entry->change_num, __entry->quanta_ratio,
+ __entry->last_update_tick, __entry->active_ticks)
+);
#endif
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index d449de0fb6ec..49ddbda7cdef 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -7,6 +7,8 @@
#include <kunit/visibility.h>
+#include <drm/drm_managed.h>
+
#include "regs/xe_gt_regs.h"
#include "xe_gt_types.h"
#include "xe_platform_types.h"
@@ -83,28 +85,22 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
XE_RTP_RULES(MEDIA_VERSION(2000)),
XE_RTP_ACTIONS(SET(XE2LPM_SCRATCH3_LBCF, RWFLUSHALLEN))
},
-
- {}
};
static const struct xe_rtp_entry_sr engine_tunings[] = {
+ { XE_RTP_NAME("Tuning: L3 Hashing Mask"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(CLR(XELP_GARBCNTL, XELP_BUS_HASH_CTL_BIT_EXC))
+ },
{ XE_RTP_NAME("Tuning: Set Indirect State Override"),
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1274),
ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(SAMPLER_MODE, INDIRECT_STATE_BASE_ADDR_OVERRIDE))
},
- {}
};
static const struct xe_rtp_entry_sr lrc_tunings[] = {
- { XE_RTP_NAME("Tuning: ganged timer, also known as 16011163337"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
- /* read verification is ignored due to 1608008084. */
- XE_RTP_ACTIONS(FIELD_SET_NO_READ_MASK(FF_MODE2,
- FF_MODE2_GS_TIMER_MASK,
- FF_MODE2_GS_TIMER_224))
- },
-
/* DG2 */
{ XE_RTP_NAME("Tuning: L3 cache"),
@@ -139,15 +135,47 @@ static const struct xe_rtp_entry_sr lrc_tunings[] = {
XE_RTP_ACTIONS(FIELD_SET(FF_MODE, VS_HIT_MAX_VALUE_MASK,
REG_FIELD_PREP(VS_HIT_MAX_VALUE_MASK, 0x3f)))
},
-
- {}
};
+/**
+ * xe_tuning_init - initialize the GT's tunings bookkeeping
+ * @gt: GT instance to initialize
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int xe_tuning_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ size_t n_lrc, n_engine, n_gt, total;
+ unsigned long *p;
+
+ n_gt = BITS_TO_LONGS(ARRAY_SIZE(gt_tunings));
+ n_engine = BITS_TO_LONGS(ARRAY_SIZE(engine_tunings));
+ n_lrc = BITS_TO_LONGS(ARRAY_SIZE(lrc_tunings));
+ total = n_gt + n_engine + n_lrc;
+
+ p = drmm_kzalloc(&xe->drm, sizeof(*p) * total, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ gt->tuning_active.gt = p;
+ p += n_gt;
+ gt->tuning_active.engine = p;
+ p += n_engine;
+ gt->tuning_active.lrc = p;
+
+ return 0;
+}
+ALLOW_ERROR_INJECTION(xe_tuning_init, ERRNO); /* See xe_pci_probe() */
+
void xe_tuning_process_gt(struct xe_gt *gt)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
- xe_rtp_process_to_sr(&ctx, gt_tunings, &gt->reg_sr);
+ xe_rtp_process_ctx_enable_active_tracking(&ctx,
+ gt->tuning_active.gt,
+ ARRAY_SIZE(gt_tunings));
+ xe_rtp_process_to_sr(&ctx, gt_tunings, ARRAY_SIZE(gt_tunings), &gt->reg_sr);
}
EXPORT_SYMBOL_IF_KUNIT(xe_tuning_process_gt);
@@ -155,7 +183,11 @@ void xe_tuning_process_engine(struct xe_hw_engine *hwe)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
- xe_rtp_process_to_sr(&ctx, engine_tunings, &hwe->reg_sr);
+ xe_rtp_process_ctx_enable_active_tracking(&ctx,
+ hwe->gt->tuning_active.engine,
+ ARRAY_SIZE(engine_tunings));
+ xe_rtp_process_to_sr(&ctx, engine_tunings, ARRAY_SIZE(engine_tunings),
+ &hwe->reg_sr);
}
EXPORT_SYMBOL_IF_KUNIT(xe_tuning_process_engine);
@@ -171,5 +203,25 @@ void xe_tuning_process_lrc(struct xe_hw_engine *hwe)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
- xe_rtp_process_to_sr(&ctx, lrc_tunings, &hwe->reg_lrc);
+ xe_rtp_process_ctx_enable_active_tracking(&ctx,
+ hwe->gt->tuning_active.lrc,
+ ARRAY_SIZE(lrc_tunings));
+ xe_rtp_process_to_sr(&ctx, lrc_tunings, ARRAY_SIZE(lrc_tunings), &hwe->reg_lrc);
+}
+
+void xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p)
+{
+ size_t idx;
+
+ drm_printf(p, "GT Tunings\n");
+ for_each_set_bit(idx, gt->tuning_active.gt, ARRAY_SIZE(gt_tunings))
+ drm_printf_indent(p, 1, "%s\n", gt_tunings[idx].name);
+
+ drm_printf(p, "\nEngine Tunings\n");
+ for_each_set_bit(idx, gt->tuning_active.engine, ARRAY_SIZE(engine_tunings))
+ drm_printf_indent(p, 1, "%s\n", engine_tunings[idx].name);
+
+ drm_printf(p, "\nLRC Tunings\n");
+ for_each_set_bit(idx, gt->tuning_active.lrc, ARRAY_SIZE(lrc_tunings))
+ drm_printf_indent(p, 1, "%s\n", lrc_tunings[idx].name);
}
diff --git a/drivers/gpu/drm/xe/xe_tuning.h b/drivers/gpu/drm/xe/xe_tuning.h
index 4f9c3ac3b516..dd0d3ccc9c65 100644
--- a/drivers/gpu/drm/xe/xe_tuning.h
+++ b/drivers/gpu/drm/xe/xe_tuning.h
@@ -6,11 +6,14 @@
#ifndef _XE_TUNING_
#define _XE_TUNING_
+struct drm_printer;
struct xe_gt;
struct xe_hw_engine;
+int xe_tuning_init(struct xe_gt *gt);
void xe_tuning_process_gt(struct xe_gt *gt);
void xe_tuning_process_engine(struct xe_hw_engine *hwe);
void xe_tuning_process_lrc(struct xe_hw_engine *hwe);
+void xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p);
#endif
diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
index d8167e818280..c14bd2282044 100644
--- a/drivers/gpu/drm/xe/xe_uc.c
+++ b/drivers/gpu/drm/xe/xe_uc.c
@@ -14,6 +14,7 @@
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_guc_pc.h"
+#include "xe_guc_engine_activity.h"
#include "xe_huc.h"
#include "xe_sriov.h"
#include "xe_uc_fw.h"
@@ -210,6 +211,8 @@ int xe_uc_init_hw(struct xe_uc *uc)
if (ret)
return ret;
+ xe_guc_engine_activity_enable_stats(&uc->guc);
+
/* We don't fail the driver load if HuC fails to auth, but let's warn */
ret = xe_huc_auth(&uc->huc, XE_HUC_AUTH_VIA_GUC);
xe_gt_assert(uc_to_gt(uc), !ret);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index d664f2e418b2..22a26aff3a6e 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -8,6 +8,7 @@
#include <linux/dma-fence-array.h>
#include <linux/nospec.h>
+#include <drm/drm_drv.h>
#include <drm/drm_exec.h>
#include <drm/drm_print.h>
#include <drm/ttm/ttm_tt.h>
@@ -35,6 +36,7 @@
#include "xe_pt.h"
#include "xe_pxp.h"
#include "xe_res_cursor.h"
+#include "xe_svm.h"
#include "xe_sync.h"
#include "xe_trace_bo.h"
#include "xe_wa.h"
@@ -270,6 +272,7 @@ out_up_write:
return err;
}
+ALLOW_ERROR_INJECTION(xe_vm_add_compute_exec_queue, ERRNO);
/**
* xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
@@ -580,51 +583,26 @@ out_unlock_outer:
trace_xe_vm_rebind_worker_exit(vm);
}
-static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
- const struct mmu_notifier_range *range,
- unsigned long cur_seq)
+static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma)
{
- struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
- struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
+ struct xe_userptr *userptr = &uvma->userptr;
struct xe_vma *vma = &uvma->vma;
- struct xe_vm *vm = xe_vma_vm(vma);
struct dma_resv_iter cursor;
struct dma_fence *fence;
long err;
- xe_assert(vm->xe, xe_vma_is_userptr(vma));
- trace_xe_vma_userptr_invalidate(vma);
-
- if (!mmu_notifier_range_blockable(range))
- return false;
-
- vm_dbg(&xe_vma_vm(vma)->xe->drm,
- "NOTIFIER: addr=0x%016llx, range=0x%016llx",
- xe_vma_start(vma), xe_vma_size(vma));
-
- down_write(&vm->userptr.notifier_lock);
- mmu_interval_set_seq(mni, cur_seq);
-
- /* No need to stop gpu access if the userptr is not yet bound. */
- if (!userptr->initial_bind) {
- up_write(&vm->userptr.notifier_lock);
- return true;
- }
-
/*
* Tell exec and rebind worker they need to repin and rebind this
* userptr.
*/
if (!xe_vm_in_fault_mode(vm) &&
- !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
+ !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
spin_lock(&vm->userptr.invalidated_lock);
list_move_tail(&userptr->invalidate_link,
&vm->userptr.invalidated);
spin_unlock(&vm->userptr.invalidated_lock);
}
- up_write(&vm->userptr.notifier_lock);
-
/*
* Preempt fences turn into schedule disables, pipeline these.
* Note that even in fault mode, we need to wait for binds and
@@ -642,11 +620,37 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
false, MAX_SCHEDULE_TIMEOUT);
XE_WARN_ON(err <= 0);
- if (xe_vm_in_fault_mode(vm)) {
+ if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) {
err = xe_vm_invalidate_vma(vma);
XE_WARN_ON(err);
}
+ xe_hmm_userptr_unmap(uvma);
+}
+
+static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier);
+ struct xe_vma *vma = &uvma->vma;
+ struct xe_vm *vm = xe_vma_vm(vma);
+
+ xe_assert(vm->xe, xe_vma_is_userptr(vma));
+ trace_xe_vma_userptr_invalidate(vma);
+
+ if (!mmu_notifier_range_blockable(range))
+ return false;
+
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "NOTIFIER: addr=0x%016llx, range=0x%016llx",
+ xe_vma_start(vma), xe_vma_size(vma));
+
+ down_write(&vm->userptr.notifier_lock);
+ mmu_interval_set_seq(mni, cur_seq);
+
+ __vma_userptr_invalidate(vm, uvma);
+ up_write(&vm->userptr.notifier_lock);
trace_xe_vma_userptr_invalidate_complete(vma);
return true;
@@ -656,31 +660,71 @@ static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
.invalidate = vma_userptr_invalidate,
};
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+/**
+ * xe_vma_userptr_force_invalidate() - force invalidate a userptr
+ * @uvma: The userptr vma to invalidate
+ *
+ * Perform a forced userptr invalidation for testing purposes.
+ */
+void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
+{
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+
+ /* Protect against concurrent userptr pinning */
+ lockdep_assert_held(&vm->lock);
+ /* Protect against concurrent notifiers */
+ lockdep_assert_held(&vm->userptr.notifier_lock);
+ /*
+ * Protect against concurrent instances of this function and
+ * the critical exec sections
+ */
+ xe_vm_assert_held(vm);
+
+ if (!mmu_interval_read_retry(&uvma->userptr.notifier,
+ uvma->userptr.notifier_seq))
+ uvma->userptr.notifier_seq -= 2;
+ __vma_userptr_invalidate(vm, uvma);
+}
+#endif
+
int xe_vm_userptr_pin(struct xe_vm *vm)
{
struct xe_userptr_vma *uvma, *next;
int err = 0;
- LIST_HEAD(tmp_evict);
xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
lockdep_assert_held_write(&vm->lock);
/* Collect invalidated userptrs */
spin_lock(&vm->userptr.invalidated_lock);
+ xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
userptr.invalidate_link) {
list_del_init(&uvma->userptr.invalidate_link);
- list_move_tail(&uvma->userptr.repin_link,
- &vm->userptr.repin_list);
+ list_add_tail(&uvma->userptr.repin_link,
+ &vm->userptr.repin_list);
}
spin_unlock(&vm->userptr.invalidated_lock);
- /* Pin and move to temporary list */
+ /* Pin and move to bind list */
list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
userptr.repin_link) {
err = xe_vma_userptr_pin_pages(uvma);
if (err == -EFAULT) {
list_del_init(&uvma->userptr.repin_link);
+ /*
+ * We might have already done the pin once, but then had
+ * to retry before the re-bind happened due to some other
+ * condition in the caller. In the meantime the userptr
+ * got invalidated by the notifier, so we need to
+ * revalidate here, and this time we hit the EFAULT. In
+ * that case make sure we remove ourselves from the
+ * rebind list to avoid going down in flames.
+ */
+ if (!list_empty(&uvma->vma.combined_links.rebind))
+ list_del_init(&uvma->vma.combined_links.rebind);
/* Wait for pending binds */
xe_vm_lock(vm, false);
@@ -691,10 +735,10 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
err = xe_vm_invalidate_vma(&uvma->vma);
xe_vm_unlock(vm);
if (err)
- return err;
+ break;
} else {
- if (err < 0)
- return err;
+ if (err)
+ break;
list_del_init(&uvma->userptr.repin_link);
list_move_tail(&uvma->vma.combined_links.rebind,
@@ -702,7 +746,19 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
}
}
- return 0;
+ if (err) {
+ down_write(&vm->userptr.notifier_lock);
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
+ userptr.repin_link) {
+ list_del_init(&uvma->userptr.repin_link);
+ list_move_tail(&uvma->userptr.invalidate_link,
+ &vm->userptr.invalidated);
+ }
+ spin_unlock(&vm->userptr.invalidated_lock);
+ up_write(&vm->userptr.notifier_lock);
+ }
+ return err;
}
/**
@@ -894,6 +950,179 @@ free_ops:
return fence;
}
+static void xe_vm_populate_range_rebind(struct xe_vma_op *op,
+ struct xe_vma *vma,
+ struct xe_svm_range *range,
+ u8 tile_mask)
+{
+ INIT_LIST_HEAD(&op->link);
+ op->tile_mask = tile_mask;
+ op->base.op = DRM_GPUVA_OP_DRIVER;
+ op->subop = XE_VMA_SUBOP_MAP_RANGE;
+ op->map_range.vma = vma;
+ op->map_range.range = range;
+}
+
+static int
+xe_vm_ops_add_range_rebind(struct xe_vma_ops *vops,
+ struct xe_vma *vma,
+ struct xe_svm_range *range,
+ u8 tile_mask)
+{
+ struct xe_vma_op *op;
+
+ op = kzalloc(sizeof(*op), GFP_KERNEL);
+ if (!op)
+ return -ENOMEM;
+
+ xe_vm_populate_range_rebind(op, vma, range, tile_mask);
+ list_add_tail(&op->link, &vops->list);
+ xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
+
+ return 0;
+}
+
+/**
+ * xe_vm_range_rebind() - VM range (re)bind
+ * @vm: The VM which the range belongs to.
+ * @vma: The VMA which the range belongs to.
+ * @range: SVM range to rebind.
+ * @tile_mask: Tile mask to bind the range to.
+ *
+ * (Re)bind an SVM range, setting up GPU page tables for the range.
+ *
+ * Return: dma fence for rebind to signal completion on success, ERR_PTR on
+ * failure
+ */
+struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
+ struct xe_vma *vma,
+ struct xe_svm_range *range,
+ u8 tile_mask)
+{
+ struct dma_fence *fence = NULL;
+ struct xe_vma_ops vops;
+ struct xe_vma_op *op, *next_op;
+ struct xe_tile *tile;
+ u8 id;
+ int err;
+
+ lockdep_assert_held(&vm->lock);
+ xe_vm_assert_held(vm);
+ xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
+ xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
+
+ xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+ for_each_tile(tile, vm->xe, id) {
+ vops.pt_update_ops[id].wait_vm_bookkeep = true;
+ vops.pt_update_ops[tile->id].q =
+ xe_tile_migrate_exec_queue(tile);
+ }
+
+ err = xe_vm_ops_add_range_rebind(&vops, vma, range, tile_mask);
+ if (err)
+ return ERR_PTR(err);
+
+ err = xe_vma_ops_alloc(&vops, false);
+ if (err) {
+ fence = ERR_PTR(err);
+ goto free_ops;
+ }
+
+ fence = ops_execute(vm, &vops);
+
+free_ops:
+ list_for_each_entry_safe(op, next_op, &vops.list, link) {
+ list_del(&op->link);
+ kfree(op);
+ }
+ xe_vma_ops_fini(&vops);
+
+ return fence;
+}
+
+static void xe_vm_populate_range_unbind(struct xe_vma_op *op,
+ struct xe_svm_range *range)
+{
+ INIT_LIST_HEAD(&op->link);
+ op->tile_mask = range->tile_present;
+ op->base.op = DRM_GPUVA_OP_DRIVER;
+ op->subop = XE_VMA_SUBOP_UNMAP_RANGE;
+ op->unmap_range.range = range;
+}
+
+static int
+xe_vm_ops_add_range_unbind(struct xe_vma_ops *vops,
+ struct xe_svm_range *range)
+{
+ struct xe_vma_op *op;
+
+ op = kzalloc(sizeof(*op), GFP_KERNEL);
+ if (!op)
+ return -ENOMEM;
+
+ xe_vm_populate_range_unbind(op, range);
+ list_add_tail(&op->link, &vops->list);
+ xe_vma_ops_incr_pt_update_ops(vops, range->tile_present);
+
+ return 0;
+}
+
+/**
+ * xe_vm_range_unbind() - VM range unbind
+ * @vm: The VM which the range belongs to.
+ * @range: SVM range to unbind.
+ *
+ * Unbind an SVM range, removing the GPU page tables for the range.
+ *
+ * Return: dma fence for unbind to signal completion on success, ERR_PTR on
+ * failure
+ */
+struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
+ struct xe_svm_range *range)
+{
+ struct dma_fence *fence = NULL;
+ struct xe_vma_ops vops;
+ struct xe_vma_op *op, *next_op;
+ struct xe_tile *tile;
+ u8 id;
+ int err;
+
+ lockdep_assert_held(&vm->lock);
+ xe_vm_assert_held(vm);
+ xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
+
+ if (!range->tile_present)
+ return dma_fence_get_stub();
+
+ xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+ for_each_tile(tile, vm->xe, id) {
+ vops.pt_update_ops[id].wait_vm_bookkeep = true;
+ vops.pt_update_ops[tile->id].q =
+ xe_tile_migrate_exec_queue(tile);
+ }
+
+ err = xe_vm_ops_add_range_unbind(&vops, range);
+ if (err)
+ return ERR_PTR(err);
+
+ err = xe_vma_ops_alloc(&vops, false);
+ if (err) {
+ fence = ERR_PTR(err);
+ goto free_ops;
+ }
+
+ fence = ops_execute(vm, &vops);
+
+free_ops:
+ list_for_each_entry_safe(op, next_op, &vops.list, link) {
+ list_del(&op->link);
+ kfree(op);
+ }
+ xe_vma_ops_fini(&vops);
+
+ return fence;
+}
+
static void xe_vma_free(struct xe_vma *vma)
{
if (xe_vma_is_userptr(vma))
@@ -902,9 +1131,10 @@ static void xe_vma_free(struct xe_vma *vma)
kfree(vma);
}
-#define VMA_CREATE_FLAG_READ_ONLY BIT(0)
-#define VMA_CREATE_FLAG_IS_NULL BIT(1)
-#define VMA_CREATE_FLAG_DUMPABLE BIT(2)
+#define VMA_CREATE_FLAG_READ_ONLY BIT(0)
+#define VMA_CREATE_FLAG_IS_NULL BIT(1)
+#define VMA_CREATE_FLAG_DUMPABLE BIT(2)
+#define VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR BIT(3)
static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_bo *bo,
@@ -918,6 +1148,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
+ bool is_cpu_addr_mirror =
+ (flags & VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR);
xe_assert(vm->xe, start < end);
xe_assert(vm->xe, end < vm->size);
@@ -926,7 +1158,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
* Allocate and ensure that the xe_vma_is_userptr() return
* matches what was allocated.
*/
- if (!bo && !is_null) {
+ if (!bo && !is_null && !is_cpu_addr_mirror) {
struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
if (!uvma)
@@ -938,6 +1170,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
if (!vma)
return ERR_PTR(-ENOMEM);
+ if (is_cpu_addr_mirror)
+ vma->gpuva.flags |= XE_VMA_SYSTEM_ALLOCATOR;
if (is_null)
vma->gpuva.flags |= DRM_GPUVA_SPARSE;
if (bo)
@@ -980,7 +1214,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
drm_gpuva_link(&vma->gpuva, vm_bo);
drm_gpuvm_bo_put(vm_bo);
} else /* userptr or null */ {
- if (!is_null) {
+ if (!is_null && !is_cpu_addr_mirror) {
struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
u64 size = end - start + 1;
int err;
@@ -988,6 +1222,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
INIT_LIST_HEAD(&userptr->invalidate_link);
INIT_LIST_HEAD(&userptr->repin_link);
vma->gpuva.gem.offset = bo_offset_or_userptr;
+ mutex_init(&userptr->unmap_mutex);
err = mmu_interval_notifier_insert(&userptr->notifier,
current->mm,
@@ -1029,8 +1264,9 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
* them anymore
*/
mmu_interval_notifier_remove(&userptr->notifier);
+ mutex_destroy(&userptr->unmap_mutex);
xe_vm_put(vm);
- } else if (xe_vma_is_null(vma)) {
+ } else if (xe_vma_is_null(vma) || xe_vma_is_cpu_addr_mirror(vma)) {
xe_vm_put(vm);
} else {
xe_bo_put(xe_vma_bo(vma));
@@ -1067,9 +1303,10 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
spin_lock(&vm->userptr.invalidated_lock);
+ xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link));
list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
spin_unlock(&vm->userptr.invalidated_lock);
- } else if (!xe_vma_is_null(vma)) {
+ } else if (!xe_vma_is_null(vma) && !xe_vma_is_cpu_addr_mirror(vma)) {
xe_bo_assert_held(xe_vma_bo(vma));
drm_gpuva_unlink(&vma->gpuva);
@@ -1520,6 +1757,12 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
}
}
+ if (flags & XE_VM_FLAG_FAULT_MODE) {
+ err = xe_svm_init(vm);
+ if (err)
+ goto err_close;
+ }
+
if (number_tiles > 1)
vm->composite_fence_ctx = dma_fence_context_alloc(1);
@@ -1546,9 +1789,44 @@ err_no_resv:
static void xe_vm_close(struct xe_vm *vm)
{
+ struct xe_device *xe = vm->xe;
+ bool bound;
+ int idx;
+
+ bound = drm_dev_enter(&xe->drm, &idx);
+
down_write(&vm->lock);
+ if (xe_vm_in_fault_mode(vm))
+ xe_svm_notifier_lock(vm);
+
vm->size = 0;
+
+ if (!((vm->flags & XE_VM_FLAG_MIGRATION))) {
+ struct xe_tile *tile;
+ struct xe_gt *gt;
+ u8 id;
+
+ /* Wait for pending binds */
+ dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+
+ if (bound) {
+ for_each_tile(tile, xe, id)
+ if (vm->pt_root[id])
+ xe_pt_clear(xe, vm->pt_root[id]);
+
+ for_each_gt(gt, xe, id)
+ xe_gt_tlb_invalidation_vm(gt, vm);
+ }
+ }
+
+ if (xe_vm_in_fault_mode(vm))
+ xe_svm_notifier_unlock(vm);
up_write(&vm->lock);
+
+ if (bound)
+ drm_dev_exit(idx);
}
void xe_vm_close_and_put(struct xe_vm *vm)
@@ -1565,6 +1843,8 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe_vm_close(vm);
if (xe_vm_in_preempt_fence_mode(vm))
flush_work(&vm->preempt.rebind_work);
+ if (xe_vm_in_fault_mode(vm))
+ xe_svm_close(vm);
down_write(&vm->lock);
for_each_tile(tile, xe, id) {
@@ -1633,6 +1913,9 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe_vma_destroy_unlocked(vma);
}
+ if (xe_vm_in_fault_mode(vm))
+ xe_svm_fini(vm);
+
up_write(&vm->lock);
down_write(&xe->usm.lock);
@@ -1989,6 +2272,8 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
op->map.read_only =
flags & DRM_XE_VM_BIND_FLAG_READONLY;
op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
+ op->map.is_cpu_addr_mirror = flags &
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
op->map.pat_index = pat_index;
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
@@ -2181,6 +2466,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
VMA_CREATE_FLAG_IS_NULL : 0;
flags |= op->map.dumpable ?
VMA_CREATE_FLAG_DUMPABLE : 0;
+ flags |= op->map.is_cpu_addr_mirror ?
+ VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
vma = new_vma(vm, &op->base.map, op->map.pat_index,
flags);
@@ -2188,7 +2475,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
return PTR_ERR(vma);
op->map.vma = vma;
- if (op->map.immediate || !xe_vm_in_fault_mode(vm))
+ if ((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
+ !op->map.is_cpu_addr_mirror)
xe_vma_ops_incr_pt_update_ops(vops,
op->tile_mask);
break;
@@ -2197,21 +2485,35 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
{
struct xe_vma *old =
gpuva_to_vma(op->base.remap.unmap->va);
+ bool skip = xe_vma_is_cpu_addr_mirror(old);
+ u64 start = xe_vma_start(old), end = xe_vma_end(old);
+
+ if (op->base.remap.prev)
+ start = op->base.remap.prev->va.addr +
+ op->base.remap.prev->va.range;
+ if (op->base.remap.next)
+ end = op->base.remap.next->va.addr;
+
+ if (xe_vma_is_cpu_addr_mirror(old) &&
+ xe_svm_has_mapping(vm, start, end))
+ return -EBUSY;
op->remap.start = xe_vma_start(old);
op->remap.range = xe_vma_size(old);
- if (op->base.remap.prev) {
- flags |= op->base.remap.unmap->va->flags &
- XE_VMA_READ_ONLY ?
- VMA_CREATE_FLAG_READ_ONLY : 0;
- flags |= op->base.remap.unmap->va->flags &
- DRM_GPUVA_SPARSE ?
- VMA_CREATE_FLAG_IS_NULL : 0;
- flags |= op->base.remap.unmap->va->flags &
- XE_VMA_DUMPABLE ?
- VMA_CREATE_FLAG_DUMPABLE : 0;
+ flags |= op->base.remap.unmap->va->flags &
+ XE_VMA_READ_ONLY ?
+ VMA_CREATE_FLAG_READ_ONLY : 0;
+ flags |= op->base.remap.unmap->va->flags &
+ DRM_GPUVA_SPARSE ?
+ VMA_CREATE_FLAG_IS_NULL : 0;
+ flags |= op->base.remap.unmap->va->flags &
+ XE_VMA_DUMPABLE ?
+ VMA_CREATE_FLAG_DUMPABLE : 0;
+ flags |= xe_vma_is_cpu_addr_mirror(old) ?
+ VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
+ if (op->base.remap.prev) {
vma = new_vma(vm, op->base.remap.prev,
old->pat_index, flags);
if (IS_ERR(vma))
@@ -2223,9 +2525,10 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
* Userptr creates a new SG mapping so
* we must also rebind.
*/
- op->remap.skip_prev = !xe_vma_is_userptr(old) &&
+ op->remap.skip_prev = skip ||
+ (!xe_vma_is_userptr(old) &&
IS_ALIGNED(xe_vma_end(vma),
- xe_vma_max_pte_size(old));
+ xe_vma_max_pte_size(old)));
if (op->remap.skip_prev) {
xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
op->remap.range -=
@@ -2241,16 +2544,6 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
}
if (op->base.remap.next) {
- flags |= op->base.remap.unmap->va->flags &
- XE_VMA_READ_ONLY ?
- VMA_CREATE_FLAG_READ_ONLY : 0;
- flags |= op->base.remap.unmap->va->flags &
- DRM_GPUVA_SPARSE ?
- VMA_CREATE_FLAG_IS_NULL : 0;
- flags |= op->base.remap.unmap->va->flags &
- XE_VMA_DUMPABLE ?
- VMA_CREATE_FLAG_DUMPABLE : 0;
-
vma = new_vma(vm, op->base.remap.next,
old->pat_index, flags);
if (IS_ERR(vma))
@@ -2262,9 +2555,10 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
* Userptr creates a new SG mapping so
* we must also rebind.
*/
- op->remap.skip_next = !xe_vma_is_userptr(old) &&
+ op->remap.skip_next = skip ||
+ (!xe_vma_is_userptr(old) &&
IS_ALIGNED(xe_vma_start(vma),
- xe_vma_max_pte_size(old));
+ xe_vma_max_pte_size(old)));
if (op->remap.skip_next) {
xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
op->remap.range -=
@@ -2277,13 +2571,32 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
}
}
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ if (!skip)
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
break;
}
case DRM_GPUVA_OP_UNMAP:
+ vma = gpuva_to_vma(op->base.unmap.va);
+
+ if (xe_vma_is_cpu_addr_mirror(vma) &&
+ xe_svm_has_mapping(vm, xe_vma_start(vma),
+ xe_vma_end(vma)))
+ return -EBUSY;
+
+ if (!xe_vma_is_cpu_addr_mirror(vma))
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ break;
case DRM_GPUVA_OP_PREFETCH:
- /* FIXME: Need to skip some prefetch ops */
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ vma = gpuva_to_vma(op->base.prefetch.va);
+
+ if (xe_vma_is_userptr(vma)) {
+ err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
+ if (err)
+ return err;
+ }
+
+ if (!xe_vma_is_cpu_addr_mirror(vma))
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
@@ -2509,6 +2822,8 @@ static void op_trace(struct xe_vma_op *op)
case DRM_GPUVA_OP_PREFETCH:
trace_xe_vma_bind(gpuva_to_vma(op->base.prefetch.va));
break;
+ case DRM_GPUVA_OP_DRIVER:
+ break;
default:
XE_WARN_ON("NOT POSSIBLE");
}
@@ -2686,9 +3001,11 @@ static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
}
if (ufence)
xe_sync_ufence_put(ufence);
- for (i = 0; i < vops->num_syncs; i++)
- xe_sync_entry_signal(vops->syncs + i, fence);
- xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
+ if (fence) {
+ for (i = 0; i < vops->num_syncs; i++)
+ xe_sync_entry_signal(vops->syncs + i, fence);
+ xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
+ }
}
static struct dma_fence *vm_bind_ioctl_ops_execute(struct xe_vm *vm,
@@ -2711,8 +3028,11 @@ static struct dma_fence *vm_bind_ioctl_ops_execute(struct xe_vm *vm,
}
fence = ops_execute(vm, vops);
- if (IS_ERR(fence))
+ if (IS_ERR(fence)) {
+ if (PTR_ERR(fence) == -ENODATA)
+ vm_bind_ioctl_ops_fini(vm, vops, NULL);
goto unlock;
+ }
vm_bind_ioctl_ops_fini(vm, vops, fence);
}
@@ -2728,7 +3048,8 @@ ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
DRM_XE_VM_BIND_FLAG_NULL | \
DRM_XE_VM_BIND_FLAG_DUMPABLE | \
- DRM_XE_VM_BIND_FLAG_CHECK_PXP)
+ DRM_XE_VM_BIND_FLAG_CHECK_PXP | \
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
#ifdef TEST_VM_OPS_ERROR
#define SUPPORTED_FLAGS (SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
@@ -2739,7 +3060,7 @@ ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
#define XE_64K_PAGE_MASK 0xffffull
#define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
-static int vm_bind_ioctl_check_args(struct xe_device *xe,
+static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
struct drm_xe_vm_bind *args,
struct drm_xe_vm_bind_op **bind_ops)
{
@@ -2784,9 +3105,18 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
u64 obj_offset = (*bind_ops)[i].obj_offset;
u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
+ bool is_cpu_addr_mirror = flags &
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
u16 pat_index = (*bind_ops)[i].pat_index;
u16 coh_mode;
+ if (XE_IOCTL_DBG(xe, is_cpu_addr_mirror &&
+ (!xe_vm_in_fault_mode(vm) ||
+ !IS_ENABLED(CONFIG_DRM_GPUSVM)))) {
+ err = -EINVAL;
+ goto free_bind_ops;
+ }
+
if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) {
err = -EINVAL;
goto free_bind_ops;
@@ -2807,13 +3137,14 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
- XE_IOCTL_DBG(xe, obj && is_null) ||
- XE_IOCTL_DBG(xe, obj_offset && is_null) ||
+ XE_IOCTL_DBG(xe, obj && (is_null || is_cpu_addr_mirror)) ||
+ XE_IOCTL_DBG(xe, obj_offset && (is_null ||
+ is_cpu_addr_mirror)) ||
XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
- is_null) ||
+ (is_null || is_cpu_addr_mirror)) ||
XE_IOCTL_DBG(xe, !obj &&
op == DRM_XE_VM_BIND_OP_MAP &&
- !is_null) ||
+ !is_null && !is_cpu_addr_mirror) ||
XE_IOCTL_DBG(xe, !obj &&
op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
XE_IOCTL_DBG(xe, addr &&
@@ -2962,15 +3293,19 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
int err;
int i;
- err = vm_bind_ioctl_check_args(xe, args, &bind_ops);
+ vm = xe_vm_lookup(xef, args->vm_id);
+ if (XE_IOCTL_DBG(xe, !vm))
+ return -EINVAL;
+
+ err = vm_bind_ioctl_check_args(xe, vm, args, &bind_ops);
if (err)
- return err;
+ goto put_vm;
if (args->exec_queue_id) {
q = xe_exec_queue_lookup(xef, args->exec_queue_id);
if (XE_IOCTL_DBG(xe, !q)) {
err = -ENOENT;
- goto free_objs;
+ goto put_vm;
}
if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
@@ -2979,15 +3314,13 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
}
- vm = xe_vm_lookup(xef, args->vm_id);
- if (XE_IOCTL_DBG(xe, !vm)) {
- err = -EINVAL;
- goto put_exec_queue;
- }
+ /* Ensure all UNMAPs visible */
+ if (xe_vm_in_fault_mode(vm))
+ flush_work(&vm->svm.garbage_collector.work);
err = down_write_killable(&vm->lock);
if (err)
- goto put_vm;
+ goto put_exec_queue;
if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
err = -ENOENT;
@@ -3151,12 +3484,11 @@ put_obj:
xe_bo_put(bos[i]);
release_vm_lock:
up_write(&vm->lock);
-put_vm:
- xe_vm_put(vm);
put_exec_queue:
if (q)
xe_exec_queue_put(q);
-free_objs:
+put_vm:
+ xe_vm_put(vm);
kvfree(bos);
kvfree(ops);
if (args->num_binds > 1)
@@ -3288,6 +3620,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
int ret = 0;
xe_assert(xe, !xe_vma_is_null(vma));
+ xe_assert(xe, !xe_vma_is_cpu_addr_mirror(vma));
trace_xe_vma_invalidate(vma);
vm_dbg(&xe_vma_vm(vma)->xe->drm,
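For reference, vm_bind_ioctl_check_args() above only accepts DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR on fault-mode VMs, for MAP operations with no GEM object. A hypothetical userspace sketch (field names follow the xe uAPI; addresses and sizes are placeholders):

	struct drm_xe_vm_bind bind = {
		.vm_id = vm_id,	/* VM created with DRM_XE_VM_CREATE_FLAG_FAULT_MODE */
		.num_binds = 1,
		.bind = {
			.op = DRM_XE_VM_BIND_OP_MAP,
			.flags = DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR,
			.addr = cpu_va,		/* CPU address range to mirror */
			.range = size,
			.pat_index = pat_index,
			/* .obj and .obj_offset must remain 0 */
		},
	};

	ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);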
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index f66075f8a6fe..0ef811fc2bde 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -23,6 +23,7 @@ struct dma_fence;
struct xe_exec_queue;
struct xe_file;
struct xe_sync_entry;
+struct xe_svm_range;
struct drm_exec;
struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);
@@ -152,6 +153,11 @@ static inline bool xe_vma_is_null(struct xe_vma *vma)
return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}
+static inline bool xe_vma_is_cpu_addr_mirror(struct xe_vma *vma)
+{
+ return vma->gpuva.flags & XE_VMA_SYSTEM_ALLOCATOR;
+}
+
static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
{
return !xe_vma_bo(vma);
@@ -159,7 +165,8 @@ static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
- return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
+ return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma) &&
+ !xe_vma_is_cpu_addr_mirror(vma);
}
/**
@@ -212,6 +219,12 @@ int xe_vm_userptr_check_repin(struct xe_vm *vm);
int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
u8 tile_mask);
+struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
+ struct xe_vma *vma,
+ struct xe_svm_range *range,
+ u8 tile_mask);
+struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
+ struct xe_svm_range *range);
int xe_vm_invalidate_vma(struct xe_vma *vma);
@@ -282,9 +295,17 @@ static inline void vm_dbg(const struct drm_device *dev,
const char *format, ...)
{ /* noop */ }
#endif
-#endif
struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);
+
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
+#else
+static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
+{
+}
+#endif
+#endif
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 52467b9b5348..84fa41b9fa20 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -6,6 +6,7 @@
#ifndef _XE_VM_TYPES_H_
#define _XE_VM_TYPES_H_
+#include <drm/drm_gpusvm.h>
#include <drm/drm_gpuvm.h>
#include <linux/dma-resv.h>
@@ -18,6 +19,7 @@
#include "xe_range_fence.h"
struct xe_bo;
+struct xe_svm_range;
struct xe_sync_entry;
struct xe_user_fence;
struct xe_vm;
@@ -42,6 +44,7 @@ struct xe_vm_pgtable_update_op;
#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 7)
#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 8)
+#define XE_VMA_SYSTEM_ALLOCATOR (DRM_GPUVA_USERBITS << 9)
/** struct xe_userptr - User pointer */
struct xe_userptr {
@@ -59,12 +62,16 @@ struct xe_userptr {
struct sg_table *sg;
/** @notifier_seq: notifier sequence number */
unsigned long notifier_seq;
+ /** @unmap_mutex: Mutex protecting dma-unmapping */
+ struct mutex unmap_mutex;
/**
* @initial_bind: user pointer has been bound at least once.
* write: vm->userptr.notifier_lock in read mode and vm->resv held.
* read: vm->userptr.notifier_lock in write mode or vm->resv held.
*/
bool initial_bind;
+ /** @mapped: Whether the @sgt sg-table is dma-mapped. Protected by @unmap_mutex. */
+ bool mapped;
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
u32 divisor;
#endif
@@ -139,6 +146,30 @@ struct xe_vm {
/** @gpuvm: base GPUVM used to track VMAs */
struct drm_gpuvm gpuvm;
+ /** @svm: Shared virtual memory state */
+ struct {
+ /** @svm.gpusvm: base GPUSVM used to track fault allocations */
+ struct drm_gpusvm gpusvm;
+ /**
+ * @svm.garbage_collector: Garbage collector which is used to
+ * unmap an SVM range's GPU bindings and destroy the ranges.
+ */
+ struct {
+ /** @svm.garbage_collector.lock: Protects the range list */
+ spinlock_t lock;
+ /**
+ * @svm.garbage_collector.range_list: List of SVM ranges
+ * in the garbage collector.
+ */
+ struct list_head range_list;
+ /**
+ * @svm.garbage_collector.work: Worker which the
+ * garbage collector runs on.
+ */
+ struct work_struct work;
+ } garbage_collector;
+ } svm;
+
struct xe_device *xe;
/* exec queue used for (un)binding vma's */
@@ -228,8 +259,8 @@ struct xe_vm {
* up for revalidation. Protected from access with the
* @invalidated_lock. Removing items from the list
* additionally requires @lock in write mode, and adding
- * items to the list requires the @userptr.notifer_lock in
- * write mode.
+ * items to the list requires either the @userptr.notifier_lock in
+ * write mode, OR @lock in write mode.
*/
struct list_head invalidated;
} userptr;
@@ -295,6 +326,8 @@ struct xe_vma_op_map {
bool read_only;
/** @is_null: is NULL binding */
bool is_null;
+ /** @is_cpu_addr_mirror: is CPU address mirror binding */
+ bool is_cpu_addr_mirror;
/** @dumpable: whether BO is dumped on GPU hang */
bool dumpable;
/** @pat_index: The pat index to use for this operation. */
@@ -325,6 +358,20 @@ struct xe_vma_op_prefetch {
u32 region;
};
+/** struct xe_vma_op_map_range - VMA map range operation */
+struct xe_vma_op_map_range {
+ /** @vma: VMA to map (system allocator VMA) */
+ struct xe_vma *vma;
+ /** @range: SVM range to map */
+ struct xe_svm_range *range;
+};
+
+/** struct xe_vma_op_unmap_range - VMA unmap range operation */
+struct xe_vma_op_unmap_range {
+ /** @range: SVM range to unmap */
+ struct xe_svm_range *range;
+};
+
/** enum xe_vma_op_flags - flags for VMA operation */
enum xe_vma_op_flags {
/** @XE_VMA_OP_COMMITTED: VMA operation committed */
@@ -335,6 +382,14 @@ enum xe_vma_op_flags {
XE_VMA_OP_NEXT_COMMITTED = BIT(2),
};
+/** enum xe_vma_subop - VMA sub-operation */
+enum xe_vma_subop {
+ /** @XE_VMA_SUBOP_MAP_RANGE: Map range */
+ XE_VMA_SUBOP_MAP_RANGE,
+ /** @XE_VMA_SUBOP_UNMAP_RANGE: Unmap range */
+ XE_VMA_SUBOP_UNMAP_RANGE,
+};
+
/** struct xe_vma_op - VMA operation */
struct xe_vma_op {
/** @base: GPUVA base operation */
@@ -343,6 +398,8 @@ struct xe_vma_op {
struct list_head link;
/** @flags: operation flags */
enum xe_vma_op_flags flags;
+ /** @subop: user defined sub-operation */
+ enum xe_vma_subop subop;
/** @tile_mask: Tile mask for operation */
u8 tile_mask;
@@ -353,6 +410,10 @@ struct xe_vma_op {
struct xe_vma_op_remap remap;
/** @prefetch: VMA prefetch operation specific data */
struct xe_vma_op_prefetch prefetch;
+ /** @map_range: VMA map range operation specific data */
+ struct xe_vma_op_map_range map_range;
+ /** @unmap_range: VMA unmap range operation specific data */
+ struct xe_vma_op_unmap_range unmap_range;
};
};
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index d4982799383c..a25afb757f70 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -279,8 +279,6 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), RAMDFTUNIT_CLKGATE_DIS)),
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
},
-
- {}
};
static const struct xe_rtp_entry_sr engine_was[] = {
@@ -619,11 +617,28 @@ static const struct xe_rtp_entry_sr engine_was[] = {
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(TDL_CHICKEN, QID_WAIT_FOR_THREAD_NOT_RUN_DISABLE))
},
-
- {}
+ { XE_RTP_NAME("13012615864"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS))
+ },
};
static const struct xe_rtp_entry_sr lrc_was[] = {
+ { XE_RTP_NAME("16011163337"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
+ /* read verification is ignored due to 1608008084. */
+ XE_RTP_ACTIONS(FIELD_SET_NO_READ_MASK(FF_MODE2,
+ FF_MODE2_GS_TIMER_MASK,
+ FF_MODE2_GS_TIMER_224))
+ },
+ { XE_RTP_NAME("1604555607"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
+ /* read verification is ignored due to 1608008084. */
+ XE_RTP_ACTIONS(FIELD_SET_NO_READ_MASK(FF_MODE2,
+ FF_MODE2_TDS_TIMER_MASK,
+ FF_MODE2_TDS_TIMER_128))
+ },
{ XE_RTP_NAME("1409342910, 14010698770, 14010443199, 1408979724, 1409178076, 1409207793, 1409217633, 1409252684, 1409347922, 1409142259"),
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210)),
XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN3,
@@ -806,8 +821,6 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
DIS_PARTIAL_AUTOSTRIP |
DIS_AUTOSTRIP))
},
-
- {}
};
static __maybe_unused const struct xe_rtp_entry oob_was[] = {
@@ -849,7 +862,7 @@ void xe_wa_process_gt(struct xe_gt *gt)
xe_rtp_process_ctx_enable_active_tracking(&ctx, gt->wa_active.gt,
ARRAY_SIZE(gt_was));
- xe_rtp_process_to_sr(&ctx, gt_was, &gt->reg_sr);
+ xe_rtp_process_to_sr(&ctx, gt_was, ARRAY_SIZE(gt_was), &gt->reg_sr);
}
EXPORT_SYMBOL_IF_KUNIT(xe_wa_process_gt);
@@ -867,7 +880,7 @@ void xe_wa_process_engine(struct xe_hw_engine *hwe)
xe_rtp_process_ctx_enable_active_tracking(&ctx, hwe->gt->wa_active.engine,
ARRAY_SIZE(engine_was));
- xe_rtp_process_to_sr(&ctx, engine_was, &hwe->reg_sr);
+ xe_rtp_process_to_sr(&ctx, engine_was, ARRAY_SIZE(engine_was), &hwe->reg_sr);
}
/**
@@ -884,7 +897,7 @@ void xe_wa_process_lrc(struct xe_hw_engine *hwe)
xe_rtp_process_ctx_enable_active_tracking(&ctx, hwe->gt->wa_active.lrc,
ARRAY_SIZE(lrc_was));
- xe_rtp_process_to_sr(&ctx, lrc_was, &hwe->reg_lrc);
+ xe_rtp_process_to_sr(&ctx, lrc_was, ARRAY_SIZE(lrc_was), &hwe->reg_lrc);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 228436532282..e0c5fa460487 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -5,6 +5,7 @@
22011391025 PLATFORM(DG2)
22012727170 SUBPLATFORM(DG2, G11)
22012727685 SUBPLATFORM(DG2, G11)
+22016596838 PLATFORM(PVC)
18020744125 PLATFORM(PVC)
1509372804 PLATFORM(PVC), GRAPHICS_STEP(A0, C0)
1409600907 GRAPHICS_VERSION_RANGE(1200, 1250)
@@ -43,3 +44,12 @@
no_media_l3 MEDIA_VERSION(3000)
14022866841 GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0)
MEDIA_VERSION(3000), MEDIA_STEP(A0, B0)
+16021333562 GRAPHICS_VERSION_RANGE(1200, 1274)
+ MEDIA_VERSION(1300)
+14016712196 GRAPHICS_VERSION(1255)
+ GRAPHICS_VERSION_RANGE(1270, 1274)
+14015568240 GRAPHICS_VERSION_RANGE(1255, 1260)
+18013179988 GRAPHICS_VERSION(1255)
+ GRAPHICS_VERSION_RANGE(1270, 1274)
+1508761755 GRAPHICS_VERSION(1255)
+ GRAPHICS_VERSION(1260), GRAPHICS_STEP(A0, B0)
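Each of these OOB entries becomes a generated XE_WA_OOB_<number> bit that driver code tests through the existing XE_WA() helper; a hedged sketch of such a check (the surrounding condition and variable are illustrative):

	if (XE_WA(gt, 16021333562))
		use_alternate_programming = true;	/* take the workaround path */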
diff --git a/drivers/gpu/nova-core/Kconfig b/drivers/gpu/nova-core/Kconfig
new file mode 100644
index 000000000000..ad0c06756516
--- /dev/null
+++ b/drivers/gpu/nova-core/Kconfig
@@ -0,0 +1,14 @@
+config NOVA_CORE
+ tristate "Nova Core GPU driver"
+ depends on PCI
+ depends on RUST
+ depends on RUST_FW_LOADER_ABSTRACTIONS
+ default n
+ help
+ Choose this if you want to build the Nova Core driver for Nvidia
+ GPUs based on the GPU System Processor (GSP), which is present on
+ Turing and later GPUs.
+
+ This driver is work in progress and may not be functional.
+
+ If M is selected, the module will be called nova_core.
diff --git a/drivers/gpu/nova-core/Makefile b/drivers/gpu/nova-core/Makefile
new file mode 100644
index 000000000000..2d78c50126e1
--- /dev/null
+++ b/drivers/gpu/nova-core/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_NOVA_CORE) += nova_core.o
diff --git a/drivers/gpu/nova-core/driver.rs b/drivers/gpu/nova-core/driver.rs
new file mode 100644
index 000000000000..63c19f140fbd
--- /dev/null
+++ b/drivers/gpu/nova-core/driver.rs
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::{bindings, c_str, pci, prelude::*};
+
+use crate::gpu::Gpu;
+
+#[pin_data]
+pub(crate) struct NovaCore {
+ #[pin]
+ pub(crate) gpu: Gpu,
+}
+
+const BAR0_SIZE: usize = 8;
+pub(crate) type Bar0 = pci::Bar<BAR0_SIZE>;
+
+kernel::pci_device_table!(
+ PCI_TABLE,
+ MODULE_PCI_TABLE,
+ <NovaCore as pci::Driver>::IdInfo,
+ [(
+ pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_NVIDIA, bindings::PCI_ANY_ID as _),
+ ()
+ )]
+);
+
+impl pci::Driver for NovaCore {
+ type IdInfo = ();
+ const ID_TABLE: pci::IdTable<Self::IdInfo> = &PCI_TABLE;
+
+ fn probe(pdev: &mut pci::Device, _info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
+ dev_dbg!(pdev.as_ref(), "Probe Nova Core GPU driver.\n");
+
+ pdev.enable_device_mem()?;
+ pdev.set_master();
+
+ let bar = pdev.iomap_region_sized::<BAR0_SIZE>(0, c_str!("nova-core/bar0"))?;
+
+ let this = KBox::pin_init(
+ try_pin_init!(Self {
+ gpu <- Gpu::new(pdev, bar)?,
+ }),
+ GFP_KERNEL,
+ )?;
+
+ Ok(this)
+ }
+}
diff --git a/drivers/gpu/nova-core/firmware.rs b/drivers/gpu/nova-core/firmware.rs
new file mode 100644
index 000000000000..6e6361c59ca1
--- /dev/null
+++ b/drivers/gpu/nova-core/firmware.rs
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use crate::gpu;
+use kernel::firmware;
+
+pub(crate) struct ModInfoBuilder<const N: usize>(firmware::ModInfoBuilder<N>);
+
+impl<const N: usize> ModInfoBuilder<N> {
+ const VERSION: &'static str = "535.113.01";
+
+ const fn make_entry_file(self, chipset: &str, fw: &str) -> Self {
+ ModInfoBuilder(
+ self.0
+ .new_entry()
+ .push("nvidia/")
+ .push(chipset)
+ .push("/gsp/")
+ .push(fw)
+ .push("-")
+ .push(Self::VERSION)
+ .push(".bin"),
+ )
+ }
+
+ const fn make_entry_chipset(self, chipset: &str) -> Self {
+ self.make_entry_file(chipset, "booter_load")
+ .make_entry_file(chipset, "booter_unload")
+ .make_entry_file(chipset, "bootloader")
+ .make_entry_file(chipset, "gsp")
+ }
+
+ pub(crate) const fn create(
+ module_name: &'static kernel::str::CStr,
+ ) -> firmware::ModInfoBuilder<N> {
+ let mut this = Self(firmware::ModInfoBuilder::new(module_name));
+ let mut i = 0;
+
+ while i < gpu::Chipset::NAMES.len() {
+ this = this.make_entry_chipset(gpu::Chipset::NAMES[i]);
+ i += 1;
+ }
+
+ this.0
+ }
+}
diff --git a/drivers/gpu/nova-core/gpu.rs b/drivers/gpu/nova-core/gpu.rs
new file mode 100644
index 000000000000..17c9660da450
--- /dev/null
+++ b/drivers/gpu/nova-core/gpu.rs
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::{
+ device, devres::Devres, error::code::*, firmware, fmt, pci, prelude::*, str::CString,
+};
+
+use crate::driver::Bar0;
+use crate::regs;
+use crate::util;
+use core::fmt;
+
+macro_rules! define_chipset {
+ ({ $($variant:ident = $value:expr),* $(,)* }) =>
+ {
+ /// Enum representation of the GPU chipset.
+ #[derive(fmt::Debug)]
+ pub(crate) enum Chipset {
+ $($variant = $value),*,
+ }
+
+ impl Chipset {
+ pub(crate) const ALL: &'static [Chipset] = &[
+ $( Chipset::$variant, )*
+ ];
+
+ pub(crate) const NAMES: [&'static str; Self::ALL.len()] = [
+ $( util::const_bytes_to_str(
+ util::to_lowercase_bytes::<{ stringify!($variant).len() }>(
+ stringify!($variant)
+ ).as_slice()
+ ), )*
+ ];
+ }
+
+ // TODO replace with something like derive(FromPrimitive)
+ impl TryFrom<u32> for Chipset {
+ type Error = kernel::error::Error;
+
+ fn try_from(value: u32) -> Result<Self, Self::Error> {
+ match value {
+ $( $value => Ok(Chipset::$variant), )*
+ _ => Err(ENODEV),
+ }
+ }
+ }
+ }
+}
+
+define_chipset!({
+ // Turing
+ TU102 = 0x162,
+ TU104 = 0x164,
+ TU106 = 0x166,
+ TU117 = 0x167,
+ TU116 = 0x168,
+ // Ampere
+ GA102 = 0x172,
+ GA103 = 0x173,
+ GA104 = 0x174,
+ GA106 = 0x176,
+ GA107 = 0x177,
+ // Ada
+ AD102 = 0x192,
+ AD103 = 0x193,
+ AD104 = 0x194,
+ AD106 = 0x196,
+ AD107 = 0x197,
+});
+
+impl Chipset {
+ pub(crate) fn arch(&self) -> Architecture {
+ match self {
+ Self::TU102 | Self::TU104 | Self::TU106 | Self::TU117 | Self::TU116 => {
+ Architecture::Turing
+ }
+ Self::GA102 | Self::GA103 | Self::GA104 | Self::GA106 | Self::GA107 => {
+ Architecture::Ampere
+ }
+ Self::AD102 | Self::AD103 | Self::AD104 | Self::AD106 | Self::AD107 => {
+ Architecture::Ada
+ }
+ }
+ }
+}
+
+// TODO
+//
+// The resulting strings are used to generate firmware paths, so the
+// generated strings have to be stable.
+//
+// Hence, replace with something like strum_macros derive(Display).
+//
+// For now, redirect to fmt::Debug for convenience.
+impl fmt::Display for Chipset {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?}", self)
+ }
+}
+
+/// Enum representation of the GPU generation.
+#[derive(fmt::Debug)]
+pub(crate) enum Architecture {
+ Turing,
+ Ampere,
+ Ada,
+}
+
+pub(crate) struct Revision {
+ major: u8,
+ minor: u8,
+}
+
+impl Revision {
+ fn from_boot0(boot0: regs::Boot0) -> Self {
+ Self {
+ major: boot0.major_rev(),
+ minor: boot0.minor_rev(),
+ }
+ }
+}
+
+impl fmt::Display for Revision {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:x}.{:x}", self.major, self.minor)
+ }
+}
+
+/// Structure holding the metadata of the GPU.
+pub(crate) struct Spec {
+ chipset: Chipset,
+ /// The revision of the chipset.
+ revision: Revision,
+}
+
+impl Spec {
+ fn new(bar: &Devres<Bar0>) -> Result<Spec> {
+ let bar = bar.try_access().ok_or(ENXIO)?;
+ let boot0 = regs::Boot0::read(&bar);
+
+ Ok(Self {
+ chipset: boot0.chipset().try_into()?,
+ revision: Revision::from_boot0(boot0),
+ })
+ }
+}
+
+/// Structure encapsulating the firmware blobs required for the GPU to operate.
+#[expect(dead_code)]
+pub(crate) struct Firmware {
+ booter_load: firmware::Firmware,
+ booter_unload: firmware::Firmware,
+ bootloader: firmware::Firmware,
+ gsp: firmware::Firmware,
+}
+
+impl Firmware {
+ fn new(dev: &device::Device, spec: &Spec, ver: &str) -> Result<Firmware> {
+ let mut chip_name = CString::try_from_fmt(fmt!("{}", spec.chipset))?;
+ chip_name.make_ascii_lowercase();
+
+ let request = |name_| {
+ CString::try_from_fmt(fmt!("nvidia/{}/gsp/{}-{}.bin", &*chip_name, name_, ver))
+ .and_then(|path| firmware::Firmware::request(&path, dev))
+ };
+
+ Ok(Firmware {
+ booter_load: request("booter_load")?,
+ booter_unload: request("booter_unload")?,
+ bootloader: request("bootloader")?,
+ gsp: request("gsp")?,
+ })
+ }
+}
+
+/// Structure holding the resources required to operate the GPU.
+#[pin_data]
+pub(crate) struct Gpu {
+ spec: Spec,
+ /// MMIO mapping of PCI BAR 0
+ bar: Devres<Bar0>,
+ fw: Firmware,
+}
+
+impl Gpu {
+ pub(crate) fn new(pdev: &pci::Device, bar: Devres<Bar0>) -> Result<impl PinInit<Self>> {
+ let spec = Spec::new(&bar)?;
+ let fw = Firmware::new(pdev.as_ref(), &spec, "535.113.01")?;
+
+ dev_info!(
+ pdev.as_ref(),
+ "NVIDIA (Chipset: {}, Architecture: {:?}, Revision: {})\n",
+ spec.chipset,
+ spec.chipset.arch(),
+ spec.revision
+ );
+
+ Ok(pin_init!(Self { spec, bar, fw }))
+ }
+}
diff --git a/drivers/gpu/nova-core/nova_core.rs b/drivers/gpu/nova-core/nova_core.rs
new file mode 100644
index 000000000000..a91cd924054b
--- /dev/null
+++ b/drivers/gpu/nova-core/nova_core.rs
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Nova Core GPU Driver
+
+mod driver;
+mod firmware;
+mod gpu;
+mod regs;
+mod util;
+
+kernel::module_pci_driver! {
+ type: driver::NovaCore,
+ name: "NovaCore",
+ author: "Danilo Krummrich",
+ description: "Nova Core GPU driver",
+ license: "GPL v2",
+ firmware: [],
+}
+
+kernel::module_firmware!(firmware::ModInfoBuilder);
diff --git a/drivers/gpu/nova-core/regs.rs b/drivers/gpu/nova-core/regs.rs
new file mode 100644
index 000000000000..50aefb150b0b
--- /dev/null
+++ b/drivers/gpu/nova-core/regs.rs
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use crate::driver::Bar0;
+
+// TODO
+//
+// Create register definitions via generic macros. See task "Generic register
+// abstraction" in Documentation/gpu/nova/core/todo.rst.
+
+const BOOT0_OFFSET: usize = 0x00000000;
+
+// 3:0 - chipset minor revision
+const BOOT0_MINOR_REV_SHIFT: u8 = 0;
+const BOOT0_MINOR_REV_MASK: u32 = 0x0000000f;
+
+// 7:4 - chipset major revision
+const BOOT0_MAJOR_REV_SHIFT: u8 = 4;
+const BOOT0_MAJOR_REV_MASK: u32 = 0x000000f0;
+
+// 23:20 - chipset implementation identifier (depends on architecture)
+const BOOT0_IMPL_SHIFT: u8 = 20;
+const BOOT0_IMPL_MASK: u32 = 0x00f00000;
+
+// 28:24 - chipset architecture identifier
+const BOOT0_ARCH_MASK: u32 = 0x1f000000;
+
+// 28:20 - chipset identifier (virtual register field combining BOOT0_IMPL and
+// BOOT0_ARCH)
+const BOOT0_CHIPSET_SHIFT: u8 = BOOT0_IMPL_SHIFT;
+const BOOT0_CHIPSET_MASK: u32 = BOOT0_IMPL_MASK | BOOT0_ARCH_MASK;
+
+#[derive(Copy, Clone)]
+pub(crate) struct Boot0(u32);
+
+impl Boot0 {
+ #[inline]
+ pub(crate) fn read(bar: &Bar0) -> Self {
+ Self(bar.readl(BOOT0_OFFSET))
+ }
+
+ #[inline]
+ pub(crate) fn chipset(&self) -> u32 {
+ (self.0 & BOOT0_CHIPSET_MASK) >> BOOT0_CHIPSET_SHIFT
+ }
+
+ #[inline]
+ pub(crate) fn minor_rev(&self) -> u8 {
+ ((self.0 & BOOT0_MINOR_REV_MASK) >> BOOT0_MINOR_REV_SHIFT) as u8
+ }
+
+ #[inline]
+ pub(crate) fn major_rev(&self) -> u8 {
+ ((self.0 & BOOT0_MAJOR_REV_MASK) >> BOOT0_MAJOR_REV_SHIFT) as u8
+ }
+}
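As a worked example of the field layout above: a BOOT0 readback of 0x172000a1 gives chipset() = (0x172000a1 & 0x1ff00000) >> 20 = 0x172 (GA102), major_rev() = 0xa and minor_rev() = 0x1, which gpu.rs reports as revision "a.1".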
diff --git a/drivers/gpu/nova-core/util.rs b/drivers/gpu/nova-core/util.rs
new file mode 100644
index 000000000000..332a64cfc6a9
--- /dev/null
+++ b/drivers/gpu/nova-core/util.rs
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+
+pub(crate) const fn to_lowercase_bytes<const N: usize>(s: &str) -> [u8; N] {
+ let src = s.as_bytes();
+ let mut dst = [0; N];
+ let mut i = 0;
+
+ while i < src.len() && i < N {
+ dst[i] = (src[i] as char).to_ascii_lowercase() as u8;
+ i += 1;
+ }
+
+ dst
+}
+
+pub(crate) const fn const_bytes_to_str(bytes: &[u8]) -> &str {
+ match core::str::from_utf8(bytes) {
+ Ok(string) => string,
+ Err(_) => kernel::build_error!("Bytes are not valid UTF-8."),
+ }
+}
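Taken together these const helpers turn, for example, "TU102" into "tu102" at build time for Chipset::NAMES, which ModInfoBuilder then expands into firmware paths such as "nvidia/tu102/gsp/gsp-535.113.01.bin".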
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 49812a76b7ed..d900dd05c335 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -378,6 +378,12 @@ static bool apple_is_non_apple_keyboard(struct hid_device *hdev)
return false;
}
+static bool apple_is_omoton_kb066(struct hid_device *hdev)
+{
+ return hdev->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI &&
+ strcmp(hdev->name, "Bluetooth Keyboard") == 0;
+}
+
static inline void apple_setup_key_translation(struct input_dev *input,
const struct apple_key_translation *table)
{
@@ -546,9 +552,6 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
}
}
- if (usage->hid == 0xc0301) /* Omoton KB066 quirk */
- code = KEY_F6;
-
if (usage->code != code) {
input_event_with_scancode(input, usage->type, code, usage->hid, value);
@@ -728,7 +731,7 @@ static int apple_input_configured(struct hid_device *hdev,
{
struct apple_sc *asc = hid_get_drvdata(hdev);
- if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) {
+ if (((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) || apple_is_omoton_kb066(hdev)) {
hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n");
asc->quirks &= ~APPLE_HAS_FN;
}
diff --git a/drivers/hid/hid-appleir.c b/drivers/hid/hid-appleir.c
index 8deded185725..c45e5aa569d2 100644
--- a/drivers/hid/hid-appleir.c
+++ b/drivers/hid/hid-appleir.c
@@ -188,7 +188,7 @@ static int appleir_raw_event(struct hid_device *hid, struct hid_report *report,
static const u8 flatbattery[] = { 0x25, 0x87, 0xe0 };
unsigned long flags;
- if (len != 5)
+ if (len != 5 || !(hid->claimed & HID_CLAIMED_INPUT))
goto out;
if (!memcmp(data, keydown, sizeof(keydown))) {
diff --git a/drivers/hid/hid-corsair-void.c b/drivers/hid/hid-corsair-void.c
index 56e858066c3c..afbd67aa9719 100644
--- a/drivers/hid/hid-corsair-void.c
+++ b/drivers/hid/hid-corsair-void.c
@@ -71,11 +71,9 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
-#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/power_supply.h>
#include <linux/usb.h>
#include <linux/workqueue.h>
@@ -120,6 +118,12 @@ enum {
CORSAIR_VOID_BATTERY_CHARGING = 5,
};
+enum {
+ CORSAIR_VOID_ADD_BATTERY = 0,
+ CORSAIR_VOID_REMOVE_BATTERY = 1,
+ CORSAIR_VOID_UPDATE_BATTERY = 2,
+};
+
static enum power_supply_property corsair_void_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
@@ -155,12 +159,12 @@ struct corsair_void_drvdata {
struct power_supply *battery;
struct power_supply_desc battery_desc;
- struct mutex battery_mutex;
struct delayed_work delayed_status_work;
struct delayed_work delayed_firmware_work;
- struct work_struct battery_remove_work;
- struct work_struct battery_add_work;
+
+ unsigned long battery_work_flags;
+ struct work_struct battery_work;
};
/*
@@ -260,11 +264,9 @@ success:
/* Inform power supply if battery values changed */
if (memcmp(&orig_battery_data, battery_data, sizeof(*battery_data))) {
- scoped_guard(mutex, &drvdata->battery_mutex) {
- if (drvdata->battery) {
- power_supply_changed(drvdata->battery);
- }
- }
+ set_bit(CORSAIR_VOID_UPDATE_BATTERY,
+ &drvdata->battery_work_flags);
+ schedule_work(&drvdata->battery_work);
}
}
@@ -536,29 +538,11 @@ static void corsair_void_firmware_work_handler(struct work_struct *work)
}
-static void corsair_void_battery_remove_work_handler(struct work_struct *work)
-{
- struct corsair_void_drvdata *drvdata;
-
- drvdata = container_of(work, struct corsair_void_drvdata,
- battery_remove_work);
- scoped_guard(mutex, &drvdata->battery_mutex) {
- if (drvdata->battery) {
- power_supply_unregister(drvdata->battery);
- drvdata->battery = NULL;
- }
- }
-}
-
-static void corsair_void_battery_add_work_handler(struct work_struct *work)
+static void corsair_void_add_battery(struct corsair_void_drvdata *drvdata)
{
- struct corsair_void_drvdata *drvdata;
struct power_supply_config psy_cfg = {};
struct power_supply *new_supply;
- drvdata = container_of(work, struct corsair_void_drvdata,
- battery_add_work);
- guard(mutex)(&drvdata->battery_mutex);
if (drvdata->battery)
return;
@@ -583,16 +567,42 @@ static void corsair_void_battery_add_work_handler(struct work_struct *work)
drvdata->battery = new_supply;
}
+static void corsair_void_battery_work_handler(struct work_struct *work)
+{
+ struct corsair_void_drvdata *drvdata = container_of(work,
+ struct corsair_void_drvdata, battery_work);
+
+ bool add_battery = test_and_clear_bit(CORSAIR_VOID_ADD_BATTERY,
+ &drvdata->battery_work_flags);
+ bool remove_battery = test_and_clear_bit(CORSAIR_VOID_REMOVE_BATTERY,
+ &drvdata->battery_work_flags);
+ bool update_battery = test_and_clear_bit(CORSAIR_VOID_UPDATE_BATTERY,
+ &drvdata->battery_work_flags);
+
+ if (add_battery && !remove_battery) {
+ corsair_void_add_battery(drvdata);
+ } else if (remove_battery && !add_battery && drvdata->battery) {
+ power_supply_unregister(drvdata->battery);
+ drvdata->battery = NULL;
+ }
+
+ if (update_battery && drvdata->battery)
+ power_supply_changed(drvdata->battery);
+
+}
+
static void corsair_void_headset_connected(struct corsair_void_drvdata *drvdata)
{
- schedule_work(&drvdata->battery_add_work);
+ set_bit(CORSAIR_VOID_ADD_BATTERY, &drvdata->battery_work_flags);
+ schedule_work(&drvdata->battery_work);
schedule_delayed_work(&drvdata->delayed_firmware_work,
msecs_to_jiffies(100));
}
static void corsair_void_headset_disconnected(struct corsair_void_drvdata *drvdata)
{
- schedule_work(&drvdata->battery_remove_work);
+ set_bit(CORSAIR_VOID_REMOVE_BATTERY, &drvdata->battery_work_flags);
+ schedule_work(&drvdata->battery_work);
corsair_void_set_unknown_wireless_data(drvdata);
corsair_void_set_unknown_batt(drvdata);
@@ -678,13 +688,7 @@ static int corsair_void_probe(struct hid_device *hid_dev,
drvdata->battery_desc.get_property = corsair_void_battery_get_property;
drvdata->battery = NULL;
- INIT_WORK(&drvdata->battery_remove_work,
- corsair_void_battery_remove_work_handler);
- INIT_WORK(&drvdata->battery_add_work,
- corsair_void_battery_add_work_handler);
- ret = devm_mutex_init(drvdata->dev, &drvdata->battery_mutex);
- if (ret)
- return ret;
+ INIT_WORK(&drvdata->battery_work, corsair_void_battery_work_handler);
ret = sysfs_create_group(&hid_dev->dev.kobj, &corsair_void_attr_group);
if (ret)
@@ -721,8 +725,7 @@ static void corsair_void_remove(struct hid_device *hid_dev)
struct corsair_void_drvdata *drvdata = hid_get_drvdata(hid_dev);
hid_hw_stop(hid_dev);
- cancel_work_sync(&drvdata->battery_remove_work);
- cancel_work_sync(&drvdata->battery_add_work);
+ cancel_work_sync(&drvdata->battery_work);
if (drvdata->battery)
power_supply_unregister(drvdata->battery);
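The rework above replaces two separate work items guarded by a mutex with one work item driven by atomic flag bits. A minimal, generic sketch of that pattern follows; every name in it is illustrative rather than taken from the driver:

#include <linux/bitops.h>
#include <linux/container_of.h>
#include <linux/types.h>
#include <linux/workqueue.h>

enum { EXAMPLE_ADD = 0, EXAMPLE_REMOVE = 1, EXAMPLE_UPDATE = 2 };

struct example_ctx {
	unsigned long work_flags;
	struct work_struct work;
};

static void example_work_handler(struct work_struct *work)
{
	struct example_ctx *ctx = container_of(work, struct example_ctx, work);
	bool add = test_and_clear_bit(EXAMPLE_ADD, &ctx->work_flags);
	bool remove = test_and_clear_bit(EXAMPLE_REMOVE, &ctx->work_flags);

	/* As in the driver above, a pending add and a pending remove cancel out. */
	if (add && !remove) {
		/* register the resource */
	} else if (remove && !add) {
		/* tear the resource down */
	}

	if (test_and_clear_bit(EXAMPLE_UPDATE, &ctx->work_flags)) {
		/* notify users of the existing resource */
	}
}

/* Event sources never touch the resource directly; they set a bit and queue
 * the single work item, so no mutex is needed around the resource pointer. */
static void example_signal(struct example_ctx *ctx, unsigned int event)
{
	set_bit(event, &ctx->work_flags);
	schedule_work(&ctx->work);
}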
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 541d682af15a..8433306148d5 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -3450,7 +3450,7 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_MACRO_RECORD_START] = "MacroRecordStart",
[KEY_MACRO_RECORD_STOP] = "MacroRecordStop",
[KEY_MARK_WAYPOINT] = "MarkWayPoint", [KEY_MEDIA_REPEAT] = "MediaRepeat",
- [KEY_MEDIA_TOP_MENU] = "MediaTopMenu", [KEY_MESSENGER] = "Messanger",
+ [KEY_MEDIA_TOP_MENU] = "MediaTopMenu", [KEY_MESSENGER] = "Messenger",
[KEY_NAV_CHART] = "NavChar", [KEY_NAV_INFO] = "NavInfo",
[KEY_NEWS] = "News", [KEY_NEXT_ELEMENT] = "NextElement",
[KEY_NEXT_FAVORITE] = "NextFavorite", [KEY_NOTIFICATION_CENTER] = "NotificationCenter",
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index 0f292b5d3e26..eb6fd2dc75d0 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -268,11 +268,13 @@ static void cbas_ec_remove(struct platform_device *pdev)
mutex_unlock(&cbas_ec_reglock);
}
+#ifdef CONFIG_ACPI
static const struct acpi_device_id cbas_ec_acpi_ids[] = {
{ "GOOG000B", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, cbas_ec_acpi_ids);
+#endif
#ifdef CONFIG_OF
static const struct of_device_id cbas_ec_of_match[] = {
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index 11ac246176ae..839d5bcd72b1 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -457,13 +457,13 @@ static const struct joycon_ctlr_button_mapping snescon_button_mappings[] = {
};
static const struct joycon_ctlr_button_mapping gencon_button_mappings[] = {
- { BTN_A, JC_BTN_A, },
- { BTN_B, JC_BTN_B, },
- { BTN_C, JC_BTN_R, },
- { BTN_X, JC_BTN_X, }, /* MD/GEN 6B Only */
- { BTN_Y, JC_BTN_Y, }, /* MD/GEN 6B Only */
- { BTN_Z, JC_BTN_L, }, /* MD/GEN 6B Only */
- { BTN_SELECT, JC_BTN_ZR, },
+ { BTN_WEST, JC_BTN_A, }, /* A */
+ { BTN_SOUTH, JC_BTN_B, }, /* B */
+ { BTN_EAST, JC_BTN_R, }, /* C */
+ { BTN_TL, JC_BTN_X, }, /* X MD/GEN 6B Only */
+ { BTN_NORTH, JC_BTN_Y, }, /* Y MD/GEN 6B Only */
+ { BTN_TR, JC_BTN_L, }, /* Z MD/GEN 6B Only */
+ { BTN_SELECT, JC_BTN_ZR, }, /* Mode */
{ BTN_START, JC_BTN_PLUS, },
{ BTN_MODE, JC_BTN_HOME, },
{ BTN_Z, JC_BTN_CAP, },
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index c9e65e9088b3..10460b7bde1a 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -1327,11 +1327,11 @@ static void steam_remove(struct hid_device *hdev)
return;
}
+ hid_destroy_device(steam->client_hdev);
cancel_delayed_work_sync(&steam->mode_switch);
cancel_work_sync(&steam->work_connect);
cancel_work_sync(&steam->rumble_work);
cancel_work_sync(&steam->unregister_work);
- hid_destroy_device(steam->client_hdev);
steam->client_hdev = NULL;
steam->client_opened = 0;
if (steam->quirks & STEAM_QUIRK_WIRELESS) {
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 75544448c239..d3912e3f2f13 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -290,7 +290,7 @@ static int i2c_hid_get_report(struct i2c_hid *ihid,
ihid->rawbuf, recv_len + sizeof(__le16));
if (error) {
dev_err(&ihid->client->dev,
- "failed to set a report to device: %d\n", error);
+ "failed to get a report from device: %d\n", error);
return error;
}
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index cb04cd1d980b..6550ad5bfbb5 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -832,9 +832,9 @@ static void hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
hid_ishtp_cl);
dev_dbg(ishtp_device(cl_device), "%s\n", __func__);
- hid_ishtp_cl_deinit(hid_ishtp_cl);
ishtp_put_device(cl_device);
ishtp_hid_remove(client_data);
+ hid_ishtp_cl_deinit(hid_ishtp_cl);
hid_ishtp_cl = NULL;
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c
index 00c6f0ebf356..be2c62fc8251 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.c
@@ -261,12 +261,14 @@ err_hid_data:
*/
void ishtp_hid_remove(struct ishtp_cl_data *client_data)
{
+ void *data;
int i;
for (i = 0; i < client_data->num_hid_devices; ++i) {
if (client_data->hid_sensor_hubs[i]) {
- kfree(client_data->hid_sensor_hubs[i]->driver_data);
+ data = client_data->hid_sensor_hubs[i]->driver_data;
hid_destroy_device(client_data->hid_sensor_hubs[i]);
+ kfree(data);
client_data->hid_sensor_hubs[i] = NULL;
}
}
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
index 4641e818dfa4..6b2c7620be2b 100644
--- a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
@@ -909,6 +909,8 @@ static int quickspi_restore(struct device *device)
thc_change_ltr_mode(qsdev->thc_hw, THC_LTR_MODE_ACTIVE);
+ qsdev->state = QUICKSPI_ENABLED;
+
return 0;
}
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c
index 7373238ceb18..918050af73e5 100644
--- a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c
@@ -107,7 +107,7 @@ static int quickspi_get_device_descriptor(struct quickspi_device *qsdev)
return 0;
}
- dev_err_once(qsdev->dev, "Unexpected intput report type: %d\n", input_rep_type);
+ dev_err_once(qsdev->dev, "Unexpected input report type: %d\n", input_rep_type);
return -EINVAL;
}
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index 7802bbf5f958..59424103f634 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -22,11 +22,13 @@
*/
#define AD7314_TEMP_MASK 0x7FE0
#define AD7314_TEMP_SHIFT 5
+#define AD7314_LEADING_ZEROS_MASK BIT(15)
/*
* ADT7301 and ADT7302 temperature masks
*/
#define ADT7301_TEMP_MASK 0x3FFF
+#define ADT7301_LEADING_ZEROS_MASK (BIT(15) | BIT(14))
enum ad7314_variant {
adt7301,
@@ -65,12 +67,20 @@ static ssize_t ad7314_temperature_show(struct device *dev,
return ret;
switch (spi_get_device_id(chip->spi_dev)->driver_data) {
case ad7314:
+ if (ret & AD7314_LEADING_ZEROS_MASK) {
+ /* Invalid read-out, leading zero part is missing */
+ return -EIO;
+ }
data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_SHIFT;
data = sign_extend32(data, 9);
return sprintf(buf, "%d\n", 250 * data);
case adt7301:
case adt7302:
+ if (ret & ADT7301_LEADING_ZEROS_MASK) {
+ /* Invalid read-out, leading zero part is missing */
+ return -EIO;
+ }
/*
* Documented as a 13 bit twos complement register
* with a sign bit - which is a 14 bit 2's complement
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index b5352900463f..0d29c8f97ba7 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -181,40 +181,40 @@ static const struct ntc_compensation ncpXXwf104[] = {
};
static const struct ntc_compensation ncpXXxh103[] = {
- { .temp_c = -40, .ohm = 247565 },
- { .temp_c = -35, .ohm = 181742 },
- { .temp_c = -30, .ohm = 135128 },
- { .temp_c = -25, .ohm = 101678 },
- { .temp_c = -20, .ohm = 77373 },
- { .temp_c = -15, .ohm = 59504 },
- { .temp_c = -10, .ohm = 46222 },
- { .temp_c = -5, .ohm = 36244 },
- { .temp_c = 0, .ohm = 28674 },
- { .temp_c = 5, .ohm = 22878 },
- { .temp_c = 10, .ohm = 18399 },
- { .temp_c = 15, .ohm = 14910 },
- { .temp_c = 20, .ohm = 12169 },
+ { .temp_c = -40, .ohm = 195652 },
+ { .temp_c = -35, .ohm = 148171 },
+ { .temp_c = -30, .ohm = 113347 },
+ { .temp_c = -25, .ohm = 87559 },
+ { .temp_c = -20, .ohm = 68237 },
+ { .temp_c = -15, .ohm = 53650 },
+ { .temp_c = -10, .ohm = 42506 },
+ { .temp_c = -5, .ohm = 33892 },
+ { .temp_c = 0, .ohm = 27219 },
+ { .temp_c = 5, .ohm = 22021 },
+ { .temp_c = 10, .ohm = 17926 },
+ { .temp_c = 15, .ohm = 14674 },
+ { .temp_c = 20, .ohm = 12081 },
{ .temp_c = 25, .ohm = 10000 },
- { .temp_c = 30, .ohm = 8271 },
- { .temp_c = 35, .ohm = 6883 },
- { .temp_c = 40, .ohm = 5762 },
- { .temp_c = 45, .ohm = 4851 },
- { .temp_c = 50, .ohm = 4105 },
- { .temp_c = 55, .ohm = 3492 },
- { .temp_c = 60, .ohm = 2985 },
- { .temp_c = 65, .ohm = 2563 },
- { .temp_c = 70, .ohm = 2211 },
- { .temp_c = 75, .ohm = 1915 },
- { .temp_c = 80, .ohm = 1666 },
- { .temp_c = 85, .ohm = 1454 },
- { .temp_c = 90, .ohm = 1275 },
- { .temp_c = 95, .ohm = 1121 },
- { .temp_c = 100, .ohm = 990 },
- { .temp_c = 105, .ohm = 876 },
- { .temp_c = 110, .ohm = 779 },
- { .temp_c = 115, .ohm = 694 },
- { .temp_c = 120, .ohm = 620 },
- { .temp_c = 125, .ohm = 556 },
+ { .temp_c = 30, .ohm = 8315 },
+ { .temp_c = 35, .ohm = 6948 },
+ { .temp_c = 40, .ohm = 5834 },
+ { .temp_c = 45, .ohm = 4917 },
+ { .temp_c = 50, .ohm = 4161 },
+ { .temp_c = 55, .ohm = 3535 },
+ { .temp_c = 60, .ohm = 3014 },
+ { .temp_c = 65, .ohm = 2586 },
+ { .temp_c = 70, .ohm = 2228 },
+ { .temp_c = 75, .ohm = 1925 },
+ { .temp_c = 80, .ohm = 1669 },
+ { .temp_c = 85, .ohm = 1452 },
+ { .temp_c = 90, .ohm = 1268 },
+ { .temp_c = 95, .ohm = 1110 },
+ { .temp_c = 100, .ohm = 974 },
+ { .temp_c = 105, .ohm = 858 },
+ { .temp_c = 110, .ohm = 758 },
+ { .temp_c = 115, .ohm = 672 },
+ { .temp_c = 120, .ohm = 596 },
+ { .temp_c = 125, .ohm = 531 },
};
/*
diff --git a/drivers/hwmon/peci/dimmtemp.c b/drivers/hwmon/peci/dimmtemp.c
index d6762259dd69..fbe82d9852e0 100644
--- a/drivers/hwmon/peci/dimmtemp.c
+++ b/drivers/hwmon/peci/dimmtemp.c
@@ -127,8 +127,6 @@ static int update_thresholds(struct peci_dimmtemp *priv, int dimm_no)
return 0;
ret = priv->gen_info->read_thresholds(priv, dimm_order, chan_rank, &data);
- if (ret == -ENODATA) /* Use default or previous value */
- return 0;
if (ret)
return ret;
@@ -509,11 +507,11 @@ read_thresholds_icx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u
ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd4, &reg_val);
if (ret || !(reg_val & BIT(31)))
- return -ENODATA; /* Use default or previous value */
+ return -ENODATA;
ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd0, &reg_val);
if (ret)
- return -ENODATA; /* Use default or previous value */
+ return -ENODATA;
/*
* Device 26, Offset 224e0: IMC 0 channel 0 -> rank 0
@@ -546,11 +544,11 @@ read_thresholds_spr(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u
ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd4, &reg_val);
if (ret || !(reg_val & BIT(31)))
- return -ENODATA; /* Use default or previous value */
+ return -ENODATA;
ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd0, &reg_val);
if (ret)
- return -ENODATA; /* Use default or previous value */
+ return -ENODATA;
/*
* Device 26, Offset 219a8: IMC 0 channel 0 -> rank 0
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 77cf268e7d2d..920cd5408141 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -103,6 +103,8 @@ static int pmbus_identify(struct i2c_client *client,
if (pmbus_check_byte_register(client, 0, PMBUS_PAGE)) {
int page;
+ info->pages = PMBUS_PAGES;
+
for (page = 1; page < PMBUS_PAGES; page++) {
if (pmbus_set_page(client, page, 0xff) < 0)
break;
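Presumably the reason for seeding info->pages before the probe loop is that the page-select helper validates the requested page number against info->pages, so with the field still zero every probe write would be rejected and the scan would always report a single page. A sketch of the identify flow under that assumption (the final assignment is not part of the hunk shown here):

	info->pages = PMBUS_PAGES;	/* let pmbus_set_page() reach every page while probing */

	for (page = 1; page < PMBUS_PAGES; page++) {
		if (pmbus_set_page(client, page, 0xff) < 0)
			break;		/* first unselectable page ends the scan */
	}
	info->pages = page;		/* number of pages actually present */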
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index 1e3bd129a922..7087197383c9 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -706,7 +706,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
goto out;
}
- if (!ctx->pcc_comm_addr) {
+ if (IS_ERR_OR_NULL(ctx->pcc_comm_addr)) {
dev_err(&pdev->dev,
"Failed to ioremap PCC comm region\n");
rc = -ENOMEM;
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 66123d684ac9..bf99d79a4192 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -105,23 +105,32 @@ struct msc_iter {
/**
* struct msc - MSC device representation
- * @reg_base: register window base address
+ * @reg_base: register window base address for the entire MSU
+ * @msu_base: register window base address for this MSC
* @thdev: intel_th_device pointer
* @mbuf: MSU buffer, if assigned
- * @mbuf_priv MSU buffer's private data, if @mbuf
+ * @mbuf_priv: MSU buffer's private data, if @mbuf
+ * @work: a work to stop the trace when the buffer is full
* @win_list: list of windows in multiblock mode
* @single_sgt: single mode buffer
* @cur_win: current window
+ * @switch_on_unlock: window to switch to when it becomes available
* @nr_pages: total number of pages allocated for this buffer
* @single_sz: amount of data in single mode
* @single_wrap: single mode wrap occurred
* @base: buffer's base pointer
* @base_addr: buffer's base address
+ * @orig_addr: MSC0 buffer's base address
+ * @orig_sz: MSC0 buffer's size
* @user_count: number of users of the buffer
* @mmap_count: number of mappings
* @buf_mutex: mutex to serialize access to buffer-related bits
+ * @iter_list: list of open file descriptor iterators
+ * @stop_on_full: stop the trace if the current window is full
* @enabled: MSC is enabled
* @wrap: wrapping is enabled
+ * @do_irq: IRQ resource is available, handle interrupts
+ * @multi_is_broken: multiblock mode is broken on this device (per PCI drvdata)
* @mode: MSC operating mode
* @burst_len: write burst length
* @index: number of this MSC in the MSU
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index e9d8d28e055f..e3def163d5cf 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -335,6 +335,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Arrow Lake */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7724),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Panther Lake-H */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe324),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Panther Lake-P/U */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe424),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Alder Lake CPU */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
.driver_data = (kernel_ulong_t)&intel_th_2x,
diff --git a/drivers/i2c/busses/i2c-amd-asf-plat.c b/drivers/i2c/busses/i2c-amd-asf-plat.c
index 7512614bf4b7..93ebec162c6d 100644
--- a/drivers/i2c/busses/i2c-amd-asf-plat.c
+++ b/drivers/i2c/busses/i2c-amd-asf-plat.c
@@ -293,6 +293,7 @@ static irqreturn_t amd_asf_irq_handler(int irq, void *ptr)
amd_asf_update_ioport_target(piix4_smba, ASF_SLV_INTR, SMBHSTSTS, true);
}
+ iowrite32(irq, dev->eoi_base);
return IRQ_HANDLED;
}
diff --git a/drivers/i2c/busses/i2c-ls2x.c b/drivers/i2c/busses/i2c-ls2x.c
index 8821cac3897b..b475dd27b7af 100644
--- a/drivers/i2c/busses/i2c-ls2x.c
+++ b/drivers/i2c/busses/i2c-ls2x.c
@@ -10,6 +10,7 @@
* Rewritten for mainline by Binbin Zhou <zhoubinbin@loongson.cn>
*/
+#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/device.h>
@@ -26,7 +27,8 @@
#include <linux/units.h>
/* I2C Registers */
-#define I2C_LS2X_PRER 0x0 /* Freq Division Register(16 bits) */
+#define I2C_LS2X_PRER_LO 0x0 /* Freq Division Low Byte Register */
+#define I2C_LS2X_PRER_HI 0x1 /* Freq Division High Byte Register */
#define I2C_LS2X_CTR 0x2 /* Control Register */
#define I2C_LS2X_TXR 0x3 /* Transport Data Register */
#define I2C_LS2X_RXR 0x3 /* Receive Data Register */
@@ -93,6 +95,7 @@ static irqreturn_t ls2x_i2c_isr(int this_irq, void *dev_id)
*/
static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv)
{
+ u16 val;
struct i2c_timings *t = &priv->i2c_t;
struct device *dev = priv->adapter.dev.parent;
u32 acpi_speed = i2c_acpi_find_bus_speed(dev);
@@ -104,9 +107,14 @@ static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv)
else
t->bus_freq_hz = LS2X_I2C_FREQ_STD;
- /* Calculate and set i2c frequency. */
- writew(LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1,
- priv->base + I2C_LS2X_PRER);
+ /*
+ * According to the chip manual, we can only access the registers as bytes,
+ * otherwise the high bits will be truncated.
+ * So set the I2C frequency with two sequential writeb() calls instead of writew().
+ */
+ val = LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1;
+ writeb(FIELD_GET(GENMASK(7, 0), val), priv->base + I2C_LS2X_PRER_LO);
+ writeb(FIELD_GET(GENMASK(15, 8), val), priv->base + I2C_LS2X_PRER_HI);
}
static void ls2x_i2c_init(struct ls2x_i2c_priv *priv)
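A worked example of the split write, with a peripheral-clock value assumed purely for illustration (the real LS2X_I2C_PCLK_FREQ is not visible in this hunk):

	/* Assumed 200 MHz peripheral clock, 100 kHz (standard-mode) bus. */
	u16 val = 200000000 / (5 * 100000) - 1;		/* = 399 = 0x018f */

	writeb(FIELD_GET(GENMASK(7, 0), val), priv->base + I2C_LS2X_PRER_LO);	/* writes 0x8f */
	writeb(FIELD_GET(GENMASK(15, 8), val), priv->base + I2C_LS2X_PRER_HI);	/* writes 0x01 */

	/* Per the manual note above, a single 16-bit writew() would only take
	 * effect for the low byte, silently losing the 0x01 in the high byte. */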
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index 3ca08b8ef8af..de713b5747fe 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -2554,6 +2554,13 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev)
if (irq < 0)
return irq;
+ /*
+ * Disable the interrupt so that a stale, asynchronously latched interrupt
+ * status does not trigger the handler spuriously; the machine might have
+ * done a warm reset during the last SMBus/I2C transfer session.
+ */
+ npcm_i2c_int_enable(bus, false);
+
ret = devm_request_irq(bus->dev, irq, npcm_i2c_bus_irq, 0,
dev_name(bus->dev), bus);
if (ret)
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 118fe1d37c22..0fdb1d1316c4 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -56,6 +56,7 @@
#include <asm/intel-family.h>
#include <asm/mwait.h>
#include <asm/spec-ctrl.h>
+#include <asm/tsc.h>
#include <asm/fpu/api.h>
#define INTEL_IDLE_VERSION "0.5.1"
@@ -1799,6 +1800,9 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
if (intel_idle_state_needs_timer_stop(state))
state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+ if (cx->type > ACPI_STATE_C1 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+ mark_tsc_unstable("TSC halts in idle");
+
state->enter = intel_idle;
state->enter_s2idle = intel_idle_s2idle;
}
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index e96a5ae92375..cfaf8f7e0a07 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -1084,7 +1084,7 @@ static int ad7192_update_scan_mode(struct iio_dev *indio_dev, const unsigned lon
conf &= ~AD7192_CONF_CHAN_MASK;
for_each_set_bit(i, scan_mask, 8)
- conf |= FIELD_PREP(AD7192_CONF_CHAN_MASK, i);
+ conf |= FIELD_PREP(AD7192_CONF_CHAN_MASK, BIT(i));
ret = ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, conf);
if (ret < 0)
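To see what the one-line fix changes, consider two selected channels; the channel numbers are made up and, as the fix implies, AD7192_CONF_CHAN_MASK is treated as a per-channel bitmap field:

	/* scan_mask = BIT(1) | BIT(3): channels 1 and 3 enabled (illustrative) */
	for_each_set_bit(i, scan_mask, 8)
		conf |= FIELD_PREP(AD7192_CONF_CHAN_MASK, BIT(i));

	/* The field now holds BIT(1) | BIT(3) = 0xa, one bit per channel.
	 * The old FIELD_PREP(..., i) produced 1 | 3 = 0x3 instead, which the
	 * device would read as channels 0 and 1 being selected. */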
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index d8e3c7a43678..d39354afd539 100644
--- a/drivers/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -1047,7 +1047,7 @@ static int ad7606_read_avail(struct iio_dev *indio_dev,
cs = &st->chan_scales[ch];
*vals = (int *)cs->scale_avail;
- *length = cs->num_scales;
+ *length = cs->num_scales * 2;
*type = IIO_VAL_INT_PLUS_MICRO;
return IIO_AVAIL_LIST;
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 8e5aaf15a921..c3a1dea2aa82 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -329,7 +329,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
#define AT91_HWFIFO_MAX_SIZE_STR "128"
#define AT91_HWFIFO_MAX_SIZE 128
-#define AT91_SAMA5D2_CHAN_SINGLE(index, num, addr) \
+#define AT91_SAMA_CHAN_SINGLE(index, num, addr, rbits) \
{ \
.type = IIO_VOLTAGE, \
.channel = num, \
@@ -337,7 +337,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.scan_index = index, \
.scan_type = { \
.sign = 'u', \
- .realbits = 14, \
+ .realbits = rbits, \
.storagebits = 16, \
}, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
@@ -350,7 +350,13 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.indexed = 1, \
}
-#define AT91_SAMA5D2_CHAN_DIFF(index, num, num2, addr) \
+#define AT91_SAMA5D2_CHAN_SINGLE(index, num, addr) \
+ AT91_SAMA_CHAN_SINGLE(index, num, addr, 14)
+
+#define AT91_SAMA7G5_CHAN_SINGLE(index, num, addr) \
+ AT91_SAMA_CHAN_SINGLE(index, num, addr, 16)
+
+#define AT91_SAMA_CHAN_DIFF(index, num, num2, addr, rbits) \
{ \
.type = IIO_VOLTAGE, \
.differential = 1, \
@@ -360,7 +366,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.scan_index = index, \
.scan_type = { \
.sign = 's', \
- .realbits = 14, \
+ .realbits = rbits, \
.storagebits = 16, \
}, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
@@ -373,6 +379,12 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.indexed = 1, \
}
+#define AT91_SAMA5D2_CHAN_DIFF(index, num, num2, addr) \
+ AT91_SAMA_CHAN_DIFF(index, num, num2, addr, 14)
+
+#define AT91_SAMA7G5_CHAN_DIFF(index, num, num2, addr) \
+ AT91_SAMA_CHAN_DIFF(index, num, num2, addr, 16)
+
#define AT91_SAMA5D2_CHAN_TOUCH(num, name, mod) \
{ \
.type = IIO_POSITIONRELATIVE, \
@@ -666,30 +678,30 @@ static const struct iio_chan_spec at91_sama5d2_adc_channels[] = {
};
static const struct iio_chan_spec at91_sama7g5_adc_channels[] = {
- AT91_SAMA5D2_CHAN_SINGLE(0, 0, 0x60),
- AT91_SAMA5D2_CHAN_SINGLE(1, 1, 0x64),
- AT91_SAMA5D2_CHAN_SINGLE(2, 2, 0x68),
- AT91_SAMA5D2_CHAN_SINGLE(3, 3, 0x6c),
- AT91_SAMA5D2_CHAN_SINGLE(4, 4, 0x70),
- AT91_SAMA5D2_CHAN_SINGLE(5, 5, 0x74),
- AT91_SAMA5D2_CHAN_SINGLE(6, 6, 0x78),
- AT91_SAMA5D2_CHAN_SINGLE(7, 7, 0x7c),
- AT91_SAMA5D2_CHAN_SINGLE(8, 8, 0x80),
- AT91_SAMA5D2_CHAN_SINGLE(9, 9, 0x84),
- AT91_SAMA5D2_CHAN_SINGLE(10, 10, 0x88),
- AT91_SAMA5D2_CHAN_SINGLE(11, 11, 0x8c),
- AT91_SAMA5D2_CHAN_SINGLE(12, 12, 0x90),
- AT91_SAMA5D2_CHAN_SINGLE(13, 13, 0x94),
- AT91_SAMA5D2_CHAN_SINGLE(14, 14, 0x98),
- AT91_SAMA5D2_CHAN_SINGLE(15, 15, 0x9c),
- AT91_SAMA5D2_CHAN_DIFF(16, 0, 1, 0x60),
- AT91_SAMA5D2_CHAN_DIFF(17, 2, 3, 0x68),
- AT91_SAMA5D2_CHAN_DIFF(18, 4, 5, 0x70),
- AT91_SAMA5D2_CHAN_DIFF(19, 6, 7, 0x78),
- AT91_SAMA5D2_CHAN_DIFF(20, 8, 9, 0x80),
- AT91_SAMA5D2_CHAN_DIFF(21, 10, 11, 0x88),
- AT91_SAMA5D2_CHAN_DIFF(22, 12, 13, 0x90),
- AT91_SAMA5D2_CHAN_DIFF(23, 14, 15, 0x98),
+ AT91_SAMA7G5_CHAN_SINGLE(0, 0, 0x60),
+ AT91_SAMA7G5_CHAN_SINGLE(1, 1, 0x64),
+ AT91_SAMA7G5_CHAN_SINGLE(2, 2, 0x68),
+ AT91_SAMA7G5_CHAN_SINGLE(3, 3, 0x6c),
+ AT91_SAMA7G5_CHAN_SINGLE(4, 4, 0x70),
+ AT91_SAMA7G5_CHAN_SINGLE(5, 5, 0x74),
+ AT91_SAMA7G5_CHAN_SINGLE(6, 6, 0x78),
+ AT91_SAMA7G5_CHAN_SINGLE(7, 7, 0x7c),
+ AT91_SAMA7G5_CHAN_SINGLE(8, 8, 0x80),
+ AT91_SAMA7G5_CHAN_SINGLE(9, 9, 0x84),
+ AT91_SAMA7G5_CHAN_SINGLE(10, 10, 0x88),
+ AT91_SAMA7G5_CHAN_SINGLE(11, 11, 0x8c),
+ AT91_SAMA7G5_CHAN_SINGLE(12, 12, 0x90),
+ AT91_SAMA7G5_CHAN_SINGLE(13, 13, 0x94),
+ AT91_SAMA7G5_CHAN_SINGLE(14, 14, 0x98),
+ AT91_SAMA7G5_CHAN_SINGLE(15, 15, 0x9c),
+ AT91_SAMA7G5_CHAN_DIFF(16, 0, 1, 0x60),
+ AT91_SAMA7G5_CHAN_DIFF(17, 2, 3, 0x68),
+ AT91_SAMA7G5_CHAN_DIFF(18, 4, 5, 0x70),
+ AT91_SAMA7G5_CHAN_DIFF(19, 6, 7, 0x78),
+ AT91_SAMA7G5_CHAN_DIFF(20, 8, 9, 0x80),
+ AT91_SAMA7G5_CHAN_DIFF(21, 10, 11, 0x88),
+ AT91_SAMA7G5_CHAN_DIFF(22, 12, 13, 0x90),
+ AT91_SAMA7G5_CHAN_DIFF(23, 14, 15, 0x98),
IIO_CHAN_SOFT_TIMESTAMP(24),
AT91_SAMA5D2_CHAN_TEMP(AT91_SAMA7G5_ADC_TEMP_CHANNEL, "temp", 0xdc),
};
diff --git a/drivers/iio/adc/pac1921.c b/drivers/iio/adc/pac1921.c
index 90f61c47b1c4..63f518215156 100644
--- a/drivers/iio/adc/pac1921.c
+++ b/drivers/iio/adc/pac1921.c
@@ -1198,11 +1198,11 @@ static int pac1921_match_acpi_device(struct iio_dev *indio_dev)
label = devm_kstrdup(dev, status->package.elements[0].string.pointer,
GFP_KERNEL);
+ ACPI_FREE(status);
if (!label)
return -ENOMEM;
indio_dev->label = label;
- ACPI_FREE(status);
return 0;
}
diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c
index e7206af53af6..7944f5c1d264 100644
--- a/drivers/iio/dac/ad3552r.c
+++ b/drivers/iio/dac/ad3552r.c
@@ -410,6 +410,12 @@ static int ad3552r_reset(struct ad3552r_desc *dac)
return ret;
}
+ /* Clear reset error flag, see ad3552r manual, rev B table 38. */
+ ret = ad3552r_write_reg(dac, AD3552R_REG_ADDR_ERR_STATUS,
+ AD3552R_MASK_RESET_STATUS);
+ if (ret)
+ return ret;
+
return ad3552r_update_reg_field(dac,
AD3552R_REG_ADDR_INTERFACE_CONFIG_A,
AD3552R_MASK_ADDR_ASCENSION,
diff --git a/drivers/iio/filter/admv8818.c b/drivers/iio/filter/admv8818.c
index 848baa6e3bbf..d85b7d3de866 100644
--- a/drivers/iio/filter/admv8818.c
+++ b/drivers/iio/filter/admv8818.c
@@ -574,21 +574,15 @@ static int admv8818_init(struct admv8818_state *st)
struct spi_device *spi = st->spi;
unsigned int chip_id;
- ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
- ADMV8818_SOFTRESET_N_MSK |
- ADMV8818_SOFTRESET_MSK,
- FIELD_PREP(ADMV8818_SOFTRESET_N_MSK, 1) |
- FIELD_PREP(ADMV8818_SOFTRESET_MSK, 1));
+ ret = regmap_write(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
+ ADMV8818_SOFTRESET_N_MSK | ADMV8818_SOFTRESET_MSK);
if (ret) {
dev_err(&spi->dev, "ADMV8818 Soft Reset failed.\n");
return ret;
}
- ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
- ADMV8818_SDOACTIVE_N_MSK |
- ADMV8818_SDOACTIVE_MSK,
- FIELD_PREP(ADMV8818_SDOACTIVE_N_MSK, 1) |
- FIELD_PREP(ADMV8818_SDOACTIVE_MSK, 1));
+ ret = regmap_write(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
+ ADMV8818_SDOACTIVE_N_MSK | ADMV8818_SDOACTIVE_MSK);
if (ret) {
dev_err(&spi->dev, "ADMV8818 SDO Enable failed.\n");
return ret;
diff --git a/drivers/iio/light/apds9306.c b/drivers/iio/light/apds9306.c
index 69a0d609cffc..5ed7e17f49e7 100644
--- a/drivers/iio/light/apds9306.c
+++ b/drivers/iio/light/apds9306.c
@@ -108,11 +108,11 @@ static const struct part_id_gts_multiplier apds9306_gts_mul[] = {
{
.part_id = 0xB1,
.max_scale_int = 16,
- .max_scale_nano = 3264320,
+ .max_scale_nano = 326432000,
}, {
.part_id = 0xB3,
.max_scale_int = 14,
- .max_scale_nano = 9712000,
+ .max_scale_nano = 97120000,
},
};
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index 7ab64f5c623c..76b76d12b388 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -49,9 +49,10 @@ static const u32 prox_sensitivity_addresses[] = {
#define PROX_CHANNEL(_is_proximity, _channel) \
{\
.type = _is_proximity ? IIO_PROXIMITY : IIO_ATTENTION,\
- .info_mask_separate = _is_proximity ? BIT(IIO_CHAN_INFO_RAW) :\
- BIT(IIO_CHAN_INFO_PROCESSED),\
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |\
+ .info_mask_separate = \
+ (_is_proximity ? BIT(IIO_CHAN_INFO_RAW) :\
+ BIT(IIO_CHAN_INFO_PROCESSED)) |\
+ BIT(IIO_CHAN_INFO_OFFSET) |\
BIT(IIO_CHAN_INFO_SCALE) |\
BIT(IIO_CHAN_INFO_SAMP_FREQ) |\
BIT(IIO_CHAN_INFO_HYSTERESIS),\
diff --git a/drivers/iio/proximity/hx9023s.c b/drivers/iio/proximity/hx9023s.c
index e092a935dbac..5aa8e5a22f32 100644
--- a/drivers/iio/proximity/hx9023s.c
+++ b/drivers/iio/proximity/hx9023s.c
@@ -1036,12 +1036,13 @@ static int hx9023s_send_cfg(const struct firmware *fw, struct hx9023s_data *data
return -ENOMEM;
memcpy(bin->data, fw->data, fw->size);
- release_firmware(fw);
bin->fw_size = fw->size;
bin->fw_ver = bin->data[FW_VER_OFFSET];
bin->reg_count = get_unaligned_le16(bin->data + FW_REG_CNT_OFFSET);
+ release_firmware(fw);
+
return hx9023s_bin_load(data, bin);
}
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index b91a85a491d0..3721446c6ba4 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -187,7 +187,6 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
struct auxiliary_device *adev;
- struct notifier_block nb;
unsigned int version, major, minor;
struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 3ac47f4e6122..f039aefcaf67 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -348,8 +348,8 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
goto done;
}
bnxt_re_copy_err_stats(rdev, stats, err_s);
- if (_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags) &&
- !rdev->is_virtfn) {
+ if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
+ rdev->is_virtfn)) {
rc = bnxt_re_get_ext_stat(rdev, stats);
if (rc) {
clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 2de101d6e825..6f5db32082dd 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -1870,6 +1870,8 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
srq->srq_limit = srq_init_attr->attr.srq_limit;
srq->qplib_srq.eventq_hw_ring_id = rdev->nqr->nq[0].ring_id;
+ srq->qplib_srq.sg_info.pgsize = PAGE_SIZE;
+ srq->qplib_srq.sg_info.pgshft = PAGE_SHIFT;
nq = &rdev->nqr->nq[0];
if (udata) {
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index e9e4da4dd576..a94c8c5387d9 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -396,11 +396,16 @@ free_dcb:
static void bnxt_re_async_notifier(void *handle, struct hwrm_async_event_cmpl *cmpl)
{
- struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
struct bnxt_re_dcb_work *dcb_work;
+ struct bnxt_re_dev *rdev;
u32 data1, data2;
u16 event_id;
+ rdev = en_info->rdev;
+ if (!rdev)
+ return;
+
event_id = le16_to_cpu(cmpl->event_id);
data1 = le32_to_cpu(cmpl->event_data1);
data2 = le32_to_cpu(cmpl->event_data2);
@@ -433,6 +438,8 @@ static void bnxt_re_stop_irq(void *handle, bool reset)
int indx;
rdev = en_info->rdev;
+ if (!rdev)
+ return;
rcfw = &rdev->rcfw;
if (reset) {
@@ -461,6 +468,8 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
int indx, rc;
rdev = en_info->rdev;
+ if (!rdev)
+ return;
msix_ent = rdev->nqr->msix_entries;
rcfw = &rdev->rcfw;
if (!ent) {
@@ -1350,7 +1359,6 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct auxiliary_device *adev,
return NULL;
}
/* Default values */
- rdev->nb.notifier_call = NULL;
rdev->netdev = en_dev->net;
rdev->en_dev = en_dev;
rdev->adev = adev;
@@ -2345,15 +2353,6 @@ exit:
static void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 op_type,
struct auxiliary_device *aux_dev)
{
- if (rdev->nb.notifier_call) {
- unregister_netdevice_notifier(&rdev->nb);
- rdev->nb.notifier_call = NULL;
- } else {
- /* If notifier is null, we should have already done a
- * clean up before coming here.
- */
- return;
- }
bnxt_re_setup_cc(rdev, false);
ib_unregister_device(&rdev->ibdev);
bnxt_re_dev_uninit(rdev, op_type);
@@ -2433,6 +2432,7 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
ibdev_info(&rdev->ibdev, "%s: L2 driver notified to stop en_state 0x%lx",
__func__, en_dev->en_state);
bnxt_re_remove_device(rdev, BNXT_RE_PRE_RECOVERY_REMOVE, adev);
+ bnxt_re_update_en_info_rdev(NULL, en_info, adev);
mutex_unlock(&bnxt_re_mutex);
return 0;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index be5d907a036b..711990232de1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -547,6 +547,14 @@ static inline bool _is_ext_stats_supported(u16 dev_cap_flags)
CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
}
+static inline int bnxt_ext_stats_supported(struct bnxt_qplib_chip_ctx *ctx,
+ u16 flags, bool virtfn)
+{
+ /* ext stats supported if cap flag is set AND is a PF OR a Thor2 VF */
+ return (_is_ext_stats_supported(flags) &&
+ ((virtfn && bnxt_qplib_is_chip_gen_p7(ctx)) || (!virtfn)));
+}
+
static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
{
return dev_cap_flags &
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index dded339802b3..160e8927d364 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1286,10 +1286,8 @@ static u32 hns_roce_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
return tx_timeout;
}
-static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u16 opcode)
+static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u32 tx_timeout)
{
- struct hns_roce_v2_priv *priv = hr_dev->priv;
- u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout);
u32 timeout = 0;
do {
@@ -1299,8 +1297,9 @@ static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u16 opcode)
} while (++timeout < tx_timeout);
}
-static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
- struct hns_roce_cmq_desc *desc, int num)
+static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc,
+ int num, u32 tx_timeout)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
@@ -1309,8 +1308,6 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
int ret;
int i;
- spin_lock_bh(&csq->lock);
-
tail = csq->head;
for (i = 0; i < num; i++) {
@@ -1324,22 +1321,17 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_CNT]);
- hns_roce_wait_csq_done(hr_dev, le16_to_cpu(desc->opcode));
+ hns_roce_wait_csq_done(hr_dev, tx_timeout);
if (hns_roce_cmq_csq_done(hr_dev)) {
ret = 0;
for (i = 0; i < num; i++) {
/* check the result of hardware write back */
- desc[i] = csq->desc[tail++];
+ desc_ret = le16_to_cpu(csq->desc[tail++].retval);
if (tail == csq->desc_num)
tail = 0;
-
- desc_ret = le16_to_cpu(desc[i].retval);
if (likely(desc_ret == CMD_EXEC_SUCCESS))
continue;
- dev_err_ratelimited(hr_dev->dev,
- "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n",
- desc->opcode, desc_ret);
ret = hns_roce_cmd_err_convert_errno(desc_ret);
}
} else {
@@ -1354,14 +1346,54 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
ret = -EAGAIN;
}
- spin_unlock_bh(&csq->lock);
-
if (ret)
atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_ERR_CNT]);
return ret;
}
+static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc, int num)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
+ u16 opcode = le16_to_cpu(desc->opcode);
+ u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout);
+ u8 try_cnt = HNS_ROCE_OPC_POST_MB_TRY_CNT;
+ u32 rsv_tail;
+ int ret;
+ int i;
+
+ while (try_cnt) {
+ try_cnt--;
+
+ spin_lock_bh(&csq->lock);
+ rsv_tail = csq->head;
+ ret = __hns_roce_cmq_send_one(hr_dev, desc, num, tx_timeout);
+ if (opcode == HNS_ROCE_OPC_POST_MB && ret == -ETIME &&
+ try_cnt) {
+ spin_unlock_bh(&csq->lock);
+ mdelay(HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC);
+ continue;
+ }
+
+ for (i = 0; i < num; i++) {
+ desc[i] = csq->desc[rsv_tail++];
+ if (rsv_tail == csq->desc_num)
+ rsv_tail = 0;
+ }
+ spin_unlock_bh(&csq->lock);
+ break;
+ }
+
+ if (ret)
+ dev_err_ratelimited(hr_dev->dev,
+ "Cmdq IO error, opcode = 0x%x, return = %d.\n",
+ opcode, ret);
+
+ return ret;
+}
+
static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num)
{
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index cbdbc9edbce6..91a5665465ff 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -230,6 +230,8 @@ enum hns_roce_opcode_type {
};
#define HNS_ROCE_OPC_POST_MB_TIMEOUT 35000
+#define HNS_ROCE_OPC_POST_MB_TRY_CNT 8
+#define HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC 5
struct hns_roce_cmdq_tx_timeout_map {
u16 opcode;
u32 tx_timeout;
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 67c2d43135a8..457cea6d9909 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -174,7 +174,7 @@ static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
req.num_resources = 1;
- req.alignment = 1;
+ req.alignment = PAGE_SIZE / MANA_PAGE_SIZE;
/* Have GDMA start searching from 0 */
req.allocated_resources = 0;
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 505bc47fd575..99036afb3aef 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -67,7 +67,8 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
ah->av.tclass = grh->traffic_class;
}
- ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4);
+ ah->av.stat_rate_sl =
+ (mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah_attr)) << 4);
if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
if (init_attr->xmit_slave)
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
index 4f6c1968a2ee..81cfa74147a1 100644
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -546,6 +546,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
struct ib_qp *qp)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ bool new = false;
int err;
if (!counter->id) {
@@ -560,6 +561,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
return err;
counter->id =
MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+ new = true;
}
err = mlx5_ib_qp_set_counter(qp, counter);
@@ -569,8 +571,10 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
return 0;
fail_set_counter:
- mlx5_ib_counter_dealloc(counter);
- counter->id = 0;
+ if (new) {
+ mlx5_ib_counter_dealloc(counter);
+ counter->id = 0;
+ }
return err;
}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index bb02b6adbf2c..753faa9ad06a 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1550,7 +1550,7 @@ static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
- if (!umem_dmabuf->sgt)
+ if (!umem_dmabuf->sgt || !mr)
return;
mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP);
@@ -1935,7 +1935,8 @@ err:
static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
- if (!mr->umem && !mr->data_direct && mr->descs) {
+ if (!mr->umem && !mr->data_direct &&
+ mr->ibmr.type != IB_MR_TYPE_DM && mr->descs) {
struct ib_device *device = mr->ibmr.device;
int size = mr->max_descs * mr->desc_size;
struct mlx5_ib_dev *dev = to_mdev(device);
@@ -2022,11 +2023,16 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
bool is_odp = is_odp_mr(mr);
+ bool is_odp_dma_buf = is_dmabuf_mr(mr) &&
+ !to_ib_umem_dmabuf(mr->umem)->pinned;
int ret = 0;
if (is_odp)
mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);
+ if (is_odp_dma_buf)
+ dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv, NULL);
+
if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) {
ent = mr->mmkey.cache_ent;
/* upon storing to a clean temp entry - schedule its cleanup */
@@ -2054,6 +2060,12 @@ out:
mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex);
}
+ if (is_odp_dma_buf) {
+ if (!ret)
+ to_ib_umem_dmabuf(mr->umem)->private = NULL;
+ dma_resv_unlock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv);
+ }
+
return ret;
}
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index f1e23583e6c0..e77c9280c07e 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -242,6 +242,7 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_KERNEL) !=
mr) {
xa_unlock(&imr->implicit_children);
+ mlx5r_deref_odp_mkey(&imr->mmkey);
return;
}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index a43eba9d3572..88724d15705d 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3447,11 +3447,11 @@ static int ib_to_mlx5_rate_map(u8 rate)
return 0;
}
-static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
+int mlx5r_ib_rate(struct mlx5_ib_dev *dev, u8 rate)
{
u32 stat_rate_support;
- if (rate == IB_RATE_PORT_CURRENT)
+ if (rate == IB_RATE_PORT_CURRENT || rate == IB_RATE_800_GBPS)
return 0;
if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_800_GBPS)
@@ -3596,7 +3596,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
sizeof(grh->dgid.raw));
}
- err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
+ err = mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah));
if (err < 0)
return err;
MLX5_SET(ads, path, stat_rate, err);
@@ -4579,6 +4579,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
MLX5_SET(dctc, dctc, counter_set_id, set_id);
+
+ qp->port = attr->port_num;
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
struct mlx5_ib_modify_qp_resp resp = {};
u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {};
@@ -5074,7 +5076,7 @@ static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
}
if (qp_attr_mask & IB_QP_PORT)
- qp_attr->port_num = MLX5_GET(dctc, dctc, port);
+ qp_attr->port_num = mqp->port;
if (qp_attr_mask & IB_QP_MIN_RNR_TIMER)
qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak);
if (qp_attr_mask & IB_QP_AV) {
diff --git a/drivers/infiniband/hw/mlx5/qp.h b/drivers/infiniband/hw/mlx5/qp.h
index b6ee7c3ee1ca..2530e7730635 100644
--- a/drivers/infiniband/hw/mlx5/qp.h
+++ b/drivers/infiniband/hw/mlx5/qp.h
@@ -56,4 +56,5 @@ int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn);
int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
int mlx5_ib_qp_event_init(void);
void mlx5_ib_qp_event_cleanup(void);
+int mlx5r_ib_rate(struct mlx5_ib_dev *dev, u8 rate);
#endif /* _MLX5_IB_QP_H */
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index 887fd6fa3ba9..793f3c5c4d01 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -231,30 +231,6 @@ void mlx5r_umr_cleanup(struct mlx5_ib_dev *dev)
ib_dealloc_pd(dev->umrc.pd);
}
-static int mlx5r_umr_recover(struct mlx5_ib_dev *dev)
-{
- struct umr_common *umrc = &dev->umrc;
- struct ib_qp_attr attr;
- int err;
-
- attr.qp_state = IB_QPS_RESET;
- err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE);
- if (err) {
- mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
- goto err;
- }
-
- err = mlx5r_umr_qp_rst2rts(dev, umrc->qp);
- if (err)
- goto err;
-
- umrc->state = MLX5_UMR_STATE_ACTIVE;
- return 0;
-
-err:
- umrc->state = MLX5_UMR_STATE_ERR;
- return err;
-}
static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
struct mlx5r_umr_wqe *wqe, bool with_data)
@@ -302,6 +278,61 @@ out:
return err;
}
+static int mlx5r_umr_recover(struct mlx5_ib_dev *dev, u32 mkey,
+ struct mlx5r_umr_context *umr_context,
+ struct mlx5r_umr_wqe *wqe, bool with_data)
+{
+ struct umr_common *umrc = &dev->umrc;
+ struct ib_qp_attr attr;
+ int err;
+
+ mutex_lock(&umrc->lock);
+ /* Preventing any further WRs to be sent now */
+ if (umrc->state != MLX5_UMR_STATE_RECOVER) {
+ mlx5_ib_warn(dev, "UMR recovery encountered an unexpected state=%d\n",
+ umrc->state);
+ umrc->state = MLX5_UMR_STATE_RECOVER;
+ }
+ mutex_unlock(&umrc->lock);
+
+ /* Send a final/barrier WR (the failed one) and wait for its completion.
+ * This ensures that all the previous WRs got a completion before we
+ * set the QP state to RESET.
+ */
+ err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context->cqe, wqe,
+ with_data);
+ if (err) {
+ mlx5_ib_warn(dev, "UMR recovery post send failed, err %d\n", err);
+ goto err;
+ }
+
+ /* Since the QP is in an error state, this WR will only complete with
+ * IB_WC_WR_FLUSH_ERR. However, as it serves only as a barrier,
+ * we don't care about its status.
+ */
+ wait_for_completion(&umr_context->done);
+
+ attr.qp_state = IB_QPS_RESET;
+ err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE);
+ if (err) {
+ mlx5_ib_warn(dev, "Couldn't modify UMR QP to RESET, err=%d\n", err);
+ goto err;
+ }
+
+ err = mlx5r_umr_qp_rst2rts(dev, umrc->qp);
+ if (err) {
+ mlx5_ib_warn(dev, "Couldn't modify UMR QP to RTS, err=%d\n", err);
+ goto err;
+ }
+
+ umrc->state = MLX5_UMR_STATE_ACTIVE;
+ return 0;
+
+err:
+ umrc->state = MLX5_UMR_STATE_ERR;
+ return err;
+}
+
static void mlx5r_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct mlx5_ib_umr_context *context =
@@ -366,9 +397,7 @@ static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey,
mlx5_ib_warn(dev,
"reg umr failed (%u). Trying to recover and resubmit the flushed WQEs, mkey = %u\n",
umr_context.status, mkey);
- mutex_lock(&umrc->lock);
- err = mlx5r_umr_recover(dev);
- mutex_unlock(&umrc->lock);
+ err = mlx5r_umr_recover(dev, mkey, &umr_context, wqe, with_data);
if (err)
mlx5_ib_warn(dev, "couldn't recover UMR, err %d\n",
err);
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index b48a72bd7b23..cd5116d8c3b2 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2043,12 +2043,12 @@ static void set_dte_entry(struct amd_iommu *iommu,
make_clear_dte(dev_data, dte, &new);
if (domain->iop.mode != PAGE_MODE_NONE)
- new.data[0] = iommu_virt_to_phys(domain->iop.root);
+ new.data[0] |= iommu_virt_to_phys(domain->iop.root);
new.data[0] |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
<< DEV_ENTRY_MODE_SHIFT;
- new.data[0] |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V;
+ new.data[0] |= DTE_FLAG_IR | DTE_FLAG_IW;
/*
* When SNP is enabled, we can only support TV=1 with non-zero domain ID.
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 9f424acf474e..e540092d664d 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -2043,6 +2043,7 @@ int enable_drhd_fault_handling(unsigned int cpu)
/*
* Enable fault control interrupt.
*/
+ guard(rwsem_read)(&dmar_global_lock);
for_each_iommu(iommu, drhd) {
u32 fault_status;
int ret;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index cc46098f875b..bf1f0c814348 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -3146,7 +3146,14 @@ int __init intel_iommu_init(void)
iommu_device_sysfs_add(&iommu->iommu, NULL,
intel_iommu_groups,
"%s", iommu->name);
+ /*
+ * The iommu device probe is protected by the iommu_probe_device_lock.
+ * Release the dmar_global_lock before entering the device probe path
+ * to avoid unnecessary lock order splat.
+ */
+ up_read(&dmar_global_lock);
iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
+ down_read(&dmar_global_lock);
iommu_pmu_register(iommu);
}
@@ -4378,9 +4385,6 @@ static int context_setup_pass_through_cb(struct pci_dev *pdev, u16 alias, void *
{
struct device *dev = data;
- if (dev != &pdev->dev)
- return 0;
-
return context_setup_pass_through(dev, PCI_BUS_NUM(alias), alias & 0xff);
}
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index ee9f7cecd78e..c45464b6576a 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -3790,20 +3790,18 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
break;
case STATUSTYPE_TABLE: {
- __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
-
- watermark_percentage += ic->journal_entries / 2;
- do_div(watermark_percentage, ic->journal_entries);
- arg_count = 3;
+ arg_count = 1; /* buffer_sectors */
arg_count += !!ic->meta_dev;
arg_count += ic->sectors_per_block != 1;
arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
arg_count += ic->reset_recalculate_flag;
arg_count += ic->discard;
- arg_count += ic->mode == 'J';
- arg_count += ic->mode == 'J';
- arg_count += ic->mode == 'B';
- arg_count += ic->mode == 'B';
+ arg_count += ic->mode != 'I'; /* interleave_sectors */
+ arg_count += ic->mode == 'J'; /* journal_sectors */
+ arg_count += ic->mode == 'J'; /* journal_watermark */
+ arg_count += ic->mode == 'J'; /* commit_time */
+ arg_count += ic->mode == 'B'; /* sectors_per_bit */
+ arg_count += ic->mode == 'B'; /* bitmap_flush_interval */
arg_count += !!ic->internal_hash_alg.alg_string;
arg_count += !!ic->journal_crypt_alg.alg_string;
arg_count += !!ic->journal_mac_alg.alg_string;
@@ -3822,10 +3820,15 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" reset_recalculate");
if (ic->discard)
DMEMIT(" allow_discards");
- DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
- DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
+ if (ic->mode != 'I')
+ DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
if (ic->mode == 'J') {
+ __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
+
+ watermark_percentage += ic->journal_entries / 2;
+ do_div(watermark_percentage, ic->journal_entries);
+ DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
DMEMIT(" journal_watermark:%u", (unsigned int)watermark_percentage);
DMEMIT(" commit_time:%u", ic->autocommit_msec);
}
diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c
index b6f8e2dc7729..3f3d29af1be4 100644
--- a/drivers/md/dm-vdo/dedupe.c
+++ b/drivers/md/dm-vdo/dedupe.c
@@ -2178,6 +2178,7 @@ static int initialize_index(struct vdo *vdo, struct hash_zones *zones)
vdo_set_dedupe_index_timeout_interval(vdo_dedupe_index_timeout_interval);
vdo_set_dedupe_index_min_timer_interval(vdo_dedupe_index_min_timer_interval);
+ spin_lock_init(&zones->lock);
/*
* Since we will save up the timeouts that would have been reported but were ratelimited,
diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
index e0174da5e9fc..77b0490a1b38 100644
--- a/drivers/misc/cardreader/rtsx_usb.c
+++ b/drivers/misc/cardreader/rtsx_usb.c
@@ -286,7 +286,6 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status)
int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
{
int ret;
- u8 interrupt_val = 0;
u16 *buf;
if (!status)
@@ -309,20 +308,6 @@ int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
ret = rtsx_usb_get_status_with_bulk(ucr, status);
}
- rtsx_usb_read_register(ucr, CARD_INT_PEND, &interrupt_val);
- /* Cross check presence with interrupts */
- if (*status & XD_CD)
- if (!(interrupt_val & XD_INT))
- *status &= ~XD_CD;
-
- if (*status & SD_CD)
- if (!(interrupt_val & SD_INT))
- *status &= ~SD_CD;
-
- if (*status & MS_CD)
- if (!(interrupt_val & MS_INT))
- *status &= ~MS_CD;
-
/* usb_control_msg may return positive when success */
if (ret < 0)
return ret;
diff --git a/drivers/misc/eeprom/digsy_mtc_eeprom.c b/drivers/misc/eeprom/digsy_mtc_eeprom.c
index 88888485e6f8..ee58f7ce5bfa 100644
--- a/drivers/misc/eeprom/digsy_mtc_eeprom.c
+++ b/drivers/misc/eeprom/digsy_mtc_eeprom.c
@@ -50,7 +50,7 @@ static struct platform_device digsy_mtc_eeprom = {
};
static struct gpiod_lookup_table eeprom_spi_gpiod_table = {
- .dev_id = "spi_gpio",
+ .dev_id = "spi_gpio.1",
.table = {
GPIO_LOOKUP("gpio@b00", GPIO_EEPROM_CLK,
"sck", GPIO_ACTIVE_HIGH),
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index c3a6657dcd4a..a5f88ec97df7 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -117,6 +117,8 @@
#define MEI_DEV_ID_LNL_M 0xA870 /* Lunar Lake Point M */
+#define MEI_DEV_ID_PTL_P 0xE470 /* Panther Lake P */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 6589635f8ba3..d6ff9d82ae94 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -124,6 +124,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_LNL_M, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)},
+
/* required last entry */
{0, }
};
diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
index 35d349fee769..7be1649b1972 100644
--- a/drivers/misc/mei/vsc-tp.c
+++ b/drivers/misc/mei/vsc-tp.c
@@ -502,7 +502,7 @@ static int vsc_tp_probe(struct spi_device *spi)
if (ret)
return ret;
- tp->wakeuphost = devm_gpiod_get(dev, "wakeuphost", GPIOD_IN);
+ tp->wakeuphost = devm_gpiod_get(dev, "wakeuphostint", GPIOD_IN);
if (IS_ERR(tp->wakeuphost))
return PTR_ERR(tp->wakeuphost);
diff --git a/drivers/misc/ntsync.c b/drivers/misc/ntsync.c
index 055395cde42b..999026a1ae04 100644
--- a/drivers/misc/ntsync.c
+++ b/drivers/misc/ntsync.c
@@ -873,6 +873,7 @@ static int setup_wait(struct ntsync_device *dev,
{
int fds[NTSYNC_MAX_WAIT_COUNT + 1];
const __u32 count = args->count;
+ size_t size = array_size(count, sizeof(fds[0]));
struct ntsync_q *q;
__u32 total_count;
__u32 i, j;
@@ -880,15 +881,14 @@ static int setup_wait(struct ntsync_device *dev,
if (args->pad || (args->flags & ~NTSYNC_WAIT_REALTIME))
return -EINVAL;
- if (args->count > NTSYNC_MAX_WAIT_COUNT)
+ if (size >= sizeof(fds))
return -EINVAL;
total_count = count;
if (args->alert)
total_count++;
- if (copy_from_user(fds, u64_to_user_ptr(args->objs),
- array_size(count, sizeof(*fds))))
+ if (copy_from_user(fds, u64_to_user_ptr(args->objs), size))
return -EFAULT;
if (args->alert)
fds[count] = args->alert;
@@ -1208,6 +1208,7 @@ static struct miscdevice ntsync_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = NTSYNC_NAME,
.fops = &ntsync_fops,
+ .mode = 0666,
};
module_misc_device(ntsync_misc);
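
The ntsync change above folds the count check and the copy length into one comparison: array_size() saturates to SIZE_MAX on multiplication overflow, and the on-stack fds[] array keeps one extra slot for the alert fd, so "size >= sizeof(fds)" rejects both oversized and overflowing counts. A userspace sketch of that bound check, with a stand-in constant rather than NTSYNC_MAX_WAIT_COUNT:

    /*
     * Userspace sketch (not the ntsync code) of the saturating-size
     * bound check used in the hunk above.
     */
    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define MAX_WAIT_COUNT 64    /* stand-in for NTSYNC_MAX_WAIT_COUNT */

    static size_t array_size_sat(size_t n, size_t elem)
    {
        /* saturating multiply, like the kernel's array_size() helper */
        if (elem && n > SIZE_MAX / elem)
            return SIZE_MAX;
        return n * elem;
    }

    static int check_count(size_t count)
    {
        int fds[MAX_WAIT_COUNT + 1];
        size_t size = array_size_sat(count, sizeof(fds[0]));

        /* count == MAX_WAIT_COUNT still passes: its size is strictly
         * smaller than sizeof(fds) thanks to the reserved alert slot */
        return size >= sizeof(fds) ? -1 : 0;
    }

    int main(void)
    {
        printf("count=64   -> %d\n", check_count(64));           /* ok */
        printf("count=65   -> %d\n", check_count(65));           /* -1 */
        printf("huge count -> %d\n", check_count(SIZE_MAX / 2)); /* -1 */
        return 0;
    }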
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 7fea00c7ca8a..c60386bf2d1a 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -745,7 +745,7 @@ err:
if (cfv->vr_rx)
vdev->vringh_config->del_vrhs(cfv->vdev);
- if (cfv->vdev)
+ if (cfv->vq_tx)
vdev->config->del_vqs(cfv->vdev);
free_netdev(netdev);
return err;
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 1c83af805209..5883eb93efb1 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2591,7 +2591,8 @@ mt7531_setup_common(struct dsa_switch *ds)
if (ret < 0)
return ret;
- return 0;
+ /* Setup VLAN ID 0 for VLAN-unaware bridges */
+ return mt7530_setup_vlan0(priv);
}
static int
@@ -2687,11 +2688,6 @@ mt7531_setup(struct dsa_switch *ds)
if (ret)
return ret;
- /* Setup VLAN ID 0 for VLAN-unaware bridges */
- ret = mt7530_setup_vlan0(priv);
- if (ret)
- return ret;
-
ds->assisted_learning_on_cpu_port = true;
ds->mtu_enforcement_ingress = true;
diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
index 6989972eebc3..d6eb6713e5f6 100644
--- a/drivers/net/dsa/realtek/Kconfig
+++ b/drivers/net/dsa/realtek/Kconfig
@@ -43,4 +43,10 @@ config NET_DSA_REALTEK_RTL8366RB
help
Select to enable support for Realtek RTL8366RB.
+config NET_DSA_REALTEK_RTL8366RB_LEDS
+ bool
+ depends on (LEDS_CLASS=y || LEDS_CLASS=NET_DSA_REALTEK_RTL8366RB)
+ depends on NET_DSA_REALTEK_RTL8366RB
+ default NET_DSA_REALTEK_RTL8366RB
+
endif
diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile
index 35491dc20d6d..17367bcba496 100644
--- a/drivers/net/dsa/realtek/Makefile
+++ b/drivers/net/dsa/realtek/Makefile
@@ -12,4 +12,7 @@ endif
obj-$(CONFIG_NET_DSA_REALTEK_RTL8366RB) += rtl8366.o
rtl8366-objs := rtl8366-core.o rtl8366rb.o
+ifdef CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS
+rtl8366-objs += rtl8366rb-leds.o
+endif
obj-$(CONFIG_NET_DSA_REALTEK_RTL8365MB) += rtl8365mb.o
diff --git a/drivers/net/dsa/realtek/rtl8366rb-leds.c b/drivers/net/dsa/realtek/rtl8366rb-leds.c
new file mode 100644
index 000000000000..99c890681ae6
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl8366rb-leds.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <net/dsa.h>
+#include "rtl83xx.h"
+#include "rtl8366rb.h"
+
+static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port)
+{
+ switch (led_group) {
+ case 0:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ case 1:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ case 2:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ case 3:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ default:
+ return 0;
+ }
+}
+
+static int rb8366rb_get_port_led(struct rtl8366rb_led *led)
+{
+ struct realtek_priv *priv = led->priv;
+ u8 led_group = led->led_group;
+ u8 port_num = led->port_num;
+ int ret;
+ u32 val;
+
+ ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group),
+ &val);
+ if (ret) {
+ dev_err(priv->dev, "error reading LED on port %d group %d\n",
+ led_group, port_num);
+ return ret;
+ }
+
+ return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num));
+}
+
+static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable)
+{
+ struct realtek_priv *priv = led->priv;
+ u8 led_group = led->led_group;
+ u8 port_num = led->port_num;
+ int ret;
+
+ ret = regmap_update_bits(priv->map,
+ RTL8366RB_LED_X_X_CTRL_REG(led_group),
+ rtl8366rb_led_group_port_mask(led_group,
+ port_num),
+ enable ? 0xffff : 0);
+ if (ret) {
+ dev_err(priv->dev, "error updating LED on port %d group %d\n",
+ led_group, port_num);
+ return ret;
+ }
+
+ /* Change the LED group to manual controlled LEDs if required */
+ ret = rb8366rb_set_ledgroup_mode(priv, led_group,
+ RTL8366RB_LEDGROUP_FORCE);
+
+ if (ret) {
+ dev_err(priv->dev, "error updating LED GROUP group %d\n",
+ led_group);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev,
+ enum led_brightness brightness)
+{
+ struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led,
+ cdev);
+
+ return rb8366rb_set_port_led(led, brightness == LED_ON);
+}
+
+static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp,
+ struct fwnode_handle *led_fwnode)
+{
+ struct rtl8366rb *rb = priv->chip_data;
+ struct led_init_data init_data = { };
+ enum led_default_state state;
+ struct rtl8366rb_led *led;
+ u32 led_group;
+ int ret;
+
+ ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group);
+ if (ret)
+ return ret;
+
+ if (led_group >= RTL8366RB_NUM_LEDGROUPS) {
+ dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
+ led_group, dp->index);
+ return -EINVAL;
+ }
+
+ led = &rb->leds[dp->index][led_group];
+ led->port_num = dp->index;
+ led->led_group = led_group;
+ led->priv = priv;
+
+ state = led_init_default_state_get(led_fwnode);
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ led->cdev.brightness = 1;
+ rb8366rb_set_port_led(led, 1);
+ break;
+ case LEDS_DEFSTATE_KEEP:
+ led->cdev.brightness =
+ rb8366rb_get_port_led(led);
+ break;
+ case LEDS_DEFSTATE_OFF:
+ default:
+ led->cdev.brightness = 0;
+ rb8366rb_set_port_led(led, 0);
+ }
+
+ led->cdev.max_brightness = 1;
+ led->cdev.brightness_set_blocking =
+ rtl8366rb_cled_brightness_set_blocking;
+ init_data.fwnode = led_fwnode;
+ init_data.devname_mandatory = true;
+
+ init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d",
+ dp->ds->index, dp->index, led_group);
+ if (!init_data.devicename)
+ return -ENOMEM;
+
+ ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data);
+ if (ret) {
+ dev_warn(priv->dev, "Failed to init LED %d for port %d",
+ led_group, dp->index);
+ return ret;
+ }
+
+ return 0;
+}
+
+int rtl8366rb_setup_leds(struct realtek_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+ struct device_node *leds_np;
+ struct dsa_port *dp;
+ int ret = 0;
+
+ dsa_switch_for_each_port(dp, ds) {
+ if (!dp->dn)
+ continue;
+
+ leds_np = of_get_child_by_name(dp->dn, "leds");
+ if (!leds_np) {
+ dev_dbg(priv->dev, "No leds defined for port %d",
+ dp->index);
+ continue;
+ }
+
+ for_each_child_of_node_scoped(leds_np, led_np) {
+ ret = rtl8366rb_setup_led(priv, dp,
+ of_fwnode_handle(led_np));
+ if (ret)
+ break;
+ }
+
+ of_node_put(leds_np);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
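
The new rtl8366rb-leds.c builds a per-port bit inside a 6-bit register field with FIELD_PREP() over GENMASK() masks; per the defines moved into rtl8366rb.h below, groups 0/2 use bits 5:0 and groups 1/3 use bits 11:6 of the 0x0432/0x0433 control registers. A userspace illustration of that arithmetic with simplified macro re-definitions (assuming a 64-bit unsigned long; the real versions live in linux/bits.h and linux/bitfield.h):

    /*
     * Simplified re-definitions for illustration only; not the kernel
     * headers.  Shows where BIT(port) lands inside each LED field.
     */
    #include <stdio.h>

    #define BIT(n)                (1UL << (n))
    #define GENMASK(h, l)         (((~0UL) << (l)) & (~0UL >> (63 - (h))))
    #define FIELD_PREP(mask, val) (((val) << __builtin_ctzl(mask)) & (mask))

    #define LED_0_X_CTRL_MASK     GENMASK(5, 0)   /* low port field  */
    #define LED_X_1_CTRL_MASK     GENMASK(11, 6)  /* high port field */

    int main(void)
    {
        unsigned int port;

        for (port = 0; port < 6; port++)
            printf("port%u: low field 0x%03lx, high field 0x%03lx\n",
                   port, FIELD_PREP(LED_0_X_CTRL_MASK, BIT(port)),
                   port, FIELD_PREP(LED_X_1_CTRL_MASK, BIT(port)));
        return 0;
    }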
diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index 4c4a95d4380c..f54771cab56d 100644
--- a/drivers/net/dsa/realtek/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -27,11 +27,7 @@
#include "realtek-smi.h"
#include "realtek-mdio.h"
#include "rtl83xx.h"
-
-#define RTL8366RB_PORT_NUM_CPU 5
-#define RTL8366RB_NUM_PORTS 6
-#define RTL8366RB_PHY_NO_MAX 4
-#define RTL8366RB_PHY_ADDR_MAX 31
+#include "rtl8366rb.h"
/* Switch Global Configuration register */
#define RTL8366RB_SGCR 0x0000
@@ -176,39 +172,6 @@
*/
#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f
-/* LED control registers */
-/* The LED blink rate is global; it is used by all triggers in all groups. */
-#define RTL8366RB_LED_BLINKRATE_REG 0x0430
-#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
-#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
-#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
-#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
-#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
-#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
-#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
-
-/* LED trigger event for each group */
-#define RTL8366RB_LED_CTRL_REG 0x0431
-#define RTL8366RB_LED_CTRL_OFFSET(led_group) \
- (4 * (led_group))
-#define RTL8366RB_LED_CTRL_MASK(led_group) \
- (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
-
-/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
- * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
- * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored.
- */
-#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
-#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
-#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \
- ((led_group) <= 1 ? \
- RTL8366RB_LED_0_1_CTRL_REG : \
- RTL8366RB_LED_2_3_CTRL_REG)
-#define RTL8366RB_LED_0_X_CTRL_MASK GENMASK(5, 0)
-#define RTL8366RB_LED_X_1_CTRL_MASK GENMASK(11, 6)
-#define RTL8366RB_LED_2_X_CTRL_MASK GENMASK(5, 0)
-#define RTL8366RB_LED_X_3_CTRL_MASK GENMASK(11, 6)
-
#define RTL8366RB_MIB_COUNT 33
#define RTL8366RB_GLOBAL_MIB_COUNT 1
#define RTL8366RB_MIB_COUNTER_PORT_OFFSET 0x0050
@@ -244,7 +207,6 @@
#define RTL8366RB_PORT_STATUS_AN_MASK 0x0080
#define RTL8366RB_NUM_VLANS 16
-#define RTL8366RB_NUM_LEDGROUPS 4
#define RTL8366RB_NUM_VIDS 4096
#define RTL8366RB_PRIORITYMAX 7
#define RTL8366RB_NUM_FIDS 8
@@ -351,46 +313,6 @@
#define RTL8366RB_GREEN_FEATURE_TX BIT(0)
#define RTL8366RB_GREEN_FEATURE_RX BIT(2)
-enum rtl8366_ledgroup_mode {
- RTL8366RB_LEDGROUP_OFF = 0x0,
- RTL8366RB_LEDGROUP_DUP_COL = 0x1,
- RTL8366RB_LEDGROUP_LINK_ACT = 0x2,
- RTL8366RB_LEDGROUP_SPD1000 = 0x3,
- RTL8366RB_LEDGROUP_SPD100 = 0x4,
- RTL8366RB_LEDGROUP_SPD10 = 0x5,
- RTL8366RB_LEDGROUP_SPD1000_ACT = 0x6,
- RTL8366RB_LEDGROUP_SPD100_ACT = 0x7,
- RTL8366RB_LEDGROUP_SPD10_ACT = 0x8,
- RTL8366RB_LEDGROUP_SPD100_10_ACT = 0x9,
- RTL8366RB_LEDGROUP_FIBER = 0xa,
- RTL8366RB_LEDGROUP_AN_FAULT = 0xb,
- RTL8366RB_LEDGROUP_LINK_RX = 0xc,
- RTL8366RB_LEDGROUP_LINK_TX = 0xd,
- RTL8366RB_LEDGROUP_MASTER = 0xe,
- RTL8366RB_LEDGROUP_FORCE = 0xf,
-
- __RTL8366RB_LEDGROUP_MODE_MAX
-};
-
-struct rtl8366rb_led {
- u8 port_num;
- u8 led_group;
- struct realtek_priv *priv;
- struct led_classdev cdev;
-};
-
-/**
- * struct rtl8366rb - RTL8366RB-specific data
- * @max_mtu: per-port max MTU setting
- * @pvid_enabled: if PVID is set for respective port
- * @leds: per-port and per-ledgroup led info
- */
-struct rtl8366rb {
- unsigned int max_mtu[RTL8366RB_NUM_PORTS];
- bool pvid_enabled[RTL8366RB_NUM_PORTS];
- struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS];
-};
-
static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
{ 0, 0, 4, "IfInOctets" },
{ 0, 4, 4, "EtherStatsOctets" },
@@ -831,9 +753,10 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
return 0;
}
-static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
- u8 led_group,
- enum rtl8366_ledgroup_mode mode)
+/* This code is used also with LEDs disabled */
+int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+ u8 led_group,
+ enum rtl8366_ledgroup_mode mode)
{
int ret;
u32 val;
@@ -850,144 +773,7 @@ static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
return 0;
}
-static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port)
-{
- switch (led_group) {
- case 0:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
- case 1:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
- case 2:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
- case 3:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
- default:
- return 0;
- }
-}
-
-static int rb8366rb_get_port_led(struct rtl8366rb_led *led)
-{
- struct realtek_priv *priv = led->priv;
- u8 led_group = led->led_group;
- u8 port_num = led->port_num;
- int ret;
- u32 val;
-
- ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group),
- &val);
- if (ret) {
- dev_err(priv->dev, "error reading LED on port %d group %d\n",
- led_group, port_num);
- return ret;
- }
-
- return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num));
-}
-
-static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable)
-{
- struct realtek_priv *priv = led->priv;
- u8 led_group = led->led_group;
- u8 port_num = led->port_num;
- int ret;
-
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_X_X_CTRL_REG(led_group),
- rtl8366rb_led_group_port_mask(led_group,
- port_num),
- enable ? 0xffff : 0);
- if (ret) {
- dev_err(priv->dev, "error updating LED on port %d group %d\n",
- led_group, port_num);
- return ret;
- }
-
- /* Change the LED group to manual controlled LEDs if required */
- ret = rb8366rb_set_ledgroup_mode(priv, led_group,
- RTL8366RB_LEDGROUP_FORCE);
-
- if (ret) {
- dev_err(priv->dev, "error updating LED GROUP group %d\n",
- led_group);
- return ret;
- }
-
- return 0;
-}
-
-static int
-rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev,
- enum led_brightness brightness)
-{
- struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led,
- cdev);
-
- return rb8366rb_set_port_led(led, brightness == LED_ON);
-}
-
-static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp,
- struct fwnode_handle *led_fwnode)
-{
- struct rtl8366rb *rb = priv->chip_data;
- struct led_init_data init_data = { };
- enum led_default_state state;
- struct rtl8366rb_led *led;
- u32 led_group;
- int ret;
-
- ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group);
- if (ret)
- return ret;
-
- if (led_group >= RTL8366RB_NUM_LEDGROUPS) {
- dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
- led_group, dp->index);
- return -EINVAL;
- }
-
- led = &rb->leds[dp->index][led_group];
- led->port_num = dp->index;
- led->led_group = led_group;
- led->priv = priv;
-
- state = led_init_default_state_get(led_fwnode);
- switch (state) {
- case LEDS_DEFSTATE_ON:
- led->cdev.brightness = 1;
- rb8366rb_set_port_led(led, 1);
- break;
- case LEDS_DEFSTATE_KEEP:
- led->cdev.brightness =
- rb8366rb_get_port_led(led);
- break;
- case LEDS_DEFSTATE_OFF:
- default:
- led->cdev.brightness = 0;
- rb8366rb_set_port_led(led, 0);
- }
-
- led->cdev.max_brightness = 1;
- led->cdev.brightness_set_blocking =
- rtl8366rb_cled_brightness_set_blocking;
- init_data.fwnode = led_fwnode;
- init_data.devname_mandatory = true;
-
- init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d",
- dp->ds->index, dp->index, led_group);
- if (!init_data.devicename)
- return -ENOMEM;
-
- ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data);
- if (ret) {
- dev_warn(priv->dev, "Failed to init LED %d for port %d",
- led_group, dp->index);
- return ret;
- }
-
- return 0;
-}
-
+/* This code is used also with LEDs disabled */
static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv)
{
int ret = 0;
@@ -1008,38 +794,6 @@ static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv)
return ret;
}
-static int rtl8366rb_setup_leds(struct realtek_priv *priv)
-{
- struct dsa_switch *ds = &priv->ds;
- struct device_node *leds_np;
- struct dsa_port *dp;
- int ret = 0;
-
- dsa_switch_for_each_port(dp, ds) {
- if (!dp->dn)
- continue;
-
- leds_np = of_get_child_by_name(dp->dn, "leds");
- if (!leds_np) {
- dev_dbg(priv->dev, "No leds defined for port %d",
- dp->index);
- continue;
- }
-
- for_each_child_of_node_scoped(leds_np, led_np) {
- ret = rtl8366rb_setup_led(priv, dp,
- of_fwnode_handle(led_np));
- if (ret)
- break;
- }
-
- of_node_put(leds_np);
- if (ret)
- return ret;
- }
- return 0;
-}
-
static int rtl8366rb_setup(struct dsa_switch *ds)
{
struct realtek_priv *priv = ds->priv;
diff --git a/drivers/net/dsa/realtek/rtl8366rb.h b/drivers/net/dsa/realtek/rtl8366rb.h
new file mode 100644
index 000000000000..685ff3275faa
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl8366rb.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _RTL8366RB_H
+#define _RTL8366RB_H
+
+#include "realtek.h"
+
+#define RTL8366RB_PORT_NUM_CPU 5
+#define RTL8366RB_NUM_PORTS 6
+#define RTL8366RB_PHY_NO_MAX 4
+#define RTL8366RB_NUM_LEDGROUPS 4
+#define RTL8366RB_PHY_ADDR_MAX 31
+
+/* LED control registers */
+/* The LED blink rate is global; it is used by all triggers in all groups. */
+#define RTL8366RB_LED_BLINKRATE_REG 0x0430
+#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
+#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
+#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
+#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
+#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
+#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
+#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
+
+/* LED trigger event for each group */
+#define RTL8366RB_LED_CTRL_REG 0x0431
+#define RTL8366RB_LED_CTRL_OFFSET(led_group) \
+ (4 * (led_group))
+#define RTL8366RB_LED_CTRL_MASK(led_group) \
+ (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
+
+/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
+ * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
+ * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored.
+ */
+#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
+#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
+#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \
+ ((led_group) <= 1 ? \
+ RTL8366RB_LED_0_1_CTRL_REG : \
+ RTL8366RB_LED_2_3_CTRL_REG)
+#define RTL8366RB_LED_0_X_CTRL_MASK GENMASK(5, 0)
+#define RTL8366RB_LED_X_1_CTRL_MASK GENMASK(11, 6)
+#define RTL8366RB_LED_2_X_CTRL_MASK GENMASK(5, 0)
+#define RTL8366RB_LED_X_3_CTRL_MASK GENMASK(11, 6)
+
+enum rtl8366_ledgroup_mode {
+ RTL8366RB_LEDGROUP_OFF = 0x0,
+ RTL8366RB_LEDGROUP_DUP_COL = 0x1,
+ RTL8366RB_LEDGROUP_LINK_ACT = 0x2,
+ RTL8366RB_LEDGROUP_SPD1000 = 0x3,
+ RTL8366RB_LEDGROUP_SPD100 = 0x4,
+ RTL8366RB_LEDGROUP_SPD10 = 0x5,
+ RTL8366RB_LEDGROUP_SPD1000_ACT = 0x6,
+ RTL8366RB_LEDGROUP_SPD100_ACT = 0x7,
+ RTL8366RB_LEDGROUP_SPD10_ACT = 0x8,
+ RTL8366RB_LEDGROUP_SPD100_10_ACT = 0x9,
+ RTL8366RB_LEDGROUP_FIBER = 0xa,
+ RTL8366RB_LEDGROUP_AN_FAULT = 0xb,
+ RTL8366RB_LEDGROUP_LINK_RX = 0xc,
+ RTL8366RB_LEDGROUP_LINK_TX = 0xd,
+ RTL8366RB_LEDGROUP_MASTER = 0xe,
+ RTL8366RB_LEDGROUP_FORCE = 0xf,
+
+ __RTL8366RB_LEDGROUP_MODE_MAX
+};
+
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS)
+
+struct rtl8366rb_led {
+ u8 port_num;
+ u8 led_group;
+ struct realtek_priv *priv;
+ struct led_classdev cdev;
+};
+
+int rtl8366rb_setup_leds(struct realtek_priv *priv);
+
+#else
+
+static inline int rtl8366rb_setup_leds(struct realtek_priv *priv)
+{
+ return 0;
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS) */
+#endif /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS) */
+
+/**
+ * struct rtl8366rb - RTL8366RB-specific data
+ * @max_mtu: per-port max MTU setting
+ * @pvid_enabled: if PVID is set for respective port
+ * @leds: per-port and per-ledgroup led info
+ */
+struct rtl8366rb {
+ unsigned int max_mtu[RTL8366RB_NUM_PORTS];
+ bool pvid_enabled[RTL8366RB_NUM_PORTS];
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS)
+ struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS];
+#endif
+};
+
+/* This code is used also with LEDs disabled */
+int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+ u8 led_group,
+ enum rtl8366_ledgroup_mode mode);
+
+#endif /* _RTL8366RB_H */
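
The header above keeps a static inline stub for rtl8366rb_setup_leds() when the LED option is off, so the core driver can call it unconditionally and the LED objects are simply never built. A standalone sketch of the same pattern, using a hypothetical CONFIG_DEMO_LEDS symbol rather than the real Kconfig machinery:

    /*
     * Illustration only: CONFIG_DEMO_LEDS is a hypothetical symbol, not
     * the real IS_ENABLED() handling.  Without it defined, the stub is
     * used and the program still builds and runs.
     */
    #include <stdio.h>

    struct demo_priv { int ports; };

    #ifdef CONFIG_DEMO_LEDS
    int demo_setup_leds(struct demo_priv *priv);  /* real code in its own .o */
    #else
    static inline int demo_setup_leds(struct demo_priv *priv)
    {
        return 0;  /* LEDs compiled out: succeed without doing anything */
    }
    #endif

    int main(void)
    {
        struct demo_priv priv = { .ports = 6 };

        /* caller needs no #ifdef of its own */
        printf("setup_leds -> %d\n", demo_setup_leds(&priv));
        return 0;
    }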
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 5740c98d8c9f..2847278d9cd4 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -1279,6 +1279,8 @@ struct macb {
struct clk *rx_clk;
struct clk *tsu_clk;
struct net_device *dev;
+ /* Protects hw_stats and ethtool_stats */
+ spinlock_t stats_lock;
union {
struct macb_stats macb;
struct gem_stats gem;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 48496209fb16..c1f57d96e63f 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1978,10 +1978,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (status & MACB_BIT(ISR_ROVR)) {
/* We missed at least one packet */
+ spin_lock(&bp->stats_lock);
if (macb_is_gem(bp))
bp->hw_stats.gem.rx_overruns++;
else
bp->hw_stats.macb.rx_overruns++;
+ spin_unlock(&bp->stats_lock);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
@@ -3102,6 +3104,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
if (!netif_running(bp->dev))
return nstat;
+ spin_lock_irq(&bp->stats_lock);
gem_update_stats(bp);
nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
@@ -3131,6 +3134,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
nstat->tx_fifo_errors = hwstat->tx_underrun;
+ spin_unlock_irq(&bp->stats_lock);
return nstat;
}
@@ -3138,12 +3142,13 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
static void gem_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
- struct macb *bp;
+ struct macb *bp = netdev_priv(dev);
- bp = netdev_priv(dev);
+ spin_lock_irq(&bp->stats_lock);
gem_update_stats(bp);
memcpy(data, &bp->ethtool_stats, sizeof(u64)
* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
+ spin_unlock_irq(&bp->stats_lock);
}
static int gem_get_sset_count(struct net_device *dev, int sset)
@@ -3193,6 +3198,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
return gem_get_stats(bp);
/* read stats from hardware */
+ spin_lock_irq(&bp->stats_lock);
macb_update_stats(bp);
/* Convert HW stats into netdevice stats */
@@ -3226,6 +3232,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
nstat->tx_fifo_errors = hwstat->tx_underruns;
/* Don't know about heartbeat or window errors... */
+ spin_unlock_irq(&bp->stats_lock);
return nstat;
}
@@ -5097,6 +5104,7 @@ static int macb_probe(struct platform_device *pdev)
}
}
spin_lock_init(&bp->lock);
+ spin_lock_init(&bp->stats_lock);
/* setup capabilities */
macb_configure_caps(bp, macb_config);
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index e48b861e4ce1..270ff9aab335 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -562,7 +562,7 @@ struct be_adapter {
struct be_dma_mem mbox_mem_alloced;
struct be_mcc_obj mcc_obj;
- struct mutex mcc_lock; /* For serializing mcc cmds to BE card */
+ spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
u16 cfg_num_rx_irqs; /* configured via set-channels */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 61adcebeef01..51b8377edd1d 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -575,7 +575,7 @@ int be_process_mcc(struct be_adapter *adapter)
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
-#define mcc_timeout 12000 /* 12s timeout */
+#define mcc_timeout 120000 /* 12s timeout */
int i, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
@@ -589,7 +589,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
if (atomic_read(&mcc_obj->q.used) == 0)
break;
- usleep_range(500, 1000);
+ udelay(100);
}
if (i == mcc_timeout) {
dev_err(&adapter->pdev->dev, "FW not responding\n");
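
The two be2net hunks above trade a 12000-iteration sleeping poll for a 120000-iteration udelay(100) busy-wait, presumably because the MCC command path now runs under the BH-disabling spinlock introduced further down and may no longer sleep; the overall 12 s budget is preserved. A quick userspace check of that arithmetic:

    /* Userspace arithmetic check only, not driver code. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long old_iters = 12000, new_iters = 120000;

        /* old loop slept 500-1000 us per iteration */
        printf("old budget: %lu - %lu ms\n",
               old_iters * 500 / 1000, old_iters * 1000 / 1000);
        /* new loop busy-waits 100 us per iteration */
        printf("new budget: %lu ms\n", new_iters * 100 / 1000);
        return 0;
    }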
@@ -866,7 +866,7 @@ static bool use_mcc(struct be_adapter *adapter)
static int be_cmd_lock(struct be_adapter *adapter)
{
if (use_mcc(adapter)) {
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
return 0;
} else {
return mutex_lock_interruptible(&adapter->mbox_lock);
@@ -877,7 +877,7 @@ static int be_cmd_lock(struct be_adapter *adapter)
static void be_cmd_unlock(struct be_adapter *adapter)
{
if (use_mcc(adapter))
- return mutex_unlock(&adapter->mcc_lock);
+ return spin_unlock_bh(&adapter->mcc_lock);
else
return mutex_unlock(&adapter->mbox_lock);
}
@@ -1047,7 +1047,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
struct be_cmd_req_mac_query *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1076,7 +1076,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1088,7 +1088,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
struct be_cmd_req_pmac_add *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1113,7 +1113,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;
@@ -1131,7 +1131,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
if (pmac_id == -1)
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1151,7 +1151,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1414,7 +1414,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_dma_mem *q_mem = &rxq->dma_mem;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1444,7 +1444,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1508,7 +1508,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
struct be_cmd_req_q_destroy *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1525,7 +1525,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
q->created = false;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1593,7 +1593,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
struct be_cmd_req_hdr *hdr;
int status = 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1621,7 +1621,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
adapter->stats_cmd_sent = true;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1637,7 +1637,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
CMD_SUBSYSTEM_ETH))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1660,7 +1660,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
adapter->stats_cmd_sent = true;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1697,7 +1697,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
struct be_cmd_req_link_status *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
if (link_status)
*link_status = LINK_DOWN;
@@ -1736,7 +1736,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1747,7 +1747,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
struct be_cmd_req_get_cntl_addnl_attribs *req;
int status = 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1762,7 +1762,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
status = be_mcc_notify(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1811,7 +1811,7 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
if (!get_fat_cmd.va)
return -ENOMEM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
while (total_size) {
buf_size = min(total_size, (u32)60 * 1024);
@@ -1849,9 +1849,9 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
log_offset += buf_size;
}
err:
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
get_fat_cmd.va, get_fat_cmd.dma);
- mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1862,7 +1862,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
struct be_cmd_req_get_fw_version *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1885,7 +1885,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
sizeof(adapter->fw_on_flash));
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1899,7 +1899,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
struct be_cmd_req_modify_eq_delay *req;
int status = 0, i;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1922,7 +1922,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
status = be_mcc_notify(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1949,7 +1949,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
struct be_cmd_req_vlan_config *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1971,7 +1971,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1982,7 +1982,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
struct be_cmd_req_rx_filter *req = mem->va;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2015,7 +2015,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2046,7 +2046,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2066,7 +2066,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
return -EOPNOTSUPP;
@@ -2085,7 +2085,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2108,7 +2108,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2189,7 +2189,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2214,7 +2214,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2226,7 +2226,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
struct be_cmd_req_enable_disable_beacon *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2247,7 +2247,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2258,7 +2258,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
struct be_cmd_req_get_beacon_state *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2282,7 +2282,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2306,7 +2306,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
return -ENOMEM;
}
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2328,7 +2328,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
memcpy(data, resp->page_data + off, len);
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
return status;
}
@@ -2345,7 +2345,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
void *ctxt = NULL;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
adapter->flash_status = 0;
wrb = wrb_from_mccq(adapter);
@@ -2387,7 +2387,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(60000)))
@@ -2406,7 +2406,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2460,7 +2460,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
struct be_mcc_wrb *wrb;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2478,7 +2478,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2491,7 +2491,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
struct lancer_cmd_resp_read_object *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2525,7 +2525,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
}
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2537,7 +2537,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_cmd_write_flashrom *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
adapter->flash_status = 0;
wrb = wrb_from_mccq(adapter);
@@ -2562,7 +2562,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(40000)))
@@ -2573,7 +2573,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2584,7 +2584,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
struct be_mcc_wrb *wrb;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2611,7 +2611,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
memcpy(flashed_crc, req->crc, 4);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3217,7 +3217,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
struct be_cmd_req_acpi_wol_magic_config *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3234,7 +3234,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3249,7 +3249,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3272,7 +3272,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
@@ -3281,7 +3281,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3298,7 +3298,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3324,7 +3324,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
if (status)
goto err;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
wait_for_completion(&adapter->et_cmd_compl);
resp = embedded_payload(wrb);
@@ -3332,7 +3332,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
return status;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3348,7 +3348,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3382,7 +3382,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3393,7 +3393,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
struct be_cmd_req_seeprom_read *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3409,7 +3409,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3424,7 +3424,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3469,7 +3469,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
}
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3479,7 +3479,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
struct be_cmd_req_set_qos *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3499,7 +3499,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3611,7 +3611,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
struct be_cmd_req_get_fn_privileges *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3643,7 +3643,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3655,7 +3655,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
struct be_cmd_req_set_fn_privileges *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3675,7 +3675,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3707,7 +3707,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
return -ENOMEM;
}
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3771,7 +3771,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
}
out:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
get_mac_list_cmd.va, get_mac_list_cmd.dma);
return status;
@@ -3831,7 +3831,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
if (!cmd.va)
return -ENOMEM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3853,7 +3853,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
err:
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3889,7 +3889,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3930,7 +3930,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3944,7 +3944,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
int status;
u16 vid;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3991,7 +3991,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4190,7 +4190,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
struct be_cmd_req_set_ext_fat_caps *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4206,7 +4206,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4684,7 +4684,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
if (iface == 0xFFFFFFFF)
return -1;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4701,7 +4701,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4735,7 +4735,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
struct be_cmd_resp_get_iface_list *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4756,7 +4756,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4850,7 +4850,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
if (BEx_chip(adapter))
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4868,7 +4868,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
req->enable = 1;
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4941,7 +4941,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
u32 link_config = 0;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4969,7 +4969,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -5000,8 +5000,7 @@ int be_cmd_set_features(struct be_adapter *adapter)
struct be_mcc_wrb *wrb;
int status;
- if (mutex_lock_interruptible(&adapter->mcc_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -5039,7 +5038,7 @@ err:
dev_info(&adapter->pdev->dev,
"Adapter does not support HW error recovery\n");
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -5053,7 +5052,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
struct be_cmd_resp_hdr *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -5076,7 +5075,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 875fe379eea2..3d2e21592119 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5667,8 +5667,8 @@ static int be_drv_init(struct be_adapter *adapter)
}
mutex_init(&adapter->mbox_lock);
- mutex_init(&adapter->mcc_lock);
mutex_init(&adapter->rx_filter_lock);
+ spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
init_completion(&adapter->et_cmd_compl);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 6a6fc819dfde..2106861463e4 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -167,6 +167,24 @@ static bool enetc_skb_is_tcp(struct sk_buff *skb)
return skb->csum_offset == offsetof(struct tcphdr, check);
}
+/**
+ * enetc_unwind_tx_frame() - Unwind the DMA mappings of a multi-buffer Tx frame
+ * @tx_ring: Pointer to the Tx ring on which the buffer descriptors are located
+ * @count: Number of Tx buffer descriptors which need to be unmapped
+ * @i: Index of the last successfully mapped Tx buffer descriptor
+ */
+static void enetc_unwind_tx_frame(struct enetc_bdr *tx_ring, int count, int i)
+{
+ while (count--) {
+ struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
+
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ }
+}
+
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
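
The helper added above walks the Tx ring backwards from the last successfully mapped descriptor, wrapping from index 0 to bd_count - 1, and frees each software buffer descriptor; both DMA-error unwind paths later in this file now share it. A userspace sketch of the same backwards walk, with the free step reduced to a printf:

    /* Userspace sketch of the ring unwind loop, not the driver code. */
    #include <stdio.h>

    static void unwind(int bd_count, int count, int i)
    {
        while (count--) {
            printf("free descriptor %d\n", i);
            if (i == 0)
                i = bd_count;
            i--;
        }
    }

    int main(void)
    {
        /* e.g. 8-entry ring, 3 descriptors used, the last one at index 1 */
        unwind(8, 3, 1);    /* frees 1, 0, 7 */
        return 0;
    }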
@@ -279,9 +297,11 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
}
if (do_onestep_tstamp) {
- u32 lo, hi, val;
- u64 sec, nsec;
+ __be32 new_sec_l, new_nsec;
+ u32 lo, hi, nsec, val;
+ __be16 new_sec_h;
u8 *data;
+ u64 sec;
lo = enetc_rd_hot(hw, ENETC_SICTR0);
hi = enetc_rd_hot(hw, ENETC_SICTR1);
@@ -295,13 +315,38 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
/* Update originTimestamp field of Sync packet
* - 48 bits seconds field
* - 32 bits nanoseconds field
+ *
+ * In addition, the UDP checksum needs to be updated
+ * by software after updating originTimestamp field,
+ * otherwise the hardware will calculate the wrong
+ * checksum when updating the correction field and
+ * update it to the packet.
*/
data = skb_mac_header(skb);
- *(__be16 *)(data + offset2) =
- htons((sec >> 32) & 0xffff);
- *(__be32 *)(data + offset2 + 2) =
- htonl(sec & 0xffffffff);
- *(__be32 *)(data + offset2 + 6) = htonl(nsec);
+ new_sec_h = htons((sec >> 32) & 0xffff);
+ new_sec_l = htonl(sec & 0xffffffff);
+ new_nsec = htonl(nsec);
+ if (udp) {
+ struct udphdr *uh = udp_hdr(skb);
+ __be32 old_sec_l, old_nsec;
+ __be16 old_sec_h;
+
+ old_sec_h = *(__be16 *)(data + offset2);
+ inet_proto_csum_replace2(&uh->check, skb, old_sec_h,
+ new_sec_h, false);
+
+ old_sec_l = *(__be32 *)(data + offset2 + 2);
+ inet_proto_csum_replace4(&uh->check, skb, old_sec_l,
+ new_sec_l, false);
+
+ old_nsec = *(__be32 *)(data + offset2 + 6);
+ inet_proto_csum_replace4(&uh->check, skb, old_nsec,
+ new_nsec, false);
+ }
+
+ *(__be16 *)(data + offset2) = new_sec_h;
+ *(__be32 *)(data + offset2 + 2) = new_sec_l;
+ *(__be32 *)(data + offset2 + 6) = new_nsec;
/* Configure single-step register */
val = ENETC_PM0_SINGLE_STEP_EN;
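
The hunk above patches the UDP checksum with inet_proto_csum_replace2()/replace4() whenever it rewrites the originTimestamp bytes, since, as the new comment notes, the hardware would otherwise compute the wrong checksum when it later updates the correction field. A userspace sketch of the underlying incremental ones-complement update (RFC 1624 style), not the kernel helpers themselves:

    /*
     * Userspace sketch: patch a ones-complement checksum when one 16-bit
     * word of the covered data changes, then verify against a full
     * recompute.
     */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    static uint16_t csum_fold(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
    }

    /* full checksum over n 16-bit words */
    static uint16_t csum_full(const uint16_t *w, size_t n)
    {
        uint32_t sum = 0;

        while (n--)
            sum += *w++;
        return (uint16_t)~csum_fold(sum);
    }

    /* incremental update when one word changes from old_w to new_w */
    static uint16_t csum_replace(uint16_t check, uint16_t old_w, uint16_t new_w)
    {
        uint32_t sum = (uint16_t)~check;

        sum += (uint16_t)~old_w;  /* subtract old word (ones complement) */
        sum += new_w;             /* add new word */
        return (uint16_t)~csum_fold(sum);
    }

    int main(void)
    {
        uint16_t pkt[] = { 0x1234, 0xabcd, 0x0001, 0x5678 };
        uint16_t check = csum_full(pkt, 4);

        /* rewrite one "timestamp" word, as the driver rewrites originTimestamp */
        uint16_t patched = csum_replace(check, pkt[2], 0xbeef);

        pkt[2] = 0xbeef;
        assert(patched == csum_full(pkt, 4));
        printf("patched checksum 0x%04x matches full recompute\n", patched);
        return 0;
    }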
@@ -372,25 +417,20 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
dma_err:
dev_err(tx_ring->dev, "DMA map error");
- do {
- tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_frame(tx_ring, tx_swbd);
- if (i == 0)
- i = tx_ring->bd_count;
- i--;
- } while (count--);
+ enetc_unwind_tx_frame(tx_ring, count, i);
return 0;
}
-static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
- struct enetc_tx_swbd *tx_swbd,
- union enetc_tx_bd *txbd, int *i, int hdr_len,
- int data_len)
+static int enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ struct enetc_tx_swbd *tx_swbd,
+ union enetc_tx_bd *txbd, int *i, int hdr_len,
+ int data_len)
{
union enetc_tx_bd txbd_tmp;
u8 flags = 0, e_flags = 0;
dma_addr_t addr;
+ int count = 1;
enetc_clear_tx_bd(&txbd_tmp);
addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
@@ -433,7 +473,10 @@ static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
/* Write the BD */
txbd_tmp.ext.e_flags = e_flags;
*txbd = txbd_tmp;
+ count++;
}
+
+ return count;
}
static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
@@ -790,9 +833,9 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
/* compute the csum over the L4 header */
csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
- enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
+ count += enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd,
+ &i, hdr_len, data_len);
bd_data_num = 0;
- count++;
while (data_len > 0) {
int size;
@@ -816,8 +859,13 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
tso.data, size,
size == data_len);
- if (err)
+ if (err) {
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+
goto err_map_data;
+ }
data_len -= size;
count++;
@@ -846,13 +894,7 @@ err_map_data:
dev_err(tx_ring->dev, "DMA map error");
err_chained_bd:
- do {
- tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_frame(tx_ring, tx_swbd);
- if (i == 0)
- i = tx_ring->bd_count;
- i--;
- } while (count--);
+ enetc_unwind_tx_frame(tx_ring, count, i);
return 0;
}
@@ -1901,7 +1943,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
enetc_xdp_drop(rx_ring, orig_i, i);
tx_ring->stats.xdp_tx_drops++;
} else {
- tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
+ tx_ring->stats.xdp_tx++;
rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
xdp_tx_frm_cnt++;
/* The XDP_TX enqueue was successful, so we
@@ -3228,6 +3270,9 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
new_offloads |= ENETC_F_TX_TSTAMP;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (!enetc_si_is_pf(priv->si))
+ return -EOPNOTSUPP;
+
new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
break;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
index fc41078c4f5d..73ac8c6afb3a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
@@ -672,7 +672,6 @@ err_link_init:
err_alloc_msix:
err_config_si:
err_clk_get:
- mutex_destroy(&priv->mm_lock);
free_netdev(ndev);
return err;
@@ -684,6 +683,7 @@ static void enetc4_pf_netdev_destroy(struct enetc_si *si)
struct net_device *ndev = si->ndev;
unregister_netdev(ndev);
+ enetc4_link_deinit(priv);
enetc_free_msix(priv);
free_netdev(ndev);
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index bf34b5bb1e35..ece3ae28ba82 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -832,6 +832,7 @@ static int enetc_set_coalesce(struct net_device *ndev,
static int enetc_get_ts_info(struct net_device *ndev,
struct kernel_ethtool_ts_info *info)
{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
int *phc_idx;
phc_idx = symbol_get(enetc_phc_index);
@@ -852,8 +853,10 @@ static int enetc_get_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_TX_SOFTWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON) |
- (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+ (1 << HWTSTAMP_TX_ON);
+
+ if (enetc_si_is_pf(priv->si))
+ info->tx_types |= (1 << HWTSTAMP_TX_ONESTEP_SYNC);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 8ac0047f1ada..f0674a443567 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -109,10 +109,12 @@ static void gve_rx_reset_ring_dqo(struct gve_priv *priv, int idx)
void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+ struct gve_rx_ring *rx = &priv->rx[idx];
if (!gve_rx_was_added_to_block(priv, idx))
return;
+ page_pool_disable_direct_recycling(rx->dqo.page_pool);
gve_remove_napi(priv, ntfy_idx);
gve_rx_remove_from_block(priv, idx);
gve_rx_reset_ring_dqo(priv, idx);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index bab16c2191b2..181af419b878 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -483,7 +483,7 @@ int hclge_ptp_init(struct hclge_dev *hdev)
ret = hclge_ptp_get_cycle(hdev);
if (ret)
- return ret;
+ goto out;
}
ret = hclge_ptp_int_en(hdev, true);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 852e5b62f0a5..6faa62bced3a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1983,7 +1983,7 @@ err:
static void iavf_finish_config(struct work_struct *work)
{
struct iavf_adapter *adapter;
- bool netdev_released = false;
+ bool locks_released = false;
int pairs, err;
adapter = container_of(work, struct iavf_adapter, finish_config);
@@ -2012,19 +2012,22 @@ static void iavf_finish_config(struct work_struct *work)
netif_set_real_num_tx_queues(adapter->netdev, pairs);
if (adapter->netdev->reg_state != NETREG_REGISTERED) {
+ mutex_unlock(&adapter->crit_lock);
netdev_unlock(adapter->netdev);
- netdev_released = true;
+ locks_released = true;
err = register_netdevice(adapter->netdev);
if (err) {
dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
err);
/* go back and try again.*/
+ mutex_lock(&adapter->crit_lock);
iavf_free_rss(adapter);
iavf_free_misc_irq(adapter);
iavf_reset_interrupt_capability(adapter);
iavf_change_state(adapter,
__IAVF_INIT_CONFIG_ADAPTER);
+ mutex_unlock(&adapter->crit_lock);
goto out;
}
}
@@ -2040,9 +2043,10 @@ static void iavf_finish_config(struct work_struct *work)
}
out:
- mutex_unlock(&adapter->crit_lock);
- if (!netdev_released)
+ if (!locks_released) {
+ mutex_unlock(&adapter->crit_lock);
netdev_unlock(adapter->netdev);
+ }
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index fb527434b58b..d649c197cf67 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -38,8 +38,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
if (ice_vsi_add_vlan_zero(uplink_vsi))
goto err_vlan_zero;
- if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
- ICE_FLTR_RX))
+ if (ice_set_dflt_vsi(uplink_vsi))
goto err_def_rx;
if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index b83f99c01d91..8aabf7749aa5 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -36,6 +36,7 @@ static void ice_free_vf_entries(struct ice_pf *pf)
hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
hash_del_rcu(&vf->entry);
+ ice_deinitialize_vf_entry(vf);
ice_put_vf(vf);
}
}
@@ -193,10 +194,6 @@ void ice_free_vfs(struct ice_pf *pf)
wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
}
- /* clear malicious info since the VF is getting released */
- if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
- list_del(&vf->mbx_info.list_entry);
-
mutex_unlock(&vf->cfg_lock);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index c7c0c2f50c26..815ad0bfe832 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -1036,6 +1036,14 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
mutex_init(&vf->cfg_lock);
}
+void ice_deinitialize_vf_entry(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
+ list_del(&vf->mbx_info.list_entry);
+}
+
/**
* ice_dis_vf_qs - Disable the VF queues
* @vf: pointer to the VF structure
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
index 0c7e77c0a09f..5392b0404986 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -24,6 +24,7 @@
#endif
void ice_initialize_vf_entry(struct ice_vf *vf);
+void ice_deinitialize_vf_entry(struct ice_vf *vf);
void ice_dis_vf_qs(struct ice_vf *vf);
int ice_check_vf_init(struct ice_vf *vf);
enum virtchnl_status_code ice_err_to_virt_err(int err);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 9be6a6b59c4e..977741c41498 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -3013,7 +3013,6 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
skb_shinfo(skb)->gso_size = rsc_seg_len;
skb_reset_network_header(skb);
- len = skb->len - skb_transport_offset(skb);
if (ipv4) {
struct iphdr *ipv4h = ip_hdr(skb);
@@ -3022,6 +3021,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
/* Reset and set transport header offset in skb */
skb_set_transport_header(skb, sizeof(struct iphdr));
+ len = skb->len - skb_transport_offset(skb);
/* Compute the TCP pseudo header checksum*/
tcp_hdr(skb)->check =
@@ -3031,6 +3031,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+ len = skb->len - skb_transport_offset(skb);
tcp_hdr(skb)->check =
~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
index 683c668672d6..cb07ecd8937d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
@@ -1122,7 +1122,7 @@ static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw)
* returns error (ENOENT), then no cage present. If no cage present then
* connection type is backplane or BASE-T.
*/
- return ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL);
+ return !ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL);
}
/**
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 1641791a2d5b..8ed83fb98862 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -324,7 +324,7 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
MVPP2_PRS_RI_VLAN_MASK),
/* Non IP flow, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
- MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_TAGGED,
0, 0),
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 8b7c843446e1..823c1ba456cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -564,6 +564,9 @@ static int esw_qos_vport_enable(struct mlx5_vport *vport, struct mlx5_esw_sched_
return err;
esw_qos_normalize_min_rate(parent->esw, parent, extack);
+ trace_mlx5_esw_vport_qos_create(vport->dev, vport,
+ vport->qos.sched_node->max_rate,
+ vport->qos.sched_node->bw_share);
return 0;
}
@@ -591,8 +594,11 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
sched_node->vport = vport;
vport->qos.sched_node = sched_node;
err = esw_qos_vport_enable(vport, parent, extack);
- if (err)
+ if (err) {
+ __esw_qos_free_node(sched_node);
esw_qos_put(esw);
+ vport->qos.sched_node = NULL;
+ }
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 7db9cab9bedf..d9362eabc6a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -572,7 +572,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
- name, size, start);
+ name ? name : "mlx5_pcif_pool", size, start);
return pool;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index bfe6e2d631bd..ab7c2750c104 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -11,6 +11,8 @@
#include "dwmac_dma.h"
#include "dwmac1000.h"
+#define DRIVER_NAME "dwmac-loongson-pci"
+
/* Normal Loongson Tx Summary */
#define DMA_INTR_ENA_NIE_TX_LOONGSON 0x00040000
/* Normal Loongson Rx Summary */
@@ -516,6 +518,19 @@ static int loongson_dwmac_acpi_config(struct pci_dev *pdev,
return 0;
}
+/* Loongson's DWMAC device may take nearly two seconds to complete DMA reset */
+static int loongson_dwmac_fix_reset(void *priv, void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+
+ return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
+ !(value & DMA_BUS_MODE_SFT_RESET),
+ 10000, 2000000);
+}
+
static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct plat_stmmacenet_data *plat;
@@ -555,7 +570,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
- ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ ret = pcim_iomap_regions(pdev, BIT(0), DRIVER_NAME);
if (ret)
goto err_disable_device;
break;
@@ -566,6 +581,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
plat->bsp_priv = ld;
plat->setup = loongson_dwmac_setup;
+ plat->fix_soc_reset = loongson_dwmac_fix_reset;
ld->dev = &pdev->dev;
ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff;
@@ -673,7 +689,7 @@ static const struct pci_device_id loongson_dwmac_id_table[] = {
MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table);
static struct pci_driver loongson_dwmac_driver = {
- .name = "dwmac-loongson-pci",
+ .name = DRIVER_NAME,
.id_table = loongson_dwmac_id_table,
.probe = loongson_dwmac_probe,
.remove = loongson_dwmac_remove,
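Aside: the fix_soc_reset hook added above is an instance of the standard self-clearing-bit poll. A minimal sketch of that pattern against a hypothetical device follows; CTRL_REG, CTRL_SFT_RESET and example_soft_reset() are made-up names, not part of the Loongson driver, and the 10 ms poll interval / 2 s timeout simply mirror the values used above.

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

#define CTRL_REG	0x00
#define CTRL_SFT_RESET	BIT(0)

static int example_soft_reset(void __iomem *base)
{
	u32 val = readl(base + CTRL_REG);

	/* Start the reset; the hardware clears the bit when it is done. */
	writel(val | CTRL_SFT_RESET, base + CTRL_REG);

	/* Re-read every 10 ms, give up after 2 seconds. */
	return readl_poll_timeout(base + CTRL_REG, val,
				  !(val & CTRL_SFT_RESET), 10000, 2000000);
}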
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 0d5a862cd78a..3a13d60a947a 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -99,6 +99,7 @@ config TI_K3_AM65_CPSW_NUSS
select NET_DEVLINK
select TI_DAVINCI_MDIO
select PHYLINK
+ select PAGE_POOL
select TI_K3_CPPI_DESC_POOL
imply PHY_TI_GMII_SEL
depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index 768578c0d958..d59c1744840a 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -474,26 +474,7 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
static int icss_iep_perout_enable(struct icss_iep *iep,
struct ptp_perout_request *req, int on)
{
- int ret = 0;
-
- mutex_lock(&iep->ptp_clk_mutex);
-
- if (iep->pps_enabled) {
- ret = -EBUSY;
- goto exit;
- }
-
- if (iep->perout_enabled == !!on)
- goto exit;
-
- ret = icss_iep_perout_enable_hw(iep, req, on);
- if (!ret)
- iep->perout_enabled = !!on;
-
-exit:
- mutex_unlock(&iep->ptp_clk_mutex);
-
- return ret;
+ return -EOPNOTSUPP;
}
static void icss_iep_cap_cmp_work(struct work_struct *work)
diff --git a/drivers/net/ipa/data/ipa_data-v4.7.c b/drivers/net/ipa/data/ipa_data-v4.7.c
index c8c23d9be961..41f212209993 100644
--- a/drivers/net/ipa/data/ipa_data-v4.7.c
+++ b/drivers/net/ipa/data/ipa_data-v4.7.c
@@ -28,20 +28,18 @@ enum ipa_resource_type {
enum ipa_rsrc_group_id {
/* Source resource group identifiers */
IPA_RSRC_GROUP_SRC_UL_DL = 0,
- IPA_RSRC_GROUP_SRC_UC_RX_Q,
IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
/* Destination resource group identifiers */
- IPA_RSRC_GROUP_DST_UL_DL_DPL = 0,
- IPA_RSRC_GROUP_DST_UNUSED_1,
+ IPA_RSRC_GROUP_DST_UL_DL = 0,
IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
};
/* QSB configuration data for an SoC having IPA v4.7 */
static const struct ipa_qsb_data ipa_qsb_data[] = {
[IPA_QSB_MASTER_DDR] = {
- .max_writes = 8,
- .max_reads = 0, /* no limit (hardware max) */
+ .max_writes = 12,
+ .max_reads = 13,
.max_reads_beats = 120,
},
};
@@ -81,7 +79,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
},
.endpoint = {
.config = {
- .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL,
.aggregation = true,
.status_enable = true,
.rx = {
@@ -106,6 +104,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
@@ -128,7 +127,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
},
.endpoint = {
.config = {
- .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL,
+ .checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
@@ -197,12 +197,12 @@ static const struct ipa_resource ipa_resource_src[] = {
/* Destination resource configuration data for an SoC having IPA v4.7 */
static const struct ipa_resource ipa_resource_dst[] = {
[IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
- .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL] = {
.min = 7, .max = 7,
},
},
[IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
- .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL] = {
.min = 2, .max = 2,
},
},
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index fd591ddb3884..ca62188a317a 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -416,20 +416,25 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
- const struct iphdr *ip4h = ip_hdr(skb);
struct net_device *dev = skb->dev;
struct net *net = dev_net(dev);
- struct rtable *rt;
int err, ret = NET_XMIT_DROP;
+ const struct iphdr *ip4h;
+ struct rtable *rt;
struct flowi4 fl4 = {
.flowi4_oif = dev->ifindex,
- .flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h)),
.flowi4_flags = FLOWI_FLAG_ANYSRC,
.flowi4_mark = skb->mark,
- .daddr = ip4h->daddr,
- .saddr = ip4h->saddr,
};
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+ goto err;
+
+ ip4h = ip_hdr(skb);
+ fl4.daddr = ip4h->daddr;
+ fl4.saddr = ip4h->saddr;
+ fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h));
+
rt = ip_route_output_flow(net, &fl4, NULL);
if (IS_ERR(rt))
goto err;
@@ -488,6 +493,12 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
struct net_device *dev = skb->dev;
int err, ret = NET_XMIT_DROP;
+ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) {
+ DEV_STATS_INC(dev, tx_errors);
+ kfree_skb(skb);
+ return ret;
+ }
+
err = ipvlan_route_v6_outbound(dev, skb);
if (unlikely(err)) {
DEV_STATS_INC(dev, tx_errors);
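Aside: the ipvlan change above follows the validate-then-parse rule: pskb_network_may_pull() must succeed before ip_hdr()/ipv6_hdr() may be dereferenced. A minimal sketch of that ordering, with a hypothetical helper name (example_parse_v4() is illustrative, not ipvlan code):

#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

static int example_parse_v4(struct sk_buff *skb, __be32 *daddr)
{
	const struct iphdr *iph;

	/* Make sure a full IPv4 header is present in the linear area. */
	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
		return -EINVAL;

	iph = ip_hdr(skb);	/* safe only after the check above */
	*daddr = iph->daddr;
	return 0;
}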
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index c8840c3b9a1b..f1d68153987e 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -244,8 +244,22 @@ static netdev_tx_t blackhole_netdev_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+static int blackhole_neigh_output(struct neighbour *n, struct sk_buff *skb)
+{
+ kfree_skb(skb);
+ return 0;
+}
+
+static int blackhole_neigh_construct(struct net_device *dev,
+ struct neighbour *n)
+{
+ n->output = blackhole_neigh_output;
+ return 0;
+}
+
static const struct net_device_ops blackhole_netdev_ops = {
.ndo_start_xmit = blackhole_netdev_xmit,
+ .ndo_neigh_construct = blackhole_neigh_construct,
};
/* This is a dst-dummy device used specifically for invalidated
diff --git a/drivers/net/mctp/mctp-i3c.c b/drivers/net/mctp/mctp-i3c.c
index d247fe483c58..c1e72253063b 100644
--- a/drivers/net/mctp/mctp-i3c.c
+++ b/drivers/net/mctp/mctp-i3c.c
@@ -507,6 +507,9 @@ static int mctp_i3c_header_create(struct sk_buff *skb, struct net_device *dev,
{
struct mctp_i3c_internal_hdr *ihdr;
+ if (!daddr || !saddr)
+ return -EINVAL;
+
skb_push(skb, sizeof(struct mctp_i3c_internal_hdr));
skb_reset_mac_header(skb);
ihdr = (void *)skb_mac_header(skb);
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index 5c80fbee7913..7ab358616e03 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -184,9 +184,11 @@ static const struct ethtool_ops nsim_ethtool_ops = {
static void nsim_ethtool_ring_init(struct netdevsim *ns)
{
+ ns->ethtool.ring.rx_pending = 512;
ns->ethtool.ring.rx_max_pending = 4096;
ns->ethtool.ring.rx_jumbo_max_pending = 4096;
ns->ethtool.ring.rx_mini_max_pending = 4096;
+ ns->ethtool.ring.tx_pending = 512;
ns->ethtool.ring.tx_max_pending = 4096;
}
diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
index 3279de857b47..2ad8c2586d64 100644
--- a/drivers/net/phy/qcom/qca807x.c
+++ b/drivers/net/phy/qcom/qca807x.c
@@ -774,7 +774,7 @@ static int qca807x_config_init(struct phy_device *phydev)
control_dac &= ~QCA807X_CONTROL_DAC_MASK;
if (!priv->dac_full_amplitude)
control_dac |= QCA807X_CONTROL_DAC_DSP_AMPLITUDE;
- if (!priv->dac_full_amplitude)
+ if (!priv->dac_full_bias_current)
control_dac |= QCA807X_CONTROL_DAC_DSP_BIAS_CURRENT;
if (!priv->dac_disable_bias_current_tweak)
control_dac |= QCA807X_CONTROL_DAC_BIAS_CURRENT_TWEAK;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 4583e15ad03a..1420c4efa48e 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -72,6 +72,17 @@
#define PPP_PROTO_LEN 2
#define PPP_LCP_HDRLEN 4
+/* The filter instructions generated by libpcap are constructed
+ * assuming a four-byte PPP header on each packet, where the last
+ * 2 bytes are the protocol field defined in the RFC and the first
+ * byte of the first 2 bytes indicates the direction.
+ * The second byte is currently unused, but we still need to initialize
+ * it to prevent crafted BPF programs from reading them which would
+ * cause reading of uninitialized data.
+ */
+#define PPP_FILTER_OUTBOUND_TAG 0x0100
+#define PPP_FILTER_INBOUND_TAG 0x0000
+
/*
* An instance of /dev/ppp can be associated with either a ppp
* interface unit or a ppp channel. In both cases, file->private_data
@@ -1762,10 +1773,10 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
if (proto < 0x8000) {
#ifdef CONFIG_PPP_FILTER
- /* check if we should pass this packet */
- /* the filter instructions are constructed assuming
- a four-byte PPP header on each packet */
- *(u8 *)skb_push(skb, 2) = 1;
+ /* check if the packet passes the pass and active filters.
+ * See comment for PPP_FILTER_OUTBOUND_TAG above.
+ */
+ *(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_OUTBOUND_TAG);
if (ppp->pass_filter &&
bpf_prog_run(ppp->pass_filter, skb) == 0) {
if (ppp->debug & 1)
@@ -2482,14 +2493,13 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
/* network protocol frame - give it to the kernel */
#ifdef CONFIG_PPP_FILTER
- /* check if the packet passes the pass and active filters */
- /* the filter instructions are constructed assuming
- a four-byte PPP header on each packet */
if (ppp->pass_filter || ppp->active_filter) {
if (skb_unclone(skb, GFP_ATOMIC))
goto err;
-
- *(u8 *)skb_push(skb, 2) = 0;
+ /* Check if the packet passes the pass and active filters.
+ * See comment for PPP_FILTER_INBOUND_TAG above.
+ */
+ *(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_INBOUND_TAG);
if (ppp->pass_filter &&
bpf_prog_run(ppp->pass_filter, skb) == 0) {
if (ppp->debug & 1)
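Aside: the two ppp_generic.c hunks above replace a single-byte direction marker with a full 16-bit big-endian tag so that libpcap-generated BPF programs never read an uninitialized byte. The sketch below shows only the tagging step with an invented helper (example_tag_direction() and its constants mirror the PPP_FILTER_*_TAG defines but are not the driver code):

#include <linux/skbuff.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static void example_tag_direction(struct sk_buff *skb, bool outbound)
{
	u16 tag = outbound ? 0x0100 : 0x0000;

	/* Both bytes of the pushed word are defined before BPF runs. */
	*(__be16 *)skb_push(skb, 2) = cpu_to_be16(tag);
}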
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index 46af78caf457..0bfa37c14059 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -179,9 +179,7 @@ static int genelink_bind(struct usbnet *dev, struct usb_interface *intf)
{
dev->hard_mtu = GL_RCV_BUF_SIZE;
dev->net->hard_header_len += 4;
- dev->in = usb_rcvbulkpipe(dev->udev, dev->driver_info->in);
- dev->out = usb_sndbulkpipe(dev->udev, dev->driver_info->out);
- return 0;
+ return usbnet_get_endpoints(dev, intf);
}
static const struct driver_info genelink_info = {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 60eb95fc19a5..6bc107476a2a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -1172,6 +1172,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
struct brcmf_bus *bus_if;
struct brcmf_sdio_dev *sdiodev;
mmc_pm_flag_t sdio_flags;
+ bool cap_power_off;
int ret = 0;
func = container_of(dev, struct sdio_func, dev);
@@ -1179,19 +1180,23 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
if (func->num != 1)
return 0;
+ cap_power_off = !!(func->card->host->caps & MMC_CAP_POWER_OFF_CARD);
bus_if = dev_get_drvdata(dev);
sdiodev = bus_if->bus_priv.sdio;
- if (sdiodev->wowl_enabled) {
+ if (sdiodev->wowl_enabled || !cap_power_off) {
brcmf_sdiod_freezer_on(sdiodev);
brcmf_sdio_wd_timer(sdiodev->bus, 0);
sdio_flags = MMC_PM_KEEP_POWER;
- if (sdiodev->settings->bus.sdio.oob_irq_supported)
- enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
- else
- sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
+
+ if (sdiodev->wowl_enabled) {
+ if (sdiodev->settings->bus.sdio.oob_irq_supported)
+ enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
+ else
+ sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
+ }
if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
@@ -1213,18 +1218,19 @@ static int brcmf_ops_sdio_resume(struct device *dev)
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct sdio_func *func = container_of(dev, struct sdio_func, dev);
int ret = 0;
+ bool cap_power_off = !!(func->card->host->caps & MMC_CAP_POWER_OFF_CARD);
brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
if (func->num != 2)
return 0;
- if (!sdiodev->wowl_enabled) {
+ if (!sdiodev->wowl_enabled && cap_power_off) {
/* bus was powered off and device removed, probe again */
ret = brcmf_sdiod_probe(sdiodev);
if (ret)
brcmf_err("Failed to probe device on resume\n");
} else {
- if (sdiodev->settings->bus.sdio.oob_irq_supported)
+ if (sdiodev->wowl_enabled && sdiodev->settings->bus.sdio.oob_irq_supported)
disable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
brcmf_sdiod_freezer_off(sdiodev);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index fb2ea38e89ac..6594216f873c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -558,41 +558,71 @@ static void iwl_dump_prph(struct iwl_fw_runtime *fwrt,
}
/*
- * alloc_sgtable - allocates scallerlist table in the given size,
- * fills it with pages and returns it
+ * alloc_sgtable - allocates (chained) scatterlist in the given size,
+ * fills it with pages and returns it
* @size: the size (in bytes) of the table
-*/
-static struct scatterlist *alloc_sgtable(int size)
+ */
+static struct scatterlist *alloc_sgtable(ssize_t size)
{
- int alloc_size, nents, i;
- struct page *new_page;
- struct scatterlist *iter;
- struct scatterlist *table;
+ struct scatterlist *result = NULL, *prev;
+ int nents, i, n_prev;
nents = DIV_ROUND_UP(size, PAGE_SIZE);
- table = kcalloc(nents, sizeof(*table), GFP_KERNEL);
- if (!table)
- return NULL;
- sg_init_table(table, nents);
- iter = table;
- for_each_sg(table, iter, sg_nents(table), i) {
- new_page = alloc_page(GFP_KERNEL);
- if (!new_page) {
- /* release all previous allocated pages in the table */
- iter = table;
- for_each_sg(table, iter, sg_nents(table), i) {
- new_page = sg_page(iter);
- if (new_page)
- __free_page(new_page);
- }
- kfree(table);
+
+#define N_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(*result))
+ /*
+ * We need an additional entry for table chaining,
+ * this ensures the loop can finish i.e. we can
+ * fit at least two entries per page (obviously,
+ * many more really fit.)
+ */
+ BUILD_BUG_ON(N_ENTRIES_PER_PAGE < 2);
+
+ while (nents > 0) {
+ struct scatterlist *new, *iter;
+ int n_fill, n_alloc;
+
+ if (nents <= N_ENTRIES_PER_PAGE) {
+ /* last needed table */
+ n_fill = nents;
+ n_alloc = nents;
+ nents = 0;
+ } else {
+ /* fill a page with entries */
+ n_alloc = N_ENTRIES_PER_PAGE;
+ /* reserve one for chaining */
+ n_fill = n_alloc - 1;
+ nents -= n_fill;
+ }
+
+ new = kcalloc(n_alloc, sizeof(*new), GFP_KERNEL);
+ if (!new) {
+ if (result)
+ _devcd_free_sgtable(result);
return NULL;
}
- alloc_size = min_t(int, size, PAGE_SIZE);
- size -= PAGE_SIZE;
- sg_set_page(iter, new_page, alloc_size, 0);
+ sg_init_table(new, n_alloc);
+
+ if (!result)
+ result = new;
+ else
+ sg_chain(prev, n_prev, new);
+ prev = new;
+ n_prev = n_alloc;
+
+ for_each_sg(new, iter, n_fill, i) {
+ struct page *new_page = alloc_page(GFP_KERNEL);
+
+ if (!new_page) {
+ _devcd_free_sgtable(result);
+ return NULL;
+ }
+
+ sg_set_page(iter, new_page, PAGE_SIZE, 0);
+ }
}
- return table;
+
+ return result;
}
static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt,
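Aside: the rewritten alloc_sgtable() above relies on scatterlist chaining, where the last slot of each page-sized array is turned into a link to the next array. A stripped-down illustration of that mechanism (example_chain_two() and its sizes are made up; the real function additionally fills every usable entry with a freshly allocated page):

#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct scatterlist *example_chain_two(unsigned int nents_each)
{
	struct scatterlist *a, *b;

	if (nents_each < 2)	/* need room for data plus the chain entry */
		return NULL;

	a = kcalloc(nents_each, sizeof(*a), GFP_KERNEL);
	b = kcalloc(nents_each, sizeof(*b), GFP_KERNEL);
	if (!a || !b) {
		kfree(a);
		kfree(b);
		return NULL;
	}

	sg_init_table(a, nents_each);
	sg_init_table(b, nents_each);

	/* Entry nents_each - 1 of 'a' now links to 'b' instead of holding data. */
	sg_chain(a, nents_each, b);

	return a;
}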
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
index 8e0c85a1240d..c7b261c8ec96 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dump.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
@@ -540,6 +540,9 @@ bool iwl_fwrt_read_err_table(struct iwl_trans *trans, u32 base, u32 *err_id)
} err_info = {};
int ret;
+ if (err_id)
+ *err_id = 0;
+
if (!base)
return false;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index d3a65f33097c..352b6e73e08f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1181,7 +1181,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(*fseq_ver))
goto invalid_tlv_len;
- IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %s\n",
+ IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %.32s\n",
fseq_ver->version);
}
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 129b6bdf9ef9..82ca7f8b1bb2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -3092,8 +3092,14 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
ieee80211_resume_disconnect(vif);
}
-static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+enum rt_status {
+ FW_ALIVE,
+ FW_NEEDS_RESET,
+ FW_ERROR,
+};
+
+static enum rt_status iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
{
u32 err_id;
@@ -3101,29 +3107,35 @@ static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
if (iwl_fwrt_read_err_table(mvm->trans,
mvm->trans->dbg.lmac_error_event_table[0],
&err_id)) {
- if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN && vif) {
- struct cfg80211_wowlan_wakeup wakeup = {
- .rfkill_release = true,
- };
- ieee80211_report_wowlan_wakeup(vif, &wakeup,
- GFP_KERNEL);
+ if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+ IWL_WARN(mvm, "Rfkill was toggled during suspend\n");
+ if (vif) {
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .rfkill_release = true,
+ };
+
+ ieee80211_report_wowlan_wakeup(vif, &wakeup,
+ GFP_KERNEL);
+ }
+
+ return FW_NEEDS_RESET;
}
- return true;
+ return FW_ERROR;
}
/* check if we have lmac2 set and check for error */
if (iwl_fwrt_read_err_table(mvm->trans,
mvm->trans->dbg.lmac_error_event_table[1],
NULL))
- return true;
+ return FW_ERROR;
/* check for umac error */
if (iwl_fwrt_read_err_table(mvm->trans,
mvm->trans->dbg.umac_error_event_table,
NULL))
- return true;
+ return FW_ERROR;
- return false;
+ return FW_ALIVE;
}
/*
@@ -3492,6 +3504,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_D0I3_END_FIRST);
bool resume_notif_based = iwl_mvm_d3_resume_notif_based(mvm);
+ enum rt_status rt_status;
bool keep = false;
mutex_lock(&mvm->mutex);
@@ -3515,14 +3528,19 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
- if (iwl_mvm_check_rt_status(mvm, vif)) {
- IWL_ERR(mvm, "FW Error occurred during suspend. Restarting.\n");
+ rt_status = iwl_mvm_check_rt_status(mvm, vif);
+ if (rt_status != FW_ALIVE) {
set_bit(STATUS_FW_ERROR, &mvm->trans->status);
- iwl_mvm_dump_nic_error_log(mvm);
- iwl_dbg_tlv_time_point(&mvm->fwrt,
- IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
- iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
- false, 0);
+ if (rt_status == FW_ERROR) {
+ IWL_ERR(mvm, "FW Error occurred during suspend. Restarting.\n");
+ iwl_mvm_dump_nic_error_log(mvm);
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_FW_ASSERT,
+ NULL);
+ iwl_fw_dbg_collect_desc(&mvm->fwrt,
+ &iwl_dump_desc_assert,
+ false, 0);
+ }
ret = 1;
goto err;
}
@@ -3679,6 +3697,7 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
.notif_expected =
IWL_D3_NOTIF_D3_END_NOTIF,
};
+ enum rt_status rt_status;
int ret;
lockdep_assert_held(&mvm->mutex);
@@ -3688,14 +3707,20 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
mvm->last_reset_or_resume_time_jiffies = jiffies;
iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
- if (iwl_mvm_check_rt_status(mvm, NULL)) {
- IWL_ERR(mvm, "FW Error occurred during suspend. Restarting.\n");
+ rt_status = iwl_mvm_check_rt_status(mvm, NULL);
+ if (rt_status != FW_ALIVE) {
set_bit(STATUS_FW_ERROR, &mvm->trans->status);
- iwl_mvm_dump_nic_error_log(mvm);
- iwl_dbg_tlv_time_point(&mvm->fwrt,
- IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
- iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
- false, 0);
+ if (rt_status == FW_ERROR) {
+ IWL_ERR(mvm,
+ "iwl_mvm_check_rt_status failed, device is gone during suspend\n");
+ iwl_mvm_dump_nic_error_log(mvm);
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_FW_ASSERT,
+ NULL);
+ iwl_fw_dbg_collect_desc(&mvm->fwrt,
+ &iwl_dump_desc_assert,
+ false, 0);
+ }
mvm->trans->state = IWL_TRANS_NO_FW;
ret = -ENODEV;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 83e3c1160362..55d035b896e9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1479,6 +1479,13 @@ static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_mvm *mvm,
if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
return -EOPNOTSUPP;
+ /*
+ * If the firmware is not running, silently succeed since there is
+ * no data to clear.
+ */
+ if (!iwl_mvm_firmware_running(mvm))
+ return count;
+
mutex_lock(&mvm->mutex);
iwl_fw_dbg_clear_monitor_buf(&mvm->fwrt);
mutex_unlock(&mvm->mutex);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 09fd8752046e..14ea89f931bb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -995,7 +995,7 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
*/
u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK);
u32 rate_n_flags = phy_data->rate_n_flags;
- u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK_V1;
+ u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
u8 offs = 0;
rx_status->bw = RATE_INFO_BW_HE_RU;
@@ -1050,13 +1050,13 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
if (he_mu)
he_mu->flags2 |=
- le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1,
+ le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
rate_n_flags),
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
- else if (he_type == RATE_MCS_HE_TYPE_TRIG_V1)
+ else if (he_type == RATE_MCS_HE_TYPE_TRIG)
he->data6 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) |
- le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1,
+ le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
rate_n_flags),
IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 9216c43a35c4..ebfa88b38b71 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -1030,6 +1030,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
/* End TE, notify mac80211 */
mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
mvmvif->time_event_data.link_id = -1;
+ /* set the bit so the ROC cleanup will actually clean up */
+ set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status);
iwl_mvm_roc_finished(mvm);
ieee80211_remain_on_channel_expired(mvm->hw);
} else if (le32_to_cpu(notif->start)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 856b7e9f717d..45460f93d24a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2003-2015, 2018-2024 Intel Corporation
+ * Copyright (C) 2003-2015, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -646,7 +646,8 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
unsigned int len);
struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *cmd_meta,
- u8 **hdr, unsigned int hdr_room);
+ u8 **hdr, unsigned int hdr_room,
+ unsigned int offset);
void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *cmd_meta);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 1f483f15c238..401919f9fe88 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020, 2023-2024 Intel Corporation
+ * Copyright (C) 2018-2020, 2023-2025 Intel Corporation
*/
#include <net/tso.h>
#include <linux/tcp.h>
@@ -188,7 +188,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
/* Our device supports 9 segments at most, it will fit in 1 page */
- sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room);
+ sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room,
+ snap_ip_tcp_hdrlen + hdr_len);
if (!sgt)
return -ENOMEM;
@@ -347,6 +348,7 @@ iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
return tfd;
out_err:
+ iwl_pcie_free_tso_pages(trans, skb, out_meta);
iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
return NULL;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 334ebd4c12fa..7b6071a59b69 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2003-2014, 2018-2021, 2023-2024 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2021, 2023-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -1855,6 +1855,7 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
* @cmd_meta: command meta to store the scatter list information for unmapping
* @hdr: output argument for TSO headers
* @hdr_room: requested length for TSO headers
+ * @offset: offset into the data from which mapping should start
*
* Allocate space for a scatter gather list and TSO headers and map the SKB
* using the scatter gather list. The SKB is unmapped again when the page is
@@ -1864,18 +1865,20 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
*/
struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *cmd_meta,
- u8 **hdr, unsigned int hdr_room)
+ u8 **hdr, unsigned int hdr_room,
+ unsigned int offset)
{
struct sg_table *sgt;
+ unsigned int n_segments;
if (WARN_ON_ONCE(skb_has_frag_list(skb)))
return NULL;
+ n_segments = DIV_ROUND_UP(skb->len - offset, skb_shinfo(skb)->gso_size);
*hdr = iwl_pcie_get_page_hdr(trans,
hdr_room + __alignof__(struct sg_table) +
sizeof(struct sg_table) +
- (skb_shinfo(skb)->nr_frags + 1) *
- sizeof(struct scatterlist),
+ n_segments * sizeof(struct scatterlist),
skb);
if (!*hdr)
return NULL;
@@ -1883,11 +1886,11 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table));
sgt->sgl = (void *)(sgt + 1);
- sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1);
+ sg_init_table(sgt->sgl, n_segments);
/* Only map the data, not the header (it is copied to the TSO page) */
- sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, skb_headlen(skb),
- skb->data_len);
+ sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, offset,
+ skb->len - offset);
if (WARN_ON_ONCE(sgt->orig_nents <= 0))
return NULL;
@@ -1939,7 +1942,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
/* Our device supports 9 segments at most, it will fit in 1 page */
- sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room);
+ sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room,
+ snap_ip_tcp_hdrlen + hdr_len + iv_len);
if (!sgt)
return -ENOMEM;
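Aside: with the new offset argument, iwl_pcie_prep_tso() sizes the scatterlist by the number of TSO segments rather than by page fragments. A worked example with made-up numbers (the helper name is illustrative only):

#include <linux/kernel.h>

static unsigned int example_tso_entries(void)
{
	unsigned int skb_len = 64000, offset = 100, gso_size = 1398;

	/* DIV_ROUND_UP(63900, 1398) == 46 entries, one per segment. */
	return DIV_ROUND_UP(skb_len - offset, gso_size);
}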
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index b1b46c2713e1..24e2c702da7a 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -128,8 +128,10 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
if (!nvme_ctrl_sgl_supported(ctrl))
dev_warn_once(ctrl->device, "using unchecked data buffer\n");
if (has_metadata) {
- if (!supports_metadata)
- return -EINVAL;
+ if (!supports_metadata) {
+ ret = -EINVAL;
+ goto out;
+ }
if (!nvme_ctrl_meta_sgl_supported(ctrl))
dev_warn_once(ctrl->device,
"using unchecked metadata buffer\n");
@@ -139,8 +141,10 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
struct iov_iter iter;
/* fixedbufs is only for non-vectored io */
- if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
- return -EINVAL;
+ if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC)) {
+ ret = -EINVAL;
+ goto out;
+ }
ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
rq_data_dir(req), &iter, ioucmd);
if (ret < 0)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 950289405ef2..640590b21728 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1983,6 +1983,18 @@ static void nvme_map_cmb(struct nvme_dev *dev)
return;
/*
+ * Controllers may support a CMB size larger than their BAR, for
+ * example, due to being behind a bridge. Reduce the CMB to the
+ * reported size of the BAR
+ */
+ size = min(size, bar_size - offset);
+
+ if (!IS_ALIGNED(size, memremap_compat_align()) ||
+ !IS_ALIGNED(pci_resource_start(pdev, bar),
+ memremap_compat_align()))
+ return;
+
+ /*
* Tell the controller about the host side address mapping the CMB,
* and enable CMB decoding for the NVMe 1.4+ scheme:
*/
@@ -1992,17 +2004,10 @@ static void nvme_map_cmb(struct nvme_dev *dev)
dev->bar + NVME_REG_CMBMSC);
}
- /*
- * Controllers may support a CMB size larger than their BAR,
- * for example, due to being behind a bridge. Reduce the CMB to
- * the reported size of the BAR
- */
- if (size > bar_size - offset)
- size = bar_size - offset;
-
if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
dev_warn(dev->ctrl.device,
"failed to register the CMB\n");
+ hi_lo_writeq(0, dev->bar + NVME_REG_CMBMSC);
return;
}
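Aside: the reordered nvme_map_cmb() logic first clamps the CMB window to what the BAR actually exposes and only then rejects windows the p2pdma layer could not map. A hedged sketch of that ordering (example_cmb_window_ok() is an invented helper and assumes memremap_compat_align(), IS_ALIGNED() and min() from the core headers):

#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/mm.h>

static bool example_cmb_window_ok(u64 start, u64 *size, u64 bar_size, u64 offset)
{
	/* Never claim more than the BAR provides past the offset. */
	*size = min(*size, bar_size - offset);

	/* Both the window size and its start must satisfy the mapping alignment. */
	return IS_ALIGNED(*size, memremap_compat_align()) &&
	       IS_ALIGNED(start, memremap_compat_align());
}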
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 8a9131c95a3d..327f3f2f5399 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -217,6 +217,19 @@ static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
return queue - queue->ctrl->queues;
}
+static inline bool nvme_tcp_recv_pdu_supported(enum nvme_tcp_pdu_type type)
+{
+ switch (type) {
+ case nvme_tcp_c2h_term:
+ case nvme_tcp_c2h_data:
+ case nvme_tcp_r2t:
+ case nvme_tcp_rsp:
+ return true;
+ default:
+ return false;
+ }
+}
+
/*
* Check if the queue is TLS encrypted
*/
@@ -775,7 +788,7 @@ static void nvme_tcp_handle_c2h_term(struct nvme_tcp_queue *queue,
[NVME_TCP_FES_PDU_SEQ_ERR] = "PDU Sequence Error",
[NVME_TCP_FES_HDR_DIGEST_ERR] = "Header Digest Error",
[NVME_TCP_FES_DATA_OUT_OF_RANGE] = "Data Transfer Out Of Range",
- [NVME_TCP_FES_R2T_LIMIT_EXCEEDED] = "R2T Limit Exceeded",
+ [NVME_TCP_FES_DATA_LIMIT_EXCEEDED] = "Data Transfer Limit Exceeded",
[NVME_TCP_FES_UNSUPPORTED_PARAM] = "Unsupported Parameter",
};
@@ -818,6 +831,16 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
return 0;
hdr = queue->pdu;
+ if (unlikely(hdr->hlen != sizeof(struct nvme_tcp_rsp_pdu))) {
+ if (!nvme_tcp_recv_pdu_supported(hdr->type))
+ goto unsupported_pdu;
+
+ dev_err(queue->ctrl->ctrl.device,
+ "pdu type %d has unexpected header length (%d)\n",
+ hdr->type, hdr->hlen);
+ return -EPROTO;
+ }
+
if (unlikely(hdr->type == nvme_tcp_c2h_term)) {
/*
* C2HTermReq never includes Header or Data digests.
@@ -850,10 +873,13 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
nvme_tcp_init_recv_ctx(queue);
return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
default:
- dev_err(queue->ctrl->ctrl.device,
- "unsupported pdu type (%d)\n", hdr->type);
- return -EINVAL;
+ goto unsupported_pdu;
}
+
+unsupported_pdu:
+ dev_err(queue->ctrl->ctrl.device,
+ "unsupported pdu type (%d)\n", hdr->type);
+ return -EINVAL;
}
static inline void nvme_tcp_end_request(struct request *rq, u16 status)
@@ -1495,11 +1521,11 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
msg.msg_flags = MSG_WAITALL;
ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
iov.iov_len, msg.msg_flags);
- if (ret < sizeof(*icresp)) {
+ if (ret >= 0 && ret < sizeof(*icresp))
+ ret = -ECONNRESET;
+ if (ret < 0) {
pr_warn("queue %d: failed to receive icresp, error %d\n",
nvme_tcp_queue_id(queue), ret);
- if (ret >= 0)
- ret = -ECONNRESET;
goto free_icresp;
}
ret = -ENOTCONN;
@@ -2699,6 +2725,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct nvme_tcp_queue *queue = hctx->driver_data;
struct sock *sk = queue->sock->sk;
+ int ret;
if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
return 0;
@@ -2706,9 +2733,9 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
set_bit(NVME_TCP_Q_POLLING, &queue->flags);
if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
sk_busy_loop(sk, true);
- nvme_tcp_try_recv(queue);
+ ret = nvme_tcp_try_recv(queue);
clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
- return queue->nr_cqe;
+ return ret < 0 ? ret : queue->nr_cqe;
}
static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index d2c1233981e1..fcf4f460dc9a 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -647,7 +647,6 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
u8 event_info, u8 log_page);
-bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid);
#define NVMET_MIN_QUEUE_SIZE 16
#define NVMET_MAX_QUEUE_SIZE 1024
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 7c51c2a8c109..4f9cac8a5abe 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -571,10 +571,16 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
struct nvmet_tcp_cmd *cmd =
container_of(req, struct nvmet_tcp_cmd, req);
struct nvmet_tcp_queue *queue = cmd->queue;
+ enum nvmet_tcp_recv_state queue_state;
+ struct nvmet_tcp_cmd *queue_cmd;
struct nvme_sgl_desc *sgl;
u32 len;
- if (unlikely(cmd == queue->cmd)) {
+ /* Pairs with store_release in nvmet_prepare_receive_pdu() */
+ queue_state = smp_load_acquire(&queue->rcv_state);
+ queue_cmd = READ_ONCE(queue->cmd);
+
+ if (unlikely(cmd == queue_cmd)) {
sgl = &cmd->req.cmd->common.dptr.sgl;
len = le32_to_cpu(sgl->length);
@@ -583,7 +589,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
* Avoid using helpers, this might happen before
* nvmet_req_init is completed.
*/
- if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
+ if (queue_state == NVMET_TCP_RECV_PDU &&
len && len <= cmd->req.port->inline_data_size &&
nvme_is_write(cmd->req.cmd))
return;
@@ -847,8 +853,9 @@ static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
{
queue->offset = 0;
queue->left = sizeof(struct nvme_tcp_hdr);
- queue->cmd = NULL;
- queue->rcv_state = NVMET_TCP_RECV_PDU;
+ WRITE_ONCE(queue->cmd, NULL);
+ /* Ensure rcv_state is visible only after queue->cmd is set */
+ smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU);
}
static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
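Aside: the nvmet_tcp change above is the classic store-release/load-acquire publication pattern: the receive path publishes rcv_state only after queue->cmd has been written, and the response path must read the two fields in the opposite order. A generic sketch with invented names (struct example_queue is not the nvmet structure):

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/types.h>

struct example_queue {
	void *cmd;
	int rcv_state;
};

static void example_publish(struct example_queue *q)
{
	WRITE_ONCE(q->cmd, NULL);
	/* Everything stored above is visible before rcv_state changes. */
	smp_store_release(&q->rcv_state, 1);
}

static bool example_observe(struct example_queue *q, void **cmd)
{
	/* Pairs with the smp_store_release() above. */
	int state = smp_load_acquire(&q->rcv_state);

	*cmd = READ_ONCE(q->cmd);	/* cannot be staler than 'state' implies */
	return state == 1;
}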
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 75e819f66a56..ee2e31522d7e 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -415,12 +415,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
prop = of_get_flat_dt_prop(node, "alignment", &len);
if (prop) {
- if (len != dt_root_size_cells * sizeof(__be32)) {
+ if (len != dt_root_addr_cells * sizeof(__be32)) {
pr_err("invalid alignment property in '%s' node.\n",
uname);
return -EINVAL;
}
- align = dt_mem_next_cell(dt_root_size_cells, &prop);
+ align = dt_mem_next_cell(dt_root_addr_cells, &prop);
}
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
diff --git a/drivers/phy/freescale/phy-fsl-samsung-hdmi.c b/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
index 45004f598e4d..e4c0a82d16d9 100644
--- a/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
+++ b/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
@@ -325,7 +325,7 @@ to_fsl_samsung_hdmi_phy(struct clk_hw *hw)
return container_of(hw, struct fsl_samsung_hdmi_phy, hw);
}
-static void
+static int
fsl_samsung_hdmi_phy_configure_pll_lock_det(struct fsl_samsung_hdmi_phy *phy,
const struct phy_config *cfg)
{
@@ -341,6 +341,9 @@ fsl_samsung_hdmi_phy_configure_pll_lock_det(struct fsl_samsung_hdmi_phy *phy,
break;
}
+ if (unlikely(div == 4))
+ return -EINVAL;
+
writeb(FIELD_PREP(REG12_CK_DIV_MASK, div), phy->regs + PHY_REG(12));
/*
@@ -364,6 +367,8 @@ fsl_samsung_hdmi_phy_configure_pll_lock_det(struct fsl_samsung_hdmi_phy *phy,
FIELD_PREP(REG14_RP_CODE_MASK, 2) |
FIELD_PREP(REG14_TG_CODE_HIGH_MASK, fld_tg_code >> 8),
phy->regs + PHY_REG(14));
+
+ return 0;
}
static unsigned long fsl_samsung_hdmi_phy_find_pms(unsigned long fout, u8 *p, u16 *m, u8 *s)
@@ -466,7 +471,11 @@ static int fsl_samsung_hdmi_phy_configure(struct fsl_samsung_hdmi_phy *phy,
writeb(REG21_SEL_TX_CK_INV | FIELD_PREP(REG21_PMS_S_MASK,
cfg->pll_div_regs[2] >> 4), phy->regs + PHY_REG(21));
- fsl_samsung_hdmi_phy_configure_pll_lock_det(phy, cfg);
+ ret = fsl_samsung_hdmi_phy_configure_pll_lock_det(phy, cfg);
+ if (ret) {
+ dev_err(phy->dev, "pixclock too large\n");
+ return ret;
+ }
writeb(REG33_FIX_DA | REG33_MODE_SET_DONE, phy->regs + PHY_REG(33));
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index 2f7a05f21dc5..dcb8e1628632 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -125,6 +125,7 @@ config PHY_ROCKCHIP_USBDP
depends on ARCH_ROCKCHIP && OF
depends on TYPEC
select GENERIC_PHY
+ select USB_COMMON
help
Enable this to support the Rockchip USB3.0/DP combo PHY with
Samsung IP block. This is required for USB3 support on RK3588.
diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
index a1532ef8bbe9..8c3ce57f8915 100644
--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
@@ -324,7 +324,10 @@ static int rockchip_combphy_parse_dt(struct device *dev, struct rockchip_combphy
priv->ext_refclk = device_property_present(dev, "rockchip,ext-refclk");
- priv->phy_rst = devm_reset_control_get(dev, "phy");
+ priv->phy_rst = devm_reset_control_get_exclusive(dev, "phy");
+ /* fallback to old behaviour */
+ if (PTR_ERR(priv->phy_rst) == -ENOENT)
+ priv->phy_rst = devm_reset_control_array_get_exclusive(dev);
if (IS_ERR(priv->phy_rst))
return dev_err_probe(dev, PTR_ERR(priv->phy_rst), "failed to get phy reset\n");
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index c421b495eb0f..46b8f6987c62 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -488,9 +488,9 @@ exynos5_usbdrd_pipe3_set_refclk(struct phy_usb_instance *inst)
reg |= PHYCLKRST_REFCLKSEL_EXT_REFCLK;
/* FSEL settings corresponding to reference clock */
- reg &= ~PHYCLKRST_FSEL_PIPE_MASK |
- PHYCLKRST_MPLL_MULTIPLIER_MASK |
- PHYCLKRST_SSC_REFCLKSEL_MASK;
+ reg &= ~(PHYCLKRST_FSEL_PIPE_MASK |
+ PHYCLKRST_MPLL_MULTIPLIER_MASK |
+ PHYCLKRST_SSC_REFCLKSEL_MASK);
switch (phy_drd->extrefclk) {
case EXYNOS5_FSEL_50MHZ:
reg |= (PHYCLKRST_MPLL_MULTIPLIER_50M_REF |
@@ -532,9 +532,9 @@ exynos5_usbdrd_utmi_set_refclk(struct phy_usb_instance *inst)
reg &= ~PHYCLKRST_REFCLKSEL_MASK;
reg |= PHYCLKRST_REFCLKSEL_EXT_REFCLK;
- reg &= ~PHYCLKRST_FSEL_UTMI_MASK |
- PHYCLKRST_MPLL_MULTIPLIER_MASK |
- PHYCLKRST_SSC_REFCLKSEL_MASK;
+ reg &= ~(PHYCLKRST_FSEL_UTMI_MASK |
+ PHYCLKRST_MPLL_MULTIPLIER_MASK |
+ PHYCLKRST_SSC_REFCLKSEL_MASK);
reg |= PHYCLKRST_FSEL(phy_drd->extrefclk);
return reg;
@@ -1296,14 +1296,17 @@ static int exynos5_usbdrd_gs101_phy_exit(struct phy *phy)
struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
int ret;
+ if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI) {
+ ret = exynos850_usbdrd_phy_exit(phy);
+ if (ret)
+ return ret;
+ }
+
+ exynos5_usbdrd_phy_isol(inst, true);
+
if (inst->phy_cfg->id != EXYNOS5_DRDPHY_UTMI)
return 0;
- ret = exynos850_usbdrd_phy_exit(phy);
- if (ret)
- return ret;
-
- exynos5_usbdrd_phy_isol(inst, true);
return regulator_bulk_disable(phy_drd->drv_data->n_regulators,
phy_drd->regulators);
}
diff --git a/drivers/phy/st/phy-stm32-combophy.c b/drivers/phy/st/phy-stm32-combophy.c
index 49e9fa90a681..607b4d607eb5 100644
--- a/drivers/phy/st/phy-stm32-combophy.c
+++ b/drivers/phy/st/phy-stm32-combophy.c
@@ -111,6 +111,7 @@ static const struct clk_impedance imp_lookup[] = {
{ 4204000, { 511000, 609000, 706000, 802000 } },
{ 3999000, { 571000, 648000, 726000, 803000 } }
};
+#define DEFAULT_IMP_INDEX 3 /* Default impedance is 50 Ohm */
static int stm32_impedance_tune(struct stm32_combophy *combophy)
{
@@ -119,10 +120,9 @@ static int stm32_impedance_tune(struct stm32_combophy *combophy)
u8 imp_of, vswing_of;
u32 max_imp = imp_lookup[0].microohm;
u32 min_imp = imp_lookup[imp_size - 1].microohm;
- u32 max_vswing = imp_lookup[imp_size - 1].vswing[vswing_size - 1];
+ u32 max_vswing;
u32 min_vswing = imp_lookup[0].vswing[0];
u32 val;
- u32 regval;
if (!of_property_read_u32(combophy->dev->of_node, "st,output-micro-ohms", &val)) {
if (val < min_imp || val > max_imp) {
@@ -130,45 +130,43 @@ static int stm32_impedance_tune(struct stm32_combophy *combophy)
return -EINVAL;
}
- regval = 0;
- for (imp_of = 0; imp_of < ARRAY_SIZE(imp_lookup); imp_of++) {
- if (imp_lookup[imp_of].microohm <= val) {
- regval = FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_OHM, imp_of);
+ for (imp_of = 0; imp_of < ARRAY_SIZE(imp_lookup); imp_of++)
+ if (imp_lookup[imp_of].microohm <= val)
break;
- }
- }
+
+ if (WARN_ON(imp_of == ARRAY_SIZE(imp_lookup)))
+ return -EINVAL;
dev_dbg(combophy->dev, "Set %u micro-ohms output impedance\n",
imp_lookup[imp_of].microohm);
regmap_update_bits(combophy->regmap, SYSCFG_PCIEPRGCR,
STM32MP25_PCIEPRG_IMPCTRL_OHM,
- regval);
- } else {
- regmap_read(combophy->regmap, SYSCFG_PCIEPRGCR, &val);
- imp_of = FIELD_GET(STM32MP25_PCIEPRG_IMPCTRL_OHM, val);
- }
+ FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_OHM, imp_of));
+ } else
+ imp_of = DEFAULT_IMP_INDEX;
if (!of_property_read_u32(combophy->dev->of_node, "st,output-vswing-microvolt", &val)) {
+ max_vswing = imp_lookup[imp_of].vswing[vswing_size - 1];
+
if (val < min_vswing || val > max_vswing) {
dev_err(combophy->dev, "Invalid value %u for output vswing\n", val);
return -EINVAL;
}
- regval = 0;
- for (vswing_of = 0; vswing_of < ARRAY_SIZE(imp_lookup[imp_of].vswing); vswing_of++) {
- if (imp_lookup[imp_of].vswing[vswing_of] >= val) {
- regval = FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_VSWING, vswing_of);
+ for (vswing_of = 0; vswing_of < ARRAY_SIZE(imp_lookup[imp_of].vswing); vswing_of++)
+ if (imp_lookup[imp_of].vswing[vswing_of] >= val)
break;
- }
- }
+
+ if (WARN_ON(vswing_of == ARRAY_SIZE(imp_lookup[imp_of].vswing)))
+ return -EINVAL;
dev_dbg(combophy->dev, "Set %u microvolt swing\n",
imp_lookup[imp_of].vswing[vswing_of]);
regmap_update_bits(combophy->regmap, SYSCFG_PCIEPRGCR,
STM32MP25_PCIEPRG_IMPCTRL_VSWING,
- regval);
+ FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_VSWING, vswing_of));
}
return 0;
diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
index 0f60d5d1c167..fae6242aa730 100644
--- a/drivers/phy/tegra/xusb-tegra186.c
+++ b/drivers/phy/tegra/xusb-tegra186.c
@@ -928,6 +928,7 @@ static int tegra186_utmi_phy_init(struct phy *phy)
unsigned int index = lane->index;
struct device *dev = padctl->dev;
int err;
+ u32 reg;
port = tegra_xusb_find_usb2_port(padctl, index);
if (!port) {
@@ -935,6 +936,16 @@ static int tegra186_utmi_phy_init(struct phy *phy)
return -ENODEV;
}
+ if (port->mode == USB_DR_MODE_OTG ||
+ port->mode == USB_DR_MODE_PERIPHERAL) {
+ /* reset VBUS&ID OVERRIDE */
+ reg = padctl_readl(padctl, USB2_VBUS_ID);
+ reg &= ~VBUS_OVERRIDE;
+ reg &= ~ID_OVERRIDE(~0);
+ reg |= ID_OVERRIDE_FLOATING;
+ padctl_writel(padctl, reg, USB2_VBUS_ID);
+ }
+
if (port->supply && port->mode == USB_DR_MODE_HOST) {
err = regulator_enable(port->supply);
if (err) {
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index e0ca59ae3153..ff5d5e29629f 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -424,6 +424,12 @@ static int phy_gmii_sel_init_ports(struct phy_gmii_sel_priv *priv)
return 0;
}
+static const struct regmap_config phy_gmii_sel_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
static int phy_gmii_sel_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -468,7 +474,14 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
priv->regmap = syscon_node_to_regmap(node->parent);
if (IS_ERR(priv->regmap)) {
- priv->regmap = device_node_to_regmap(node);
+ void __iomem *base;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base),
+ "failed to get base memory resource\n");
+
+ priv->regmap = regmap_init_mmio(dev, base, &phy_gmii_sel_regmap_cfg);
if (IS_ERR(priv->regmap))
return dev_err_probe(dev, PTR_ERR(priv->regmap),
"Failed to get syscon\n");
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index 764cc1fe90ae..a2cb2d5544f5 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -452,6 +452,7 @@ static int amd_pmf_probe(struct platform_device *pdev)
mutex_init(&dev->lock);
mutex_init(&dev->update_mutex);
+ mutex_init(&dev->cb_mutex);
apmf_acpi_init(dev);
platform_set_drvdata(pdev, dev);
@@ -477,6 +478,7 @@ static void amd_pmf_remove(struct platform_device *pdev)
amd_pmf_dbgfs_unregister(dev);
mutex_destroy(&dev->lock);
mutex_destroy(&dev->update_mutex);
+ mutex_destroy(&dev->cb_mutex);
kfree(dev->buf);
}
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index 41b2b91b8fdc..e6bdee68ccf3 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -106,9 +106,12 @@ struct cookie_header {
#define PMF_TA_IF_VERSION_MAJOR 1
#define TA_PMF_ACTION_MAX 32
#define TA_PMF_UNDO_MAX 8
-#define TA_OUTPUT_RESERVED_MEM 906
+#define TA_OUTPUT_RESERVED_MEM 922
#define MAX_OPERATION_PARAMS 4
+#define TA_ERROR_CRYPTO_INVALID_PARAM 0x20002
+#define TA_ERROR_CRYPTO_BIN_TOO_LARGE 0x2000d
+
#define PMF_IF_V1 1
#define PMF_IF_V2 2
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
index e6cf0b22dac3..d3083383f11f 100644
--- a/drivers/platform/x86/amd/pmf/sps.c
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -297,12 +297,14 @@ int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf)
switch (pmf->current_profile) {
case PLATFORM_PROFILE_PERFORMANCE:
+ case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
mode = POWER_MODE_PERFORMANCE;
break;
case PLATFORM_PROFILE_BALANCED:
mode = POWER_MODE_BALANCED_POWER;
break;
case PLATFORM_PROFILE_LOW_POWER:
+ case PLATFORM_PROFILE_QUIET:
mode = POWER_MODE_POWER_SAVER;
break;
default:
@@ -387,6 +389,14 @@ static int amd_pmf_profile_set(struct device *dev,
return 0;
}
+static int amd_pmf_hidden_choices(void *drvdata, unsigned long *choices)
+{
+ set_bit(PLATFORM_PROFILE_QUIET, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, choices);
+
+ return 0;
+}
+
static int amd_pmf_profile_probe(void *drvdata, unsigned long *choices)
{
set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
@@ -398,6 +408,7 @@ static int amd_pmf_profile_probe(void *drvdata, unsigned long *choices)
static const struct platform_profile_ops amd_pmf_profile_ops = {
.probe = amd_pmf_profile_probe,
+ .hidden_choices = amd_pmf_hidden_choices,
.profile_get = amd_pmf_profile_get,
.profile_set = amd_pmf_profile_set,
};
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
index 8c88769ea1d8..ceaff1ebb7b9 100644
--- a/drivers/platform/x86/amd/pmf/tee-if.c
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -27,8 +27,11 @@ module_param(pb_side_load, bool, 0444);
MODULE_PARM_DESC(pb_side_load, "Sideload policy binaries debug policy failures");
#endif
-static const uuid_t amd_pmf_ta_uuid = UUID_INIT(0x6fd93b77, 0x3fb8, 0x524d,
- 0xb1, 0x2d, 0xc5, 0x29, 0xb1, 0x3d, 0x85, 0x43);
+static const uuid_t amd_pmf_ta_uuid[] = { UUID_INIT(0xd9b39bf2, 0x66bd, 0x4154, 0xaf, 0xb8, 0x8a,
+ 0xcc, 0x2b, 0x2b, 0x60, 0xd6),
+ UUID_INIT(0x6fd93b77, 0x3fb8, 0x524d, 0xb1, 0x2d, 0xc5,
+ 0x29, 0xb1, 0x3d, 0x85, 0x43),
+ };
static const char *amd_pmf_uevent_as_str(unsigned int state)
{
@@ -321,9 +324,9 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
*/
schedule_delayed_work(&dev->pb_work, msecs_to_jiffies(pb_actions_ms * 3));
} else {
- dev_err(dev->dev, "ta invoke cmd init failed err: %x\n", res);
+ dev_dbg(dev->dev, "ta invoke cmd init failed err: %x\n", res);
dev->smart_pc_enabled = false;
- return -EIO;
+ return res;
}
return 0;
@@ -390,12 +393,12 @@ static int amd_pmf_amdtee_ta_match(struct tee_ioctl_version_data *ver, const voi
return ver->impl_id == TEE_IMPL_ID_AMDTEE;
}
-static int amd_pmf_ta_open_session(struct tee_context *ctx, u32 *id)
+static int amd_pmf_ta_open_session(struct tee_context *ctx, u32 *id, const uuid_t *uuid)
{
struct tee_ioctl_open_session_arg sess_arg = {};
int rc;
- export_uuid(sess_arg.uuid, &amd_pmf_ta_uuid);
+ export_uuid(sess_arg.uuid, uuid);
sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
sess_arg.num_params = 0;
@@ -434,7 +437,7 @@ static int amd_pmf_register_input_device(struct amd_pmf_dev *dev)
return 0;
}
-static int amd_pmf_tee_init(struct amd_pmf_dev *dev)
+static int amd_pmf_tee_init(struct amd_pmf_dev *dev, const uuid_t *uuid)
{
u32 size;
int ret;
@@ -445,7 +448,7 @@ static int amd_pmf_tee_init(struct amd_pmf_dev *dev)
return PTR_ERR(dev->tee_ctx);
}
- ret = amd_pmf_ta_open_session(dev->tee_ctx, &dev->session_id);
+ ret = amd_pmf_ta_open_session(dev->tee_ctx, &dev->session_id, uuid);
if (ret) {
dev_err(dev->dev, "Failed to open TA session (%d)\n", ret);
ret = -EINVAL;
@@ -489,7 +492,8 @@ static void amd_pmf_tee_deinit(struct amd_pmf_dev *dev)
int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
{
- int ret;
+ bool status;
+ int ret, i;
ret = apmf_check_smart_pc(dev);
if (ret) {
@@ -502,10 +506,6 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
return -ENODEV;
}
- ret = amd_pmf_tee_init(dev);
- if (ret)
- return ret;
-
INIT_DELAYED_WORK(&dev->pb_work, amd_pmf_invoke_cmd);
ret = amd_pmf_set_dram_addr(dev, true);
@@ -534,8 +534,30 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
goto error;
}
- ret = amd_pmf_start_policy_engine(dev);
- if (ret)
+ for (i = 0; i < ARRAY_SIZE(amd_pmf_ta_uuid); i++) {
+ ret = amd_pmf_tee_init(dev, &amd_pmf_ta_uuid[i]);
+ if (ret)
+ return ret;
+
+ ret = amd_pmf_start_policy_engine(dev);
+ switch (ret) {
+ case TA_PMF_TYPE_SUCCESS:
+ status = true;
+ break;
+ case TA_ERROR_CRYPTO_INVALID_PARAM:
+ case TA_ERROR_CRYPTO_BIN_TOO_LARGE:
+ amd_pmf_tee_deinit(dev);
+ status = false;
+ break;
+ default:
+ goto error;
+ }
+
+ if (status)
+ break;
+ }
+
+ if (!status && !pb_side_load)
goto error;
if (pb_side_load)
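
The tee-if.c change above makes the driver try each supported policy TA in turn: a crypto "invalid parameter" or "binary too large" error tears down the TEE session and moves on to the next UUID, while any other failure is fatal. A rough, self-contained userspace sketch of that fallback loop follows; the TA names and the start_engine() helper are hypothetical, and only the two error values mirror the defines added in pmf.h.

#include <stdbool.h>
#include <stdio.h>

#define ERR_INVALID_PARAM	0x20002	/* mirrors TA_ERROR_CRYPTO_INVALID_PARAM */
#define ERR_BIN_TOO_LARGE	0x2000d	/* mirrors TA_ERROR_CRYPTO_BIN_TOO_LARGE */

/* Hypothetical stand-in for opening a session and starting the policy engine. */
static int start_engine(int ta_index)
{
	return ta_index == 0 ? ERR_INVALID_PARAM : 0;	/* first TA rejects, second works */
}

int main(void)
{
	const char *ta[2] = { "new-ta-uuid", "legacy-ta-uuid" };
	bool ok = false;
	int i;

	for (i = 0; i < 2 && !ok; i++) {
		int ret = start_engine(i);

		if (ret == 0) {
			ok = true;	/* keep this session */
			printf("policy engine started with %s\n", ta[i]);
		} else if (ret == ERR_INVALID_PARAM || ret == ERR_BIN_TOO_LARGE) {
			printf("%s rejected the policy (0x%x), trying the next TA\n",
			       ta[i], ret);
		} else {
			return 1;	/* any other error is fatal */
		}
	}
	return ok ? 0 : 1;
}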
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index 927a2993f616..88a1a9ff2f34 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -139,6 +139,13 @@ static const struct dmi_system_id button_array_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
},
},
+ {
+ .ident = "Microsoft Surface Go 4",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 4"),
+ },
+ },
{ }
};
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
index 8272f1dd0fbc..db3c031d1757 100644
--- a/drivers/platform/x86/intel/vsec.c
+++ b/drivers/platform/x86/intel/vsec.c
@@ -404,6 +404,11 @@ static const struct intel_vsec_platform_info oobmsm_info = {
.caps = VSEC_CAP_TELEMETRY | VSEC_CAP_SDSI | VSEC_CAP_TPMI,
};
+/* DMR OOBMSM info */
+static const struct intel_vsec_platform_info dmr_oobmsm_info = {
+ .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_TPMI,
+};
+
/* TGL info */
static const struct intel_vsec_platform_info tgl_info = {
.caps = VSEC_CAP_TELEMETRY,
@@ -420,6 +425,7 @@ static const struct intel_vsec_platform_info lnl_info = {
#define PCI_DEVICE_ID_INTEL_VSEC_MTL_M 0x7d0d
#define PCI_DEVICE_ID_INTEL_VSEC_MTL_S 0xad0d
#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM 0x09a7
+#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM_DMR 0x09a1
#define PCI_DEVICE_ID_INTEL_VSEC_RPL 0xa77d
#define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d
#define PCI_DEVICE_ID_INTEL_VSEC_LNL_M 0x647d
@@ -430,6 +436,7 @@ static const struct pci_device_id intel_vsec_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, VSEC_MTL_M, &mtl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_MTL_S, &mtl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &oobmsm_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM_DMR, &dmr_oobmsm_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_LNL_M, &lnl_info) },
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 72a10ed2017c..1cc91173e012 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -9972,6 +9972,7 @@ static const struct tpacpi_quirk battery_quirk_table[] __initconst = {
* Individual addressing is broken on models that expose the
* primary battery as BAT1.
*/
+ TPACPI_Q_LNV('G', '8', true), /* ThinkPad X131e */
TPACPI_Q_LNV('8', 'F', true), /* Thinkpad X120e */
TPACPI_Q_LNV('J', '7', true), /* B5400 */
TPACPI_Q_LNV('J', 'I', true), /* Thinkpad 11e */
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 27afbb9d544b..cbf531d0ba68 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -1742,7 +1742,8 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
err = rio_add_net(net);
if (err) {
rmcd_debug(RDEV, "failed to register net, err=%d", err);
- kfree(net);
+ put_device(&net->dev);
+ mport->net = NULL;
goto cleanup;
}
}
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index fdcf742b2adb..c12941f71e2c 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -871,7 +871,10 @@ static struct rio_net *rio_scan_alloc_net(struct rio_mport *mport,
dev_set_name(&net->dev, "rnet_%d", net->id);
net->dev.parent = &mport->dev;
net->dev.release = rio_scan_release_dev;
- rio_add_net(net);
+ if (rio_add_net(net)) {
+ put_device(&net->dev);
+ net = NULL;
+ }
}
return net;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index be0890e4e706..f1cfe0bb89b2 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1669,13 +1669,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
if (in_flight)
__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
- /*
- * Only clear the driver-private command data if the LLD does not supply
- * a function to initialize that data.
- */
- if (!shost->hostt->init_cmd_priv)
- memset(cmd + 1, 0, shost->hostt->cmd_size);
-
cmd->prot_op = SCSI_PROT_NORMAL;
if (blk_rq_bytes(req))
cmd->sc_data_direction = rq_dma_dir(req);
@@ -1842,6 +1835,13 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
if (!scsi_host_queue_ready(q, shost, sdev, cmd))
goto out_dec_target_busy;
+ /*
+ * Only clear the driver-private command data if the LLD does not supply
+ * a function to initialize that data.
+ */
+ if (shost->hostt->cmd_size && !shost->hostt->init_cmd_priv)
+ memset(cmd + 1, 0, shost->hostt->cmd_size);
+
if (!(req->rq_flags & RQF_DONTPREP)) {
ret = scsi_prepare_cmd(req);
if (ret != BLK_STS_OK)
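
The scsi_lib.c hunks move the memset of the driver-private command data from scsi_prepare_cmd() to scsi_queue_rq(), and only perform it when the LLD declares a cmd_size but no init_cmd_priv() hook. The "cmd + 1" idiom works because the private area is allocated immediately after the public struct. A minimal userspace sketch of that layout, with a hypothetical struct and size:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Public command structure; a driver-private area follows it in the same allocation. */
struct cmd {
	int id;
};

int main(void)
{
	size_t priv_size = 32;		/* hypothetical per-driver cmd_size */
	struct cmd *cmd = malloc(sizeof(*cmd) + priv_size);

	if (!cmd)
		return 1;
	cmd->id = 42;

	/* "cmd + 1" points just past the public struct, i.e. at the private area. */
	memset(cmd + 1, 0, priv_size);

	printf("zeroed %zu private bytes behind command %d\n", priv_size, cmd->id);
	free(cmd);
	return 0;
}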
diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
index e7aa9bd4b44b..6f01d944f9c6 100644
--- a/drivers/slimbus/messaging.c
+++ b/drivers/slimbus/messaging.c
@@ -148,8 +148,9 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
}
ret = ctrl->xfer_msg(ctrl, txn);
-
- if (!ret && need_tid && !txn->msg->comp) {
+ if (ret == -ETIMEDOUT) {
+ slim_free_txn_tid(ctrl, txn);
+ } else if (!ret && need_tid && !txn->msg->comp) {
unsigned long ms = txn->rl + HZ;
time_left = wait_for_completion_timeout(txn->comp,
diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
index 3b644de3292e..0d9f636c80f4 100644
--- a/drivers/thermal/gov_power_allocator.c
+++ b/drivers/thermal/gov_power_allocator.c
@@ -370,7 +370,7 @@ static void divvy_up_power(struct power_actor *power, int num_actors,
for (i = 0; i < num_actors; i++) {
struct power_actor *pa = &power[i];
- u64 req_range = (u64)pa->req_power * power_range;
+ u64 req_range = (u64)pa->weighted_req_power * power_range;
pa->granted_power = DIV_ROUND_CLOSEST_ULL(req_range,
total_req_power);
@@ -641,6 +641,22 @@ clean_state:
return ret;
}
+static void power_allocator_update_weight(struct power_allocator_params *params)
+{
+ const struct thermal_trip_desc *td;
+ struct thermal_instance *instance;
+
+ if (!params->trip_max)
+ return;
+
+ td = trip_to_trip_desc(params->trip_max);
+
+ params->total_weight = 0;
+ list_for_each_entry(instance, &td->thermal_instances, trip_node)
+ if (power_actor_is_valid(instance))
+ params->total_weight += instance->weight;
+}
+
static void power_allocator_update_tz(struct thermal_zone_device *tz,
enum thermal_notify_event reason)
{
@@ -656,16 +672,12 @@ static void power_allocator_update_tz(struct thermal_zone_device *tz,
if (power_actor_is_valid(instance))
num_actors++;
- if (num_actors == params->num_actors)
- return;
+ if (num_actors != params->num_actors)
+ allocate_actors_buffer(params, num_actors);
- allocate_actors_buffer(params, num_actors);
- break;
+ fallthrough;
case THERMAL_INSTANCE_WEIGHT_CHANGED:
- params->total_weight = 0;
- list_for_each_entry(instance, &td->thermal_instances, trip_node)
- if (power_actor_is_valid(instance))
- params->total_weight += instance->weight;
+ power_allocator_update_weight(params);
break;
default:
break;
@@ -731,6 +743,8 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
tz->governor_data = params;
+ power_allocator_update_weight(params);
+
return 0;
free_params:
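
The divvy_up_power() fix above bases each actor's grant on its weighted requested power rather than the raw request, so the split stays proportional to the configured weights. A small stand-alone C sketch of that proportional division with round-to-nearest arithmetic; the wattage values are made up:

#include <stdint.h>
#include <stdio.h>

/* Round-to-nearest unsigned division, similar in spirit to DIV_ROUND_CLOSEST_ULL(). */
static uint64_t div_round_closest(uint64_t n, uint64_t d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	/* Hypothetical weighted power requests (mW) from three cooling actors. */
	uint64_t weighted_req[3] = { 1500, 500, 2000 };
	uint64_t power_range = 3000;	/* budget to hand out (mW) */
	uint64_t total = 0;
	int i;

	for (i = 0; i < 3; i++)
		total += weighted_req[i];

	for (i = 0; i < 3; i++) {
		uint64_t granted = div_round_closest(weighted_req[i] * power_range, total);

		printf("actor %d: weighted request %llu mW -> granted %llu mW\n",
		       i, (unsigned long long)weighted_req[i],
		       (unsigned long long)granted);
	}
	return 0;
}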
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index 5ab4ce4daaeb..5401f03d6b6c 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -274,6 +274,34 @@ static bool thermal_of_get_cooling_spec(struct device_node *map_np, int index,
return true;
}
+static bool thermal_of_cm_lookup(struct device_node *cm_np,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev,
+ struct cooling_spec *c)
+{
+ for_each_child_of_node_scoped(cm_np, child) {
+ struct device_node *tr_np;
+ int count, i;
+
+ tr_np = of_parse_phandle(child, "trip", 0);
+ if (tr_np != trip->priv)
+ continue;
+
+ /* The trip has been found, look up the cdev. */
+ count = of_count_phandle_with_args(child, "cooling-device",
+ "#cooling-cells");
+ if (count <= 0)
+ pr_err("Add a cooling_device property with at least one device\n");
+
+ for (i = 0; i < count; i++) {
+ if (thermal_of_get_cooling_spec(child, i, cdev, c))
+ return true;
+ }
+ }
+
+ return false;
+}
+
static bool thermal_of_should_bind(struct thermal_zone_device *tz,
const struct thermal_trip *trip,
struct thermal_cooling_device *cdev,
@@ -293,27 +321,7 @@ static bool thermal_of_should_bind(struct thermal_zone_device *tz,
goto out;
/* Look up the trip and the cdev in the cooling maps. */
- for_each_child_of_node_scoped(cm_np, child) {
- struct device_node *tr_np;
- int count, i;
-
- tr_np = of_parse_phandle(child, "trip", 0);
- if (tr_np != trip->priv)
- continue;
-
- /* The trip has been found, look up the cdev. */
- count = of_count_phandle_with_args(child, "cooling-device", "#cooling-cells");
- if (count <= 0)
- pr_err("Add a cooling_device property with at least one device\n");
-
- for (i = 0; i < count; i++) {
- result = thermal_of_get_cooling_spec(child, i, cdev, c);
- if (result)
- break;
- }
-
- break;
- }
+ result = thermal_of_cm_lookup(cm_np, trip, cdev, c);
of_node_put(cm_np);
out:
diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
index 8d4ad0a3f2cf..252186124669 100644
--- a/drivers/ufs/core/ufs_bsg.c
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -194,10 +194,12 @@ out:
ufshcd_rpm_put_sync(hba);
kfree(buff);
bsg_reply->result = ret;
- job->reply_len = !rpmb ? sizeof(struct ufs_bsg_reply) : sizeof(struct ufs_rpmb_reply);
/* complete the job here only if no error */
- if (ret == 0)
+ if (ret == 0) {
+ job->reply_len = rpmb ? sizeof(struct ufs_rpmb_reply) :
+ sizeof(struct ufs_bsg_reply);
bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
+ }
return ret;
}
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 1893a7ad9531..464f13da259a 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -266,7 +266,7 @@ static bool ufshcd_has_pending_tasks(struct ufs_hba *hba)
static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
{
- return hba->outstanding_reqs || ufshcd_has_pending_tasks(hba);
+ return scsi_host_busy(hba->host) || ufshcd_has_pending_tasks(hba);
}
static const struct ufs_dev_quirk ufs_fixups[] = {
@@ -628,8 +628,8 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
- dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
- hba->outstanding_reqs, hba->outstanding_tasks);
+ dev_err(hba->dev, "%d outstanding reqs, tasks=0x%lx\n",
+ scsi_host_busy(hba->host), hba->outstanding_tasks);
dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
hba->saved_err, hba->saved_uic_err);
dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
@@ -8882,7 +8882,7 @@ static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
__func__, hba->outstanding_tasks);
- return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
+ return scsi_host_busy(hba->host) ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
}
static const struct attribute_group *ufshcd_driver_groups[] = {
@@ -10431,6 +10431,21 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
*/
spin_lock_init(&hba->clk_gating.lock);
+ /*
+ * Set the default power management level for runtime and system PM.
+ * Host controller drivers can override them in their
+ * 'ufs_hba_variant_ops::init' callback.
+ *
+ * Default power saving mode is to keep UFS link in Hibern8 state
+ * and UFS device in sleep state.
+ */
+ hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+ hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
@@ -10544,21 +10559,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto out_disable;
}
- /*
- * Set the default power management level for runtime and system PM if
- * not set by the host controller drivers.
- * Default power saving mode is to keep UFS link in Hibern8 state
- * and UFS device in sleep state.
- */
- if (!hba->rpm_lvl)
- hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
- UFS_SLEEP_PWR_MODE,
- UIC_LINK_HIBERN8_STATE);
- if (!hba->spm_lvl)
- hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
- UFS_SLEEP_PWR_MODE,
- UIC_LINK_HIBERN8_STATE);
-
INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, ufshcd_rpm_dev_flush_recheck_work);
INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work);
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 0dd85d2635b9..47d06af33747 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -1131,7 +1131,10 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
struct cxacru_data *instance;
struct usb_device *usb_dev = interface_to_usbdev(intf);
struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];
- struct usb_endpoint_descriptor *in, *out;
+ static const u8 ep_addrs[] = {
+ CXACRU_EP_CMD + USB_DIR_IN,
+ CXACRU_EP_CMD + USB_DIR_OUT,
+ 0};
int ret;
/* instance init */
@@ -1179,13 +1182,11 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
}
if (usb_endpoint_xfer_int(&cmd_ep->desc))
- ret = usb_find_common_endpoints(intf->cur_altsetting,
- NULL, NULL, &in, &out);
+ ret = usb_check_int_endpoints(intf, ep_addrs);
else
- ret = usb_find_common_endpoints(intf->cur_altsetting,
- &in, &out, NULL, NULL);
+ ret = usb_check_bulk_endpoints(intf, ep_addrs);
- if (ret) {
+ if (!ret) {
usb_err(usbatm_instance, "cxacru_bind: interface has incorrect endpoints\n");
ret = -ENODEV;
goto fail;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index a76bb50b6202..dcba4281ea48 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -6066,6 +6066,36 @@ void usb_hub_cleanup(void)
} /* usb_hub_cleanup() */
/**
+ * hub_hc_release_resources - clear resources used by host controller
+ * @udev: pointer to device being released
+ *
+ * Context: task context, might sleep
+ *
+ * Function releases the host controller resources in the correct order before
+ * any operation is performed on the resuming usb device. The host controller
+ * resources allocated for devices in the tree should be released starting from
+ * the last usb device in the tree toward the root hub. This function is used
+ * only while resuming a device that requires reinitialization, i.e. when the
+ * udev->reset_resume flag is set.
+ *
+ * This call is synchronous, and may not be used in an interrupt context.
+ */
+static void hub_hc_release_resources(struct usb_device *udev)
+{
+ struct usb_hub *hub = usb_hub_to_struct_hub(udev);
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ int i;
+
+ /* Release resources for all children before this device */
+ for (i = 0; i < udev->maxchild; i++)
+ if (hub->ports[i]->child)
+ hub_hc_release_resources(hub->ports[i]->child);
+
+ if (hcd->driver->reset_device)
+ hcd->driver->reset_device(hcd, udev);
+}
+
+/**
* usb_reset_and_verify_device - perform a USB port reset to reinitialize a device
* @udev: device to reset (not in SUSPENDED or NOTATTACHED state)
*
@@ -6129,6 +6159,9 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
bos = udev->bos;
udev->bos = NULL;
+ if (udev->reset_resume)
+ hub_hc_release_resources(udev);
+
mutex_lock(hcd->address0_mutex);
for (i = 0; i < PORT_INIT_TRIES; ++i) {
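
hub_hc_release_resources() added above walks the subtree recursively so that every child's host controller resources are dropped before its parent's, ending at the root hub. A toy, self-contained C sketch of that leaf-to-root ordering (the node layout and names are invented for illustration):

#include <stdio.h>

/* Toy device node with a fixed-size child table. */
struct node {
	const char *name;
	struct node *child[2];
};

/* Release every child first, then the node itself: a leaf-to-root walk. */
static void release(struct node *n)
{
	int i;

	if (!n)
		return;
	for (i = 0; i < 2; i++)
		release(n->child[i]);
	printf("releasing %s\n", n->name);
}

int main(void)
{
	struct node leaf1 = { "leaf1", { NULL, NULL } };
	struct node leaf2 = { "leaf2", { NULL, NULL } };
	struct node hub   = { "hub",   { &leaf1, &leaf2 } };
	struct node root  = { "root hub", { &hub, NULL } };

	release(&root);		/* prints the leaves first, the root hub last */
	return 0;
}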
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index dfcfc142bd5e..8efbacc5bc34 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -341,6 +341,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0638, 0x0a13), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
+ /* Prolific Single-LUN Mass Storage Card Reader */
+ { USB_DEVICE(0x067b, 0x2731), .driver_info = USB_QUIRK_DELAY_INIT |
+ USB_QUIRK_NO_LPM },
+
/* Saitek Cyborg Gold Joystick */
{ USB_DEVICE(0x06a3, 0x0006), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index dfa1b5fe48dc..66a08b527165 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -131,11 +131,24 @@ void dwc3_enable_susphy(struct dwc3 *dwc, bool enable)
}
}
-void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
+void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy)
{
+ unsigned int hw_mode;
u32 reg;
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+
+ /*
+ * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE and
+ * GUSB2PHYCFG.SUSPHY should be cleared during mode switching,
+ * and they can be set after core initialization.
+ */
+ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
+ if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD && !ignore_susphy) {
+ if (DWC3_GCTL_PRTCAP(reg) != mode)
+ dwc3_enable_susphy(dwc, false);
+ }
+
reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
reg |= DWC3_GCTL_PRTCAPDIR(mode);
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
@@ -216,7 +229,7 @@ static void __dwc3_set_mode(struct work_struct *work)
spin_lock_irqsave(&dwc->lock, flags);
- dwc3_set_prtcap(dwc, desired_dr_role);
+ dwc3_set_prtcap(dwc, desired_dr_role, false);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -658,16 +671,7 @@ static int dwc3_ss_phy_setup(struct dwc3 *dwc, int index)
*/
reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;
- /*
- * Above DWC_usb3.0 1.94a, it is recommended to set
- * DWC3_GUSB3PIPECTL_SUSPHY to '0' during coreConsultant configuration.
- * So default value will be '0' when the core is reset. Application
- * needs to set it to '1' after the core initialization is completed.
- *
- * Similarly for DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be
- * cleared after power-on reset, and it can be set after core
- * initialization.
- */
+ /* Ensure the GUSB3PIPECTL.SUSPENDENABLE is cleared prior to phy init. */
reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
if (dwc->u2ss_inp3_quirk)
@@ -747,15 +751,7 @@ static int dwc3_hs_phy_setup(struct dwc3 *dwc, int index)
break;
}
- /*
- * Above DWC_usb3.0 1.94a, it is recommended to set
- * DWC3_GUSB2PHYCFG_SUSPHY to '0' during coreConsultant configuration.
- * So default value will be '0' when the core is reset. Application
- * needs to set it to '1' after the core initialization is completed.
- *
- * Similarly for DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared
- * after power-on reset, and it can be set after core initialization.
- */
+ /* Ensure the GUSB2PHYCFG.SUSPHY is cleared prior to phy init. */
reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
if (dwc->dis_enblslpm_quirk)
@@ -830,6 +826,25 @@ static int dwc3_phy_init(struct dwc3 *dwc)
goto err_exit_usb3_phy;
}
+ /*
+ * Above DWC_usb3.0 1.94a, it is recommended to set
+ * DWC3_GUSB3PIPECTL_SUSPHY and DWC3_GUSB2PHYCFG_SUSPHY to '0' during
+ * coreConsultant configuration. So default value will be '0' when the
+ * core is reset. Application needs to set it to '1' after the core
+ * initialization is completed.
+ *
+ * Certain phys must be in the P0 power state during initialization.
+ * Make sure GUSB3PIPECTL.SUSPENDENABLE and GUSB2PHYCFG.SUSPHY are clear
+ * prior to phy init so the phy remains in the P0 state.
+ *
+ * After phy initialization, some phy operations can only be executed
+ * while in lower P states. Ensure GUSB3PIPECTL.SUSPENDENABLE and
+ * GUSB2PHYCFG.SUSPHY are set soon after initialization to avoid
+ * blocking phy ops.
+ */
+ if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
+ dwc3_enable_susphy(dwc, true);
+
return 0;
err_exit_usb3_phy:
@@ -1588,7 +1603,7 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
switch (dwc->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, false);
if (dwc->usb2_phy)
otg_set_vbus(dwc->usb2_phy->otg, false);
@@ -1600,7 +1615,7 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
return dev_err_probe(dev, ret, "failed to initialize gadget\n");
break;
case USB_DR_MODE_HOST:
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, false);
if (dwc->usb2_phy)
otg_set_vbus(dwc->usb2_phy->otg, true);
@@ -1645,7 +1660,7 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
}
/* de-assert DRVVBUS for HOST and OTG mode */
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true);
}
static void dwc3_get_software_properties(struct dwc3 *dwc)
@@ -1835,8 +1850,6 @@ static void dwc3_get_properties(struct dwc3 *dwc)
dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd;
dwc->tx_max_burst_prd = tx_max_burst_prd;
- dwc->imod_interval = 0;
-
dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num;
}
@@ -1854,21 +1867,19 @@ static void dwc3_check_params(struct dwc3 *dwc)
unsigned int hwparam_gen =
DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3);
- /* Check for proper value of imod_interval */
- if (dwc->imod_interval && !dwc3_has_imod(dwc)) {
- dev_warn(dwc->dev, "Interrupt moderation not supported\n");
- dwc->imod_interval = 0;
- }
-
/*
+ * Enable IMOD for all supporting controllers.
+ *
+ * Particularly, DWC_usb3 v3.00a must enable this feature for
+ * the following reason:
+ *
* Workaround for STAR 9000961433 which affects only version
* 3.00a of the DWC_usb3 core. This prevents the controller
* interrupt from being masked while handling events. IMOD
* allows us to work around this issue. Enable it for the
* affected version.
*/
- if (!dwc->imod_interval &&
- DWC3_VER_IS(DWC3, 300A))
+ if (dwc3_has_imod((dwc)))
dwc->imod_interval = 1;
/* Check the maximum_speed parameter */
@@ -2457,7 +2468,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
if (ret)
return ret;
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true);
dwc3_gadget_resume(dwc);
break;
case DWC3_GCTL_PRTCAP_HOST:
@@ -2465,7 +2476,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
ret = dwc3_core_init_for_resume(dwc);
if (ret)
return ret;
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, true);
break;
}
/* Restore GUSB2PHYCFG bits that were modified in suspend */
@@ -2494,7 +2505,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
if (ret)
return ret;
- dwc3_set_prtcap(dwc, dwc->current_dr_role);
+ dwc3_set_prtcap(dwc, dwc->current_dr_role, true);
dwc3_otg_init(dwc);
if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index c955039bb4f6..aaa39e663f60 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1558,7 +1558,7 @@ struct dwc3_gadget_ep_cmd_params {
#define DWC3_HAS_OTG BIT(3)
/* prototypes */
-void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode);
+void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy);
void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type);
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index d76ae676783c..7977860932b1 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -173,7 +173,7 @@ void dwc3_otg_init(struct dwc3 *dwc)
* block "Initialize GCTL for OTG operation".
*/
/* GCTL.PrtCapDir=2'b11 */
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG, true);
/* GUSB2PHYCFG0.SusPHY=0 */
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
@@ -556,7 +556,7 @@ int dwc3_drd_init(struct dwc3 *dwc)
dwc3_drd_update(dwc);
} else {
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG, true);
/* use OTG block to get ID event */
irq = dwc3_otg_get_irq(dwc);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index ddd6b2ce5710..89a4dc8ebf94 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -4501,14 +4501,18 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
DWC3_GEVNTSIZ_SIZE(evt->length));
+ evt->flags &= ~DWC3_EVENT_PENDING;
+ /*
+ * Add an explicit write memory barrier to make sure that the clearing of
+ * DWC3_EVENT_PENDING is observed by dwc3_check_event_buf()
+ */
+ wmb();
+
if (dwc->imod_interval) {
dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
}
- /* Keep the clearing of DWC3_EVENT_PENDING at the end */
- evt->flags &= ~DWC3_EVENT_PENDING;
-
return ret;
}
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index bdda8c74602d..869ad99afb48 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1050,10 +1050,11 @@ static int set_config(struct usb_composite_dev *cdev,
else
usb_gadget_set_remote_wakeup(gadget, 0);
done:
- if (power <= USB_SELF_POWER_VBUS_MAX_DRAW)
- usb_gadget_set_selfpowered(gadget);
- else
+ if (power > USB_SELF_POWER_VBUS_MAX_DRAW ||
+ (c && !(c->bmAttributes & USB_CONFIG_ATT_SELFPOWER)))
usb_gadget_clear_selfpowered(gadget);
+ else
+ usb_gadget_set_selfpowered(gadget);
usb_gadget_vbus_draw(gadget, power);
if (result >= 0 && cdev->delayed_status)
@@ -2615,7 +2616,10 @@ void composite_suspend(struct usb_gadget *gadget)
cdev->suspended = 1;
- usb_gadget_set_selfpowered(gadget);
+ if (cdev->config &&
+ cdev->config->bmAttributes & USB_CONFIG_ATT_SELFPOWER)
+ usb_gadget_set_selfpowered(gadget);
+
usb_gadget_vbus_draw(gadget, 2);
}
@@ -2649,8 +2653,11 @@ void composite_resume(struct usb_gadget *gadget)
else
maxpower = min(maxpower, 900U);
- if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW)
+ if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW ||
+ !(cdev->config->bmAttributes & USB_CONFIG_ATT_SELFPOWER))
usb_gadget_clear_selfpowered(gadget);
+ else
+ usb_gadget_set_selfpowered(gadget);
usb_gadget_vbus_draw(gadget, maxpower);
} else {
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 09e2838917e2..f58590bf5e02 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -1052,8 +1052,8 @@ void gether_suspend(struct gether *link)
* There is a transfer in progress. So we trigger a remote
* wakeup to inform the host.
*/
- ether_wakeup_host(dev->port_usb);
- return;
+ if (!ether_wakeup_host(dev->port_usb))
+ return;
}
spin_lock_irqsave(&dev->lock, flags);
link->is_suspend = true;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 9693464c0520..69c278b64084 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/unaligned.h>
#include <linux/bitfield.h>
+#include <linux/pci.h>
#include "xhci.h"
#include "xhci-trace.h"
@@ -770,9 +771,16 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci)
enum usb_link_tunnel_mode xhci_port_is_tunneled(struct xhci_hcd *xhci,
struct xhci_port *port)
{
+ struct usb_hcd *hcd;
void __iomem *base;
u32 offset;
+ /* Don't try to probe this capability for non-Intel hosts */
+ hcd = xhci_to_hcd(xhci);
+ if (!dev_is_pci(hcd->self.controller) ||
+ to_pci_dev(hcd->self.controller)->vendor != PCI_VENDOR_ID_INTEL)
+ return USB_LINK_UNKNOWN;
+
base = &xhci->cap_regs->hc_capbase;
offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_INTEL_SPR_SHADOW);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 92703efda1f7..fdf0c1008225 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2437,7 +2437,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* and our use of dma addresses in the trb_address_map radix tree needs
* TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
*/
- if (xhci->quirks & XHCI_ZHAOXIN_TRB_FETCH)
+ if (xhci->quirks & XHCI_TRB_OVERFETCH)
+ /* Buggy HC prefetches beyond segment bounds - allocate dummy space at the end */
xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
TRB_SEGMENT_SIZE * 2, TRB_SEGMENT_SIZE * 2, xhci->page_size * 2);
else
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index ad0ff356f6fa..54460d11f7ee 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -38,6 +38,8 @@
#define PCI_DEVICE_ID_ETRON_EJ168 0x7023
#define PCI_DEVICE_ID_ETRON_EJ188 0x7052
+#define PCI_DEVICE_ID_VIA_VL805 0x3483
+
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1
@@ -418,8 +420,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == 0x3432)
xhci->quirks |= XHCI_BROKEN_STREAMS;
- if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483)
+ if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == PCI_DEVICE_ID_VIA_VL805) {
xhci->quirks |= XHCI_LPM_SUPPORT;
+ xhci->quirks |= XHCI_TRB_OVERFETCH;
+ }
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) {
@@ -467,11 +471,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->device == 0x9202) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
- xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH;
+ xhci->quirks |= XHCI_TRB_OVERFETCH;
}
if (pdev->device == 0x9203)
- xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH;
+ xhci->quirks |= XHCI_TRB_OVERFETCH;
}
if (pdev->vendor == PCI_VENDOR_ID_CDNS &&
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 45653114ccd7..1a90ebc8a30e 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -780,8 +780,12 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
struct xhci_segment *seg;
ring = xhci->cmd_ring;
- xhci_for_each_ring_seg(ring->first_seg, seg)
+ xhci_for_each_ring_seg(ring->first_seg, seg) {
+ /* erase all TRBs before the link */
memset(seg->trbs, 0, sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+ /* clear link cycle bit */
+ seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE);
+ }
xhci_initialize_ring_info(ring);
/*
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 8c164340a2c3..779b01dee068 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1632,7 +1632,7 @@ struct xhci_hcd {
#define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42)
#define XHCI_SUSPEND_RESUME_CLKS BIT_ULL(43)
#define XHCI_RESET_TO_DEFAULT BIT_ULL(44)
-#define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(45)
+#define XHCI_TRB_OVERFETCH BIT_ULL(45)
#define XHCI_ZHAOXIN_HOST BIT_ULL(46)
#define XHCI_WRITE_64_HI_LO BIT_ULL(47)
#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48)
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 935fc496fe94..4b35ef216125 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -312,8 +312,10 @@ static int usbhsc_clk_get(struct device *dev, struct usbhs_priv *priv)
priv->clks[1] = of_clk_get(dev_of_node(dev), 1);
if (PTR_ERR(priv->clks[1]) == -ENOENT)
priv->clks[1] = NULL;
- else if (IS_ERR(priv->clks[1]))
+ else if (IS_ERR(priv->clks[1])) {
+ clk_put(priv->clks[0]);
return PTR_ERR(priv->clks[1]);
+ }
return 0;
}
@@ -779,6 +781,8 @@ static void usbhs_remove(struct platform_device *pdev)
dev_dbg(&pdev->dev, "usb remove\n");
+ flush_delayed_work(&priv->notify_hotplug_work);
+
/* power off */
if (!usbhs_get_dparam(priv, runtime_pwctrl))
usbhsc_power_ctrl(priv, 0);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 105132ae87ac..e8e5723f5412 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -1094,7 +1094,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
goto usbhs_mod_gadget_probe_err_gpriv;
}
- gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
+ gpriv->transceiver = devm_usb_get_phy(dev, USB_PHY_TYPE_UNDEFINED);
dev_info(dev, "%stransceiver found\n",
!IS_ERR(gpriv->transceiver) ? "" : "no ");
diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
index 64f6dd0dc660..88c50b984e8a 100644
--- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c
+++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
@@ -334,6 +334,11 @@ static int rt1711h_probe(struct i2c_client *client)
{
int ret;
struct rt1711h_chip *chip;
+ const u16 alert_mask = TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_DISCARDED |
+ TCPC_ALERT_TX_FAILED | TCPC_ALERT_RX_HARD_RST |
+ TCPC_ALERT_RX_STATUS | TCPC_ALERT_POWER_STATUS |
+ TCPC_ALERT_CC_STATUS | TCPC_ALERT_RX_BUF_OVF |
+ TCPC_ALERT_FAULT;
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
@@ -382,6 +387,12 @@ static int rt1711h_probe(struct i2c_client *client)
dev_name(chip->dev), chip);
if (ret < 0)
return ret;
+
+ /* Enable alert interrupts */
+ ret = rt1711h_write16(chip, TCPC_ALERT_MASK, alert_mask);
+ if (ret < 0)
+ return ret;
+
enable_irq_wake(client->irq);
return 0;
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index fcf499cc9458..2a2915b0a645 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -25,7 +25,7 @@
* difficult to estimate the time it takes for the system to process the command
* before it is actually passed to the PPM.
*/
-#define UCSI_TIMEOUT_MS 5000
+#define UCSI_TIMEOUT_MS 10000
/*
* UCSI_SWAP_TIMEOUT_MS - Timeout for role swap requests
@@ -1346,7 +1346,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
mutex_lock(&ucsi->ppm_lock);
- ret = ucsi->ops->read_cci(ucsi, &cci);
+ ret = ucsi->ops->poll_cci(ucsi, &cci);
if (ret < 0)
goto out;
@@ -1364,7 +1364,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS);
do {
- ret = ucsi->ops->read_cci(ucsi, &cci);
+ ret = ucsi->ops->poll_cci(ucsi, &cci);
if (ret < 0)
goto out;
if (cci & UCSI_CCI_COMMAND_COMPLETE)
@@ -1393,7 +1393,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
/* Give the PPM time to process a reset before reading CCI */
msleep(20);
- ret = ucsi->ops->read_cci(ucsi, &cci);
+ ret = ucsi->ops->poll_cci(ucsi, &cci);
if (ret)
goto out;
@@ -1825,11 +1825,11 @@ static int ucsi_init(struct ucsi *ucsi)
err_unregister:
for (con = connector; con->port; con++) {
+ if (con->wq)
+ destroy_workqueue(con->wq);
ucsi_unregister_partner(con);
ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
ucsi_unregister_port_psy(con);
- if (con->wq)
- destroy_workqueue(con->wq);
usb_power_delivery_unregister_capabilities(con->port_sink_caps);
con->port_sink_caps = NULL;
@@ -1929,8 +1929,8 @@ struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops)
struct ucsi *ucsi;
if (!ops ||
- !ops->read_version || !ops->read_cci || !ops->read_message_in ||
- !ops->sync_control || !ops->async_control)
+ !ops->read_version || !ops->read_cci || !ops->poll_cci ||
+ !ops->read_message_in || !ops->sync_control || !ops->async_control)
return ERR_PTR(-EINVAL);
ucsi = kzalloc(sizeof(*ucsi), GFP_KERNEL);
@@ -2013,10 +2013,6 @@ void ucsi_unregister(struct ucsi *ucsi)
for (i = 0; i < ucsi->cap.num_connectors; i++) {
cancel_work_sync(&ucsi->connector[i].work);
- ucsi_unregister_partner(&ucsi->connector[i]);
- ucsi_unregister_altmodes(&ucsi->connector[i],
- UCSI_RECIPIENT_CON);
- ucsi_unregister_port_psy(&ucsi->connector[i]);
if (ucsi->connector[i].wq) {
struct ucsi_work *uwork;
@@ -2032,6 +2028,11 @@ void ucsi_unregister(struct ucsi *ucsi)
destroy_workqueue(ucsi->connector[i].wq);
}
+ ucsi_unregister_partner(&ucsi->connector[i]);
+ ucsi_unregister_altmodes(&ucsi->connector[i],
+ UCSI_RECIPIENT_CON);
+ ucsi_unregister_port_psy(&ucsi->connector[i]);
+
usb_power_delivery_unregister_capabilities(ucsi->connector[i].port_sink_caps);
ucsi->connector[i].port_sink_caps = NULL;
usb_power_delivery_unregister_capabilities(ucsi->connector[i].port_source_caps);
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 82735eb34f0e..28780acc4af2 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -62,6 +62,7 @@ struct dentry;
* struct ucsi_operations - UCSI I/O operations
* @read_version: Read implemented UCSI version
* @read_cci: Read CCI register
+ * @poll_cci: Read CCI register while polling with notifications disabled
* @read_message_in: Read message data from UCSI
* @sync_control: Blocking control operation
* @async_control: Non-blocking control operation
@@ -76,6 +77,7 @@ struct dentry;
struct ucsi_operations {
int (*read_version)(struct ucsi *ucsi, u16 *version);
int (*read_cci)(struct ucsi *ucsi, u32 *cci);
+ int (*poll_cci)(struct ucsi *ucsi, u32 *cci);
int (*read_message_in)(struct ucsi *ucsi, void *val, size_t val_len);
int (*sync_control)(struct ucsi *ucsi, u64 command);
int (*async_control)(struct ucsi *ucsi, u64 command);
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 5c5515551963..ac1ebb5d9527 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -59,19 +59,24 @@ static int ucsi_acpi_read_version(struct ucsi *ucsi, u16 *version)
static int ucsi_acpi_read_cci(struct ucsi *ucsi, u32 *cci)
{
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
- int ret;
-
- if (UCSI_COMMAND(ua->cmd) == UCSI_PPM_RESET) {
- ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
- if (ret)
- return ret;
- }
memcpy(cci, ua->base + UCSI_CCI, sizeof(*cci));
return 0;
}
+static int ucsi_acpi_poll_cci(struct ucsi *ucsi, u32 *cci)
+{
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ int ret;
+
+ ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
+ if (ret)
+ return ret;
+
+ return ucsi_acpi_read_cci(ucsi, cci);
+}
+
static int ucsi_acpi_read_message_in(struct ucsi *ucsi, void *val, size_t val_len)
{
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
@@ -94,6 +99,7 @@ static int ucsi_acpi_async_control(struct ucsi *ucsi, u64 command)
static const struct ucsi_operations ucsi_acpi_ops = {
.read_version = ucsi_acpi_read_version,
.read_cci = ucsi_acpi_read_cci,
+ .poll_cci = ucsi_acpi_poll_cci,
.read_message_in = ucsi_acpi_read_message_in,
.sync_control = ucsi_sync_control_common,
.async_control = ucsi_acpi_async_control
@@ -142,6 +148,7 @@ static int ucsi_gram_sync_control(struct ucsi *ucsi, u64 command)
static const struct ucsi_operations ucsi_gram_ops = {
.read_version = ucsi_acpi_read_version,
.read_cci = ucsi_acpi_read_cci,
+ .poll_cci = ucsi_acpi_poll_cci,
.read_message_in = ucsi_gram_read_message_in,
.sync_control = ucsi_gram_sync_control,
.async_control = ucsi_acpi_async_control
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 740171f24ef9..4b1668733a4b 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -664,6 +664,7 @@ err_put:
static const struct ucsi_operations ucsi_ccg_ops = {
.read_version = ucsi_ccg_read_version,
.read_cci = ucsi_ccg_read_cci,
+ .poll_cci = ucsi_ccg_read_cci,
.read_message_in = ucsi_ccg_read_message_in,
.sync_control = ucsi_ccg_sync_control,
.async_control = ucsi_ccg_async_control,
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index fed39d458090..8af79101a2fc 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -206,6 +206,7 @@ static void pmic_glink_ucsi_connector_status(struct ucsi_connector *con)
static const struct ucsi_operations pmic_glink_ucsi_ops = {
.read_version = pmic_glink_ucsi_read_version,
.read_cci = pmic_glink_ucsi_read_cci,
+ .poll_cci = pmic_glink_ucsi_read_cci,
.read_message_in = pmic_glink_ucsi_read_message_in,
.sync_control = ucsi_sync_control_common,
.async_control = pmic_glink_ucsi_async_control,
diff --git a/drivers/usb/typec/ucsi/ucsi_stm32g0.c b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
index 6923fad31d79..57ef7d83a412 100644
--- a/drivers/usb/typec/ucsi/ucsi_stm32g0.c
+++ b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
@@ -424,6 +424,7 @@ static irqreturn_t ucsi_stm32g0_irq_handler(int irq, void *data)
static const struct ucsi_operations ucsi_stm32g0_ops = {
.read_version = ucsi_stm32g0_read_version,
.read_cci = ucsi_stm32g0_read_cci,
+ .poll_cci = ucsi_stm32g0_read_cci,
.read_message_in = ucsi_stm32g0_read_message_in,
.sync_control = ucsi_sync_control_common,
.async_control = ucsi_stm32g0_async_control,
diff --git a/drivers/usb/typec/ucsi/ucsi_yoga_c630.c b/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
index 4cae85c0dc12..d33e3f2dd1d8 100644
--- a/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
+++ b/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
@@ -74,6 +74,7 @@ static int yoga_c630_ucsi_async_control(struct ucsi *ucsi, u64 command)
static const struct ucsi_operations yoga_c630_ucsi_ops = {
.read_version = yoga_c630_ucsi_read_version,
.read_cci = yoga_c630_ucsi_read_cci,
+ .poll_cci = yoga_c630_ucsi_read_cci,
.read_message_in = yoga_c630_ucsi_read_message_in,
.sync_control = ucsi_sync_control_common,
.async_control = yoga_c630_ucsi_async_control,
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9ac25d08f473..63612faeab72 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -666,7 +666,7 @@ static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
worker, name);
- if (!vtsk)
+ if (IS_ERR(vtsk))
goto free_worker;
mutex_init(&worker->mutex);
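
The vhost fix above matters because vhost_task_create() reports failure with an ERR_PTR-encoded pointer, not NULL, so "if (!vtsk)" silently missed every error. A tiny userspace imitation of the ERR_PTR()/IS_ERR() convention, shown only to illustrate why the NULL check was wrong; the helpers below are simplified reimplementations, not the kernel's:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Small negative errno values are encoded in the top page of the address space. */
#define MAX_ERRNO 4095

static void *err_ptr(long error)	{ return (void *)error; }
static int is_err(const void *ptr)	{ return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO; }
static long ptr_err(const void *ptr)	{ return (long)(intptr_t)ptr; }

/* A creator that reports failure via an encoded error pointer, never via NULL. */
static void *task_create(int should_fail)
{
	static int dummy_task;

	return should_fail ? err_ptr(-ENOMEM) : (void *)&dummy_task;
}

int main(void)
{
	void *task = task_create(1);

	if (is_err(task)) {	/* "if (!task)" would wrongly treat this as success */
		printf("create failed: %ld\n", ptr_err(task));
		return 1;
	}
	return 0;
}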
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 44c9ef1435a2..5df981920a94 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -39,6 +39,7 @@ source "drivers/gpu/vga/Kconfig"
source "drivers/gpu/host1x/Kconfig"
source "drivers/gpu/ipu-v3/Kconfig"
+source "drivers/gpu/nova-core/Kconfig"
source "drivers/gpu/drm/Kconfig"
diff --git a/drivers/virt/acrn/hsm.c b/drivers/virt/acrn/hsm.c
index c24036c4e51e..e4e196abdaac 100644
--- a/drivers/virt/acrn/hsm.c
+++ b/drivers/virt/acrn/hsm.c
@@ -49,7 +49,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr)
switch (cmd & PMCMD_TYPE_MASK) {
case ACRN_PMCMD_GET_PX_CNT:
case ACRN_PMCMD_GET_CX_CNT:
- pm_info = kmalloc(sizeof(u64), GFP_KERNEL);
+ pm_info = kzalloc(sizeof(u64), GFP_KERNEL);
if (!pm_info)
return -ENOMEM;
@@ -64,7 +64,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr)
kfree(pm_info);
break;
case ACRN_PMCMD_GET_PX_DATA:
- px_data = kmalloc(sizeof(*px_data), GFP_KERNEL);
+ px_data = kzalloc(sizeof(*px_data), GFP_KERNEL);
if (!px_data)
return -ENOMEM;
@@ -79,7 +79,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr)
kfree(px_data);
break;
case ACRN_PMCMD_GET_CX_DATA:
- cx_data = kmalloc(sizeof(*cx_data), GFP_KERNEL);
+ cx_data = kzalloc(sizeof(*cx_data), GFP_KERNEL);
if (!cx_data)
return -ENOMEM;
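
The acrn/hsm.c change swaps kmalloc() for kzalloc() because these buffers are later copied out to userspace even when the hypervisor fills only part of them; zero-initialising them keeps stale heap bytes from leaking. A short userspace analogue using calloc(), with made-up sizes:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t buf_len = 64;	/* size of the buffer handed back to userspace */
	size_t filled = 16;	/* hypothetically, only part of it gets real data */
	unsigned char *buf;

	/* calloc (like kzalloc) guarantees the bytes past 'filled' are zero, so
	 * copying the whole buffer out cannot expose stale heap contents. */
	buf = calloc(1, buf_len);
	if (!buf)
		return 1;

	memset(buf, 0xab, filled);	/* pretend the hypervisor filled this much */
	printf("byte right after the payload: %#x (always 0)\n", buf[filled]);

	free(buf);
	return 0;
}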
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index 264b6523fe52..70fbc9a3e703 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -38,12 +38,6 @@ struct snp_guest_dev {
struct miscdevice misc;
struct snp_msg_desc *msg_desc;
-
- union {
- struct snp_report_req report;
- struct snp_derived_key_req derived_key;
- struct snp_ext_report_req ext_report;
- } req;
};
/*
@@ -71,7 +65,7 @@ struct snp_req_resp {
static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
- struct snp_report_req *report_req = &snp_dev->req.report;
+ struct snp_report_req *report_req __free(kfree) = NULL;
struct snp_msg_desc *mdesc = snp_dev->msg_desc;
struct snp_report_resp *report_resp;
struct snp_guest_req req = {};
@@ -80,6 +74,10 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
if (!arg->req_data || !arg->resp_data)
return -EINVAL;
+ report_req = kzalloc(sizeof(*report_req), GFP_KERNEL_ACCOUNT);
+ if (!report_req)
+ return -ENOMEM;
+
if (copy_from_user(report_req, (void __user *)arg->req_data, sizeof(*report_req)))
return -EFAULT;
@@ -116,7 +114,7 @@ e_free:
static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
- struct snp_derived_key_req *derived_key_req = &snp_dev->req.derived_key;
+ struct snp_derived_key_req *derived_key_req __free(kfree) = NULL;
struct snp_derived_key_resp derived_key_resp = {0};
struct snp_msg_desc *mdesc = snp_dev->msg_desc;
struct snp_guest_req req = {};
@@ -136,6 +134,10 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
if (sizeof(buf) < resp_len)
return -ENOMEM;
+ derived_key_req = kzalloc(sizeof(*derived_key_req), GFP_KERNEL_ACCOUNT);
+ if (!derived_key_req)
+ return -ENOMEM;
+
if (copy_from_user(derived_key_req, (void __user *)arg->req_data,
sizeof(*derived_key_req)))
return -EFAULT;
@@ -168,16 +170,21 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
struct snp_req_resp *io)
{
- struct snp_ext_report_req *report_req = &snp_dev->req.ext_report;
+ struct snp_ext_report_req *report_req __free(kfree) = NULL;
struct snp_msg_desc *mdesc = snp_dev->msg_desc;
struct snp_report_resp *report_resp;
struct snp_guest_req req = {};
int ret, npages = 0, resp_len;
sockptr_t certs_address;
+ struct page *page;
if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data))
return -EINVAL;
+ report_req = kzalloc(sizeof(*report_req), GFP_KERNEL_ACCOUNT);
+ if (!report_req)
+ return -ENOMEM;
+
if (copy_from_sockptr(report_req, io->req_data, sizeof(*report_req)))
return -EFAULT;
@@ -203,8 +210,20 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
* the host. If host does not supply any certs in it, then copy
* zeros to indicate that certificate data was not provided.
*/
- memset(mdesc->certs_data, 0, report_req->certs_len);
npages = report_req->certs_len >> PAGE_SHIFT;
+ page = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO,
+ get_order(report_req->certs_len));
+ if (!page)
+ return -ENOMEM;
+
+ req.certs_data = page_address(page);
+ ret = set_memory_decrypted((unsigned long)req.certs_data, npages);
+ if (ret) {
+ pr_err("failed to mark page shared, ret=%d\n", ret);
+ __free_pages(page, get_order(report_req->certs_len));
+ return -EFAULT;
+ }
+
cmd:
/*
* The intermediate response buffer is used while decrypting the
@@ -213,10 +232,12 @@ cmd:
*/
resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize;
report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
- if (!report_resp)
- return -ENOMEM;
+ if (!report_resp) {
+ ret = -ENOMEM;
+ goto e_free_data;
+ }
- mdesc->input.data_npages = npages;
+ req.input.data_npages = npages;
req.msg_version = arg->msg_version;
req.msg_type = SNP_MSG_REPORT_REQ;
@@ -231,7 +252,7 @@ cmd:
/* If certs length is invalid then copy the returned length */
if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
- report_req->certs_len = mdesc->input.data_npages << PAGE_SHIFT;
+ report_req->certs_len = req.input.data_npages << PAGE_SHIFT;
if (copy_to_sockptr(io->req_data, report_req, sizeof(*report_req)))
ret = -EFAULT;
@@ -240,7 +261,7 @@ cmd:
if (ret)
goto e_free;
- if (npages && copy_to_sockptr(certs_address, mdesc->certs_data, report_req->certs_len)) {
+ if (npages && copy_to_sockptr(certs_address, req.certs_data, report_req->certs_len)) {
ret = -EFAULT;
goto e_free;
}
@@ -250,6 +271,13 @@ cmd:
e_free:
kfree(report_resp);
+e_free_data:
+ if (npages) {
+ if (set_memory_encrypted((unsigned long)req.certs_data, npages))
+ WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n");
+ else
+ __free_pages(page, get_order(report_req->certs_len));
+ }
return ret;
}
diff --git a/drivers/virt/vboxguest/Kconfig b/drivers/virt/vboxguest/Kconfig
index 11b153e7454e..eaba28c95e73 100644
--- a/drivers/virt/vboxguest/Kconfig
+++ b/drivers/virt/vboxguest/Kconfig
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config VBOXGUEST
tristate "Virtual Box Guest integration support"
- depends on (ARM64 || X86) && PCI && INPUT
+ depends on (ARM64 || X86 || COMPILE_TEST) && PCI && INPUT
+ depends on HAS_IOPORT
help
This is a driver for the Virtual Box Guest PCI device used in
Virtual Box virtual machines. Enabling this driver will add
diff --git a/fs/affs/file.c b/fs/affs/file.c
index a5a861dd5223..7a71018e3f67 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -596,7 +596,7 @@ affs_extent_file_ofs(struct inode *inode, u32 newsize)
BUG_ON(tmp > bsize);
AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
- AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
+ AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
affs_fix_checksum(sb, bh);
bh->b_state &= ~(1UL << BH_New);
@@ -724,7 +724,8 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
tmp = min(bsize - boff, to - from);
BUG_ON(boff + tmp > bsize || tmp > bsize);
memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
- be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
+ AFFS_DATA_HEAD(bh)->size = cpu_to_be32(
+ max(boff + tmp, be32_to_cpu(AFFS_DATA_HEAD(bh)->size)));
affs_fix_checksum(sb, bh);
mark_buffer_dirty_inode(bh, inode);
written += tmp;
@@ -746,7 +747,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
if (buffer_new(bh)) {
AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
- AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
+ AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
AFFS_DATA_HEAD(bh)->next = 0;
bh->b_state &= ~(1UL << BH_New);
@@ -780,7 +781,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
if (buffer_new(bh)) {
AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
- AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
+ AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
AFFS_DATA_HEAD(bh)->next = 0;
bh->b_state &= ~(1UL << BH_New);
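
The affs change above stops blindly adding the written length to the OFS data-block size and instead records max(end of this write, old size), which covers both appends and overwrites of already-written bytes (the block sequence numbers are also corrected to be 1-based). A small sketch of the max-based size update with made-up offsets:

#include <stdio.h>

/* Recorded size of a partially filled data block: when a write lands inside
 * already-written bytes the stored size must neither shrink nor be double
 * counted, so take the maximum of "end of this write" and the old size. */
static unsigned int update_block_size(unsigned int old_size,
				      unsigned int write_off,
				      unsigned int write_len)
{
	unsigned int end = write_off + write_len;

	return end > old_size ? end : old_size;
}

int main(void)
{
	printf("append:    %u\n", update_block_size(100, 100, 50));	/* 150 */
	printf("overwrite: %u\n", update_block_size(150, 20, 30));	/* stays 150 */
	return 0;
}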
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 038f9d0ae3af..4504e16b458c 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -163,6 +163,8 @@ static struct afs_server *afs_install_server(struct afs_cell *cell,
rb_insert_color(&server->uuid_rb, &net->fs_servers);
hlist_add_head_rcu(&server->proc_link, &net->fs_proc);
+ afs_get_cell(cell, afs_cell_trace_get_server);
+
added_dup:
write_seqlock(&net->fs_addr_lock);
estate = rcu_dereference_protected(server->endpoint_state,
@@ -442,6 +444,7 @@ static void afs_server_rcu(struct rcu_head *rcu)
atomic_read(&server->active), afs_server_trace_free);
afs_put_endpoint_state(rcu_access_pointer(server->endpoint_state),
afs_estate_trace_put_server);
+ afs_put_cell(server->cell, afs_cell_trace_put_server);
kfree(server);
}
diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
index 7e7e567a7f8a..d20cd902ef94 100644
--- a/fs/afs/server_list.c
+++ b/fs/afs/server_list.c
@@ -97,8 +97,8 @@ struct afs_server_list *afs_alloc_server_list(struct afs_volume *volume,
break;
if (j < slist->nr_servers) {
if (slist->servers[j].server == server) {
- afs_put_server(volume->cell->net, server,
- afs_server_trace_put_slist_isort);
+ afs_unuse_server(volume->cell->net, server,
+ afs_server_trace_put_slist_isort);
continue;
}
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index ca755e8d1a37..1ec1f90e0eb3 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -203,7 +203,7 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
return NULL;
}
- bch2_btree_lock_init(&b->c, 0);
+ bch2_btree_lock_init(&b->c, 0, GFP_KERNEL);
__bch2_btree_node_to_freelist(bc, b);
return b;
@@ -795,17 +795,18 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea
}
b = __btree_node_mem_alloc(c, GFP_NOWAIT|__GFP_NOWARN);
- if (!b) {
+ if (b) {
+ bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0, GFP_NOWAIT);
+ } else {
mutex_unlock(&bc->lock);
bch2_trans_unlock(trans);
b = __btree_node_mem_alloc(c, GFP_KERNEL);
if (!b)
goto err;
+ bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0, GFP_KERNEL);
mutex_lock(&bc->lock);
}
- bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0);
-
BUG_ON(!six_trylock_intent(&b->c.lock));
BUG_ON(!six_trylock_write(&b->c.lock));
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index e371e60e3133..dece27d9db04 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -996,7 +996,7 @@ drop_this_key:
}
got_good_key:
le16_add_cpu(&i->u64s, -next_good_key);
- memmove_u64s_down(k, bkey_p_next(k), (u64 *) vstruct_end(i) - (u64 *) k);
+ memmove_u64s_down(k, (u64 *) k + next_good_key, (u64 *) vstruct_end(i) - (u64 *) k);
set_btree_node_need_rewrite(b);
}
fsck_err:
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index 1821f40c161a..edce59433375 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -156,7 +156,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path, unsigned k
}
if (ck) {
- bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);
+ bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0, GFP_KERNEL);
ck->c.cached = true;
goto lock;
}
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index 10b805a60f52..caef65adeae4 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -7,9 +7,10 @@
static struct lock_class_key bch2_btree_node_lock_key;
void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
- enum six_lock_init_flags flags)
+ enum six_lock_init_flags flags,
+ gfp_t gfp)
{
- __six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags);
+ __six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags, gfp);
lockdep_set_notrack_class(&b->lock);
}
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index b54ef48eb8cc..b33ab7af8440 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -13,7 +13,7 @@
#include "btree_iter.h"
#include "six.h"
-void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags);
+void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags, gfp_t gfp);
void bch2_trans_unlock_noassert(struct btree_trans *);
void bch2_trans_unlock_write(struct btree_trans *);
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index 337494facac6..642fbc60ecab 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -340,6 +340,7 @@ restart_drop_extra_replicas:
struct printbuf buf = PRINTBUF;
prt_str(&buf, "about to insert invalid key in data update path");
+ prt_printf(&buf, "\nop.nonce: %u", m->op.nonce);
prt_str(&buf, "\nold: ");
bch2_bkey_val_to_text(&buf, c, old);
prt_str(&buf, "\nk: ");
diff --git a/fs/bcachefs/dirent.h b/fs/bcachefs/dirent.h
index a633f83c1ac7..362b3b2f2f2e 100644
--- a/fs/bcachefs/dirent.h
+++ b/fs/bcachefs/dirent.h
@@ -31,11 +31,6 @@ static inline unsigned dirent_val_u64s(unsigned len)
sizeof(u64));
}
-static inline unsigned int dirent_occupied_size(const struct qstr *name)
-{
- return (BKEY_U64s + dirent_val_u64s(name->len)) * sizeof(u64);
-}
-
int bch2_dirent_read_target(struct btree_trans *, subvol_inum,
struct bkey_s_c_dirent, subvol_inum *);
diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h
index 620b284aa34f..204d765dd74c 100644
--- a/fs/bcachefs/extents.h
+++ b/fs/bcachefs/extents.h
@@ -704,7 +704,7 @@ static inline bool bch2_extent_ptr_eq(struct bch_extent_ptr ptr1,
ptr1.unwritten == ptr2.unwritten &&
ptr1.offset == ptr2.offset &&
ptr1.dev == ptr2.dev &&
- ptr1.dev == ptr2.dev);
+ ptr1.gen == ptr2.gen);
}
void bch2_ptr_swab(struct bkey_s);
diff --git a/fs/bcachefs/fs-common.c b/fs/bcachefs/fs-common.c
index d70d9f634cea..2c3d46ac70c6 100644
--- a/fs/bcachefs/fs-common.c
+++ b/fs/bcachefs/fs-common.c
@@ -152,7 +152,6 @@ int bch2_create_trans(struct btree_trans *trans,
if (is_subdir_for_nlink(new_inode))
dir_u->bi_nlink++;
dir_u->bi_mtime = dir_u->bi_ctime = now;
- dir_u->bi_size += dirent_occupied_size(name);
ret = bch2_inode_write(trans, &dir_iter, dir_u);
if (ret)
@@ -221,7 +220,6 @@ int bch2_link_trans(struct btree_trans *trans,
}
dir_u->bi_mtime = dir_u->bi_ctime = now;
- dir_u->bi_size += dirent_occupied_size(name);
dir_hash = bch2_hash_info_init(c, dir_u);
@@ -324,7 +322,6 @@ int bch2_unlink_trans(struct btree_trans *trans,
dir_u->bi_mtime = dir_u->bi_ctime = inode_u->bi_ctime = now;
dir_u->bi_nlink -= is_subdir_for_nlink(inode_u);
- dir_u->bi_size -= dirent_occupied_size(name);
ret = bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
&dir_hash, &dirent_iter,
@@ -463,14 +460,6 @@ int bch2_rename_trans(struct btree_trans *trans,
goto err;
}
- if (mode == BCH_RENAME) {
- src_dir_u->bi_size -= dirent_occupied_size(src_name);
- dst_dir_u->bi_size += dirent_occupied_size(dst_name);
- }
-
- if (mode == BCH_RENAME_OVERWRITE)
- src_dir_u->bi_size -= dirent_occupied_size(src_name);
-
if (src_inode_u->bi_parent_subvol)
src_inode_u->bi_parent_subvol = dst_dir.subvol;
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index 94bf34b9b65f..717e7b94c66f 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -466,6 +466,7 @@ int bchfs_truncate(struct mnt_idmap *idmap,
ret = bch2_truncate_folio(inode, iattr->ia_size);
if (unlikely(ret < 0))
goto err;
+ ret = 0;
truncate_setsize(&inode->v, iattr->ia_size);
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index 9bf316e7b845..0e85131d0af8 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -1978,31 +1978,10 @@ fsck_err:
return ret;
}
-static int check_dir_i_size_notnested(struct btree_trans *trans, struct inode_walker *w)
-{
- struct bch_fs *c = trans->c;
- int ret = 0;
-
- darray_for_each(w->inodes, i)
- if (fsck_err_on(i->inode.bi_size != i->i_size,
- trans, inode_dir_wrong_nlink,
- "directory %llu:%u with wrong i_size: got %llu, should be %llu",
- w->last_pos.inode, i->snapshot, i->inode.bi_size, i->i_size)) {
- i->inode.bi_size = i->i_size;
- ret = bch2_fsck_write_inode(trans, &i->inode);
- if (ret)
- break;
- }
-fsck_err:
- bch_err_fn(c, ret);
- return ret;
-}
-
static int check_subdir_dirents_count(struct btree_trans *trans, struct inode_walker *w)
{
u32 restart_count = trans->restart_count;
return check_subdir_count_notnested(trans, w) ?:
- check_dir_i_size_notnested(trans, w) ?:
trans_was_restarted(trans, restart_count);
}
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index 24c294d4634e..05b1250619ec 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -1021,8 +1021,8 @@ struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j,
/* allocate journal on a device: */
-static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
- bool new_fs, struct closure *cl)
+static int bch2_set_nr_journal_buckets_iter(struct bch_dev *ca, unsigned nr,
+ bool new_fs, struct closure *cl)
{
struct bch_fs *c = ca->fs;
struct journal_device *ja = &ca->journal;
@@ -1150,26 +1150,20 @@ err_free:
return ret;
}
-/*
- * Allocate more journal space at runtime - not currently making use if it, but
- * the code works:
- */
-int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
- unsigned nr)
+static int bch2_set_nr_journal_buckets_loop(struct bch_fs *c, struct bch_dev *ca,
+ unsigned nr, bool new_fs)
{
struct journal_device *ja = &ca->journal;
- struct closure cl;
int ret = 0;
+ struct closure cl;
closure_init_stack(&cl);
- down_write(&c->state_lock);
-
/* don't handle reducing nr of buckets yet: */
if (nr < ja->nr)
- goto unlock;
+ return 0;
- while (ja->nr < nr) {
+ while (!ret && ja->nr < nr) {
struct disk_reservation disk_res = { 0, 0, 0 };
/*
@@ -1182,25 +1176,38 @@ int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
* filesystem-wide allocation will succeed, this is a device
* specific allocation - we can hang here:
*/
+ if (!new_fs) {
+ ret = bch2_disk_reservation_get(c, &disk_res,
+ bucket_to_sector(ca, nr - ja->nr), 1, 0);
+ if (ret)
+ break;
+ }
- ret = bch2_disk_reservation_get(c, &disk_res,
- bucket_to_sector(ca, nr - ja->nr), 1, 0);
- if (ret)
- break;
+ ret = bch2_set_nr_journal_buckets_iter(ca, nr, new_fs, &cl);
- ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);
+ if (ret == -BCH_ERR_bucket_alloc_blocked ||
+ ret == -BCH_ERR_open_buckets_empty)
+ ret = 0; /* wait and retry */
bch2_disk_reservation_put(c, &disk_res);
-
closure_sync(&cl);
-
- if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
- break;
}
- bch_err_fn(c, ret);
-unlock:
+ return ret;
+}
+
+/*
+ * Allocate more journal space at runtime - not currently making use of it, but
+ * the code works:
+ */
+int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
+ unsigned nr)
+{
+ down_write(&c->state_lock);
+ int ret = bch2_set_nr_journal_buckets_loop(c, ca, nr, false);
up_write(&c->state_lock);
+
+ bch_err_fn(c, ret);
return ret;
}
@@ -1226,7 +1233,7 @@ int bch2_dev_journal_alloc(struct bch_dev *ca, bool new_fs)
min(1 << 13,
(1 << 24) / ca->mi.bucket_size));
- ret = __bch2_set_nr_journal_buckets(ca, nr, new_fs, NULL);
+ ret = bch2_set_nr_journal_buckets_loop(ca->fs, ca, nr, new_fs);
err:
bch_err_fn(ca, ret);
return ret;
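
The rework above folds the runtime path and the new-fs path into bch2_set_nr_journal_buckets_loop(), which treats -BCH_ERR_bucket_alloc_blocked and -BCH_ERR_open_buckets_empty as "wait and retry" rather than as fatal errors. A minimal userspace sketch of that retry shape follows; every name in it is invented for illustration and it is not the bcachefs code itself.

/*
 * Transient failures are converted back to "keep looping",
 * anything else aborts the loop.
 */
#include <stdio.h>

#define ERR_TRANSIENT  (-11)	/* stands in for -BCH_ERR_bucket_alloc_blocked */

static int grow_one_step(int have, int want)
{
	/* pretend the first two attempts find the allocator busy */
	static int attempts;

	if (attempts++ < 2)
		return ERR_TRANSIENT;
	return 0;
}

static int grow_to(int *have, int want)
{
	int ret = 0;

	while (!ret && *have < want) {
		ret = grow_one_step(*have, want);
		if (ret == ERR_TRANSIENT)
			ret = 0;	/* wait and retry */
		else if (!ret)
			(*have)++;
	}
	return ret;
}

int main(void)
{
	int have = 0;
	int ret = grow_to(&have, 4);

	printf("have=%d ret=%d\n", have, ret);	/* have=4 ret=0 */
	return 0;
}
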
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 21805509ab9e..6718dc37c5a3 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -74,20 +74,14 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,
struct move_bucket *b, u64 time)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bkey_s_c k;
- struct bch_alloc_v4 _a;
- const struct bch_alloc_v4 *a;
- int ret;
- if (bch2_bucket_is_open(trans->c,
- b->k.bucket.inode,
- b->k.bucket.offset))
+ if (bch2_bucket_is_open(c, b->k.bucket.inode, b->k.bucket.offset))
return 0;
- k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
- b->k.bucket, BTREE_ITER_cached);
- ret = bkey_err(k);
+ struct btree_iter iter;
+ struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
+ b->k.bucket, BTREE_ITER_cached);
+ int ret = bkey_err(k);
if (ret)
return ret;
@@ -95,13 +89,18 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,
if (!ca)
goto out;
- a = bch2_alloc_to_v4(k, &_a);
+ if (ca->mi.state != BCH_MEMBER_STATE_rw ||
+ !bch2_dev_is_online(ca))
+ goto out_put;
+
+ struct bch_alloc_v4 _a;
+ const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
b->k.gen = a->gen;
b->sectors = bch2_bucket_sectors_dirty(*a);
u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca);
ret = lru_idx && lru_idx <= time;
-
+out_put:
bch2_dev_put(ca);
out:
bch2_trans_iter_exit(trans, &iter);
diff --git a/fs/bcachefs/sb-downgrade.c b/fs/bcachefs/sb-downgrade.c
index 35e07bc8fbd3..051214fdc735 100644
--- a/fs/bcachefs/sb-downgrade.c
+++ b/fs/bcachefs/sb-downgrade.c
@@ -90,10 +90,7 @@
BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \
BCH_FSCK_ERR_accounting_mismatch, \
BCH_FSCK_ERR_accounting_key_replicas_nr_devs_0, \
- BCH_FSCK_ERR_accounting_key_junk_at_end) \
- x(directory_size, \
- BIT_ULL(BCH_RECOVERY_PASS_check_dirents), \
- BCH_FSCK_ERR_directory_size_mismatch) \
+ BCH_FSCK_ERR_accounting_key_junk_at_end)
#define DOWNGRADE_TABLE() \
x(bucket_stripe_sectors, \
diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c
index 7e7c66a1e1a6..7c403427fbdb 100644
--- a/fs/bcachefs/six.c
+++ b/fs/bcachefs/six.c
@@ -850,7 +850,8 @@ void six_lock_exit(struct six_lock *lock)
EXPORT_SYMBOL_GPL(six_lock_exit);
void __six_lock_init(struct six_lock *lock, const char *name,
- struct lock_class_key *key, enum six_lock_init_flags flags)
+ struct lock_class_key *key, enum six_lock_init_flags flags,
+ gfp_t gfp)
{
atomic_set(&lock->state, 0);
raw_spin_lock_init(&lock->wait_lock);
@@ -873,7 +874,7 @@ void __six_lock_init(struct six_lock *lock, const char *name,
* failure if they wish by checking lock->readers, but generally
* will not want to treat it as an error.
*/
- lock->readers = alloc_percpu(unsigned);
+ lock->readers = alloc_percpu_gfp(unsigned, gfp);
}
#endif
}
diff --git a/fs/bcachefs/six.h b/fs/bcachefs/six.h
index c142e06b7a3a..59b851cf8bac 100644
--- a/fs/bcachefs/six.h
+++ b/fs/bcachefs/six.h
@@ -164,18 +164,19 @@ enum six_lock_init_flags {
};
void __six_lock_init(struct six_lock *lock, const char *name,
- struct lock_class_key *key, enum six_lock_init_flags flags);
+ struct lock_class_key *key, enum six_lock_init_flags flags,
+ gfp_t gfp);
/**
* six_lock_init - initialize a six lock
* @lock: lock to initialize
* @flags: optional flags, i.e. SIX_LOCK_INIT_PCPU
*/
-#define six_lock_init(lock, flags) \
+#define six_lock_init(lock, flags, gfp) \
do { \
static struct lock_class_key __key; \
\
- __six_lock_init((lock), #lock, &__key, flags); \
+ __six_lock_init((lock), #lock, &__key, flags, gfp); \
} while (0)
/**
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index 8037ccbacf6a..a81a7b6c0989 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -69,14 +69,20 @@ enum bcachefs_metadata_version bch2_latest_compatible_version(enum bcachefs_meta
return v;
}
-void bch2_set_version_incompat(struct bch_fs *c, enum bcachefs_metadata_version version)
+bool bch2_set_version_incompat(struct bch_fs *c, enum bcachefs_metadata_version version)
{
- mutex_lock(&c->sb_lock);
- SET_BCH_SB_VERSION_INCOMPAT(c->disk_sb.sb,
- max(BCH_SB_VERSION_INCOMPAT(c->disk_sb.sb), version));
- c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_FEATURE_incompat_version_field);
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
+ bool ret = (c->sb.features & BIT_ULL(BCH_FEATURE_incompat_version_field)) &&
+ version <= c->sb.version_incompat_allowed;
+
+ if (ret) {
+ mutex_lock(&c->sb_lock);
+ SET_BCH_SB_VERSION_INCOMPAT(c->disk_sb.sb,
+ max(BCH_SB_VERSION_INCOMPAT(c->disk_sb.sb), version));
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+ }
+
+ return ret;
}
const char * const bch2_sb_fields[] = {
@@ -1219,9 +1225,11 @@ void bch2_sb_upgrade(struct bch_fs *c, unsigned new_version, bool incompat)
c->disk_sb.sb->version = cpu_to_le16(new_version);
c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
- if (incompat)
+ if (incompat) {
SET_BCH_SB_VERSION_INCOMPAT_ALLOWED(c->disk_sb.sb,
max(BCH_SB_VERSION_INCOMPAT_ALLOWED(c->disk_sb.sb), new_version));
+ c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_FEATURE_incompat_version_field);
+ }
}
static int bch2_sb_ext_validate(struct bch_sb *sb, struct bch_sb_field *f,
diff --git a/fs/bcachefs/super-io.h b/fs/bcachefs/super-io.h
index f1ab4f943720..b4cff9ebdebb 100644
--- a/fs/bcachefs/super-io.h
+++ b/fs/bcachefs/super-io.h
@@ -21,17 +21,14 @@ static inline bool bch2_version_compatible(u16 version)
void bch2_version_to_text(struct printbuf *, enum bcachefs_metadata_version);
enum bcachefs_metadata_version bch2_latest_compatible_version(enum bcachefs_metadata_version);
-void bch2_set_version_incompat(struct bch_fs *, enum bcachefs_metadata_version);
+bool bch2_set_version_incompat(struct bch_fs *, enum bcachefs_metadata_version);
static inline bool bch2_request_incompat_feature(struct bch_fs *c,
enum bcachefs_metadata_version version)
{
- if (unlikely(version > c->sb.version_incompat)) {
- if (version > c->sb.version_incompat_allowed)
- return false;
- bch2_set_version_incompat(c, version);
- }
- return true;
+ return likely(version <= c->sb.version_incompat)
+ ? true
+ : bch2_set_version_incompat(c, version);
}
static inline size_t bch2_sb_field_bytes(struct bch_sb_field *f)
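
The super-io change turns bch2_set_version_incompat() into a predicate: the fast path in bch2_request_incompat_feature() only compares against the cached version_incompat, and the slow path both checks that the incompatible-version field is allowed and persists the bump under sb_lock. Below is a rough userspace sketch of the same two-level gate, with invented names and a plain pthread mutex standing in for sb_lock; it is not the bcachefs API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct sb_state {
	unsigned version_incompat;		/* highest version already committed */
	unsigned version_incompat_allowed;	/* highest version policy permits */
	bool incompat_feature_enabled;
	pthread_mutex_t lock;
};

static bool set_version_incompat(struct sb_state *sb, unsigned version)
{
	bool ok = sb->incompat_feature_enabled &&
		  version <= sb->version_incompat_allowed;

	if (ok) {
		pthread_mutex_lock(&sb->lock);
		if (version > sb->version_incompat)
			sb->version_incompat = version;	/* "write the superblock" */
		pthread_mutex_unlock(&sb->lock);
	}
	return ok;
}

static bool request_incompat_feature(struct sb_state *sb, unsigned version)
{
	return version <= sb->version_incompat
		? true
		: set_version_incompat(sb, version);
}

int main(void)
{
	struct sb_state sb = {
		.version_incompat = 10,
		.version_incompat_allowed = 12,
		.incompat_feature_enabled = true,
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};

	printf("v11: %d\n", request_incompat_feature(&sb, 11));	/* granted */
	printf("v13: %d\n", request_incompat_feature(&sb, 13));	/* denied by policy */
	return 0;
}
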
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 67ce85ff0ae2..7f46abbd6311 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -1128,6 +1128,8 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_c
long nr_dropped = 0;
struct rb_node *node;
+ lockdep_assert_held_write(&tree->lock);
+
/*
* Take the mmap lock so that we serialize with the inode logging phase
* of fsync because we may need to set the full sync flag on the inode,
@@ -1139,28 +1141,12 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_c
* to find new extents, which may not be there yet because ordered
* extents haven't completed yet.
*
- * We also do a try lock because otherwise we could deadlock. This is
- * because the shrinker for this filesystem may be invoked while we are
- * in a path that is holding the mmap lock in write mode. For example in
- * a reflink operation while COWing an extent buffer, when allocating
- * pages for a new extent buffer and under memory pressure, the shrinker
- * may be invoked, and therefore we would deadlock by attempting to read
- * lock the mmap lock while we are holding already a write lock on it.
+ * We also do a try lock because we don't want to block for too long and
+ * we are holding the extent map tree's lock in write mode.
*/
if (!down_read_trylock(&inode->i_mmap_lock))
return 0;
- /*
- * We want to be fast so if the lock is busy we don't want to spend time
- * waiting for it - either some task is about to do IO for the inode or
- * we may have another task shrinking extent maps, here in this code, so
- * skip this inode.
- */
- if (!write_trylock(&tree->lock)) {
- up_read(&inode->i_mmap_lock);
- return 0;
- }
-
node = rb_first(&tree->root);
while (node) {
struct rb_node *next = rb_next(node);
@@ -1201,12 +1187,61 @@ next:
break;
node = next;
}
- write_unlock(&tree->lock);
up_read(&inode->i_mmap_lock);
return nr_dropped;
}
+static struct btrfs_inode *find_first_inode_to_shrink(struct btrfs_root *root,
+ u64 min_ino)
+{
+ struct btrfs_inode *inode;
+ unsigned long from = min_ino;
+
+ xa_lock(&root->inodes);
+ while (true) {
+ struct extent_map_tree *tree;
+
+ inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT);
+ if (!inode)
+ break;
+
+ tree = &inode->extent_tree;
+
+ /*
+ * We want to be fast so if the lock is busy we don't want to
+ * spend time waiting for it (some task is about to do IO for
+ * the inode).
+ */
+ if (!write_trylock(&tree->lock))
+ goto next;
+
+ /*
+ * Skip inode if it doesn't have loaded extent maps, so we avoid
+ * getting a reference and doing an iput later. This includes
+ * cases like files that were opened for things like stat(2), or
+ * files with all extent maps previously released through the
+ * release folio callback (btrfs_release_folio()) or released in
+ * a previous run, or directories which never have extent maps.
+ */
+ if (RB_EMPTY_ROOT(&tree->root)) {
+ write_unlock(&tree->lock);
+ goto next;
+ }
+
+ if (igrab(&inode->vfs_inode))
+ break;
+
+ write_unlock(&tree->lock);
+next:
+ from = btrfs_ino(inode) + 1;
+ cond_resched_lock(&root->inodes.xa_lock);
+ }
+ xa_unlock(&root->inodes);
+
+ return inode;
+}
+
static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx *ctx)
{
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -1214,21 +1249,21 @@ static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx
long nr_dropped = 0;
u64 min_ino = fs_info->em_shrinker_last_ino + 1;
- inode = btrfs_find_first_inode(root, min_ino);
+ inode = find_first_inode_to_shrink(root, min_ino);
while (inode) {
nr_dropped += btrfs_scan_inode(inode, ctx);
+ write_unlock(&inode->extent_tree.lock);
min_ino = btrfs_ino(inode) + 1;
fs_info->em_shrinker_last_ino = btrfs_ino(inode);
- btrfs_add_delayed_iput(inode);
+ iput(&inode->vfs_inode);
- if (ctx->scanned >= ctx->nr_to_scan ||
- btrfs_fs_closing(inode->root->fs_info))
+ if (ctx->scanned >= ctx->nr_to_scan || btrfs_fs_closing(fs_info))
break;
cond_resched();
- inode = btrfs_find_first_inode(root, min_ino);
+ inode = find_first_inode_to_shrink(root, min_ino);
}
if (inode) {
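
find_first_inode_to_shrink() walks the root's inode xarray and uses write_trylock() so that a busy or empty extent tree is simply skipped rather than waited on, and it hands the chosen tree back still locked. A small pthread sketch of that try-lock-and-skip scan, under invented names and with a plain array instead of an xarray:

#include <pthread.h>
#include <stdio.h>

struct item {
	pthread_rwlock_t lock;
	int nr_entries;		/* stand-in for "has loaded extent maps" */
};

/* Returns the first item we managed to write-lock, or NULL. */
static struct item *find_first_shrinkable(struct item *items, int n)
{
	for (int i = 0; i < n; i++) {
		struct item *it = &items[i];

		if (pthread_rwlock_trywrlock(&it->lock) != 0)
			continue;		/* busy: some other task owns it */

		if (it->nr_entries == 0) {	/* nothing to shrink, don't bother */
			pthread_rwlock_unlock(&it->lock);
			continue;
		}
		return it;			/* returned locked, caller unlocks */
	}
	return NULL;
}

int main(void)
{
	struct item items[3];

	for (int i = 0; i < 3; i++) {
		pthread_rwlock_init(&items[i].lock, NULL);
		items[i].nr_entries = i;	/* item 0 is empty */
	}
	pthread_rwlock_wrlock(&items[1].lock);	/* item 1 is "busy" */

	struct item *it = find_first_shrinkable(items, 3);

	printf("picked item %ld\n", it ? (long)(it - items) : -1L);	/* 2 */
	if (it)
		pthread_rwlock_unlock(&it->lock);
	return 0;
}
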
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index ed3c0d6546c5..0b568c8d24cb 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1090,7 +1090,7 @@ ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i)
u64 lockend;
size_t num_written = 0;
ssize_t ret;
- loff_t old_isize = i_size_read(inode);
+ loff_t old_isize;
unsigned int ilock_flags = 0;
const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
unsigned int bdp_flags = (nowait ? BDP_ASYNC : 0);
@@ -1103,6 +1103,13 @@ ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i)
if (ret < 0)
return ret;
+ /*
+ * We can only trust the isize with the inode lock held; otherwise it can
+ * race with other buffered writes and lead to an incorrect call to
+ * pagecache_isize_extended() that overwrites existing data.
+ */
+ old_isize = i_size_read(inode);
+
ret = generic_write_checks(iocb, i);
if (ret <= 0)
goto out;
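
The point of moving the old_isize read is that the value is only meaningful while the inode lock serializes buffered writers; sampled earlier, it can be stale by the time pagecache_isize_extended() is called. A tiny userspace illustration of sampling shared state before versus under the lock, with invented names and pthreads:

#include <pthread.h>
#include <stdio.h>

struct file_state {
	pthread_mutex_t lock;
	long size;
};

/* Racy: old_size may no longer match f->size once the lock is taken. */
static long append_racy(struct file_state *f, long len)
{
	long old_size = f->size;	/* sampled too early */

	pthread_mutex_lock(&f->lock);
	f->size += len;
	pthread_mutex_unlock(&f->lock);
	return old_size;
}

/* Correct: sample the size while holding the lock that serializes writers. */
static long append_locked(struct file_state *f, long len)
{
	pthread_mutex_lock(&f->lock);
	long old_size = f->size;

	f->size += len;
	pthread_mutex_unlock(&f->lock);
	return old_size;
}

int main(void)
{
	struct file_state f = { .lock = PTHREAD_MUTEX_INITIALIZER, .size = 100 };
	long old = append_locked(&f, 10);

	printf("old=%ld new=%ld\n", old, f.size);	/* old=100 new=110 */
	(void)append_racy;
	return 0;
}
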
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a9322601ab5c..38756f8cef46 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1382,8 +1382,13 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
continue;
}
if (done_offset) {
- *done_offset = start - 1;
- return 0;
+ /*
+ * Move @end to the end of the processed range,
+ * and exit the loop to unlock the processed extents.
+ */
+ end = start - 1;
+ ret = 0;
+ break;
}
ret = -ENOSPC;
}
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 53b846d99ece..14f53f757555 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -1330,13 +1330,13 @@ MODULE_PARM_DESC(read_policy,
int btrfs_read_policy_to_enum(const char *str, s64 *value_ret)
{
- char param[32] = { 0 };
+ char param[32];
char __maybe_unused *value_str;
if (!str || strlen(str) == 0)
return 0;
- strncpy(param, str, sizeof(param) - 1);
+ strscpy(param, str);
#ifdef CONFIG_BTRFS_EXPERIMENTAL
/* Separate value from input in policy:value format. */
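
strscpy() always NUL-terminates and reports truncation, which is why the destination buffer no longer needs the "= { 0 }" initializer the strncpy() version relied on. An approximate userspace equivalent of that contract, using an invented copy_string() helper rather than the kernel API:

#include <stdio.h>
#include <string.h>

/* Returns the copied length, or -1 if src did not fit. Always NUL-terminates. */
static long copy_string(char *dst, const char *src, size_t dst_size)
{
	size_t len = strlen(src);

	if (dst_size == 0)
		return -1;
	if (len >= dst_size) {
		memcpy(dst, src, dst_size - 1);
		dst[dst_size - 1] = '\0';
		return -1;		/* truncated */
	}
	memcpy(dst, src, len + 1);
	return (long)len;
}

int main(void)
{
	char param[8];		/* note: not zero-initialized */

	printf("%ld \"%s\"\n", copy_string(param, "pid", sizeof(param)), param);
	printf("%ld \"%s\"\n", copy_string(param, "round-robin", sizeof(param)), param);
	return 0;
}
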
diff --git a/fs/btrfs/tests/delayed-refs-tests.c b/fs/btrfs/tests/delayed-refs-tests.c
index 6558508c2ddf..265370e79a54 100644
--- a/fs/btrfs/tests/delayed-refs-tests.c
+++ b/fs/btrfs/tests/delayed-refs-tests.c
@@ -1009,6 +1009,7 @@ int btrfs_test_delayed_refs(u32 sectorsize, u32 nodesize)
if (!ret)
ret = select_delayed_refs_test(&trans);
+ kfree(transaction);
out_free_fs_info:
btrfs_free_dummy_fs_info(fs_info);
return ret;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0a0776489055..3f8afbd1ebb5 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -7155,6 +7155,7 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
btrfs_err(fs_info,
"failed to add chunk map, start=%llu len=%llu: %d",
map->start, map->chunk_len, ret);
+ btrfs_free_chunk_map(map);
}
return ret;
@@ -7200,8 +7201,12 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
fs_devices = find_fsid(fsid, NULL);
if (!fs_devices) {
- if (!btrfs_test_opt(fs_info, DEGRADED))
+ if (!btrfs_test_opt(fs_info, DEGRADED)) {
+ btrfs_err(fs_info,
+ "failed to find fsid %pU when attempting to open seed devices",
+ fsid);
return ERR_PTR(-ENOENT);
+ }
fs_devices = alloc_fs_devices(fsid);
if (IS_ERR(fs_devices))
diff --git a/fs/coredump.c b/fs/coredump.c
index 591700e1b2ce..4375c70144d0 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -63,6 +63,7 @@ static void free_vma_snapshot(struct coredump_params *cprm);
static int core_uses_pid;
static unsigned int core_pipe_limit;
+static unsigned int core_sort_vma;
static char core_pattern[CORENAME_MAX_SIZE] = "core";
static int core_name_size = CORENAME_MAX_SIZE;
unsigned int core_file_note_size_limit = CORE_FILE_NOTE_SIZE_DEFAULT;
@@ -1026,6 +1027,15 @@ static const struct ctl_table coredump_sysctls[] = {
.extra1 = (unsigned int *)&core_file_note_size_min,
.extra2 = (unsigned int *)&core_file_note_size_max,
},
+ {
+ .procname = "core_sort_vma",
+ .data = &core_sort_vma,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
};
static int __init init_fs_coredump_sysctls(void)
@@ -1256,8 +1266,9 @@ static bool dump_vma_snapshot(struct coredump_params *cprm)
cprm->vma_data_size += m->dump_size;
}
- sort(cprm->vma_meta, cprm->vma_count, sizeof(*cprm->vma_meta),
- cmp_vma_size, NULL);
+ if (core_sort_vma)
+ sort(cprm->vma_meta, cprm->vma_count, sizeof(*cprm->vma_meta),
+ cmp_vma_size, NULL);
return true;
}
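
With the new core_sort_vma sysctl, the VMA snapshot is only sorted by dump size when the knob is enabled. As a rough illustration of what such a sort does (the actual ordering used by cmp_vma_size() is not visible in this hunk, so largest-first is an assumption here), this is a userspace qsort() over an invented vma_meta struct:

#include <stdio.h>
#include <stdlib.h>

struct vma_meta {
	unsigned long start;
	unsigned long dump_size;
};

static int cmp_vma_size(const void *a, const void *b)
{
	const struct vma_meta *va = a, *vb = b;

	if (va->dump_size == vb->dump_size)
		return 0;
	return va->dump_size > vb->dump_size ? -1 : 1;	/* descending */
}

int main(void)
{
	struct vma_meta vmas[] = {
		{ 0x1000, 4096 }, { 0x5000, 65536 }, { 0x9000, 0 },
	};
	int sort_vma = 1;	/* stand-in for the core_sort_vma sysctl */

	if (sort_vma)
		qsort(vmas, 3, sizeof(vmas[0]), cmp_vma_size);

	for (int i = 0; i < 3; i++)
		printf("0x%lx %lu\n", vmas[i].start, vmas[i].dump_size);
	return 0;
}
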
diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
index cb1b6d0c3454..c294a8fc566d 100644
--- a/fs/efivarfs/file.c
+++ b/fs/efivarfs/file.c
@@ -57,10 +57,11 @@ static ssize_t efivarfs_file_write(struct file *file,
if (bytes == -ENOENT) {
/*
- * zero size signals to release that the write deleted
- * the variable
+ * FIXME: temporary workaround for fwupdate; signal a failed
+ * write with a size of 1 so that files which were created but
+ * never written are kept around
*/
- i_size_write(inode, 0);
+ i_size_write(inode, 1);
} else {
i_size_write(inode, datasize + sizeof(attributes));
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
@@ -124,7 +125,8 @@ static int efivarfs_file_release(struct inode *inode, struct file *file)
struct efivar_entry *var = inode->i_private;
inode_lock(inode);
- var->removed = (--var->open_count == 0 && i_size_read(inode) == 0);
+ /* FIXME: temporary workaround for fwupdate */
+ var->removed = (--var->open_count == 0 && i_size_read(inode) == 1);
inode_unlock(inode);
if (var->removed)
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index 09fcf731e65d..6eae8cf655c1 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -367,6 +367,8 @@ static int efivarfs_fill_super(struct super_block *sb, struct fs_context *fc)
if (err)
return err;
+ register_pm_notifier(&sfi->pm_nb);
+
return efivar_init(efivarfs_callback, sb, true);
}
@@ -552,7 +554,6 @@ static int efivarfs_init_fs_context(struct fs_context *fc)
sfi->pm_nb.notifier_call = efivarfs_pm_notify;
sfi->pm_nb.priority = 0;
- register_pm_notifier(&sfi->pm_nb);
return 0;
}
diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
index ce9be95c9172..9ff825f1502d 100644
--- a/fs/exfat/balloc.c
+++ b/fs/exfat/balloc.c
@@ -141,7 +141,7 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync)
return 0;
}
-void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
+int exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
{
int i, b;
unsigned int ent_idx;
@@ -150,13 +150,17 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
struct exfat_mount_options *opts = &sbi->options;
if (!is_valid_cluster(sbi, clu))
- return;
+ return -EIO;
ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
+ if (!test_bit_le(b, sbi->vol_amap[i]->b_data))
+ return -EIO;
+
clear_bit_le(b, sbi->vol_amap[i]->b_data);
+
exfat_update_bh(sbi->vol_amap[i], sync);
if (opts->discard) {
@@ -171,6 +175,8 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
opts->discard = 0;
}
}
+
+ return 0;
}
/*
diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
index 78be6964a8a0..d30ce18a88b7 100644
--- a/fs/exfat/exfat_fs.h
+++ b/fs/exfat/exfat_fs.h
@@ -456,7 +456,7 @@ int exfat_count_num_clusters(struct super_block *sb,
int exfat_load_bitmap(struct super_block *sb);
void exfat_free_bitmap(struct exfat_sb_info *sbi);
int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync);
-void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync);
+int exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync);
unsigned int exfat_find_free_bitmap(struct super_block *sb, unsigned int clu);
int exfat_count_used_clusters(struct super_block *sb, unsigned int *ret_count);
int exfat_trim_fs(struct inode *inode, struct fstrim_range *range);
diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
index 9e5492ac409b..6f3651c6ca91 100644
--- a/fs/exfat/fatent.c
+++ b/fs/exfat/fatent.c
@@ -175,6 +175,7 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain
BITMAP_OFFSET_SECTOR_INDEX(sb, CLUSTER_TO_BITMAP_ENT(clu));
if (p_chain->flags == ALLOC_NO_FAT_CHAIN) {
+ int err;
unsigned int last_cluster = p_chain->dir + p_chain->size - 1;
do {
bool sync = false;
@@ -189,7 +190,9 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain
cur_cmap_i = next_cmap_i;
}
- exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode)));
+ err = exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode)));
+ if (err)
+ break;
clu++;
num_clusters++;
} while (num_clusters < p_chain->size);
@@ -210,12 +213,13 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain
cur_cmap_i = next_cmap_i;
}
- exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode)));
+ if (exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode))))
+ break;
clu = n_clu;
num_clusters++;
if (err)
- goto dec_used_clus;
+ break;
if (num_clusters >= sbi->num_clusters - EXFAT_FIRST_CLUSTER) {
/*
@@ -229,7 +233,6 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain
} while (clu != EXFAT_EOF_CLUSTER);
}
-dec_used_clus:
sbi->used_clusters -= num_clusters;
return 0;
}
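
exfat_clear_bitmap() now fails with -EIO when asked to clear a bit that is already clear, so __exfat_free_cluster() stops instead of walking a corrupted chain. A toy bitmap allocator showing the same double-free check, in plain C with invented helpers:

#include <errno.h>
#include <stdio.h>

#define NBITS 64

static unsigned long long bitmap;

static void set_bit(unsigned n)   { bitmap |=  (1ULL << n); }
static int  test_bit(unsigned n)  { return !!(bitmap & (1ULL << n)); }
static void clear_bit(unsigned n) { bitmap &= ~(1ULL << n); }

static int free_cluster(unsigned clu)
{
	if (clu >= NBITS)
		return -EIO;		/* out of range */
	if (!test_bit(clu))
		return -EIO;		/* already free: corruption or double free */
	clear_bit(clu);
	return 0;
}

int main(void)
{
	set_bit(7);
	printf("first free:  %d\n", free_cluster(7));	/* 0 */
	printf("double free: %d\n", free_cluster(7));	/* -5 */
	return 0;
}
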
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index 05b51e721783..807349d8ea05 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -587,7 +587,7 @@ static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
valid_size = ei->valid_size;
ret = generic_write_checks(iocb, iter);
- if (ret < 0)
+ if (ret <= 0)
goto unlock;
if (iocb->ki_flags & IOCB_DIRECT) {
diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
index 691dd77b6ab5..8b30027d8251 100644
--- a/fs/exfat/namei.c
+++ b/fs/exfat/namei.c
@@ -232,7 +232,7 @@ static int exfat_search_empty_slot(struct super_block *sb,
dentry = 0;
}
- while (dentry + num_entries < total_entries &&
+ while (dentry + num_entries <= total_entries &&
clu.dir != EXFAT_EOF_CLUSTER) {
i = dentry & (dentries_per_clu - 1);
@@ -646,6 +646,11 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
info->valid_size = le64_to_cpu(ep2->dentry.stream.valid_size);
info->size = le64_to_cpu(ep2->dentry.stream.size);
+ if (unlikely(EXFAT_B_TO_CLU_ROUND_UP(info->size, sbi) > sbi->used_clusters)) {
+ exfat_fs_error(sb, "data size is invalid(%lld)", info->size);
+ return -EIO;
+ }
+
info->start_clu = le32_to_cpu(ep2->dentry.stream.start_clu);
if (!is_valid_cluster(sbi, info->start_clu) && info->size) {
exfat_warn(sb, "start_clu is invalid cluster(0x%x)",
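
The first exfat/namei.c hunk fixes an off-by-one: when num_entries contiguous slots are needed out of total_entries, the last legal starting slot is total_entries - num_entries, so the loop bound must be "<=" rather than "<". A self-contained illustration of the boundary, with invented names:

#include <stdio.h>

/* Returns the first index where `num` consecutive free slots start, or -1. */
static int find_free_run(const int *used, int total, int num)
{
	for (int start = 0; start + num <= total; start++) {
		int ok = 1;

		for (int i = 0; i < num; i++)
			if (used[start + i]) {
				ok = 0;
				break;
			}
		if (ok)
			return start;
	}
	return -1;
}

int main(void)
{
	/* only the last two slots are free: "<" instead of "<=" misses them */
	int used[6] = { 1, 1, 1, 1, 0, 0 };

	printf("run of 2 starts at %d\n", find_free_run(used, 6, 2));	/* 4 */
	return 0;
}
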
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 5b5f789b37eb..2c3a4d09e500 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -838,6 +838,12 @@ static int fuse_check_folio(struct folio *folio)
return 0;
}
+/*
+ * Attempt to steal a page from the splice() pipe and move it into the
+ * pagecache. If successful, the pointer in @pagep will be updated. The
+ * folio that was originally in @pagep will lose a reference and the new
+ * folio returned in @pagep will carry a reference.
+ */
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
int err;
@@ -1451,7 +1457,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
if (ret < 0)
goto out;
- if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pipe->max_usage) {
+ if (pipe_buf_usage(pipe) + cs.nr_segs > pipe->max_usage) {
ret = -EIO;
goto out;
}
@@ -2101,7 +2107,7 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
struct file *out, loff_t *ppos,
size_t len, unsigned int flags)
{
- unsigned int head, tail, mask, count;
+ unsigned int head, tail, count;
unsigned nbuf;
unsigned idx;
struct pipe_buffer *bufs;
@@ -2118,8 +2124,7 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
head = pipe->head;
tail = pipe->tail;
- mask = pipe->ring_size - 1;
- count = head - tail;
+ count = pipe_occupancy(head, tail);
bufs = kvmalloc_array(count, sizeof(struct pipe_buffer), GFP_KERNEL);
if (!bufs) {
@@ -2129,8 +2134,8 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
nbuf = 0;
rem = 0;
- for (idx = tail; idx != head && rem < len; idx++)
- rem += pipe->bufs[idx & mask].len;
+ for (idx = tail; !pipe_empty(head, idx) && rem < len; idx++)
+ rem += pipe_buf(pipe, idx)->len;
ret = -EINVAL;
if (rem < len)
@@ -2141,10 +2146,10 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
struct pipe_buffer *ibuf;
struct pipe_buffer *obuf;
- if (WARN_ON(nbuf >= count || tail == head))
+ if (WARN_ON(nbuf >= count || pipe_empty(head, tail)))
goto out_free;
- ibuf = &pipe->bufs[tail & mask];
+ ibuf = pipe_buf(pipe, tail);
obuf = &bufs[nbuf];
if (rem >= ibuf->len) {
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 198862b086ff..3805f9b06c9d 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1636,7 +1636,7 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode,
goto out_err;
if (fc->cache_symlinks)
- return page_get_link(dentry, inode, callback);
+ return page_get_link_raw(dentry, inode, callback);
err = -ECHILD;
if (!dentry)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 7d92a5479998..d63e56fd3dd2 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -955,8 +955,10 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
fuse_invalidate_atime(inode);
}
- for (i = 0; i < ap->num_folios; i++)
+ for (i = 0; i < ap->num_folios; i++) {
folio_end_read(ap->folios[i], !err);
+ folio_put(ap->folios[i]);
+ }
if (ia->ff)
fuse_file_put(ia->ff, false);
@@ -1048,7 +1050,14 @@ static void fuse_readahead(struct readahead_control *rac)
ap = &ia->ap;
while (ap->num_folios < cur_pages) {
- folio = readahead_folio(rac);
+ /*
+ * This returns a folio with a ref held on it.
+ * The ref needs to be held until the request is
+ * completed, since the splice case (see
+ * fuse_try_move_page()) drops the ref after it's
+ * replaced in the page cache.
+ */
+ folio = __readahead_folio(rac);
ap->folios[ap->num_folios] = folio;
ap->descs[ap->num_folios].length = folio_size(folio);
ap->num_folios++;
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index b521eb15759e..0e47da82b0c2 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -427,12 +427,10 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
bio_put(bio);
goto zero_tail;
}
- if (dio->flags & IOMAP_DIO_WRITE) {
+ if (dio->flags & IOMAP_DIO_WRITE)
task_io_account_write(n);
- } else {
- if (dio->flags & IOMAP_DIO_DIRTY)
- bio_set_pages_dirty(bio);
- }
+ else if (dio->flags & IOMAP_DIO_DIRTY)
+ bio_set_pages_dirty(bio);
dio->size += n;
copied += n;
diff --git a/fs/namei.c b/fs/namei.c
index 3ab9440c5b93..ecb7b95c2ca3 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -5356,10 +5356,9 @@ const char *vfs_get_link(struct dentry *dentry, struct delayed_call *done)
EXPORT_SYMBOL(vfs_get_link);
/* get the link contents into pagecache */
-const char *page_get_link(struct dentry *dentry, struct inode *inode,
- struct delayed_call *callback)
+static char *__page_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback)
{
- char *kaddr;
struct page *page;
struct address_space *mapping = inode->i_mapping;
@@ -5378,8 +5377,23 @@ const char *page_get_link(struct dentry *dentry, struct inode *inode,
}
set_delayed_call(callback, page_put_link, page);
BUG_ON(mapping_gfp_mask(mapping) & __GFP_HIGHMEM);
- kaddr = page_address(page);
- nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1);
+ return page_address(page);
+}
+
+const char *page_get_link_raw(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback)
+{
+ return __page_get_link(dentry, inode, callback);
+}
+EXPORT_SYMBOL_GPL(page_get_link_raw);
+
+const char *page_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback)
+{
+ char *kaddr = __page_get_link(dentry, inode, callback);
+
+ if (!IS_ERR(kaddr))
+ nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1);
return kaddr;
}
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 035ba52742a5..4db912f56230 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -781,6 +781,43 @@ int nfs4_inode_return_delegation(struct inode *inode)
}
/**
+ * nfs4_inode_set_return_delegation_on_close - asynchronously return a delegation
+ * @inode: inode to process
+ *
+ * This routine is called to request that the delegation be returned as soon
+ * as the file is closed. If the file is already closed, the delegation is
+ * immediately returned.
+ */
+void nfs4_inode_set_return_delegation_on_close(struct inode *inode)
+{
+ struct nfs_delegation *delegation;
+ struct nfs_delegation *ret = NULL;
+
+ if (!inode)
+ return;
+ rcu_read_lock();
+ delegation = nfs4_get_valid_delegation(inode);
+ if (!delegation)
+ goto out;
+ spin_lock(&delegation->lock);
+ if (!delegation->inode)
+ goto out_unlock;
+ if (list_empty(&NFS_I(inode)->open_files) &&
+ !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
+ /* Refcount matched in nfs_end_delegation_return() */
+ ret = nfs_get_delegation(delegation);
+ } else
+ set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
+out_unlock:
+ spin_unlock(&delegation->lock);
+ if (ret)
+ nfs_clear_verifier_delegated(inode);
+out:
+ rcu_read_unlock();
+ nfs_end_delegation_return(inode, ret, 0);
+}
+
+/**
* nfs4_inode_return_delegation_on_close - asynchronously return a delegation
* @inode: inode to process
*
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 71524d34ed20..8ff5ab9c5c25 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -49,6 +49,7 @@ void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
unsigned long pagemod_limit, u32 deleg_type);
int nfs4_inode_return_delegation(struct inode *inode);
void nfs4_inode_return_delegation_on_close(struct inode *inode);
+void nfs4_inode_set_return_delegation_on_close(struct inode *inode);
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
void nfs_inode_evict_delegation(struct inode *inode);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index f45beea92d03..f32f8d7c9122 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -56,6 +56,7 @@
#include <linux/uaccess.h>
#include <linux/atomic.h>
+#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "pnfs.h"
@@ -130,6 +131,20 @@ static void nfs_direct_truncate_request(struct nfs_direct_req *dreq,
dreq->count = req_start;
}
+static void nfs_direct_file_adjust_size_locked(struct inode *inode,
+ loff_t offset, size_t count)
+{
+ loff_t newsize = offset + (loff_t)count;
+ loff_t oldsize = i_size_read(inode);
+
+ if (newsize > oldsize) {
+ i_size_write(inode, newsize);
+ NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
+ trace_nfs_size_grow(inode, newsize);
+ nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
+ }
+}
+
/**
* nfs_swap_rw - NFS address space operation for swap I/O
* @iocb: target I/O control block
@@ -272,6 +287,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
nfs_direct_count_bytes(dreq, hdr);
spin_unlock(&dreq->lock);
+ nfs_update_delegated_atime(dreq->inode);
+
while (!list_empty(&hdr->pages)) {
struct nfs_page *req = nfs_list_entry(hdr->pages.next);
struct page *page = req->wb_page;
@@ -741,6 +758,7 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
struct nfs_direct_req *dreq = hdr->dreq;
struct nfs_commit_info cinfo;
struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+ struct inode *inode = dreq->inode;
int flags = NFS_ODIRECT_DONE;
trace_nfs_direct_write_completion(dreq);
@@ -762,6 +780,11 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
}
spin_unlock(&dreq->lock);
+ spin_lock(&inode->i_lock);
+ nfs_direct_file_adjust_size_locked(inode, dreq->io_start, dreq->count);
+ nfs_update_delegated_mtime_locked(dreq->inode);
+ spin_unlock(&inode->i_lock);
+
while (!list_empty(&hdr->pages)) {
req = nfs_list_entry(hdr->pages.next);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 1bb646752e46..033feeab8c34 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -29,6 +29,7 @@
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/swap.h>
+#include <linux/compaction.h>
#include <linux/uaccess.h>
#include <linux/filelock.h>
@@ -457,7 +458,7 @@ static bool nfs_release_folio(struct folio *folio, gfp_t gfp)
/* If the private flag is set, then the folio is not freeable */
if (folio_test_private(folio)) {
if ((current_gfp_context(gfp) & GFP_KERNEL) != GFP_KERNEL ||
- current_is_kswapd())
+ current_is_kswapd() || current_is_kcompactd())
return false;
if (nfs_wb_folio(folio->mapping->host, folio) < 0)
return false;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index df9669d4ded7..6e95db6c17e9 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -133,6 +133,7 @@ nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
if (err)
return NULL;
+ label->lsmid = shim.id;
label->label = shim.context;
label->len = shim.len;
return label;
@@ -145,7 +146,7 @@ nfs4_label_release_security(struct nfs4_label *label)
if (label) {
shim.context = label->label;
shim.len = label->len;
- shim.id = LSM_ID_UNDEF;
+ shim.id = label->lsmid;
security_release_secctx(&shim);
}
}
@@ -3906,8 +3907,11 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
{
+ struct dentry *dentry = ctx->dentry;
if (ctx->state == NULL)
return;
+ if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
+ nfs4_inode_set_return_delegation_on_close(d_inode(dentry));
if (is_sync)
nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx));
else
@@ -6269,7 +6273,7 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf,
size_t buflen)
{
struct nfs_server *server = NFS_SERVER(inode);
- struct nfs4_label label = {0, 0, buflen, buf};
+ struct nfs4_label label = {0, 0, 0, buflen, buf};
u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
struct nfs_fattr fattr = {
@@ -6374,7 +6378,7 @@ static int nfs4_do_set_security_label(struct inode *inode,
static int
nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
{
- struct nfs4_label ilabel = {0, 0, buflen, (char *)buf };
+ struct nfs4_label ilabel = {0, 0, 0, buflen, (char *)buf };
struct nfs_fattr *fattr;
int status;
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 663f8656158d..f7fddf8ecf73 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -37,7 +37,6 @@ static char *ns_dname(struct dentry *dentry, char *buffer, int buflen)
}
const struct dentry_operations ns_dentry_operations = {
- .d_delete = always_delete_dentry,
.d_dname = ns_dname,
.d_prune = stashed_dentry_prune,
};
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 0c28e5fa3407..d7310fcf3888 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -618,7 +618,6 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
err = PTR_ERR(upper);
if (!IS_ERR(upper)) {
err = ovl_do_link(ofs, ovl_dentry_upper(c->dentry), udir, upper);
- dput(upper);
if (!err) {
/* Restore timestamps on parent (best effort) */
@@ -626,6 +625,7 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
ovl_dentry_set_upper_alias(c->dentry);
ovl_dentry_update_reval(c->dentry, upper);
}
+ dput(upper);
}
inode_unlock(udir);
if (err)
diff --git a/fs/pidfs.c b/fs/pidfs.c
index 63f9699ebac3..c0478b3c55d9 100644
--- a/fs/pidfs.c
+++ b/fs/pidfs.c
@@ -521,7 +521,6 @@ static char *pidfs_dname(struct dentry *dentry, char *buffer, int buflen)
}
const struct dentry_operations pidfs_dentry_operations = {
- .d_delete = always_delete_dentry,
.d_dname = pidfs_dname,
.d_prune = stashed_dentry_prune,
};
diff --git a/fs/pipe.c b/fs/pipe.c
index ce1af7592780..4d0799e4e719 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -210,11 +210,10 @@ static const struct pipe_buf_operations anon_pipe_buf_ops = {
/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_readable(const struct pipe_inode_info *pipe)
{
- unsigned int head = READ_ONCE(pipe->head);
- unsigned int tail = READ_ONCE(pipe->tail);
+ union pipe_index idx = { .head_tail = READ_ONCE(pipe->head_tail) };
unsigned int writers = READ_ONCE(pipe->writers);
- return !pipe_empty(head, tail) || !writers;
+ return !pipe_empty(idx.head, idx.tail) || !writers;
}
static inline unsigned int pipe_update_tail(struct pipe_inode_info *pipe,
@@ -395,7 +394,7 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
wake_next_reader = true;
mutex_lock(&pipe->mutex);
}
- if (pipe_empty(pipe->head, pipe->tail))
+ if (pipe_is_empty(pipe))
wake_next_reader = false;
mutex_unlock(&pipe->mutex);
@@ -417,11 +416,10 @@ static inline int is_packetized(struct file *file)
/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_writable(const struct pipe_inode_info *pipe)
{
- unsigned int head = READ_ONCE(pipe->head);
- unsigned int tail = READ_ONCE(pipe->tail);
+ union pipe_index idx = { .head_tail = READ_ONCE(pipe->head_tail) };
unsigned int max_usage = READ_ONCE(pipe->max_usage);
- return !pipe_full(head, tail, max_usage) ||
+ return !pipe_full(idx.head, idx.tail, max_usage) ||
!READ_ONCE(pipe->readers);
}
@@ -579,11 +577,11 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
mutex_lock(&pipe->mutex);
- was_empty = pipe_empty(pipe->head, pipe->tail);
+ was_empty = pipe_is_empty(pipe);
wake_next_writer = true;
}
out:
- if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
+ if (pipe_is_full(pipe))
wake_next_writer = false;
mutex_unlock(&pipe->mutex);
@@ -616,7 +614,7 @@ out:
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct pipe_inode_info *pipe = filp->private_data;
- unsigned int count, head, tail, mask;
+ unsigned int count, head, tail;
switch (cmd) {
case FIONREAD:
@@ -624,10 +622,9 @@ static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
count = 0;
head = pipe->head;
tail = pipe->tail;
- mask = pipe->ring_size - 1;
- while (tail != head) {
- count += pipe->bufs[tail & mask].len;
+ while (!pipe_empty(head, tail)) {
+ count += pipe_buf(pipe, tail)->len;
tail++;
}
mutex_unlock(&pipe->mutex);
@@ -659,7 +656,7 @@ pipe_poll(struct file *filp, poll_table *wait)
{
__poll_t mask;
struct pipe_inode_info *pipe = filp->private_data;
- unsigned int head, tail;
+ union pipe_index idx;
/* Epoll has some historical nasty semantics, this enables them */
WRITE_ONCE(pipe->poll_usage, true);
@@ -680,19 +677,18 @@ pipe_poll(struct file *filp, poll_table *wait)
* if something changes and you got it wrong, the poll
* table entry will wake you up and fix it.
*/
- head = READ_ONCE(pipe->head);
- tail = READ_ONCE(pipe->tail);
+ idx.head_tail = READ_ONCE(pipe->head_tail);
mask = 0;
if (filp->f_mode & FMODE_READ) {
- if (!pipe_empty(head, tail))
+ if (!pipe_empty(idx.head, idx.tail))
mask |= EPOLLIN | EPOLLRDNORM;
if (!pipe->writers && filp->f_pipe != pipe->w_counter)
mask |= EPOLLHUP;
}
if (filp->f_mode & FMODE_WRITE) {
- if (!pipe_full(head, tail, pipe->max_usage))
+ if (!pipe_full(idx.head, idx.tail, pipe->max_usage))
mask |= EPOLLOUT | EPOLLWRNORM;
/*
* Most Unices do not set EPOLLERR for FIFOs but on Linux they
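
Most of the pipe, fuse and splice hunks replace open-coded "head - tail" and "& (ring_size - 1)" arithmetic with helpers such as pipe_buf(), pipe_empty() and pipe_buf_usage(), and read head_tail as one word so the poll and readable/writable checks see a consistent snapshot. The underlying arithmetic is the usual power-of-two ring with free-running counters; here is a compact userspace sketch of it, with invented names rather than the kernel pipe API:

#include <stdio.h>

#define RING_SIZE 8	/* must be a power of two */

struct ring {
	unsigned int head;	/* next slot to fill, never wrapped */
	unsigned int tail;	/* next slot to drain, never wrapped */
	int slots[RING_SIZE];
};

static unsigned int ring_occupancy(const struct ring *r) { return r->head - r->tail; }
static int ring_empty(const struct ring *r) { return r->head == r->tail; }
static int ring_full(const struct ring *r)  { return ring_occupancy(r) >= RING_SIZE; }

static int *ring_slot(struct ring *r, unsigned int idx)
{
	return &r->slots[idx & (RING_SIZE - 1)];	/* the mask pipe_buf() hides */
}

int main(void)
{
	struct ring r = { 0 };

	for (int i = 0; i < 5; i++)
		*ring_slot(&r, r.head++) = i * 10;

	printf("occupancy=%u empty=%d full=%d\n",
	       ring_occupancy(&r), ring_empty(&r), ring_full(&r));

	while (!ring_empty(&r))
		printf("drained %d\n", *ring_slot(&r, r.tail++));
	return 0;
}
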
diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c
index 699a3f76d083..64bd68f750f8 100644
--- a/fs/smb/client/cifsacl.c
+++ b/fs/smb/client/cifsacl.c
@@ -763,7 +763,7 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl,
struct cifs_fattr *fattr, bool mode_from_special_sid)
{
int i;
- int num_aces = 0;
+ u16 num_aces = 0;
int acl_size;
char *acl_base;
struct smb_ace **ppace;
@@ -778,14 +778,15 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl,
}
/* validate that we do not go past end of acl */
- if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
+ if (end_of_acl < (char *)pdacl + sizeof(struct smb_acl) ||
+ end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
cifs_dbg(VFS, "ACL too small to parse DACL\n");
return;
}
cifs_dbg(NOISY, "DACL revision %d size %d num aces %d\n",
le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
- le32_to_cpu(pdacl->num_aces));
+ le16_to_cpu(pdacl->num_aces));
/* reset rwx permissions for user/group/other.
Also, if num_aces is 0 i.e. DACL has no ACEs,
@@ -795,12 +796,15 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl,
acl_base = (char *)pdacl;
acl_size = sizeof(struct smb_acl);
- num_aces = le32_to_cpu(pdacl->num_aces);
+ num_aces = le16_to_cpu(pdacl->num_aces);
if (num_aces > 0) {
umode_t denied_mode = 0;
- if (num_aces > ULONG_MAX / sizeof(struct smb_ace *))
+ if (num_aces > (le16_to_cpu(pdacl->size) - sizeof(struct smb_acl)) /
+ (offsetof(struct smb_ace, sid) +
+ offsetof(struct smb_sid, sub_auth) + sizeof(__le16)))
return;
+
ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *),
GFP_KERNEL);
if (!ppace)
@@ -937,12 +941,12 @@ unsigned int setup_special_user_owner_ACE(struct smb_ace *pntace)
static void populate_new_aces(char *nacl_base,
struct smb_sid *pownersid,
struct smb_sid *pgrpsid,
- __u64 *pnmode, u32 *pnum_aces, u16 *pnsize,
+ __u64 *pnmode, u16 *pnum_aces, u16 *pnsize,
bool modefromsid,
bool posix)
{
__u64 nmode;
- u32 num_aces = 0;
+ u16 num_aces = 0;
u16 nsize = 0;
__u64 user_mode;
__u64 group_mode;
@@ -1050,7 +1054,7 @@ static __u16 replace_sids_and_copy_aces(struct smb_acl *pdacl, struct smb_acl *p
u16 size = 0;
struct smb_ace *pntace = NULL;
char *acl_base = NULL;
- u32 src_num_aces = 0;
+ u16 src_num_aces = 0;
u16 nsize = 0;
struct smb_ace *pnntace = NULL;
char *nacl_base = NULL;
@@ -1058,7 +1062,7 @@ static __u16 replace_sids_and_copy_aces(struct smb_acl *pdacl, struct smb_acl *p
acl_base = (char *)pdacl;
size = sizeof(struct smb_acl);
- src_num_aces = le32_to_cpu(pdacl->num_aces);
+ src_num_aces = le16_to_cpu(pdacl->num_aces);
nacl_base = (char *)pndacl;
nsize = sizeof(struct smb_acl);
@@ -1090,11 +1094,11 @@ static int set_chmod_dacl(struct smb_acl *pdacl, struct smb_acl *pndacl,
u16 size = 0;
struct smb_ace *pntace = NULL;
char *acl_base = NULL;
- u32 src_num_aces = 0;
+ u16 src_num_aces = 0;
u16 nsize = 0;
struct smb_ace *pnntace = NULL;
char *nacl_base = NULL;
- u32 num_aces = 0;
+ u16 num_aces = 0;
bool new_aces_set = false;
/* Assuming that pndacl and pnmode are never NULL */
@@ -1112,7 +1116,7 @@ static int set_chmod_dacl(struct smb_acl *pdacl, struct smb_acl *pndacl,
acl_base = (char *)pdacl;
size = sizeof(struct smb_acl);
- src_num_aces = le32_to_cpu(pdacl->num_aces);
+ src_num_aces = le16_to_cpu(pdacl->num_aces);
/* Retain old ACEs which we can retain */
for (i = 0; i < src_num_aces; ++i) {
@@ -1158,7 +1162,7 @@ next_ace:
}
finalize_dacl:
- pndacl->num_aces = cpu_to_le32(num_aces);
+ pndacl->num_aces = cpu_to_le16(num_aces);
pndacl->size = cpu_to_le16(nsize);
return 0;
@@ -1293,7 +1297,7 @@ static int build_sec_desc(struct smb_ntsd *pntsd, struct smb_ntsd *pnntsd,
dacloffset ? dacl_ptr->revision : cpu_to_le16(ACL_REVISION);
ndacl_ptr->size = cpu_to_le16(0);
- ndacl_ptr->num_aces = cpu_to_le32(0);
+ ndacl_ptr->num_aces = cpu_to_le16(0);
rc = set_chmod_dacl(dacl_ptr, ndacl_ptr, owner_sid_ptr, group_sid_ptr,
pnmode, mode_from_sid, posix);
@@ -1653,7 +1657,7 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset);
if (mode_from_sid)
nsecdesclen +=
- le32_to_cpu(dacl_ptr->num_aces) * sizeof(struct smb_ace);
+ le16_to_cpu(dacl_ptr->num_aces) * sizeof(struct smb_ace);
else /* cifsacl */
nsecdesclen += le16_to_cpu(dacl_ptr->size);
}
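
Both the client and server DACL parsers now refuse a num_aces value that could not possibly fit in the advertised ACL size, in addition to shrinking the field itself to 16 bits. The same "count versus buffer size" sanity check in standalone form, with invented structure names and element sizes:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define HDR_SIZE      8u	/* stand-in for sizeof(struct smb_acl) */
#define MIN_ELEM_SIZE 12u	/* smallest element that could legally follow */

struct wire_hdr {
	uint16_t total_size;	/* bytes, including the header */
	uint16_t count;		/* claimed number of elements */
};

static void **parse_elements(const struct wire_hdr *hdr)
{
	if (hdr->total_size < HDR_SIZE)
		return NULL;
	/* reject counts that cannot possibly fit in total_size */
	if (hdr->count > (hdr->total_size - HDR_SIZE) / MIN_ELEM_SIZE)
		return NULL;
	return calloc(hdr->count, sizeof(void *));
}

int main(void)
{
	struct wire_hdr sane = { .total_size = 8 + 3 * 12, .count = 3 };
	struct wire_hdr bogus = { .total_size = 20, .count = 60000 };
	void **p = parse_elements(&sane);

	printf("sane:  %s\n", p ? "accepted" : "rejected");
	free(p);
	printf("bogus: %s\n", parse_elements(&bogus) ? "accepted" : "rejected");
	return 0;
}
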
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index 3feaa0f68169..d07682020c64 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -1338,7 +1338,8 @@ cifs_readv_callback(struct mid_q_entry *mid)
rdata->credits.value = 0;
rdata->subreq.error = rdata->result;
rdata->subreq.transferred += rdata->got_bytes;
- queue_work(cifsiod_wq, &rdata->subreq.work);
+ trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_progress);
+ netfs_read_subreq_terminated(&rdata->subreq);
release_mid(mid);
add_credits(server, &credits, 0);
}
diff --git a/fs/smb/common/smbacl.h b/fs/smb/common/smbacl.h
index 6a60698fc6f0..a624ec9e4a14 100644
--- a/fs/smb/common/smbacl.h
+++ b/fs/smb/common/smbacl.h
@@ -107,7 +107,8 @@ struct smb_sid {
struct smb_acl {
__le16 revision; /* revision level */
__le16 size;
- __le32 num_aces;
+ __le16 num_aces;
+ __le16 reserved;
} __attribute__((packed));
struct smb_ace {
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index f1efcd027475..c53121538990 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -7458,17 +7458,17 @@ out_check_cl:
}
no_check_cl:
+ flock = smb_lock->fl;
+ list_del(&smb_lock->llist);
+
if (smb_lock->zero_len) {
err = 0;
goto skip;
}
-
- flock = smb_lock->fl;
- list_del(&smb_lock->llist);
retry:
rc = vfs_lock_file(filp, smb_lock->cmd, flock, NULL);
skip:
- if (flags & SMB2_LOCKFLAG_UNLOCK) {
+ if (smb_lock->flags & SMB2_LOCKFLAG_UNLOCK) {
if (!rc) {
ksmbd_debug(SMB, "File unlocked\n");
} else if (rc == -ENOENT) {
diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c
index d39d3e553366..49b128698670 100644
--- a/fs/smb/server/smbacl.c
+++ b/fs/smb/server/smbacl.c
@@ -333,7 +333,7 @@ void posix_state_to_acl(struct posix_acl_state *state,
pace->e_perm = state->other.allow;
}
-int init_acl_state(struct posix_acl_state *state, int cnt)
+int init_acl_state(struct posix_acl_state *state, u16 cnt)
{
int alloc;
@@ -368,7 +368,7 @@ static void parse_dacl(struct mnt_idmap *idmap,
struct smb_fattr *fattr)
{
int i, ret;
- int num_aces = 0;
+ u16 num_aces = 0;
unsigned int acl_size;
char *acl_base;
struct smb_ace **ppace;
@@ -389,16 +389,18 @@ static void parse_dacl(struct mnt_idmap *idmap,
ksmbd_debug(SMB, "DACL revision %d size %d num aces %d\n",
le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
- le32_to_cpu(pdacl->num_aces));
+ le16_to_cpu(pdacl->num_aces));
acl_base = (char *)pdacl;
acl_size = sizeof(struct smb_acl);
- num_aces = le32_to_cpu(pdacl->num_aces);
+ num_aces = le16_to_cpu(pdacl->num_aces);
if (num_aces <= 0)
return;
- if (num_aces > ULONG_MAX / sizeof(struct smb_ace *))
+ if (num_aces > (le16_to_cpu(pdacl->size) - sizeof(struct smb_acl)) /
+ (offsetof(struct smb_ace, sid) +
+ offsetof(struct smb_sid, sub_auth) + sizeof(__le16)))
return;
ret = init_acl_state(&acl_state, num_aces);
@@ -432,6 +434,7 @@ static void parse_dacl(struct mnt_idmap *idmap,
offsetof(struct smb_sid, sub_auth);
if (end_of_acl - acl_base < acl_size ||
+ ppace[i]->sid.num_subauth == 0 ||
ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES ||
(end_of_acl - acl_base <
acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) ||
@@ -580,7 +583,7 @@ static void parse_dacl(struct mnt_idmap *idmap,
static void set_posix_acl_entries_dacl(struct mnt_idmap *idmap,
struct smb_ace *pndace,
- struct smb_fattr *fattr, u32 *num_aces,
+ struct smb_fattr *fattr, u16 *num_aces,
u16 *size, u32 nt_aces_num)
{
struct posix_acl_entry *pace;
@@ -701,7 +704,7 @@ static void set_ntacl_dacl(struct mnt_idmap *idmap,
struct smb_fattr *fattr)
{
struct smb_ace *ntace, *pndace;
- int nt_num_aces = le32_to_cpu(nt_dacl->num_aces), num_aces = 0;
+ u16 nt_num_aces = le16_to_cpu(nt_dacl->num_aces), num_aces = 0;
unsigned short size = 0;
int i;
@@ -728,7 +731,7 @@ static void set_ntacl_dacl(struct mnt_idmap *idmap,
set_posix_acl_entries_dacl(idmap, pndace, fattr,
&num_aces, &size, nt_num_aces);
- pndacl->num_aces = cpu_to_le32(num_aces);
+ pndacl->num_aces = cpu_to_le16(num_aces);
pndacl->size = cpu_to_le16(le16_to_cpu(pndacl->size) + size);
}
@@ -736,7 +739,7 @@ static void set_mode_dacl(struct mnt_idmap *idmap,
struct smb_acl *pndacl, struct smb_fattr *fattr)
{
struct smb_ace *pace, *pndace;
- u32 num_aces = 0;
+ u16 num_aces = 0;
u16 size = 0, ace_size = 0;
uid_t uid;
const struct smb_sid *sid;
@@ -792,7 +795,7 @@ static void set_mode_dacl(struct mnt_idmap *idmap,
fattr->cf_mode, 0007);
out:
- pndacl->num_aces = cpu_to_le32(num_aces);
+ pndacl->num_aces = cpu_to_le16(num_aces);
pndacl->size = cpu_to_le16(le16_to_cpu(pndacl->size) + size);
}
@@ -807,6 +810,13 @@ static int parse_sid(struct smb_sid *psid, char *end_of_acl)
return -EINVAL;
}
+ if (!psid->num_subauth)
+ return 0;
+
+ if (psid->num_subauth > SID_MAX_SUB_AUTHORITIES ||
+ end_of_acl < (char *)psid + 8 + sizeof(__le32) * psid->num_subauth)
+ return -EINVAL;
+
return 0;
}
@@ -848,6 +858,9 @@ int parse_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd,
pntsd->type = cpu_to_le16(DACL_PRESENT);
if (pntsd->osidoffset) {
+ if (le32_to_cpu(pntsd->osidoffset) < sizeof(struct smb_ntsd))
+ return -EINVAL;
+
rc = parse_sid(owner_sid_ptr, end_of_acl);
if (rc) {
pr_err("%s: Error %d parsing Owner SID\n", __func__, rc);
@@ -863,6 +876,9 @@ int parse_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd,
}
if (pntsd->gsidoffset) {
+ if (le32_to_cpu(pntsd->gsidoffset) < sizeof(struct smb_ntsd))
+ return -EINVAL;
+
rc = parse_sid(group_sid_ptr, end_of_acl);
if (rc) {
pr_err("%s: Error %d mapping Owner SID to gid\n",
@@ -884,6 +900,9 @@ int parse_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd,
pntsd->type |= cpu_to_le16(DACL_PROTECTED);
if (dacloffset) {
+ if (dacloffset < sizeof(struct smb_ntsd))
+ return -EINVAL;
+
parse_dacl(idmap, dacl_ptr, end_of_acl,
owner_sid_ptr, group_sid_ptr, fattr);
}
@@ -1006,8 +1025,9 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
struct smb_sid owner_sid, group_sid;
struct dentry *parent = path->dentry->d_parent;
struct mnt_idmap *idmap = mnt_idmap(path->mnt);
- int inherited_flags = 0, flags = 0, i, ace_cnt = 0, nt_size = 0, pdacl_size;
- int rc = 0, num_aces, dacloffset, pntsd_type, pntsd_size, acl_len, aces_size;
+ int inherited_flags = 0, flags = 0, i, nt_size = 0, pdacl_size;
+ int rc = 0, dacloffset, pntsd_type, pntsd_size, acl_len, aces_size;
+ u16 num_aces, ace_cnt = 0;
char *aces_base;
bool is_dir = S_ISDIR(d_inode(path->dentry)->i_mode);
@@ -1023,7 +1043,7 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
parent_pdacl = (struct smb_acl *)((char *)parent_pntsd + dacloffset);
acl_len = pntsd_size - dacloffset;
- num_aces = le32_to_cpu(parent_pdacl->num_aces);
+ num_aces = le16_to_cpu(parent_pdacl->num_aces);
pntsd_type = le16_to_cpu(parent_pntsd->type);
pdacl_size = le16_to_cpu(parent_pdacl->size);
@@ -1183,7 +1203,7 @@ pass:
pdacl = (struct smb_acl *)((char *)pntsd + le32_to_cpu(pntsd->dacloffset));
pdacl->revision = cpu_to_le16(2);
pdacl->size = cpu_to_le16(sizeof(struct smb_acl) + nt_size);
- pdacl->num_aces = cpu_to_le32(ace_cnt);
+ pdacl->num_aces = cpu_to_le16(ace_cnt);
pace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
memcpy(pace, aces_base, nt_size);
pntsd_size += sizeof(struct smb_acl) + nt_size;
@@ -1264,7 +1284,7 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
aces_size = acl_size - sizeof(struct smb_acl);
- for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) {
+ for (i = 0; i < le16_to_cpu(pdacl->num_aces); i++) {
if (offsetof(struct smb_ace, access_req) > aces_size)
break;
ace_size = le16_to_cpu(ace->size);
@@ -1285,7 +1305,7 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl));
aces_size = acl_size - sizeof(struct smb_acl);
- for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) {
+ for (i = 0; i < le16_to_cpu(pdacl->num_aces); i++) {
if (offsetof(struct smb_ace, access_req) > aces_size)
break;
ace_size = le16_to_cpu(ace->size);
diff --git a/fs/smb/server/smbacl.h b/fs/smb/server/smbacl.h
index 24ce576fc292..355adaee39b8 100644
--- a/fs/smb/server/smbacl.h
+++ b/fs/smb/server/smbacl.h
@@ -86,7 +86,7 @@ int parse_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd,
int build_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd,
struct smb_ntsd *ppntsd, int ppntsd_size, int addition_info,
__u32 *secdesclen, struct smb_fattr *fattr);
-int init_acl_state(struct posix_acl_state *state, int cnt);
+int init_acl_state(struct posix_acl_state *state, u16 cnt);
void free_acl_state(struct posix_acl_state *state);
void posix_state_to_acl(struct posix_acl_state *state,
struct posix_acl_entry *pace);
diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
index 0460ebea6ff0..3f185ae60dc5 100644
--- a/fs/smb/server/transport_ipc.c
+++ b/fs/smb/server/transport_ipc.c
@@ -281,6 +281,7 @@ static int handle_response(int type, void *payload, size_t sz)
if (entry->type + 1 != type) {
pr_err("Waiting for IPC type %d, got %d. Ignore.\n",
entry->type + 1, type);
+ continue;
}
entry->response = kvzalloc(sz, KSMBD_DEFAULT_GFP);

diff --git a/fs/splice.c b/fs/splice.c
index 28cfa63aa236..23fa5561b944 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -331,7 +331,7 @@ ssize_t copy_splice_read(struct file *in, loff_t *ppos,
int i;
/* Work out how much data we can actually add into the pipe */
- used = pipe_occupancy(pipe->head, pipe->tail);
+ used = pipe_buf_usage(pipe);
npages = max_t(ssize_t, pipe->max_usage - used, 0);
len = min_t(size_t, len, npages * PAGE_SIZE);
npages = DIV_ROUND_UP(len, PAGE_SIZE);
@@ -527,7 +527,7 @@ static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_des
return -ERESTARTSYS;
repeat:
- while (pipe_empty(pipe->head, pipe->tail)) {
+ while (pipe_is_empty(pipe)) {
if (!pipe->writers)
return 0;
@@ -820,7 +820,7 @@ ssize_t splice_to_socket(struct pipe_inode_info *pipe, struct file *out,
if (signal_pending(current))
break;
- while (pipe_empty(pipe->head, pipe->tail)) {
+ while (pipe_is_empty(pipe)) {
ret = 0;
if (!pipe->writers)
goto out;
@@ -968,7 +968,7 @@ static ssize_t do_splice_read(struct file *in, loff_t *ppos,
return 0;
/* Don't try to read more the pipe has space for. */
- p_space = pipe->max_usage - pipe_occupancy(pipe->head, pipe->tail);
+ p_space = pipe->max_usage - pipe_buf_usage(pipe);
len = min_t(size_t, len, p_space << PAGE_SHIFT);
if (unlikely(len > MAX_RW_COUNT))
@@ -1080,7 +1080,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
more = sd->flags & SPLICE_F_MORE;
sd->flags |= SPLICE_F_MORE;
- WARN_ON_ONCE(!pipe_empty(pipe->head, pipe->tail));
+ WARN_ON_ONCE(!pipe_is_empty(pipe));
while (len) {
size_t read_len;
@@ -1268,7 +1268,7 @@ static int wait_for_space(struct pipe_inode_info *pipe, unsigned flags)
send_sig(SIGPIPE, current, 0);
return -EPIPE;
}
- if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage))
+ if (!pipe_is_full(pipe))
return 0;
if (flags & SPLICE_F_NONBLOCK)
return -EAGAIN;
@@ -1652,13 +1652,13 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
* Check the pipe occupancy without the inode lock first. This function
* is speculative anyways, so missing one is ok.
*/
- if (!pipe_empty(pipe->head, pipe->tail))
+ if (!pipe_is_empty(pipe))
return 0;
ret = 0;
pipe_lock(pipe);
- while (pipe_empty(pipe->head, pipe->tail)) {
+ while (pipe_is_empty(pipe)) {
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
@@ -1688,13 +1688,13 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
* Check pipe occupancy without the inode lock first. This function
* is speculative anyways, so missing one is ok.
*/
- if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage))
+ if (!pipe_is_full(pipe))
return 0;
ret = 0;
pipe_lock(pipe);
- while (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
+ while (pipe_is_full(pipe)) {
if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
ret = -EPIPE;
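The pipe_is_empty(), pipe_is_full() and pipe_buf_usage() helpers used above are introduced later in this diff (include/linux/pipe_fs_i.h). A minimal before/after sketch of the calling convention, with a hypothetical wait helper:

	/* before: callers compared the ring indices themselves */
	while (pipe_empty(pipe->head, pipe->tail))
		wait_for_more_data(pipe);	/* hypothetical helper */

	/* after: the comparison is wrapped behind the pipe itself */
	while (pipe_is_empty(pipe))
		wait_for_more_data(pipe);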
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 15bb790359f8..5d560e9073f4 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -29,11 +29,6 @@ struct kmem_cache *xfs_buf_cache;
/*
* Locking orders
*
- * xfs_buf_ioacct_inc:
- * xfs_buf_ioacct_dec:
- * b_sema (caller holds)
- * b_lock
- *
* xfs_buf_stale:
* b_sema (caller holds)
* b_lock
@@ -82,51 +77,6 @@ xfs_buf_vmap_len(
}
/*
- * Bump the I/O in flight count on the buftarg if we haven't yet done so for
- * this buffer. The count is incremented once per buffer (per hold cycle)
- * because the corresponding decrement is deferred to buffer release. Buffers
- * can undergo I/O multiple times in a hold-release cycle and per buffer I/O
- * tracking adds unnecessary overhead. This is used for sychronization purposes
- * with unmount (see xfs_buftarg_drain()), so all we really need is a count of
- * in-flight buffers.
- *
- * Buffers that are never released (e.g., superblock, iclog buffers) must set
- * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
- * never reaches zero and unmount hangs indefinitely.
- */
-static inline void
-xfs_buf_ioacct_inc(
- struct xfs_buf *bp)
-{
- if (bp->b_flags & XBF_NO_IOACCT)
- return;
-
- ASSERT(bp->b_flags & XBF_ASYNC);
- spin_lock(&bp->b_lock);
- if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
- bp->b_state |= XFS_BSTATE_IN_FLIGHT;
- percpu_counter_inc(&bp->b_target->bt_io_count);
- }
- spin_unlock(&bp->b_lock);
-}
-
-/*
- * Clear the in-flight state on a buffer about to be released to the LRU or
- * freed and unaccount from the buftarg.
- */
-static inline void
-__xfs_buf_ioacct_dec(
- struct xfs_buf *bp)
-{
- lockdep_assert_held(&bp->b_lock);
-
- if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
- bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
- percpu_counter_dec(&bp->b_target->bt_io_count);
- }
-}
-
-/*
* When we mark a buffer stale, we remove the buffer from the LRU and clear the
* b_lru_ref count so that the buffer is freed immediately when the buffer
* reference count falls to zero. If the buffer is already on the LRU, we need
@@ -149,15 +99,7 @@ xfs_buf_stale(
*/
bp->b_flags &= ~_XBF_DELWRI_Q;
- /*
- * Once the buffer is marked stale and unlocked, a subsequent lookup
- * could reset b_flags. There is no guarantee that the buffer is
- * unaccounted (released to LRU) before that occurs. Drop in-flight
- * status now to preserve accounting consistency.
- */
spin_lock(&bp->b_lock);
- __xfs_buf_ioacct_dec(bp);
-
atomic_set(&bp->b_lru_ref, 0);
if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
(list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru)))
@@ -794,18 +736,13 @@ out_put_perag:
int
_xfs_buf_read(
- struct xfs_buf *bp,
- xfs_buf_flags_t flags)
+ struct xfs_buf *bp)
{
- ASSERT(!(flags & XBF_WRITE));
ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD | XBF_DONE);
- bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
-
+ bp->b_flags |= XBF_READ;
xfs_buf_submit(bp);
- if (flags & XBF_ASYNC)
- return 0;
return xfs_buf_iowait(bp);
}
@@ -857,6 +794,8 @@ xfs_buf_read_map(
struct xfs_buf *bp;
int error;
+ ASSERT(!(flags & (XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD)));
+
flags |= XBF_READ;
*bpp = NULL;
@@ -870,21 +809,11 @@ xfs_buf_read_map(
/* Initiate the buffer read and wait. */
XFS_STATS_INC(target->bt_mount, xb_get_read);
bp->b_ops = ops;
- error = _xfs_buf_read(bp, flags);
-
- /* Readahead iodone already dropped the buffer, so exit. */
- if (flags & XBF_ASYNC)
- return 0;
+ error = _xfs_buf_read(bp);
} else {
/* Buffer already read; all we need to do is check it. */
error = xfs_buf_reverify(bp, ops);
- /* Readahead already finished; drop the buffer and exit. */
- if (flags & XBF_ASYNC) {
- xfs_buf_relse(bp);
- return 0;
- }
-
/* We do not want read in the flags */
bp->b_flags &= ~XBF_READ;
ASSERT(bp->b_ops != NULL || ops == NULL);
@@ -936,6 +865,7 @@ xfs_buf_readahead_map(
int nmaps,
const struct xfs_buf_ops *ops)
{
+ const xfs_buf_flags_t flags = XBF_READ | XBF_ASYNC | XBF_READ_AHEAD;
struct xfs_buf *bp;
/*
@@ -945,9 +875,21 @@ xfs_buf_readahead_map(
if (xfs_buftarg_is_mem(target))
return;
- xfs_buf_read_map(target, map, nmaps,
- XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD, &bp, ops,
- __this_address);
+ if (xfs_buf_get_map(target, map, nmaps, flags | XBF_TRYLOCK, &bp))
+ return;
+ trace_xfs_buf_readahead(bp, 0, _RET_IP_);
+
+ if (bp->b_flags & XBF_DONE) {
+ xfs_buf_reverify(bp, ops);
+ xfs_buf_relse(bp);
+ return;
+ }
+ XFS_STATS_INC(target->bt_mount, xb_get_read);
+ bp->b_ops = ops;
+ bp->b_flags &= ~(XBF_WRITE | XBF_DONE);
+ bp->b_flags |= flags;
+ percpu_counter_inc(&target->bt_readahead_count);
+ xfs_buf_submit(bp);
}
/*
@@ -1003,10 +945,12 @@ xfs_buf_get_uncached(
struct xfs_buf *bp;
DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
+ /* there are currently no valid flags for xfs_buf_get_uncached */
+ ASSERT(flags == 0);
+
*bpp = NULL;
- /* flags might contain irrelevant bits, pass only what we care about */
- error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp);
+ error = _xfs_buf_alloc(target, &map, 1, flags, &bp);
if (error)
return error;
@@ -1060,7 +1004,6 @@ xfs_buf_rele_uncached(
spin_unlock(&bp->b_lock);
return;
}
- __xfs_buf_ioacct_dec(bp);
spin_unlock(&bp->b_lock);
xfs_buf_free(bp);
}
@@ -1079,20 +1022,12 @@ xfs_buf_rele_cached(
spin_lock(&bp->b_lock);
ASSERT(bp->b_hold >= 1);
if (bp->b_hold > 1) {
- /*
- * Drop the in-flight state if the buffer is already on the LRU
- * and it holds the only reference. This is racy because we
- * haven't acquired the pag lock, but the use of _XBF_IN_FLIGHT
- * ensures the decrement occurs only once per-buf.
- */
- if (--bp->b_hold == 1 && !list_empty(&bp->b_lru))
- __xfs_buf_ioacct_dec(bp);
+ bp->b_hold--;
goto out_unlock;
}
/* we are asked to drop the last reference */
- __xfs_buf_ioacct_dec(bp);
- if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
+ if (atomic_read(&bp->b_lru_ref)) {
/*
* If the buffer is added to the LRU, keep the reference to the
* buffer for the LRU and clear the (now stale) dispose list
@@ -1345,6 +1280,7 @@ xfs_buf_ioend_handle_error(
resubmit:
xfs_buf_ioerror(bp, 0);
bp->b_flags |= (XBF_DONE | XBF_WRITE_FAIL);
+ reinit_completion(&bp->b_iowait);
xfs_buf_submit(bp);
return true;
out_stale:
@@ -1355,8 +1291,9 @@ out_stale:
return false;
}
-static void
-xfs_buf_ioend(
+/* returns false if the caller needs to resubmit the I/O, else true */
+static bool
+__xfs_buf_ioend(
struct xfs_buf *bp)
{
trace_xfs_buf_iodone(bp, _RET_IP_);
@@ -1369,6 +1306,8 @@ xfs_buf_ioend(
bp->b_ops->verify_read(bp);
if (!bp->b_error)
bp->b_flags |= XBF_DONE;
+ if (bp->b_flags & XBF_READ_AHEAD)
+ percpu_counter_dec(&bp->b_target->bt_readahead_count);
} else {
if (!bp->b_error) {
bp->b_flags &= ~XBF_WRITE_FAIL;
@@ -1376,7 +1315,7 @@ xfs_buf_ioend(
}
if (unlikely(bp->b_error) && xfs_buf_ioend_handle_error(bp))
- return;
+ return false;
/* clear the retry state */
bp->b_last_error = 0;
@@ -1397,7 +1336,15 @@ xfs_buf_ioend(
bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD |
_XBF_LOGRECOVERY);
+ return true;
+}
+static void
+xfs_buf_ioend(
+ struct xfs_buf *bp)
+{
+ if (!__xfs_buf_ioend(bp))
+ return;
if (bp->b_flags & XBF_ASYNC)
xfs_buf_relse(bp);
else
@@ -1411,15 +1358,8 @@ xfs_buf_ioend_work(
struct xfs_buf *bp =
container_of(work, struct xfs_buf, b_ioend_work);
- xfs_buf_ioend(bp);
-}
-
-static void
-xfs_buf_ioend_async(
- struct xfs_buf *bp)
-{
- INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
- queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work);
+ if (__xfs_buf_ioend(bp))
+ xfs_buf_relse(bp);
}
void
@@ -1491,7 +1431,13 @@ xfs_buf_bio_end_io(
XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR))
xfs_buf_ioerror(bp, -EIO);
- xfs_buf_ioend_async(bp);
+ if (bp->b_flags & XBF_ASYNC) {
+ INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
+ queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work);
+ } else {
+ complete(&bp->b_iowait);
+ }
+
bio_put(bio);
}
@@ -1568,9 +1514,11 @@ xfs_buf_iowait(
{
ASSERT(!(bp->b_flags & XBF_ASYNC));
- trace_xfs_buf_iowait(bp, _RET_IP_);
- wait_for_completion(&bp->b_iowait);
- trace_xfs_buf_iowait_done(bp, _RET_IP_);
+ do {
+ trace_xfs_buf_iowait(bp, _RET_IP_);
+ wait_for_completion(&bp->b_iowait);
+ trace_xfs_buf_iowait_done(bp, _RET_IP_);
+ } while (!__xfs_buf_ioend(bp));
return bp->b_error;
}
@@ -1648,9 +1596,6 @@ xfs_buf_submit(
*/
bp->b_error = 0;
- if (bp->b_flags & XBF_ASYNC)
- xfs_buf_ioacct_inc(bp);
-
if ((bp->b_flags & XBF_WRITE) && !xfs_buf_verify_write(bp)) {
xfs_force_shutdown(bp->b_mount, SHUTDOWN_CORRUPT_INCORE);
xfs_buf_ioend(bp);
@@ -1776,9 +1721,8 @@ xfs_buftarg_wait(
struct xfs_buftarg *btp)
{
/*
- * First wait on the buftarg I/O count for all in-flight buffers to be
- * released. This is critical as new buffers do not make the LRU until
- * they are released.
+ * First wait for all in-flight readahead buffers to be released. This is
+ * critical as new buffers do not make the LRU until they are released.
*
* Next, flush the buffer workqueue to ensure all completion processing
* has finished. Just waiting on buffer locks is not sufficient for
@@ -1787,7 +1731,7 @@ xfs_buftarg_wait(
* all reference counts have been dropped before we start walking the
* LRU list.
*/
- while (percpu_counter_sum(&btp->bt_io_count))
+ while (percpu_counter_sum(&btp->bt_readahead_count))
delay(100);
flush_workqueue(btp->bt_mount->m_buf_workqueue);
}
@@ -1904,8 +1848,8 @@ xfs_destroy_buftarg(
struct xfs_buftarg *btp)
{
shrinker_free(btp->bt_shrinker);
- ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
- percpu_counter_destroy(&btp->bt_io_count);
+ ASSERT(percpu_counter_sum(&btp->bt_readahead_count) == 0);
+ percpu_counter_destroy(&btp->bt_readahead_count);
list_lru_destroy(&btp->bt_lru);
}
@@ -1959,7 +1903,7 @@ xfs_init_buftarg(
if (list_lru_init(&btp->bt_lru))
return -ENOMEM;
- if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
+ if (percpu_counter_init(&btp->bt_readahead_count, 0, GFP_KERNEL))
goto out_destroy_lru;
btp->bt_shrinker =
@@ -1973,7 +1917,7 @@ xfs_init_buftarg(
return 0;
out_destroy_io_count:
- percpu_counter_destroy(&btp->bt_io_count);
+ percpu_counter_destroy(&btp->bt_readahead_count);
out_destroy_lru:
list_lru_destroy(&btp->bt_lru);
return -ENOMEM;
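Summarizing the accounting change in this file: only asynchronous readahead I/O is counted now, and the counter is touched in exactly the three places shown above (all names taken from the hunks in this patch):

	/* submit: xfs_buf_readahead_map() */
	percpu_counter_inc(&target->bt_readahead_count);

	/* complete: __xfs_buf_ioend(), only for XBF_READ_AHEAD buffers */
	percpu_counter_dec(&bp->b_target->bt_readahead_count);

	/* drain on unmount: xfs_buftarg_wait() */
	while (percpu_counter_sum(&btp->bt_readahead_count))
		delay(100);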
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 3b4ed42e11c0..80e06eecaf56 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -27,7 +27,6 @@ struct xfs_buf;
#define XBF_READ (1u << 0) /* buffer intended for reading from device */
#define XBF_WRITE (1u << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD (1u << 2) /* asynchronous read-ahead */
-#define XBF_NO_IOACCT (1u << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC (1u << 4) /* initiator will not wait for completion */
#define XBF_DONE (1u << 5) /* all pages in the buffer uptodate */
#define XBF_STALE (1u << 6) /* buffer has been staled, do not find it */
@@ -58,7 +57,6 @@ typedef unsigned int xfs_buf_flags_t;
{ XBF_READ, "READ" }, \
{ XBF_WRITE, "WRITE" }, \
{ XBF_READ_AHEAD, "READ_AHEAD" }, \
- { XBF_NO_IOACCT, "NO_IOACCT" }, \
{ XBF_ASYNC, "ASYNC" }, \
{ XBF_DONE, "DONE" }, \
{ XBF_STALE, "STALE" }, \
@@ -77,7 +75,6 @@ typedef unsigned int xfs_buf_flags_t;
* Internal state flags.
*/
#define XFS_BSTATE_DISPOSE (1 << 0) /* buffer being discarded */
-#define XFS_BSTATE_IN_FLIGHT (1 << 1) /* I/O in flight */
struct xfs_buf_cache {
struct rhashtable bc_hash;
@@ -116,7 +113,7 @@ struct xfs_buftarg {
struct shrinker *bt_shrinker;
struct list_lru bt_lru;
- struct percpu_counter bt_io_count;
+ struct percpu_counter bt_readahead_count;
struct ratelimit_state bt_ioerror_rl;
/* Atomic write unit values */
@@ -291,7 +288,7 @@ int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
size_t numblks, xfs_buf_flags_t flags, struct xfs_buf **bpp,
const struct xfs_buf_ops *ops);
-int _xfs_buf_read(struct xfs_buf *bp, xfs_buf_flags_t flags);
+int _xfs_buf_read(struct xfs_buf *bp);
void xfs_buf_hold(struct xfs_buf *bp);
/* Releasing Buffers */
diff --git a/fs/xfs/xfs_buf_mem.c b/fs/xfs/xfs_buf_mem.c
index 07bebbfb16ee..5b64a2b3b113 100644
--- a/fs/xfs/xfs_buf_mem.c
+++ b/fs/xfs/xfs_buf_mem.c
@@ -117,7 +117,7 @@ xmbuf_free(
struct xfs_buftarg *btp)
{
ASSERT(xfs_buftarg_is_mem(btp));
- ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
+ ASSERT(percpu_counter_sum(&btp->bt_readahead_count) == 0);
trace_xmbuf_free(btp);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index b3c27dbccce8..2f76531842f8 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -3380,7 +3380,7 @@ xlog_do_recover(
*/
xfs_buf_lock(bp);
xfs_buf_hold(bp);
- error = _xfs_buf_read(bp, XBF_READ);
+ error = _xfs_buf_read(bp);
if (error) {
if (!xlog_is_shutdown(log)) {
xfs_buf_ioerror_alert(bp, __this_address);
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 477c5262cf91..b69356582b86 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -181,14 +181,11 @@ xfs_readsb(
/*
* Allocate a (locked) buffer to hold the superblock. This will be kept
- * around at all times to optimize access to the superblock. Therefore,
- * set XBF_NO_IOACCT to make sure it doesn't hold the buftarg count
- * elevated.
+ * around at all times to optimize access to the superblock.
*/
reread:
error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
- BTOBB(sector_size), XBF_NO_IOACCT, &bp,
- buf_ops);
+ BTOBB(sector_size), 0, &bp, buf_ops);
if (error) {
if (loud)
xfs_warn(mp, "SB validate failed with error %d.", error);
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index d8e6d073d64d..57bef567e011 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -1407,7 +1407,7 @@ xfs_rtmount_readsb(
/* m_blkbb_log is not set up yet */
error = xfs_buf_read_uncached(mp->m_rtdev_targp, XFS_RTSB_DADDR,
- mp->m_sb.sb_blocksize >> BBSHIFT, XBF_NO_IOACCT, &bp,
+ mp->m_sb.sb_blocksize >> BBSHIFT, 0, &bp,
&xfs_rtsb_buf_ops);
if (error) {
xfs_warn(mp, "rt sb validate failed with error %d.", error);
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index b29462363b81..bfc2f1249022 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -593,6 +593,7 @@ DEFINE_EVENT(xfs_buf_flags_class, name, \
DEFINE_BUF_FLAGS_EVENT(xfs_buf_find);
DEFINE_BUF_FLAGS_EVENT(xfs_buf_get);
DEFINE_BUF_FLAGS_EVENT(xfs_buf_read);
+DEFINE_BUF_FLAGS_EVENT(xfs_buf_readahead);
TRACE_EVENT(xfs_buf_ioerror,
TP_PROTO(struct xfs_buf *bp, int error, xfs_failaddr_t caller_ip),
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index f42133dae68e..2afc95bf1655 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -90,7 +90,7 @@ static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
#ifndef __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
+ unsigned long addr, pte_t *ptep, unsigned long sz)
{
return ptep_get_and_clear(mm, addr, ptep);
}
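The extra sz argument lets architectures with multiple huge page levels choose the right clearing strategy; the generic fallback above simply ignores it. A caller passes the huge page size down, as the include/linux/hugetlb.h hunk later in this diff does:

	/* inside a caller that already has vma, addr, ptep: */
	unsigned long psize = huge_page_size(hstate_vma(vma));
	pte_t old = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);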
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 02a4adb4a999..0d5b186abee8 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -457,7 +457,7 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
. = ALIGN((align)); \
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
__start_rodata = .; \
- *(.rodata) *(.rodata.*) \
+ *(.rodata) *(.rodata.*) *(.data.rel.ro*) \
SCHED_DATA \
RO_AFTER_INIT_DATA /* Read only after init */ \
. = ALIGN(8); \
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index 3b13cf29ed55..146ca80e35db 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -143,6 +143,14 @@ struct drm_client_dev {
bool suspended;
/**
+ * @hotplug_pending:
+ *
+ * A hotplug event has been received while the client was suspended.
+ * Try again on resume.
+ */
+ bool hotplug_pending;
+
+ /**
* @hotplug_failed:
*
* Set by client hotplug helpers if the hotplugging failed
diff --git a/include/drm/drm_damage_helper.h b/include/drm/drm_damage_helper.h
index effda42cce31..a58cbcd11276 100644
--- a/include/drm/drm_damage_helper.h
+++ b/include/drm/drm_damage_helper.h
@@ -78,7 +78,7 @@ bool
drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
struct drm_rect *rect);
bool drm_atomic_helper_damage_merged(const struct drm_plane_state *old_state,
- struct drm_plane_state *state,
+ const struct drm_plane_state *state,
struct drm_rect *rect);
#endif
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 8426b9921a03..c1d38d54a112 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -70,23 +70,6 @@ struct drm_fb_helper_surface_size {
*/
struct drm_fb_helper_funcs {
/**
- * @fb_probe:
- *
- * Driver callback to allocate and initialize the fbdev info structure.
- * Furthermore it also needs to allocate the DRM framebuffer used to
- * back the fbdev.
- *
- * This callback is mandatory.
- *
- * RETURNS:
- *
- * The driver should return 0 on success and a negative error code on
- * failure.
- */
- int (*fb_probe)(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes);
-
- /**
* @fb_dirty:
*
* Driver callback to update the framebuffer memory. If set, fbdev
@@ -99,6 +82,33 @@ struct drm_fb_helper_funcs {
* 0 on success, or an error code otherwise.
*/
int (*fb_dirty)(struct drm_fb_helper *helper, struct drm_clip_rect *clip);
+
+ /**
+ * @fb_restore:
+ *
+ * Driver callback to restore internal fbdev state. If set, fbdev
+ * emulation will invoke this callback after restoring the display
+ * mode.
+ *
+ * Only for i915. Do not use in new code.
+ *
+ * TODO: Fix i915 to not require this callback.
+ */
+ void (*fb_restore)(struct drm_fb_helper *helper);
+
+ /**
+ * @fb_set_suspend:
+ *
+ * Driver callback to suspend or resume. If set, fbdev emulation will
+ * invoke this callback during suspend and resume. Drivers should call
+ * fb_set_suspend() from their implementation. If not set, fbdev
+ * emulation will invoke fb_set_suspend() directly.
+ *
+ * Only for i915. Do not use in new code.
+ *
+ * TODO: Fix i915 to not require this callback.
+ */
+ void (*fb_set_suspend)(struct drm_fb_helper *helper, bool suspend);
};
/**
diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h
new file mode 100644
index 000000000000..df120b4d1f83
--- /dev/null
+++ b/include/drm/drm_gpusvm.h
@@ -0,0 +1,509 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __DRM_GPUSVM_H__
+#define __DRM_GPUSVM_H__
+
+#include <linux/kref.h>
+#include <linux/interval_tree.h>
+#include <linux/mmu_notifier.h>
+
+struct dev_pagemap_ops;
+struct drm_device;
+struct drm_gpusvm;
+struct drm_gpusvm_notifier;
+struct drm_gpusvm_ops;
+struct drm_gpusvm_range;
+struct drm_gpusvm_devmem;
+struct drm_pagemap;
+struct drm_pagemap_device_addr;
+
+/**
+ * struct drm_gpusvm_devmem_ops - Operations structure for GPU SVM device memory
+ *
+ * This structure defines the operations for GPU Shared Virtual Memory (SVM)
+ * device memory. These operations are provided by the GPU driver to manage device memory
+ * allocations and perform operations such as migration between device memory and system
+ * RAM.
+ */
+struct drm_gpusvm_devmem_ops {
+ /**
+ * @devmem_release: Release device memory allocation (optional)
+ * @devmem_allocation: device memory allocation
+ *
+ * Release device memory allocation and drop a reference to device
+ * memory allocation.
+ */
+ void (*devmem_release)(struct drm_gpusvm_devmem *devmem_allocation);
+
+ /**
+ * @populate_devmem_pfn: Populate device memory PFN (required for migration)
+ * @devmem_allocation: device memory allocation
+ * @npages: Number of pages to populate
+ * @pfn: Array of page frame numbers to populate
+ *
+ * Populate device memory page frame numbers (PFN).
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+ int (*populate_devmem_pfn)(struct drm_gpusvm_devmem *devmem_allocation,
+ unsigned long npages, unsigned long *pfn);
+
+ /**
+ * @copy_to_devmem: Copy to device memory (required for migration)
+ * @pages: Pointer to array of device memory pages (destination)
+ * @dma_addr: Pointer to array of DMA addresses (source)
+ * @npages: Number of pages to copy
+ *
+ * Copy pages to device memory.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+ int (*copy_to_devmem)(struct page **pages,
+ dma_addr_t *dma_addr,
+ unsigned long npages);
+
+ /**
+ * @copy_to_ram: Copy to system RAM (required for migration)
+ * @pages: Pointer to array of device memory pages (source)
+ * @dma_addr: Pointer to array of DMA addresses (destination)
+ * @npages: Number of pages to copy
+ *
+ * Copy pages to system RAM.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+ int (*copy_to_ram)(struct page **pages,
+ dma_addr_t *dma_addr,
+ unsigned long npages);
+};
+
+/**
+ * struct drm_gpusvm_devmem - Structure representing a GPU SVM device memory allocation
+ *
+ * @dev: Pointer to the device structure which device memory allocation belongs to
+ * @mm: Pointer to the mm_struct for the address space
+ * @detached: Completion signalled once the device memory allocation is detached from device pages
+ * @ops: Pointer to the operations structure for GPU SVM device memory
+ * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
+ * @size: Size of device memory allocation
+ */
+struct drm_gpusvm_devmem {
+ struct device *dev;
+ struct mm_struct *mm;
+ struct completion detached;
+ const struct drm_gpusvm_devmem_ops *ops;
+ struct drm_pagemap *dpagemap;
+ size_t size;
+};
+
+/**
+ * struct drm_gpusvm_ops - Operations structure for GPU SVM
+ *
+ * This structure defines the operations for GPU Shared Virtual Memory (SVM).
+ * These operations are provided by the GPU driver to manage SVM ranges and
+ * notifiers.
+ */
+struct drm_gpusvm_ops {
+ /**
+ * @notifier_alloc: Allocate a GPU SVM notifier (optional)
+ *
+ * Allocate a GPU SVM notifier.
+ *
+ * Return: Pointer to the allocated GPU SVM notifier on success, NULL on failure.
+ */
+ struct drm_gpusvm_notifier *(*notifier_alloc)(void);
+
+ /**
+ * @notifier_free: Free a GPU SVM notifier (optional)
+ * @notifier: Pointer to the GPU SVM notifier to be freed
+ *
+ * Free a GPU SVM notifier.
+ */
+ void (*notifier_free)(struct drm_gpusvm_notifier *notifier);
+
+ /**
+ * @range_alloc: Allocate a GPU SVM range (optional)
+ * @gpusvm: Pointer to the GPU SVM
+ *
+ * Allocate a GPU SVM range.
+ *
+ * Return: Pointer to the allocated GPU SVM range on success, NULL on failure.
+ */
+ struct drm_gpusvm_range *(*range_alloc)(struct drm_gpusvm *gpusvm);
+
+ /**
+ * @range_free: Free a GPU SVM range (optional)
+ * @range: Pointer to the GPU SVM range to be freed
+ *
+ * Free a GPU SVM range.
+ */
+ void (*range_free)(struct drm_gpusvm_range *range);
+
+ /**
+ * @invalidate: Invalidate GPU SVM notifier (required)
+ * @gpusvm: Pointer to the GPU SVM
+ * @notifier: Pointer to the GPU SVM notifier
+ * @mmu_range: Pointer to the mmu_notifier_range structure
+ *
+ * Invalidate the GPU page tables. It can safely walk the notifier range
+ * RB tree/list in this function. Called while holding the notifier lock.
+ */
+ void (*invalidate)(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier,
+ const struct mmu_notifier_range *mmu_range);
+};
+
+/**
+ * struct drm_gpusvm_notifier - Structure representing a GPU SVM notifier
+ *
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: MMU interval notifier
+ * @itree: Interval tree node for the notifier (inserted in GPU SVM)
+ * @entry: List entry for fast interval tree traversal
+ * @root: Cached root node of the RB tree containing ranges
+ * @range_list: List head containing ranges in the same order they appear in
+ * interval tree. This is useful to keep iterating ranges while
+ * doing modifications to RB tree.
+ * @flags: Flags for notifier
+ * @flags.removed: Flag indicating whether the MMU interval notifier has been
+ * removed
+ *
+ * This structure represents a GPU SVM notifier.
+ */
+struct drm_gpusvm_notifier {
+ struct drm_gpusvm *gpusvm;
+ struct mmu_interval_notifier notifier;
+ struct interval_tree_node itree;
+ struct list_head entry;
+ struct rb_root_cached root;
+ struct list_head range_list;
+ struct {
+ u32 removed : 1;
+ } flags;
+};
+
+/**
+ * struct drm_gpusvm_range - Structure representing a GPU SVM range
+ *
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: Pointer to the GPU SVM notifier
+ * @refcount: Reference count for the range
+ * @itree: Interval tree node for the range (inserted in GPU SVM notifier)
+ * @entry: List entry for fast interval tree traversal
+ * @notifier_seq: Notifier sequence number of the range's pages
+ * @dma_addr: Device address array
+ * @dpagemap: The struct drm_pagemap of the device pages we're dma-mapping.
+ * Note this is assuming only one drm_pagemap per range is allowed.
+ * @flags: Flags for range
+ * @flags.migrate_devmem: Flag indicating whether the range can be migrated to device memory
+ * @flags.unmapped: Flag indicating if the range has been unmapped
+ * @flags.partial_unmap: Flag indicating if the range has been partially unmapped
+ * @flags.has_devmem_pages: Flag indicating if the range has devmem pages
+ * @flags.has_dma_mapping: Flag indicating if the range has a DMA mapping
+ *
+ * This structure represents a GPU SVM range used for tracking memory ranges
+ * mapped in a DRM device.
+ */
+struct drm_gpusvm_range {
+ struct drm_gpusvm *gpusvm;
+ struct drm_gpusvm_notifier *notifier;
+ struct kref refcount;
+ struct interval_tree_node itree;
+ struct list_head entry;
+ unsigned long notifier_seq;
+ struct drm_pagemap_device_addr *dma_addr;
+ struct drm_pagemap *dpagemap;
+ struct {
+ /* All flags below must be set upon creation */
+ u16 migrate_devmem : 1;
+ /* All flags below must be set / cleared under notifier lock */
+ u16 unmapped : 1;
+ u16 partial_unmap : 1;
+ u16 has_devmem_pages : 1;
+ u16 has_dma_mapping : 1;
+ } flags;
+};
+
+/**
+ * struct drm_gpusvm - GPU SVM structure
+ *
+ * @name: Name of the GPU SVM
+ * @drm: Pointer to the DRM device structure
+ * @mm: Pointer to the mm_struct for the address space
+ * @device_private_page_owner: Device private pages owner
+ * @mm_start: Start address of GPU SVM
+ * @mm_range: Range of the GPU SVM
+ * @notifier_size: Size of individual notifiers
+ * @ops: Pointer to the operations structure for GPU SVM
+ * @chunk_sizes: Pointer to the array of chunk sizes used in range allocation.
+ * Entries should be powers of 2 in descending order.
+ * @num_chunks: Number of chunks
+ * @notifier_lock: Read-write semaphore for protecting notifier operations
+ * @root: Cached root node of the Red-Black tree containing GPU SVM notifiers
+ * @notifier_list: list head containing notifiers in the same order they
+ * appear in interval tree. This is useful to keep iterating
+ * notifiers while doing modifications to RB tree.
+ *
+ * This structure represents a GPU SVM (Shared Virtual Memory) used for tracking
+ * memory ranges mapped in a DRM (Direct Rendering Manager) device.
+ *
+ * No reference counting is provided, as this is expected to be embedded in the
+ * driver VM structure along with the struct drm_gpuvm, which handles reference
+ * counting.
+ */
+struct drm_gpusvm {
+ const char *name;
+ struct drm_device *drm;
+ struct mm_struct *mm;
+ void *device_private_page_owner;
+ unsigned long mm_start;
+ unsigned long mm_range;
+ unsigned long notifier_size;
+ const struct drm_gpusvm_ops *ops;
+ const unsigned long *chunk_sizes;
+ int num_chunks;
+ struct rw_semaphore notifier_lock;
+ struct rb_root_cached root;
+ struct list_head notifier_list;
+#ifdef CONFIG_LOCKDEP
+ /**
+ * @lock_dep_map: Annotates drm_gpusvm_range_find_or_insert and
+ * drm_gpusvm_range_remove with a driver provided lock.
+ */
+ struct lockdep_map *lock_dep_map;
+#endif
+};
+
+/**
+ * struct drm_gpusvm_ctx - DRM GPU SVM context
+ *
+ * @check_pages_threshold: Check CPU pages for presence if the chunk is less than or
+ * equal to threshold. If not present, reduce chunk
+ * size.
+ * @in_notifier: entering from an MMU notifier
+ * @read_only: operating on read-only memory
+ * @devmem_possible: possible to use device memory
+ *
+ * Context that DRM GPUSVM is operating in (i.e. user arguments).
+ */
+struct drm_gpusvm_ctx {
+ unsigned long check_pages_threshold;
+ unsigned int in_notifier :1;
+ unsigned int read_only :1;
+ unsigned int devmem_possible :1;
+};
+
+int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
+ const char *name, struct drm_device *drm,
+ struct mm_struct *mm, void *device_private_page_owner,
+ unsigned long mm_start, unsigned long mm_range,
+ unsigned long notifier_size,
+ const struct drm_gpusvm_ops *ops,
+ const unsigned long *chunk_sizes, int num_chunks);
+
+void drm_gpusvm_fini(struct drm_gpusvm *gpusvm);
+
+void drm_gpusvm_free(struct drm_gpusvm *gpusvm);
+
+struct drm_gpusvm_range *
+drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
+ unsigned long fault_addr,
+ unsigned long gpuva_start,
+ unsigned long gpuva_end,
+ const struct drm_gpusvm_ctx *ctx);
+
+void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range);
+
+int drm_gpusvm_range_evict(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range);
+
+struct drm_gpusvm_range *
+drm_gpusvm_range_get(struct drm_gpusvm_range *range);
+
+void drm_gpusvm_range_put(struct drm_gpusvm_range *range);
+
+bool drm_gpusvm_range_pages_valid(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range);
+
+int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ const struct drm_gpusvm_ctx *ctx);
+
+void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ const struct drm_gpusvm_ctx *ctx);
+
+int drm_gpusvm_migrate_to_devmem(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ struct drm_gpusvm_devmem *devmem_allocation,
+ const struct drm_gpusvm_ctx *ctx);
+
+int drm_gpusvm_evict_to_ram(struct drm_gpusvm_devmem *devmem_allocation);
+
+const struct dev_pagemap_ops *drm_gpusvm_pagemap_ops_get(void);
+
+bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start,
+ unsigned long end);
+
+struct drm_gpusvm_range *
+drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
+ unsigned long end);
+
+void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
+ const struct mmu_notifier_range *mmu_range);
+
+void drm_gpusvm_devmem_init(struct drm_gpusvm_devmem *devmem_allocation,
+ struct device *dev, struct mm_struct *mm,
+ const struct drm_gpusvm_devmem_ops *ops,
+ struct drm_pagemap *dpagemap, size_t size);
+
+#ifdef CONFIG_LOCKDEP
+/**
+ * drm_gpusvm_driver_set_lock() - Set the lock protecting accesses to GPU SVM
+ * @gpusvm: Pointer to the GPU SVM structure.
+ * @lock: the lock used to protect the gpuva list. The locking primitive
+ * must contain a dep_map field.
+ *
+ * Call this to annotate drm_gpusvm_range_find_or_insert and
+ * drm_gpusvm_range_remove.
+ */
+#define drm_gpusvm_driver_set_lock(gpusvm, lock) \
+ do { \
+ if (!WARN((gpusvm)->lock_dep_map, \
+ "GPUSVM range lock should be set only once."))\
+ (gpusvm)->lock_dep_map = &(lock)->dep_map; \
+ } while (0)
+#else
+#define drm_gpusvm_driver_set_lock(gpusvm, lock) do {} while (0)
+#endif
+
+/**
+ * drm_gpusvm_notifier_lock() - Lock GPU SVM notifier
+ * @gpusvm__: Pointer to the GPU SVM structure.
+ *
+ * Abstract client usage GPU SVM notifier lock, take lock
+ */
+#define drm_gpusvm_notifier_lock(gpusvm__) \
+ down_read(&(gpusvm__)->notifier_lock)
+
+/**
+ * drm_gpusvm_notifier_unlock() - Unlock GPU SVM notifier
+ * @gpusvm__: Pointer to the GPU SVM structure.
+ *
+ * Abstract client usage GPU SVM notifier lock, drop lock
+ */
+#define drm_gpusvm_notifier_unlock(gpusvm__) \
+ up_read(&(gpusvm__)->notifier_lock)
+
+/**
+ * drm_gpusvm_range_start() - GPU SVM range start address
+ * @range: Pointer to the GPU SVM range
+ *
+ * Return: GPU SVM range start address
+ */
+static inline unsigned long
+drm_gpusvm_range_start(struct drm_gpusvm_range *range)
+{
+ return range->itree.start;
+}
+
+/**
+ * drm_gpusvm_range_end() - GPU SVM range end address
+ * @range: Pointer to the GPU SVM range
+ *
+ * Return: GPU SVM range end address
+ */
+static inline unsigned long
+drm_gpusvm_range_end(struct drm_gpusvm_range *range)
+{
+ return range->itree.last + 1;
+}
+
+/**
+ * drm_gpusvm_range_size() - GPU SVM range size
+ * @range: Pointer to the GPU SVM range
+ *
+ * Return: GPU SVM range size
+ */
+static inline unsigned long
+drm_gpusvm_range_size(struct drm_gpusvm_range *range)
+{
+ return drm_gpusvm_range_end(range) - drm_gpusvm_range_start(range);
+}
+
+/**
+ * drm_gpusvm_notifier_start() - GPU SVM notifier start address
+ * @notifier: Pointer to the GPU SVM notifier
+ *
+ * Return: GPU SVM notifier start address
+ */
+static inline unsigned long
+drm_gpusvm_notifier_start(struct drm_gpusvm_notifier *notifier)
+{
+ return notifier->itree.start;
+}
+
+/**
+ * drm_gpusvm_notifier_end() - GPU SVM notifier end address
+ * @notifier: Pointer to the GPU SVM notifier
+ *
+ * Return: GPU SVM notifier end address
+ */
+static inline unsigned long
+drm_gpusvm_notifier_end(struct drm_gpusvm_notifier *notifier)
+{
+ return notifier->itree.last + 1;
+}
+
+/**
+ * drm_gpusvm_notifier_size() - GPU SVM notifier size
+ * @notifier: Pointer to the GPU SVM notifier
+ *
+ * Return: GPU SVM notifier size
+ */
+static inline unsigned long
+drm_gpusvm_notifier_size(struct drm_gpusvm_notifier *notifier)
+{
+ return drm_gpusvm_notifier_end(notifier) -
+ drm_gpusvm_notifier_start(notifier);
+}
+
+/**
+ * __drm_gpusvm_range_next() - Get the next GPU SVM range in the list
+ * @range: a pointer to the current GPU SVM range
+ *
+ * Return: A pointer to the next drm_gpusvm_range if available, or NULL if the
+ * current range is the last one or if the input range is NULL.
+ */
+static inline struct drm_gpusvm_range *
+__drm_gpusvm_range_next(struct drm_gpusvm_range *range)
+{
+ if (range && !list_is_last(&range->entry,
+ &range->notifier->range_list))
+ return list_next_entry(range, entry);
+
+ return NULL;
+}
+
+/**
+ * drm_gpusvm_for_each_range() - Iterate over GPU SVM ranges in a notifier
+ * @range__: Iterator variable for the ranges. If set, it indicates the start of
+ * the iterator. If NULL, call drm_gpusvm_range_find() to get the range.
+ * @notifier__: Pointer to the GPU SVM notifier
+ * @start__: Start address of the range
+ * @end__: End address of the range
+ *
+ * This macro is used to iterate over GPU SVM ranges in a notifier. It is safe
+ * to use while holding the driver SVM lock or the notifier lock.
+ */
+#define drm_gpusvm_for_each_range(range__, notifier__, start__, end__) \
+ for ((range__) = (range__) ?: \
+ drm_gpusvm_range_find((notifier__), (start__), (end__)); \
+ (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
+ (range__) = __drm_gpusvm_range_next(range__))
+
+#endif /* __DRM_GPUSVM_H__ */
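A hedged usage sketch of this interface, not taken from any driver: the struct drm_gpusvm is embedded in the driver's VM object (as the kernel-doc above suggests), the mandatory invalidate hook is supplied, and affected ranges are walked under the notifier lock. All my_* names, the struct my_vm layout, the chunk sizes and the 512M notifier granularity are assumptions for illustration.

static void my_invalidate(struct drm_gpusvm *gpusvm,
			  struct drm_gpusvm_notifier *notifier,
			  const struct mmu_notifier_range *mmu_range)
{
	struct drm_gpusvm_range *range = NULL;

	/* Called with the notifier lock held; the range walk is safe here. */
	drm_gpusvm_for_each_range(range, notifier, mmu_range->start,
				  mmu_range->end)
		my_zap_gpu_ptes(gpusvm, drm_gpusvm_range_start(range),
				drm_gpusvm_range_end(range));
}

static const struct drm_gpusvm_ops my_gpusvm_ops = {
	.invalidate = my_invalidate,
};

static int my_vm_svm_init(struct my_vm *vm, struct drm_device *drm)
{
	/* Chunk sizes are powers of two in descending order. */
	static const unsigned long chunk_sizes[] = { SZ_2M, SZ_64K, SZ_4K };

	return drm_gpusvm_init(&vm->svm, "my-svm", drm, current->mm,
			       NULL /* device_private_page_owner */,
			       vm->start, vm->size, SZ_512M,
			       &my_gpusvm_ops, chunk_sizes,
			       ARRAY_SIZE(chunk_sizes));
}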
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index 00d4e43b76b6..2a9629377633 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -812,6 +812,11 @@ enum drm_gpuva_op_type {
* @DRM_GPUVA_OP_PREFETCH: the prefetch op type
*/
DRM_GPUVA_OP_PREFETCH,
+
+ /**
+ * @DRM_GPUVA_OP_DRIVER: the driver defined op type
+ */
+ DRM_GPUVA_OP_DRIVER,
};
/**
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
new file mode 100644
index 000000000000..202c157ff4d7
--- /dev/null
+++ b/include/drm/drm_pagemap.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef _DRM_PAGEMAP_H_
+#define _DRM_PAGEMAP_H_
+
+#include <linux/dma-direction.h>
+#include <linux/hmm.h>
+#include <linux/types.h>
+
+struct drm_pagemap;
+struct device;
+
+/**
+ * enum drm_interconnect_protocol - Used to identify an interconnect protocol.
+ *
+ * @DRM_INTERCONNECT_SYSTEM: DMA map is system pages
+ * @DRM_INTERCONNECT_DRIVER: DMA map is driver defined
+ */
+enum drm_interconnect_protocol {
+ DRM_INTERCONNECT_SYSTEM,
+ DRM_INTERCONNECT_DRIVER,
+ /* A driver can add private values beyond DRM_INTERCONNECT_DRIVER */
+};
+
+/**
+ * struct drm_pagemap_device_addr - Device address representation.
+ * @addr: The dma address or driver-defined address for driver private interconnects.
+ * @proto: The interconnect protocol.
+ * @order: The page order of the device mapping. (Size is PAGE_SIZE << order).
+ * @dir: The DMA direction.
+ *
+ * Note: There is room for improvement here. We should be able to pack into
+ * 64 bits.
+ */
+struct drm_pagemap_device_addr {
+ dma_addr_t addr;
+ u64 proto : 54;
+ u64 order : 8;
+ u64 dir : 2;
+};
+
+/**
+ * drm_pagemap_device_addr_encode() - Encode a dma address with metadata
+ * @addr: The dma address or driver-defined address for driver private interconnects.
+ * @proto: The interconnect protocol.
+ * @order: The page order of the dma mapping. (Size is PAGE_SIZE << order).
+ * @dir: The DMA direction.
+ *
+ * Return: A struct drm_pagemap_device_addr encoding the above information.
+ */
+static inline struct drm_pagemap_device_addr
+drm_pagemap_device_addr_encode(dma_addr_t addr,
+ enum drm_interconnect_protocol proto,
+ unsigned int order,
+ enum dma_data_direction dir)
+{
+ return (struct drm_pagemap_device_addr) {
+ .addr = addr,
+ .proto = proto,
+ .order = order,
+ .dir = dir,
+ };
+}
+
+/**
+ * struct drm_pagemap_ops: Ops for a drm-pagemap.
+ */
+struct drm_pagemap_ops {
+ /**
+ * @device_map: Map for device access or provide a virtual address suitable for
+ *
+ * @dpagemap: The struct drm_pagemap for the page.
+ * @dev: The device doing the mapping.
+ * @page: The page to map.
+ * @order: The page order of the device mapping. (Size is PAGE_SIZE << order).
+ * @dir: The transfer direction.
+ */
+ struct drm_pagemap_device_addr (*device_map)(struct drm_pagemap *dpagemap,
+ struct device *dev,
+ struct page *page,
+ unsigned int order,
+ enum dma_data_direction dir);
+
+ /**
+ * @device_unmap: Unmap a device address previously obtained using @device_map.
+ *
+ * @dpagemap: The struct drm_pagemap for the mapping.
+ * @dev: The device doing the unmapping.
+ * @addr: The device address obtained when mapping.
+ */
+ void (*device_unmap)(struct drm_pagemap *dpagemap,
+ struct device *dev,
+ struct drm_pagemap_device_addr addr);
+
+};
+
+/**
+ * struct drm_pagemap: Additional information for a struct dev_pagemap
+ * used for device p2p handshaking.
+ * @ops: The struct drm_pagemap_ops.
+ * @dev: The struct device owning the device-private memory.
+ */
+struct drm_pagemap {
+ const struct drm_pagemap_ops *ops;
+ struct device *dev;
+};
+
+#endif
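A hedged sketch of a backend for this interface; the my_* names are hypothetical. A driver exposing device-private memory over its own interconnect would translate the page to a driver-defined address and tag it with DRM_INTERCONNECT_DRIVER:

static struct drm_pagemap_device_addr
my_device_map(struct drm_pagemap *dpagemap, struct device *dev,
	      struct page *page, unsigned int order,
	      enum dma_data_direction dir)
{
	/* Hypothetical helper: VRAM offset backing this device-private page. */
	dma_addr_t addr = my_page_to_vram_offset(page);

	return drm_pagemap_device_addr_encode(addr, DRM_INTERCONNECT_DRIVER,
					      order, dir);
}

static void
my_device_unmap(struct drm_pagemap *dpagemap, struct device *dev,
		struct drm_pagemap_device_addr addr)
{
	/* Nothing to undo for a driver-private translation in this sketch. */
}

static const struct drm_pagemap_ops my_pagemap_ops = {
	.device_map = my_device_map,
	.device_unmap = my_device_unmap,
};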
diff --git a/include/dt-bindings/clock/qcom,dsi-phy-28nm.h b/include/dt-bindings/clock/qcom,dsi-phy-28nm.h
new file mode 100644
index 000000000000..ab94d58377a1
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dsi-phy-28nm.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DSI_PHY_28NM_H
+#define _DT_BINDINGS_CLK_QCOM_DSI_PHY_28NM_H
+
+#define DSI_BYTE_PLL_CLK 0
+#define DSI_PIXEL_PLL_CLK 1
+
+#endif
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index fa2a76cc2f73..71f4f0cc3dac 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -28,7 +28,7 @@ typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
typedef __u32 __bitwise req_flags_t;
/* Keep rqf_name[] in sync with the definitions below */
-enum {
+enum rqf_flags {
/* drive already may have started this one */
__RQF_STARTED,
/* request for flush sequence */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 248416ecd01c..d37751789bf5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -196,10 +196,11 @@ struct gendisk {
unsigned int zone_capacity;
unsigned int last_zone_capacity;
unsigned long __rcu *conv_zones_bitmap;
- unsigned int zone_wplugs_hash_bits;
- spinlock_t zone_wplugs_lock;
+ unsigned int zone_wplugs_hash_bits;
+ atomic_t nr_zone_wplugs;
+ spinlock_t zone_wplugs_lock;
struct mempool_s *zone_wplugs_pool;
- struct hlist_head *zone_wplugs_hash;
+ struct hlist_head *zone_wplugs_hash;
struct workqueue_struct *zone_wplugs_wq;
#endif /* CONFIG_BLK_DEV_ZONED */
@@ -367,6 +368,7 @@ struct queue_limits {
unsigned int max_sectors;
unsigned int max_user_sectors;
unsigned int max_segment_size;
+ unsigned int min_segment_size;
unsigned int physical_block_size;
unsigned int logical_block_size;
unsigned int alignment_offset;
diff --git a/include/linux/call_once.h b/include/linux/call_once.h
index 6261aa0b3fb0..13cd6469e7e5 100644
--- a/include/linux/call_once.h
+++ b/include/linux/call_once.h
@@ -26,20 +26,41 @@ do { \
__once_init((once), #once, &__key); \
} while (0)
-static inline void call_once(struct once *once, void (*cb)(struct once *))
+/*
+ * call_once - Ensure a function has been called exactly once
+ *
+ * @once: Tracking struct
+ * @cb: Function to be called
+ *
+ * If @once has never completed successfully before, call @cb and, if
+ * it returns a zero or positive value, mark @once as completed. Return
+ * the value returned by @cb.
+ *
+ * If @once has completed successfully before, return 0.
+ *
+ * The call to @cb is implicitly surrounded by a mutex, though for
+ * efficiency the function avoids taking it after the first call.
+ */
+static inline int call_once(struct once *once, int (*cb)(struct once *))
{
- /* Pairs with atomic_set_release() below. */
- if (atomic_read_acquire(&once->state) == ONCE_COMPLETED)
- return;
-
- guard(mutex)(&once->lock);
- WARN_ON(atomic_read(&once->state) == ONCE_RUNNING);
- if (atomic_read(&once->state) != ONCE_NOT_STARTED)
- return;
-
- atomic_set(&once->state, ONCE_RUNNING);
- cb(once);
- atomic_set_release(&once->state, ONCE_COMPLETED);
+ int r, state;
+
+ /* Pairs with atomic_set_release() below. */
+ if (atomic_read_acquire(&once->state) == ONCE_COMPLETED)
+ return 0;
+
+ guard(mutex)(&once->lock);
+ state = atomic_read(&once->state);
+ if (unlikely(state != ONCE_NOT_STARTED))
+ return WARN_ON_ONCE(state != ONCE_COMPLETED) ? -EINVAL : 0;
+
+ atomic_set(&once->state, ONCE_RUNNING);
+ r = cb(once);
+ if (r < 0)
+ atomic_set(&once->state, ONCE_NOT_STARTED);
+ else
+ atomic_set_release(&once->state, ONCE_COMPLETED);
+ return r;
}
#endif /* _LINUX_CALL_ONCE_H */
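A hedged usage sketch of the new return-value semantics. The my_* names are hypothetical, and the once object is assumed to have been set up beforehand with the init macro defined just above this hunk.

static struct once my_hw_once;

static int my_hw_init_cb(struct once *once)
{
	/* A negative return leaves @once NOT_STARTED, so it will be retried. */
	return my_probe_hardware();
}

int my_open(void)
{
	int ret = call_once(&my_hw_once, my_hw_init_cb);

	if (ret < 0)
		return ret;	/* init failed this time; a later open retries */

	/* 0 here means init has completed, now or on an earlier call. */
	return my_do_open();
}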
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index e94776496049..7bf0c521db63 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -80,6 +80,11 @@ static inline unsigned long compact_gap(unsigned int order)
return 2UL << order;
}
+static inline int current_is_kcompactd(void)
+{
+ return current->flags & PF_KCOMPACTD;
+}
+
#ifdef CONFIG_COMPACTION
extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 200fd3c5bc70..155385754824 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -110,7 +110,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
/* Unreachable code */
#ifdef CONFIG_OBJTOOL
/* Annotate a C jump table to allow objtool to follow the code flow */
-#define __annotate_jump_table __section(".rodata..c_jump_table,\"a\",@progbits #")
+#define __annotate_jump_table __section(".data.rel.ro.c_jump_table")
#else /* !CONFIG_OBJTOOL */
#define __annotate_jump_table
#endif /* CONFIG_OBJTOOL */
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 0c3c4b16b469..5658a3bfe803 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -172,18 +172,12 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred)
static inline const struct cred *override_creds(const struct cred *override_cred)
{
- const struct cred *old = current->cred;
-
- rcu_assign_pointer(current->cred, override_cred);
- return old;
+ return rcu_replace_pointer(current->cred, override_cred, 1);
}
static inline const struct cred *revert_creds(const struct cred *revert_cred)
{
- const struct cred *override_cred = current->cred;
-
- rcu_assign_pointer(current->cred, revert_cred);
- return override_cred;
+ return rcu_replace_pointer(current->cred, revert_cred, 1);
}
/**
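The calling convention is unchanged by the rcu_replace_pointer() rewrite; callers still pair the two, roughly as below (new_cred is a placeholder name):

	const struct cred *old = override_creds(new_cred);
	/* ... act with new_cred as current->cred ... */
	revert_creds(old);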
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2c3b2f8a621f..2788df98080f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2975,8 +2975,8 @@ static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
} else if (iocb->ki_flags & IOCB_DONTCACHE) {
struct address_space *mapping = iocb->ki_filp->f_mapping;
- filemap_fdatawrite_range_kick(mapping, iocb->ki_pos,
- iocb->ki_pos + count);
+ filemap_fdatawrite_range_kick(mapping, iocb->ki_pos - count,
+ iocb->ki_pos - 1);
}
return count;
@@ -3452,6 +3452,8 @@ extern const struct file_operations generic_ro_fops;
extern int readlink_copy(char __user *, int, const char *, int);
extern int page_readlink(struct dentry *, char __user *, int);
+extern const char *page_get_link_raw(struct dentry *, struct inode *,
+ struct delayed_call *);
extern const char *page_get_link(struct dentry *, struct inode *,
struct delayed_call *);
extern void page_put_link(void *);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index ec8c0ccc8f95..76a75ec03dd6 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -682,6 +682,7 @@ struct huge_bootmem_page {
int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn);
+void wait_for_freed_hugetlb_folios(void);
struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
unsigned long addr, bool cow_from_owner);
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
@@ -1004,7 +1005,9 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
- return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ unsigned long psize = huge_page_size(hstate_vma(vma));
+
+ return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
}
#endif
@@ -1066,6 +1069,10 @@ static inline int replace_free_hugepage_folios(unsigned long start_pfn,
return 0;
}
+static inline void wait_for_freed_hugetlb_folios(void)
+{
+}
+
static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
unsigned long addr,
bool cow_from_owner)
diff --git a/include/linux/log2.h b/include/linux/log2.h
index 9f30d087a128..1366cb688a6d 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -41,7 +41,7 @@ int __ilog2_u64(u64 n)
* *not* considered a power of two.
* Return: true if @n is a power of 2, otherwise false.
*/
-static inline __attribute__((const))
+static __always_inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
return (n != 0 && ((n & (n - 1)) == 0));
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 29919faea2f1..80891120cca9 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -227,6 +227,7 @@ void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
int migrate_device_range(unsigned long *src_pfns, unsigned long start,
unsigned long npages);
+int migrate_device_pfns(unsigned long *src_pfns, unsigned long npages);
void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
unsigned long npages);
void migrate_device_finalize(unsigned long *src_pfns,
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 71fbebfa43c7..9ac83ca88326 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -47,6 +47,7 @@ struct nfs4_acl {
struct nfs4_label {
uint32_t lfs;
uint32_t pi;
+ u32 lsmid;
u32 len;
char *label;
};
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 8ff23bf5a819..b698758000f8 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -31,6 +31,33 @@ struct pipe_buffer {
unsigned long private;
};
+/*
+ * Really only alpha needs 32-bit fields, but
+ * might as well do it for 64-bit architectures
+ * since that's what we've historically done,
+ * and it makes 'head_tail' always be a simple
+ * 'unsigned long'.
+ */
+#ifdef CONFIG_64BIT
+typedef unsigned int pipe_index_t;
+#else
+typedef unsigned short pipe_index_t;
+#endif
+
+/*
+ * We have to declare this outside 'struct pipe_inode_info',
+ * but then we can't use 'union pipe_index' for an anonymous
+ * union, so we end up having to duplicate this declaration
+ * below. Annoying.
+ */
+union pipe_index {
+ unsigned long head_tail;
+ struct {
+ pipe_index_t head;
+ pipe_index_t tail;
+ };
+};
+
/**
* struct pipe_inode_info - a linux kernel pipe
* @mutex: mutex protecting the whole thing
@@ -38,6 +65,7 @@ struct pipe_buffer {
* @wr_wait: writer wait point in case of full pipe
* @head: The point of buffer production
* @tail: The point of buffer consumption
+ * @head_tail: unsigned long union of @head and @tail
* @note_loss: The next read() should insert a data-lost message
* @max_usage: The maximum number of slots that may be used in the ring
* @ring_size: total number of buffers (should be a power of 2)
@@ -58,8 +86,16 @@ struct pipe_buffer {
struct pipe_inode_info {
struct mutex mutex;
wait_queue_head_t rd_wait, wr_wait;
- unsigned int head;
- unsigned int tail;
+
+ /* This has to match the 'union pipe_index' above */
+ union {
+ unsigned long head_tail;
+ struct {
+ pipe_index_t head;
+ pipe_index_t tail;
+ };
+ };
+
unsigned int max_usage;
unsigned int ring_size;
unsigned int nr_accounted;
@@ -141,23 +177,23 @@ static inline bool pipe_has_watch_queue(const struct pipe_inode_info *pipe)
}
/**
- * pipe_empty - Return true if the pipe is empty
+ * pipe_occupancy - Return number of slots used in the pipe
* @head: The pipe ring head pointer
* @tail: The pipe ring tail pointer
*/
-static inline bool pipe_empty(unsigned int head, unsigned int tail)
+static inline unsigned int pipe_occupancy(unsigned int head, unsigned int tail)
{
- return head == tail;
+ return (pipe_index_t)(head - tail);
}
/**
- * pipe_occupancy - Return number of slots used in the pipe
+ * pipe_empty - Return true if the pipe is empty
* @head: The pipe ring head pointer
* @tail: The pipe ring tail pointer
*/
-static inline unsigned int pipe_occupancy(unsigned int head, unsigned int tail)
+static inline bool pipe_empty(unsigned int head, unsigned int tail)
{
- return head - tail;
+ return !pipe_occupancy(head, tail);
}
/**
@@ -173,6 +209,33 @@ static inline bool pipe_full(unsigned int head, unsigned int tail,
}
/**
+ * pipe_is_full - Return true if the pipe is full
+ * @pipe: the pipe
+ */
+static inline bool pipe_is_full(const struct pipe_inode_info *pipe)
+{
+ return pipe_full(pipe->head, pipe->tail, pipe->max_usage);
+}
+
+/**
+ * pipe_is_empty - Return true if the pipe is empty
+ * @pipe: the pipe
+ */
+static inline bool pipe_is_empty(const struct pipe_inode_info *pipe)
+{
+ return pipe_empty(pipe->head, pipe->tail);
+}
+
+/**
+ * pipe_buf_usage - Return how many pipe buffers are in use
+ * @pipe: the pipe
+ */
+static inline unsigned int pipe_buf_usage(const struct pipe_inode_info *pipe)
+{
+ return pipe_occupancy(pipe->head, pipe->tail);
+}
+
+/**
* pipe_buf - Return the pipe buffer for the specified slot in the pipe ring
* @pipe: The pipe to access
* @slot: The slot of interest
@@ -245,15 +308,6 @@ static inline bool pipe_buf_try_steal(struct pipe_inode_info *pipe,
return buf->ops->try_steal(pipe, buf);
}
-static inline void pipe_discard_from(struct pipe_inode_info *pipe,
- unsigned int old_head)
-{
- unsigned int mask = pipe->ring_size - 1;
-
- while (pipe->head > old_head)
- pipe_buf_release(pipe, &pipe->bufs[--pipe->head & mask]);
-}
-
/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
memory allocation, whereas PIPE_BUF makes atomicity guarantees. */
#define PIPE_SIZE PAGE_SIZE
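A hedged illustration of what the shared head_tail word buys (the helper name is hypothetical; the real lockless users live in fs/pipe.c): both indices can be snapshotted with a single load and compared consistently without taking the pipe mutex.

static inline bool pipe_snapshot_empty(const struct pipe_inode_info *pipe)
{
	union pipe_index idx = { .head_tail = READ_ONCE(pipe->head_tail) };

	return pipe_empty(idx.head, idx.tail);
}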
diff --git a/include/linux/platform_profile.h b/include/linux/platform_profile.h
index 8ab5b0e8eb2c..8c9df7dadd5d 100644
--- a/include/linux/platform_profile.h
+++ b/include/linux/platform_profile.h
@@ -33,6 +33,8 @@ enum platform_profile_option {
* @probe: Callback to setup choices available to the new class device. These
* choices will only be enforced when setting a new profile, not when
* getting the current one.
+ * @hidden_choices: Callback to setup choices that are not visible to the user
+ * but can be set by the driver.
* @profile_get: Callback that will be called when showing the current platform
* profile in sysfs.
* @profile_set: Callback that will be called when storing a new platform
@@ -40,6 +42,7 @@ enum platform_profile_option {
*/
struct platform_profile_ops {
int (*probe)(void *drvdata, unsigned long *choices);
+ int (*hidden_choices)(void *drvdata, unsigned long *choices);
int (*profile_get)(struct device *dev, enum platform_profile_option *profile);
int (*profile_set)(struct device *dev, enum platform_profile_option profile);
};
diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h
index 2c8bfd0f1b6b..6322d8c1c6b4 100644
--- a/include/linux/rcuref.h
+++ b/include/linux/rcuref.h
@@ -71,27 +71,30 @@ static inline __must_check bool rcuref_get(rcuref_t *ref)
return rcuref_get_slowpath(ref);
}
-extern __must_check bool rcuref_put_slowpath(rcuref_t *ref);
+extern __must_check bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt);
/*
* Internal helper. Do not invoke directly.
*/
static __always_inline __must_check bool __rcuref_put(rcuref_t *ref)
{
+ int cnt;
+
RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(),
"suspicious rcuref_put_rcusafe() usage");
/*
* Unconditionally decrease the reference count. The saturation and
* dead zones provide enough tolerance for this.
*/
- if (likely(!atomic_add_negative_release(-1, &ref->refcnt)))
+ cnt = atomic_sub_return_release(1, &ref->refcnt);
+ if (likely(cnt >= 0))
return false;
/*
* Handle the last reference drop and cases inside the saturation
* and dead zones.
*/
- return rcuref_put_slowpath(ref);
+ return rcuref_put_slowpath(ref, cnt);
}
/**
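The caller-visible contract of rcuref is unchanged by the slowpath signature tweak above; as a minimal sketch (object layout and the RCU-deferred free are assumptions for illustration), only the caller that sees the put return true may release the object:

    #include <linux/rcuref.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct example_obj {
            rcuref_t ref;
            /* ... payload ... */
    };

    static void example_put(struct example_obj *obj)
    {
            /* True only for the final reference drop. */
            if (rcuref_put(&obj->ref))
                    kfree_rcu_mightsleep(obj);  /* assumed RCU-deferred free */
    }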
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9632e3318e0d..9c15365a30c0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1701,7 +1701,7 @@ extern struct pid *cad_pid;
#define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */
#define PF_USER_WORKER 0x00004000 /* Kernel thread cloned from userspace thread */
#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
-#define PF__HOLE__00010000 0x00010000
+#define PF_KCOMPACTD 0x00010000 /* I am kcompactd */
#define PF_KSWAPD 0x00020000 /* I am kswapd */
#define PF_MEMALLOC_NOFS 0x00040000 /* All allocations inherit GFP_NOFS. See memalloc_nfs_save() */
#define PF_MEMALLOC_NOIO 0x00080000 /* All allocations inherit GFP_NOIO. See memalloc_noio_save() */
diff --git a/include/linux/socket.h b/include/linux/socket.h
index d18cc47e89bd..c3322eb3d686 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -392,6 +392,8 @@ struct ucred {
extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
+extern int put_cmsg_notrunc(struct msghdr *msg, int level, int type, int len,
+ void *data);
struct timespec64;
struct __kernel_timespec;
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index fec1e8a1570c..eac57914dcf3 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -158,7 +158,6 @@ enum {
RPC_TASK_NEED_XMIT,
RPC_TASK_NEED_RECV,
RPC_TASK_MSG_PIN_WAIT,
- RPC_TASK_SIGNALLED,
};
#define rpc_test_and_set_running(t) \
@@ -171,7 +170,7 @@ enum {
#define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
-#define RPC_SIGNALLED(t) test_bit(RPC_TASK_SIGNALLED, &(t)->tk_runstate)
+#define RPC_SIGNALLED(t) (READ_ONCE(task->tk_rpc_status) == -ERESTARTSYS)
/*
* Task priorities.
diff --git a/include/net/sock.h b/include/net/sock.h
index 8036b3b79cd8..7ef728324e4e 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1751,6 +1751,7 @@ static inline bool sock_allow_reclassification(const struct sock *csk)
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
struct proto *prot, int kern);
void sk_free(struct sock *sk);
+void sk_net_refcnt_upgrade(struct sock *sk);
void sk_destruct(struct sock *sk);
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
void sk_free_unlock_clone(struct sock *sk);
diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
index 3dc7a1551ac3..5d653a3491d0 100644
--- a/include/sound/cs35l56.h
+++ b/include/sound/cs35l56.h
@@ -12,6 +12,7 @@
#include <linux/firmware/cirrus/cs_dsp.h>
#include <linux/regulator/consumer.h>
#include <linux/regmap.h>
+#include <linux/spi/spi.h>
#include <sound/cs-amp-lib.h>
#define CS35L56_DEVID 0x0000000
@@ -61,6 +62,7 @@
#define CS35L56_IRQ1_MASK_8 0x000E0AC
#define CS35L56_IRQ1_MASK_18 0x000E0D4
#define CS35L56_IRQ1_MASK_20 0x000E0DC
+#define CS35L56_DSP_MBOX_1_RAW 0x0011000
#define CS35L56_DSP_VIRTUAL1_MBOX_1 0x0011020
#define CS35L56_DSP_VIRTUAL1_MBOX_2 0x0011024
#define CS35L56_DSP_VIRTUAL1_MBOX_3 0x0011028
@@ -224,6 +226,7 @@
#define CS35L56_HALO_STATE_SHUTDOWN 1
#define CS35L56_HALO_STATE_BOOT_DONE 2
+#define CS35L56_MBOX_CMD_PING 0x0A000000
#define CS35L56_MBOX_CMD_AUDIO_PLAY 0x0B000001
#define CS35L56_MBOX_CMD_AUDIO_PAUSE 0x0B000002
#define CS35L56_MBOX_CMD_AUDIO_REINIT 0x0B000003
@@ -254,6 +257,16 @@
#define CS35L56_NUM_BULK_SUPPLIES 3
#define CS35L56_NUM_DSP_REGIONS 5
+/* Additional margin for SYSTEM_RESET to control port ready on SPI */
+#define CS35L56_SPI_RESET_TO_PORT_READY_US (CS35L56_CONTROL_PORT_READY_US + 2500)
+
+struct cs35l56_spi_payload {
+ __be32 addr;
+ __be16 pad;
+ __be32 value;
+} __packed;
+static_assert(sizeof(struct cs35l56_spi_payload) == 10);
+
struct cs35l56_base {
struct device *dev;
struct regmap *regmap;
@@ -269,6 +282,7 @@ struct cs35l56_base {
s8 cal_index;
struct cirrus_amp_cal_data cal_data;
struct gpio_desc *reset_gpio;
+ struct cs35l56_spi_payload *spi_payload_buf;
};
static inline bool cs35l56_is_otp_register(unsigned int reg)
@@ -276,6 +290,23 @@ static inline bool cs35l56_is_otp_register(unsigned int reg)
return (reg >> 16) == 3;
}
+static inline int cs35l56_init_config_for_spi(struct cs35l56_base *cs35l56,
+ struct spi_device *spi)
+{
+ cs35l56->spi_payload_buf = devm_kzalloc(&spi->dev,
+ sizeof(*cs35l56->spi_payload_buf),
+ GFP_KERNEL | GFP_DMA);
+ if (!cs35l56->spi_payload_buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline bool cs35l56_is_spi(struct cs35l56_base *cs35l56)
+{
+ return IS_ENABLED(CONFIG_SPI_MASTER) && !!cs35l56->spi_payload_buf;
+}
+
extern const struct regmap_config cs35l56_regmap_i2c;
extern const struct regmap_config cs35l56_regmap_spi;
extern const struct regmap_config cs35l56_regmap_sdw;
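A hedged sketch of how an SPI probe path might use the new helper above to allocate the DMA-safe payload buffer before setting up the SPI regmap; the probe function and the rest of the driver plumbing are assumptions, not part of this patch:

    #include <linux/spi/spi.h>
    #include <sound/cs35l56.h>

    static int example_spi_probe(struct spi_device *spi)
    {
            struct cs35l56_base *cs35l56;
            int ret;

            cs35l56 = devm_kzalloc(&spi->dev, sizeof(*cs35l56), GFP_KERNEL);
            if (!cs35l56)
                    return -ENOMEM;

            cs35l56->dev = &spi->dev;

            /* Allocate the DMA-safe SPI payload buffer used by the SPI transport. */
            ret = cs35l56_init_config_for_spi(cs35l56, spi);
            if (ret)
                    return ret;

            /* cs35l56_is_spi(cs35l56) now reports true; continue with regmap setup. */
            return 0;
    }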
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index b0db89058c91..958a2460330c 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -174,6 +174,7 @@ enum yfs_cm_operation {
EM(afs_cell_trace_get_queue_dns, "GET q-dns ") \
EM(afs_cell_trace_get_queue_manage, "GET q-mng ") \
EM(afs_cell_trace_get_queue_new, "GET q-new ") \
+ EM(afs_cell_trace_get_server, "GET server") \
EM(afs_cell_trace_get_vol, "GET vol ") \
EM(afs_cell_trace_insert, "INSERT ") \
EM(afs_cell_trace_manage, "MANAGE ") \
@@ -182,6 +183,7 @@ enum yfs_cm_operation {
EM(afs_cell_trace_put_destroy, "PUT destry") \
EM(afs_cell_trace_put_queue_work, "PUT q-work") \
EM(afs_cell_trace_put_queue_fail, "PUT q-fail") \
+ EM(afs_cell_trace_put_server, "PUT server") \
EM(afs_cell_trace_put_vol, "PUT vol ") \
EM(afs_cell_trace_see_source, "SEE source") \
EM(afs_cell_trace_see_ws, "SEE ws ") \
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index b13dc275ef4a..851841336ee6 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -360,8 +360,7 @@ TRACE_EVENT(rpc_request,
{ (1UL << RPC_TASK_ACTIVE), "ACTIVE" }, \
{ (1UL << RPC_TASK_NEED_XMIT), "NEED_XMIT" }, \
{ (1UL << RPC_TASK_NEED_RECV), "NEED_RECV" }, \
- { (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" }, \
- { (1UL << RPC_TASK_SIGNALLED), "SIGNALLED" })
+ { (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" })
DECLARE_EVENT_CLASS(rpc_task_running,
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 892f54d3aa09..616916985e3f 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -393,6 +393,10 @@ struct drm_xe_query_mem_regions {
*
* - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
* has usable VRAM
+ * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY - Flag is set if the device
+ * has low latency hint support
+ * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR - Flag is set if the
+ * device has CPU address mirroring support
* - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
* required by this device, typically SZ_4K or SZ_64K
* - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
@@ -409,6 +413,8 @@ struct drm_xe_query_config {
#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
#define DRM_XE_QUERY_CONFIG_FLAGS 1
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
+ #define DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY (1 << 1)
+ #define DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR (1 << 2)
#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
#define DRM_XE_QUERY_CONFIG_VA_BITS 3
#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
@@ -735,6 +741,7 @@ struct drm_xe_device_query {
#define DRM_XE_DEVICE_QUERY_UC_FW_VERSION 7
#define DRM_XE_DEVICE_QUERY_OA_UNITS 8
#define DRM_XE_DEVICE_QUERY_PXP_STATUS 9
+#define DRM_XE_DEVICE_QUERY_EU_STALL 10
/** @query: The type of data to query */
__u32 query;
@@ -986,6 +993,12 @@ struct drm_xe_vm_destroy {
* - %DRM_XE_VM_BIND_FLAG_CHECK_PXP - If the object is encrypted via PXP,
* reject the binding if the encryption key is no longer valid. This
* flag has no effect on BOs that are not marked as using PXP.
+ * - %DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR - When the CPU address mirror flag is
+ * set, no mappings are created; rather, the range is reserved for CPU address
+ * mirroring, which will be populated on GPU page faults or prefetches. Only
+ * valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address
+ * mirror flag is only valid for DRM_XE_VM_BIND_OP_MAP operations; the BO
+ * handle MBZ, and the BO offset MBZ.
*/
struct drm_xe_vm_bind_op {
/** @extensions: Pointer to the first extension struct, if any */
@@ -1038,7 +1051,9 @@ struct drm_xe_vm_bind_op {
* on the @pat_index. For such mappings there is no actual memory being
* mapped (the address in the PTE is invalid), so the various PAT memory
* attributes likely do not apply. Simply leaving as zero is one
- * option (still a valid pat_index).
+ * option (still a valid pat_index). The same applies to
+ * DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR bindings, as for such mappings
+ * there is no actual memory being mapped.
*/
__u16 pat_index;
@@ -1054,6 +1069,14 @@ struct drm_xe_vm_bind_op {
/** @userptr: user pointer to bind on */
__u64 userptr;
+
+ /**
+ * @cpu_addr_mirror_offset: Offset from GPU @addr to create
+ * CPU address mirror mappings. MBZ with the current level of
+ * support (i.e., only a 1:1 mapping between GPU and CPU
+ * mappings is supported).
+ */
+ __s64 cpu_addr_mirror_offset;
};
/**
@@ -1077,6 +1100,7 @@ struct drm_xe_vm_bind_op {
#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
#define DRM_XE_VM_BIND_FLAG_CHECK_PXP (1 << 4)
+#define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR (1 << 5)
/** @flags: Bind flags */
__u32 flags;
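As a hedged userspace sketch of the bind documented above (the fd, the fault-mode VM creation, and the address/range values are assumptions; only the flag semantics come from this patch), a CPU-address-mirror range could be reserved like this:

    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    /* Reserve [addr, addr + range) for CPU address mirroring on a fault-mode VM. */
    static int example_bind_cpu_addr_mirror(int fd, __u32 vm_id,
                                            __u64 addr, __u64 range)
    {
            struct drm_xe_vm_bind bind = {
                    .vm_id = vm_id,
                    .num_binds = 1,
                    .bind = {
                            .op = DRM_XE_VM_BIND_OP_MAP,
                            .flags = DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR,
                            .obj = 0,                       /* BO handle MBZ */
                            .cpu_addr_mirror_offset = 0,    /* MBZ: 1:1 mirroring */
                            .addr = addr,
                            .range = range,
                    },
            };

            return ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
    }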
@@ -1204,6 +1228,21 @@ struct drm_xe_vm_bind {
* };
* ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
*
+ * Users can provide a hint to the kernel for cases demanding a low-latency
+ * profile. Please note it will have an impact on power consumption. The
+ * low-latency hint can be indicated with a flag while creating an exec
+ * queue, as shown below:
+ *
+ * struct drm_xe_exec_queue_create exec_queue_create = {
+ * .flags = DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT,
+ * .extensions = 0,
+ * .vm_id = vm,
+ * .num_bb_per_exec = 1,
+ * .num_eng_per_bb = 1,
+ * .instances = to_user_pointer(&instance),
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
+ *
*/
struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
@@ -1222,7 +1261,8 @@ struct drm_xe_exec_queue_create {
/** @vm_id: VM to use for this exec queue */
__u32 vm_id;
- /** @flags: MBZ */
+#define DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT (1 << 0)
+ /** @flags: flags to use for this exec queue */
__u32 flags;
/** @exec_queue_id: Returned exec queue ID */
@@ -1496,6 +1536,8 @@ struct drm_xe_wait_user_fence {
enum drm_xe_observation_type {
/** @DRM_XE_OBSERVATION_TYPE_OA: OA observation stream type */
DRM_XE_OBSERVATION_TYPE_OA,
+ /** @DRM_XE_OBSERVATION_TYPE_EU_STALL: EU stall sampling observation stream type */
+ DRM_XE_OBSERVATION_TYPE_EU_STALL,
};
/**
@@ -1848,6 +1890,77 @@ enum drm_xe_pxp_session_type {
/* ID of the protected content session managed by Xe when PXP is active */
#define DRM_XE_PXP_HWDRM_DEFAULT_SESSION 0xf
+/**
+ * enum drm_xe_eu_stall_property_id - EU stall sampling input property ids.
+ *
+ * These properties are passed to the driver at open as a chain of
+ * @drm_xe_ext_set_property structures with @property set to these
+ * properties' enums and @value set to the corresponding values of these
+ * properties. @drm_xe_user_extension base.name should be set to
+ * @DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY.
+ *
+ * With the file descriptor obtained from open, user space must enable
+ * the EU stall stream fd with @DRM_XE_OBSERVATION_IOCTL_ENABLE before
+ * calling read(). An EIO errno from read() indicates that the HW dropped
+ * data due to a full buffer.
+ */
+enum drm_xe_eu_stall_property_id {
+#define DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY 0
+ /**
+ * @DRM_XE_EU_STALL_PROP_GT_ID: @gt_id of the GT on which
+ * EU stall data will be captured.
+ */
+ DRM_XE_EU_STALL_PROP_GT_ID = 1,
+
+ /**
+ * @DRM_XE_EU_STALL_PROP_SAMPLE_RATE: Sampling rate in
+ * GPU cycles from @sampling_rates in struct @drm_xe_query_eu_stall
+ */
+ DRM_XE_EU_STALL_PROP_SAMPLE_RATE,
+
+ /**
+ * @DRM_XE_EU_STALL_PROP_WAIT_NUM_REPORTS: Minimum number of
+ * EU stall data reports to be present in the kernel buffer
+ * before unblocking a blocked poll or read.
+ */
+ DRM_XE_EU_STALL_PROP_WAIT_NUM_REPORTS,
+};
+
+/**
+ * struct drm_xe_query_eu_stall - Information about EU stall sampling.
+ *
+ * If a query is made with a struct @drm_xe_device_query where .query
+ * is equal to @DRM_XE_DEVICE_QUERY_EU_STALL, then the reply uses
+ * struct @drm_xe_query_eu_stall in .data.
+ */
+struct drm_xe_query_eu_stall {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @capabilities: EU stall capabilities bit-mask */
+ __u64 capabilities;
+#define DRM_XE_EU_STALL_CAPS_BASE (1 << 0)
+
+ /** @record_size: size of each EU stall data record */
+ __u64 record_size;
+
+ /** @per_xecore_buf_size: internal per XeCore buffer size */
+ __u64 per_xecore_buf_size;
+
+ /** @reserved: Reserved */
+ __u64 reserved[5];
+
+ /** @num_sampling_rates: Number of sampling rates in @sampling_rates array */
+ __u64 num_sampling_rates;
+
+ /**
+ * @sampling_rates: Flexible array of sampling rates
+ * sorted in the fastest to slowest order.
+ * Sampling rates are specified in GPU clock cycles.
+ */
+ __u64 sampling_rates[];
+};
+
#if defined(__cplusplus)
}
#endif
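A hedged userspace sketch of fetching struct drm_xe_query_eu_stall via DRM_XE_DEVICE_QUERY_EU_STALL; the two-call size-then-data pattern and the error handling are the usual Xe query convention and an assumption here, not spelled out in this hunk:

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static struct drm_xe_query_eu_stall *example_query_eu_stall(int fd)
    {
            struct drm_xe_device_query query = {
                    .query = DRM_XE_DEVICE_QUERY_EU_STALL,
            };
            struct drm_xe_query_eu_stall *info;

            /* First call with .data = 0 asks the kernel for the required size. */
            if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
                    return NULL;

            info = calloc(1, query.size);
            if (!info)
                    return NULL;

            query.data = (uintptr_t)info;
            if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query)) {
                    free(info);
                    return NULL;
            }

            /* info->sampling_rates[0] is the fastest supported rate, in GPU cycles. */
            return info;
    }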
diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h
index 33745642f787..e1d2c27533b4 100644
--- a/include/uapi/linux/landlock.h
+++ b/include/uapi/linux/landlock.h
@@ -268,7 +268,9 @@ struct landlock_net_port_attr {
* ~~~~~~~~~~~~~~~~
*
* These flags enable to restrict a sandboxed process to a set of network
- * actions. This is supported since the Landlock ABI version 4.
+ * actions.
+ *
+ * This is supported since Landlock ABI version 4.
*
* The following access rights apply to TCP port numbers:
*
@@ -291,11 +293,13 @@ struct landlock_net_port_attr {
* Setting a flag for a ruleset will isolate the Landlock domain to forbid
* connections to resources outside the domain.
*
+ * This is supported since Landlock ABI version 6.
+ *
* Scopes:
*
* - %LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET: Restrict a sandboxed process from
* connecting to an abstract UNIX socket created by a process outside the
- * related Landlock domain (e.g. a parent domain or a non-sandboxed process).
+ * related Landlock domain (e.g., a parent domain or a non-sandboxed process).
* - %LANDLOCK_SCOPE_SIGNAL: Restrict a sandboxed process from sending a signal
* to another process outside the domain.
*/
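A minimal sketch of a ruleset using the scope flags documented above (Landlock ABI version 6); calling the syscall directly and omitting any filesystem or network rules are simplifications for illustration:

    #include <linux/landlock.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int example_scoped_ruleset(void)
    {
            const struct landlock_ruleset_attr attr = {
                    /* Deny IPC with processes outside the new domain. */
                    .scoped = LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET |
                              LANDLOCK_SCOPE_SIGNAL,
            };

            /* No libc wrapper is assumed; invoke the syscall directly. */
            return syscall(SYS_landlock_create_ruleset, &attr, sizeof(attr), 0);
    }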
diff --git a/io_uring/net.c b/io_uring/net.c
index 17852a6616ff..5d0b56ff50ee 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -322,7 +322,9 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
if (unlikely(ret))
return ret;
- return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
+ ret = __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
+ sr->msg_control = iomsg->msg.msg_control_user;
+ return ret;
}
#endif
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 9edc6baebd01..e5528cebcd06 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -560,11 +560,10 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
if (kiocb->ki_flags & IOCB_WRITE)
io_req_end_write(req);
if (unlikely(res != req->cqe.res)) {
- if (res == -EAGAIN && io_rw_should_reissue(req)) {
+ if (res == -EAGAIN && io_rw_should_reissue(req))
req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
- return;
- }
- req->cqe.res = res;
+ else
+ req->cqe.res = res;
}
/* order with io_iopoll_complete() checking ->iopoll_completed */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index bcb09e011e9e..823aa0824916 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4950,7 +4950,7 @@ static struct perf_event_pmu_context *
find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
struct perf_event *event)
{
- struct perf_event_pmu_context *new = NULL, *epc;
+ struct perf_event_pmu_context *new = NULL, *pos = NULL, *epc;
void *task_ctx_data = NULL;
if (!ctx->task) {
@@ -5007,12 +5007,19 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
atomic_inc(&epc->refcount);
goto found_epc;
}
+ /* Make sure the pmu_ctx_list is sorted by PMU type: */
+ if (!pos && epc->pmu->type > pmu->type)
+ pos = epc;
}
epc = new;
new = NULL;
- list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
+ if (!pos)
+ list_add_tail(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
+ else
+ list_add(&epc->pmu_ctx_entry, pos->pmu_ctx_entry.prev);
+
epc->ctx = ctx;
found_epc:
@@ -5962,14 +5969,15 @@ static int _perf_event_period(struct perf_event *event, u64 value)
if (!value)
return -EINVAL;
- if (event->attr.freq && value > sysctl_perf_event_sample_rate)
- return -EINVAL;
-
- if (perf_event_check_period(event, value))
- return -EINVAL;
-
- if (!event->attr.freq && (value & (1ULL << 63)))
- return -EINVAL;
+ if (event->attr.freq) {
+ if (value > sysctl_perf_event_sample_rate)
+ return -EINVAL;
+ } else {
+ if (perf_event_check_period(event, value))
+ return -EINVAL;
+ if (value & (1ULL << 63))
+ return -EINVAL;
+ }
event_function_call(event, __perf_event_period, &value);
@@ -8321,7 +8329,8 @@ void perf_event_exec(void)
perf_event_enable_on_exec(ctx);
perf_event_remove_on_exec(ctx);
- perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true);
+ scoped_guard(rcu)
+ perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true);
perf_unpin_context(ctx);
put_ctx(ctx);
@@ -11821,6 +11830,21 @@ free_dev:
static struct lock_class_key cpuctx_mutex;
static struct lock_class_key cpuctx_lock;
+static bool idr_cmpxchg(struct idr *idr, unsigned long id, void *old, void *new)
+{
+ void *tmp, *val = idr_find(idr, id);
+
+ if (val != old)
+ return false;
+
+ tmp = idr_replace(idr, new, id);
+ if (IS_ERR(tmp))
+ return false;
+
+ WARN_ON_ONCE(tmp != val);
+ return true;
+}
+
int perf_pmu_register(struct pmu *pmu, const char *name, int type)
{
int cpu, ret, max = PERF_TYPE_MAX;
@@ -11847,7 +11871,7 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
if (type >= 0)
max = type;
- ret = idr_alloc(&pmu_idr, pmu, max, 0, GFP_KERNEL);
+ ret = idr_alloc(&pmu_idr, NULL, max, 0, GFP_KERNEL);
if (ret < 0)
goto free_pdc;
@@ -11855,6 +11879,7 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
type = ret;
pmu->type = type;
+ atomic_set(&pmu->exclusive_cnt, 0);
if (pmu_bus_running && !pmu->dev) {
ret = pmu_dev_alloc(pmu);
@@ -11903,14 +11928,22 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
if (!pmu->event_idx)
pmu->event_idx = perf_event_idx_default;
+ /*
+ * Now that the PMU is complete, make it visible to perf_try_init_event().
+ */
+ if (!idr_cmpxchg(&pmu_idr, pmu->type, NULL, pmu))
+ goto free_context;
list_add_rcu(&pmu->entry, &pmus);
- atomic_set(&pmu->exclusive_cnt, 0);
+
ret = 0;
unlock:
mutex_unlock(&pmus_lock);
return ret;
+free_context:
+ free_percpu(pmu->cpu_pmu_context);
+
free_dev:
if (pmu->dev && pmu->dev != PMU_NULL_DEV) {
device_del(pmu->dev);
@@ -11930,6 +11963,8 @@ void perf_pmu_unregister(struct pmu *pmu)
{
mutex_lock(&pmus_lock);
list_del_rcu(&pmu->entry);
+ idr_remove(&pmu_idr, pmu->type);
+ mutex_unlock(&pmus_lock);
/*
* We dereference the pmu list under both SRCU and regular RCU, so
@@ -11939,7 +11974,6 @@ void perf_pmu_unregister(struct pmu *pmu)
synchronize_rcu();
free_percpu(pmu->pmu_disable_count);
- idr_remove(&pmu_idr, pmu->type);
if (pmu_bus_running && pmu->dev && pmu->dev != PMU_NULL_DEV) {
if (pmu->nr_addr_filters)
device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
@@ -11947,7 +11981,6 @@ void perf_pmu_unregister(struct pmu *pmu)
put_device(pmu->dev);
}
free_pmu_context(pmu);
- mutex_unlock(&pmus_lock);
}
EXPORT_SYMBOL_GPL(perf_pmu_unregister);
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index bf2a87a0a378..b4ca8898fe17 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -495,6 +495,11 @@ retry:
if (ret <= 0)
goto put_old;
+ if (is_zero_page(old_page)) {
+ ret = -EINVAL;
+ goto put_old;
+ }
+
if (WARN(!is_register && PageCompound(old_page),
"uprobe unregister should never work on compound page\n")) {
ret = -EINVAL;
@@ -762,10 +767,14 @@ static struct uprobe *hprobe_expire(struct hprobe *hprobe, bool get)
enum hprobe_state hstate;
/*
- * return_instance's hprobe is protected by RCU.
- * Underlying uprobe is itself protected from reuse by SRCU.
+ * Caller should guarantee that return_instance is not going to be
+ * freed from under us. This can be achieved either through holding
+ * rcu_read_lock() or by owning return_instance in the first place.
+ *
+ * Underlying uprobe is itself protected from reuse by SRCU, so ensure
+ * SRCU lock is held properly.
*/
- lockdep_assert(rcu_read_lock_held() && srcu_read_lock_held(&uretprobes_srcu));
+ lockdep_assert(srcu_read_lock_held(&uretprobes_srcu));
hstate = READ_ONCE(hprobe->state);
switch (hstate) {
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 8f6cfec87555..7098ed44e717 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -107,7 +107,7 @@ static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns
goto out_free_idr;
ns->ns.ops = &pidns_operations;
- ns->pid_max = parent_pid_ns->pid_max;
+ ns->pid_max = PID_MAX_LIMIT;
err = register_pidns_sysctls(ns);
if (err)
goto out_free_inum;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9aecd914ac69..67189907214d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7285,7 +7285,7 @@ out_unlock:
#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
int __sched __cond_resched(void)
{
- if (should_resched(0)) {
+ if (should_resched(0) && !irqs_disabled()) {
preempt_schedule_common();
return 1;
}
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 38e4537790af..ff4df16b5186 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -3189,7 +3189,7 @@ int sched_dl_global_validate(void)
* value smaller than the currently allocated bandwidth in
* any of the root_domains.
*/
- for_each_possible_cpu(cpu) {
+ for_each_online_cpu(cpu) {
rcu_read_lock_sched();
if (dl_bw_visited(cpu, gen))
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 5a81d9a1e31f..0f1da199cfc7 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -3117,7 +3117,6 @@ static struct task_struct *pick_task_scx(struct rq *rq)
{
struct task_struct *prev = rq->curr;
struct task_struct *p;
- bool prev_on_scx = prev->sched_class == &ext_sched_class;
bool keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
bool kick_idle = false;
@@ -3137,14 +3136,18 @@ static struct task_struct *pick_task_scx(struct rq *rq)
* if pick_task_scx() is called without preceding balance_scx().
*/
if (unlikely(rq->scx.flags & SCX_RQ_BAL_PENDING)) {
- if (prev_on_scx) {
+ if (prev->scx.flags & SCX_TASK_QUEUED) {
keep_prev = true;
} else {
keep_prev = false;
kick_idle = true;
}
- } else if (unlikely(keep_prev && !prev_on_scx)) {
- /* only allowed during transitions */
+ } else if (unlikely(keep_prev &&
+ prev->sched_class != &ext_sched_class)) {
+ /*
+ * Can happen while enabling as SCX_RQ_BAL_PENDING assertion is
+ * conditional on scx_enabled() and may have been skipped.
+ */
WARN_ON_ONCE(scx_ops_enable_state() == SCX_OPS_ENABLED);
keep_prev = false;
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1c0ef435a7aa..c798d2795243 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4045,15 +4045,17 @@ static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq)
{
struct cfs_rq *prev_cfs_rq;
struct list_head *prev;
+ struct rq *rq = rq_of(cfs_rq);
if (cfs_rq->on_list) {
prev = cfs_rq->leaf_cfs_rq_list.prev;
} else {
- struct rq *rq = rq_of(cfs_rq);
-
prev = rq->tmp_alone_branch;
}
+ if (prev == &rq->leaf_cfs_rq_list)
+ return false;
+
prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list);
return (prev_cfs_rq->tg->parent == cfs_rq->tg);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6b0c25761ccb..fc88e0688daf 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -540,6 +540,7 @@ static int function_stat_show(struct seq_file *m, void *v)
static struct trace_seq s;
unsigned long long avg;
unsigned long long stddev;
+ unsigned long long stddev_denom;
#endif
guard(mutex)(&ftrace_profile_lock);
@@ -559,23 +560,19 @@ static int function_stat_show(struct seq_file *m, void *v)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
seq_puts(m, " ");
- /* Sample standard deviation (s^2) */
- if (rec->counter <= 1)
- stddev = 0;
- else {
- /*
- * Apply Welford's method:
- * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
- */
+ /*
+ * Variance formula:
+ * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
+ * Maybe Welford's method is better here?
+ * Divide only by 1000 for ns^2 -> us^2 conversion.
+ * trace_print_graph_duration will divide by 1000 again.
+ */
+ stddev = 0;
+ stddev_denom = rec->counter * (rec->counter - 1) * 1000;
+ if (stddev_denom) {
stddev = rec->counter * rec->time_squared -
rec->time * rec->time;
-
- /*
- * Divide only 1000 for ns^2 -> us^2 conversion.
- * trace_print_graph_duration will divide 1000 again.
- */
- stddev = div64_ul(stddev,
- rec->counter * (rec->counter - 1) * 1000);
+ stddev = div64_ul(stddev, stddev_denom);
}
trace_seq_init(&s);
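For reference (not part of the patch), the single-pass expression used above is the standard rearrangement of the sample variance. With \bar{x} = \frac{1}{n}\sum_{i=1}^{n} x_i,

    s^2 = \frac{1}{n-1}\sum_{i=1}^{n}(x_i-\bar{x})^2
        = \frac{n\sum_{i=1}^{n} x_i^2 - \left(\sum_{i=1}^{n} x_i\right)^2}{n\,(n-1)},

which maps to (rec->counter * rec->time_squared - rec->time * rec->time) divided by stddev_denom, the extra factor of 1000 in the denominator only converting ns^2 to us^2.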
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 261163b00137..ad7419e24055 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -6724,27 +6724,27 @@ static int event_hist_trigger_parse(struct event_command *cmd_ops,
if (existing_hist_update_only(glob, trigger_data, file))
goto out_free;
- ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
- if (ret < 0)
- goto out_free;
+ if (!get_named_trigger_data(trigger_data)) {
- if (get_named_trigger_data(trigger_data))
- goto enable;
+ ret = create_actions(hist_data);
+ if (ret)
+ goto out_free;
- ret = create_actions(hist_data);
- if (ret)
- goto out_unreg;
+ if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
+ ret = save_hist_vars(hist_data);
+ if (ret)
+ goto out_free;
+ }
- if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
- ret = save_hist_vars(hist_data);
+ ret = tracing_map_init(hist_data->map);
if (ret)
- goto out_unreg;
+ goto out_free;
}
- ret = tracing_map_init(hist_data->map);
- if (ret)
- goto out_unreg;
-enable:
+ ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
+ if (ret < 0)
+ goto out_free;
+
ret = hist_trigger_enable(trigger_data, file);
if (ret)
goto out_unreg;
diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c
index b8f3c4ba309b..e27305d31fc5 100644
--- a/kernel/trace/trace_fprobe.c
+++ b/kernel/trace/trace_fprobe.c
@@ -1049,6 +1049,19 @@ static int parse_symbol_and_return(int argc, const char *argv[],
if (*is_return)
return 0;
+ if (is_tracepoint) {
+ tmp = *symbol;
+ while (*tmp && (isalnum(*tmp) || *tmp == '_'))
+ tmp++;
+ if (*tmp) {
+ /* find a wrong character. */
+ trace_probe_log_err(tmp - *symbol, BAD_TP_NAME);
+ kfree(*symbol);
+ *symbol = NULL;
+ return -EINVAL;
+ }
+ }
+
/* If there is $retval, this should be a return fprobe. */
for (i = 2; i < argc; i++) {
tmp = strstr(argv[i], "$retval");
@@ -1056,6 +1069,8 @@ static int parse_symbol_and_return(int argc, const char *argv[],
if (is_tracepoint) {
trace_probe_log_set_index(i);
trace_probe_log_err(tmp - argv[i], RETVAL_ON_PROBE);
+ kfree(*symbol);
+ *symbol = NULL;
return -EINVAL;
}
*is_return = true;
@@ -1215,6 +1230,11 @@ static int trace_fprobe_create_internal(int argc, const char *argv[],
if (is_return && tf->tp.entry_arg) {
tf->fp.entry_handler = trace_fprobe_entry_handler;
tf->fp.entry_data_size = traceprobe_get_entry_data_size(&tf->tp);
+ if (ALIGN(tf->fp.entry_data_size, sizeof(long)) > MAX_FPROBE_DATA_SIZE) {
+ trace_probe_log_set_index(2);
+ trace_probe_log_err(0, TOO_MANY_EARGS);
+ return -E2BIG;
+ }
}
ret = traceprobe_set_print_fmt(&tf->tp,
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 5803e6a41570..96792bc4b092 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -36,7 +36,6 @@
#define MAX_BTF_ARGS_LEN 128
#define MAX_DENTRY_ARGS_LEN 256
#define MAX_STRING_SIZE PATH_MAX
-#define MAX_ARG_BUF_LEN (MAX_TRACE_ARGS * MAX_ARG_NAME_LEN)
/* Reserved field names */
#define FIELD_STRING_IP "__probe_ip"
@@ -481,6 +480,7 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
C(NON_UNIQ_SYMBOL, "The symbol is not unique"), \
C(BAD_RETPROBE, "Retprobe address must be an function entry"), \
C(NO_TRACEPOINT, "Tracepoint is not found"), \
+ C(BAD_TP_NAME, "Invalid character in tracepoint name"),\
C(BAD_ADDR_SUFFIX, "Invalid probed address suffix"), \
C(NO_GROUP_NAME, "Group name is not specified"), \
C(GROUP_TOO_LONG, "Group name is too long"), \
@@ -544,7 +544,8 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
C(NO_BTF_FIELD, "This field is not found."), \
C(BAD_BTF_TID, "Failed to get BTF type info."),\
C(BAD_TYPE4STR, "This type does not fit for string."),\
- C(NEED_STRING_TYPE, "$comm and immediate-string only accepts string type"),
+ C(NEED_STRING_TYPE, "$comm and immediate-string only accepts string type"),\
+ C(TOO_MANY_EARGS, "Too many entry arguments specified"),
#undef C
#define C(a, b) TP_ERR_##a
diff --git a/kernel/vhost_task.c b/kernel/vhost_task.c
index 8800f5acc007..2ef2e1b80091 100644
--- a/kernel/vhost_task.c
+++ b/kernel/vhost_task.c
@@ -133,7 +133,7 @@ struct vhost_task *vhost_task_create(bool (*fn)(void *),
vtsk = kzalloc(sizeof(*vtsk), GFP_KERNEL);
if (!vtsk)
- return NULL;
+ return ERR_PTR(-ENOMEM);
init_completion(&vtsk->exited);
mutex_init(&vtsk->exit_mutex);
vtsk->data = arg;
@@ -145,7 +145,7 @@ struct vhost_task *vhost_task_create(bool (*fn)(void *),
tsk = copy_process(NULL, 0, NUMA_NO_NODE, &args);
if (IS_ERR(tsk)) {
kfree(vtsk);
- return NULL;
+ return ERR_PTR(PTR_ERR(tsk));
}
vtsk->task = tsk;
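A hedged sketch of the caller-side change implied by returning ERR_PTR() instead of NULL; the wrapper below is illustrative only and is not one of the actual vhost call sites:

    #include <linux/err.h>
    #include <linux/sched/vhost_task.h>

    static int example_start_worker(bool (*fn)(void *),
                                    void (*handle_sigkill)(void *),
                                    void *arg, struct vhost_task **out)
    {
            struct vhost_task *vtsk;

            vtsk = vhost_task_create(fn, handle_sigkill, arg, "example-worker");
            if (IS_ERR(vtsk))
                    return PTR_ERR(vtsk);   /* was: if (!vtsk) return -ENOMEM; */

            *out = vtsk;
            vhost_task_start(vtsk);
            return 0;
    }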
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 97152f2250fe..bfe030b443e2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2254,8 +2254,10 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
* queues a new work item to a wq after destroy_workqueue(wq).
*/
if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) &&
- WARN_ON_ONCE(!is_chained_work(wq))))
+ WARN_ONCE(!is_chained_work(wq), "workqueue: cannot queue %ps on wq %s\n",
+ work->func, wq->name))) {
return;
+ }
rcu_read_lock();
retry:
/* pwq which will be used unless @work is executing elsewhere */
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1af972a92d06..35796c290ca3 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2103,7 +2103,7 @@ config FAIL_SKB_REALLOC
reallocated, catching possible invalid pointers to the skb.
For more information, check
- Documentation/dev-tools/fault-injection/fault-injection.rst
+ Documentation/fault-injection/fault-injection.rst
config FAULT_INJECTION_CONFIGFS
bool "Configfs interface for fault-injection capabilities"
diff --git a/lib/rcuref.c b/lib/rcuref.c
index 97f300eca927..5bd726b71e39 100644
--- a/lib/rcuref.c
+++ b/lib/rcuref.c
@@ -220,6 +220,7 @@ EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
/**
* rcuref_put_slowpath - Slowpath of __rcuref_put()
* @ref: Pointer to the reference count
+ * @cnt: The resulting value of the fastpath decrement
*
* Invoked when the reference count is outside of the valid zone.
*
@@ -233,10 +234,8 @@ EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
* with a concurrent get()/put() pair. Caller is not allowed to
* deconstruct the protected object.
*/
-bool rcuref_put_slowpath(rcuref_t *ref)
+bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt)
{
- unsigned int cnt = atomic_read(&ref->refcnt);
-
/* Did this drop the last reference? */
if (likely(cnt == RCUREF_NOREF)) {
/*
diff --git a/mm/compaction.c b/mm/compaction.c
index 12ed8425fa17..a3203d97123e 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -3181,6 +3181,7 @@ static int kcompactd(void *p)
long default_timeout = msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC);
long timeout = default_timeout;
+ current->flags |= PF_KCOMPACTD;
set_freezable();
pgdat->kcompactd_max_order = 0;
@@ -3237,6 +3238,8 @@ static int kcompactd(void *p)
pgdat->proactive_compact_trigger = false;
}
+ current->flags &= ~PF_KCOMPACTD;
+
return 0;
}
diff --git a/mm/filemap.c b/mm/filemap.c
index 804d7365680c..2974691fdfad 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -445,7 +445,7 @@ EXPORT_SYMBOL(filemap_fdatawrite_range);
* filemap_fdatawrite_range_kick - start writeback on a range
* @mapping: target address_space
* @start: index to start writeback on
- * @end: last (non-inclusive) index for writeback
+ * @end: last (inclusive) index for writeback
*
* This is a non-integrity writeback helper, to start writing back folios
* for the indicated range.
@@ -2897,8 +2897,7 @@ size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
size = min(size, folio_size(folio) - offset);
offset %= PAGE_SIZE;
- while (spliced < size &&
- !pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
+ while (spliced < size && !pipe_is_full(pipe)) {
struct pipe_buffer *buf = pipe_head_buf(pipe);
size_t part = min_t(size_t, PAGE_SIZE - offset, size - spliced);
@@ -2955,7 +2954,7 @@ ssize_t filemap_splice_read(struct file *in, loff_t *ppos,
iocb.ki_pos = *ppos;
/* Work out how much data we can actually add into the pipe */
- used = pipe_occupancy(pipe->head, pipe->tail);
+ used = pipe_buf_usage(pipe);
npages = max_t(ssize_t, pipe->max_usage - used, 0);
len = min_t(size_t, len, npages * PAGE_SIZE);
@@ -3015,7 +3014,7 @@ ssize_t filemap_splice_read(struct file *in, loff_t *ppos,
total_spliced += n;
*ppos += n;
in->f_ra.prev_pos = *ppos;
- if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
+ if (pipe_is_full(pipe))
goto out;
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 163190e89ea1..97930d44d460 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2943,6 +2943,14 @@ int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
return ret;
}
+void wait_for_freed_hugetlb_folios(void)
+{
+ if (llist_empty(&hpage_freelist))
+ return;
+
+ flush_work(&free_hpage_work);
+}
+
typedef enum {
/*
* For either 0/1: we checked the per-vma resv map, and one resv
@@ -5447,7 +5455,7 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
if (src_ptl != dst_ptl)
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
- pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
+ pte = huge_ptep_get_and_clear(mm, old_addr, src_pte, sz);
if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
huge_pte_clear(mm, new_addr, dst_pte, sz);
@@ -5622,7 +5630,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
}
- pte = huge_ptep_get_and_clear(mm, address, ptep);
+ pte = huge_ptep_get_and_clear(mm, address, ptep, sz);
tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
if (huge_pte_dirty(pte))
set_page_dirty(page);
diff --git a/mm/internal.h b/mm/internal.h
index 109ef30fee11..20b3535935a3 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1115,7 +1115,7 @@ static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
* mm/memory-failure.c
*/
#ifdef CONFIG_MEMORY_FAILURE
-void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu);
+int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill);
void shake_folio(struct folio *folio);
extern int hwpoison_filter(struct page *p);
@@ -1138,8 +1138,9 @@ unsigned long page_mapped_in_vma(const struct page *page,
struct vm_area_struct *vma);
#else
-static inline void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
+static inline int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill)
{
+ return -EBUSY;
}
#endif
diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
index 3ea50f09311f..3df45c25c1f6 100644
--- a/mm/kmsan/hooks.c
+++ b/mm/kmsan/hooks.c
@@ -357,6 +357,7 @@ void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
size -= to_go;
}
}
+EXPORT_SYMBOL_GPL(kmsan_handle_dma);
void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
enum dma_data_direction dir)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 995a15eb67e2..327e02fdc029 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1556,11 +1556,35 @@ static int get_hwpoison_page(struct page *p, unsigned long flags)
return ret;
}
-void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
+int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill)
{
- if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
- struct address_space *mapping;
+ enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
+ struct address_space *mapping;
+
+ if (folio_test_swapcache(folio)) {
+ pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
+ ttu &= ~TTU_HWPOISON;
+ }
+ /*
+ * Propagate the dirty bit from PTEs to struct page first, because we
+ * need this to decide if we should kill or just drop the page.
+ * XXX: the dirty test could be racy: set_page_dirty() may not always
+ * be called inside page lock (it's recommended but not enforced).
+ */
+ mapping = folio_mapping(folio);
+ if (!must_kill && !folio_test_dirty(folio) && mapping &&
+ mapping_can_writeback(mapping)) {
+ if (folio_mkclean(folio)) {
+ folio_set_dirty(folio);
+ } else {
+ ttu &= ~TTU_HWPOISON;
+ pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
+ pfn);
+ }
+ }
+
+ if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
/*
* For hugetlb folios in shared mappings, try_to_unmap
* could potentially call huge_pmd_unshare. Because of
@@ -1572,7 +1596,7 @@ void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
if (!mapping) {
pr_info("%#lx: could not lock mapping for mapped hugetlb folio\n",
folio_pfn(folio));
- return;
+ return -EBUSY;
}
try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
@@ -1580,6 +1604,8 @@ void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
} else {
try_to_unmap(folio, ttu);
}
+
+ return folio_mapped(folio) ? -EBUSY : 0;
}
/*
@@ -1589,8 +1615,6 @@ void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
static bool hwpoison_user_mappings(struct folio *folio, struct page *p,
unsigned long pfn, int flags)
{
- enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
- struct address_space *mapping;
LIST_HEAD(tokill);
bool unmap_success;
int forcekill;
@@ -1613,29 +1637,6 @@ static bool hwpoison_user_mappings(struct folio *folio, struct page *p,
if (!folio_mapped(folio))
return true;
- if (folio_test_swapcache(folio)) {
- pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
- ttu &= ~TTU_HWPOISON;
- }
-
- /*
- * Propagate the dirty bit from PTEs to struct page first, because we
- * need this to decide if we should kill or just drop the page.
- * XXX: the dirty test could be racy: set_page_dirty() may not always
- * be called inside page lock (it's recommended but not enforced).
- */
- mapping = folio_mapping(folio);
- if (!(flags & MF_MUST_KILL) && !folio_test_dirty(folio) && mapping &&
- mapping_can_writeback(mapping)) {
- if (folio_mkclean(folio)) {
- folio_set_dirty(folio);
- } else {
- ttu &= ~TTU_HWPOISON;
- pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
- pfn);
- }
- }
-
/*
* First collect all the processes that have the page
* mapped in dirty form. This has to be done before try_to_unmap,
@@ -1643,9 +1644,7 @@ static bool hwpoison_user_mappings(struct folio *folio, struct page *p,
*/
collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);
- unmap_poisoned_folio(folio, ttu);
-
- unmap_success = !folio_mapped(folio);
+ unmap_success = !unmap_poisoned_folio(folio, pfn, flags & MF_MUST_KILL);
if (!unmap_success)
pr_err("%#lx: failed to unmap page (folio mapcount=%d)\n",
pfn, folio_mapcount(folio));
diff --git a/mm/memory.c b/mm/memory.c
index b4d3d4893267..7ad47bc16c79 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3051,8 +3051,10 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
next = pgd_addr_end(addr, end);
if (pgd_none(*pgd) && !create)
continue;
- if (WARN_ON_ONCE(pgd_leaf(*pgd)))
- return -EINVAL;
+ if (WARN_ON_ONCE(pgd_leaf(*pgd))) {
+ err = -EINVAL;
+ break;
+ }
if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
if (!create)
continue;
@@ -4348,10 +4350,15 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
* Get a page reference while we know the page can't be
* freed.
*/
- get_page(vmf->page);
- pte_unmap_unlock(vmf->pte, vmf->ptl);
- ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
- put_page(vmf->page);
+ if (trylock_page(vmf->page)) {
+ get_page(vmf->page);
+ pte_unmap_unlock(vmf->pte, vmf->ptl);
+ ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
+ unlock_page(vmf->page);
+ put_page(vmf->page);
+ } else {
+ pte_unmap_unlock(vmf->pte, vmf->ptl);
+ }
} else if (is_hwpoison_entry(entry)) {
ret = VM_FAULT_HWPOISON;
} else if (is_pte_marker_entry(entry)) {
@@ -5183,7 +5190,11 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
!(vma->vm_flags & VM_SHARED);
int type, nr_pages;
- unsigned long addr = vmf->address;
+ unsigned long addr;
+ bool needs_fallback = false;
+
+fallback:
+ addr = vmf->address;
/* Did we COW the page? */
if (is_cow)
@@ -5222,7 +5233,8 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
* approach also applies to non-anonymous-shmem faults to avoid
* inflating the RSS of the process.
*/
- if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma))) {
+ if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma)) ||
+ unlikely(needs_fallback)) {
nr_pages = 1;
} else if (nr_pages > 1) {
pgoff_t idx = folio_page_idx(folio, page);
@@ -5258,9 +5270,9 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
ret = VM_FAULT_NOPAGE;
goto unlock;
} else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
- update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);
- ret = VM_FAULT_NOPAGE;
- goto unlock;
+ needs_fallback = true;
+ pte_unmap_unlock(vmf->pte, vmf->ptl);
+ goto fallback;
}
folio_ref_add(folio, nr_pages - 1);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index e3655f07dd6e..16cf9e17077e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1822,26 +1822,24 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
if (folio_test_large(folio))
pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1;
- /*
- * HWPoison pages have elevated reference counts so the migration would
- * fail on them. It also doesn't make any sense to migrate them in the
- * first place. Still try to unmap such a page in case it is still mapped
- * (keep the unmap as the catch all safety net).
- */
+ if (!folio_try_get(folio))
+ continue;
+
+ if (unlikely(page_folio(page) != folio))
+ goto put_folio;
+
if (folio_test_hwpoison(folio) ||
(folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) {
if (WARN_ON(folio_test_lru(folio)))
folio_isolate_lru(folio);
- if (folio_mapped(folio))
- unmap_poisoned_folio(folio, TTU_IGNORE_MLOCK);
- continue;
- }
-
- if (!folio_try_get(folio))
- continue;
+ if (folio_mapped(folio)) {
+ folio_lock(folio);
+ unmap_poisoned_folio(folio, pfn, false);
+ folio_unlock(folio);
+ }
- if (unlikely(page_folio(page) != folio))
goto put_folio;
+ }
if (!isolate_folio_to_list(folio, &source)) {
if (__ratelimit(&migrate_rs)) {
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 5bd888223cc8..a351497ced4a 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -60,6 +60,8 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
struct mm_walk *walk)
{
struct migrate_vma *migrate = walk->private;
+ struct folio *fault_folio = migrate->fault_page ?
+ page_folio(migrate->fault_page) : NULL;
struct vm_area_struct *vma = walk->vma;
struct mm_struct *mm = vma->vm_mm;
unsigned long addr = start, unmapped = 0;
@@ -88,11 +90,16 @@ again:
folio_get(folio);
spin_unlock(ptl);
+ /* FIXME: we don't expect THP for fault_folio */
+ if (WARN_ON_ONCE(fault_folio == folio))
+ return migrate_vma_collect_skip(start, end,
+ walk);
if (unlikely(!folio_trylock(folio)))
return migrate_vma_collect_skip(start, end,
walk);
ret = split_folio(folio);
- folio_unlock(folio);
+ if (fault_folio != folio)
+ folio_unlock(folio);
folio_put(folio);
if (ret)
return migrate_vma_collect_skip(start, end,
@@ -192,7 +199,7 @@ again:
* optimisation to avoid walking the rmap later with
* try_to_migrate().
*/
- if (folio_trylock(folio)) {
+ if (fault_folio == folio || folio_trylock(folio)) {
bool anon_exclusive;
pte_t swp_pte;
@@ -204,7 +211,8 @@ again:
if (folio_try_share_anon_rmap_pte(folio, page)) {
set_pte_at(mm, addr, ptep, pte);
- folio_unlock(folio);
+ if (fault_folio != folio)
+ folio_unlock(folio);
folio_put(folio);
mpfn = 0;
goto next;
@@ -363,6 +371,8 @@ static unsigned long migrate_device_unmap(unsigned long *src_pfns,
unsigned long npages,
struct page *fault_page)
{
+ struct folio *fault_folio = fault_page ?
+ page_folio(fault_page) : NULL;
unsigned long i, restore = 0;
bool allow_drain = true;
unsigned long unmapped = 0;
@@ -427,7 +437,8 @@ static unsigned long migrate_device_unmap(unsigned long *src_pfns,
remove_migration_ptes(folio, folio, 0);
src_pfns[i] = 0;
- folio_unlock(folio);
+ if (fault_folio != folio)
+ folio_unlock(folio);
folio_put(folio);
restore--;
}
@@ -536,6 +547,8 @@ int migrate_vma_setup(struct migrate_vma *args)
return -EINVAL;
if (args->fault_page && !is_device_private_page(args->fault_page))
return -EINVAL;
+ if (args->fault_page && !PageLocked(args->fault_page))
+ return -EINVAL;
memset(args->src, 0, sizeof(*args->src) * nr_pages);
args->cpages = 0;
@@ -799,19 +812,13 @@ void migrate_vma_pages(struct migrate_vma *migrate)
}
EXPORT_SYMBOL(migrate_vma_pages);
-/*
- * migrate_device_finalize() - complete page migration
- * @src_pfns: src_pfns returned from migrate_device_range()
- * @dst_pfns: array of pfns allocated by the driver to migrate memory to
- * @npages: number of pages in the range
- *
- * Completes migration of the page by removing special migration entries.
- * Drivers must ensure copying of page data is complete and visible to the CPU
- * before calling this.
- */
-void migrate_device_finalize(unsigned long *src_pfns,
- unsigned long *dst_pfns, unsigned long npages)
+static void __migrate_device_finalize(unsigned long *src_pfns,
+ unsigned long *dst_pfns,
+ unsigned long npages,
+ struct page *fault_page)
{
+ struct folio *fault_folio = fault_page ?
+ page_folio(fault_page) : NULL;
unsigned long i;
for (i = 0; i < npages; i++) {
@@ -824,6 +831,7 @@ void migrate_device_finalize(unsigned long *src_pfns,
if (!page) {
if (dst) {
+ WARN_ON_ONCE(fault_folio == dst);
folio_unlock(dst);
folio_put(dst);
}
@@ -834,6 +842,7 @@ void migrate_device_finalize(unsigned long *src_pfns,
if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE) || !dst) {
if (dst) {
+ WARN_ON_ONCE(fault_folio == dst);
folio_unlock(dst);
folio_put(dst);
}
@@ -843,15 +852,33 @@ void migrate_device_finalize(unsigned long *src_pfns,
if (!folio_is_zone_device(dst))
folio_add_lru(dst);
remove_migration_ptes(src, dst, 0);
- folio_unlock(src);
+ if (fault_folio != src)
+ folio_unlock(src);
folio_put(src);
if (dst != src) {
+ WARN_ON_ONCE(fault_folio == dst);
folio_unlock(dst);
folio_put(dst);
}
}
}
+
+/*
+ * migrate_device_finalize() - complete page migration
+ * @src_pfns: src_pfns returned from migrate_device_range()
+ * @dst_pfns: array of pfns allocated by the driver to migrate memory to
+ * @npages: number of pages in the range
+ *
+ * Completes migration of the page by removing special migration entries.
+ * Drivers must ensure copying of page data is complete and visible to the CPU
+ * before calling this.
+ */
+void migrate_device_finalize(unsigned long *src_pfns,
+ unsigned long *dst_pfns, unsigned long npages)
+{
+ return __migrate_device_finalize(src_pfns, dst_pfns, npages, NULL);
+}
EXPORT_SYMBOL(migrate_device_finalize);
/**
@@ -867,10 +894,27 @@ EXPORT_SYMBOL(migrate_device_finalize);
*/
void migrate_vma_finalize(struct migrate_vma *migrate)
{
- migrate_device_finalize(migrate->src, migrate->dst, migrate->npages);
+ __migrate_device_finalize(migrate->src, migrate->dst, migrate->npages,
+ migrate->fault_page);
}
EXPORT_SYMBOL(migrate_vma_finalize);
+static unsigned long migrate_device_pfn_lock(unsigned long pfn)
+{
+ struct folio *folio;
+
+ folio = folio_get_nontail_page(pfn_to_page(pfn));
+ if (!folio)
+ return 0;
+
+ if (!folio_trylock(folio)) {
+ folio_put(folio);
+ return 0;
+ }
+
+ return migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
+}
+
/**
* migrate_device_range() - migrate device private pfns to normal memory.
* @src_pfns: array large enough to hold migrating source device private pfns.
@@ -895,29 +939,35 @@ int migrate_device_range(unsigned long *src_pfns, unsigned long start,
{
unsigned long i, pfn;
- for (pfn = start, i = 0; i < npages; pfn++, i++) {
- struct folio *folio;
+ for (pfn = start, i = 0; i < npages; pfn++, i++)
+ src_pfns[i] = migrate_device_pfn_lock(pfn);
- folio = folio_get_nontail_page(pfn_to_page(pfn));
- if (!folio) {
- src_pfns[i] = 0;
- continue;
- }
+ migrate_device_unmap(src_pfns, npages, NULL);
- if (!folio_trylock(folio)) {
- src_pfns[i] = 0;
- folio_put(folio);
- continue;
- }
+ return 0;
+}
+EXPORT_SYMBOL(migrate_device_range);
- src_pfns[i] = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
- }
+/**
+ * migrate_device_pfns() - migrate device private pfns to normal memory.
+ * @src_pfns: pre-populated array of source device private pfns to migrate.
+ * @npages: number of pages to migrate.
+ *
+ * Similar to migrate_device_range() but supports a non-contiguous pre-populated
+ * array of device pages to migrate.
+ */
+int migrate_device_pfns(unsigned long *src_pfns, unsigned long npages)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; i++)
+ src_pfns[i] = migrate_device_pfn_lock(src_pfns[i]);
migrate_device_unmap(src_pfns, npages, NULL);
return 0;
}
-EXPORT_SYMBOL(migrate_device_range);
+EXPORT_SYMBOL(migrate_device_pfns);
/*
* Migrate a device coherent folio back to normal memory. The caller should have
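A hedged sketch of a device driver evicting a scattered set of its device-private pages with the new migrate_device_pfns(); the pfn-array allocation and the copy step are elided, and the surrounding flow simply follows the usual migrate_device_* sequence:

    #include <linux/migrate.h>

    /* Evict 'npages' non-contiguous device-private pages back to system memory. */
    static int example_evict_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
                                   unsigned long npages)
    {
            unsigned long i;
            int ret;

            /* src_pfns[] was pre-populated by the driver with device-private PFNs. */
            ret = migrate_device_pfns(src_pfns, npages);
            if (ret)
                    return ret;

            for (i = 0; i < npages; i++) {
                    if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE))
                            continue;
                    /* ... allocate a system page, copy data, fill dst_pfns[i] ... */
            }

            migrate_device_pages(src_pfns, dst_pfns, npages);
            migrate_device_finalize(src_pfns, dst_pfns, npages);
            return 0;
    }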
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 579789600a3c..94917c729120 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4243,6 +4243,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
restart:
compaction_retries = 0;
no_progress_loops = 0;
+ compact_result = COMPACT_SKIPPED;
compact_priority = DEF_COMPACT_PRIORITY;
cpuset_mems_cookie = read_mems_allowed_begin();
zonelist_iter_cookie = zonelist_iter_begin();
@@ -5849,11 +5850,10 @@ static void setup_per_zone_lowmem_reserve(void)
for (j = i + 1; j < MAX_NR_ZONES; j++) {
struct zone *upper_zone = &pgdat->node_zones[j];
- bool empty = !zone_managed_pages(upper_zone);
managed_pages += zone_managed_pages(upper_zone);
- if (clear || empty)
+ if (clear)
zone->lowmem_reserve[j] = 0;
else
zone->lowmem_reserve[j] = managed_pages / ratio;
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index c608e9d72865..a051a29e95ad 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -608,6 +608,16 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
int ret;
/*
+ * Due to the deferred freeing of hugetlb folios, the hugepage folios may
+ * not be released to the buddy system immediately. This can cause PageBuddy()
+ * to fail in __test_page_isolated_in_pageblock(). To ensure that the
+ * hugetlb folios are properly released back to the buddy system, we
+ * invoke the wait_for_freed_hugetlb_folios() function to wait for the
+ * release to complete.
+ */
+ wait_for_freed_hugetlb_folios();
+
+ /*
* Note: pageblock_nr_pages != MAX_PAGE_ORDER. Then, chunks of free
* pages are not aligned to pageblock_nr_pages.
* Then we just check migratetype first.
diff --git a/mm/shmem.c b/mm/shmem.c
index 4ea6109a8043..1ede0800e846 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1548,7 +1548,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
if (WARN_ON_ONCE(!wbc->for_reclaim))
goto redirty;
- if (WARN_ON_ONCE((info->flags & VM_LOCKED) || sbinfo->noswap))
+ if ((info->flags & VM_LOCKED) || sbinfo->noswap)
goto redirty;
if (!total_swap_pages)
@@ -2253,7 +2253,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
struct folio *folio = NULL;
bool skip_swapcache = false;
swp_entry_t swap;
- int error, nr_pages;
+ int error, nr_pages, order, split_order;
VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
swap = radix_to_swp_entry(*foliop);
@@ -2272,10 +2272,9 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
/* Look it up and read it in.. */
folio = swap_cache_get_folio(swap, NULL, 0);
+ order = xa_get_order(&mapping->i_pages, index);
if (!folio) {
- int order = xa_get_order(&mapping->i_pages, index);
bool fallback_order0 = false;
- int split_order;
/* Or update major stats only when swapin succeeds?? */
if (fault_type) {
@@ -2339,6 +2338,29 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
error = -ENOMEM;
goto failed;
}
+ } else if (order != folio_order(folio)) {
+ /*
+ * Swap readahead may swap in order 0 folios into swapcache
+ * asynchronously, while the shmem mapping can still store
+ * large swap entries. In such cases, we should split the
+ * large swap entry to prevent possible data corruption.
+ */
+ split_order = shmem_split_large_entry(inode, index, swap, gfp);
+ if (split_order < 0) {
+ error = split_order;
+ goto failed;
+ }
+
+ /*
+ * If the large swap entry has already been split, it is
+ * necessary to recalculate the new swap entry based on
+ * the old order alignment.
+ */
+ if (split_order > 0) {
+ pgoff_t offset = index - round_down(index, 1 << split_order);
+
+ swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
+ }
}
alloced:
@@ -2346,7 +2368,8 @@ alloced:
folio_lock(folio);
if ((!skip_swapcache && !folio_test_swapcache(folio)) ||
folio->swap.val != swap.val ||
- !shmem_confirm_swap(mapping, index, swap)) {
+ !shmem_confirm_swap(mapping, index, swap) ||
+ xa_get_order(&mapping->i_pages, index) != folio_order(folio)) {
error = -EEXIST;
goto unlock;
}
@@ -3487,7 +3510,7 @@ static size_t splice_zeropage_into_pipe(struct pipe_inode_info *pipe,
size = min_t(size_t, size, PAGE_SIZE - offset);
- if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
+ if (!pipe_is_full(pipe)) {
struct pipe_buffer *buf = pipe_head_buf(pipe);
*buf = (struct pipe_buffer) {
@@ -3514,7 +3537,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
int error = 0;
/* Work out how much data we can actually add into the pipe */
- used = pipe_occupancy(pipe->head, pipe->tail);
+ used = pipe_buf_usage(pipe);
npages = max_t(ssize_t, pipe->max_usage - used, 0);
len = min_t(size_t, len, npages * PAGE_SIZE);
@@ -3601,7 +3624,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
total_spliced += n;
*ppos += n;
in->f_ra.prev_pos = *ppos;
- if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
+ if (pipe_is_full(pipe))
break;
cond_resched();
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 4030907b6b7d..4c9f0a87f733 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1304,6 +1304,8 @@ module_param(rcu_min_cached_objs, int, 0444);
static int rcu_delay_page_cache_fill_msec = 5000;
module_param(rcu_delay_page_cache_fill_msec, int, 0444);
+static struct workqueue_struct *rcu_reclaim_wq;
+
/* Maximum number of jiffies to wait before draining a batch. */
#define KFREE_DRAIN_JIFFIES (5 * HZ)
#define KFREE_N_BATCHES 2
@@ -1632,10 +1634,10 @@ __schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
if (delayed_work_pending(&krcp->monitor_work)) {
delay_left = krcp->monitor_work.timer.expires - jiffies;
if (delay < delay_left)
- mod_delayed_work(system_unbound_wq, &krcp->monitor_work, delay);
+ mod_delayed_work(rcu_reclaim_wq, &krcp->monitor_work, delay);
return;
}
- queue_delayed_work(system_unbound_wq, &krcp->monitor_work, delay);
+ queue_delayed_work(rcu_reclaim_wq, &krcp->monitor_work, delay);
}
static void
@@ -1733,7 +1735,7 @@ kvfree_rcu_queue_batch(struct kfree_rcu_cpu *krcp)
// "free channels", the batch can handle. Break
// the loop since it is done with this CPU thus
// queuing an RCU work is _always_ success here.
- queued = queue_rcu_work(system_unbound_wq, &krwp->rcu_work);
+ queued = queue_rcu_work(rcu_reclaim_wq, &krwp->rcu_work);
WARN_ON_ONCE(!queued);
break;
}
@@ -1883,7 +1885,7 @@ run_page_cache_worker(struct kfree_rcu_cpu *krcp)
if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
!atomic_xchg(&krcp->work_in_progress, 1)) {
if (atomic_read(&krcp->backoff_page_cache_fill)) {
- queue_delayed_work(system_unbound_wq,
+ queue_delayed_work(rcu_reclaim_wq,
&krcp->page_cache_work,
msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
} else {
@@ -2120,6 +2122,10 @@ void __init kvfree_rcu_init(void)
int i, j;
struct shrinker *kfree_rcu_shrinker;
+ rcu_reclaim_wq = alloc_workqueue("kvfree_rcu_reclaim",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+ WARN_ON(!rcu_reclaim_wq);
+
/* Clamp it to [0:100] seconds interval. */
if (rcu_delay_page_cache_fill_msec < 0 ||
rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) {
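A hedged, minimal module sketch of the pattern introduced above: a dedicated WQ_UNBOUND | WQ_MEM_RECLAIM workqueue so that deferred freeing keeps making progress under memory pressure (the demo_* names are illustrative, not from the patch):

// SPDX-License-Identifier: GPL-2.0
/* Illustration only: a dedicated unbound, reclaim-safe workqueue. */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo work ran on the reclaim-safe workqueue\n");
}

static int __init demo_init(void)
{
	/* WQ_MEM_RECLAIM guarantees a rescuer thread under memory pressure */
	demo_wq = alloc_workqueue("demo_reclaim_wq",
				  WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!demo_wq)
		return -ENOMEM;

	INIT_WORK(&demo_work, demo_work_fn);
	queue_work(demo_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	/* drains any pending work before freeing the workqueue */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");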
diff --git a/mm/swapfile.c b/mm/swapfile.c
index ba19430dd4ea..df7c4e8b089c 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -653,7 +653,8 @@ static void relocate_cluster(struct swap_info_struct *si,
return;
if (!ci->count) {
- free_cluster(si, ci);
+ if (ci->flags != CLUSTER_FLAG_FREE)
+ free_cluster(si, ci);
} else if (ci->count != SWAPFILE_CLUSTER) {
if (ci->flags != CLUSTER_FLAG_FRAG)
move_cluster(si, ci, &si->frag_clusters[ci->order],
@@ -858,6 +859,10 @@ static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force)
offset++;
}
+ /* in case no swap cache is reclaimed */
+ if (ci->flags == CLUSTER_FLAG_NONE)
+ relocate_cluster(si, ci);
+
unlock_cluster(ci);
if (to_scan <= 0)
break;
@@ -2641,7 +2646,6 @@ static void wait_for_allocation(struct swap_info_struct *si)
for (offset = 0; offset < end; offset += SWAPFILE_CLUSTER) {
ci = lock_cluster(si, offset);
unlock_cluster(ci);
- offset += SWAPFILE_CLUSTER;
}
}
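The removed increment above double-stepped offset, since the for () header already advances it by SWAPFILE_CLUSTER; a trivial standalone sketch of why that skips every other cluster (STEP and the range are made up):

/* Illustration only: advancing the loop variable twice visits every other step. */
#include <stdio.h>

#define STEP 4

int main(void)
{
	unsigned long off;

	for (off = 0; off < 32; off += STEP) {
		printf("visit %lu\n", off);
		/* off += STEP;	<- an extra advance here would skip 4, 12, 20, ... */
	}
	return 0;
}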
@@ -3542,6 +3546,10 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage, int nr)
int err, i;
si = swp_swap_info(entry);
+ if (WARN_ON_ONCE(!si)) {
+ pr_err("%s%08lx\n", Bad_file, entry.val);
+ return -EINVAL;
+ }
offset = swp_offset(entry);
VM_WARN_ON(nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
diff --git a/mm/truncate.c b/mm/truncate.c
index e2e115adfbc5..76d8fcd89bd0 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -548,8 +548,6 @@ int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
- if (folio_test_dirty(folio))
- return 0;
if (folio_mapped(folio))
unmap_mapping_folio(folio);
BUG_ON(folio_mapped(folio));
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index af3dfc3633db..d06453fa8aba 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -18,6 +18,7 @@
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include "internal.h"
+#include "swap.h"
static __always_inline
bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end)
@@ -1076,16 +1077,14 @@ out:
return err;
}
-static int move_swap_pte(struct mm_struct *mm,
+static int move_swap_pte(struct mm_struct *mm, struct vm_area_struct *dst_vma,
unsigned long dst_addr, unsigned long src_addr,
pte_t *dst_pte, pte_t *src_pte,
pte_t orig_dst_pte, pte_t orig_src_pte,
pmd_t *dst_pmd, pmd_t dst_pmdval,
- spinlock_t *dst_ptl, spinlock_t *src_ptl)
+ spinlock_t *dst_ptl, spinlock_t *src_ptl,
+ struct folio *src_folio)
{
- if (!pte_swp_exclusive(orig_src_pte))
- return -EBUSY;
-
double_pt_lock(dst_ptl, src_ptl);
if (!is_pte_pages_stable(dst_pte, src_pte, orig_dst_pte, orig_src_pte,
@@ -1094,6 +1093,16 @@ static int move_swap_pte(struct mm_struct *mm,
return -EAGAIN;
}
+ /*
+ * The src_folio resides in the swapcache, requiring an update to its
+ * index and mapping to align with the dst_vma, where a swap-in may
+ * occur and hit the swapcache after moving the PTE.
+ */
+ if (src_folio) {
+ folio_move_anon_rmap(src_folio, dst_vma);
+ src_folio->index = linear_page_index(dst_vma, dst_addr);
+ }
+
orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte);
set_pte_at(mm, dst_addr, dst_pte, orig_src_pte);
double_pt_unlock(dst_ptl, src_ptl);
@@ -1141,6 +1150,7 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
__u64 mode)
{
swp_entry_t entry;
+ struct swap_info_struct *si = NULL;
pte_t orig_src_pte, orig_dst_pte;
pte_t src_folio_pte;
spinlock_t *src_ptl, *dst_ptl;
@@ -1240,6 +1250,7 @@ retry:
*/
if (!src_folio) {
struct folio *folio;
+ bool locked;
/*
* Pin the page while holding the lock to be sure the
@@ -1259,14 +1270,28 @@ retry:
goto out;
}
+ locked = folio_trylock(folio);
+ /*
+ * We avoid waiting for folio lock with a raised
+ * refcount for large folios because extra refcounts
+ * will result in split_folio() failing later and
+ * retrying. If multiple tasks are trying to move a
+ * large folio we can end up livelocking.
+ */
+ if (!locked && folio_test_large(folio)) {
+ spin_unlock(src_ptl);
+ err = -EAGAIN;
+ goto out;
+ }
+
folio_get(folio);
src_folio = folio;
src_folio_pte = orig_src_pte;
spin_unlock(src_ptl);
- if (!folio_trylock(src_folio)) {
- pte_unmap(&orig_src_pte);
- pte_unmap(&orig_dst_pte);
+ if (!locked) {
+ pte_unmap(src_pte);
+ pte_unmap(dst_pte);
src_pte = dst_pte = NULL;
/* now we can block and wait */
folio_lock(src_folio);
@@ -1282,8 +1307,8 @@ retry:
/* at this point we have src_folio locked */
if (folio_test_large(src_folio)) {
/* split_folio() can block */
- pte_unmap(&orig_src_pte);
- pte_unmap(&orig_dst_pte);
+ pte_unmap(src_pte);
+ pte_unmap(dst_pte);
src_pte = dst_pte = NULL;
err = split_folio(src_folio);
if (err)
@@ -1308,8 +1333,8 @@ retry:
goto out;
}
if (!anon_vma_trylock_write(src_anon_vma)) {
- pte_unmap(&orig_src_pte);
- pte_unmap(&orig_dst_pte);
+ pte_unmap(src_pte);
+ pte_unmap(dst_pte);
src_pte = dst_pte = NULL;
/* now we can block and wait */
anon_vma_lock_write(src_anon_vma);
@@ -1322,11 +1347,13 @@ retry:
orig_dst_pte, orig_src_pte, dst_pmd,
dst_pmdval, dst_ptl, src_ptl, src_folio);
} else {
+ struct folio *folio = NULL;
+
entry = pte_to_swp_entry(orig_src_pte);
if (non_swap_entry(entry)) {
if (is_migration_entry(entry)) {
- pte_unmap(&orig_src_pte);
- pte_unmap(&orig_dst_pte);
+ pte_unmap(src_pte);
+ pte_unmap(dst_pte);
src_pte = dst_pte = NULL;
migration_entry_wait(mm, src_pmd, src_addr);
err = -EAGAIN;
@@ -1335,9 +1362,53 @@ retry:
goto out;
}
- err = move_swap_pte(mm, dst_addr, src_addr, dst_pte, src_pte,
- orig_dst_pte, orig_src_pte, dst_pmd,
- dst_pmdval, dst_ptl, src_ptl);
+ if (!pte_swp_exclusive(orig_src_pte)) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ si = get_swap_device(entry);
+ if (unlikely(!si)) {
+ err = -EAGAIN;
+ goto out;
+ }
+ /*
+ * Verify the existence of the swapcache. If present, the folio's
+ * index and mapping must be updated even when the PTE is a swap
+ * entry. The anon_vma lock is not taken during this process since
+ * the folio has already been unmapped, and the swap entry is
+ * exclusive, preventing rmap walks.
+ *
+ * For large folios, return -EBUSY immediately, as split_folio()
+ * also returns -EBUSY when attempting to split unmapped large
+ * folios in the swapcache. This issue needs to be resolved
+ * separately to allow proper handling.
+ */
+ if (!src_folio)
+ folio = filemap_get_folio(swap_address_space(entry),
+ swap_cache_index(entry));
+ if (!IS_ERR_OR_NULL(folio)) {
+ if (folio_test_large(folio)) {
+ err = -EBUSY;
+ folio_put(folio);
+ goto out;
+ }
+ src_folio = folio;
+ src_folio_pte = orig_src_pte;
+ if (!folio_trylock(src_folio)) {
+ pte_unmap(src_pte);
+ pte_unmap(dst_pte);
+ src_pte = dst_pte = NULL;
+ put_swap_device(si);
+ si = NULL;
+ /* now we can block and wait */
+ folio_lock(src_folio);
+ goto retry;
+ }
+ }
+ err = move_swap_pte(mm, dst_vma, dst_addr, src_addr, dst_pte, src_pte,
+ orig_dst_pte, orig_src_pte, dst_pmd, dst_pmdval,
+ dst_ptl, src_ptl, src_folio);
}
out:
@@ -1354,6 +1425,8 @@ out:
if (src_pte)
pte_unmap(src_pte);
mmu_notifier_invalidate_range_end(&range);
+ if (si)
+ put_swap_device(si);
return err;
}
diff --git a/mm/vma.c b/mm/vma.c
index af1d549b179c..96bcb372c90e 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -1509,24 +1509,28 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
{
struct vm_area_struct *vma = vmg->vma;
+ unsigned long start = vmg->start;
+ unsigned long end = vmg->end;
struct vm_area_struct *merged;
/* First, try to merge. */
merged = vma_merge_existing_range(vmg);
if (merged)
return merged;
+ if (vmg_nomem(vmg))
+ return ERR_PTR(-ENOMEM);
/* Split any preceding portion of the VMA. */
- if (vma->vm_start < vmg->start) {
- int err = split_vma(vmg->vmi, vma, vmg->start, 1);
+ if (vma->vm_start < start) {
+ int err = split_vma(vmg->vmi, vma, start, 1);
if (err)
return ERR_PTR(err);
}
/* Split any trailing portion of the VMA. */
- if (vma->vm_end > vmg->end) {
- int err = split_vma(vmg->vmi, vma, vmg->end, 0);
+ if (vma->vm_end > end) {
+ int err = split_vma(vmg->vmi, vma, end, 0);
if (err)
return ERR_PTR(err);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index a6e7acebe9ad..61981ee1c9d2 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -586,13 +586,13 @@ static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
mask |= PGTBL_PGD_MODIFIED;
err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
if (err)
- return err;
+ break;
} while (pgd++, addr = next, addr != end);
if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
arch_sync_kernel_mappings(start, end);
- return 0;
+ return err;
}
/*
diff --git a/mm/zswap.c b/mm/zswap.c
index ac9d299e7d0c..23365e76a3ce 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -43,7 +43,7 @@
* statistics
**********************************/
/* The number of compressed pages currently stored in zswap */
-atomic_long_t zswap_stored_pages = ATOMIC_INIT(0);
+atomic_long_t zswap_stored_pages = ATOMIC_LONG_INIT(0);
/*
* The statistics below are not protected from concurrent access for
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index e45187b88220..41be38264493 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -131,7 +131,8 @@ int vlan_check_real_dev(struct net_device *real_dev,
{
const char *name = real_dev->name;
- if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
+ if (real_dev->features & NETIF_F_VLAN_CHALLENGED ||
+ real_dev->type != ARPHRD_ETHER) {
pr_info("VLANs not supported on %s\n", name);
NL_SET_ERR_MSG_MOD(extack, "VLANs not supported on device");
return -EOPNOTSUPP;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index fec11e576f31..b22078b67972 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -632,7 +632,8 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
hci_conn_hold(conn->hcon);
- list_add(&chan->list, &conn->chan_l);
+ /* Append to the list since the order matters for ECRED */
+ list_add_tail(&chan->list, &conn->chan_l);
}
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
@@ -3771,7 +3772,11 @@ static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
struct l2cap_ecred_conn_rsp *rsp_flex =
container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);
- if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
+ /* Check if the channel is for an outgoing connection or if it wasn't
+ * deferred, since in those cases it must be skipped.

+ */
+ if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
+ !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
return;
/* Reset ident so only one response is sent */
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index f53304cb09db..621c555f639b 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -9660,6 +9660,9 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
eir_precalc_len(sizeof(conn->dev_class)));
+ if (!skb)
+ return;
+
ev = skb_put(skb, sizeof(*ev));
bacpy(&ev->addr.bdaddr, &conn->dst);
ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
@@ -10413,6 +10416,8 @@ void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
+ if (!skb)
+ return;
ev = skb_put(skb, sizeof(*ev));
bacpy(&ev->addr.bdaddr, bdaddr);
diff --git a/net/core/dev.c b/net/core/dev.c
index 1b252e9459fd..30da277c5a6f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2141,21 +2141,15 @@ int register_netdevice_notifier_dev_net(struct net_device *dev,
struct notifier_block *nb,
struct netdev_net_notifier *nn)
{
- struct net *net = dev_net(dev);
int err;
- /* rtnl_net_lock() assumes dev is not yet published by
- * register_netdevice().
- */
- DEBUG_NET_WARN_ON_ONCE(!list_empty(&dev->dev_list));
-
- rtnl_net_lock(net);
- err = __register_netdevice_notifier_net(net, nb, false);
+ rtnl_net_dev_lock(dev);
+ err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
if (!err) {
nn->nb = nb;
list_add(&nn->list, &dev->net_notifier_list);
}
- rtnl_net_unlock(net);
+ rtnl_net_dev_unlock(dev);
return err;
}
@@ -4763,7 +4757,7 @@ use_local_napi:
* we have to raise NET_RX_SOFTIRQ.
*/
if (!sd->in_net_rx_action)
- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
#ifdef CONFIG_RPS
diff --git a/net/core/gro.c b/net/core/gro.c
index 78b320b63174..0ad549b07e03 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -653,6 +653,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
skb->pkt_type = PACKET_HOST;
skb->encapsulation = 0;
+ skb->ip_summed = CHECKSUM_NONE;
skb_shinfo(skb)->gso_type = 0;
skb_shinfo(skb)->gso_size = 0;
if (unlikely(skb->slow_gro)) {
diff --git a/net/core/scm.c b/net/core/scm.c
index 4f6a14babe5a..733c0cbd393d 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -282,6 +282,16 @@ efault:
}
EXPORT_SYMBOL(put_cmsg);
+int put_cmsg_notrunc(struct msghdr *msg, int level, int type, int len,
+ void *data)
+{
+ /* Don't produce truncated CMSGs */
+ if (!msg->msg_control || msg->msg_controllen < CMSG_LEN(len))
+ return -ETOOSMALL;
+
+ return put_cmsg(msg, level, type, len, data);
+}
+
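put_cmsg_notrunc() fails with -ETOOSMALL rather than delivering a truncated cmsg, so the receiver's msg_controllen must already cover the full CMSG_LEN(len). A hedged userspace sketch of sizing the control buffer (generic int payload, not the devmem structures):

/* Illustration only: size msg_control so a cmsg of 'len' bytes fits whole. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	char control[CMSG_SPACE(sizeof(int))];
	struct msghdr msg;

	memset(&msg, 0, sizeof(msg));
	msg.msg_control = control;
	msg.msg_controllen = sizeof(control);

	/* the kernel-side check is: msg_controllen >= CMSG_LEN(len) */
	printf("CMSG_LEN=%zu CMSG_SPACE=%zu controllen=%zu\n",
	       (size_t)CMSG_LEN(sizeof(int)), (size_t)CMSG_SPACE(sizeof(int)),
	       (size_t)msg.msg_controllen);
	return 0;
}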
void put_cmsg_scm_timestamping64(struct msghdr *msg, struct scm_timestamping_internal *tss_internal)
{
struct scm_timestamping64 tss;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7b03b64fdcb2..b1c81687e9d8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -6033,11 +6033,11 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
skb->offload_fwd_mark = 0;
skb->offload_l3_fwd_mark = 0;
#endif
+ ipvs_reset(skb);
if (!xnet)
return;
- ipvs_reset(skb);
skb->mark = 0;
skb_clear_tstamp(skb);
}
diff --git a/net/core/sock.c b/net/core/sock.c
index eae2ae70a2e0..6c0e87f97fa4 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2246,6 +2246,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
get_net_track(net, &sk->ns_tracker, priority);
sock_inuse_add(net, 1);
} else {
+ net_passive_inc(net);
__netns_tracker_alloc(net, &sk->ns_tracker,
false, priority);
}
@@ -2270,6 +2271,7 @@ EXPORT_SYMBOL(sk_alloc);
static void __sk_destruct(struct rcu_head *head)
{
struct sock *sk = container_of(head, struct sock, sk_rcu);
+ struct net *net = sock_net(sk);
struct sk_filter *filter;
if (sk->sk_destruct)
@@ -2301,14 +2303,28 @@ static void __sk_destruct(struct rcu_head *head)
put_cred(sk->sk_peer_cred);
put_pid(sk->sk_peer_pid);
- if (likely(sk->sk_net_refcnt))
- put_net_track(sock_net(sk), &sk->ns_tracker);
- else
- __netns_tracker_free(sock_net(sk), &sk->ns_tracker, false);
-
+ if (likely(sk->sk_net_refcnt)) {
+ put_net_track(net, &sk->ns_tracker);
+ } else {
+ __netns_tracker_free(net, &sk->ns_tracker, false);
+ net_passive_dec(net);
+ }
sk_prot_free(sk->sk_prot_creator, sk);
}
+void sk_net_refcnt_upgrade(struct sock *sk)
+{
+ struct net *net = sock_net(sk);
+
+ WARN_ON_ONCE(sk->sk_net_refcnt);
+ __netns_tracker_free(net, &sk->ns_tracker, false);
+ net_passive_dec(net);
+ sk->sk_net_refcnt = 1;
+ get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
+ sock_inuse_add(net, 1);
+}
+EXPORT_SYMBOL_GPL(sk_net_refcnt_upgrade);
+
void sk_destruct(struct sock *sk)
{
bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
@@ -2405,6 +2421,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
* is not properly dismantling its kernel sockets at netns
* destroy time.
*/
+ net_passive_inc(sock_net(newsk));
__netns_tracker_alloc(sock_net(newsk), &newsk->ns_tracker,
false, priority);
}
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index ad2741f1346a..c7769ee0d9c5 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -34,6 +34,7 @@ static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
static int max_skb_frags = MAX_SKB_FRAGS;
static int min_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE;
+static int netdev_budget_usecs_min = 2 * USEC_PER_SEC / HZ;
static int net_msg_warn; /* Unused, but still a sysctl */
@@ -587,7 +588,7 @@ static struct ctl_table net_core_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
+ .extra1 = &netdev_budget_usecs_min,
},
{
.procname = "fb_tunnels_only_for_init_net",
diff --git a/net/ethtool/cabletest.c b/net/ethtool/cabletest.c
index f22051f33868..84096f6b0236 100644
--- a/net/ethtool/cabletest.c
+++ b/net/ethtool/cabletest.c
@@ -72,8 +72,8 @@ int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info)
dev = req_info.dev;
rtnl_lock();
- phydev = ethnl_req_get_phydev(&req_info,
- tb[ETHTOOL_A_CABLE_TEST_HEADER],
+ phydev = ethnl_req_get_phydev(&req_info, tb,
+ ETHTOOL_A_CABLE_TEST_HEADER,
info->extack);
if (IS_ERR_OR_NULL(phydev)) {
ret = -EOPNOTSUPP;
@@ -339,8 +339,8 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info)
goto out_dev_put;
rtnl_lock();
- phydev = ethnl_req_get_phydev(&req_info,
- tb[ETHTOOL_A_CABLE_TEST_TDR_HEADER],
+ phydev = ethnl_req_get_phydev(&req_info, tb,
+ ETHTOOL_A_CABLE_TEST_TDR_HEADER,
info->extack);
if (IS_ERR_OR_NULL(phydev)) {
ret = -EOPNOTSUPP;
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index d88e9080643b..b97374b508f6 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -6,6 +6,7 @@
#include <linux/rtnetlink.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/phy_link_topology.h>
+#include <net/netdev_queues.h>
#include "netlink.h"
#include "common.h"
@@ -771,6 +772,21 @@ int ethtool_check_ops(const struct ethtool_ops *ops)
return 0;
}
+void ethtool_ringparam_get_cfg(struct net_device *dev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kparam,
+ struct netlink_ext_ack *extack)
+{
+ memset(param, 0, sizeof(*param));
+ memset(kparam, 0, sizeof(*kparam));
+
+ param->cmd = ETHTOOL_GRINGPARAM;
+ dev->ethtool_ops->get_ringparam(dev, param, kparam, extack);
+
+ /* Driver gives us current state, we want to return current config */
+ kparam->tcp_data_split = dev->cfg->hds_config;
+}
+
static void ethtool_init_tsinfo(struct kernel_ethtool_ts_info *info)
{
memset(info, 0, sizeof(*info));
diff --git a/net/ethtool/common.h b/net/ethtool/common.h
index 58e9e7db06f9..a1088c2441d0 100644
--- a/net/ethtool/common.h
+++ b/net/ethtool/common.h
@@ -51,6 +51,12 @@ int ethtool_check_max_channel(struct net_device *dev,
struct ethtool_channels channels,
struct genl_info *info);
int ethtool_check_rss_ctx_busy(struct net_device *dev, u32 rss_context);
+
+void ethtool_ringparam_get_cfg(struct net_device *dev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kparam,
+ struct netlink_ext_ack *extack);
+
int __ethtool_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info);
int ethtool_get_ts_info_by_phc(struct net_device *dev,
struct kernel_ethtool_ts_info *info,
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 7609ce2b2c5e..1c3ba2247776 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -2059,8 +2059,8 @@ static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
{
- struct ethtool_ringparam ringparam, max = { .cmd = ETHTOOL_GRINGPARAM };
struct kernel_ethtool_ringparam kernel_ringparam;
+ struct ethtool_ringparam ringparam, max;
int ret;
if (!dev->ethtool_ops->set_ringparam || !dev->ethtool_ops->get_ringparam)
@@ -2069,7 +2069,7 @@ static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
return -EFAULT;
- dev->ethtool_ops->get_ringparam(dev, &max, &kernel_ringparam, NULL);
+ ethtool_ringparam_get_cfg(dev, &max, &kernel_ringparam, NULL);
/* ensure new ring parameters are within the maximums */
if (ringparam.rx_pending > max.rx_max_pending ||
diff --git a/net/ethtool/linkstate.c b/net/ethtool/linkstate.c
index af19e1bed303..05a5f72c99fa 100644
--- a/net/ethtool/linkstate.c
+++ b/net/ethtool/linkstate.c
@@ -103,7 +103,7 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base,
struct phy_device *phydev;
int ret;
- phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_LINKSTATE_HEADER],
+ phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_LINKSTATE_HEADER,
info->extack);
if (IS_ERR(phydev)) {
ret = PTR_ERR(phydev);
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index b4c45207fa32..734849a57369 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -211,7 +211,7 @@ int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info,
}
struct phy_device *ethnl_req_get_phydev(const struct ethnl_req_info *req_info,
- const struct nlattr *header,
+ struct nlattr **tb, unsigned int header,
struct netlink_ext_ack *extack)
{
struct phy_device *phydev;
@@ -225,8 +225,8 @@ struct phy_device *ethnl_req_get_phydev(const struct ethnl_req_info *req_info,
return req_info->dev->phydev;
phydev = phy_link_topo_get_phy(req_info->dev, req_info->phy_index);
- if (!phydev) {
- NL_SET_ERR_MSG_ATTR(extack, header,
+ if (!phydev && tb) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[header],
"no phy matching phyindex");
return ERR_PTR(-ENODEV);
}
diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
index ff69ca0715de..ec6ab5443a6f 100644
--- a/net/ethtool/netlink.h
+++ b/net/ethtool/netlink.h
@@ -275,7 +275,8 @@ static inline void ethnl_parse_header_dev_put(struct ethnl_req_info *req_info)
* ethnl_req_get_phydev() - Gets the phy_device targeted by this request,
* if any. Must be called under rtnl_lock().
* @req_info: The ethnl request to get the phy from.
- * @header: The netlink header, used for error reporting.
+ * @tb: The netlink attributes array, for error reporting.
+ * @header: The netlink header index, used for error reporting.
* @extack: The netlink extended ACK, for error reporting.
*
* The caller must hold RTNL, until it's done interacting with the returned
@@ -289,7 +290,7 @@ static inline void ethnl_parse_header_dev_put(struct ethnl_req_info *req_info)
* is returned.
*/
struct phy_device *ethnl_req_get_phydev(const struct ethnl_req_info *req_info,
- const struct nlattr *header,
+ struct nlattr **tb, unsigned int header,
struct netlink_ext_ack *extack);
/**
diff --git a/net/ethtool/phy.c b/net/ethtool/phy.c
index ed8f690f6bac..e067cc234419 100644
--- a/net/ethtool/phy.c
+++ b/net/ethtool/phy.c
@@ -125,7 +125,7 @@ static int ethnl_phy_parse_request(struct ethnl_req_info *req_base,
struct phy_req_info *req_info = PHY_REQINFO(req_base);
struct phy_device *phydev;
- phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PHY_HEADER],
+ phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PHY_HEADER,
extack);
if (!phydev)
return 0;
diff --git a/net/ethtool/plca.c b/net/ethtool/plca.c
index d95d92f173a6..e1f7820a6158 100644
--- a/net/ethtool/plca.c
+++ b/net/ethtool/plca.c
@@ -62,7 +62,7 @@ static int plca_get_cfg_prepare_data(const struct ethnl_req_info *req_base,
struct phy_device *phydev;
int ret;
- phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PLCA_HEADER],
+ phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PLCA_HEADER,
info->extack);
// check that the PHY device is available and connected
if (IS_ERR_OR_NULL(phydev)) {
@@ -152,7 +152,7 @@ ethnl_set_plca(struct ethnl_req_info *req_info, struct genl_info *info)
bool mod = false;
int ret;
- phydev = ethnl_req_get_phydev(req_info, tb[ETHTOOL_A_PLCA_HEADER],
+ phydev = ethnl_req_get_phydev(req_info, tb, ETHTOOL_A_PLCA_HEADER,
info->extack);
// check that the PHY device is available and connected
if (IS_ERR_OR_NULL(phydev))
@@ -211,7 +211,7 @@ static int plca_get_status_prepare_data(const struct ethnl_req_info *req_base,
struct phy_device *phydev;
int ret;
- phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PLCA_HEADER],
+ phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PLCA_HEADER,
info->extack);
// check that the PHY device is available and connected
if (IS_ERR_OR_NULL(phydev)) {
diff --git a/net/ethtool/pse-pd.c b/net/ethtool/pse-pd.c
index 2819e2ba6be2..4f6b99eab2a6 100644
--- a/net/ethtool/pse-pd.c
+++ b/net/ethtool/pse-pd.c
@@ -64,7 +64,7 @@ static int pse_prepare_data(const struct ethnl_req_info *req_base,
if (ret < 0)
return ret;
- phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PSE_HEADER],
+ phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PSE_HEADER,
info->extack);
if (IS_ERR(phydev))
return -ENODEV;
@@ -261,7 +261,7 @@ ethnl_set_pse(struct ethnl_req_info *req_info, struct genl_info *info)
struct phy_device *phydev;
int ret;
- phydev = ethnl_req_get_phydev(req_info, tb[ETHTOOL_A_PSE_HEADER],
+ phydev = ethnl_req_get_phydev(req_info, tb, ETHTOOL_A_PSE_HEADER,
info->extack);
ret = ethnl_set_pse_validate(phydev, info);
if (ret)
diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c
index 7839bfd1ac6a..aeedd5ec6b8c 100644
--- a/net/ethtool/rings.c
+++ b/net/ethtool/rings.c
@@ -215,17 +215,16 @@ ethnl_set_rings_validate(struct ethnl_req_info *req_info,
static int
ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info)
{
- struct kernel_ethtool_ringparam kernel_ringparam = {};
- struct ethtool_ringparam ringparam = {};
+ struct kernel_ethtool_ringparam kernel_ringparam;
struct net_device *dev = req_info->dev;
+ struct ethtool_ringparam ringparam;
struct nlattr **tb = info->attrs;
const struct nlattr *err_attr;
bool mod = false;
int ret;
- dev->ethtool_ops->get_ringparam(dev, &ringparam,
- &kernel_ringparam, info->extack);
- kernel_ringparam.tcp_data_split = dev->cfg->hds_config;
+ ethtool_ringparam_get_cfg(dev, &ringparam, &kernel_ringparam,
+ info->extack);
ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod);
ethnl_update_u32(&ringparam.rx_mini_pending,
diff --git a/net/ethtool/stats.c b/net/ethtool/stats.c
index 038a2558f052..3ca8eb2a3b31 100644
--- a/net/ethtool/stats.c
+++ b/net/ethtool/stats.c
@@ -138,7 +138,7 @@ static int stats_prepare_data(const struct ethnl_req_info *req_base,
struct phy_device *phydev;
int ret;
- phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_STATS_HEADER],
+ phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_STATS_HEADER,
info->extack);
if (IS_ERR(phydev))
return PTR_ERR(phydev);
diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c
index 6b76c05caba4..f6a67109beda 100644
--- a/net/ethtool/strset.c
+++ b/net/ethtool/strset.c
@@ -309,7 +309,7 @@ static int strset_prepare_data(const struct ethnl_req_info *req_base,
return 0;
}
- phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_HEADER_FLAGS],
+ phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_HEADER_FLAGS,
info->extack);
/* phydev can be NULL, check for errors only */
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 285678d8ce07..57df7c1d2faa 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2457,14 +2457,12 @@ static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
*/
memset(&dmabuf_cmsg, 0, sizeof(dmabuf_cmsg));
dmabuf_cmsg.frag_size = copy;
- err = put_cmsg(msg, SOL_SOCKET, SO_DEVMEM_LINEAR,
- sizeof(dmabuf_cmsg), &dmabuf_cmsg);
- if (err || msg->msg_flags & MSG_CTRUNC) {
- msg->msg_flags &= ~MSG_CTRUNC;
- if (!err)
- err = -ETOOSMALL;
+ err = put_cmsg_notrunc(msg, SOL_SOCKET,
+ SO_DEVMEM_LINEAR,
+ sizeof(dmabuf_cmsg),
+ &dmabuf_cmsg);
+ if (err)
goto out;
- }
sent += copy;
@@ -2518,16 +2516,12 @@ static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
offset += copy;
remaining_len -= copy;
- err = put_cmsg(msg, SOL_SOCKET,
- SO_DEVMEM_DMABUF,
- sizeof(dmabuf_cmsg),
- &dmabuf_cmsg);
- if (err || msg->msg_flags & MSG_CTRUNC) {
- msg->msg_flags &= ~MSG_CTRUNC;
- if (!err)
- err = -ETOOSMALL;
+ err = put_cmsg_notrunc(msg, SOL_SOCKET,
+ SO_DEVMEM_DMABUF,
+ sizeof(dmabuf_cmsg),
+ &dmabuf_cmsg);
+ if (err)
goto out;
- }
atomic_long_inc(&niov->pp_ref_count);
tcp_xa_pool.netmems[tcp_xa_pool.idx++] = skb_frag_netmem(frag);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index b089b08e9617..dfdb7a4608a8 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -815,12 +815,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
/* In sequence, PAWS is OK. */
- /* TODO: We probably should defer ts_recent change once
- * we take ownership of @req.
- */
- if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
- WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);
-
if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
/* Truncate SYN, it is out of window starting
at tcp_rsk(req)->rcv_isn + 1. */
@@ -869,6 +863,10 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
if (!child)
goto listen_overflow;
+ if (own_req && tmp_opt.saw_tstamp &&
+ !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
+ tcp_sk(child)->rx_opt.ts_recent = tmp_opt.rcv_tsval;
+
if (own_req && rsk_drop_req(req)) {
reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 2308665b51c5..2dfac79dc78b 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -13,12 +13,15 @@
#include <net/tcp.h>
#include <net/protocol.h>
-static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
+static void tcp_gso_tstamp(struct sk_buff *skb, struct sk_buff *gso_skb,
unsigned int seq, unsigned int mss)
{
+ u32 flags = skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP;
+ u32 ts_seq = skb_shinfo(gso_skb)->tskey;
+
while (skb) {
if (before(ts_seq, seq + mss)) {
- skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
+ skb_shinfo(skb)->tx_flags |= flags;
skb_shinfo(skb)->tskey = ts_seq;
return;
}
@@ -193,8 +196,8 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
th = tcp_hdr(skb);
seq = ntohl(th->seq);
- if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
- tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);
+ if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP))
+ tcp_gso_tstamp(segs, gso_skb, seq, mss);
newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index a5be6e4ed326..ecfca59f31f1 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -321,13 +321,17 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
/* clear destructor to avoid skb_segment assigning it to tail */
copy_dtor = gso_skb->destructor == sock_wfree;
- if (copy_dtor)
+ if (copy_dtor) {
gso_skb->destructor = NULL;
+ gso_skb->sk = NULL;
+ }
segs = skb_segment(gso_skb, features);
if (IS_ERR_OR_NULL(segs)) {
- if (copy_dtor)
+ if (copy_dtor) {
gso_skb->destructor = sock_wfree;
+ gso_skb->sk = sk;
+ }
return segs;
}
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
index ff7e734e335b..7d574f5132e2 100644
--- a/net/ipv6/ila/ila_lwt.c
+++ b/net/ipv6/ila/ila_lwt.c
@@ -88,13 +88,15 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
goto drop;
}
- if (ilwt->connected) {
+ /* cache only if we don't create a dst reference loop */
+ if (ilwt->connected && orig_dst->lwtstate != dst->lwtstate) {
local_bh_disable();
dst_cache_set_ip6(&ilwt->dst_cache, dst, &fl6.saddr);
local_bh_enable();
}
}
+ skb_dst_drop(skb);
skb_dst_set(skb, dst);
return dst_output(net, sk, skb);
diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
index 0ac4283acdf2..7c05ac846646 100644
--- a/net/ipv6/rpl_iptunnel.c
+++ b/net/ipv6/rpl_iptunnel.c
@@ -262,10 +262,18 @@ static int rpl_input(struct sk_buff *skb)
{
struct dst_entry *orig_dst = skb_dst(skb);
struct dst_entry *dst = NULL;
+ struct lwtunnel_state *lwtst;
struct rpl_lwt *rlwt;
int err;
- rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);
+ /* We cannot dereference "orig_dst" once ip6_route_input() or
+ * skb_dst_drop() is called. However, in order to detect a dst loop, we
+ * need the address of its lwtstate. So, save the address of lwtstate
+ * now and use it later as a comparison.
+ */
+ lwtst = orig_dst->lwtstate;
+
+ rlwt = rpl_lwt_lwtunnel(lwtst);
local_bh_disable();
dst = dst_cache_get(&rlwt->cache);
@@ -280,7 +288,9 @@ static int rpl_input(struct sk_buff *skb)
if (!dst) {
ip6_route_input(skb);
dst = skb_dst(skb);
- if (!dst->error) {
+
+ /* cache only if we don't create a dst reference loop */
+ if (!dst->error && lwtst != dst->lwtstate) {
local_bh_disable();
dst_cache_set_ip6(&rlwt->cache, dst,
&ipv6_hdr(skb)->saddr);
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 33833b2064c0..51583461ae29 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -472,10 +472,18 @@ static int seg6_input_core(struct net *net, struct sock *sk,
{
struct dst_entry *orig_dst = skb_dst(skb);
struct dst_entry *dst = NULL;
+ struct lwtunnel_state *lwtst;
struct seg6_lwt *slwt;
int err;
- slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
+ /* We cannot dereference "orig_dst" once ip6_route_input() or
+ * skb_dst_drop() is called. However, in order to detect a dst loop, we
+ * need the address of its lwtstate. So, save the address of lwtstate
+ * now and use it later as a comparison.
+ */
+ lwtst = orig_dst->lwtstate;
+
+ slwt = seg6_lwt_lwtunnel(lwtst);
local_bh_disable();
dst = dst_cache_get(&slwt->cache);
@@ -490,7 +498,9 @@ static int seg6_input_core(struct net *net, struct sock *sk,
if (!dst) {
ip6_route_input(skb);
dst = skb_dst(skb);
- if (!dst->error) {
+
+ /* cache only if we don't create a dst reference loop */
+ if (!dst->error && lwtst != dst->lwtstate) {
local_bh_disable();
dst_cache_set_ip6(&slwt->cache, dst,
&ipv6_hdr(skb)->saddr);
diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c
index 06fb8e6944b0..7a0cae9a8111 100644
--- a/net/llc/llc_s_ac.c
+++ b/net/llc/llc_s_ac.c
@@ -24,7 +24,7 @@
#include <net/llc_s_ac.h>
#include <net/llc_s_ev.h>
#include <net/llc_sap.h>
-
+#include <net/sock.h>
/**
* llc_sap_action_unitdata_ind - forward UI PDU to network layer
@@ -40,6 +40,26 @@ int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb)
return 0;
}
+static int llc_prepare_and_xmit(struct sk_buff *skb)
+{
+ struct llc_sap_state_ev *ev = llc_sap_ev(skb);
+ struct sk_buff *nskb;
+ int rc;
+
+ rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
+ if (rc)
+ return rc;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+
+ if (skb->sk)
+ skb_set_owner_w(nskb, skb->sk);
+
+ return dev_queue_xmit(nskb);
+}
+
/**
* llc_sap_action_send_ui - sends UI PDU resp to UNITDATA REQ to MAC layer
* @sap: SAP
@@ -52,17 +72,12 @@ int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb)
int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb)
{
struct llc_sap_state_ev *ev = llc_sap_ev(skb);
- int rc;
llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_ui_cmd(skb);
- rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
- if (likely(!rc)) {
- skb_get(skb);
- rc = dev_queue_xmit(skb);
- }
- return rc;
+
+ return llc_prepare_and_xmit(skb);
}
/**
@@ -77,17 +92,12 @@ int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb)
int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
{
struct llc_sap_state_ev *ev = llc_sap_ev(skb);
- int rc;
llc_pdu_header_init(skb, LLC_PDU_TYPE_U_XID, ev->saddr.lsap,
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
- rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
- if (likely(!rc)) {
- skb_get(skb);
- rc = dev_queue_xmit(skb);
- }
- return rc;
+
+ return llc_prepare_and_xmit(skb);
}
/**
@@ -133,17 +143,12 @@ out:
int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb)
{
struct llc_sap_state_ev *ev = llc_sap_ev(skb);
- int rc;
llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_test_cmd(skb);
- rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
- if (likely(!rc)) {
- skb_get(skb);
- rc = dev_queue_xmit(skb);
- }
- return rc;
+
+ return llc_prepare_and_xmit(skb);
}
int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb)
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index 299d38e9e863..35349a7f16cb 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -116,8 +116,14 @@ void drv_remove_interface(struct ieee80211_local *local,
sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER;
- /* Remove driver debugfs entries */
- ieee80211_debugfs_recreate_netdev(sdata, sdata->vif.valid_links);
+ /*
+ * Remove driver debugfs entries.
+ * The virtual monitor interface doesn't get a debugfs
+ * entry, so it's exempt here.
+ */
+ if (sdata != rcu_access_pointer(local->monitor_sdata))
+ ieee80211_debugfs_recreate_netdev(sdata,
+ sdata->vif.valid_links);
trace_drv_remove_interface(local, sdata);
local->ops->remove_interface(&local->hw, &sdata->vif);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 0ea7e77860b7..738de269e13f 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1206,16 +1206,17 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
return;
}
- RCU_INIT_POINTER(local->monitor_sdata, NULL);
- mutex_unlock(&local->iflist_mtx);
-
- synchronize_net();
-
+ clear_bit(SDATA_STATE_RUNNING, &sdata->state);
ieee80211_link_release_channel(&sdata->deflink);
if (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
drv_remove_interface(local, sdata);
+ RCU_INIT_POINTER(local->monitor_sdata, NULL);
+ mutex_unlock(&local->iflist_mtx);
+
+ synchronize_net();
+
kfree(sdata);
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index f8d52b3b0d0e..36a9be9a66c8 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -4959,6 +4959,7 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
parse_params.start = bss_ies->data;
parse_params.len = bss_ies->len;
parse_params.bss = cbss;
+ parse_params.link_id = -1;
bss_elems = ieee802_11_parse_elems_full(&parse_params);
if (!bss_elems) {
ret = false;
diff --git a/net/mac80211/parse.c b/net/mac80211/parse.c
index cd318c1c67be..6da39c864f45 100644
--- a/net/mac80211/parse.c
+++ b/net/mac80211/parse.c
@@ -47,6 +47,9 @@ struct ieee80211_elems_parse {
/* The EPCS Multi-Link element in the original elements */
const struct element *ml_epcs_elem;
+ bool multi_link_inner;
+ bool skip_vendor;
+
/*
* scratch buffer that can be used for various element parsing related
* tasks, e.g., element de-fragmentation etc.
@@ -152,12 +155,11 @@ ieee80211_parse_extension_element(u32 *crc,
switch (le16_get_bits(mle->control,
IEEE80211_ML_CONTROL_TYPE)) {
case IEEE80211_ML_CONTROL_TYPE_BASIC:
- if (elems_parse->ml_basic_elem) {
+ if (elems_parse->multi_link_inner) {
elems->parse_error |=
IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC;
break;
}
- elems_parse->ml_basic_elem = elem;
break;
case IEEE80211_ML_CONTROL_TYPE_RECONF:
elems_parse->ml_reconf_elem = elem;
@@ -399,6 +401,9 @@ _ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params,
IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
break;
case WLAN_EID_VENDOR_SPECIFIC:
+ if (elems_parse->skip_vendor)
+ break;
+
if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
pos[2] == 0xf2) {
/* Microsoft OUI (00:50:F2) */
@@ -866,21 +871,36 @@ ieee80211_mle_get_sta_prof(struct ieee80211_elems_parse *elems_parse,
}
}
-static void ieee80211_mle_parse_link(struct ieee80211_elems_parse *elems_parse,
- struct ieee80211_elems_parse_params *params)
+static const struct element *
+ieee80211_prep_mle_link_parse(struct ieee80211_elems_parse *elems_parse,
+ struct ieee80211_elems_parse_params *params,
+ struct ieee80211_elems_parse_params *sub)
{
struct ieee802_11_elems *elems = &elems_parse->elems;
struct ieee80211_mle_per_sta_profile *prof;
- struct ieee80211_elems_parse_params sub = {
- .mode = params->mode,
- .action = params->action,
- .from_ap = params->from_ap,
- .link_id = -1,
- };
- ssize_t ml_len = elems->ml_basic_len;
- const struct element *non_inherit = NULL;
+ const struct element *tmp;
+ ssize_t ml_len;
const u8 *end;
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ return NULL;
+
+ for_each_element_extid(tmp, WLAN_EID_EXT_EHT_MULTI_LINK,
+ elems->ie_start, elems->total_len) {
+ const struct ieee80211_multi_link_elem *mle =
+ (void *)tmp->data + 1;
+
+ if (!ieee80211_mle_size_ok(tmp->data + 1, tmp->datalen - 1))
+ continue;
+
+ if (le16_get_bits(mle->control, IEEE80211_ML_CONTROL_TYPE) !=
+ IEEE80211_ML_CONTROL_TYPE_BASIC)
+ continue;
+
+ elems_parse->ml_basic_elem = tmp;
+ break;
+ }
+
ml_len = cfg80211_defragment_element(elems_parse->ml_basic_elem,
elems->ie_start,
elems->total_len,
@@ -891,26 +911,26 @@ static void ieee80211_mle_parse_link(struct ieee80211_elems_parse *elems_parse,
WLAN_EID_FRAGMENT);
if (ml_len < 0)
- return;
+ return NULL;
elems->ml_basic = (const void *)elems_parse->scratch_pos;
elems->ml_basic_len = ml_len;
elems_parse->scratch_pos += ml_len;
if (params->link_id == -1)
- return;
+ return NULL;
ieee80211_mle_get_sta_prof(elems_parse, params->link_id);
prof = elems->prof;
if (!prof)
- return;
+ return NULL;
/* check if we have the 4 bytes for the fixed part in assoc response */
if (elems->sta_prof_len < sizeof(*prof) + prof->sta_info_len - 1 + 4) {
elems->prof = NULL;
elems->sta_prof_len = 0;
- return;
+ return NULL;
}
/*
@@ -919,13 +939,17 @@ static void ieee80211_mle_parse_link(struct ieee80211_elems_parse *elems_parse,
* the -1 is because the 'sta_info_len' is accounted to as part of the
* per-STA profile, but not part of the 'u8 variable[]' portion.
*/
- sub.start = prof->variable + prof->sta_info_len - 1 + 4;
+ sub->start = prof->variable + prof->sta_info_len - 1 + 4;
end = (const u8 *)prof + elems->sta_prof_len;
- sub.len = end - sub.start;
+ sub->len = end - sub->start;
- non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
- sub.start, sub.len);
- _ieee802_11_parse_elems_full(&sub, elems_parse, non_inherit);
+ sub->mode = params->mode;
+ sub->action = params->action;
+ sub->from_ap = params->from_ap;
+ sub->link_id = -1;
+
+ return cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+ sub->start, sub->len);
}
static void
@@ -973,15 +997,19 @@ ieee80211_mle_defrag_epcs(struct ieee80211_elems_parse *elems_parse)
struct ieee802_11_elems *
ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params)
{
+ struct ieee80211_elems_parse_params sub = {};
struct ieee80211_elems_parse *elems_parse;
- struct ieee802_11_elems *elems;
const struct element *non_inherit = NULL;
- u8 *nontransmitted_profile;
- int nontransmitted_profile_len = 0;
+ struct ieee802_11_elems *elems;
size_t scratch_len = 3 * params->len;
+ bool multi_link_inner = false;
BUILD_BUG_ON(offsetof(typeof(*elems_parse), elems) != 0);
+ /* cannot parse for both a specific link and non-transmitted BSS */
+ if (WARN_ON(params->link_id >= 0 && params->bss))
+ return NULL;
+
elems_parse = kzalloc(struct_size(elems_parse, scratch, scratch_len),
GFP_ATOMIC);
if (!elems_parse)
@@ -998,34 +1026,51 @@ ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params)
ieee80211_clear_tpe(&elems->tpe);
ieee80211_clear_tpe(&elems->csa_tpe);
- nontransmitted_profile = elems_parse->scratch_pos;
- nontransmitted_profile_len =
- ieee802_11_find_bssid_profile(params->start, params->len,
- elems, params->bss,
- nontransmitted_profile);
- elems_parse->scratch_pos += nontransmitted_profile_len;
- non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
- nontransmitted_profile,
- nontransmitted_profile_len);
+ /*
+ * If we're looking for a non-transmitted BSS then we cannot at
+ * the same time be looking for a second link as the two can only
+ * appear in the same frame carrying info for different BSSes.
+ *
+ * In any case, we only look for one at a time, as encoded by
+ * the WARN_ON above.
+ */
+ if (params->bss) {
+ int nontx_len =
+ ieee802_11_find_bssid_profile(params->start,
+ params->len,
+ elems, params->bss,
+ elems_parse->scratch_pos);
+ sub.start = elems_parse->scratch_pos;
+ sub.mode = params->mode;
+ sub.len = nontx_len;
+ sub.action = params->action;
+ sub.link_id = params->link_id;
+
+ /* consume the space used for non-transmitted profile */
+ elems_parse->scratch_pos += nontx_len;
+
+ non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+ sub.start, nontx_len);
+ } else {
+ /* must always parse to get elems_parse->ml_basic_elem */
+ non_inherit = ieee80211_prep_mle_link_parse(elems_parse, params,
+ &sub);
+ multi_link_inner = true;
+ }
+ elems_parse->skip_vendor =
+ cfg80211_find_elem(WLAN_EID_VENDOR_SPECIFIC,
+ sub.start, sub.len);
elems->crc = _ieee802_11_parse_elems_full(params, elems_parse,
non_inherit);
- /* Override with nontransmitted profile, if found */
- if (nontransmitted_profile_len) {
- struct ieee80211_elems_parse_params sub = {
- .mode = params->mode,
- .start = nontransmitted_profile,
- .len = nontransmitted_profile_len,
- .action = params->action,
- .link_id = params->link_id,
- };
-
+ /* Override with nontransmitted/per-STA profile if found */
+ if (sub.len) {
+ elems_parse->multi_link_inner = multi_link_inner;
+ elems_parse->skip_vendor = false;
_ieee802_11_parse_elems_full(&sub, elems_parse, NULL);
}
- ieee80211_mle_parse_link(elems_parse, params);
-
ieee80211_mle_defrag_reconf(elems_parse);
ieee80211_mle_defrag_epcs(elems_parse);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index f6b631faf4f7..7f02bd5891eb 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -687,7 +687,7 @@ void __ieee80211_flush_queues(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
unsigned int queues, bool drop)
{
- if (!local->ops->flush)
+ if (!local->ops->flush && !drop)
return;
/*
@@ -714,7 +714,8 @@ void __ieee80211_flush_queues(struct ieee80211_local *local,
}
}
- drv_flush(local, sdata, queues, drop);
+ if (local->ops->flush)
+ drv_flush(local, sdata, queues, drop);
ieee80211_wake_queues_by_reason(&local->hw, queues,
IEEE80211_QUEUE_STOP_REASON_FLUSH,
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 572d160edca3..7868207c4e9d 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -977,7 +977,7 @@ static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
struct mptcp_pm_addr_entry *entry,
- bool needs_id)
+ bool needs_id, bool replace)
{
struct mptcp_pm_addr_entry *cur, *del_entry = NULL;
unsigned int addr_max;
@@ -1017,6 +1017,17 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
if (entry->addr.id)
goto out;
+ /* allow callers that only need to look up the local
+ * addr's id to skip replacement. This allows them to
+ * avoid calling synchronize_rcu in the packet recv
+ * path.
+ */
+ if (!replace) {
+ kfree(entry);
+ ret = cur->addr.id;
+ goto out;
+ }
+
pernet->addrs--;
entry->addr.id = cur->addr.id;
list_del_rcu(&cur->list);
@@ -1165,7 +1176,7 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc
entry->ifindex = 0;
entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT;
entry->lsk = NULL;
- ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true);
+ ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true, false);
if (ret < 0)
kfree(entry);
@@ -1433,7 +1444,8 @@ int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info)
}
}
ret = mptcp_pm_nl_append_new_local_addr(pernet, entry,
- !mptcp_pm_has_addr_attr_id(attr, info));
+ !mptcp_pm_has_addr_attr_id(attr, info),
+ true);
if (ret < 0) {
GENL_SET_ERR_MSG_FMT(info, "too many addresses or duplicate one: %d", ret);
goto out_free;
@@ -1514,11 +1526,6 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
if (mptcp_pm_is_userspace(msk))
goto next;
- if (list_empty(&msk->conn_list)) {
- mptcp_pm_remove_anno_addr(msk, addr, false);
- goto next;
- }
-
lock_sock(sk);
remove_subflow = mptcp_lookup_subflow_by_saddr(&msk->conn_list, addr);
mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index f6a207958459..ad21925af061 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -1199,6 +1199,8 @@ static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
pr_debug("TCP fallback already done (msk=%p)\n", msk);
return;
}
+ if (WARN_ON_ONCE(!READ_ONCE(msk->allow_infinite_fallback)))
+ return;
set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
}
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index fd021cf8286e..9f18217dddc8 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1142,7 +1142,6 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
if (data_len == 0) {
pr_debug("infinite mapping received\n");
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
- subflow->map_data_len = 0;
return MAPPING_INVALID;
}
@@ -1286,18 +1285,6 @@ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ss
mptcp_schedule_work(sk);
}
-static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
-{
- struct mptcp_sock *msk = mptcp_sk(subflow->conn);
-
- if (subflow->mp_join)
- return false;
- else if (READ_ONCE(msk->csum_enabled))
- return !subflow->valid_csum_seen;
- else
- return READ_ONCE(msk->allow_infinite_fallback);
-}
-
static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
@@ -1393,7 +1380,7 @@ fallback:
return true;
}
- if (!subflow_can_fallback(subflow) && subflow->map_data_len) {
+ if (!READ_ONCE(msk->allow_infinite_fallback)) {
/* fatal protocol error, close the socket.
* subflow_error_report() will introduce the appropriate barriers
*/
@@ -1772,10 +1759,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
* needs it.
* Update ns_tracker to current stack trace and refcounted tracker.
*/
- __netns_tracker_free(net, &sf->sk->ns_tracker, false);
- sf->sk->sk_net_refcnt = 1;
- get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
- sock_inuse_add(net, 1);
+ sk_net_refcnt_upgrade(sf->sk);
err = tcp_set_ulp(sf->sk, "mptcp");
if (err)
goto err_free;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 85311226183a..a53ea60d0a78 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -795,16 +795,6 @@ static int netlink_release(struct socket *sock)
sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
- /* Because struct net might disappear soon, do not keep a pointer. */
- if (!sk->sk_net_refcnt && sock_net(sk) != &init_net) {
- __netns_tracker_free(sock_net(sk), &sk->ns_tracker, false);
- /* Because of deferred_put_nlk_sk and use of work queue,
- * it is possible netns will be freed before this socket.
- */
- sock_net_set(sk, &init_net);
- __netns_tracker_alloc(&init_net, &sk->ns_tracker,
- false, GFP_KERNEL);
- }
call_rcu(&nlk->rcu, deferred_put_nlk_sk);
return 0;
}
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 0581c53e6517..3cc2f303bf78 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -504,12 +504,8 @@ bool rds_tcp_tune(struct socket *sock)
release_sock(sk);
return false;
}
- /* Update ns_tracker to current stack trace and refcounted tracker */
- __netns_tracker_free(net, &sk->ns_tracker, false);
-
- sk->sk_net_refcnt = 1;
- netns_tracker_alloc(net, &sk->ns_tracker, GFP_KERNEL);
- sock_inuse_add(net, 1);
+ sk_net_refcnt_upgrade(sk);
+ put_net(net);
}
rtn = net_generic(net, rds_tcp_netid);
if (rtn->sndbuf_size > 0) {
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 5e740c486203..a64a0cab1bf7 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -360,7 +360,6 @@ struct rxrpc_peer {
u8 pmtud_jumbo; /* Max jumbo packets for the MTU */
bool ackr_adv_pmtud; /* T if the peer advertises path-MTU */
unsigned int ackr_max_data; /* Maximum data advertised by peer */
- seqcount_t mtu_lock; /* Lockless MTU access management */
unsigned int if_mtu; /* Local interface MTU (- hdrsize) for this peer */
unsigned int max_data; /* Maximum packet data capacity for this peer */
unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 9047ba13bd31..24aceb183c2c 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -810,9 +810,7 @@ static void rxrpc_input_ack_trailer(struct rxrpc_call *call, struct sk_buff *skb
if (max_mtu < peer->max_data) {
trace_rxrpc_pmtud_reduce(peer, sp->hdr.serial, max_mtu,
rxrpc_pmtud_reduce_ack);
- write_seqcount_begin(&peer->mtu_lock);
peer->max_data = max_mtu;
- write_seqcount_end(&peer->mtu_lock);
}
max_data = umin(max_mtu, peer->max_data);
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index bc283da9ee40..7f4729234957 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -130,9 +130,7 @@ static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, unsigned int mtu)
peer->pmtud_bad = max_data + 1;
trace_rxrpc_pmtud_reduce(peer, 0, max_data, rxrpc_pmtud_reduce_icmp);
- write_seqcount_begin(&peer->mtu_lock);
peer->max_data = max_data;
- write_seqcount_end(&peer->mtu_lock);
}
}
@@ -408,13 +406,8 @@ void rxrpc_input_probe_for_pmtud(struct rxrpc_connection *conn, rxrpc_serial_t a
}
max_data = umin(max_data, peer->ackr_max_data);
- if (max_data != peer->max_data) {
- preempt_disable();
- write_seqcount_begin(&peer->mtu_lock);
+ if (max_data != peer->max_data)
peer->max_data = max_data;
- write_seqcount_end(&peer->mtu_lock);
- preempt_enable();
- }
jumbo = max_data + sizeof(struct rxrpc_jumbo_header);
jumbo /= RXRPC_JUMBO_SUBPKTLEN;
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 0fcc87f0409f..56e09d161a97 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -235,7 +235,6 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp,
peer->service_conns = RB_ROOT;
seqlock_init(&peer->service_conn_lock);
spin_lock_init(&peer->lock);
- seqcount_init(&peer->mtu_lock);
peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
peer->recent_srtt_us = UINT_MAX;
peer->cong_ssthresh = RXRPC_TX_MAX_WINDOW;
@@ -325,10 +324,10 @@ void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
hash_key = rxrpc_peer_hash_key(local, &peer->srx);
rxrpc_init_peer(local, peer, hash_key);
- spin_lock_bh(&rxnet->peer_hash_lock);
+ spin_lock(&rxnet->peer_hash_lock);
hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
- spin_unlock_bh(&rxnet->peer_hash_lock);
+ spin_unlock(&rxnet->peer_hash_lock);
}
/*
diff --git a/net/rxrpc/rxperf.c b/net/rxrpc/rxperf.c
index 7ef93407be83..e848a4777b8c 100644
--- a/net/rxrpc/rxperf.c
+++ b/net/rxrpc/rxperf.c
@@ -478,6 +478,18 @@ static int rxperf_deliver_request(struct rxperf_call *call)
call->unmarshal++;
fallthrough;
case 2:
+ ret = rxperf_extract_data(call, true);
+ if (ret < 0)
+ return ret;
+
+ /* Deal with the terminal magic cookie. */
+ call->iov_len = 4;
+ call->kvec[0].iov_len = call->iov_len;
+ call->kvec[0].iov_base = call->tmp;
+ iov_iter_kvec(&call->iter, READ, call->kvec, 1, call->iov_len);
+ call->unmarshal++;
+ fallthrough;
+ case 3:
ret = rxperf_extract_data(call, false);
if (ret < 0)
return ret;
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index ca6984541edb..3e6cb35baf25 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -3337,10 +3337,7 @@ int smc_create_clcsk(struct net *net, struct sock *sk, int family)
* which need net ref.
*/
sk = smc->clcsock->sk;
- __netns_tracker_free(net, &sk->ns_tracker, false);
- sk->sk_net_refcnt = 1;
- get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
- sock_inuse_add(net, 1);
+ sk_net_refcnt_upgrade(sk);
return 0;
}
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index cb279eb9ac4b..7ce5e28a6c03 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1674,12 +1674,14 @@ static void remove_cache_proc_entries(struct cache_detail *cd)
}
}
-#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
struct proc_dir_entry *p;
struct sunrpc_net *sn;
+ if (!IS_ENABLED(CONFIG_PROC_FS))
+ return 0;
+
sn = net_generic(net, sunrpc_net_id);
cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
if (cd->procfs == NULL)
@@ -1707,12 +1709,6 @@ out_nomem:
remove_cache_proc_entries(cd);
return -ENOMEM;
}
-#else /* CONFIG_PROC_FS */
-static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
-{
- return 0;
-}
-#endif
void __init cache_initialize(void)
{
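
The #ifdef CONFIG_PROC_FS block and its empty fallback are folded into one function with an IS_ENABLED() early return. The branch is a compile-time constant, and the procfs helpers used below it (proc_mkdir() and friends) have stubs when CONFIG_PROC_FS is off, so the dead body still compiles and is then discarded. Generic illustration of the pattern, not sunrpc code:

#include <linux/kconfig.h>	/* IS_ENABLED() */

static int example_create_entries(void)
{
	if (!IS_ENABLED(CONFIG_PROC_FS))
		return 0;	/* constant-folded away, but still type-checked */

	/* ... procfs registration would go here ... */
	return 0;
}
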
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index cef623ea1506..9b45fbdc90ca 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -864,8 +864,6 @@ void rpc_signal_task(struct rpc_task *task)
if (!rpc_task_set_rpc_status(task, -ERESTARTSYS))
return;
trace_rpc_task_signalled(task, task->tk_action);
- set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
- smp_mb__after_atomic();
queue = READ_ONCE(task->tk_waitqueue);
if (queue)
rpc_wake_up_queued_task(queue, task);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index cb3bd12f5818..72e5a01df3d3 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1541,10 +1541,7 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
newlen = error;
if (protocol == IPPROTO_TCP) {
- __netns_tracker_free(net, &sock->sk->ns_tracker, false);
- sock->sk->sk_net_refcnt = 1;
- get_net_track(net, &sock->sk->ns_tracker, GFP_KERNEL);
- sock_inuse_add(net, 1);
+ sk_net_refcnt_upgrade(sock->sk);
if ((error = kernel_listen(sock, 64)) < 0)
goto bummer;
}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index c60936d8cef7..83cc095846d3 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1941,12 +1941,8 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt,
goto out;
}
- if (protocol == IPPROTO_TCP) {
- __netns_tracker_free(xprt->xprt_net, &sock->sk->ns_tracker, false);
- sock->sk->sk_net_refcnt = 1;
- get_net_track(xprt->xprt_net, &sock->sk->ns_tracker, GFP_KERNEL);
- sock_inuse_add(xprt->xprt_net, 1);
- }
+ if (protocol == IPPROTO_TCP)
+ sk_net_refcnt_upgrade(sock->sk);
filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
if (IS_ERR(filp))
@@ -2581,7 +2577,15 @@ static void xs_tls_handshake_done(void *data, int status, key_serial_t peerid)
struct sock_xprt *lower_transport =
container_of(lower_xprt, struct sock_xprt, xprt);
- lower_transport->xprt_err = status ? -EACCES : 0;
+ switch (status) {
+ case 0:
+ case -EACCES:
+ case -ETIMEDOUT:
+ lower_transport->xprt_err = status;
+ break;
+ default:
+ lower_transport->xprt_err = -EACCES;
+ }
complete(&lower_transport->handshake_done);
xprt_put(lower_xprt);
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 34945de1fb1f..f0e613d97664 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2102,6 +2102,7 @@ restart_locked:
goto out_sock_put;
}
+ sock_put(other);
goto lookup;
}
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index d7d3da0f6833..e87267fbb442 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4220,6 +4220,11 @@ static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags)
if (flags[flag])
*mntrflags |= (1<<flag);
+ /* cooked monitor mode is incompatible with other modes */
+ if (*mntrflags & MONITOR_FLAG_COOK_FRAMES &&
+ *mntrflags != MONITOR_FLAG_COOK_FRAMES)
+ return -EOPNOTSUPP;
+
*mntrflags |= MONITOR_FLAG_CHANGED;
return 0;
@@ -16529,7 +16534,7 @@ static int nl80211_assoc_ml_reconf(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- err = cfg80211_assoc_ml_reconf(rdev, dev, links, rem_links);
+ err = -EOPNOTSUPP;
out:
for (link_id = 0; link_id < ARRAY_SIZE(links); link_id++)
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2dd0533e7660..212e9561aae7 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -407,7 +407,8 @@ static bool is_an_alpha2(const char *alpha2)
{
if (!alpha2)
return false;
- return isalpha(alpha2[0]) && isalpha(alpha2[1]);
+ return isascii(alpha2[0]) && isalpha(alpha2[0]) &&
+ isascii(alpha2[1]) && isalpha(alpha2[1]);
}
static bool alpha2_equal(const char *alpha2_x, const char *alpha2_y)
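
The added isascii() guard matters because the kernel ctype table covers the whole 0..255 range and marks Latin-1 letters (0xC0 and up) as alphabetic, so a bare isalpha() can accept non-ASCII bytes in an alleged country code. The tightened predicate, written as a standalone helper for illustration (the helper name is made up):

#include <linux/ctype.h>

/* Accept only 7-bit ASCII letters; high bytes are rejected even if the
 * ctype table happens to classify them as alphabetic.
 */
static bool is_ascii_alpha(char c)
{
	return isascii(c) && isalpha(c);
}
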
diff --git a/rust/kernel/firmware.rs b/rust/kernel/firmware.rs
index c5162fdc95ff..f04b058b09b2 100644
--- a/rust/kernel/firmware.rs
+++ b/rust/kernel/firmware.rs
@@ -115,3 +115,219 @@ unsafe impl Send for Firmware {}
// SAFETY: `Firmware` only holds a pointer to a C `struct firmware`, references to which are safe to
// be used from any thread.
unsafe impl Sync for Firmware {}
+
+/// Create firmware .modinfo entries.
+///
+/// This macro is the counterpart of the C macro `MODULE_FIRMWARE()`, but instead of taking
+/// simple string literals, which are already covered by the `firmware` field of
+/// [`crate::prelude::module!`], it allows the caller to pass a builder type, based on the
+/// [`ModInfoBuilder`], which can create the firmware modinfo strings in a more flexible way.
+///
+/// Drivers should extend the [`ModInfoBuilder`] with their own driver specific builder type.
+///
+/// The `builder` argument must be a type which implements the following function.
+///
+/// `const fn create(module_name: &'static CStr) -> ModInfoBuilder`
+///
+/// `create` should pass the `module_name` to the [`ModInfoBuilder`] and, with its help,
+/// construct the corresponding firmware modinfo.
+///
+/// Typically, such contracts would be enforced by a trait, however traits do not (yet) support
+/// const functions.
+///
+/// # Example
+///
+/// ```
+/// # mod module_firmware_test {
+/// # use kernel::firmware;
+/// # use kernel::prelude::*;
+/// #
+/// # struct MyModule;
+/// #
+/// # impl kernel::Module for MyModule {
+/// # fn init(_module: &'static ThisModule) -> Result<Self> {
+/// # Ok(Self)
+/// # }
+/// # }
+/// #
+/// #
+/// struct Builder<const N: usize>;
+///
+/// impl<const N: usize> Builder<N> {
+/// const DIR: &'static str = "vendor/chip/";
+/// const FILES: [&'static str; 3] = [ "foo", "bar", "baz" ];
+///
+/// const fn create(module_name: &'static kernel::str::CStr) -> firmware::ModInfoBuilder<N> {
+/// let mut builder = firmware::ModInfoBuilder::new(module_name);
+///
+/// let mut i = 0;
+/// while i < Self::FILES.len() {
+/// builder = builder.new_entry()
+/// .push(Self::DIR)
+/// .push(Self::FILES[i])
+/// .push(".bin");
+///
+/// i += 1;
+/// }
+///
+/// builder
+/// }
+/// }
+///
+/// module! {
+/// type: MyModule,
+/// name: "module_firmware_test",
+/// author: "Rust for Linux",
+/// description: "module_firmware! test module",
+/// license: "GPL",
+/// }
+///
+/// kernel::module_firmware!(Builder);
+/// # }
+/// ```
+#[macro_export]
+macro_rules! module_firmware {
+ // The argument is the builder type without the const generic, since it's deferred from within
+ // this macro. Hence, we can neither use `expr` nor `ty`.
+ ($($builder:tt)*) => {
+ const _: () = {
+ const __MODULE_FIRMWARE_PREFIX: &'static $crate::str::CStr = if cfg!(MODULE) {
+ $crate::c_str!("")
+ } else {
+ <LocalModule as $crate::ModuleMetadata>::NAME
+ };
+
+ #[link_section = ".modinfo"]
+ #[used]
+ static __MODULE_FIRMWARE: [u8; $($builder)*::create(__MODULE_FIRMWARE_PREFIX)
+ .build_length()] = $($builder)*::create(__MODULE_FIRMWARE_PREFIX).build();
+ };
+ };
+}
+
+/// Builder for firmware module info.
+///
+/// [`ModInfoBuilder`] is a helper component to flexibly compose firmware path strings for the
+/// .modinfo section in const context.
+///
+/// Therefore the [`ModInfoBuilder`] provides the methods [`ModInfoBuilder::new_entry`] and
+/// [`ModInfoBuilder::push`], where the latter is used to push path components and the former to
+/// mark the beginning of a new path string.
+///
+/// [`ModInfoBuilder`] is meant to be used in combination with [`kernel::module_firmware!`].
+///
+/// The const generic `N`, as well as the `module_name` parameter of [`ModInfoBuilder::new`], is an
+/// internal implementation detail and is supplied through the above macro.
+pub struct ModInfoBuilder<const N: usize> {
+ buf: [u8; N],
+ n: usize,
+ module_name: &'static CStr,
+}
+
+impl<const N: usize> ModInfoBuilder<N> {
+ /// Create an empty builder instance.
+ pub const fn new(module_name: &'static CStr) -> Self {
+ Self {
+ buf: [0; N],
+ n: 0,
+ module_name,
+ }
+ }
+
+ const fn push_internal(mut self, bytes: &[u8]) -> Self {
+ let mut j = 0;
+
+ if N == 0 {
+ self.n += bytes.len();
+ return self;
+ }
+
+ while j < bytes.len() {
+ if self.n < N {
+ self.buf[self.n] = bytes[j];
+ }
+ self.n += 1;
+ j += 1;
+ }
+ self
+ }
+
+ /// Push an additional path component.
+ ///
+ /// Append path components to the [`ModInfoBuilder`] instance. Paths need to be separated
+ /// with [`ModInfoBuilder::new_entry`].
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use kernel::firmware::ModInfoBuilder;
+ ///
+ /// # const DIR: &str = "vendor/chip/";
+ /// # const fn no_run<const N: usize>(builder: ModInfoBuilder<N>) {
+ /// let builder = builder.new_entry()
+ /// .push(DIR)
+ /// .push("foo.bin")
+ /// .new_entry()
+ /// .push(DIR)
+ /// .push("bar.bin");
+ /// # }
+ /// ```
+ pub const fn push(self, s: &str) -> Self {
+ // Check whether there has been an initial call to `new_entry()`.
+ if N != 0 && self.n == 0 {
+ crate::build_error!("Must call new_entry() before push().");
+ }
+
+ self.push_internal(s.as_bytes())
+ }
+
+ const fn push_module_name(self) -> Self {
+ let mut this = self;
+ let module_name = this.module_name;
+
+ if !this.module_name.is_empty() {
+ this = this.push_internal(module_name.as_bytes_with_nul());
+
+ if N != 0 {
+ // Re-use the space taken by the NULL terminator and swap it with the '.' separator.
+ this.buf[this.n - 1] = b'.';
+ }
+ }
+
+ this
+ }
+
+ /// Prepare the [`ModInfoBuilder`] for the next entry.
+ ///
+ /// This method acts as a separator between module firmware path entries.
+ ///
+ /// Must be called before constructing a new entry with subsequent calls to
+ /// [`ModInfoBuilder::push`].
+ ///
+ /// See [`ModInfoBuilder::push`] for an example.
+ pub const fn new_entry(self) -> Self {
+ self.push_internal(b"\0")
+ .push_module_name()
+ .push_internal(b"firmware=")
+ }
+
+ /// Build the byte array.
+ pub const fn build(self) -> [u8; N] {
+ // Add the final NULL terminator.
+ let this = self.push_internal(b"\0");
+
+ if this.n == N {
+ this.buf
+ } else {
+ crate::build_error!("Length mismatch.");
+ }
+ }
+}
+
+impl ModInfoBuilder<0> {
+ /// Return the length of the byte array to build.
+ pub const fn build_length(self) -> usize {
+ // Compensate for the NULL terminator added by `build`.
+ self.n + 1
+ }
+}
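
module_firmware!() and ModInfoBuilder produce the same kind of .modinfo payload that C drivers emit with MODULE_FIRMWARE(); the builder runs twice in const context, once with N == 0 so build_length() can size the array and once with the real N to fill it, prefixing each entry with the module name for built-in builds. For comparison, the C counterpart with illustrative paths:

#include <linux/module.h>

/* Each invocation places a "firmware=<path>" string in .modinfo. */
MODULE_FIRMWARE("vendor/chip/foo.bin");
MODULE_FIRMWARE("vendor/chip/bar.bin");
MODULE_FIRMWARE("vendor/chip/baz.bin");
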
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
index cdf94f4982df..110e59c64197 100644
--- a/rust/macros/module.rs
+++ b/rust/macros/module.rs
@@ -228,6 +228,10 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
kernel::ThisModule::from_ptr(core::ptr::null_mut())
}};
+ /// The `LocalModule` type is the type of the module created by `module!`,
+ /// `module_pci_driver!`, `module_platform_driver!`, etc.
+ type LocalModule = {type_};
+
impl kernel::ModuleMetadata for {type_} {{
const NAME: &'static kernel::str::CStr = kernel::c_str!(\"{name}\");
}}
diff --git a/scripts/package/install-extmod-build b/scripts/package/install-extmod-build
index 2966473b4660..b96538787f3d 100755
--- a/scripts/package/install-extmod-build
+++ b/scripts/package/install-extmod-build
@@ -63,7 +63,7 @@ if [ "${CC}" != "${HOSTCC}" ]; then
# Clear VPATH and srcroot because the source files reside in the output
# directory.
# shellcheck disable=SC2016 # $(MAKE) and $(build) will be expanded by Make
- "${MAKE}" run-command KBUILD_RUN_COMMAND='+$(MAKE) HOSTCC='"${CC}"' VPATH= srcroot=. $(build)='"${destdir}"/scripts
+ "${MAKE}" run-command KBUILD_RUN_COMMAND='+$(MAKE) HOSTCC='"${CC}"' VPATH= srcroot=. $(build)='"$(realpath --relative-base=. "${destdir}")"/scripts
rm -f "${destdir}/scripts/Kbuild"
fi
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 7c06ffd633d2..a5e730ffda57 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -180,7 +180,7 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
}
/*
- * Dump large security xattr values as a continuous ascii hexademical string.
+ * Dump large security xattr values as a continuous ascii hexadecimal string.
* (pr_debug is limited to 64 bytes.)
*/
static void dump_security_xattr_l(const char *prefix, const void *src,
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 377e57e9084f..0add782e73ba 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -169,7 +169,7 @@ static int is_unsupported_hmac_fs(struct dentry *dentry)
* and compare it against the stored security.evm xattr.
*
* For performance:
- * - use the previoulsy retrieved xattr value and length to calculate the
+ * - use the previously retrieved xattr value and length to calculate the
* HMAC.)
* - cache the verification result in the iint, when available.
*
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 24d09ea91b87..a4f284bd846c 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -149,6 +149,9 @@ struct ima_kexec_hdr {
#define IMA_CHECK_BLACKLIST 0x40000000
#define IMA_VERITY_REQUIRED 0x80000000
+/* Exclude non-action flags which are not rule-specific. */
+#define IMA_NONACTION_RULE_FLAGS (IMA_NONACTION_FLAGS & ~IMA_NEW_FILE)
+
#define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
IMA_HASH | IMA_APPRAISE_SUBMASK)
#define IMA_DONE_MASK (IMA_MEASURED | IMA_APPRAISED | IMA_AUDITED | \
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 9f9897a7c217..28b8b0db6f9b 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -269,10 +269,13 @@ static int process_measurement(struct file *file, const struct cred *cred,
mutex_lock(&iint->mutex);
if (test_and_clear_bit(IMA_CHANGE_ATTR, &iint->atomic_flags))
- /* reset appraisal flags if ima_inode_post_setattr was called */
+ /*
+ * Reset appraisal flags (action and non-action rule-specific)
+ * if ima_inode_post_setattr was called.
+ */
iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
- IMA_NONACTION_FLAGS);
+ IMA_NONACTION_RULE_FLAGS);
/*
 * Re-evaluate the file if either the xattr has changed or the
@@ -1011,9 +1014,9 @@ int process_buffer_measurement(struct mnt_idmap *idmap,
}
/*
- * Both LSM hooks and auxilary based buffer measurements are
- * based on policy. To avoid code duplication, differentiate
- * between the LSM hooks and auxilary buffer measurements,
+ * Both LSM hooks and auxiliary based buffer measurements are
+ * based on policy. To avoid code duplication, differentiate
+ * between the LSM hooks and auxiliary buffer measurements,
* retrieving the policy rule information only for the LSM hook
* buffer measurements.
*/
diff --git a/security/landlock/net.c b/security/landlock/net.c
index d5dcc4407a19..104b6c01fe50 100644
--- a/security/landlock/net.c
+++ b/security/landlock/net.c
@@ -63,8 +63,7 @@ static int current_check_access_socket(struct socket *const sock,
if (WARN_ON_ONCE(dom->num_layers < 1))
return -EACCES;
- /* Checks if it's a (potential) TCP socket. */
- if (sock->type != SOCK_STREAM)
+ if (!sk_is_tcp(sock->sk))
return 0;
/* Checks for minimal header length to safely read sa_family. */
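
sk_is_tcp() is a stricter test than the removed type check: it looks at both the socket type and the protocol, so only actual TCP sockets reach the Landlock port checks. Roughly what the helper does, quoted from memory of include/net/sock.h, so treat it as a sketch:

static inline bool sk_is_tcp(const struct sock *sk)
{
	return sk->sk_type == SOCK_STREAM &&
	       sk->sk_protocol == IPPROTO_TCP;
}
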
diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c
index 241ce44375b6..bff4e40a3093 100644
--- a/security/landlock/ruleset.c
+++ b/security/landlock/ruleset.c
@@ -124,7 +124,7 @@ create_rule(const struct landlock_id id,
return ERR_PTR(-ENOMEM);
RB_CLEAR_NODE(&new_rule->node);
if (is_object_pointer(id.type)) {
- /* This should be catched by insert_rule(). */
+ /* This should have been caught by insert_rule(). */
WARN_ON_ONCE(!id.key.object);
landlock_get_object(id.key.object);
}
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index cb66ec42a3f8..706f53e39b53 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -106,7 +106,7 @@ static struct snd_seq_client *clientptr(int clientid)
return clienttab[clientid];
}
-struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
+static struct snd_seq_client *client_use_ptr(int clientid, bool load_module)
{
unsigned long flags;
struct snd_seq_client *client;
@@ -126,7 +126,7 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
}
spin_unlock_irqrestore(&clients_lock, flags);
#ifdef CONFIG_MODULES
- if (!in_interrupt()) {
+ if (load_module) {
static DECLARE_BITMAP(client_requested, SNDRV_SEQ_GLOBAL_CLIENTS);
static DECLARE_BITMAP(card_requested, SNDRV_CARDS);
@@ -168,6 +168,20 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
return client;
}
+/* get snd_seq_client object for the given id quickly */
+struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
+{
+ return client_use_ptr(clientid, false);
+}
+
+/* get snd_seq_client object for the given id;
+ * if not found, retry after loading the modules
+ */
+static struct snd_seq_client *client_load_and_use_ptr(int clientid)
+{
+ return client_use_ptr(clientid, IS_ENABLED(CONFIG_MODULES));
+}
+
/* Take refcount and perform ioctl_mutex lock on the given client;
* used only for OSS sequencer
* Unlock via snd_seq_client_ioctl_unlock() below
@@ -176,7 +190,7 @@ bool snd_seq_client_ioctl_lock(int clientid)
{
struct snd_seq_client *client;
- client = snd_seq_client_use_ptr(clientid);
+ client = client_load_and_use_ptr(clientid);
if (!client)
return false;
mutex_lock(&client->ioctl_mutex);
@@ -1195,7 +1209,7 @@ static int snd_seq_ioctl_running_mode(struct snd_seq_client *client, void *arg)
int err = 0;
/* requested client number */
- cptr = snd_seq_client_use_ptr(info->client);
+ cptr = client_load_and_use_ptr(info->client);
if (cptr == NULL)
return -ENOENT; /* don't change !!! */
@@ -1257,7 +1271,7 @@ static int snd_seq_ioctl_get_client_info(struct snd_seq_client *client,
struct snd_seq_client *cptr;
/* requested client number */
- cptr = snd_seq_client_use_ptr(client_info->client);
+ cptr = client_load_and_use_ptr(client_info->client);
if (cptr == NULL)
return -ENOENT; /* don't change !!! */
@@ -1396,7 +1410,7 @@ static int snd_seq_ioctl_get_port_info(struct snd_seq_client *client, void *arg)
struct snd_seq_client *cptr;
struct snd_seq_client_port *port;
- cptr = snd_seq_client_use_ptr(info->addr.client);
+ cptr = client_load_and_use_ptr(info->addr.client);
if (cptr == NULL)
return -ENXIO;
@@ -1503,10 +1517,10 @@ static int snd_seq_ioctl_subscribe_port(struct snd_seq_client *client,
struct snd_seq_client *receiver = NULL, *sender = NULL;
struct snd_seq_client_port *sport = NULL, *dport = NULL;
- receiver = snd_seq_client_use_ptr(subs->dest.client);
+ receiver = client_load_and_use_ptr(subs->dest.client);
if (!receiver)
goto __end;
- sender = snd_seq_client_use_ptr(subs->sender.client);
+ sender = client_load_and_use_ptr(subs->sender.client);
if (!sender)
goto __end;
sport = snd_seq_port_use_ptr(sender, subs->sender.port);
@@ -1871,7 +1885,7 @@ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
struct snd_seq_client_pool *info = arg;
struct snd_seq_client *cptr;
- cptr = snd_seq_client_use_ptr(info->client);
+ cptr = client_load_and_use_ptr(info->client);
if (cptr == NULL)
return -ENOENT;
memset(info, 0, sizeof(*info));
@@ -1975,7 +1989,7 @@ static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client,
struct snd_seq_client_port *sport = NULL;
result = -EINVAL;
- sender = snd_seq_client_use_ptr(subs->sender.client);
+ sender = client_load_and_use_ptr(subs->sender.client);
if (!sender)
goto __end;
sport = snd_seq_port_use_ptr(sender, subs->sender.port);
@@ -2006,7 +2020,7 @@ static int snd_seq_ioctl_query_subs(struct snd_seq_client *client, void *arg)
struct list_head *p;
int i;
- cptr = snd_seq_client_use_ptr(subs->root.client);
+ cptr = client_load_and_use_ptr(subs->root.client);
if (!cptr)
goto __end;
port = snd_seq_port_use_ptr(cptr, subs->root.port);
@@ -2073,7 +2087,7 @@ static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client,
if (info->client < 0)
info->client = 0;
for (; info->client < SNDRV_SEQ_MAX_CLIENTS; info->client++) {
- cptr = snd_seq_client_use_ptr(info->client);
+ cptr = client_load_and_use_ptr(info->client);
if (cptr)
break; /* found */
}
@@ -2096,7 +2110,7 @@ static int snd_seq_ioctl_query_next_port(struct snd_seq_client *client,
struct snd_seq_client *cptr;
struct snd_seq_client_port *port = NULL;
- cptr = snd_seq_client_use_ptr(info->addr.client);
+ cptr = client_load_and_use_ptr(info->addr.client);
if (cptr == NULL)
return -ENXIO;
@@ -2193,7 +2207,7 @@ static int snd_seq_ioctl_client_ump_info(struct snd_seq_client *caller,
size = sizeof(struct snd_ump_endpoint_info);
else
size = sizeof(struct snd_ump_block_info);
- cptr = snd_seq_client_use_ptr(client);
+ cptr = client_load_and_use_ptr(client);
if (!cptr)
return -ENOENT;
@@ -2475,7 +2489,7 @@ int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event *ev,
if (check_event_type_and_length(ev))
return -EINVAL;
- cptr = snd_seq_client_use_ptr(client);
+ cptr = client_load_and_use_ptr(client);
if (cptr == NULL)
return -EINVAL;
@@ -2707,7 +2721,7 @@ void snd_seq_info_clients_read(struct snd_info_entry *entry,
/* list the client table */
for (c = 0; c < SNDRV_SEQ_MAX_CLIENTS; c++) {
- client = snd_seq_client_use_ptr(c);
+ client = client_load_and_use_ptr(c);
if (client == NULL)
continue;
if (client->type == NO_CLIENT) {
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index e393578cbe68..84ebf19f2883 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -222,6 +222,7 @@ comment "Set to Y if you want auto-loading the side codec driver"
config SND_HDA_CODEC_REALTEK
tristate "Build Realtek HD-audio codec support"
+ depends on INPUT
select SND_HDA_GENERIC
select SND_HDA_GENERIC_LEDS
select SND_HDA_SCODEC_COMPONENT
diff --git a/sound/pci/hda/cs35l56_hda_spi.c b/sound/pci/hda/cs35l56_hda_spi.c
index d4ee5bb7c486..903578466905 100644
--- a/sound/pci/hda/cs35l56_hda_spi.c
+++ b/sound/pci/hda/cs35l56_hda_spi.c
@@ -22,6 +22,9 @@ static int cs35l56_hda_spi_probe(struct spi_device *spi)
return -ENOMEM;
cs35l56->base.dev = &spi->dev;
+ ret = cs35l56_init_config_for_spi(&cs35l56->base, spi);
+ if (ret)
+ return ret;
#ifdef CS35L56_WAKE_HOLD_TIME_US
cs35l56->base.can_hibernate = true;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 67540e037309..e67c22c59f02 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2232,6 +2232,8 @@ static const struct snd_pci_quirk power_save_denylist[] = {
SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0),
/* KONTRON SinglePC may cause a stall at runtime resume */
SND_PCI_QUIRK(0x1734, 0x1232, "KONTRON SinglePC", 0),
+ /* Dell ALC3271 */
+ SND_PCI_QUIRK(0x1028, 0x0962, "Dell ALC3271", 0),
{}
};
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 224616fbec4f..d2a1f836dbbf 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3843,6 +3843,79 @@ static void alc225_shutup(struct hda_codec *codec)
}
}
+static void alc222_init(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
+ bool hp1_pin_sense, hp2_pin_sense;
+
+ if (!hp_pin)
+ return;
+
+ msleep(30);
+
+ hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+ hp2_pin_sense = snd_hda_jack_detect(codec, 0x14);
+
+ if (hp1_pin_sense || hp2_pin_sense) {
+ msleep(2);
+
+ if (hp1_pin_sense)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+ if (hp2_pin_sense)
+ snd_hda_codec_write(codec, 0x14, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+ msleep(75);
+
+ if (hp1_pin_sense)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
+ if (hp2_pin_sense)
+ snd_hda_codec_write(codec, 0x14, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
+
+ msleep(75);
+ }
+}
+
+static void alc222_shutup(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
+ bool hp1_pin_sense, hp2_pin_sense;
+
+ if (!hp_pin)
+ hp_pin = 0x21;
+
+ hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+ hp2_pin_sense = snd_hda_jack_detect(codec, 0x14);
+
+ if (hp1_pin_sense || hp2_pin_sense) {
+ msleep(2);
+
+ if (hp1_pin_sense)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+ if (hp2_pin_sense)
+ snd_hda_codec_write(codec, 0x14, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+ msleep(75);
+
+ if (hp1_pin_sense)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+ if (hp2_pin_sense)
+ snd_hda_codec_write(codec, 0x14, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+ msleep(75);
+ }
+ alc_auto_setup_eapd(codec, false);
+ alc_shutup_pins(codec);
+}
+
static void alc_default_init(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
@@ -4927,7 +5000,6 @@ static void alc298_fixup_samsung_amp_v2_4_amps(struct hda_codec *codec,
alc298_samsung_v2_init_amps(codec, 4);
}
-#if IS_REACHABLE(CONFIG_INPUT)
static void gpio2_mic_hotkey_event(struct hda_codec *codec,
struct hda_jack_callback *event)
{
@@ -5036,10 +5108,6 @@ static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec,
spec->kb_dev = NULL;
}
}
-#else /* INPUT */
-#define alc280_fixup_hp_gpio2_mic_hotkey NULL
-#define alc233_fixup_lenovo_line2_mic_hotkey NULL
-#endif /* INPUT */
static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
@@ -5053,6 +5121,16 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
}
}
+static void alc233_fixup_lenovo_low_en_micmute_led(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE)
+ spec->micmute_led_polarity = 1;
+ alc233_fixup_lenovo_line2_mic_hotkey(codec, fix, action);
+}
+
static void alc_hp_mute_disable(struct hda_codec *codec, unsigned int delay)
{
if (delay <= 0)
@@ -7621,6 +7699,7 @@ enum {
ALC275_FIXUP_DELL_XPS,
ALC293_FIXUP_LENOVO_SPK_NOISE,
ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
+ ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED,
ALC255_FIXUP_DELL_SPK_NOISE,
ALC225_FIXUP_DISABLE_MIC_VREF,
ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -7690,7 +7769,6 @@ enum {
ALC285_FIXUP_THINKPAD_X1_GEN7,
ALC285_FIXUP_THINKPAD_HEADSET_JACK,
ALC294_FIXUP_ASUS_ALLY,
- ALC294_FIXUP_ASUS_ALLY_X,
ALC294_FIXUP_ASUS_ALLY_PINS,
ALC294_FIXUP_ASUS_ALLY_VERBS,
ALC294_FIXUP_ASUS_ALLY_SPEAKER,
@@ -8616,6 +8694,10 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc233_fixup_lenovo_line2_mic_hotkey,
},
+ [ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc233_fixup_lenovo_low_en_micmute_led,
+ },
[ALC233_FIXUP_INTEL_NUC8_DMIC] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_inv_dmic,
@@ -9138,12 +9220,6 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC294_FIXUP_ASUS_ALLY_PINS
},
- [ALC294_FIXUP_ASUS_ALLY_X] = {
- .type = HDA_FIXUP_FUNC,
- .v.func = tas2781_fixup_i2c,
- .chained = true,
- .chain_id = ALC294_FIXUP_ASUS_ALLY_PINS
- },
[ALC294_FIXUP_ASUS_ALLY_PINS] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -10600,7 +10676,9 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8e1a, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ SND_PCI_QUIRK(0x1043, 0x1054, "ASUS G614FH/FM/FP", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x1043, 0x1074, "ASUS G614PH/PM/PP", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK),
SND_PCI_QUIRK(0x1043, 0x10a4, "ASUS TP3407SA", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
@@ -10608,21 +10686,25 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x10d3, "ASUS K6500ZC", ALC294_FIXUP_ASUS_SPK),
SND_PCI_QUIRK(0x1043, 0x1154, "ASUS TP3607SH", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x1043, 0x1194, "ASUS UM3406KA", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1204, "ASUS Strix G615JHR_JMR_JPR", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x1214, "ASUS Strix G615LH_LM_LP", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x1294, "ASUS B3405CVA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x12a3, "Asus N7691ZM", ALC269_FIXUP_ASUS_N7601ZM),
SND_PCI_QUIRK(0x1043, 0x12af, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x12b4, "ASUS B3405CCA / P3405CCA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650PY/PZ/PV/PU/PYV/PZV/PIV/PVV", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1460, "Asus VivoBook 15", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1463, "Asus GA402X/GA402N", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604VI/VC/VE/VG/VJ/VQ/VU/VV/VY/VZ", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603VQ/VU/VV/VJ/VI", ALC285_FIXUP_ASUS_HEADSET_MIC),
@@ -10644,7 +10726,6 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
SND_PCI_QUIRK(0x1043, 0x17f3, "ROG Ally NR2301L/X", ALC294_FIXUP_ASUS_ALLY),
- SND_PCI_QUIRK(0x1043, 0x1eb3, "ROG Ally X RC72LA", ALC294_FIXUP_ASUS_ALLY_X),
SND_PCI_QUIRK(0x1043, 0x1863, "ASUS UX6404VI/VV", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
@@ -10656,7 +10737,6 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
- SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1a63, "ASUS UX3405MA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1a83, "ASUS UM5302LA", ALC294_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
@@ -10699,14 +10779,28 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1f12, "ASUS UM5302", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1f1f, "ASUS H7604JI/JV/J3D", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x1f63, "ASUS P5405CSA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1f92, "ASUS ROG Flow X16", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x1fb3, "ASUS ROG Flow Z13 GZ302EA", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x3011, "ASUS B5605CVA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
+ SND_PCI_QUIRK(0x1043, 0x3061, "ASUS B3405CCA", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x3071, "ASUS B5405CCA", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x30c1, "ASUS B3605CCA / P3605CCA", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x30d1, "ASUS B5405CCA", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x30e1, "ASUS B5605CCA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x31d0, "ASUS Zen AIO 27 Z272SD_A272SD", ALC274_FIXUP_ASUS_ZEN_AIO_27),
+ SND_PCI_QUIRK(0x1043, 0x31e1, "ASUS B5605CCA", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x31f1, "ASUS B3605CCA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x3a50, "ASUS G834JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x3a60, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+ SND_PCI_QUIRK(0x1043, 0x3d78, "ASUS GA603KH", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x3d88, "ASUS GA603KM", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x3e00, "ASUS G814FH/FM/FP", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x3e20, "ASUS G814PH/PM/PP", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x3e30, "ASUS TP3607SA", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x3ee0, "ASUS Strix G815_JHR_JMR_JPR", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x3ef0, "ASUS Strix G635LR_LW_LX", ALC287_FIXUP_TAS2781_I2C),
@@ -10714,6 +10808,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x3f10, "ASUS Strix G835LR_LW_LX", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x3f20, "ASUS Strix G615LR_LW", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x3f30, "ASUS Strix G815LR_LW", ALC287_FIXUP_TAS2781_I2C),
+ SND_PCI_QUIRK(0x1043, 0x3fd0, "ASUS B3605CVA", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x3ff0, "ASUS B5405CVA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
@@ -10912,6 +11008,9 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
SND_PCI_QUIRK(0x17aa, 0x334b, "Lenovo ThinkCentre M70 Gen5", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x3384, "ThinkCentre M90a PRO", ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED),
+ SND_PCI_QUIRK(0x17aa, 0x3386, "ThinkCentre M90a Gen6", ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED),
+ SND_PCI_QUIRK(0x17aa, 0x3387, "ThinkCentre M70a Gen6", ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED),
SND_PCI_QUIRK(0x17aa, 0x3801, "Lenovo Yoga9 14IAP7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
HDA_CODEC_QUIRK(0x17aa, 0x3802, "DuetITL 2021", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga Pro 9 14IRP8", ALC287_FIXUP_TAS2781_I2C),
@@ -11900,8 +11999,11 @@ static int patch_alc269(struct hda_codec *codec)
spec->codec_variant = ALC269_TYPE_ALC300;
spec->gen.mixer_nid = 0; /* no loopback on ALC300 */
break;
+ case 0x10ec0222:
case 0x10ec0623:
spec->codec_variant = ALC269_TYPE_ALC623;
+ spec->shutup = alc222_shutup;
+ spec->init_hook = alc222_init;
break;
case 0x10ec0700:
case 0x10ec0701:
diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c
index e0ed4fc11155..e28bfefa72f3 100644
--- a/sound/soc/codecs/cs35l56-shared.c
+++ b/sound/soc/codecs/cs35l56-shared.c
@@ -10,6 +10,7 @@
#include <linux/gpio/consumer.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
#include <linux/types.h>
#include <sound/cs-amp-lib.h>
@@ -303,6 +304,79 @@ void cs35l56_wait_min_reset_pulse(void)
}
EXPORT_SYMBOL_NS_GPL(cs35l56_wait_min_reset_pulse, "SND_SOC_CS35L56_SHARED");
+static const struct {
+ u32 addr;
+ u32 value;
+} cs35l56_spi_system_reset_stages[] = {
+ { .addr = CS35L56_DSP_VIRTUAL1_MBOX_1, .value = CS35L56_MBOX_CMD_SYSTEM_RESET },
+ /* The next write is necessary to delimit the soft reset */
+ { .addr = CS35L56_DSP_MBOX_1_RAW, .value = CS35L56_MBOX_CMD_PING },
+};
+
+static void cs35l56_spi_issue_bus_locked_reset(struct cs35l56_base *cs35l56_base,
+ struct spi_device *spi)
+{
+ struct cs35l56_spi_payload *buf = cs35l56_base->spi_payload_buf;
+ struct spi_transfer t = {
+ .tx_buf = buf,
+ .len = sizeof(*buf),
+ };
+ struct spi_message m;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(cs35l56_spi_system_reset_stages); i++) {
+ buf->addr = cpu_to_be32(cs35l56_spi_system_reset_stages[i].addr);
+ buf->value = cpu_to_be32(cs35l56_spi_system_reset_stages[i].value);
+ spi_message_init_with_transfers(&m, &t, 1);
+ ret = spi_sync_locked(spi, &m);
+ if (ret)
+ dev_warn(cs35l56_base->dev, "spi_sync failed: %d\n", ret);
+
+ usleep_range(CS35L56_SPI_RESET_TO_PORT_READY_US,
+ 2 * CS35L56_SPI_RESET_TO_PORT_READY_US);
+ }
+}
+
+static void cs35l56_spi_system_reset(struct cs35l56_base *cs35l56_base)
+{
+ struct spi_device *spi = to_spi_device(cs35l56_base->dev);
+ unsigned int val;
+ int read_ret, ret;
+
+ /*
+ * There must not be any other SPI bus activity while the amp is
+ * soft-resetting.
+ */
+ ret = spi_bus_lock(spi->controller);
+ if (ret) {
+ dev_warn(cs35l56_base->dev, "spi_bus_lock failed: %d\n", ret);
+ return;
+ }
+
+ cs35l56_spi_issue_bus_locked_reset(cs35l56_base, spi);
+ spi_bus_unlock(spi->controller);
+
+ /*
+ * Check firmware boot by testing for a response in MBOX_2.
+ * HALO_STATE cannot be trusted yet because the reset sequence
+ * can leave it with stale state. But MBOX is reset.
+ * The regmap must remain in cache-only until the chip has
+ * booted, so use a bypassed read.
+ */
+ ret = read_poll_timeout(regmap_read_bypassed, read_ret,
+ (val > 0) && (val < 0xffffffff),
+ CS35L56_HALO_STATE_POLL_US,
+ CS35L56_HALO_STATE_TIMEOUT_US,
+ false,
+ cs35l56_base->regmap,
+ CS35L56_DSP_VIRTUAL1_MBOX_2,
+ &val);
+ if (ret) {
+ dev_err(cs35l56_base->dev, "SPI reboot timed out(%d): MBOX2=%#x\n",
+ read_ret, val);
+ }
+}
+
static const struct reg_sequence cs35l56_system_reset_seq[] = {
REG_SEQ0(CS35L56_DSP1_HALO_STATE, 0),
REG_SEQ0(CS35L56_DSP_VIRTUAL1_MBOX_1, CS35L56_MBOX_CMD_SYSTEM_RESET),
@@ -315,6 +389,12 @@ void cs35l56_system_reset(struct cs35l56_base *cs35l56_base, bool is_soundwire)
* accesses other than the controlled system reset sequence below.
*/
regcache_cache_only(cs35l56_base->regmap, true);
+
+ if (cs35l56_is_spi(cs35l56_base)) {
+ cs35l56_spi_system_reset(cs35l56_base);
+ return;
+ }
+
regmap_multi_reg_write_bypassed(cs35l56_base->regmap,
cs35l56_system_reset_seq,
ARRAY_SIZE(cs35l56_system_reset_seq));
diff --git a/sound/soc/codecs/cs35l56-spi.c b/sound/soc/codecs/cs35l56-spi.c
index c101134e8532..ca6c03a8766d 100644
--- a/sound/soc/codecs/cs35l56-spi.c
+++ b/sound/soc/codecs/cs35l56-spi.c
@@ -33,6 +33,9 @@ static int cs35l56_spi_probe(struct spi_device *spi)
cs35l56->base.dev = &spi->dev;
cs35l56->base.can_hibernate = true;
+ ret = cs35l56_init_config_for_spi(&cs35l56->base, spi);
+ if (ret)
+ return ret;
ret = cs35l56_common_probe(cs35l56);
if (ret != 0)
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
index f3c97da798dc..76159c45e6b5 100644
--- a/sound/soc/codecs/es8328.c
+++ b/sound/soc/codecs/es8328.c
@@ -233,7 +233,6 @@ static const struct snd_kcontrol_new es8328_right_line_controls =
/* Left Mixer */
static const struct snd_kcontrol_new es8328_left_mixer_controls[] = {
- SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL17, 7, 1, 0),
SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL17, 6, 1, 0),
SOC_DAPM_SINGLE("Right Playback Switch", ES8328_DACCONTROL18, 7, 1, 0),
SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL18, 6, 1, 0),
@@ -243,7 +242,6 @@ static const struct snd_kcontrol_new es8328_left_mixer_controls[] = {
static const struct snd_kcontrol_new es8328_right_mixer_controls[] = {
SOC_DAPM_SINGLE("Left Playback Switch", ES8328_DACCONTROL19, 7, 1, 0),
SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL19, 6, 1, 0),
- SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL20, 7, 1, 0),
SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL20, 6, 1, 0),
};
@@ -336,10 +334,10 @@ static const struct snd_soc_dapm_widget es8328_dapm_widgets[] = {
SND_SOC_DAPM_DAC("Left DAC", "Left Playback", ES8328_DACPOWER,
ES8328_DACPOWER_LDAC_OFF, 1),
- SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0,
+ SND_SOC_DAPM_MIXER("Left Mixer", ES8328_DACCONTROL17, 7, 0,
&es8328_left_mixer_controls[0],
ARRAY_SIZE(es8328_left_mixer_controls)),
- SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0,
+ SND_SOC_DAPM_MIXER("Right Mixer", ES8328_DACCONTROL20, 7, 0,
&es8328_right_mixer_controls[0],
ARRAY_SIZE(es8328_right_mixer_controls)),
@@ -418,19 +416,14 @@ static const struct snd_soc_dapm_route es8328_dapm_routes[] = {
{ "Right Line Mux", "PGA", "Right PGA Mux" },
{ "Right Line Mux", "Differential", "Differential Mux" },
- { "Left Out 1", NULL, "Left DAC" },
- { "Right Out 1", NULL, "Right DAC" },
- { "Left Out 2", NULL, "Left DAC" },
- { "Right Out 2", NULL, "Right DAC" },
-
- { "Left Mixer", "Playback Switch", "Left DAC" },
+ { "Left Mixer", NULL, "Left DAC" },
{ "Left Mixer", "Left Bypass Switch", "Left Line Mux" },
{ "Left Mixer", "Right Playback Switch", "Right DAC" },
{ "Left Mixer", "Right Bypass Switch", "Right Line Mux" },
{ "Right Mixer", "Left Playback Switch", "Left DAC" },
{ "Right Mixer", "Left Bypass Switch", "Left Line Mux" },
- { "Right Mixer", "Playback Switch", "Right DAC" },
+ { "Right Mixer", NULL, "Right DAC" },
{ "Right Mixer", "Right Bypass Switch", "Right Line Mux" },
{ "DAC DIG", NULL, "DAC STM" },
diff --git a/sound/soc/codecs/tas2764.c b/sound/soc/codecs/tas2764.c
index d482cd194c08..58315eab492a 100644
--- a/sound/soc/codecs/tas2764.c
+++ b/sound/soc/codecs/tas2764.c
@@ -365,7 +365,7 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct snd_soc_component *component = dai->component;
struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component);
- u8 tdm_rx_start_slot = 0, asi_cfg_0 = 0, asi_cfg_1 = 0;
+ u8 tdm_rx_start_slot = 0, asi_cfg_0 = 0, asi_cfg_1 = 0, asi_cfg_4 = 0;
int ret;
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
@@ -374,12 +374,14 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
fallthrough;
case SND_SOC_DAIFMT_NB_NF:
asi_cfg_1 = TAS2764_TDM_CFG1_RX_RISING;
+ asi_cfg_4 = TAS2764_TDM_CFG4_TX_FALLING;
break;
case SND_SOC_DAIFMT_IB_IF:
asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START;
fallthrough;
case SND_SOC_DAIFMT_IB_NF:
asi_cfg_1 = TAS2764_TDM_CFG1_RX_FALLING;
+ asi_cfg_4 = TAS2764_TDM_CFG4_TX_RISING;
break;
}
@@ -389,6 +391,12 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
if (ret < 0)
return ret;
+ ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG4,
+ TAS2764_TDM_CFG4_TX_MASK,
+ asi_cfg_4);
+ if (ret < 0)
+ return ret;
+
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START;
diff --git a/sound/soc/codecs/tas2764.h b/sound/soc/codecs/tas2764.h
index 168af772a898..9490f2686e38 100644
--- a/sound/soc/codecs/tas2764.h
+++ b/sound/soc/codecs/tas2764.h
@@ -25,7 +25,7 @@
/* Power Control */
#define TAS2764_PWR_CTRL TAS2764_REG(0X0, 0x02)
-#define TAS2764_PWR_CTRL_MASK GENMASK(1, 0)
+#define TAS2764_PWR_CTRL_MASK GENMASK(2, 0)
#define TAS2764_PWR_CTRL_ACTIVE 0x0
#define TAS2764_PWR_CTRL_MUTE BIT(0)
#define TAS2764_PWR_CTRL_SHUTDOWN BIT(1)
@@ -79,6 +79,12 @@
#define TAS2764_TDM_CFG3_RXS_SHIFT 0x4
#define TAS2764_TDM_CFG3_MASK GENMASK(3, 0)
+/* TDM Configuration Reg4 */
+#define TAS2764_TDM_CFG4 TAS2764_REG(0X0, 0x0d)
+#define TAS2764_TDM_CFG4_TX_MASK BIT(0)
+#define TAS2764_TDM_CFG4_TX_RISING 0x0
+#define TAS2764_TDM_CFG4_TX_FALLING BIT(0)
+
/* TDM Configuration Reg5 */
#define TAS2764_TDM_CFG5 TAS2764_REG(0X0, 0x0e)
#define TAS2764_TDM_CFG5_VSNS_MASK BIT(6)
diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
index 9f93b230652a..863c3f672ba9 100644
--- a/sound/soc/codecs/tas2770.c
+++ b/sound/soc/codecs/tas2770.c
@@ -506,7 +506,7 @@ static int tas2770_codec_probe(struct snd_soc_component *component)
}
static DECLARE_TLV_DB_SCALE(tas2770_digital_tlv, 1100, 50, 0);
-static DECLARE_TLV_DB_SCALE(tas2770_playback_volume, -12750, 50, 0);
+static DECLARE_TLV_DB_SCALE(tas2770_playback_volume, -10050, 50, 0);
static const struct snd_kcontrol_new tas2770_snd_controls[] = {
SOC_SINGLE_TLV("Speaker Playback Volume", TAS2770_PLAY_CFG_REG2,
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index c4eb87c5d39e..9f33dd11d47f 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -994,10 +994,10 @@ static struct snd_soc_dai_driver fsl_sai_dai_template[] = {
{
.name = "sai-tx",
.playback = {
- .stream_name = "CPU-Playback",
+ .stream_name = "SAI-Playback",
.channels_min = 1,
.channels_max = 32,
- .rate_min = 8000,
+ .rate_min = 8000,
.rate_max = 2822400,
.rates = SNDRV_PCM_RATE_KNOT,
.formats = FSL_SAI_FORMATS,
@@ -1007,7 +1007,7 @@ static struct snd_soc_dai_driver fsl_sai_dai_template[] = {
{
.name = "sai-rx",
.capture = {
- .stream_name = "CPU-Capture",
+ .stream_name = "SAI-Capture",
.channels_min = 1,
.channels_max = 32,
.rate_min = 8000,
diff --git a/sound/soc/fsl/imx-audmix.c b/sound/soc/fsl/imx-audmix.c
index 50ecc5f51100..dac5d4ddacd6 100644
--- a/sound/soc/fsl/imx-audmix.c
+++ b/sound/soc/fsl/imx-audmix.c
@@ -119,8 +119,8 @@ static const struct snd_soc_ops imx_audmix_be_ops = {
static const char *name[][3] = {
{"HiFi-AUDMIX-FE-0", "HiFi-AUDMIX-FE-1", "HiFi-AUDMIX-FE-2"},
{"sai-tx", "sai-tx", "sai-rx"},
- {"AUDMIX-Playback-0", "AUDMIX-Playback-1", "CPU-Capture"},
- {"CPU-Playback", "CPU-Playback", "AUDMIX-Capture-0"},
+ {"AUDMIX-Playback-0", "AUDMIX-Playback-1", "SAI-Capture"},
+ {"SAI-Playback", "SAI-Playback", "AUDMIX-Capture-0"},
};
static int imx_audmix_probe(struct platform_device *pdev)
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index 203b07d4d833..c13064c77726 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -803,7 +803,9 @@ static int create_sdw_dailink(struct snd_soc_card *card,
int *be_id, struct snd_soc_codec_conf **codec_conf)
{
struct device *dev = card->dev;
+ struct snd_soc_acpi_mach *mach = dev_get_platdata(card->dev);
struct asoc_sdw_mc_private *ctx = snd_soc_card_get_drvdata(card);
+ struct snd_soc_acpi_mach_params *mach_params = &mach->mach_params;
struct intel_mc_ctx *intel_ctx = (struct intel_mc_ctx *)ctx->private;
struct asoc_sdw_endpoint *sof_end;
int stream;
@@ -900,6 +902,11 @@ static int create_sdw_dailink(struct snd_soc_card *card,
codecs[j].name = sof_end->codec_name;
codecs[j].dai_name = sof_end->dai_info->dai_name;
+ if (sof_end->dai_info->dai_type == SOC_SDW_DAI_TYPE_MIC &&
+ mach_params->dmic_num > 0) {
+ dev_warn(dev,
+ "Both SDW DMIC and PCH DMIC are present, if incorrect, please set kernel params snd_sof_intel_hda_generic dmic_num=0 to disable PCH DMIC\n");
+ }
j++;
}
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index be689f6e10c8..a1ccd95da8bb 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -1312,22 +1312,8 @@ struct snd_soc_acpi_mach *hda_machine_select(struct snd_sof_dev *sdev)
/* report to machine driver if any DMICs are found */
mach->mach_params.dmic_num = check_dmic_num(sdev);
- if (sdw_mach_found) {
- /*
- * DMICs use up to 4 pins and are typically pin-muxed with SoundWire
- * link 2 and 3, or link 1 and 2, thus we only try to enable dmics
- * if all conditions are true:
- * a) 2 or fewer links are used by SoundWire
- * b) the NHLT table reports the presence of microphones
- */
- if (hweight_long(mach->link_mask) <= 2)
- dmic_fixup = true;
- else
- mach->mach_params.dmic_num = 0;
- } else {
- if (mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER)
- dmic_fixup = true;
- }
+ if (sdw_mach_found || mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER)
+ dmic_fixup = true;
if (tplg_fixup &&
dmic_fixup &&
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index 737dd00e97b1..779d97d31f17 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -1145,7 +1145,7 @@ static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream)
{
struct usbmidi_out_port *port = substream->runtime->private_data;
- cancel_work_sync(&port->ep->work);
+ flush_work(&port->ep->work);
return substream_open(substream, 0, 0);
}
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index a97efb7b131e..09210fb4ac60 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1868,6 +1868,7 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
subs->stream_offset_adj = 2;
break;
+ case USB_ID(0x2b73, 0x000a): /* Pioneer DJM-900NXS2 */
case USB_ID(0x2b73, 0x0013): /* Pioneer DJM-450 */
pioneer_djm_set_format_quirk(subs, 0x0082);
break;
diff --git a/sound/usb/usx2y/usbusx2y.c b/sound/usb/usx2y/usbusx2y.c
index 5f81c68fd42b..5756ff3528a2 100644
--- a/sound/usb/usx2y/usbusx2y.c
+++ b/sound/usb/usx2y/usbusx2y.c
@@ -151,6 +151,12 @@ static int snd_usx2y_card_used[SNDRV_CARDS];
static void snd_usx2y_card_private_free(struct snd_card *card);
static void usx2y_unlinkseq(struct snd_usx2y_async_seq *s);
+#ifdef USX2Y_NRPACKS_VARIABLE
+int nrpacks = USX2Y_NRPACKS; /* number of packets per urb */
+module_param(nrpacks, int, 0444);
+MODULE_PARM_DESC(nrpacks, "Number of packets per URB.");
+#endif
+
/*
* pipe 4 is used for switching the lamps, setting samplerate, volumes ....
*/
@@ -432,6 +438,11 @@ static int snd_usx2y_probe(struct usb_interface *intf,
struct snd_card *card;
int err;
+#ifdef USX2Y_NRPACKS_VARIABLE
+ if (nrpacks < 0 || nrpacks > USX2Y_NRPACKS_MAX)
+ return -EINVAL;
+#endif
+
if (le16_to_cpu(device->descriptor.idVendor) != 0x1604 ||
(le16_to_cpu(device->descriptor.idProduct) != USB_ID_US122 &&
le16_to_cpu(device->descriptor.idProduct) != USB_ID_US224 &&
diff --git a/sound/usb/usx2y/usbusx2y.h b/sound/usb/usx2y/usbusx2y.h
index 391fd7b4ed5e..6a76d04bf1c7 100644
--- a/sound/usb/usx2y/usbusx2y.h
+++ b/sound/usb/usx2y/usbusx2y.h
@@ -7,6 +7,32 @@
#define NRURBS 2
+/* Default value used for nr of packs per urb.
+ * 1 to 4 have been tested ok on uhci.
+ * To use 3 on ohci, you'd need a patch:
+ * look for "0000425-linux-2.6.9-rc4-mm1_ohci-hcd.patch.gz" on
+ * "https://bugtrack.alsa-project.org/alsa-bug/bug_view_page.php?bug_id=0000425"
+ *
+ * 1, 2 and 4 work out of the box on ohci, if I recall correctly.
+ * Bigger is safer operation, smaller gives lower latencies.
+ */
+#define USX2Y_NRPACKS 4
+
+#define USX2Y_NRPACKS_MAX 1024
+
+/* If your system works ok with this module's parameter
+ * nrpacks set to 1, you might as well comment
+ * this define out, and thereby produce smaller, faster code.
+ * You'd also set USX2Y_NRPACKS to 1 then.
+ */
+#define USX2Y_NRPACKS_VARIABLE 1
+
+#ifdef USX2Y_NRPACKS_VARIABLE
+extern int nrpacks;
+#define nr_of_packs() nrpacks
+#else
+#define nr_of_packs() USX2Y_NRPACKS
+#endif
#define URBS_ASYNC_SEQ 10
#define URB_DATA_LEN_ASYNC_SEQ 32
diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
index f540f46a0b14..acca8bead82e 100644
--- a/sound/usb/usx2y/usbusx2yaudio.c
+++ b/sound/usb/usx2y/usbusx2yaudio.c
@@ -28,33 +28,6 @@
#include "usx2y.h"
#include "usbusx2y.h"
-/* Default value used for nr of packs per urb.
- * 1 to 4 have been tested ok on uhci.
- * To use 3 on ohci, you'd need a patch:
- * look for "0000425-linux-2.6.9-rc4-mm1_ohci-hcd.patch.gz" on
- * "https://bugtrack.alsa-project.org/alsa-bug/bug_view_page.php?bug_id=0000425"
- *
- * 1, 2 and 4 work out of the box on ohci, if I recall correctly.
- * Bigger is safer operation, smaller gives lower latencies.
- */
-#define USX2Y_NRPACKS 4
-
-/* If your system works ok with this module's parameter
- * nrpacks set to 1, you might as well comment
- * this define out, and thereby produce smaller, faster code.
- * You'd also set USX2Y_NRPACKS to 1 then.
- */
-#define USX2Y_NRPACKS_VARIABLE 1
-
-#ifdef USX2Y_NRPACKS_VARIABLE
-static int nrpacks = USX2Y_NRPACKS; /* number of packets per urb */
-#define nr_of_packs() nrpacks
-module_param(nrpacks, int, 0444);
-MODULE_PARM_DESC(nrpacks, "Number of packets per URB.");
-#else
-#define nr_of_packs() USX2Y_NRPACKS
-#endif
-
static int usx2y_urb_capt_retire(struct snd_usx2y_substream *subs)
{
struct urb *urb = subs->completed_urb;
diff --git a/tools/arch/arm64/tools/Makefile b/tools/arch/arm64/tools/Makefile
index 7b42feedf647..de4f1b66ef01 100644
--- a/tools/arch/arm64/tools/Makefile
+++ b/tools/arch/arm64/tools/Makefile
@@ -13,12 +13,6 @@ AWK ?= awk
MKDIR ?= mkdir
RM ?= rm
-ifeq ($(V),1)
-Q =
-else
-Q = @
-endif
-
arm64_tools_dir = $(top_srcdir)/arch/arm64/tools
arm64_sysreg_tbl = $(arm64_tools_dir)/sysreg
arm64_gen_sysreg = $(arm64_tools_dir)/gen-sysreg.awk
diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile
index 243b79f2b451..062bbd6cd048 100644
--- a/tools/bpf/Makefile
+++ b/tools/bpf/Makefile
@@ -27,12 +27,6 @@ srctree := $(patsubst %/,%,$(dir $(CURDIR)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
endif
-ifeq ($(V),1)
- Q =
-else
- Q = @
-endif
-
FEATURE_USER = .bpf
FEATURE_TESTS = libbfd disassembler-four-args disassembler-init-styled
FEATURE_DISPLAY = libbfd
diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile
index 4315652678b9..bf843f328812 100644
--- a/tools/bpf/bpftool/Documentation/Makefile
+++ b/tools/bpf/bpftool/Documentation/Makefile
@@ -5,12 +5,6 @@ INSTALL ?= install
RM ?= rm -f
RMDIR ?= rmdir --ignore-fail-on-non-empty
-ifeq ($(V),1)
- Q =
-else
- Q = @
-endif
-
prefix ?= /usr/local
mandir ?= $(prefix)/man
man8dir = $(mandir)/man8
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index dd9f3ec84201..6ea4823b770c 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -7,12 +7,6 @@ srctree := $(patsubst %/,%,$(dir $(srctree)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
endif
-ifeq ($(V),1)
- Q =
-else
- Q = @
-endif
-
BPF_DIR = $(srctree)/tools/lib/bpf
ifneq ($(OUTPUT),)
diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile
index 4b8079f294f6..afbddea3a39c 100644
--- a/tools/bpf/resolve_btfids/Makefile
+++ b/tools/bpf/resolve_btfids/Makefile
@@ -5,10 +5,8 @@ include ../../scripts/Makefile.arch
srctree := $(abspath $(CURDIR)/../../../)
ifeq ($(V),1)
- Q =
msg =
else
- Q = @
ifeq ($(silent),1)
msg =
else
diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile
index c4f1f1735af6..e49203ebd48c 100644
--- a/tools/bpf/runqslower/Makefile
+++ b/tools/bpf/runqslower/Makefile
@@ -26,10 +26,7 @@ VMLINUX_BTF_PATHS := $(if $(O),$(O)/vmlinux) \
VMLINUX_BTF_PATH := $(or $(VMLINUX_BTF),$(firstword \
$(wildcard $(VMLINUX_BTF_PATHS))))
-ifeq ($(V),1)
-Q =
-else
-Q = @
+ifneq ($(V),1)
MAKEFLAGS += --no-print-directory
submake_extras := feature_display=0
endif
diff --git a/tools/build/Makefile b/tools/build/Makefile
index 18ad131f6ea7..63ef21878761 100644
--- a/tools/build/Makefile
+++ b/tools/build/Makefile
@@ -17,13 +17,7 @@ $(call allow-override,LD,$(CROSS_COMPILE)ld)
export HOSTCC HOSTLD HOSTAR
-ifeq ($(V),1)
- Q =
-else
- Q = @
-endif
-
-export Q srctree CC LD
+export srctree CC LD
MAKEFLAGS := --no-print-directory
build := -f $(srctree)/tools/build/Makefile.build dir=. obj
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 857a5f7b413d..168140f8e646 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -53,13 +53,6 @@ include $(srctree)/tools/scripts/Makefile.include
# copy a bit from Linux kbuild
-ifeq ("$(origin V)", "command line")
- VERBOSE = $(V)
-endif
-ifndef VERBOSE
- VERBOSE = 0
-endif
-
INCLUDES = -I$(or $(OUTPUT),.) \
-I$(srctree)/tools/include -I$(srctree)/tools/include/uapi \
-I$(srctree)/tools/arch/$(SRCARCH)/include
@@ -96,12 +89,6 @@ override CFLAGS += $(CLANG_CROSS_FLAGS)
# flags specific for shared library
SHLIB_FLAGS := -DSHARED -fPIC
-ifeq ($(VERBOSE),1)
- Q =
-else
- Q = @
-endif
-
# Disable command line variables (CFLAGS) override from top
# level Makefile (perf), otherwise build Makefile will get
# the same command line setup.
diff --git a/tools/lib/perf/Makefile b/tools/lib/perf/Makefile
index 3a9b2140aa04..e9a7ac2c062e 100644
--- a/tools/lib/perf/Makefile
+++ b/tools/lib/perf/Makefile
@@ -39,19 +39,6 @@ libdir = $(prefix)/$(libdir_relative)
libdir_SQ = $(subst ','\'',$(libdir))
libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
-ifeq ("$(origin V)", "command line")
- VERBOSE = $(V)
-endif
-ifndef VERBOSE
- VERBOSE = 0
-endif
-
-ifeq ($(VERBOSE),1)
- Q =
-else
- Q = @
-endif
-
TEST_ARGS := $(if $(V),-v)
# Set compile option CFLAGS
diff --git a/tools/lib/thermal/Makefile b/tools/lib/thermal/Makefile
index 8890fd57b110..a1f5e388644d 100644
--- a/tools/lib/thermal/Makefile
+++ b/tools/lib/thermal/Makefile
@@ -39,19 +39,6 @@ libdir = $(prefix)/$(libdir_relative)
libdir_SQ = $(subst ','\'',$(libdir))
libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
-ifeq ("$(origin V)", "command line")
- VERBOSE = $(V)
-endif
-ifndef VERBOSE
- VERBOSE = 0
-endif
-
-ifeq ($(VERBOSE),1)
- Q =
-else
- Q = @
-endif
-
# Set compile option CFLAGS
ifdef EXTRA_CFLAGS
CFLAGS := $(EXTRA_CFLAGS)
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index f56e27727534..7a65948892e5 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -46,12 +46,6 @@ HOST_OVERRIDES := CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)"
AWK = awk
MKDIR = mkdir
-ifeq ($(V),1)
- Q =
-else
- Q = @
-endif
-
BUILD_ORC := n
ifeq ($(SRCARCH),x86)
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index be18a0489303..ce973d9d8e6d 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -2472,13 +2472,14 @@ static void mark_rodata(struct objtool_file *file)
*
* - .rodata: can contain GCC switch tables
* - .rodata.<func>: same, if -fdata-sections is being used
- * - .rodata..c_jump_table: contains C annotated jump tables
+ * - .data.rel.ro.c_jump_table: contains C annotated jump tables
*
* .rodata.str1.* sections are ignored; they don't contain jump tables.
*/
for_each_sec(file, sec) {
- if (!strncmp(sec->name, ".rodata", 7) &&
- !strstr(sec->name, ".str1.")) {
+ if ((!strncmp(sec->name, ".rodata", 7) &&
+ !strstr(sec->name, ".str1.")) ||
+ !strncmp(sec->name, ".data.rel.ro", 12)) {
sec->rodata = true;
found = true;
}
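
The combined condition added to mark_rodata() above mixes && and || and is easy to misread; as a plain C predicate (illustrative sketch with a hypothetical helper name, not objtool's actual code) it reads:

#include <stdbool.h>
#include <string.h>

/* A section may hold jump tables if it is a .rodata* section that is not
 * a string-merge (.str1.*) section, or any .data.rel.ro* section. */
static bool may_contain_jump_tables(const char *name)
{
	return (!strncmp(name, ".rodata", 7) && !strstr(name, ".str1.")) ||
	       !strncmp(name, ".data.rel.ro", 12);
}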
diff --git a/tools/objtool/include/objtool/special.h b/tools/objtool/include/objtool/special.h
index e7ee7ffccefd..e049679bb17b 100644
--- a/tools/objtool/include/objtool/special.h
+++ b/tools/objtool/include/objtool/special.h
@@ -10,7 +10,7 @@
#include <objtool/check.h>
#include <objtool/elf.h>
-#define C_JUMP_TABLE_SECTION ".rodata..c_jump_table"
+#define C_JUMP_TABLE_SECTION ".data.rel.ro.c_jump_table"
struct special_alt {
struct list_head list;
diff --git a/tools/objtool/noreturns.h b/tools/objtool/noreturns.h
index b2174894f9f7..6bb7edda3094 100644
--- a/tools/objtool/noreturns.h
+++ b/tools/objtool/noreturns.h
@@ -19,7 +19,7 @@ NORETURN(__x64_sys_exit_group)
NORETURN(arch_cpu_idle_dead)
NORETURN(bch2_trans_in_restart_error)
NORETURN(bch2_trans_restart_error)
-NORETURN(bch2_trans_unlocked_error)
+NORETURN(bch2_trans_unlocked_or_in_restart_error)
NORETURN(cpu_bringup_and_idle)
NORETURN(cpu_startup_entry)
NORETURN(do_exit)
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 55d6ce9ea52f..05c083bb1122 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -161,47 +161,6 @@ export VPATH
SOURCE := $(shell ln -sf $(srctree)/tools/perf $(OUTPUT)/source)
endif
-# Beautify output
-# ---------------------------------------------------------------------------
-#
-# Most of build commands in Kbuild start with "cmd_". You can optionally define
-# "quiet_cmd_*". If defined, the short log is printed. Otherwise, no log from
-# that command is printed by default.
-#
-# e.g.)
-# quiet_cmd_depmod = DEPMOD $(MODLIB)
-# cmd_depmod = $(srctree)/scripts/depmod.sh $(DEPMOD) $(KERNELRELEASE)
-#
-# A simple variant is to prefix commands with $(Q) - that's useful
-# for commands that shall be hidden in non-verbose mode.
-#
-# $(Q)$(MAKE) $(build)=scripts/basic
-#
-# To put more focus on warnings, be less verbose as default
-# Use 'make V=1' to see the full commands
-
-ifeq ($(V),1)
- quiet =
- Q =
-else
- quiet=quiet_
- Q=@
-endif
-
-# If the user is running make -s (silent mode), suppress echoing of commands
-# make-4.0 (and later) keep single letter options in the 1st word of MAKEFLAGS.
-ifeq ($(filter 3.%,$(MAKE_VERSION)),)
-short-opts := $(firstword -$(MAKEFLAGS))
-else
-short-opts := $(filter-out --%,$(MAKEFLAGS))
-endif
-
-ifneq ($(findstring s,$(short-opts)),)
- quiet=silent_
-endif
-
-export quiet Q
-
# Do not use make's built-in rules
# (this improves performance and avoids hard-to-debug behaviour);
MAKEFLAGS += -r
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 0aa4005017c7..45f4abef7064 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -136,6 +136,33 @@ else
NO_SUBDIR = :
endif
+# Beautify output
+# ---------------------------------------------------------------------------
+#
+# Most of build commands in Kbuild start with "cmd_". You can optionally define
+# "quiet_cmd_*". If defined, the short log is printed. Otherwise, no log from
+# that command is printed by default.
+#
+# e.g.)
+# quiet_cmd_depmod = DEPMOD $(MODLIB)
+# cmd_depmod = $(srctree)/scripts/depmod.sh $(DEPMOD) $(KERNELRELEASE)
+#
+# A simple variant is to prefix commands with $(Q) - that's useful
+# for commands that shall be hidden in non-verbose mode.
+#
+# $(Q)$(MAKE) $(build)=scripts/basic
+#
+# To put more focus on warnings, be less verbose as default
+# Use 'make V=1' to see the full commands
+
+ifeq ($(V),1)
+ quiet =
+ Q =
+else
+ quiet = quiet_
+ Q = @
+endif
+
# If the user is running make -s (silent mode), suppress echoing of commands
# make-4.0 (and later) keep single letter options in the 1st word of MAKEFLAGS.
ifeq ($(filter 3.%,$(MAKE_VERSION)),)
@@ -146,8 +173,11 @@ endif
ifneq ($(findstring s,$(short-opts)),)
silent=1
+ quiet=silent_
endif
+export quiet Q
+
#
# Define a callable command for descending to a new directory
#
diff --git a/tools/sound/dapm-graph b/tools/sound/dapm-graph
index f14bdfedee8f..b6196ee5065a 100755
--- a/tools/sound/dapm-graph
+++ b/tools/sound/dapm-graph
@@ -10,7 +10,7 @@ set -eu
STYLE_COMPONENT_ON="color=dodgerblue;style=bold"
STYLE_COMPONENT_OFF="color=gray40;style=filled;fillcolor=gray90"
-STYLE_NODE_ON="shape=box,style=bold,color=green4"
+STYLE_NODE_ON="shape=box,style=bold,color=green4,fillcolor=white"
STYLE_NODE_OFF="shape=box,style=filled,color=gray30,fillcolor=gray95"
# Print usage and exit
diff --git a/tools/testing/selftests/bpf/Makefile.docs b/tools/testing/selftests/bpf/Makefile.docs
index eb6a4fea8c79..f7f9e7088bb3 100644
--- a/tools/testing/selftests/bpf/Makefile.docs
+++ b/tools/testing/selftests/bpf/Makefile.docs
@@ -7,12 +7,6 @@ INSTALL ?= install
RM ?= rm -f
RMDIR ?= rmdir --ignore-fail-on-non-empty
-ifeq ($(V),1)
- Q =
-else
- Q = @
-endif
-
prefix ?= /usr/local
mandir ?= $(prefix)/man
man2dir = $(mandir)/man2
diff --git a/tools/testing/selftests/damon/damon_nr_regions.py b/tools/testing/selftests/damon/damon_nr_regions.py
index 2e8a74aff543..58f3291fed12 100755
--- a/tools/testing/selftests/damon/damon_nr_regions.py
+++ b/tools/testing/selftests/damon/damon_nr_regions.py
@@ -65,6 +65,7 @@ def test_nr_regions(real_nr_regions, min_nr_regions, max_nr_regions):
test_name = 'nr_regions test with %d/%d/%d real/min/max nr_regions' % (
real_nr_regions, min_nr_regions, max_nr_regions)
+ collected_nr_regions.sort()
if (collected_nr_regions[0] < min_nr_regions or
collected_nr_regions[-1] > max_nr_regions):
print('fail %s' % test_name)
@@ -109,6 +110,7 @@ def main():
attrs = kdamonds.kdamonds[0].contexts[0].monitoring_attrs
attrs.min_nr_regions = 3
attrs.max_nr_regions = 7
+ attrs.update_us = 100000
err = kdamonds.kdamonds[0].commit()
if err is not None:
proc.terminate()
diff --git a/tools/testing/selftests/damon/damos_quota.py b/tools/testing/selftests/damon/damos_quota.py
index 7d4c6bb2e3cd..57c4937aaed2 100755
--- a/tools/testing/selftests/damon/damos_quota.py
+++ b/tools/testing/selftests/damon/damos_quota.py
@@ -51,16 +51,19 @@ def main():
nr_quota_exceeds = scheme.stats.qt_exceeds
wss_collected.sort()
+ nr_expected_quota_exceeds = 0
for wss in wss_collected:
if wss > sz_quota:
print('quota is not kept: %s > %s' % (wss, sz_quota))
print('collected samples are as below')
print('\n'.join(['%d' % wss for wss in wss_collected]))
exit(1)
+ if wss == sz_quota:
+ nr_expected_quota_exceeds += 1
- if nr_quota_exceeds < len(wss_collected):
- print('quota is not always exceeded: %d > %d' %
- (len(wss_collected), nr_quota_exceeds))
+ if nr_quota_exceeds < nr_expected_quota_exceeds:
+ print('quota is exceeded less than expected: %d < %d' %
+ (nr_quota_exceeds, nr_expected_quota_exceeds))
exit(1)
if __name__ == '__main__':
diff --git a/tools/testing/selftests/damon/damos_quota_goal.py b/tools/testing/selftests/damon/damos_quota_goal.py
index 18246f3b62f7..f76e0412b564 100755
--- a/tools/testing/selftests/damon/damos_quota_goal.py
+++ b/tools/testing/selftests/damon/damos_quota_goal.py
@@ -63,6 +63,9 @@ def main():
if last_effective_bytes != 0 else -1.0))
if last_effective_bytes == goal.effective_bytes:
+ # effective quota was already at the minimum and cannot be reduced further
+ if expect_increase is False and last_effective_bytes == 1:
+ continue
print('effective bytes not changed: %d' % goal.effective_bytes)
exit(1)
diff --git a/tools/testing/selftests/drivers/net/hds.py b/tools/testing/selftests/drivers/net/hds.py
index 394971b25c0b..873f5219e41d 100755
--- a/tools/testing/selftests/drivers/net/hds.py
+++ b/tools/testing/selftests/drivers/net/hds.py
@@ -2,17 +2,54 @@
# SPDX-License-Identifier: GPL-2.0
import errno
+import os
from lib.py import ksft_run, ksft_exit, ksft_eq, ksft_raises, KsftSkipEx
-from lib.py import EthtoolFamily, NlError
+from lib.py import CmdExitFailure, EthtoolFamily, NlError
from lib.py import NetDrvEnv
+from lib.py import defer, ethtool, ip
-def get_hds(cfg, netnl) -> None:
+
+def _get_hds_mode(cfg, netnl) -> str:
try:
rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}})
except NlError as e:
raise KsftSkipEx('ring-get not supported by device')
if 'tcp-data-split' not in rings:
raise KsftSkipEx('tcp-data-split not supported by device')
+ return rings['tcp-data-split']
+
+
+def _xdp_onoff(cfg):
+ test_dir = os.path.dirname(os.path.realpath(__file__))
+ prog = test_dir + "/../../net/lib/xdp_dummy.bpf.o"
+ ip("link set dev %s xdp obj %s sec xdp" %
+ (cfg.ifname, prog))
+ ip("link set dev %s xdp off" % cfg.ifname)
+
+
+def _ioctl_ringparam_modify(cfg, netnl) -> None:
+ """
+ Helper for performing a hopefully unimportant IOCTL SET.
+ IOCTL does not support HDS, so it should not affect the HDS config.
+ """
+ try:
+ rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}})
+ except NlError as e:
+ raise KsftSkipEx('ring-get not supported by device')
+
+ if 'tx' not in rings:
+ raise KsftSkipEx('setting Tx ring size not supported')
+
+ try:
+ ethtool(f"--disable-netlink -G {cfg.ifname} tx {rings['tx'] // 2}")
+ except CmdExitFailure as e:
+ ethtool(f"--disable-netlink -G {cfg.ifname} tx {rings['tx'] * 2}")
+ defer(ethtool, f"-G {cfg.ifname} tx {rings['tx']}")
+
+
+def get_hds(cfg, netnl) -> None:
+ _get_hds_mode(cfg, netnl)
+
def get_hds_thresh(cfg, netnl) -> None:
try:
@@ -104,6 +141,103 @@ def set_hds_thresh_gt(cfg, netnl) -> None:
netnl.rings_set({'header': {'dev-index': cfg.ifindex}, 'hds-thresh': hds_gt})
ksft_eq(e.exception.nl_msg.error, -errno.EINVAL)
+
+def set_xdp(cfg, netnl) -> None:
+ """
+ Enable single-buffer XDP on the device.
+ When HDS is in "auto" / UNKNOWN mode, XDP installation should work.
+ """
+ mode = _get_hds_mode(cfg, netnl)
+ if mode == 'enabled':
+ netnl.rings_set({'header': {'dev-index': cfg.ifindex},
+ 'tcp-data-split': 'unknown'})
+
+ _xdp_onoff(cfg)
+
+
+def enabled_set_xdp(cfg, netnl) -> None:
+ """
+ Enable single-buffer XDP on the device.
+ When HDS is in "enabled" mode, XDP installation should not work.
+ """
+ _get_hds_mode(cfg, netnl) # Trigger skip if not supported
+
+ netnl.rings_set({'header': {'dev-index': cfg.ifindex},
+ 'tcp-data-split': 'enabled'})
+ defer(netnl.rings_set, {'header': {'dev-index': cfg.ifindex},
+ 'tcp-data-split': 'unknown'})
+
+ with ksft_raises(CmdExitFailure) as e:
+ _xdp_onoff(cfg)
+
+
+def ioctl(cfg, netnl) -> None:
+ mode1 = _get_hds_mode(cfg, netnl)
+ _ioctl_ringparam_modify(cfg, netnl)
+ mode2 = _get_hds_mode(cfg, netnl)
+
+ ksft_eq(mode1, mode2)
+
+
+def ioctl_set_xdp(cfg, netnl) -> None:
+ """
+ Like set_xdp(), but we perturb the settings via the legacy ioctl.
+ """
+ mode = _get_hds_mode(cfg, netnl)
+ if mode == 'enabled':
+ netnl.rings_set({'header': {'dev-index': cfg.ifindex},
+ 'tcp-data-split': 'unknown'})
+
+ _ioctl_ringparam_modify(cfg, netnl)
+
+ _xdp_onoff(cfg)
+
+
+def ioctl_enabled_set_xdp(cfg, netnl) -> None:
+ """
+ Enable single-buffer XDP on the device.
+ When HDS is in "enabled" mode, XDP installation should not work.
+ """
+ _get_hds_mode(cfg, netnl) # Trigger skip if not supported
+
+ netnl.rings_set({'header': {'dev-index': cfg.ifindex},
+ 'tcp-data-split': 'enabled'})
+ defer(netnl.rings_set, {'header': {'dev-index': cfg.ifindex},
+ 'tcp-data-split': 'unknown'})
+
+ with ksft_raises(CmdExitFailure) as e:
+ _xdp_onoff(cfg)
+
+
def main() -> None:
with NetDrvEnv(__file__, queue_count=3) as cfg:
ksft_run([get_hds,
@@ -112,7 +246,12 @@ def main() -> None:
set_hds_enable,
set_hds_thresh_zero,
set_hds_thresh_max,
- set_hds_thresh_gt],
+ set_hds_thresh_gt,
+ set_xdp,
+ enabled_set_xdp,
+ ioctl,
+ ioctl_set_xdp,
+ ioctl_enabled_set_xdp],
args=(cfg, EthtoolFamily()))
ksft_exit()
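
For context on the _ioctl_ringparam_modify() helper above: the legacy SIOCETHTOOL ring-param command carries no HDS fields at all, which is why resizing a ring through it must leave the netlink-configured tcp-data-split mode untouched. A minimal C sketch of that legacy path (illustrative only; assumes an AF_INET socket fd and is not part of the selftest):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Resize the Tx ring via the legacy ioctl; struct ethtool_ringparam has no
 * tcp-data-split / hds-thresh members, so HDS state cannot be altered here. */
static int legacy_set_tx_ring(int fd, const char *ifname, __u32 tx_pending)
{
	struct ethtool_ringparam erp = { .cmd = ETHTOOL_GRINGPARAM };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&erp;

	if (ioctl(fd, SIOCETHTOOL, &ifr))	/* read current ring sizes */
		return -1;

	erp.cmd = ETHTOOL_SRINGPARAM;
	erp.tx_pending = tx_pending;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}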
diff --git a/tools/testing/selftests/drivers/net/queues.py b/tools/testing/selftests/drivers/net/queues.py
index 38303da957ee..8a518905a9f9 100755
--- a/tools/testing/selftests/drivers/net/queues.py
+++ b/tools/testing/selftests/drivers/net/queues.py
@@ -45,10 +45,9 @@ def addremove_queues(cfg, nl) -> None:
netnl = EthtoolFamily()
channels = netnl.channels_get({'header': {'dev-index': cfg.ifindex}})
- if channels['combined-count'] == 0:
- rx_type = 'rx'
- else:
- rx_type = 'combined'
+ rx_type = 'rx'
+ if channels.get('combined-count', 0) > 0:
+ rx_type = 'combined'
expected = curr_queues - 1
cmd(f"ethtool -L {cfg.dev['ifname']} {rx_type} {expected}", timeout=10)
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc
index 449f9d8be746..73f6c6fcecab 100644
--- a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc
@@ -10,12 +10,16 @@ PLACE=$FUNCTION_FORK
PLACE2="kmem_cache_free"
PLACE3="schedule_timeout"
+# Some functions may have BPF programs attached, therefore
+# count already enabled_functions before tests start
+ocnt=`cat enabled_functions | wc -l`
+
echo "f:myevent1 $PLACE" >> dynamic_events
# Make sure the event is attached and is the only one
grep -q $PLACE enabled_functions
cnt=`cat enabled_functions | wc -l`
-if [ $cnt -ne 1 ]; then
+if [ $cnt -ne $((ocnt + 1)) ]; then
exit_fail
fi
@@ -23,7 +27,7 @@ echo "f:myevent2 $PLACE%return" >> dynamic_events
# It should still be the only attached function
cnt=`cat enabled_functions | wc -l`
-if [ $cnt -ne 1 ]; then
+if [ $cnt -ne $((ocnt + 1)) ]; then
exit_fail
fi
@@ -32,7 +36,7 @@ echo "f:myevent3 $PLACE2" >> dynamic_events
grep -q $PLACE2 enabled_functions
cnt=`cat enabled_functions | wc -l`
-if [ $cnt -ne 2 ]; then
+if [ $cnt -ne $((ocnt + 2)) ]; then
exit_fail
fi
@@ -49,7 +53,7 @@ grep -q myevent1 dynamic_events
# should still have 2 left
cnt=`cat enabled_functions | wc -l`
-if [ $cnt -ne 2 ]; then
+if [ $cnt -ne $((ocnt + 2)) ]; then
exit_fail
fi
@@ -57,7 +61,7 @@ echo > dynamic_events
# Should have none left
cnt=`cat enabled_functions | wc -l`
-if [ $cnt -ne 0 ]; then
+if [ $cnt -ne $ocnt ]; then
exit_fail
fi
@@ -65,7 +69,7 @@ echo "f:myevent4 $PLACE" >> dynamic_events
# Should only have one enabled
cnt=`cat enabled_functions | wc -l`
-if [ $cnt -ne 1 ]; then
+if [ $cnt -ne $((ocnt + 1)) ]; then
exit_fail
fi
@@ -73,7 +77,7 @@ echo > dynamic_events
# Should have none left
cnt=`cat enabled_functions | wc -l`
-if [ $cnt -ne 0 ]; then
+if [ $cnt -ne $ocnt ]; then
exit_fail
fi
diff --git a/tools/testing/selftests/hid/Makefile b/tools/testing/selftests/hid/Makefile
index 0336353bd15f..2839d2612ce3 100644
--- a/tools/testing/selftests/hid/Makefile
+++ b/tools/testing/selftests/hid/Makefile
@@ -43,10 +43,8 @@ TEST_GEN_PROGS = hid_bpf hidraw
# $3 - target (assumed to be file); only file name will be emitted;
# $4 - optional extra arg, emitted as-is, if provided.
ifeq ($(V),1)
-Q =
msg =
else
-Q = @
msg = @printf ' %-8s%s %s%s\n' "$(1)" "$(if $(2), [$(2)])" "$(notdir $(3))" "$(if $(4), $(4))";
MAKEFLAGS += --no-print-directory
submake_extras := feature_display=0
diff --git a/tools/testing/selftests/kvm/mmu_stress_test.c b/tools/testing/selftests/kvm/mmu_stress_test.c
index d9c76b4c0d88..6a437d2be9fa 100644
--- a/tools/testing/selftests/kvm/mmu_stress_test.c
+++ b/tools/testing/selftests/kvm/mmu_stress_test.c
@@ -18,6 +18,7 @@
#include "ucall_common.h"
static bool mprotect_ro_done;
+static bool all_vcpus_hit_ro_fault;
static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
{
@@ -36,9 +37,9 @@ static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
/*
* Write to the region while mprotect(PROT_READ) is underway. Keep
- * looping until the memory is guaranteed to be read-only, otherwise
- * vCPUs may complete their writes and advance to the next stage
- * prematurely.
+ * looping until the memory is guaranteed to be read-only and a fault
+ * has occurred, otherwise vCPUs may complete their writes and advance
+ * to the next stage prematurely.
*
* For architectures that support skipping the faulting instruction,
* generate the store via inline assembly to ensure the exact length
@@ -56,7 +57,7 @@ static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
#else
vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa);
#endif
- } while (!READ_ONCE(mprotect_ro_done));
+ } while (!READ_ONCE(mprotect_ro_done) || !READ_ONCE(all_vcpus_hit_ro_fault));
/*
* Only architectures that write the entire range can explicitly sync,
@@ -81,6 +82,7 @@ struct vcpu_info {
static int nr_vcpus;
static atomic_t rendezvous;
+static atomic_t nr_ro_faults;
static void rendezvous_with_boss(void)
{
@@ -148,12 +150,16 @@ static void *vcpu_worker(void *data)
* be stuck on the faulting instruction for other architectures. Go to
* stage 3 without a rendezvous
*/
- do {
- r = _vcpu_run(vcpu);
- } while (!r);
+ r = _vcpu_run(vcpu);
TEST_ASSERT(r == -1 && errno == EFAULT,
"Expected EFAULT on write to RO memory, got r = %d, errno = %d", r, errno);
+ atomic_inc(&nr_ro_faults);
+ if (atomic_read(&nr_ro_faults) == nr_vcpus) {
+ WRITE_ONCE(all_vcpus_hit_ro_fault, true);
+ sync_global_to_guest(vm, all_vcpus_hit_ro_fault);
+ }
+
#if defined(__x86_64__) || defined(__aarch64__)
/*
* Verify *all* writes from the guest hit EFAULT due to the VMA now
@@ -378,7 +384,6 @@ int main(int argc, char *argv[])
rendezvous_with_vcpus(&time_run2, "run 2");
mprotect(mem, slot_size, PROT_READ);
- usleep(10);
mprotect_ro_done = true;
sync_global_to_guest(vm, mprotect_ro_done);
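
The flag and counter added above implement a simple "last vCPU to fault publishes the flag" handshake; the sketch below shows the same pattern with plain C11 atomics (illustrative only, not the selftest's helpers):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int nr_ro_faults;
static atomic_bool all_vcpus_hit_ro_fault;

/* Each vCPU worker calls this once after observing the expected EFAULT;
 * the last arrival publishes the flag the guest write-loop polls, so no
 * vCPU can advance before every vCPU has really taken a read-only fault. */
static void note_ro_fault(int nr_vcpus)
{
	if (atomic_fetch_add(&nr_ro_faults, 1) + 1 == nr_vcpus)
		atomic_store(&all_vcpus_hit_ro_fault, true);
}

/* Guest-side condition: keep writing until mprotect() has finished *and*
 * every vCPU has faulted, matching the new loop condition in guest_code(). */
static bool may_stop_writing(bool mprotect_ro_done)
{
	return mprotect_ro_done && atomic_load(&all_vcpus_hit_ro_fault);
}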
diff --git a/tools/testing/selftests/kvm/x86/nested_exceptions_test.c b/tools/testing/selftests/kvm/x86/nested_exceptions_test.c
index 3eb0313ffa39..3641a42934ac 100644
--- a/tools/testing/selftests/kvm/x86/nested_exceptions_test.c
+++ b/tools/testing/selftests/kvm/x86/nested_exceptions_test.c
@@ -85,6 +85,7 @@ static void svm_run_l2(struct svm_test_data *svm, void *l2_code, int vector,
GUEST_ASSERT_EQ(ctrl->exit_code, (SVM_EXIT_EXCP_BASE + vector));
GUEST_ASSERT_EQ(ctrl->exit_info_1, error_code);
+ GUEST_ASSERT(!ctrl->int_state);
}
static void l1_svm_code(struct svm_test_data *svm)
@@ -122,6 +123,7 @@ static void vmx_run_l2(void *l2_code, int vector, uint32_t error_code)
GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_EXCEPTION_NMI);
GUEST_ASSERT_EQ((vmreadz(VM_EXIT_INTR_INFO) & 0xff), vector);
GUEST_ASSERT_EQ(vmreadz(VM_EXIT_INTR_ERROR_CODE), error_code);
+ GUEST_ASSERT(!vmreadz(GUEST_INTERRUPTIBILITY_INFO));
}
static void l1_vmx_code(struct vmx_pages *vmx)
diff --git a/tools/testing/selftests/kvm/x86/sev_smoke_test.c b/tools/testing/selftests/kvm/x86/sev_smoke_test.c
index a1a688e75266..d97816dc476a 100644
--- a/tools/testing/selftests/kvm/x86/sev_smoke_test.c
+++ b/tools/testing/selftests/kvm/x86/sev_smoke_test.c
@@ -52,7 +52,8 @@ static void compare_xsave(u8 *from_host, u8 *from_guest)
bool bad = false;
for (i = 0; i < 4095; i++) {
if (from_host[i] != from_guest[i]) {
- printf("mismatch at %02hhx | %02hhx %02hhx\n", i, from_host[i], from_guest[i]);
+ printf("mismatch at %u | %02hhx %02hhx\n",
+ i, from_host[i], from_guest[i]);
bad = true;
}
}
diff --git a/tools/testing/selftests/landlock/.gitignore b/tools/testing/selftests/landlock/.gitignore
index 470203a7cd73..335b2b1a3463 100644
--- a/tools/testing/selftests/landlock/.gitignore
+++ b/tools/testing/selftests/landlock/.gitignore
@@ -1,2 +1,4 @@
/*_test
+/sandbox-and-launch
/true
+/wait-pipe
diff --git a/tools/testing/selftests/landlock/common.h b/tools/testing/selftests/landlock/common.h
index a604ea5d8297..6064c9ac0532 100644
--- a/tools/testing/selftests/landlock/common.h
+++ b/tools/testing/selftests/landlock/common.h
@@ -207,6 +207,7 @@ enforce_ruleset(struct __test_metadata *const _metadata, const int ruleset_fd)
struct protocol_variant {
int domain;
int type;
+ int protocol;
};
struct service_fixture {
diff --git a/tools/testing/selftests/landlock/config b/tools/testing/selftests/landlock/config
index 29af19c4e9f9..425de4c20271 100644
--- a/tools/testing/selftests/landlock/config
+++ b/tools/testing/selftests/landlock/config
@@ -1,8 +1,11 @@
+CONFIG_AF_UNIX_OOB=y
CONFIG_CGROUPS=y
CONFIG_CGROUP_SCHED=y
CONFIG_INET=y
CONFIG_IPV6=y
CONFIG_KEYS=y
+CONFIG_MPTCP=y
+CONFIG_MPTCP_IPV6=y
CONFIG_NET=y
CONFIG_NET_NS=y
CONFIG_OVERLAY_FS=y
diff --git a/tools/testing/selftests/landlock/net_test.c b/tools/testing/selftests/landlock/net_test.c
index 4e0aeb53b225..d9de0ee49ebc 100644
--- a/tools/testing/selftests/landlock/net_test.c
+++ b/tools/testing/selftests/landlock/net_test.c
@@ -85,18 +85,18 @@ static void setup_loopback(struct __test_metadata *const _metadata)
clear_ambient_cap(_metadata, CAP_NET_ADMIN);
}
+static bool prot_is_tcp(const struct protocol_variant *const prot)
+{
+ return (prot->domain == AF_INET || prot->domain == AF_INET6) &&
+ prot->type == SOCK_STREAM &&
+ (prot->protocol == IPPROTO_TCP || prot->protocol == IPPROTO_IP);
+}
+
static bool is_restricted(const struct protocol_variant *const prot,
const enum sandbox_type sandbox)
{
- switch (prot->domain) {
- case AF_INET:
- case AF_INET6:
- switch (prot->type) {
- case SOCK_STREAM:
- return sandbox == TCP_SANDBOX;
- }
- break;
- }
+ if (sandbox == TCP_SANDBOX)
+ return prot_is_tcp(prot);
return false;
}
@@ -105,7 +105,7 @@ static int socket_variant(const struct service_fixture *const srv)
int ret;
ret = socket(srv->protocol.domain, srv->protocol.type | SOCK_CLOEXEC,
- 0);
+ srv->protocol.protocol);
if (ret < 0)
return -errno;
return ret;
@@ -290,22 +290,70 @@ FIXTURE_TEARDOWN(protocol)
}
/* clang-format off */
-FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_tcp) {
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_tcp1) {
/* clang-format on */
.sandbox = NO_SANDBOX,
.prot = {
.domain = AF_INET,
.type = SOCK_STREAM,
+ /* IPPROTO_IP == 0 */
+ .protocol = IPPROTO_IP,
},
};
/* clang-format off */
-FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_tcp) {
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_tcp2) {
+ /* clang-format on */
+ .sandbox = NO_SANDBOX,
+ .prot = {
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ .protocol = IPPROTO_TCP,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_mptcp) {
+ /* clang-format on */
+ .sandbox = NO_SANDBOX,
+ .prot = {
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ .protocol = IPPROTO_MPTCP,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_tcp1) {
+ /* clang-format on */
+ .sandbox = NO_SANDBOX,
+ .prot = {
+ .domain = AF_INET6,
+ .type = SOCK_STREAM,
+ /* IPPROTO_IP == 0 */
+ .protocol = IPPROTO_IP,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_tcp2) {
/* clang-format on */
.sandbox = NO_SANDBOX,
.prot = {
.domain = AF_INET6,
.type = SOCK_STREAM,
+ .protocol = IPPROTO_TCP,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_mptcp) {
+ /* clang-format on */
+ .sandbox = NO_SANDBOX,
+ .prot = {
+ .domain = AF_INET6,
+ .type = SOCK_STREAM,
+ .protocol = IPPROTO_MPTCP,
},
};
@@ -350,22 +398,70 @@ FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_unix_datagram) {
};
/* clang-format off */
-FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_tcp) {
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_tcp1) {
+ /* clang-format on */
+ .sandbox = TCP_SANDBOX,
+ .prot = {
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ /* IPPROTO_IP == 0 */
+ .protocol = IPPROTO_IP,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_tcp2) {
+ /* clang-format on */
+ .sandbox = TCP_SANDBOX,
+ .prot = {
+ .domain = AF_INET,
+ .type = SOCK_STREAM,
+ .protocol = IPPROTO_TCP,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_mptcp) {
/* clang-format on */
.sandbox = TCP_SANDBOX,
.prot = {
.domain = AF_INET,
.type = SOCK_STREAM,
+ .protocol = IPPROTO_MPTCP,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_tcp1) {
+ /* clang-format on */
+ .sandbox = TCP_SANDBOX,
+ .prot = {
+ .domain = AF_INET6,
+ .type = SOCK_STREAM,
+ /* IPPROTO_IP == 0 */
+ .protocol = IPPROTO_IP,
+ },
+};
+
+/* clang-format off */
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_tcp2) {
+ /* clang-format on */
+ .sandbox = TCP_SANDBOX,
+ .prot = {
+ .domain = AF_INET6,
+ .type = SOCK_STREAM,
+ .protocol = IPPROTO_TCP,
},
};
/* clang-format off */
-FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_tcp) {
+FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_mptcp) {
/* clang-format on */
.sandbox = TCP_SANDBOX,
.prot = {
.domain = AF_INET6,
.type = SOCK_STREAM,
+ .protocol = IPPROTO_MPTCP,
},
};
diff --git a/tools/testing/selftests/mm/hugepage-mremap.c b/tools/testing/selftests/mm/hugepage-mremap.c
index ada9156cc497..c463d1c09c9b 100644
--- a/tools/testing/selftests/mm/hugepage-mremap.c
+++ b/tools/testing/selftests/mm/hugepage-mremap.c
@@ -15,7 +15,7 @@
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
-#include <asm-generic/unistd.h>
+#include <unistd.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h> /* Definition of O_* constants */
diff --git a/tools/testing/selftests/mm/ksm_functional_tests.c b/tools/testing/selftests/mm/ksm_functional_tests.c
index 66b4e111b5a2..b61803e36d1c 100644
--- a/tools/testing/selftests/mm/ksm_functional_tests.c
+++ b/tools/testing/selftests/mm/ksm_functional_tests.c
@@ -11,7 +11,7 @@
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
-#include <asm-generic/unistd.h>
+#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/mman.h>
@@ -369,6 +369,7 @@ unmap:
munmap(map, size);
}
+#ifdef __NR_userfaultfd
static void test_unmerge_uffd_wp(void)
{
struct uffdio_writeprotect uffd_writeprotect;
@@ -429,6 +430,7 @@ close_uffd:
unmap:
munmap(map, size);
}
+#endif
/* Verify that KSM can be enabled / queried with prctl. */
static void test_prctl(void)
@@ -684,7 +686,9 @@ int main(int argc, char **argv)
exit(test_child_ksm());
}
+#ifdef __NR_userfaultfd
tests++;
+#endif
ksft_print_header();
ksft_set_plan(tests);
@@ -696,7 +700,9 @@ int main(int argc, char **argv)
test_unmerge();
test_unmerge_zero_pages();
test_unmerge_discarded();
+#ifdef __NR_userfaultfd
test_unmerge_uffd_wp();
+#endif
test_prot_none();
diff --git a/tools/testing/selftests/mm/memfd_secret.c b/tools/testing/selftests/mm/memfd_secret.c
index 74c911aa3aea..9a0597310a76 100644
--- a/tools/testing/selftests/mm/memfd_secret.c
+++ b/tools/testing/selftests/mm/memfd_secret.c
@@ -17,7 +17,7 @@
#include <stdlib.h>
#include <string.h>
-#include <asm-generic/unistd.h>
+#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include <fcntl.h>
@@ -28,6 +28,8 @@
#define pass(fmt, ...) ksft_test_result_pass(fmt, ##__VA_ARGS__)
#define skip(fmt, ...) ksft_test_result_skip(fmt, ##__VA_ARGS__)
+#ifdef __NR_memfd_secret
+
#define PATTERN 0x55
static const int prot = PROT_READ | PROT_WRITE;
@@ -332,3 +334,13 @@ int main(int argc, char *argv[])
ksft_finished();
}
+
+#else /* __NR_memfd_secret */
+
+int main(int argc, char *argv[])
+{
+ printf("skip: skipping memfd_secret test (missing __NR_memfd_secret)\n");
+ return KSFT_SKIP;
+}
+
+#endif /* __NR_memfd_secret */
diff --git a/tools/testing/selftests/mm/mkdirty.c b/tools/testing/selftests/mm/mkdirty.c
index af2fce496912..09feeb453646 100644
--- a/tools/testing/selftests/mm/mkdirty.c
+++ b/tools/testing/selftests/mm/mkdirty.c
@@ -9,7 +9,7 @@
*/
#include <fcntl.h>
#include <signal.h>
-#include <asm-generic/unistd.h>
+#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <stdlib.h>
@@ -265,6 +265,7 @@ munmap:
munmap(mmap_mem, mmap_size);
}
+#ifdef __NR_userfaultfd
static void test_uffdio_copy(void)
{
struct uffdio_register uffdio_register;
@@ -322,6 +323,7 @@ munmap:
munmap(dst, pagesize);
free(src);
}
+#endif /* __NR_userfaultfd */
int main(void)
{
@@ -334,7 +336,9 @@ int main(void)
thpsize / 1024);
tests += 3;
}
+#ifdef __NR_userfaultfd
tests += 1;
+#endif /* __NR_userfaultfd */
ksft_print_header();
ksft_set_plan(tests);
@@ -364,7 +368,9 @@ int main(void)
if (thpsize)
test_pte_mapped_thp();
/* Placing a fresh page via userfaultfd may set the PTE dirty. */
+#ifdef __NR_userfaultfd
test_uffdio_copy();
+#endif /* __NR_userfaultfd */
err = ksft_get_fail_cnt();
if (err)
diff --git a/tools/testing/selftests/mm/mlock2.h b/tools/testing/selftests/mm/mlock2.h
index 1e5731bab499..4417eaa5cfb7 100644
--- a/tools/testing/selftests/mm/mlock2.h
+++ b/tools/testing/selftests/mm/mlock2.h
@@ -3,7 +3,6 @@
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
-#include <asm-generic/unistd.h>
static int mlock2_(void *start, size_t len, int flags)
{
diff --git a/tools/testing/selftests/mm/protection_keys.c b/tools/testing/selftests/mm/protection_keys.c
index a4683f2476f2..35565af308af 100644
--- a/tools/testing/selftests/mm/protection_keys.c
+++ b/tools/testing/selftests/mm/protection_keys.c
@@ -42,7 +42,7 @@
#include <sys/wait.h>
#include <sys/stat.h>
#include <fcntl.h>
-#include <asm-generic/unistd.h>
+#include <unistd.h>
#include <sys/ptrace.h>
#include <setjmp.h>
diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c
index 717539eddf98..7ad6ba660c7d 100644
--- a/tools/testing/selftests/mm/uffd-common.c
+++ b/tools/testing/selftests/mm/uffd-common.c
@@ -673,7 +673,11 @@ int uffd_open_dev(unsigned int flags)
int uffd_open_sys(unsigned int flags)
{
+#ifdef __NR_userfaultfd
return syscall(__NR_userfaultfd, flags);
+#else
+ return -1;
+#endif
}
int uffd_open(unsigned int flags)
diff --git a/tools/testing/selftests/mm/uffd-stress.c b/tools/testing/selftests/mm/uffd-stress.c
index a4b83280998a..944d559ade21 100644
--- a/tools/testing/selftests/mm/uffd-stress.c
+++ b/tools/testing/selftests/mm/uffd-stress.c
@@ -33,10 +33,11 @@
* pthread_mutex_lock will also verify the atomicity of the memory
* transfer (UFFDIO_COPY).
*/
-#include <asm-generic/unistd.h>
+
#include "uffd-common.h"
uint64_t features;
+#ifdef __NR_userfaultfd
#define BOUNCE_RANDOM (1<<0)
#define BOUNCE_RACINGFAULTS (1<<1)
@@ -471,3 +472,15 @@ int main(int argc, char **argv)
nr_pages, nr_pages_per_cpu);
return userfaultfd_stress();
}
+
+#else /* __NR_userfaultfd */
+
+#warning "missing __NR_userfaultfd definition"
+
+int main(void)
+{
+ printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n");
+ return KSFT_SKIP;
+}
+
+#endif /* __NR_userfaultfd */
diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c
index 9ff71fa1f9bf..74c8bc02b506 100644
--- a/tools/testing/selftests/mm/uffd-unit-tests.c
+++ b/tools/testing/selftests/mm/uffd-unit-tests.c
@@ -5,11 +5,12 @@
* Copyright (C) 2015-2023 Red Hat, Inc.
*/
-#include <asm-generic/unistd.h>
#include "uffd-common.h"
#include "../../../../mm/gup_test.h"
+#ifdef __NR_userfaultfd
+
/* The unit test doesn't need a large or random size, make it 32MB for now */
#define UFFD_TEST_MEM_SIZE (32UL << 20)
@@ -1558,3 +1559,14 @@ int main(int argc, char *argv[])
return ksft_get_fail_cnt() ? KSFT_FAIL : KSFT_PASS;
}
+#else /* __NR_userfaultfd */
+
+#warning "missing __NR_userfaultfd definition"
+
+int main(void)
+{
+ printf("Skipping %s (missing __NR_userfaultfd)\n", __file__);
+ return KSFT_SKIP;
+}
+
+#endif /* __NR_userfaultfd */
diff --git a/tools/testing/selftests/net/lib/Makefile b/tools/testing/selftests/net/lib/Makefile
index bc6b6762baf3..c22623b9a2a5 100644
--- a/tools/testing/selftests/net/lib/Makefile
+++ b/tools/testing/selftests/net/lib/Makefile
@@ -9,7 +9,10 @@ TEST_FILES := ../../../../../Documentation/netlink/specs
TEST_FILES += ../../../../net/ynl
TEST_GEN_FILES += csum
+TEST_GEN_FILES += $(patsubst %.c,%.o,$(wildcard *.bpf.c))
TEST_INCLUDES := $(wildcard py/*.py sh/*.sh)
include ../../lib.mk
+
+include ../bpf.mk
diff --git a/tools/testing/selftests/net/lib/xdp_dummy.bpf.c b/tools/testing/selftests/net/lib/xdp_dummy.bpf.c
new file mode 100644
index 000000000000..d988b2e0cee8
--- /dev/null
+++ b/tools/testing/selftests/net/lib/xdp_dummy.bpf.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define KBUILD_MODNAME "xdp_dummy"
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+SEC("xdp")
+int xdp_dummy_prog(struct xdp_md *ctx)
+{
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/rseq/rseq-riscv-bits.h b/tools/testing/selftests/rseq/rseq-riscv-bits.h
index de31a0143139..f02f411d550d 100644
--- a/tools/testing/selftests/rseq/rseq-riscv-bits.h
+++ b/tools/testing/selftests/rseq/rseq-riscv-bits.h
@@ -243,7 +243,7 @@ int RSEQ_TEMPLATE_IDENTIFIER(rseq_offset_deref_addv)(intptr_t *ptr, off_t off, i
#ifdef RSEQ_COMPARE_TWICE
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]")
#endif
- RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, 3)
+ RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, inc, 3)
RSEQ_INJECT_ASM(4)
RSEQ_ASM_DEFINE_ABORT(4, abort)
: /* gcc asm goto does not allow outputs */
@@ -251,8 +251,8 @@ int RSEQ_TEMPLATE_IDENTIFIER(rseq_offset_deref_addv)(intptr_t *ptr, off_t off, i
[current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
[rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
[ptr] "r" (ptr),
- [off] "er" (off),
- [inc] "er" (inc)
+ [off] "r" (off),
+ [inc] "r" (inc)
RSEQ_INJECT_INPUT
: "memory", RSEQ_ASM_TMP_REG_1
RSEQ_INJECT_CLOBBER
diff --git a/tools/testing/selftests/rseq/rseq-riscv.h b/tools/testing/selftests/rseq/rseq-riscv.h
index 37e598d0a365..67d544aaa9a3 100644
--- a/tools/testing/selftests/rseq/rseq-riscv.h
+++ b/tools/testing/selftests/rseq/rseq-riscv.h
@@ -158,7 +158,7 @@ do { \
"bnez " RSEQ_ASM_TMP_REG_1 ", 222b\n" \
"333:\n"
-#define RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, post_commit_label) \
+#define RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, inc, post_commit_label) \
"mv " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(ptr) "]\n" \
RSEQ_ASM_OP_R_ADD(off) \
REG_L RSEQ_ASM_TMP_REG_1 ", 0(" RSEQ_ASM_TMP_REG_1 ")\n" \
diff --git a/tools/testing/selftests/vDSO/parse_vdso.c b/tools/testing/selftests/vDSO/parse_vdso.c
index 2fe5e983cb22..f89d052c730e 100644
--- a/tools/testing/selftests/vDSO/parse_vdso.c
+++ b/tools/testing/selftests/vDSO/parse_vdso.c
@@ -53,7 +53,7 @@ static struct vdso_info
/* Symbol table */
ELF(Sym) *symtab;
const char *symstrings;
- ELF(Word) *gnu_hash;
+ ELF(Word) *gnu_hash, *gnu_bucket;
ELF_HASH_ENTRY *bucket, *chain;
ELF_HASH_ENTRY nbucket, nchain;
@@ -185,8 +185,8 @@ void vdso_init_from_sysinfo_ehdr(uintptr_t base)
/* The bucket array is located after the header (4 uint32) and the bloom
* filter (size_t array of gnu_hash[2] elements).
*/
- vdso_info.bucket = vdso_info.gnu_hash + 4 +
- sizeof(size_t) / 4 * vdso_info.gnu_hash[2];
+ vdso_info.gnu_bucket = vdso_info.gnu_hash + 4 +
+ sizeof(size_t) / 4 * vdso_info.gnu_hash[2];
} else {
vdso_info.nbucket = hash[0];
vdso_info.nchain = hash[1];
@@ -268,11 +268,11 @@ void *vdso_sym(const char *version, const char *name)
if (vdso_info.gnu_hash) {
uint32_t h1 = gnu_hash(name), h2, *hashval;
- i = vdso_info.bucket[h1 % vdso_info.nbucket];
+ i = vdso_info.gnu_bucket[h1 % vdso_info.nbucket];
if (i == 0)
return 0;
h1 |= 1;
- hashval = vdso_info.bucket + vdso_info.nbucket +
+ hashval = vdso_info.gnu_bucket + vdso_info.nbucket +
(i - vdso_info.gnu_hash[1]);
for (;; i++) {
ELF(Sym) *sym = &vdso_info.symtab[i];
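
The gnu_bucket computation above follows the standard DT_GNU_HASH layout; a generic sketch (not the selftest's structures) of how the bucket-array offset falls out of that layout:

#include <stddef.h>
#include <stdint.h>

/* DT_GNU_HASH starts with four 32-bit words:
 *   [0] nbuckets  [1] first covered dynsym index
 *   [2] number of bloom-filter words  [3] bloom shift
 * followed by the bloom filter (one machine word per entry), then the
 * nbuckets 32-bit bucket entries, then the 32-bit chain values. */
static const uint32_t *gnu_hash_buckets(const uint32_t *gnu_hash)
{
	uint32_t bloom_words = gnu_hash[2];

	/* Skip the 4-word header plus the bloom filter, counted in 32-bit
	 * units -- exactly what the gnu_bucket assignment above computes. */
	return gnu_hash + 4 + (sizeof(size_t) / 4) * bloom_words;
}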
diff --git a/tools/thermal/lib/Makefile b/tools/thermal/lib/Makefile
index f2552f73a64c..056d212f25cf 100644
--- a/tools/thermal/lib/Makefile
+++ b/tools/thermal/lib/Makefile
@@ -39,19 +39,6 @@ libdir = $(prefix)/$(libdir_relative)
libdir_SQ = $(subst ','\'',$(libdir))
libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
-ifeq ("$(origin V)", "command line")
- VERBOSE = $(V)
-endif
-ifndef VERBOSE
- VERBOSE = 0
-endif
-
-ifeq ($(VERBOSE),1)
- Q =
-else
- Q = @
-endif
-
# Set compile option CFLAGS
ifdef EXTRA_CFLAGS
CFLAGS := $(EXTRA_CFLAGS)
diff --git a/tools/tracing/latency/Makefile b/tools/tracing/latency/Makefile
index 6518b03e05c7..257a56b1899f 100644
--- a/tools/tracing/latency/Makefile
+++ b/tools/tracing/latency/Makefile
@@ -37,12 +37,6 @@ FEATURE_TESTS += libtracefs
FEATURE_DISPLAY := libtraceevent
FEATURE_DISPLAY += libtracefs
-ifeq ($(V),1)
- Q =
-else
- Q = @
-endif
-
all: $(LATENCY-COLLECTOR)
include $(srctree)/tools/build/Makefile.include
diff --git a/tools/tracing/rtla/Makefile b/tools/tracing/rtla/Makefile
index 8b5101457c70..0b61208db604 100644
--- a/tools/tracing/rtla/Makefile
+++ b/tools/tracing/rtla/Makefile
@@ -37,12 +37,6 @@ FEATURE_DISPLAY := libtraceevent
FEATURE_DISPLAY += libtracefs
FEATURE_DISPLAY += libcpupower
-ifeq ($(V),1)
- Q =
-else
- Q = @
-endif
-
all: $(RTLA)
include $(srctree)/tools/build/Makefile.include
diff --git a/tools/verification/rv/Makefile b/tools/verification/rv/Makefile
index 411d62b3d8eb..5b898360ba48 100644
--- a/tools/verification/rv/Makefile
+++ b/tools/verification/rv/Makefile
@@ -35,12 +35,6 @@ FEATURE_TESTS += libtracefs
FEATURE_DISPLAY := libtraceevent
FEATURE_DISPLAY += libtracefs
-ifeq ($(V),1)
- Q =
-else
- Q = @
-endif
-
all: $(RV)
include $(srctree)/tools/build/Makefile.include
diff --git a/usr/include/Makefile b/usr/include/Makefile
index 6c6de1b1622b..e3d6b03527fe 100644
--- a/usr/include/Makefile
+++ b/usr/include/Makefile
@@ -10,7 +10,7 @@ UAPI_CFLAGS := -std=c90 -Wall -Werror=implicit-function-declaration
# In theory, we do not care -m32 or -m64 for header compile tests.
# It is here just because CONFIG_CC_CAN_LINK is tested with -m32 or -m64.
-UAPI_CFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
+UAPI_CFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
# USERCFLAGS might contain sysroot location for CC.
UAPI_CFLAGS += $(USERCFLAGS)