-rw-r--r--  .mailmap | 13
-rw-r--r--  CREDITS | 11
-rw-r--r--  Documentation/ABI/removed/sysfs-class-cxl (renamed from Documentation/ABI/obsolete/sysfs-class-cxl) | 55
-rw-r--r--  Documentation/ABI/stable/sysfs-devices-node | 6
-rw-r--r--  Documentation/ABI/testing/sysfs-block-zram | 8
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm | 15
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-counter | 9
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-cxl | 53
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-iio | 12
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-iio-adc-ad4130 | 20
-rw-r--r--  Documentation/ABI/testing/sysfs-driver-intel-m10-bmc | 4
-rw-r--r--  Documentation/ABI/testing/sysfs-driver-intel-m10-bmc-sec-update | 14
-rw-r--r--  Documentation/ABI/testing/sysfs-kernel-mm-cma | 13
-rw-r--r--  Documentation/ABI/testing/sysfs-kernel-mm-damon | 60
-rw-r--r--  Documentation/ABI/testing/sysfs-kernel-reboot | 8
-rw-r--r--  Documentation/ABI/testing/sysfs-pps-gen-tio | 6
-rw-r--r--  Documentation/RCU/whatisRCU.rst | 10
-rw-r--r--  Documentation/admin-guide/blockdev/zram.rst | 36
-rw-r--r--  Documentation/admin-guide/cgroup-v1/memory.rst | 4
-rw-r--r--  Documentation/admin-guide/cgroup-v2.rst | 25
-rw-r--r--  Documentation/admin-guide/device-mapper/dm-crypt.rst | 5
-rw-r--r--  Documentation/admin-guide/device-mapper/dm-integrity.rst | 5
-rw-r--r--  Documentation/admin-guide/device-mapper/verity.rst | 20
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 38
-rw-r--r--  Documentation/admin-guide/mm/cma_debugfs.rst | 10
-rw-r--r--  Documentation/admin-guide/mm/damon/usage.rst | 87
-rw-r--r--  Documentation/admin-guide/mm/hugetlbpage.rst | 10
-rw-r--r--  Documentation/admin-guide/mm/pagemap.rst | 21
-rw-r--r--  Documentation/admin-guide/mm/zswap.rst | 10
-rw-r--r--  Documentation/admin-guide/sysctl/fs.rst | 25
-rw-r--r--  Documentation/admin-guide/sysctl/vm.rst | 9
-rw-r--r--  Documentation/arch/arm64/ptdump.rst | 2
-rw-r--r--  Documentation/arch/powerpc/cxl.rst | 470
-rw-r--r--  Documentation/arch/powerpc/index.rst | 1
-rw-r--r--  Documentation/block/ublk.rst | 37
-rw-r--r--  Documentation/core-api/refcount-vs-atomic.rst | 37
-rw-r--r--  Documentation/core-api/xarray.rst | 14
-rw-r--r--  Documentation/devicetree/bindings/arm/arm,coresight-tmc.yaml | 26
-rw-r--r--  Documentation/devicetree/bindings/arm/qcom,coresight-ctcu.yaml | 84
-rw-r--r--  Documentation/devicetree/bindings/arm/qcom,coresight-tpda.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/arm/qcom,coresight-tpdm.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/dma/atmel,at91sam9g45-dma.yaml | 68
-rw-r--r--  Documentation/devicetree/bindings/dma/atmel,sama5d4-dma.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/dma/atmel-dma.txt | 42
-rw-r--r--  Documentation/devicetree/bindings/dma/fsl,edma.yaml | 8
-rw-r--r--  Documentation/devicetree/bindings/dma/fsl,elo-dma.yaml | 137
-rw-r--r--  Documentation/devicetree/bindings/dma/fsl,elo3-dma.yaml | 125
-rw-r--r--  Documentation/devicetree/bindings/dma/fsl,eloplus-dma.yaml | 132
-rw-r--r--  Documentation/devicetree/bindings/dma/fsl,mxs-dma.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/eeprom/at24.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-exynos5.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/i2c/qcom,i2c-qup.yaml | 14
-rw-r--r--  Documentation/devicetree/bindings/i2c/samsung,s3c2410-i2c.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/i2c/spacemit,k1-i2c.yaml | 61
-rw-r--r--  Documentation/devicetree/bindings/i2c/ti,omap4-i2c.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/adi,ad4030.yaml | 110
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml | 13
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/adi,ad4851.yaml | 153
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/adi,ad7191.yaml | 149
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/adi,axi-adc.yaml | 72
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/nxp,imx93-adc.yaml | 9
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/ti,ads7138.yaml | 63
-rw-r--r--  Documentation/devicetree/bindings/iio/dac/adi,ad5380.yaml | 18
-rw-r--r--  Documentation/devicetree/bindings/iio/frequency/adf4371.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/iio/humidity/sciosense,ens210.yaml | 12
-rw-r--r--  Documentation/devicetree/bindings/iio/imu/adi,adis16550.yaml | 74
-rw-r--r--  Documentation/devicetree/bindings/iio/light/brcm,apds9160.yaml | 78
-rw-r--r--  Documentation/devicetree/bindings/iio/light/dynaimage,al3010.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/iio/magnetometer/silabs,si7210.yaml | 48
-rw-r--r--  Documentation/devicetree/bindings/iio/temperature/maxim,max31865.yaml | 20
-rw-r--r--  Documentation/devicetree/bindings/iio/temperature/ti,tmp117.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/mfd/aspeed-lpc.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/phy/allwinner,sun50i-a64-usb-phy.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/phy/phy-rockchip-naneng-combphy.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/phy/qcom,ipq5332-uniphy-pcie-phy.yaml | 76
-rw-r--r--  Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml | 11
-rw-r--r--  Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/phy/rockchip,rk3588-hdptx-phy.yaml | 62
-rw-r--r--  Documentation/devicetree/bindings/phy/rockchip,rk3588-mipi-dcphy.yaml | 87
-rw-r--r--  Documentation/devicetree/bindings/phy/samsung,ufs-phy.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml | 21
-rw-r--r--  Documentation/devicetree/bindings/powerpc/fsl/dma.txt | 204
-rw-r--r--  Documentation/devicetree/bindings/rtc/adi,max31335.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/rtc/nxp,pcf2127.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/serial/8250.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/serial/fsl-lpuart.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/serial/nvidia,tegra264-utc.yaml | 73
-rw-r--r--  Documentation/devicetree/bindings/serial/pl011.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/serial/samsung_uart.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml | 21
-rw-r--r--  Documentation/devicetree/bindings/serial/sprd-uart.yaml | 9
-rw-r--r--  Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml | 48
-rw-r--r--  Documentation/devicetree/bindings/thermal/brcm,avs-tmon.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/thermal/imx-thermal.yaml | 40
-rw-r--r--  Documentation/devicetree/bindings/thermal/imx8mm-thermal.yaml | 8
-rw-r--r--  Documentation/devicetree/bindings/thermal/qcom-tsens.yaml | 18
-rw-r--r--  Documentation/devicetree/bindings/thermal/thermal-zones.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/trivial-devices.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/usb/generic-xhci.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/usb/microchip,usb2514.yaml | 35
-rw-r--r--  Documentation/devicetree/bindings/usb/parade,ps8830.yaml | 140
-rw-r--r--  Documentation/devicetree/bindings/usb/qcom,dwc3.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/usb/richtek,rt1711h.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/usb/rockchip,dwc3.yaml | 19
-rw-r--r--  Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml | 44
-rw-r--r--  Documentation/devicetree/bindings/usb/snps,dwc3-common.yaml | 11
-rw-r--r--  Documentation/devicetree/bindings/usb/usb-device.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml | 4
-rw-r--r--  Documentation/driver-api/cxl/maturity-map.rst | 2
-rw-r--r--  Documentation/driver-api/phy/phy.rst | 3
-rw-r--r--  Documentation/driver-api/pps.rst | 27
-rw-r--r--  Documentation/driver-api/serial/driver.rst | 4
-rw-r--r--  Documentation/driver-api/soundwire/bra.rst | 336
-rw-r--r--  Documentation/driver-api/soundwire/bra_cadence.rst | 66
-rw-r--r--  Documentation/driver-api/soundwire/index.rst | 2
-rw-r--r--  Documentation/driver-api/soundwire/stream.rst | 2
-rw-r--r--  Documentation/driver-api/soundwire/summary.rst | 8
-rw-r--r--  Documentation/driver-api/thermal/sysfs-api.rst | 25
-rw-r--r--  Documentation/driver-api/tty/tty_driver.rst | 4
-rw-r--r--  Documentation/driver-api/tty/tty_struct.rst | 2
-rw-r--r--  Documentation/driver-api/usb/writing_musb_glue_layer.rst | 2
-rw-r--r--  Documentation/features/core/mseal_sys_mappings/arch-support.txt | 30
-rw-r--r--  Documentation/filesystems/9p.rst | 6
-rw-r--r--  Documentation/filesystems/dax.rst | 1
-rw-r--r--  Documentation/filesystems/proc.rst | 53
-rw-r--r--  Documentation/iio/ad4030.rst | 180
-rw-r--r--  Documentation/iio/ad4695.rst | 104
-rw-r--r--  Documentation/iio/ad7191.rst | 119
-rw-r--r--  Documentation/iio/ad7380.rst | 56
-rw-r--r--  Documentation/iio/ad7944.rst | 24
-rw-r--r--  Documentation/iio/adis16550.rst | 376
-rw-r--r--  Documentation/iio/adxl380.rst | 2
-rw-r--r--  Documentation/iio/iio_adc.rst | 305
-rw-r--r--  Documentation/iio/index.rst | 4
-rw-r--r--  Documentation/mm/balance.rst | 2
-rw-r--r--  Documentation/mm/damon/design.rst | 123
-rw-r--r--  Documentation/mm/damon/monitoring_intervals_tuning_example.rst | 8
-rw-r--r--  Documentation/mm/hmm.rst | 2
-rw-r--r--  Documentation/mm/index.rst | 1
-rw-r--r--  Documentation/mm/physical_memory.rst | 266
-rw-r--r--  Documentation/mm/process_addrs.rst | 44
-rw-r--r--  Documentation/mm/transhuge.rst | 39
-rw-r--r--  Documentation/mm/z3fold.rst | 28
-rw-r--r--  Documentation/mm/zsmalloc.rst | 5
-rw-r--r--  Documentation/rust/arch-support.rst | 1
-rw-r--r--  Documentation/subsystem-apis.rst | 1
-rw-r--r--  Documentation/trace/coresight/coresight.rst | 41
-rw-r--r--  Documentation/trace/coresight/panic.rst | 362
-rw-r--r--  Documentation/trace/debugging.rst | 2
-rw-r--r--  Documentation/translations/zh_CN/mm/hmm.rst | 2
-rw-r--r--  Documentation/translations/zh_CN/mm/index.rst | 1
-rw-r--r--  Documentation/translations/zh_CN/mm/z3fold.rst | 31
-rw-r--r--  Documentation/usb/CREDITS | 2
-rw-r--r--  Documentation/userspace-api/ioctl/ioctl-number.rst | 4
-rw-r--r--  Documentation/userspace-api/iommufd.rst | 17
-rw-r--r--  Documentation/userspace-api/mseal.rst | 21
-rw-r--r--  Documentation/userspace-api/perf_ring_buffer.rst | 4
-rw-r--r--  MAINTAINERS | 155
-rw-r--r--  arch/alpha/kernel/srmcons.c | 62
-rw-r--r--  arch/alpha/mm/init.c | 8
-rw-r--r--  arch/arc/mm/init.c | 25
-rw-r--r--  arch/arc/mm/ioremap.c | 6
-rw-r--r--  arch/arm/Kconfig | 3
-rw-r--r--  arch/arm/Makefile | 1
-rw-r--r--  arch/arm/include/asm/pgtable.h | 5
-rw-r--r--  arch/arm/include/asm/vmlinux.lds.h | 14
-rw-r--r--  arch/arm/kernel/smp.c | 3
-rw-r--r--  arch/arm/kernel/vmlinux-xip.lds.S | 2
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/arm/mm/init.c | 43
-rw-r--r--  arch/arm64/Kconfig | 3
-rw-r--r--  arch/arm64/include/asm/cputype.h | 2
-rw-r--r--  arch/arm64/include/asm/io.h | 6
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 5
-rw-r--r--  arch/arm64/include/asm/ptdump.h | 4
-rw-r--r--  arch/arm64/include/asm/tlbflush.h | 23
-rw-r--r--  arch/arm64/include/asm/traps.h | 4
-rw-r--r--  arch/arm64/kernel/acpi.c | 2
-rw-r--r--  arch/arm64/kernel/compat_alignment.c | 2
-rw-r--r--  arch/arm64/kernel/proton-pack.c | 1
-rw-r--r--  arch/arm64/kernel/vdso.c | 9
-rw-r--r--  arch/arm64/kvm/Kconfig | 4
-rw-r--r--  arch/arm64/mm/Makefile | 2
-rw-r--r--  arch/arm64/mm/contpte.c | 2
-rw-r--r--  arch/arm64/mm/init.c | 18
-rw-r--r--  arch/arm64/mm/ioremap.c | 3
-rw-r--r--  arch/arm64/mm/mmu.c | 3
-rw-r--r--  arch/csky/include/asm/io.h | 2
-rw-r--r--  arch/csky/include/asm/pgalloc.h | 7
-rw-r--r--  arch/csky/kernel/setup.c | 43
-rw-r--r--  arch/csky/mm/init.c | 67
-rw-r--r--  arch/hexagon/include/asm/pgalloc.h | 7
-rw-r--r--  arch/hexagon/mm/init.c | 32
-rw-r--r--  arch/loongarch/Kconfig | 7
-rw-r--r--  arch/loongarch/configs/loongson3_defconfig | 14
-rw-r--r--  arch/loongarch/include/asm/cache.h | 2
-rw-r--r--  arch/loongarch/include/asm/io.h | 10
-rw-r--r--  arch/loongarch/include/asm/irq.h | 2
-rw-r--r--  arch/loongarch/include/asm/pgalloc.h | 7
-rw-r--r--  arch/loongarch/include/asm/stacktrace.h | 3
-rw-r--r--  arch/loongarch/include/asm/unwind_hints.h | 10
-rw-r--r--  arch/loongarch/kernel/env.c | 2
-rw-r--r--  arch/loongarch/kernel/kgdb.c | 5
-rw-r--r--  arch/loongarch/kernel/numa.c | 6
-rw-r--r--  arch/loongarch/kernel/setup.c | 5
-rw-r--r--  arch/loongarch/mm/init.c | 8
-rw-r--r--  arch/loongarch/net/bpf_jit.c | 12
-rw-r--r--  arch/loongarch/net/bpf_jit.h | 5
-rw-r--r--  arch/loongarch/vdso/Makefile | 3
-rw-r--r--  arch/loongarch/vdso/vgetrandom-chacha.S | 13
-rw-r--r--  arch/m68k/include/asm/sun3_pgalloc.h | 7
-rw-r--r--  arch/m68k/mm/init.c | 2
-rw-r--r--  arch/microblaze/mm/init.c | 27
-rw-r--r--  arch/mips/include/asm/io.h | 8
-rw-r--r--  arch/mips/include/asm/mmzone.h | 2
-rw-r--r--  arch/mips/include/asm/pgalloc.h | 7
-rw-r--r--  arch/mips/loongson64/numa.c | 7
-rw-r--r--  arch/mips/mm/init.c | 51
-rw-r--r--  arch/mips/mm/ioremap.c | 4
-rw-r--r--  arch/mips/mm/ioremap64.c | 4
-rw-r--r--  arch/mips/sgi-ip27/ip27-memory.c | 9
-rw-r--r--  arch/nios2/include/asm/pgalloc.h | 7
-rw-r--r--  arch/nios2/kernel/setup.c | 3
-rw-r--r--  arch/nios2/mm/init.c | 16
-rw-r--r--  arch/openrisc/include/asm/pgalloc.h | 7
-rw-r--r--  arch/openrisc/mm/init.c | 6
-rw-r--r--  arch/parisc/include/asm/io.h | 2
-rw-r--r--  arch/parisc/mm/init.c | 4
-rw-r--r--  arch/parisc/mm/ioremap.c | 4
-rw-r--r--  arch/powerpc/Kconfig | 5
-rw-r--r--  arch/powerpc/configs/mpc885_ads_defconfig | 2
-rw-r--r--  arch/powerpc/configs/skiroot_defconfig | 1
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hugetlb.h | 6
-rw-r--r--  arch/powerpc/include/asm/copro.h | 6
-rw-r--r--  arch/powerpc/include/asm/crash_reserve.h | 8
-rw-r--r--  arch/powerpc/include/asm/device.h | 3
-rw-r--r--  arch/powerpc/include/asm/io.h | 2
-rw-r--r--  arch/powerpc/include/asm/kexec.h | 10
-rw-r--r--  arch/powerpc/include/asm/pnv-pci.h | 17
-rw-r--r--  arch/powerpc/kernel/prom.c | 2
-rw-r--r--  arch/powerpc/kernel/secvar-sysfs.c | 24
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 2
-rw-r--r--  arch/powerpc/kexec/core.c | 96
-rw-r--r--  arch/powerpc/kexec/file_load_64.c | 259
-rw-r--r--  arch/powerpc/mm/Makefile | 2
-rw-r--r--  arch/powerpc/mm/book3s64/hash_native.c | 13
-rw-r--r--  arch/powerpc/mm/book3s64/hash_utils.c | 10
-rw-r--r--  arch/powerpc/mm/book3s64/pgtable.c | 1
-rw-r--r--  arch/powerpc/mm/book3s64/slice.c | 6
-rw-r--r--  arch/powerpc/mm/copro_fault.c | 11
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 1
-rw-r--r--  arch/powerpc/mm/init_64.c | 4
-rw-r--r--  arch/powerpc/mm/ioremap.c | 4
-rw-r--r--  arch/powerpc/mm/mem.c | 20
-rw-r--r--  arch/powerpc/perf/hv-24x7.c | 8
-rw-r--r--  arch/powerpc/platforms/cell/spufs/gang.c | 1
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c | 63
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h | 2
-rw-r--r--  arch/powerpc/platforms/powernv/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/powernv/opal-core.c | 10
-rw-r--r--  arch/powerpc/platforms/powernv/opal-dump.c | 4
-rw-r--r--  arch/powerpc/platforms/powernv/opal-elog.c | 4
-rw-r--r--  arch/powerpc/platforms/powernv/opal-flash.c | 4
-rw-r--r--  arch/powerpc/platforms/powernv/opal-msglog.c | 6
-rw-r--r--  arch/powerpc/platforms/powernv/pci-cxl.c | 153
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 43
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c | 61
-rw-r--r--  arch/powerpc/platforms/powernv/pci.h | 2
-rw-r--r--  arch/powerpc/platforms/powernv/ultravisor.c | 6
-rw-r--r--  arch/powerpc/platforms/ps3/spu.c | 4
-rw-r--r--  arch/riscv/Kconfig | 2
-rw-r--r--  arch/riscv/include/asm/io.h | 2
-rw-r--r--  arch/riscv/include/asm/pgalloc.h | 26
-rw-r--r--  arch/riscv/include/asm/tlbflush.h | 3
-rw-r--r--  arch/riscv/kernel/acpi.c | 2
-rw-r--r--  arch/riscv/mm/Makefile | 2
-rw-r--r--  arch/riscv/mm/init.c | 11
-rw-r--r--  arch/riscv/mm/tlbflush.c | 3
-rw-r--r--  arch/s390/Kconfig | 3
-rw-r--r--  arch/s390/configs/debug_defconfig | 2
-rw-r--r--  arch/s390/configs/defconfig | 2
-rw-r--r--  arch/s390/hypfs/hypfs_diag_fs.c | 2
-rw-r--r--  arch/s390/include/asm/io.h | 2
-rw-r--r--  arch/s390/kernel/vdso.c | 2
-rw-r--r--  arch/s390/mm/Makefile | 2
-rw-r--r--  arch/s390/mm/init.c | 33
-rw-r--r--  arch/s390/pci/pci.c | 4
-rw-r--r--  arch/sh/boards/mach-landisk/setup.c | 2
-rw-r--r--  arch/sh/boards/mach-lboxre2/setup.c | 2
-rw-r--r--  arch/sh/boards/mach-sh03/setup.c | 2
-rw-r--r--  arch/sh/include/asm/io.h | 2
-rw-r--r--  arch/sh/include/asm/pgalloc.h | 7
-rw-r--r--  arch/sh/mm/init.c | 10
-rw-r--r--  arch/sh/mm/ioremap.c | 3
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h | 2
-rw-r--r--  arch/sparc/mm/init_32.c | 31
-rw-r--r--  arch/sparc/mm/init_64.c | 4
-rw-r--r--  arch/sparc/mm/tlb.c | 5
-rw-r--r--  arch/um/Kconfig | 1
-rw-r--r--  arch/um/drivers/Kconfig | 12
-rw-r--r--  arch/um/drivers/Makefile | 3
-rw-r--r--  arch/um/drivers/random.c | 2
-rw-r--r--  arch/um/drivers/rtc_user.c | 2
-rw-r--r--  arch/um/drivers/ubd.h | 6
-rw-r--r--  arch/um/drivers/ubd_kern.c | 25
-rw-r--r--  arch/um/drivers/ubd_user.c | 14
-rw-r--r--  arch/um/drivers/virt-pci.c | 699
-rw-r--r--  arch/um/drivers/virt-pci.h | 41
-rw-r--r--  arch/um/drivers/virtio_pcidev.c | 628
-rw-r--r--  arch/um/include/asm/Kbuild | 1
-rw-r--r--  arch/um/include/asm/pgalloc.h | 21
-rw-r--r--  arch/um/include/asm/processor-generic.h | 2
-rw-r--r--  arch/um/include/asm/uaccess.h | 20
-rw-r--r--  arch/um/include/linux/time-internal.h | 2
-rw-r--r--  arch/um/include/shared/arch.h | 2
-rw-r--r--  arch/um/include/shared/as-layout.h | 2
-rw-r--r--  arch/um/include/shared/irq_user.h | 3
-rw-r--r--  arch/um/include/shared/kern_util.h | 12
-rw-r--r--  arch/um/include/shared/mem_user.h | 1
-rw-r--r--  arch/um/include/shared/os.h | 8
-rw-r--r--  arch/um/include/shared/sigio.h | 1
-rw-r--r--  arch/um/kernel/Makefile | 2
-rw-r--r--  arch/um/kernel/irq.c | 3
-rw-r--r--  arch/um/kernel/maccess.c | 19
-rw-r--r--  arch/um/kernel/mem.c | 20
-rw-r--r--  arch/um/kernel/physmem.c | 12
-rw-r--r--  arch/um/kernel/sigio.c | 26
-rw-r--r--  arch/um/kernel/skas/syscall.c | 11
-rw-r--r--  arch/um/kernel/trap.c | 28
-rw-r--r--  arch/um/kernel/um_arch.c | 5
-rw-r--r--  arch/um/os-Linux/helper.c | 67
-rw-r--r--  arch/um/os-Linux/process.c | 51
-rw-r--r--  arch/um/os-Linux/sigio.c | 352
-rw-r--r--  arch/um/os-Linux/signal.c | 4
-rw-r--r--  arch/um/os-Linux/skas/process.c | 8
-rw-r--r--  arch/x86/Kconfig | 5
-rw-r--r--  arch/x86/Kconfig.debug | 2
-rw-r--r--  arch/x86/Makefile.um | 7
-rw-r--r--  arch/x86/coco/tdx/tdx.c | 34
-rw-r--r--  arch/x86/entry/vdso/vma.c | 5
-rw-r--r--  arch/x86/include/asm/arch_hweight.h | 6
-rw-r--r--  arch/x86/include/asm/highmem.h | 3
-rw-r--r--  arch/x86/include/asm/io.h | 2
-rw-r--r--  arch/x86/include/asm/iosf_mbi.h | 7
-rw-r--r--  arch/x86/include/asm/irqflags.h | 40
-rw-r--r--  arch/x86/include/asm/numa.h | 4
-rw-r--r--  arch/x86/include/asm/numa_32.h | 13
-rw-r--r--  arch/x86/include/asm/paravirt.h | 20
-rw-r--r--  arch/x86/include/asm/paravirt_types.h | 3
-rw-r--r--  arch/x86/include/asm/percpu.h | 33
-rw-r--r--  arch/x86/include/asm/smap.h | 23
-rw-r--r--  arch/x86/include/asm/tdx.h | 4
-rw-r--r--  arch/x86/include/asm/tlbflush.h | 3
-rw-r--r--  arch/x86/include/asm/xen/hypercall.h | 6
-rw-r--r--  arch/x86/include/asm/xen/hypervisor.h | 15
-rw-r--r--  arch/x86/kernel/cpu/mce/severity.c | 11
-rw-r--r--  arch/x86/kernel/cpu/resctrl/internal.h | 5
-rw-r--r--  arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 14
-rw-r--r--  arch/x86/kernel/cpu/resctrl/rdtgroup.c | 73
-rw-r--r--  arch/x86/kernel/kgdb.c | 2
-rw-r--r--  arch/x86/kernel/paravirt.c | 14
-rw-r--r--  arch/x86/kernel/process.c | 2
-rw-r--r--  arch/x86/kernel/setup.c | 12
-rw-r--r--  arch/x86/lib/copy_user_64.S | 18
-rw-r--r--  arch/x86/mm/Makefile | 4
-rw-r--r--  arch/x86/mm/highmem_32.c | 34
-rw-r--r--  arch/x86/mm/init_32.c | 38
-rw-r--r--  arch/x86/mm/init_64.c | 40
-rw-r--r--  arch/x86/mm/ioremap.c | 4
-rw-r--r--  arch/x86/mm/pat/set_memory.c | 1
-rw-r--r--  arch/x86/mm/pgtable.c | 8
-rw-r--r--  arch/x86/platform/intel/iosf_mbi.c | 13
-rw-r--r--  arch/x86/power/cpu.c | 14
-rw-r--r--  arch/x86/tools/insn_decoder_test.c | 2
-rw-r--r--  arch/x86/um/asm/barrier.h | 6
-rw-r--r--  arch/x86/um/asm/module.h | 24
-rw-r--r--  arch/x86/um/os-Linux/mcontext.c | 15
-rw-r--r--  arch/x86/um/shared/sysdep/faultinfo_32.h | 12
-rw-r--r--  arch/x86/um/shared/sysdep/faultinfo_64.h | 12
-rw-r--r--  arch/x86/um/vdso/vma.c | 17
-rw-r--r--  arch/x86/xen/enlighten_pv.c | 1
-rw-r--r--  arch/xtensa/include/asm/io.h | 6
-rw-r--r--  arch/xtensa/mm/init.c | 66
-rw-r--r--  arch/xtensa/mm/ioremap.c | 4
-rw-r--r--  block/blk-mq.c | 29
-rw-r--r--  crypto/testmgr.c | 157
-rw-r--r--  drivers/accel/habanalabs/common/command_submission.c | 2
-rw-r--r--  drivers/accel/habanalabs/common/debugfs.c | 2
-rw-r--r--  drivers/accel/habanalabs/common/device.c | 2
-rw-r--r--  drivers/accel/habanalabs/common/habanalabs_drv.c | 2
-rw-r--r--  drivers/accel/habanalabs/common/sysfs.c | 10
-rw-r--r--  drivers/acpi/acpi_pnp.c | 2
-rw-r--r--  drivers/acpi/acpi_video.c | 9
-rw-r--r--  drivers/acpi/apei/ghes.c | 103
-rw-r--r--  drivers/acpi/nfit/core.c | 2
-rw-r--r--  drivers/acpi/numa/hmat.c | 44
-rw-r--r--  drivers/acpi/numa/srat.c | 22
-rw-r--r--  drivers/acpi/platform_profile.c | 13
-rw-r--r--  drivers/acpi/processor_idle.c | 4
-rw-r--r--  drivers/acpi/resource.c | 7
-rw-r--r--  drivers/acpi/x86/utils.c | 3
-rw-r--r--  drivers/android/binder_internal.h | 1
-rw-r--r--  drivers/ata/libata-zpodd.c | 3
-rw-r--r--  drivers/base/bus.c | 2
-rw-r--r--  drivers/base/component.c | 12
-rw-r--r--  drivers/base/faux.c | 15
-rw-r--r--  drivers/base/memory.c | 29
-rw-r--r--  drivers/base/node.c | 2
-rw-r--r--  drivers/base/physical_location.c | 5
-rw-r--r--  drivers/block/ublk_drv.c | 223
-rw-r--r--  drivers/block/zram/backend_zstd.c | 11
-rw-r--r--  drivers/block/zram/zcomp.c | 48
-rw-r--r--  drivers/block/zram/zcomp.h | 8
-rw-r--r--  drivers/block/zram/zram_drv.c | 330
-rw-r--r--  drivers/block/zram/zram_drv.h | 17
-rw-r--r--  drivers/bus/mhi/host/main.c | 35
-rw-r--r--  drivers/bus/mhi/host/pci_generic.c | 34
-rw-r--r--  drivers/bus/mhi/host/pm.c | 14
-rw-r--r--  drivers/char/tlclk.c | 32
-rw-r--r--  drivers/char/virtio_console.c | 9
-rw-r--r--  drivers/clk/clkdev.c | 9
-rw-r--r--  drivers/counter/microchip-tcb-capture.c | 179
-rw-r--r--  drivers/counter/stm32-lptimer-cnt.c | 24
-rw-r--r--  drivers/counter/ti-eqep.c | 32
-rw-r--r--  drivers/cpufreq/cpufreq.c | 6
-rw-r--r--  drivers/cxl/Kconfig | 4
-rw-r--r--  drivers/cxl/core/Makefile | 3
-rw-r--r--  drivers/cxl/core/acpi.c | 11
-rw-r--r--  drivers/cxl/core/cdat.c | 102
-rw-r--r--  drivers/cxl/core/core.h | 10
-rw-r--r--  drivers/cxl/core/hdm.c | 382
-rw-r--r--  drivers/cxl/core/mbox.c | 141
-rw-r--r--  drivers/cxl/core/mce.c | 65
-rw-r--r--  drivers/cxl/core/mce.h | 20
-rw-r--r--  drivers/cxl/core/memdev.c | 83
-rw-r--r--  drivers/cxl/core/pci.c | 97
-rw-r--r--  drivers/cxl/core/port.c | 38
-rw-r--r--  drivers/cxl/core/ras.c | 119
-rw-r--r--  drivers/cxl/core/region.c | 336
-rw-r--r--  drivers/cxl/core/trace.h | 81
-rw-r--r--  drivers/cxl/cxl.h | 52
-rw-r--r--  drivers/cxl/cxlmem.h | 77
-rw-r--r--  drivers/cxl/cxlpci.h | 6
-rw-r--r--  drivers/cxl/mem.c | 2
-rw-r--r--  drivers/cxl/pci.c | 7
-rw-r--r--  drivers/cxl/pmem.c | 81
-rw-r--r--  drivers/cxl/port.c | 10
-rw-r--r--  drivers/dax/device.c | 24
-rw-r--r--  drivers/dma/Kconfig | 2
-rw-r--r--  drivers/dma/amd/ae4dma/ae4dma-pci.c | 6
-rw-r--r--  drivers/dma/amd/ae4dma/ae4dma.h | 2
-rw-r--r--  drivers/dma/amd/ptdma/ptdma-dmaengine.c | 90
-rw-r--r--  drivers/dma/bcm2835-dma.c | 2
-rw-r--r--  drivers/dma/dmaengine.c | 16
-rw-r--r--  drivers/dma/dmatest.c | 6
-rw-r--r--  drivers/dma/dw-edma/dw-edma-core.c | 6
-rw-r--r--  drivers/dma/dw/pci.c | 8
-rw-r--r--  drivers/dma/dw/platform.c | 8
-rw-r--r--  drivers/dma/fsl-edma-main.c | 18
-rw-r--r--  drivers/dma/idxd/init.c | 3
-rw-r--r--  drivers/dma/img-mdc-dma.c | 2
-rw-r--r--  drivers/dma/imx-dma.c | 3
-rw-r--r--  drivers/dma/imx-sdma.c | 5
-rw-r--r--  drivers/dma/pxa_dma.c | 4
-rw-r--r--  drivers/dma/sh/shdma-base.c | 2
-rw-r--r--  drivers/dma/sun6i-dma.c | 3
-rw-r--r--  drivers/dma/ti/edma.c | 11
-rw-r--r--  drivers/dma/ti/k3-udma-glue.c | 15
-rw-r--r--  drivers/dma/ti/k3-udma.c | 36
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c | 3
-rw-r--r--  drivers/firewire/core-cdev.c | 42
-rw-r--r--  drivers/firmware/cirrus/cs_dsp.c | 2
-rw-r--r--  drivers/firmware/dmi-sysfs.c | 28
-rw-r--r--  drivers/firmware/dmi_scan.c | 4
-rw-r--r--  drivers/firmware/efi/cper.c | 6
-rw-r--r--  drivers/firmware/efi/cper_cxl.c | 39
-rw-r--r--  drivers/firmware/efi/cper_cxl.h | 66
-rw-r--r--  drivers/firmware/efi/mokvar-table.c | 4
-rw-r--r--  drivers/firmware/efi/rci2-table.c | 2
-rw-r--r--  drivers/firmware/qemu_fw_cfg.c | 6
-rw-r--r--  drivers/fpga/altera-cvp.c | 2
-rw-r--r--  drivers/fpga/versal-fpga.c | 2
-rw-r--r--  drivers/fsi/fsi-core.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 8
-rw-r--r--  drivers/gpu/drm/drm_gpusvm.c | 4
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_iosf_mbi.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 12
-rw-r--r--  drivers/gpu/drm/lima/lima_drv.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dmem.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.c | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_svm.c | 2
-rw-r--r--  drivers/gpu/nova-core/driver.rs | 4
-rw-r--r--  drivers/gpu/nova-core/regs.rs | 2
-rw-r--r--  drivers/greybus/gb-beagleplay.c | 4
-rw-r--r--  drivers/hv/hv_balloon.c | 2
-rw-r--r--  drivers/hwtracing/coresight/Kconfig | 12
-rw-r--r--  drivers/hwtracing/coresight/Makefile | 4
-rw-r--r--  drivers/hwtracing/coresight/coresight-catu.c | 4
-rw-r--r--  drivers/hwtracing/coresight/coresight-cfg-preload.c | 2
-rw-r--r--  drivers/hwtracing/coresight/coresight-cfg-preload.h | 2
-rw-r--r--  drivers/hwtracing/coresight/coresight-cfg-pstop.c | 83
-rw-r--r--  drivers/hwtracing/coresight/coresight-config.c | 8
-rw-r--r--  drivers/hwtracing/coresight/coresight-config.h | 2
-rw-r--r--  drivers/hwtracing/coresight/coresight-core.c | 192
-rw-r--r--  drivers/hwtracing/coresight/coresight-ctcu-core.c | 326
-rw-r--r--  drivers/hwtracing/coresight/coresight-ctcu.h | 39
-rw-r--r--  drivers/hwtracing/coresight/coresight-cti-core.c | 44
-rw-r--r--  drivers/hwtracing/coresight/coresight-cti-sysfs.c | 76
-rw-r--r--  drivers/hwtracing/coresight/coresight-cti.h | 2
-rw-r--r--  drivers/hwtracing/coresight/coresight-dummy.c | 15
-rw-r--r--  drivers/hwtracing/coresight/coresight-etb10.c | 26
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm-perf.c | 27
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm-perf.h | 2
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm.h | 1
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm3x-core.c | 55
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm3x-sysfs.c | 3
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x-core.c | 122
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x-sysfs.c | 254
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.h | 3
-rw-r--r--  drivers/hwtracing/coresight/coresight-funnel.c | 12
-rw-r--r--  drivers/hwtracing/coresight/coresight-priv.h | 14
-rw-r--r--  drivers/hwtracing/coresight/coresight-replicator.c | 12
-rw-r--r--  drivers/hwtracing/coresight/coresight-stm.c | 13
-rw-r--r--  drivers/hwtracing/coresight/coresight-syscfg-configfs.c | 12
-rw-r--r--  drivers/hwtracing/coresight/coresight-syscfg.c | 26
-rw-r--r--  drivers/hwtracing/coresight/coresight-sysfs.c | 17
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-core.c | 327
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etf.c | 140
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etr.c | 224
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.h | 106
-rw-r--r--  drivers/hwtracing/coresight/coresight-tpda.c | 20
-rw-r--r--  drivers/hwtracing/coresight/coresight-tpdm.c | 124
-rw-r--r--  drivers/hwtracing/coresight/coresight-tpdm.h | 33
-rw-r--r--  drivers/hwtracing/coresight/coresight-trace-id.c | 22
-rw-r--r--  drivers/hwtracing/coresight/ultrasoc-smb.c | 12
-rw-r--r--  drivers/hwtracing/coresight/ultrasoc-smb.h | 2
-rw-r--r--  drivers/i2c/busses/Kconfig | 18
-rw-r--r--  drivers/i2c/busses/Makefile | 1
-rw-r--r--  drivers/i2c/busses/i2c-amd-asf-plat.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-axxia.c | 21
-rw-r--r--  drivers/i2c/busses/i2c-bcm-iproc.c | 13
-rw-r--r--  drivers/i2c/busses/i2c-bcm-kona.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-brcmstb.c | 11
-rw-r--r--  drivers/i2c/busses/i2c-cadence.c | 19
-rw-r--r--  drivers/i2c/busses/i2c-designware-amdpsp.c | 26
-rw-r--r--  drivers/i2c/busses/i2c-designware-master.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-eg20t.c | 28
-rw-r--r--  drivers/i2c/busses/i2c-exynos5.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 271
-rw-r--r--  drivers/i2c/busses/i2c-ibm_iic.c | 14
-rw-r--r--  drivers/i2c/busses/i2c-k1.c | 602
-rw-r--r--  drivers/i2c/busses/i2c-kempld.c | 10
-rw-r--r--  drivers/i2c/busses/i2c-mlxbf.c | 106
-rw-r--r--  drivers/i2c/busses/i2c-mt7621.c | 20
-rw-r--r--  drivers/i2c/busses/i2c-mv64xxx.c | 12
-rw-r--r--  drivers/i2c/busses/i2c-octeon-core.c | 111
-rw-r--r--  drivers/i2c/busses/i2c-omap.c | 22
-rw-r--r--  drivers/i2c/busses/i2c-pasemi-core.c | 40
-rw-r--r--  drivers/i2c/busses/i2c-pxa.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-qcom-geni.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-qup.c | 36
-rw-r--r--  drivers/i2c/busses/i2c-rzv2m.c | 15
-rw-r--r--  drivers/i2c/i2c-core-of.c | 1
-rw-r--r--  drivers/i2c/i2c-core.h | 9
-rw-r--r--  drivers/i2c/muxes/i2c-mux-ltc4306.c | 2
-rw-r--r--  drivers/i2c/muxes/i2c-mux-pca954x.c | 2
-rw-r--r--  drivers/i2c/muxes/i2c-mux-reg.c | 2
-rw-r--r--  drivers/i3c/master.c | 21
-rw-r--r--  drivers/i3c/master/dw-i3c-master.c | 2
-rw-r--r--  drivers/i3c/master/i3c-master-cdns.c | 2
-rw-r--r--  drivers/i3c/master/mipi-i3c-hci/core.c | 10
-rw-r--r--  drivers/i3c/master/svc-i3c-master.c | 138
-rw-r--r--  drivers/iio/accel/adxl345.h | 1
-rw-r--r--  drivers/iio/accel/adxl345_core.c | 78
-rw-r--r--  drivers/iio/accel/adxl367.c | 194
-rw-r--r--  drivers/iio/accel/adxl372.c | 7
-rw-r--r--  drivers/iio/accel/adxl380.c | 7
-rw-r--r--  drivers/iio/accel/bma180.c | 7
-rw-r--r--  drivers/iio/accel/bma400_core.c | 2
-rw-r--r--  drivers/iio/accel/bmi088-accel-core.c | 9
-rw-r--r--  drivers/iio/accel/fxls8962af-core.c | 21
-rw-r--r--  drivers/iio/accel/kionix-kx022a.c | 78
-rw-r--r--  drivers/iio/accel/mc3230.c | 95
-rw-r--r--  drivers/iio/accel/mma8452.c | 86
-rw-r--r--  drivers/iio/accel/msa311.c | 34
-rw-r--r--  drivers/iio/adc/Kconfig | 56
-rw-r--r--  drivers/iio/adc/Makefile | 4
-rw-r--r--  drivers/iio/adc/ad4000.c | 60
-rw-r--r--  drivers/iio/adc/ad4030.c | 1230
-rw-r--r--  drivers/iio/adc/ad4130.c | 139
-rw-r--r--  drivers/iio/adc/ad4695.c | 1094
-rw-r--r--  drivers/iio/adc/ad4851.c | 1315
-rw-r--r--  drivers/iio/adc/ad7091r-base.c | 1
-rw-r--r--  drivers/iio/adc/ad7124.c | 343
-rw-r--r--  drivers/iio/adc/ad7173.c | 707
-rw-r--r--  drivers/iio/adc/ad7191.c | 554
-rw-r--r--  drivers/iio/adc/ad7192.c | 124
-rw-r--r--  drivers/iio/adc/ad7266.c | 7
-rw-r--r--  drivers/iio/adc/ad7298.c | 7
-rw-r--r--  drivers/iio/adc/ad7380.c | 917
-rw-r--r--  drivers/iio/adc/ad7476.c | 7
-rw-r--r--  drivers/iio/adc/ad7606.c | 174
-rw-r--r--  drivers/iio/adc/ad7606.h | 103
-rw-r--r--  drivers/iio/adc/ad7606_bus_iface.h | 16
-rw-r--r--  drivers/iio/adc/ad7606_par.c | 52
-rw-r--r--  drivers/iio/adc/ad7606_spi.c | 137
-rw-r--r--  drivers/iio/adc/ad7625.c | 13
-rw-r--r--  drivers/iio/adc/ad7768-1.c | 47
-rw-r--r--  drivers/iio/adc/ad7779.c | 101
-rw-r--r--  drivers/iio/adc/ad7791.c | 31
-rw-r--r--  drivers/iio/adc/ad7793.c | 80
-rw-r--r--  drivers/iio/adc/ad7887.c | 7
-rw-r--r--  drivers/iio/adc/ad7923.c | 7
-rw-r--r--  drivers/iio/adc/ad7944.c | 314
-rw-r--r--  drivers/iio/adc/ad799x.c | 14
-rw-r--r--  drivers/iio/adc/ad9467.c | 23
-rw-r--r--  drivers/iio/adc/ad_sigma_delta.c | 24
-rw-r--r--  drivers/iio/adc/adi-axi-adc.c | 305
-rw-r--r--  drivers/iio/adc/at91-sama5d2_adc.c | 54
-rw-r--r--  drivers/iio/adc/dln2-adc.c | 7
-rw-r--r--  drivers/iio/adc/max1027.c | 37
-rw-r--r--  drivers/iio/adc/max11410.c | 72
-rw-r--r--  drivers/iio/adc/max1363.c | 165
-rw-r--r--  drivers/iio/adc/max34408.c | 1
-rw-r--r--  drivers/iio/adc/pac1921.c | 1
-rw-r--r--  drivers/iio/adc/rockchip_saradc.c | 42
-rw-r--r--  drivers/iio/adc/rtq6056.c | 46
-rw-r--r--  drivers/iio/adc/stm32-adc-core.c | 6
-rw-r--r--  drivers/iio/adc/stm32-adc.c | 7
-rw-r--r--  drivers/iio/adc/stm32-dfsdm-adc.c | 76
-rw-r--r--  drivers/iio/adc/ti-adc084s021.c | 9
-rw-r--r--  drivers/iio/adc/ti-adc108s102.c | 7
-rw-r--r--  drivers/iio/adc/ti-adc161s626.c | 14
-rw-r--r--  drivers/iio/adc/ti-ads1119.c | 17
-rw-r--r--  drivers/iio/adc/ti-ads124s08.c | 2
-rw-r--r--  drivers/iio/adc/ti-ads1298.c | 7
-rw-r--r--  drivers/iio/adc/ti-ads131e08.c | 14
-rw-r--r--  drivers/iio/adc/ti-ads7138.c | 749
-rw-r--r--  drivers/iio/adc/ti-ads7924.c | 7
-rw-r--r--  drivers/iio/adc/ti-tlc4541.c | 7
-rw-r--r--  drivers/iio/addac/ad74413r.c | 14
-rw-r--r--  drivers/iio/amplifiers/hmc425a.c | 3
-rw-r--r--  drivers/iio/buffer/industrialio-buffer-dma.c | 4
-rw-r--r--  drivers/iio/buffer/industrialio-buffer-dmaengine.c | 144
-rw-r--r--  drivers/iio/chemical/ens160_core.c | 32
-rw-r--r--  drivers/iio/chemical/scd30_core.c | 70
-rw-r--r--  drivers/iio/common/cros_ec_sensors/Makefile | 3
-rw-r--r--  drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c | 9
-rw-r--r--  drivers/iio/common/cros_ec_sensors/cros_ec_sensors_trace.c | 32
-rw-r--r--  drivers/iio/common/cros_ec_sensors/cros_ec_sensors_trace.h | 56
-rw-r--r--  drivers/iio/dac/Kconfig | 3
-rw-r--r--  drivers/iio/dac/ad3552r-common.c | 50
-rw-r--r--  drivers/iio/dac/ad3552r-hs.c | 333
-rw-r--r--  drivers/iio/dac/ad3552r-hs.h | 8
-rw-r--r--  drivers/iio/dac/ad3552r.c | 36
-rw-r--r--  drivers/iio/dac/ad3552r.h | 9
-rw-r--r--  drivers/iio/dac/ad5791.c | 181
-rw-r--r--  drivers/iio/dac/ad8460.c | 18
-rw-r--r--  drivers/iio/dac/adi-axi-dac.c | 35
-rw-r--r--  drivers/iio/dummy/iio_simple_dummy.c | 119
-rw-r--r--  drivers/iio/frequency/adf4371.c | 45
-rw-r--r--  drivers/iio/gyro/bmg160_i2c.c | 1
-rw-r--r--  drivers/iio/gyro/bmg160_spi.c | 10
-rw-r--r--  drivers/iio/humidity/dht11.c | 3
-rw-r--r--  drivers/iio/imu/Kconfig | 13
-rw-r--r--  drivers/iio/imu/Makefile | 1
-rw-r--r--  drivers/iio/imu/adis.c | 35
-rw-r--r--  drivers/iio/imu/adis16550.c | 1147
-rw-r--r--  drivers/iio/imu/bmi270/bmi270.h | 17
-rw-r--r--  drivers/iio/imu/bmi270/bmi270_core.c | 374
-rw-r--r--  drivers/iio/imu/bmi323/bmi323_core.c | 44
-rw-r--r--  drivers/iio/industrialio-backend.c | 64
-rw-r--r--  drivers/iio/industrialio-core.c | 9
-rw-r--r--  drivers/iio/industrialio-event.c | 2
-rw-r--r--  drivers/iio/industrialio-gts-helper.c | 283
-rw-r--r--  drivers/iio/light/Kconfig | 22
-rw-r--r--  drivers/iio/light/Makefile | 2
-rw-r--r--  drivers/iio/light/adux1020.c | 1
-rw-r--r--  drivers/iio/light/al3000a.c | 209
-rw-r--r--  drivers/iio/light/apds9160.c | 1594
-rw-r--r--  drivers/iio/light/bh1745.c | 18
-rw-r--r--  drivers/iio/light/cm32181.c | 2
-rw-r--r--  drivers/iio/light/cm36651.c | 2
-rw-r--r--  drivers/iio/light/veml6030.c | 608
-rw-r--r--  drivers/iio/light/veml6075.c | 8
-rw-r--r--  drivers/iio/magnetometer/Kconfig | 11
-rw-r--r--  drivers/iio/magnetometer/Makefile | 2
-rw-r--r--  drivers/iio/magnetometer/af8133j.c | 1
-rw-r--r--  drivers/iio/magnetometer/si7210.c | 446
-rw-r--r--  drivers/iio/pressure/zpa2326_i2c.c | 1
-rw-r--r--  drivers/iio/pressure/zpa2326_spi.c | 1
-rw-r--r--  drivers/iio/proximity/irsd200.c | 3
-rw-r--r--  drivers/iio/proximity/sx9310.c | 19
-rw-r--r--  drivers/iio/proximity/sx9324.c | 19
-rw-r--r--  drivers/iio/proximity/sx9360.c | 19
-rw-r--r--  drivers/iio/resolver/ad2s1210.c | 17
-rw-r--r--  drivers/iio/temperature/tmp006.c | 33
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_fs.c | 1
-rw-r--r--  drivers/input/mouse/cyapa.c | 4
-rw-r--r--  drivers/input/touchscreen/goodix_berlin_core.c | 10
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c | 60
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 80
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 36
-rw-r--r--  drivers/iommu/dma-iommu.c | 204
-rw-r--r--  drivers/iommu/dma-iommu.h | 14
-rw-r--r--  drivers/iommu/intel/iommu.c | 3
-rw-r--r--  drivers/iommu/intel/nested.c | 2
-rw-r--r--  drivers/iommu/iommu-priv.h | 16
-rw-r--r--  drivers/iommu/iommu-sva.c | 1
-rw-r--r--  drivers/iommu/iommu.c | 160
-rw-r--r--  drivers/iommu/iommufd/Kconfig | 2
-rw-r--r--  drivers/iommu/iommufd/Makefile | 2
-rw-r--r--  drivers/iommu/iommufd/device.c | 499
-rw-r--r--  drivers/iommu/iommufd/driver.c | 198
-rw-r--r--  drivers/iommu/iommufd/eventq.c | 598
-rw-r--r--  drivers/iommu/iommufd/fault.c | 342
-rw-r--r--  drivers/iommu/iommufd/hw_pagetable.c | 42
-rw-r--r--  drivers/iommu/iommufd/iommufd_private.h | 156
-rw-r--r--  drivers/iommu/iommufd/iommufd_test.h | 40
-rw-r--r--  drivers/iommu/iommufd/main.c | 7
-rw-r--r--  drivers/iommu/iommufd/selftest.c | 297
-rw-r--r--  drivers/iommu/iommufd/viommu.c | 2
-rw-r--r--  drivers/md/Kconfig | 1
-rw-r--r--  drivers/md/dm-bufio.c | 4
-rw-r--r--  drivers/md/dm-cache-target.c | 96
-rw-r--r--  drivers/md/dm-crypt.c | 41
-rw-r--r--  drivers/md/dm-delay.c | 18
-rw-r--r--  drivers/md/dm-ebs-target.c | 7
-rw-r--r--  drivers/md/dm-integrity.c | 48
-rw-r--r--  drivers/md/dm-stripe.c | 2
-rw-r--r--  drivers/md/dm-table.c | 4
-rw-r--r--  drivers/md/dm-vdo/block-map.c | 13
-rw-r--r--  drivers/md/dm-vdo/constants.h | 3
-rw-r--r--  drivers/md/dm-vdo/dedupe.c | 20
-rw-r--r--  drivers/md/dm-vdo/encodings.c | 20
-rw-r--r--  drivers/md/dm-vdo/indexer/index-layout.c | 5
-rw-r--r--  drivers/md/dm-vdo/indexer/index-session.c | 6
-rw-r--r--  drivers/md/dm-vdo/indexer/indexer.h | 53
-rw-r--r--  drivers/md/dm-vdo/io-submitter.c | 6
-rw-r--r--  drivers/md/dm-vdo/io-submitter.h | 18
-rw-r--r--  drivers/md/dm-vdo/packer.h | 2
-rw-r--r--  drivers/md/dm-vdo/priority-table.c | 2
-rw-r--r--  drivers/md/dm-vdo/recovery-journal.h | 6
-rw-r--r--  drivers/md/dm-vdo/slab-depot.c | 193
-rw-r--r--  drivers/md/dm-vdo/slab-depot.h | 13
-rw-r--r--  drivers/md/dm-vdo/types.h | 3
-rw-r--r--  drivers/md/dm-vdo/vdo.c | 11
-rw-r--r--  drivers/md/dm-vdo/vio.c | 54
-rw-r--r--  drivers/md/dm-vdo/vio.h | 13
-rw-r--r--  drivers/md/dm-vdo/wait-queue.c | 2
-rw-r--r--  drivers/md/dm-verity-target.c | 62
-rw-r--r--  drivers/md/dm.c | 8
-rw-r--r--  drivers/media/dvb-frontends/dib8000.c | 5
-rw-r--r--  drivers/misc/Kconfig | 1
-rw-r--r--  drivers/misc/Makefile | 1
-rw-r--r--  drivers/misc/cxl/Kconfig | 28
-rw-r--r--  drivers/misc/cxl/Makefile | 14
-rw-r--r--  drivers/misc/cxl/api.c | 532
-rw-r--r--  drivers/misc/cxl/base.c | 126
-rw-r--r--  drivers/misc/cxl/context.c | 362
-rw-r--r--  drivers/misc/cxl/cxl.h | 1135
-rw-r--r--  drivers/misc/cxl/cxllib.c | 271
-rw-r--r--  drivers/misc/cxl/debugfs.c | 134
-rw-r--r--  drivers/misc/cxl/fault.c | 341
-rw-r--r--  drivers/misc/cxl/file.c | 699
-rw-r--r--  drivers/misc/cxl/flash.c | 538
-rw-r--r--  drivers/misc/cxl/guest.c | 1208
-rw-r--r--  drivers/misc/cxl/hcalls.c | 643
-rw-r--r--  drivers/misc/cxl/hcalls.h | 200
-rw-r--r--  drivers/misc/cxl/irq.c | 450
-rw-r--r--  drivers/misc/cxl/main.c | 383
-rw-r--r--  drivers/misc/cxl/native.c | 1592
-rw-r--r--  drivers/misc/cxl/of.c | 346
-rw-r--r--  drivers/misc/cxl/pci.c | 2103
-rw-r--r--  drivers/misc/cxl/sysfs.c | 771
-rw-r--r--  drivers/misc/cxl/trace.c | 9
-rw-r--r--  drivers/misc/cxl/trace.h | 691
-rw-r--r--  drivers/misc/cxl/vphb.c | 309
-rw-r--r--  drivers/misc/eeprom/at24.c | 10
-rw-r--r--  drivers/misc/eeprom/ee1004.c | 4
-rw-r--r--  drivers/misc/lis3lv02d/lis3lv02d.c | 26
-rw-r--r--  drivers/misc/lis3lv02d/lis3lv02d.h | 4
-rw-r--r--  drivers/misc/lkdtm/perms.c | 14
-rw-r--r--  drivers/misc/mei/bus.c | 52
-rw-r--r--  drivers/misc/mei/client.c | 22
-rw-r--r--  drivers/misc/mei/client.h | 2
-rw-r--r--  drivers/misc/mei/hw-txe.c | 45
-rw-r--r--  drivers/misc/mei/hw-txe.h | 2
-rw-r--r--  drivers/net/caif/caif_serial.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 8
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_en.c | 46
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 4
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_common.c | 2
-rw-r--r--  drivers/net/phy/broadcom.c | 6
-rw-r--r--  drivers/net/usb/rndis_host.c | 16
-rw-r--r--  drivers/net/usb/usbnet.c | 21
-rw-r--r--  drivers/net/wan/lapbether.c | 2
-rw-r--r--  drivers/nvdimm/claim.c | 11
-rw-r--r--  drivers/nvdimm/label.c | 3
-rw-r--r--  drivers/nvdimm/nd-core.h | 4
-rw-r--r--  drivers/nvdimm/pmem.c | 4
-rw-r--r--  drivers/nvdimm/region_devs.c | 41
-rw-r--r--  drivers/nvme/host/Kconfig | 13
-rw-r--r--  drivers/nvme/host/core.c | 8
-rw-r--r--  drivers/nvme/host/ioctl.c | 68
-rw-r--r--  drivers/nvme/host/pci.c | 3
-rw-r--r--  drivers/nvme/target/debugfs.c | 2
-rw-r--r--  drivers/nvme/target/pci-epf.c | 63
-rw-r--r--  drivers/pci/ats.c | 33
-rw-r--r--  drivers/pci/p2pdma.c | 19
-rw-r--r--  drivers/pcmcia/cistpl.c | 8
-rw-r--r--  drivers/phy/freescale/Kconfig | 1
-rw-r--r--  drivers/phy/freescale/phy-fsl-imx8m-pcie.c | 23
-rw-r--r--  drivers/phy/freescale/phy-fsl-imx8mq-usb.c | 240
-rw-r--r--  drivers/phy/freescale/phy-fsl-samsung-hdmi.c | 10
-rw-r--r--  drivers/phy/mediatek/phy-mtk-tphy.c | 4
-rw-r--r--  drivers/phy/microchip/Kconfig | 1
-rw-r--r--  drivers/phy/phy-can-transceiver.c | 13
-rw-r--r--  drivers/phy/phy-core.c | 31
-rw-r--r--  drivers/phy/qualcomm/Kconfig | 13
-rw-r--r--  drivers/phy/qualcomm/Makefile | 1
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp-pcie.c | 107
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h | 7
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v7.h | 67
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp-ufs.c | 180
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp-usbc.c | 3
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-uniphy-pcie-28lp.c | 286
-rw-r--r--  drivers/phy/rockchip/Kconfig | 12
-rw-r--r--  drivers/phy/rockchip/Makefile | 1
-rw-r--r--  drivers/phy/rockchip/phy-rockchip-inno-usb2.c | 4
-rw-r--r--  drivers/phy/rockchip/phy-rockchip-naneng-combphy.c | 152
-rw-r--r--  drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c | 1719
-rw-r--r--  drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c | 1046
-rw-r--r--  drivers/phy/rockchip/phy-rockchip-usbdp.c | 90
-rw-r--r--  drivers/phy/samsung/Kconfig | 1
-rw-r--r--  drivers/phy/samsung/Makefile | 1
-rw-r--r--  drivers/phy/samsung/phy-exynos5-usbdrd.c | 202
-rw-r--r--  drivers/phy/samsung/phy-exynosautov920-ufs.c | 168
-rw-r--r--  drivers/phy/samsung/phy-samsung-ufs.c | 9
-rw-r--r--  drivers/phy/samsung/phy-samsung-ufs.h | 4
-rw-r--r--  drivers/phy/st/phy-stih407-usb.c | 24
-rw-r--r--  drivers/platform/chrome/cros_ec_lpc.c | 2
-rw-r--r--  drivers/pnp/isapnp/core.c | 1
-rw-r--r--  drivers/power/supply/da9030_battery.c | 3
-rw-r--r--  drivers/power/supply/ip5xxx_power.c | 7
-rw-r--r--  drivers/pps/generators/Kconfig | 16
-rw-r--r--  drivers/pps/generators/Makefile | 1
-rw-r--r--  drivers/pps/generators/pps_gen-dummy.c | 2
-rw-r--r--  drivers/pps/generators/pps_gen.c | 14
-rw-r--r--  drivers/pps/generators/pps_gen_tio.c | 272
-rw-r--r--  drivers/pps/generators/sysfs.c | 6
-rw-r--r--  drivers/rapidio/rio-sysfs.c | 14
-rw-r--r--  drivers/regulator/core.c | 4
-rw-r--r--  drivers/regulator/dummy.c | 37
-rw-r--r--  drivers/regulator/irq_helpers.c | 16
-rw-r--r--  drivers/regulator/rk808-regulator.c | 4
-rw-r--r--  drivers/rtc/Kconfig | 7
-rw-r--r--  drivers/rtc/Makefile | 1
-rw-r--r--  drivers/rtc/rtc-ab-eoz9.c | 24
-rw-r--r--  drivers/rtc/rtc-ab8500.c | 11
-rw-r--r--  drivers/rtc/rtc-aspeed.c | 16
-rw-r--r--  drivers/rtc/rtc-cros-ec.c | 30
-rw-r--r--  drivers/rtc/rtc-ds1307.c | 4
-rw-r--r--  drivers/rtc/rtc-ds1343.c | 8
-rw-r--r--  drivers/rtc/rtc-ds2404.c | 14
-rw-r--r--  drivers/rtc/rtc-ds3232.c | 24
-rw-r--r--  drivers/rtc/rtc-ep93xx.c | 16
-rw-r--r--  drivers/rtc/rtc-fsl-ftm-alarm.c | 2
-rw-r--r--  drivers/rtc/rtc-ftrtc010.c | 17
-rw-r--r--  drivers/rtc/rtc-m48t86.c | 14
-rw-r--r--  drivers/rtc/rtc-max31335.c | 165
-rw-r--r--  drivers/rtc/rtc-max77686.c | 37
-rw-r--r--  drivers/rtc/rtc-meson-vrtc.c | 12
-rw-r--r--  drivers/rtc/rtc-meson.c | 16
-rw-r--r--  drivers/rtc/rtc-mpfs.c | 10
-rw-r--r--  drivers/rtc/rtc-nxp-bbnsm.c | 29
-rw-r--r--  drivers/rtc/rtc-pcf50633.c | 284
-rw-r--r--  drivers/rtc/rtc-pcf85063.c | 25
-rw-r--r--  drivers/rtc/rtc-pl030.c | 16
-rw-r--r--  drivers/rtc/rtc-pl031.c | 8
-rw-r--r--  drivers/rtc/rtc-pm8xxx.c | 220
-rw-r--r--  drivers/rtc/rtc-renesas-rtca3.c | 15
-rw-r--r--  drivers/rtc/rtc-rv3032.c | 8
-rw-r--r--  drivers/rtc/rtc-rx8581.c | 85
-rw-r--r--  drivers/rtc/rtc-rzn1.c | 108
-rw-r--r--  drivers/rtc/rtc-s35390a.c | 22
-rw-r--r--  drivers/rtc/rtc-s5m.c | 58
-rw-r--r--  drivers/rtc/rtc-sd2405al.c | 16
-rw-r--r--  drivers/rtc/rtc-sd3078.c | 71
-rw-r--r--  drivers/rtc/rtc-stm32.c | 10
-rw-r--r--  drivers/s390/block/Kconfig | 12
-rw-r--r--  drivers/s390/block/dcssblk.c | 27
-rw-r--r--  drivers/soundwire/Kconfig | 1
-rw-r--r--  drivers/soundwire/amd_manager.c | 151
-rw-r--r--  drivers/soundwire/amd_manager.h | 26
-rw-r--r--  drivers/soundwire/bus.c | 48
-rw-r--r--  drivers/soundwire/bus.h | 18
-rw-r--r--  drivers/soundwire/cadence_master.c | 668
-rw-r--r--  drivers/soundwire/cadence_master.h | 20
-rw-r--r--  drivers/soundwire/debugfs.c | 87
-rw-r--r--  drivers/soundwire/generic_bandwidth_allocation.c | 53
-rw-r--r--  drivers/soundwire/intel.h | 23
-rw-r--r--  drivers/soundwire/intel_ace2x.c | 312
-rw-r--r--  drivers/soundwire/intel_auxdevice.c | 45
-rw-r--r--  drivers/soundwire/slave.c | 1
-rw-r--r--  drivers/soundwire/stream.c | 143
-rw-r--r--  drivers/spi/spi-amd.c | 2
-rw-r--r--  drivers/staging/gpib/Kconfig | 5
-rw-r--r--  drivers/staging/gpib/agilent_82350b/agilent_82350b.c | 260
-rw-r--r--  drivers/staging/gpib/agilent_82350b/agilent_82350b.h | 50
-rw-r--r--  drivers/staging/gpib/agilent_82357a/agilent_82357a.c | 527
-rw-r--r--  drivers/staging/gpib/cb7210/Makefile | 1
-rw-r--r--  drivers/staging/gpib/cb7210/cb7210.c | 287
-rw-r--r--  drivers/staging/gpib/cb7210/cb7210.h | 45
-rw-r--r--  drivers/staging/gpib/cec/cec.h | 29
-rw-r--r--  drivers/staging/gpib/cec/cec_gpib.c | 96
-rw-r--r--  drivers/staging/gpib/common/gpib_os.c | 328
-rw-r--r--  drivers/staging/gpib/common/iblib.c | 167
-rw-r--r--  drivers/staging/gpib/common/ibsys.h | 14
-rw-r--r--  drivers/staging/gpib/eastwood/fluke_gpib.c | 204
-rw-r--r--  drivers/staging/gpib/fmh_gpib/fmh_gpib.c | 280
-rw-r--r--  drivers/staging/gpib/gpio/gpib_bitbang.c | 138
-rw-r--r--  drivers/staging/gpib/hp_82335/hp82335.c | 83
-rw-r--r--  drivers/staging/gpib/hp_82335/hp82335.h | 30
-rw-r--r--  drivers/staging/gpib/hp_82341/hp_82341.c | 161
-rw-r--r--  drivers/staging/gpib/hp_82341/hp_82341.h | 40
-rw-r--r--  drivers/staging/gpib/include/gpibP.h | 10
-rw-r--r--  drivers/staging/gpib/include/gpib_proto.h | 58
-rw-r--r--  drivers/staging/gpib/include/gpib_types.h | 58
-rw-r--r--  drivers/staging/gpib/include/nec7210.h | 60
-rw-r--r--  drivers/staging/gpib/include/tms9914.h | 52
-rw-r--r--  drivers/staging/gpib/ines/Makefile | 1
-rw-r--r--  drivers/staging/gpib/ines/ines.h | 54
-rw-r--r--  drivers/staging/gpib/ines/ines_gpib.c | 240
-rw-r--r--  drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c | 363
-rw-r--r--  drivers/staging/gpib/nec7210/nec7210.c | 147
-rw-r--r--  drivers/staging/gpib/ni_usb/ni_usb_gpib.c | 611
-rw-r--r--  drivers/staging/gpib/pc2/pc2_gpib.c | 352
-rw-r--r--  drivers/staging/gpib/tms9914/tms9914.c | 117
-rw-r--r--  drivers/staging/gpib/tnt4882/Makefile | 1
-rw-r--r--  drivers/staging/gpib/tnt4882/mite.c | 17
-rw-r--r--  drivers/staging/gpib/tnt4882/tnt4882_gpib.c | 964
-rw-r--r--  drivers/staging/gpib/uapi/gpib_user.h | 29
-rw-r--r--  drivers/staging/greybus/uart.c | 4
-rw-r--r--  drivers/staging/iio/accel/Kconfig | 12
-rw-r--r--  drivers/staging/iio/accel/Makefile | 1
-rw-r--r--  drivers/staging/iio/accel/adis16240.c | 443
-rw-r--r--  drivers/staging/iio/frequency/ad9832.c | 37
-rw-r--r--  drivers/staging/iio/frequency/ad9834.c | 22
-rw-r--r--  drivers/staging/rtl8723bs/Kconfig | 1
-rw-r--r--  drivers/staging/rtl8723bs/core/rtw_ap.c | 96
-rw-r--r--  drivers/staging/rtl8723bs/hal/hal_com.c | 3
-rw-r--r--  drivers/staging/rtl8723bs/include/osdep_intf.h | 27
-rw-r--r--  drivers/staging/rtl8723bs/include/rtl8723b_hal.h | 1
-rw-r--r--  drivers/staging/rtl8723bs/include/rtw_io.h | 92
-rw-r--r--  drivers/staging/rtl8723bs/include/rtw_mp.h | 341
-rw-r--r--  drivers/staging/rtl8723bs/os_dep/os_intfs.c | 2
-rw-r--r--  drivers/staging/sm750fb/ddk750_chip.c | 2
-rw-r--r--  drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c | 2
-rw-r--r--  drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c | 127
-rw-r--r--  drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c | 65
-rw-r--r--  drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h | 2
-rw-r--r--  drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c | 14
-rw-r--r--  drivers/thermal/broadcom/brcmstb_thermal.c | 11
-rw-r--r--  drivers/thermal/mediatek/lvts_thermal.c | 103
-rw-r--r--  drivers/thermal/qcom/qcom-spmi-temp-alarm.c | 1
-rw-r--r--  drivers/thermal/qcom/tsens-v2.c | 178
-rw-r--r--  drivers/thermal/qcom/tsens.c | 8
-rw-r--r--  drivers/thermal/qcom/tsens.h | 3
-rw-r--r--  drivers/thermal/qoriq_thermal.c | 47
-rw-r--r--  drivers/thermal/renesas/rcar_gen3_thermal.c | 107
-rw-r--r--  drivers/thermal/rockchip_thermal.c | 1
-rw-r--r--  drivers/thermal/thermal_core.c | 17
-rw-r--r--  drivers/thermal/thermal_core.h | 1
-rw-r--r--  drivers/thermal/thermal_of.c | 7
-rw-r--r--  drivers/thunderbolt/retimer.c | 8
-rw-r--r--  drivers/thunderbolt/tb.c | 16
-rw-r--r--  drivers/thunderbolt/tunnel.c | 16
-rw-r--r--  drivers/tty/Kconfig | 2
-rw-r--r--  drivers/tty/moxa.c | 251
-rw-r--r--  drivers/tty/n_tty.c | 212
-rw-r--r--  drivers/tty/serdev/core.c | 11
-rw-r--r--  drivers/tty/serdev/serdev-ttyport.c | 9
-rw-r--r--  drivers/tty/serial/8250/8250_dma.c | 2
-rw-r--r--  drivers/tty/serial/8250/8250_dw.c | 73
-rw-r--r--  drivers/tty/serial/8250/8250_fsl.c | 8
-rw-r--r--  drivers/tty/serial/8250/8250_ni.c | 461
-rw-r--r--  drivers/tty/serial/8250/8250_omap.c | 2
-rw-r--r--  drivers/tty/serial/8250/8250_pci.c | 46
-rw-r--r--  drivers/tty/serial/8250/8250_port.c | 61
-rw-r--r--  drivers/tty/serial/8250/8250_rsa.c | 21
-rw-r--r--  drivers/tty/serial/8250/Kconfig | 13
-rw-r--r--  drivers/tty/serial/8250/Makefile | 1
-rw-r--r--  drivers/tty/serial/Kconfig | 42
-rw-r--r--  drivers/tty/serial/Makefile | 2
-rw-r--r--  drivers/tty/serial/amba-pl011.c | 149
-rw-r--r--  drivers/tty/serial/atmel_serial.c | 2
-rw-r--r--  drivers/tty/serial/fsl_lpuart.c | 489
-rw-r--r--  drivers/tty/serial/icom.c | 9
-rw-r--r--  drivers/tty/serial/imx.c | 2
-rw-r--r--  drivers/tty/serial/kgdb_nmi.c | 280
-rw-r--r--  drivers/tty/serial/kgdboc.c | 8
-rw-r--r--  drivers/tty/serial/ma35d1_serial.c | 2
-rw-r--r--  drivers/tty/serial/mpc52xx_uart.c | 1
-rw-r--r--  drivers/tty/serial/pch_uart.c | 1
-rw-r--r--  drivers/tty/serial/serial_core.c | 10
-rw-r--r--  drivers/tty/serial/serial_mctrl_gpio.c | 62
-rw-r--r--  drivers/tty/serial/serial_mctrl_gpio.h | 27
-rw-r--r--  drivers/tty/serial/sh-sci.c | 98
-rw-r--r--  drivers/tty/serial/stm32-usart.c | 6
-rw-r--r--  drivers/tty/serial/sunsu.c | 178
-rw-r--r--  drivers/tty/serial/tegra-utc.c | 625
-rw-r--r--  drivers/tty/tty_audit.c | 10
-rw-r--r--  drivers/tty/tty_io.c | 8
-rw-r--r--  drivers/tty/tty_ldsem.c | 17
-rw-r--r--  drivers/usb/cdns3/cdns3-gadget.c | 4
-rw-r--r--  drivers/usb/cdns3/cdns3-ti.c | 107
-rw-r--r--  drivers/usb/cdns3/cdnsp-gadget.c | 2
-rw-r--r--  drivers/usb/cdns3/core.c | 5
-rw-r--r--  drivers/usb/cdns3/core.h | 2
-rw-r--r--  drivers/usb/cdns3/host.c | 11
-rw-r--r--  drivers/usb/chipidea/usbmisc_imx.c | 10
-rw-r--r--  drivers/usb/common/usb-conn-gpio.c | 2
-rw-r--r--  drivers/usb/core/config.c | 51
-rw-r--r--  drivers/usb/core/hcd.c | 4
-rw-r--r--  drivers/usb/core/hub.c | 4
-rw-r--r--  drivers/usb/core/urb.c | 2
-rw-r--r--  drivers/usb/dwc2/core.c | 1
-rw-r--r--  drivers/usb/dwc2/core.h | 23
-rw-r--r--  drivers/usb/dwc2/gadget.c | 116
-rw-r--r--  drivers/usb/dwc2/hcd.c | 99
-rw-r--r--  drivers/usb/dwc2/platform.c | 38
-rw-r--r--  drivers/usb/dwc3/dwc3-am62.c | 12
-rw-r--r--  drivers/usb/dwc3/dwc3-exynos.c | 9
-rw-r--r--  drivers/usb/dwc3/dwc3-pci.c | 10
-rw-r--r--  drivers/usb/dwc3/dwc3-st.c | 2
-rw-r--r--  drivers/usb/dwc3/gadget.c | 69
-rw-r--r--  drivers/usb/gadget/function/uvc_queue.c | 2
-rw-r--r--  drivers/usb/gadget/udc/aspeed-vhub/dev.c | 3
-rw-r--r--  drivers/usb/host/max3421-hcd.c | 7
-rw-r--r--  drivers/usb/host/xhci-histb.c | 2
-rw-r--r--  drivers/usb/host/xhci-mem.c | 34
-rw-r--r--  drivers/usb/host/xhci-mvebu.c | 10
-rw-r--r--  drivers/usb/host/xhci-mvebu.h | 6
-rw-r--r--  drivers/usb/host/xhci-pci.c | 8
-rw-r--r--  drivers/usb/host/xhci-plat.c | 13
-rw-r--r--  drivers/usb/host/xhci-plat.h | 1
-rw-r--r--  drivers/usb/host/xhci-ring.c | 420
-rw-r--r--  drivers/usb/host/xhci-tegra.c | 10
-rw-r--r--  drivers/usb/host/xhci.c | 41
-rw-r--r--  drivers/usb/host/xhci.h | 30
-rw-r--r--  drivers/usb/misc/onboard_usb_dev.h | 9
-rw-r--r--  drivers/usb/misc/usb251xb.c | 6
-rw-r--r--  drivers/usb/musb/jz4740.c | 4
-rw-r--r--  drivers/usb/musb/mediatek.c | 2
-rw-r--r--  drivers/usb/musb/mpfs.c | 2
-rw-r--r--  drivers/usb/musb/musb_core.c | 14
-rw-r--r--  drivers/usb/musb/sunxi.c | 4
-rw-r--r--  drivers/usb/phy/phy-mxs-usb.c | 8
-rw-r--r--  drivers/usb/phy/phy-ulpi.c | 23
-rw-r--r--  drivers/usb/serial/mos7840.c | 13
-rw-r--r--  drivers/usb/storage/alauda.c | 8
-rw-r--r--  drivers/usb/storage/datafab.c | 14
-rw-r--r--  drivers/usb/storage/initializers.c | 2
-rw-r--r--  drivers/usb/storage/jumpshot.c | 10
-rw-r--r--  drivers/usb/storage/realtek_cr.c | 6
-rw-r--r--  drivers/usb/storage/sddr09.c | 14
-rw-r--r--  drivers/usb/storage/sddr55.c | 4
-rw-r--r--  drivers/usb/storage/shuttle_usbat.c | 2
-rw-r--r--  drivers/usb/storage/transport.c | 2
-rw-r--r--  drivers/usb/typec/altmodes/thunderbolt.c | 10
-rw-r--r--  drivers/usb/typec/mux/Kconfig | 10
-rw-r--r--  drivers/usb/typec/mux/Makefile | 1
-rw-r--r--  drivers/usb/typec/mux/ps883x.c | 466
-rw-r--r--  drivers/usb/typec/ucsi/cros_ec_ucsi.c | 22
-rw-r--r--  drivers/usb/typec/ucsi/debugfs.c | 6
-rw-r--r--  drivers/usb/typec/ucsi/trace.c | 2
-rw-r--r--  drivers/usb/typec/ucsi/ucsi.c | 19
-rw-r--r--  drivers/usb/typec/ucsi/ucsi.h | 10
-rw-r--r--  drivers/usb/typec/ucsi/ucsi_acpi.c | 29
-rw-r--r--  drivers/usb/typec/ucsi/ucsi_ccg.c | 97
-rw-r--r--  drivers/vdpa/mlx5/core/mr.c | 7
-rw-r--r--  drivers/vdpa/mlx5/net/mlx5_vnet.c | 3
-rw-r--r--  drivers/vdpa/vdpa_user/vduse_dev.c | 1
-rw-r--r--  drivers/vfio/device_cdev.c | 60
-rw-r--r--  drivers/vfio/iommufd.c | 60
-rw-r--r--  drivers/vfio/pci/vfio_pci.c | 6
-rw-r--r--  drivers/vfio/pci/vfio_pci_config.c | 3
-rw-r--r--  drivers/vfio/pci/vfio_pci_core.c | 10
-rw-r--r--  drivers/vfio/pci/vfio_pci_igd.c | 6
-rw-r--r--  drivers/vfio/pci/vfio_pci_intrs.c | 2
-rw-r--r--  drivers/vfio/pci/vfio_pci_priv.h | 6
-rw-r--r--  drivers/vfio/pci/virtio/Kconfig | 6
-rw-r--r--  drivers/vfio/pci/virtio/legacy_io.c | 4
-rw-r--r--  drivers/vfio/pci/virtio/main.c | 5
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c | 123
-rw-r--r--  drivers/vhost/Kconfig | 1
-rw-r--r--  drivers/vhost/scsi.c | 547
-rw-r--r--  drivers/video/fbdev/core/fb_defio.c | 43
-rw-r--r--  drivers/virtio/virtio.c | 29
-rw-r--r--  drivers/w1/masters/w1-uart.c | 4
-rw-r--r--  drivers/w1/slaves/w1_therm.c | 12
-rw-r--r--  drivers/watchdog/Kconfig | 22
-rw-r--r--  drivers/watchdog/Makefile | 1
-rw-r--r--  drivers/watchdog/aspeed_wdt.c | 81
-rw-r--r--  drivers/watchdog/cros_ec_wdt.c | 10
-rw-r--r--  drivers/watchdog/lenovo_se30_wdt.c | 394
-rw-r--r--  drivers/watchdog/nic7018_wdt.c | 9
-rw-r--r--  drivers/watchdog/npcm_wdt.c | 9
-rw-r--r--  drivers/watchdog/s3c2410_wdt.c | 10
-rw-r--r--  drivers/watchdog/sunxi_wdt.c | 11
-rw-r--r--  drivers/watchdog/watchdog_core.c | 6
-rw-r--r--  drivers/xen/balloon.c | 4
-rw-r--r--  fs/9p/vfs_inode_dotl.c | 2
-rw-r--r--  fs/Kconfig | 1
-rw-r--r--  fs/bcachefs/Kconfig | 1
-rw-r--r--  fs/bcachefs/acl.c | 4
-rw-r--r--  fs/bcachefs/alloc_background.c | 95
-rw-r--r--  fs/bcachefs/alloc_background.h | 6
-rw-r--r--  fs/bcachefs/alloc_foreground.c | 79
-rw-r--r--  fs/bcachefs/backpointers.c | 24
-rw-r--r--  fs/bcachefs/bcachefs.h | 7
-rw-r--r--  fs/bcachefs/btree_gc.c | 4
-rw-r--r--  fs/bcachefs/btree_io.c | 17
-rw-r--r--  fs/bcachefs/btree_iter.c | 188
-rw-r--r--  fs/bcachefs/btree_iter.h | 122
-rw-r--r--  fs/bcachefs/btree_key_cache.c | 32
-rw-r--r--  fs/bcachefs/btree_node_scan.c | 8
-rw-r--r--  fs/bcachefs/btree_types.h | 1
-rw-r--r--  fs/bcachefs/btree_update.c | 26
-rw-r--r--  fs/bcachefs/btree_update_interior.c | 12
-rw-r--r--  fs/bcachefs/btree_write_buffer.c | 10
-rw-r--r--  fs/bcachefs/buckets.c | 16
-rw-r--r--  fs/bcachefs/buckets.h | 21
-rw-r--r--  fs/bcachefs/buckets_types.h | 5
-rw-r--r--  fs/bcachefs/chardev.c | 14
-rw-r--r--  fs/bcachefs/compress.c | 5
-rw-r--r--  fs/bcachefs/data_update.c | 8
-rw-r--r--  fs/bcachefs/debug.c | 4
-rw-r--r--  fs/bcachefs/dirent.c | 16
-rw-r--r--  fs/bcachefs/disk_accounting.c | 4
-rw-r--r--  fs/bcachefs/disk_groups.c | 4
-rw-r--r--  fs/bcachefs/ec.c | 18
-rw-r--r--  fs/bcachefs/error.c | 7
-rw-r--r--  fs/bcachefs/extent_update.c | 6
-rw-r--r--  fs/bcachefs/fs-io-buffered.c | 6
-rw-r--r--  fs/bcachefs/fs-io.c | 14
-rw-r--r--  fs/bcachefs/fs.c | 24
-rw-r--r--  fs/bcachefs/fsck.c | 30
-rw-r--r--  fs/bcachefs/inode.c | 18
-rw-r--r--  fs/bcachefs/io_misc.c | 18
-rw-r--r--  fs/bcachefs/io_read.c | 14
-rw-r--r--  fs/bcachefs/io_write.c | 40
-rw-r--r--  fs/bcachefs/journal.c | 14
-rw-r--r--  fs/bcachefs/journal_io.c | 8
-rw-r--r--  fs/bcachefs/migrate.c | 4
-rw-r--r--  fs/bcachefs/move.c | 14
-rw-r--r--  fs/bcachefs/movinggc.c | 8
-rw-r--r--  fs/bcachefs/namei.c | 38
-rw-r--r--  fs/bcachefs/quota.c | 2
-rw-r--r--  fs/bcachefs/rebalance.c | 12
-rw-r--r--  fs/bcachefs/recovery.c | 6
-rw-r--r--  fs/bcachefs/reflink.c | 23
-rw-r--r--  fs/bcachefs/sb-members.h | 23
-rw-r--r--  fs/bcachefs/snapshot.c | 13
-rw-r--r--  fs/bcachefs/str_hash.c | 2
-rw-r--r--  fs/bcachefs/str_hash.h | 8
-rw-r--r--  fs/bcachefs/subvolume.c | 4
-rw-r--r--  fs/bcachefs/subvolume.h | 14
-rw-r--r--  fs/bcachefs/super-io.c | 21
-rw-r--r--  fs/bcachefs/super.c | 85
-rw-r--r--  fs/bcachefs/tests.c | 30
-rw-r--r--  fs/bcachefs/util.h | 2
-rw-r--r--  fs/bcachefs/xattr.c | 2
-rw-r--r--  fs/btrfs/disk-io.c | 6
-rw-r--r--  fs/buffer.c | 14
-rw-r--r--  fs/cachefiles/namei.c | 7
-rw-r--r--  fs/dax.c | 369
-rw-r--r--  fs/exec.c | 15
-rw-r--r--  fs/exportfs/expfs.c | 1
-rw-r--r--  fs/ext4/inline.c | 2
-rw-r--r--  fs/ext4/inode.c | 18
-rw-r--r--  fs/ext4/move_extent.c | 2
-rw-r--r--  fs/fuse/dax.c | 30
-rw-r--r--  fs/fuse/dev.c | 162
-rw-r--r--  fs/fuse/dev_uring.c | 43
-rw-r--r--  fs/fuse/dev_uring_i.h | 18
-rw-r--r--  fs/fuse/dir.c | 13
-rw-r--r--  fs/fuse/file.c | 4
-rw-r--r--  fs/fuse/fuse_dev_i.h | 4
-rw-r--r--  fs/fuse/fuse_i.h | 47
-rw-r--r--  fs/fuse/inode.c | 51
-rw-r--r--  fs/fuse/sysctl.c | 24
-rw-r--r--  fs/fuse/virtio_fs.c | 3
-rw-r--r--  fs/hostfs/hostfs.h | 2
-rw-r--r--  fs/hostfs/hostfs_kern.c | 7
-rw-r--r--  fs/hostfs/hostfs_user.c | 59
-rw-r--r--  fs/hugetlbfs/inode.c | 28
-rw-r--r--  fs/iomap/buffered-io.c | 2
-rw-r--r--  fs/kernfs/dir.c | 209
-rw-r--r--  fs/kernfs/file.c | 6
-rw-r--r--  fs/kernfs/kernfs-internal.h | 37
-rw-r--r--  fs/kernfs/mount.c | 44
-rw-r--r--  fs/kernfs/symlink.c | 30
-rw-r--r--  fs/namespace.c | 7
-rw-r--r--  fs/nfs/client.c | 5
-rw-r--r--  fs/nfs/delegation.c | 66
-rw-r--r--  fs/nfs/dir.c | 2
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayout.c | 24
-rw-r--r--  fs/nfs/fs_context.c | 71
-rw-r--r--  fs/nfs/inode.c | 2
-rw-r--r--  fs/nfs/internal.h | 5
-rw-r--r--  fs/nfs/nfs3client.c | 2
-rw-r--r--  fs/nfs/nfs3proc.c | 2
-rw-r--r--  fs/nfs/nfs42proc.c | 172
-rw-r--r--  fs/nfs/nfs42xdr.c | 86
-rw-r--r--  fs/nfs/nfs4client.c | 7
-rw-r--r--  fs/nfs/nfs4proc.c | 17
-rw-r--r--  fs/nfs/nfs4state.c | 14
-rw-r--r--  fs/nfs/nfs4trace.h | 11
-rw-r--r--  fs/nfs/nfs4xdr.c | 19
-rw-r--r--  fs/nfs/super.c | 4
-rw-r--r--  fs/nfs/sysfs.c | 82
-rw-r--r--  fs/nfs/write.c | 4
-rw-r--r--  fs/ntfs3/attrib.c | 3
-rw-r--r--  fs/ntfs3/file.c | 42
-rw-r--r--  fs/ntfs3/frecord.c | 63
-rw-r--r--  fs/ntfs3/fsntfs.c | 28
-rw-r--r--  fs/ntfs3/index.c | 4
-rw-r--r--  fs/ntfs3/inode.c | 40
-rw-r--r--  fs/ntfs3/ntfs.h | 2
-rw-r--r--  fs/ntfs3/ntfs_fs.h | 6
-rw-r--r--  fs/ntfs3/super.c | 89
-rw-r--r--  fs/ocfs2/alloc.c | 8
-rw-r--r--  fs/ocfs2/aops.c | 21
-rw-r--r--  fs/ocfs2/file.c | 2
-rw-r--r--  fs/ocfs2/quota_global.c | 2
-rw-r--r--  fs/proc/base.c | 2
-rw-r--r--  fs/proc/internal.h | 43
-rw-r--r--  fs/proc/meminfo.c | 2
-rw-r--r--  fs/proc/page.c | 11
-rw-r--r--  fs/proc/task_mmu.c | 56
-rw-r--r--  fs/smb/server/auth.c | 4
-rw-r--r--  fs/smb/server/connection.h | 11
-rw-r--r--  fs/smb/server/mgmt/user_session.c | 18
-rw-r--r--  fs/smb/server/smb2pdu.c | 21
-rw-r--r--  fs/smb/server/smbacl.c | 21
-rw-r--r--  fs/sysfs/dir.c | 2
-rw-r--r--  fs/sysfs/file.c | 24
-rw-r--r--  fs/udf/file.c | 2
-rw-r--r--  fs/userfaultfd.c | 51
-rw-r--r--  fs/xfs/xfs_icache.c | 2
-rw-r--r--  fs/xfs/xfs_inode.c | 31
-rw-r--r--  fs/xfs/xfs_inode.h | 2
-rw-r--r--  fs/xfs/xfs_super.c | 12
-rw-r--r--  fs/xfs/xfs_sysfs.c | 8
-rw-r--r--  include/asm-generic/io.h | 4
-rw-r--r--  include/asm-generic/memory_model.h | 5
-rw-r--r--  include/asm-generic/percpu.h | 39
-rw-r--r--  include/asm-generic/tlb.h | 45
-rw-r--r--  include/cxl/event.h | 101
-rw-r--r--  include/dt-bindings/iio/adc/adi,ad4695.h | 7
-rw-r--r--  include/linux/acpi.h | 11
-rw-r--r--  include/linux/bit_spinlock.h | 8
-rw-r--r--  include/linux/bootmem_info.h | 7
-rw-r--r--  include/linux/buffer_head.h | 2
-rw-r--r--  include/linux/bvec.h | 6
-rw-r--r--  include/linux/cma.h | 9
-rw-r--r--  include/linux/compaction.h | 5
-rw-r--r--  include/linux/compiler-clang.h | 8
-rw-r--r--  include/linux/compiler-gcc.h | 8
-rw-r--r--  include/linux/compiler.h | 20
-rw-r--r--  include/linux/compiler_types.h | 2
-rw-r--r--  include/linux/context_tracking_irq.h | 8
-rw-r--r--  include/linux/coresight.h | 47
-rw-r--r--  include/linux/counter.h | 3
-rw-r--r--  include/linux/cper.h | 8
-rw-r--r--  include/linux/cpu.h | 2
-rw-r--r--  include/linux/cpuhotplug.h | 1
-rw-r--r--  include/linux/crash_reserve.h | 11
-rw-r--r--  include/linux/damon.h | 118
-rw-r--r--  include/linux/dax.h | 28
-rw-r--r--  include/linux/device.h | 2
-rw-r--r--  include/linux/device/class.h | 2
-rw-r--r--  include/linux/dma/k3-udma-glue.h | 3
-rw-r--r--  include/linux/dmaengine.h | 10
-rw-r--r--  include/linux/fb.h | 1
-rw-r--r--  include/linux/folio_queue.h | 12
-rw-r--r--  include/linux/huge_mm.h | 44
-rw-r--r--  include/linux/hugetlb.h | 35
-rw-r--r--  include/linux/i2c.h | 26
-rw-r--r--  include/linux/i3c/master.h | 2
-rw-r--r--  include/linux/idr.h | 11
-rw-r--r--  include/linux/iio/adc/ad_sigma_delta.h | 4
-rw-r--r--  include/linux/iio/backend.h | 19
-rw-r--r--  include/linux/iio/buffer-dmaengine.h | 7
-rw-r--r--  include/linux/iio/iio-gts-helper.h | 1
-rw-r--r--  include/linux/iio/iio.h | 41
-rw-r--r--  include/linux/iio/imu/adis.h | 34
-rw-r--r--  include/linux/interval_tree_generic.h | 8
-rw-r--r--  include/linux/io_uring/cmd.h | 1
-rw-r--r--  include/linux/iomap.h | 15
-rw-r--r--  include/linux/iommu.h | 35
-rw-r--r--  include/linux/iommufd.h | 32
-rw-r--r--  include/linux/ioport.h | 9
-rw-r--r--  include/linux/kdb.h | 2
-rw-r--r--  include/linux/kernfs.h | 14
-rw-r--r--  include/linux/kexec.h | 9
-rw-r--r--  include/linux/kgdb.h | 11
-rw-r--r--  include/linux/linkage.h | 4
-rw-r--r--  include/linux/list_nulls.h | 1
-rw-r--r--  include/linux/mei_cl_bus.h | 5
-rw-r--r--  include/linux/memblock.h | 1
-rw-r--r--  include/linux/memcontrol.h | 32
-rw-r--r--  include/linux/memory.h | 2
-rw-r--r--  include/linux/memremap.h | 17
-rw-r--r--  include/linux/mfd/mt6397/rtc.h | 5
-rw-r--r--  include/linux/mhi.h | 18
-rw-r--r--  include/linux/migrate.h | 4
-rw-r--r--  include/linux/min_heap.h | 12
-rw-r--r--  include/linux/mm.h | 343
-rw-r--r--  include/linux/mm_types.h | 203
-rw-r--r--  include/linux/mmap_lock.h | 6
-rw-r--r--  include/linux/mmu_notifier.h | 8
-rw-r--r--  include/linux/mmzone.h | 55
-rw-r--r--  include/linux/mutex.h | 2
-rw-r--r--  include/linux/nfs4.h | 2
-rw-r--r--  include/linux/nfs_fs_sb.h | 8
-rw-r--r--  include/linux/nfs_xdr.h | 5
-rw-r--r--  include/linux/node.h | 7
-rw-r--r--  include/linux/objtool.h | 2
-rw-r--r--  include/linux/page-flags.h | 53
-rw-r--r--  include/linux/page_counter.h | 9
-rw-r--r--  include/linux/page_ext.h | 93
-rw-r--r--  include/linux/page_ref.h | 2
-rw-r--r--  include/linux/pagemap.h | 25
-rw-r--r--  include/linux/part_stat.h | 2
-rw-r--r--  include/linux/pci-ats.h | 3
-rw-r--r--  include/linux/percpu-defs.h | 6
-rw-r--r--  include/linux/pgalloc_tag.h | 77
-rw-r--r--  include/linux/pgtable.h | 14
-rw-r--r--  include/linux/phy/phy.h | 12
-rw-r--r--  include/linux/platform_data/cros_ec_commands.h | 1
-rw-r--r--  include/linux/pps_gen_kernel.h | 4
-rw-r--r--  include/linux/rcupdate.h | 2
-rw-r--r--  include/linux/rcuwait.h | 13
-rw-r--r--  include/linux/reboot.h | 36
-rw-r--r--  include/linux/refcount.h | 125
-rw-r--r--  include/linux/rhashtable.h | 6
-rw-r--r--  include/linux/rmap.h | 293
-rw-r--r--  include/linux/rtc.h | 1
-rw-r--r--  include/linux/sched.h | 4
-rw-r--r--  include/linux/sched/smt.h | 2
-rw-r--r--  include/linux/seq_buf.h | 4
-rw-r--r--  include/linux/seq_file.h | 1
-rw-r--r--  include/linux/serdev.h | 6
-rw-r--r--  include/linux/slab.h | 15
-rw-r--r--  include/linux/sort.h | 11
-rw-r--r--  include/linux/soundwire/sdw.h | 33
-rw-r--r--include/linux/soundwire/sdw_amd.h2
-rw-r--r--include/linux/soundwire/sdw_intel.h4
-rw-r--r--include/linux/string.h4
-rw-r--r--include/linux/sunrpc/clnt.h5
-rw-r--r--include/linux/sunrpc/sched.h1
-rw-r--r--include/linux/sunrpc/xprtmultipath.h1
-rw-r--r--include/linux/swap.h41
-rw-r--r--include/linux/swap_slots.h28
-rw-r--r--include/linux/swapops.h27
-rw-r--r--include/linux/thermal.h2
-rw-r--r--include/linux/trace.h4
-rw-r--r--include/linux/trace_seq.h8
-rw-r--r--include/linux/tty.h53
-rw-r--r--include/linux/tty_driver.h180
-rw-r--r--include/linux/tty_ldisc.h1
-rw-r--r--include/linux/types.h13
-rw-r--r--include/linux/usb.h8
-rw-r--r--include/linux/usb/musb.h2
-rw-r--r--include/linux/usb/ulpi.h9
-rw-r--r--include/linux/user_namespace.h15
-rw-r--r--include/linux/vfio.h14
-rw-r--r--include/linux/vm_event_item.h2
-rw-r--r--include/linux/vmstat.h2
-rw-r--r--include/linux/writeback.h24
-rw-r--r--include/linux/xarray.h13
-rw-r--r--include/linux/zpool.h47
-rw-r--r--include/linux/zsmalloc.h29
-rw-r--r--include/linux/zswap.h6
-rw-r--r--include/misc/cxl-base.h48
-rw-r--r--include/misc/cxl.h265
-rw-r--r--include/misc/cxllib.h129
-rw-r--r--include/net/snmp.h5
-rw-r--r--include/net/tcp.h15
-rw-r--r--include/sound/hda-sdw-bpt.h69
-rw-r--r--include/trace/events/kmem.h78
-rw-r--r--include/trace/events/sunrpc.h1
-rw-r--r--include/trace/events/writeback.h33
-rw-r--r--include/uapi/linux/capability.h1
-rw-r--r--include/uapi/linux/counter.h2
-rw-r--r--include/uapi/linux/counter/microchip-tcb-capture.h40
-rw-r--r--include/uapi/linux/fuse.h12
-rw-r--r--include/uapi/linux/iio/types.h2
-rw-r--r--include/uapi/linux/iommufd.h129
-rw-r--r--include/uapi/linux/ublk_cmd.h25
-rw-r--r--include/uapi/linux/usb/ch9.h15
-rw-r--r--include/uapi/linux/vfio.h29
-rw-r--r--include/uapi/misc/cxl.h156
-rw-r--r--init/Kconfig27
-rw-r--r--io_uring/Kconfig1
-rw-r--r--io_uring/io_uring.c18
-rw-r--r--io_uring/io_uring.h3
-rw-r--r--io_uring/msg_ring.c11
-rw-r--r--io_uring/net.c135
-rw-r--r--io_uring/refs.h7
-rw-r--r--io_uring/rsrc.c126
-rw-r--r--io_uring/uring_cmd.c22
-rw-r--r--io_uring/uring_cmd.h1
-rw-r--r--io_uring/zcrx.c8
-rw-r--r--kernel/Kconfig.hz2
-rw-r--r--kernel/cgroup/cgroup-v1.c2
-rw-r--r--kernel/cgroup/cgroup.c24
-rw-r--r--kernel/configs/debug.config1
-rw-r--r--kernel/crash_reserve.c9
-rw-r--r--kernel/debug/debug_core.c14
-rw-r--r--kernel/debug/kdb/kdb_io.c4
-rw-r--r--kernel/debug/kdb/kdb_main.c85
-rw-r--r--kernel/events/uprobes.c16
-rw-r--r--kernel/exit.c11
-rw-r--r--kernel/fork.c184
-rw-r--r--kernel/hung_task.c38
-rw-r--r--kernel/kexec_core.c10
-rw-r--r--kernel/kexec_elf.c2
-rw-r--r--kernel/kexec_file.c12
-rw-r--r--kernel/locking/mutex.c14
-rw-r--r--kernel/locking/percpu-rwsem.c2
-rw-r--r--kernel/panic.c6
-rw-r--r--kernel/rcu/Kconfig2
-rw-r--r--kernel/reboot.c140
-rw-r--r--kernel/relay.c3
-rw-r--r--kernel/resource.c18
-rw-r--r--kernel/sched/ext.c8
-rw-r--r--kernel/sched/ext_idle.c12
-rw-r--r--kernel/signal.c15
-rw-r--r--kernel/trace/Kconfig3
-rw-r--r--kernel/trace/ftrace.c1
-rw-r--r--kernel/trace/ring_buffer.c5
-rw-r--r--kernel/trace/rv/rv.c3
-rw-r--r--kernel/trace/trace.c78
-rw-r--r--kernel/trace/trace.h17
-rw-r--r--kernel/trace/trace_events.c7
-rw-r--r--kernel/ucount.c95
-rw-r--r--kernel/watchdog_perf.c6
-rw-r--r--lib/Kconfig.debug26
-rw-r--r--lib/alloc_tag.c6
-rw-r--r--lib/idr.c67
-rw-r--r--lib/interval_tree.c12
-rw-r--r--lib/interval_tree_test.c237
-rw-r--r--lib/maple_tree.c10
-rw-r--r--lib/min_heap.c4
-rw-r--r--lib/plist.c12
-rw-r--r--lib/rbtree_test.c30
-rw-r--r--lib/sg_split.c2
-rw-r--r--lib/sort.c110
-rw-r--r--lib/test_hmm.c72
-rw-r--r--lib/test_ida.c70
-rw-r--r--lib/test_xarray.c52
-rw-r--r--lib/vdso/datastore.c3
-rw-r--r--lib/vsprintf.c9
-rw-r--r--lib/xarray.c157
-rw-r--r--lib/zlib_deflate/deflate.c6
-rw-r--r--mm/Kconfig74
-rw-r--r--mm/Kconfig.debug11
-rw-r--r--mm/Makefile9
-rw-r--r--mm/balloon_compaction.c2
-rw-r--r--mm/bootmem_info.c4
-rw-r--r--mm/cma.c746
-rw-r--r--mm/cma.h47
-rw-r--r--mm/cma_debug.c61
-rw-r--r--mm/cma_sysfs.c20
-rw-r--r--mm/compaction.c95
-rw-r--r--mm/damon/core.c312
-rw-r--r--mm/damon/ops-common.c25
-rw-r--r--mm/damon/paddr.c86
-rw-r--r--mm/damon/sysfs-schemes.c179
-rw-r--r--mm/damon/sysfs.c357
-rw-r--r--mm/damon/tests/core-kunit.h6
-rw-r--r--mm/damon/vaddr.c1
-rw-r--r--mm/debug.c46
-rw-r--r--mm/filemap.c50
-rw-r--r--mm/gup.c29
-rw-r--r--mm/hmm.c2
-rw-r--r--mm/huge_memory.c1019
-rw-r--r--mm/hugetlb.c663
-rw-r--r--mm/hugetlb_cgroup.c31
-rw-r--r--mm/hugetlb_cma.c275
-rw-r--r--mm/hugetlb_cma.h57
-rw-r--r--mm/hugetlb_vmemmap.c199
-rw-r--r--mm/hugetlb_vmemmap.h23
-rw-r--r--mm/init-mm.c1
-rw-r--r--mm/internal.h93
-rw-r--r--mm/ioremap.c4
-rw-r--r--mm/kasan/kasan_test_c.c5
-rw-r--r--mm/khugepaged.c8
-rw-r--r--mm/kmemleak.c52
-rw-r--r--mm/ksm.c9
-rw-r--r--mm/list_lru.c15
-rw-r--r--mm/madvise.c229
-rw-r--r--mm/memblock.c6
-rw-r--r--mm/memcontrol-v1.c102
-rw-r--r--mm/memcontrol-v1.h52
-rw-r--r--mm/memcontrol.c294
-rw-r--r--mm/memfd.c4
-rw-r--r--mm/memory-failure.c27
-rw-r--r--mm/memory.c381
-rw-r--r--mm/memory_hotplug.c15
-rw-r--r--mm/mempolicy.c8
-rw-r--r--mm/memremap.c60
-rw-r--r--mm/migrate.c11
-rw-r--r--mm/migrate_device.c18
-rw-r--r--mm/mincore.c2
-rw-r--r--mm/mlock.c2
-rw-r--r--mm/mm_init.c196
-rw-r--r--mm/mmap.c10
-rw-r--r--mm/mmu_gather.c12
-rw-r--r--mm/mprotect.c16
-rw-r--r--mm/mremap.c1449
-rw-r--r--mm/nommu.c10
-rw-r--r--mm/oom_kill.c2
-rw-r--r--mm/page-writeback.c38
-rw-r--r--mm/page_alloc.c753
-rw-r--r--mm/page_counter.c4
-rw-r--r--mm/page_ext.c13
-rw-r--r--mm/page_idle.c9
-rw-r--r--mm/page_io.c6
-rw-r--r--mm/page_isolation.c9
-rw-r--r--mm/page_owner.c86
-rw-r--r--mm/page_table_check.c44
-rw-r--r--mm/page_vma_mapped.c16
-rw-r--r--mm/percpu.c8
-rw-r--r--mm/rmap.c939
-rw-r--r--mm/shmem.c167
-rw-r--r--mm/show_mem.c4
-rw-r--r--mm/shrinker_debug.c8
-rw-r--r--mm/slub.c51
-rw-r--r--mm/sparse-vmemmap.c168
-rw-r--r--mm/sparse.c92
-rw-r--r--mm/swap.c2
-rw-r--r--mm/swap.h6
-rw-r--r--mm/swap_cgroup.c3
-rw-r--r--mm/swap_slots.c295
-rw-r--r--mm/swap_state.c91
-rw-r--r--mm/swapfile.c432
-rw-r--r--mm/truncate.c53
-rw-r--r--mm/userfaultfd.c38
-rw-r--r--mm/vma.c352
-rw-r--r--mm/vma.h101
-rw-r--r--mm/vmalloc.c20
-rw-r--r--mm/vmscan.c252
-rw-r--r--mm/vmstat.c5
-rw-r--r--mm/z3fold.c1447
-rw-r--r--mm/zbud.c455
-rw-r--r--mm/zpool.c97
-rw-r--r--mm/zsmalloc.c498
-rw-r--r--mm/zswap.c194
-rw-r--r--net/9p/client.c44
-rw-r--r--net/9p/error.c21
-rw-r--r--net/9p/trans_fd.c73
-rw-r--r--net/core/netdev-genl.c6
-rw-r--r--net/core/rtnetlink.c3
-rw-r--r--net/ipv4/tcp.c18
-rw-r--r--net/ipv4/tcp_fastopen.c2
-rw-r--r--net/ipv4/tcp_input.c6
-rw-r--r--net/mpls/internal.h4
-rw-r--r--net/mptcp/subflow.c15
-rw-r--r--net/sched/sch_api.c73
-rw-r--r--net/sunrpc/clnt.c33
-rw-r--r--net/sunrpc/rpcb_clnt.c5
-rw-r--r--net/sunrpc/sched.c2
-rw-r--r--net/sunrpc/sysfs.c202
-rw-r--r--net/sunrpc/xprtmultipath.c21
-rw-r--r--net/xdp/xsk.c5
-rw-r--r--rust/Makefile12
-rw-r--r--rust/compiler_builtins.rs24
-rw-r--r--rust/kernel/device.rs26
-rw-r--r--rust/kernel/devres.rs2
-rw-r--r--rust/kernel/faux.rs16
-rw-r--r--rust/kernel/io.rs66
-rw-r--r--rust/kernel/miscdevice.rs297
-rw-r--r--rust/kernel/pci.rs146
-rw-r--r--rust/kernel/platform.rs104
-rw-r--r--samples/Kconfig9
-rw-r--r--samples/Makefile1
-rw-r--r--samples/damon/Kconfig4
-rw-r--r--samples/hung_task/Makefile2
-rw-r--r--samples/hung_task/hung_task_mutex.c66
-rw-r--r--samples/kmemleak/kmemleak-test.c36
-rw-r--r--samples/rust/rust_dma.rs8
-rw-r--r--samples/rust/rust_driver_faux.rs2
-rw-r--r--samples/rust/rust_driver_pci.rs20
-rw-r--r--samples/rust/rust_driver_platform.rs11
-rw-r--r--samples/rust/rust_misc_device.rs181
-rw-r--r--samples/trace_events/trace-events-sample.h8
-rw-r--r--scripts/Makefile.lib4
-rw-r--r--scripts/Makefile.vmlinux_o15
-rwxr-xr-xscripts/checkpatch.pl5
-rw-r--r--scripts/coccinelle/misc/secs_to_jiffies.cocci10
-rwxr-xr-xscripts/extract-fwblobs30
-rw-r--r--scripts/gdb/linux/cpus.py22
-rw-r--r--scripts/gdb/linux/symbols.py44
-rw-r--r--scripts/gdb/linux/utils.py35
-rw-r--r--scripts/generate_rust_target.rs4
-rwxr-xr-xscripts/get_maintainer.pl49
-rw-r--r--scripts/sorttable.c2
-rwxr-xr-xscripts/tags.sh1
-rw-r--r--security/Kconfig21
-rw-r--r--security/selinux/hooks.c7
-rw-r--r--sound/hda/intel-sdw-acpi.c2
-rw-r--r--sound/pci/ac97/ac97_codec.c3
-rw-r--r--sound/pci/hda/patch_realtek.c65
-rw-r--r--sound/pci/hda/tas2781_hda_i2c.c30
-rw-r--r--sound/soc/codecs/rt5665.c24
-rw-r--r--sound/soc/codecs/rt711-sdca-sdw.c8
-rw-r--r--sound/soc/codecs/sma1307.c11
-rw-r--r--sound/soc/codecs/wcd934x.c2
-rw-r--r--sound/soc/codecs/wsa883x.c2
-rw-r--r--sound/soc/codecs/wsa884x.c2
-rw-r--r--sound/soc/fsl/imx-card.c4
-rw-r--r--sound/soc/qcom/qdsp6/q6apm-dai.c60
-rw-r--r--sound/soc/qcom/qdsp6/q6apm.c18
-rw-r--r--sound/soc/qcom/qdsp6/q6apm.h3
-rw-r--r--sound/soc/qcom/qdsp6/q6asm-dai.c19
-rw-r--r--sound/soc/qcom/sdw.c2
-rw-r--r--sound/soc/sof/intel/Kconfig7
-rw-r--r--sound/soc/sof/intel/Makefile4
-rw-r--r--sound/soc/sof/intel/hda-dsp.c8
-rw-r--r--sound/soc/sof/intel/hda-sdw-bpt.c445
-rw-r--r--sound/soc/sof/intel/hda.c4
-rw-r--r--sound/soc/sof/intel/hda.h8
-rw-r--r--sound/soc/sof/intel/ptl.c33
-rw-r--r--sound/virtio/virtio_pcm.c21
-rw-r--r--tools/counter/.gitignore1
-rw-r--r--tools/counter/counter_watch_events.c5
-rw-r--r--tools/iio/iio_event_monitor.c4
-rw-r--r--tools/include/asm/timex.h13
-rw-r--r--tools/include/linux/bitmap.h21
-rw-r--r--tools/include/linux/container_of.h18
-rw-r--r--tools/include/linux/kernel.h14
-rw-r--r--tools/include/linux/math64.h5
-rw-r--r--tools/include/linux/moduleparam.h7
-rw-r--r--tools/include/linux/prandom.h51
-rw-r--r--tools/include/linux/refcount.h5
-rw-r--r--tools/include/linux/slab.h1
-rw-r--r--tools/include/linux/types.h2
-rw-r--r--tools/lib/bitmap.c20
-rw-r--r--tools/lib/slab.c16
-rw-r--r--tools/objtool/Documentation/objtool.txt10
-rw-r--r--tools/objtool/arch/loongarch/decode.c14
-rw-r--r--tools/objtool/arch/loongarch/orc.c8
-rw-r--r--tools/objtool/arch/x86/decode.c15
-rw-r--r--tools/objtool/arch/x86/orc.c6
-rw-r--r--tools/objtool/arch/x86/special.c38
-rw-r--r--tools/objtool/builtin-check.c132
-rw-r--r--tools/objtool/check.c647
-rw-r--r--tools/objtool/elf.c156
-rw-r--r--tools/objtool/include/objtool/builtin.h6
-rw-r--r--tools/objtool/include/objtool/check.h3
-rw-r--r--tools/objtool/include/objtool/elf.h30
-rw-r--r--tools/objtool/include/objtool/objtool.h2
-rw-r--r--tools/objtool/include/objtool/special.h4
-rw-r--r--tools/objtool/include/objtool/warn.h62
-rw-r--r--tools/objtool/objtool.c15
-rw-r--r--tools/objtool/orc_dump.c30
-rw-r--r--tools/objtool/special.c25
-rw-r--r--tools/sched_ext/include/scx/common.bpf.h85
-rw-r--r--tools/sched_ext/include/scx/enum_defs.autogen.h3
-rw-r--r--tools/sched_ext/include/scx/enums.autogen.bpf.h24
-rw-r--r--tools/sched_ext/include/scx/enums.autogen.h8
-rw-r--r--tools/sched_ext/include/scx/enums.h3
-rw-r--r--tools/testing/cxl/Kbuild3
-rw-r--r--tools/testing/cxl/test/cxl.c32
-rw-r--r--tools/testing/cxl/test/mem.c32
-rw-r--r--tools/testing/radix-tree/Makefile1
-rw-r--r--tools/testing/rbtree/Makefile33
-rw-r--r--tools/testing/rbtree/interval_tree_test.c58
-rw-r--r--tools/testing/rbtree/rbtree_test.c48
-rw-r--r--tools/testing/rbtree/test.h4
-rw-r--r--tools/testing/selftests/Makefile1
-rw-r--r--tools/testing/selftests/bpf/progs/profiler.inc.h2
-rw-r--r--tools/testing/selftests/bpf/progs/test_module_attach.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_subprogs_extable.c6
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c6
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_private_stack.c6
-rw-r--r--tools/testing/selftests/clone3/clone3_selftests.h2
-rwxr-xr-xtools/testing/selftests/drivers/net/hds.py2
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/csum.py2
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/irq.py2
-rw-r--r--tools/testing/selftests/drivers/net/hw/xdp_dummy.bpf.c13
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/env.py21
-rwxr-xr-xtools/testing/selftests/drivers/net/ping.py15
-rwxr-xr-xtools/testing/selftests/drivers/net/queues.py4
-rw-r--r--tools/testing/selftests/iommu/iommufd.c365
-rw-r--r--tools/testing/selftests/iommu/iommufd_fail_nth.c59
-rw-r--r--tools/testing/selftests/iommu/iommufd_utils.h229
-rw-r--r--tools/testing/selftests/mm/.gitignore2
-rw-r--r--tools/testing/selftests/mm/Makefile2
-rw-r--r--tools/testing/selftests/mm/cow.c2
-rw-r--r--tools/testing/selftests/mm/guard-regions.c (renamed from tools/testing/selftests/mm/guard-pages.c)968
-rw-r--r--tools/testing/selftests/mm/gup_longterm.c41
-rw-r--r--tools/testing/selftests/mm/map_populate.c5
-rw-r--r--tools/testing/selftests/mm/mlock-random-test.c4
-rw-r--r--tools/testing/selftests/mm/mlock2.h8
-rwxr-xr-xtools/testing/selftests/mm/run_vmtests.sh95
-rw-r--r--tools/testing/selftests/mm/split_huge_page_test.c106
-rw-r--r--tools/testing/selftests/mm/thuge-gen.c4
-rw-r--r--tools/testing/selftests/mm/uffd-common.c12
-rw-r--r--tools/testing/selftests/mm/uffd-common.h2
-rw-r--r--tools/testing/selftests/mm/uffd-stress.c42
-rw-r--r--tools/testing/selftests/mm/uffd-unit-tests.c7
-rw-r--r--tools/testing/selftests/mm/uffd-wp-mremap.c5
-rwxr-xr-xtools/testing/selftests/mm/va_high_addr_switch.sh28
-rw-r--r--tools/testing/selftests/mm/vm_util.h19
-rw-r--r--tools/testing/selftests/mseal_system_mappings/.gitignore2
-rw-r--r--tools/testing/selftests/mseal_system_mappings/Makefile6
-rw-r--r--tools/testing/selftests/mseal_system_mappings/config1
-rw-r--r--tools/testing/selftests/mseal_system_mappings/sysmap_is_sealed.c119
-rw-r--r--tools/testing/selftests/net/mptcp/.gitignore1
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_connect.c11
-rwxr-xr-xtools/testing/selftests/net/udpgro_bench.sh2
-rwxr-xr-xtools/testing/selftests/net/udpgro_frglist.sh2
-rwxr-xr-xtools/testing/selftests/net/udpgro_fwd.sh2
-rwxr-xr-xtools/testing/selftests/net/veth.sh2
-rw-r--r--tools/testing/selftests/net/xdp_dummy.bpf.c13
-rw-r--r--tools/testing/selftests/pidfd/pidfd.h8
-rw-r--r--tools/testing/selftests/rtc/.gitignore1
-rw-r--r--tools/testing/selftests/rtc/Makefile2
-rw-r--r--tools/testing/selftests/rtc/rtctest.c19
-rw-r--r--tools/testing/selftests/rtc/setdate.c77
-rw-r--r--tools/testing/selftests/ublk/Makefile5
-rw-r--r--tools/testing/selftests/ublk/kublk.c8
-rw-r--r--tools/testing/selftests/ublk/kublk.h4
-rw-r--r--tools/testing/selftests/ublk/null.c11
-rw-r--r--tools/testing/selftests/ublk/stripe.c69
-rwxr-xr-xtools/testing/selftests/ublk/test_common.sh6
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_02.sh44
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_03.sh28
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_01.sh14
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_03.sh14
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_05.sh28
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_01.sh6
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_02.sh6
-rwxr-xr-xtools/testing/selftests/ublk/test_stripe_01.sh14
-rwxr-xr-xtools/testing/selftests/ublk/test_stripe_03.sh30
-rw-r--r--tools/testing/selftests/x86/test_mremap_vdso.c43
-rw-r--r--tools/testing/shared/interval_tree-shim.c5
-rw-r--r--tools/testing/shared/linux/interval_tree.h7
-rw-r--r--tools/testing/shared/linux/interval_tree_generic.h2
-rw-r--r--tools/testing/shared/linux/rbtree.h8
-rw-r--r--tools/testing/shared/linux/rbtree_augmented.h7
-rw-r--r--tools/testing/shared/linux/rbtree_types.h8
-rw-r--r--tools/testing/shared/rbtree-shim.c6
-rw-r--r--tools/testing/vma/linux/atomic.h5
-rw-r--r--tools/testing/vma/vma.c105
-rw-r--r--tools/testing/vma/vma_internal.h131
-rw-r--r--tools/virtio/linux/compiler.h25
-rw-r--r--tools/virtio/linux/dma-mapping.h13
-rw-r--r--tools/virtio/linux/module.h7
1787 files changed, 63425 insertions, 42703 deletions
diff --git a/.mailmap b/.mailmap
index f485903803c6..4f7cd8e23177 100644
--- a/.mailmap
+++ b/.mailmap
@@ -31,6 +31,13 @@ Alexander Lobakin <alobakin@pm.me> <alobakin@marvell.com>
Alexander Lobakin <alobakin@pm.me> <bloodyreaper@yandex.ru>
Alexander Mikhalitsyn <alexander@mihalicyn.com> <alexander.mikhalitsyn@virtuozzo.com>
Alexander Mikhalitsyn <alexander@mihalicyn.com> <aleksandr.mikhalitsyn@canonical.com>
+Alexander Sverdlin <alexander.sverdlin@gmail.com> <alexander.sverdlin.ext@nsn.com>
+Alexander Sverdlin <alexander.sverdlin@gmail.com> <alexander.sverdlin@gmx.de>
+Alexander Sverdlin <alexander.sverdlin@gmail.com> <alexander.sverdlin@nokia.com>
+Alexander Sverdlin <alexander.sverdlin@gmail.com> <alexander.sverdlin@nsn.com>
+Alexander Sverdlin <alexander.sverdlin@gmail.com> <alexander.sverdlin@siemens.com>
+Alexander Sverdlin <alexander.sverdlin@gmail.com> <alexander.sverdlin@sysgo.com>
+Alexander Sverdlin <alexander.sverdlin@gmail.com> <subaparts@yandex.ru>
Alexandre Belloni <alexandre.belloni@bootlin.com> <alexandre.belloni@free-electrons.com>
Alexandre Ghiti <alex@ghiti.fr> <alexandre.ghiti@canonical.com>
Alexei Avshalom Lazar <quic_ailizaro@quicinc.com> <ailizaro@codeaurora.org>
@@ -153,7 +160,6 @@ Carlos Bilbao <carlos.bilbao@kernel.org> <carlos.bilbao@amd.com>
Carlos Bilbao <carlos.bilbao@kernel.org> <carlos.bilbao.osdev@gmail.com>
Carlos Bilbao <carlos.bilbao@kernel.org> <bilbao@vt.edu>
Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
-Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>
Chao Yu <chao@kernel.org> <yuchao0@huawei.com>
Chester Lin <chester62515@gmail.com> <clin@suse.com>
@@ -271,6 +277,7 @@ Hamza Mahfooz <hamzamahfooz@linux.microsoft.com> <hamza.mahfooz@amd.com>
Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
Hans Verkuil <hverkuil@xs4all.nl> <hansverk@cisco.com>
Hans Verkuil <hverkuil@xs4all.nl> <hverkuil-cisco@xs4all.nl>
+Harry Yoo <harry.yoo@oracle.com> <42.hyeyoo@gmail.com>
Heiko Carstens <hca@linux.ibm.com> <h.carstens@de.ibm.com>
Heiko Carstens <hca@linux.ibm.com> <heiko.carstens@de.ibm.com>
Heiko Stuebner <heiko@sntech.de> <heiko.stuebner@bqreaders.com>
@@ -305,7 +312,6 @@ Jan Glauber <jan.glauber@gmail.com> <jglauber@cavium.com>
Jan Kuliga <jtkuliga.kdev@gmail.com> <jankul@alatek.krakow.pl>
Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@linux.intel.com>
Jarkko Sakkinen <jarkko@kernel.org> <jarkko@profian.com>
-Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@parity.io>
Jason Gunthorpe <jgg@ziepe.ca> <jgg@mellanox.com>
Jason Gunthorpe <jgg@ziepe.ca> <jgg@nvidia.com>
Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>
@@ -543,6 +549,8 @@ Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>
Nicolas Saenz Julienne <nsaenz@kernel.org> <nsaenzjulienne@suse.de>
Nicolas Saenz Julienne <nsaenz@kernel.org> <nsaenzjulienne@suse.com>
+Nicolas Schier <nicolas.schier@linux.dev> <n.schier@avm.de>
+Nicolas Schier <nicolas.schier@linux.dev> <nicolas@fjasle.eu>
Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
Nikolay Aleksandrov <razor@blackwall.org> <naleksan@redhat.com>
Nikolay Aleksandrov <razor@blackwall.org> <nikolay@redhat.com>
@@ -762,7 +770,6 @@ Vinod Koul <vkoul@kernel.org> <vkoul@infradead.org>
Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
-Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.org>
Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.com>
Vishnu Dasa <vishnu.dasa@broadcom.com> <vdasa@vmware.com>
Vivek Aknurwar <quic_viveka@quicinc.com> <viveka@codeaurora.org>
diff --git a/CREDITS b/CREDITS
index 5cc36686d0f1..74a138c67614 100644
--- a/CREDITS
+++ b/CREDITS
@@ -317,6 +317,10 @@ S: Code 930.5, Goddard Space Flight Center
S: Greenbelt, Maryland 20771
S: USA
+N: Joel Becker
+E: jlbec@evilplan.org
+D: configfs
+
N: Adam Belay
E: ambx1@neo.rr.com
D: Linux Plug and Play Support
@@ -855,6 +859,10 @@ N: John Crispin
E: john@phrozen.org
D: MediaTek MT7623 Gigabit ethernet support
+N: Conor Culhane
+E: conor.culhane@silvaco.com
+D: Silvaco I3C master driver
+
N: Laurence Culhane
E: loz@holmes.demon.co.uk
D: Wrote the initial alpha SLIP code
@@ -1895,6 +1903,7 @@ S: Czech Republic
N: Seth Jennings
E: sjenning@redhat.com
D: Creation and maintenance of zswap
+D: Creation and maintenance of the zbud allocator
N: Jeremy Kerr
D: Maintainer of SPU File System
@@ -3803,6 +3812,7 @@ N: Dan Streetman
E: ddstreet@ieee.org
D: Maintenance and development of zswap
D: Creation and maintenance of the zpool API
+D: Maintenance of the zbud allocator
N: Drew Sullivan
E: drew@ss.org
@@ -4330,6 +4340,7 @@ S: England
N: Vitaly Wool
E: vitaly.wool@konsulko.com
D: Maintenance and development of zswap
+D: Maintenance and development of z3fold
N: Chris Wright
E: chrisw@sous-sol.org
diff --git a/Documentation/ABI/obsolete/sysfs-class-cxl b/Documentation/ABI/removed/sysfs-class-cxl
index 8cba1b626985..266c413b96e8 100644
--- a/Documentation/ABI/obsolete/sysfs-class-cxl
+++ b/Documentation/ABI/removed/sysfs-class-cxl
@@ -1,5 +1,4 @@
-The cxl driver is no longer maintained, and will be removed from the kernel in
-the near future.
+The cxl driver was removed in 6.15.
Please note that attributes that are shared between devices are stored in
the directory pointed to by the symlink device/.
@@ -10,7 +9,7 @@ For example, the real path of the attribute /sys/class/cxl/afu0.0s/irqs_max is
Slave contexts (eg. /sys/class/cxl/afu0.0s):
What: /sys/class/cxl/<afu>/afu_err_buf
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
AFU Error Buffer contents. The contents of this file are
@@ -21,7 +20,7 @@ Description: read only
What: /sys/class/cxl/<afu>/irqs_max
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read/write
Decimal value of maximum number of interrupts that can be
@@ -32,7 +31,7 @@ Description: read/write
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>/irqs_min
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Decimal value of the minimum number of interrupts that
@@ -42,7 +41,7 @@ Description: read only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>/mmio_size
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Decimal value of the size of the MMIO space that may be mmapped
@@ -50,7 +49,7 @@ Description: read only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>/modes_supported
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
List of the modes this AFU supports. One per line.
@@ -58,7 +57,7 @@ Description: read only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>/mode
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read/write
The current mode the AFU is using. Will be one of the modes
@@ -68,7 +67,7 @@ Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>/prefault_mode
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read/write
Set the mode for prefaulting in segments into the segment table
@@ -88,7 +87,7 @@ Description: read/write
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>/reset
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: write only
Writing 1 here will reset the AFU provided there are not
@@ -96,14 +95,14 @@ Description: write only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>/api_version
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Decimal value of the current version of the kernel/user API.
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>/api_version_compatible
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Decimal value of the lowest version of the userspace API
@@ -117,7 +116,7 @@ An AFU may optionally export one or more PCIe like configuration records, known
as AFU configuration records, which will show up here (if present).
What: /sys/class/cxl/<afu>/cr<config num>/vendor
-Date: February 2015
+Date: February 2015, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Hexadecimal value of the vendor ID found in this AFU
@@ -125,7 +124,7 @@ Description: read only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>/cr<config num>/device
-Date: February 2015
+Date: February 2015, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Hexadecimal value of the device ID found in this AFU
@@ -133,7 +132,7 @@ Description: read only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>/cr<config num>/class
-Date: February 2015
+Date: February 2015, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Hexadecimal value of the class code found in this AFU
@@ -141,7 +140,7 @@ Description: read only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>/cr<config num>/config
-Date: February 2015
+Date: February 2015, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
This binary file provides raw access to the AFU configuration
@@ -155,7 +154,7 @@ Users: https://github.com/ibm-capi/libcxl
Master contexts (eg. /sys/class/cxl/afu0.0m)
What: /sys/class/cxl/<afu>m/mmio_size
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Decimal value of the size of the MMIO space that may be mmapped
@@ -163,14 +162,14 @@ Description: read only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>m/pp_mmio_len
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Decimal value of the Per Process MMIO space length.
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>m/pp_mmio_off
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
(not in a guest)
@@ -181,21 +180,21 @@ Users: https://github.com/ibm-capi/libcxl
Card info (eg. /sys/class/cxl/card0)
What: /sys/class/cxl/<card>/caia_version
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Identifies the CAIA Version the card implements.
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<card>/psl_revision
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Identifies the revision level of the PSL.
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<card>/base_image
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
(not in a guest)
@@ -206,7 +205,7 @@ Description: read only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<card>/image_loaded
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
(not in a guest)
@@ -215,7 +214,7 @@ Description: read only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<card>/load_image_on_perst
-Date: December 2014
+Date: December 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read/write
(not in a guest)
@@ -232,7 +231,7 @@ Description: read/write
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<card>/reset
-Date: October 2014
+Date: October 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: write only
Writing 1 will issue a PERST to card provided there are no
@@ -243,7 +242,7 @@ Description: write only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<card>/perst_reloads_same_image
-Date: July 2015
+Date: July 2015, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read/write
(not in a guest)
@@ -257,7 +256,7 @@ Description: read/write
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<card>/psl_timebase_synced
-Date: March 2016
+Date: March 2016, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Returns 1 if the psl timebase register is synchronized
@@ -265,7 +264,7 @@ Description: read only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<card>/tunneled_ops_supported
-Date: May 2018
+Date: May 2018, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Returns 1 if tunneled operations are supported in capi mode,
diff --git a/Documentation/ABI/stable/sysfs-devices-node b/Documentation/ABI/stable/sysfs-devices-node
index 402af4b2b905..a02707cb7cbc 100644
--- a/Documentation/ABI/stable/sysfs-devices-node
+++ b/Documentation/ABI/stable/sysfs-devices-node
@@ -177,6 +177,12 @@ Description:
The cache write policy: 0 for write-back, 1 for write-through,
other or unknown.
+What: /sys/devices/system/node/nodeX/memory_side_cache/indexY/address_mode
+Date: March 2025
+Contact: Dave Jiang <dave.jiang@intel.com>
+Description:
+ The address mode: 0 for reserved, 1 for extended-linear.
+
What: /sys/devices/system/node/nodeX/x86/sgx_total_bytes
Date: November 2021
Contact: Jarkko Sakkinen <jarkko@kernel.org>
diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram
index 1ef69e0271f9..36c57de0a10a 100644
--- a/Documentation/ABI/testing/sysfs-block-zram
+++ b/Documentation/ABI/testing/sysfs-block-zram
@@ -22,14 +22,6 @@ Description:
device. The reset operation frees all the memory associated
with this device.
-What: /sys/block/zram<id>/max_comp_streams
-Date: February 2014
-Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
-Description:
- The max_comp_streams file is read-write and specifies the
- number of backend's zcomp_strm compression streams (number of
- concurrent compress operations).
-
What: /sys/block/zram<id>/comp_algorithm
Date: February 2014
Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm b/Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm
index 53cb454b60d0..a341b08ae70b 100644
--- a/Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm
@@ -257,3 +257,18 @@ Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_t
Description:
(RW) Set/Get the MSR(mux select register) for the CMB subunit
TPDM.
+
+What: /sys/bus/coresight/devices/<tpdm-name>/mcmb_trig_lane
+Date: Feb 2025
+KernelVersion: 6.15
+Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
+Description:
+ (RW) Set/Get which lane participates in the output pattern
+ match cross trigger mechanism for the MCMB subunit TPDM.
+
+What: /sys/bus/coresight/devices/<tpdm-name>/mcmb_lanes_select
+Date: Feb 2025
+KernelVersion: 6.15
+Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
+Description:
+ (RW) Set/Get the enablement of the individual lane.
diff --git a/Documentation/ABI/testing/sysfs-bus-counter b/Documentation/ABI/testing/sysfs-bus-counter
index 73ac84c0bca7..3e8259e56d38 100644
--- a/Documentation/ABI/testing/sysfs-bus-counter
+++ b/Documentation/ABI/testing/sysfs-bus-counter
@@ -34,6 +34,14 @@ Contact: linux-iio@vger.kernel.org
Description:
Count data of Count Y represented as a string.
+What: /sys/bus/counter/devices/counterX/countY/compare
+KernelVersion: 6.15
+Contact: linux-iio@vger.kernel.org
+Description:
+ If the counter device supports compare registers -- registers
+ used to compare counter channels against a particular count --
+ the compare count for channel Y is provided by this attribute.
+
What: /sys/bus/counter/devices/counterX/countY/capture
KernelVersion: 6.1
Contact: linux-iio@vger.kernel.org
@@ -301,6 +309,7 @@ Description:
What: /sys/bus/counter/devices/counterX/cascade_counts_enable_component_id
What: /sys/bus/counter/devices/counterX/external_input_phase_clock_select_component_id
+What: /sys/bus/counter/devices/counterX/countY/compare_component_id
What: /sys/bus/counter/devices/counterX/countY/capture_component_id
What: /sys/bus/counter/devices/counterX/countY/ceiling_component_id
What: /sys/bus/counter/devices/counterX/countY/floor_component_id
diff --git a/Documentation/ABI/testing/sysfs-bus-cxl b/Documentation/ABI/testing/sysfs-bus-cxl
index 3f5627a1210a..99bb3faf7a0e 100644
--- a/Documentation/ABI/testing/sysfs-bus-cxl
+++ b/Documentation/ABI/testing/sysfs-bus-cxl
@@ -1,5 +1,5 @@
What: /sys/bus/cxl/flush
-Date: Januarry, 2022
+Date: January, 2022
KernelVersion: v5.18
Contact: linux-cxl@vger.kernel.org
Description:
@@ -18,6 +18,24 @@ Description:
specification.
+What: /sys/bus/cxl/devices/memX/payload_max
+Date: December, 2020
+KernelVersion: v5.12
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RO) Maximum size (in bytes) of the mailbox command payload
+ registers. Linux caps this at 1MB if the device reports a
+ larger size.
+
+
+What: /sys/bus/cxl/devices/memX/label_storage_size
+Date: May, 2021
+KernelVersion: v5.13
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RO) Size (in bytes) of the Label Storage Area (LSA).
+
+
What: /sys/bus/cxl/devices/memX/ram/size
Date: December, 2020
KernelVersion: v5.12
@@ -33,7 +51,7 @@ Date: May, 2023
KernelVersion: v6.8
Contact: linux-cxl@vger.kernel.org
Description:
- (RO) For CXL host platforms that support "QoS Telemmetry"
+ (RO) For CXL host platforms that support "QoS Telemetry"
this attribute conveys a comma delimited list of platform
specific cookies that identifies a QoS performance class
for the volatile partition of the CXL mem device. These
@@ -60,7 +78,7 @@ Date: May, 2023
KernelVersion: v6.8
Contact: linux-cxl@vger.kernel.org
Description:
- (RO) For CXL host platforms that support "QoS Telemmetry"
+ (RO) For CXL host platforms that support "QoS Telemetry"
this attribute conveys a comma delimited list of platform
specific cookies that identifies a QoS performance class
for the persistent partition of the CXL mem device. These
@@ -321,14 +339,13 @@ KernelVersion: v6.0
Contact: linux-cxl@vger.kernel.org
Description:
(RW) When a CXL decoder is of devtype "cxl_decoder_endpoint" it
- translates from a host physical address range, to a device local
- address range. Device-local address ranges are further split
- into a 'ram' (volatile memory) range and 'pmem' (persistent
- memory) range. The 'mode' attribute emits one of 'ram', 'pmem',
- 'mixed', or 'none'. The 'mixed' indication is for error cases
- when a decoder straddles the volatile/persistent partition
- boundary, and 'none' indicates the decoder is not actively
- decoding, or no DPA allocation policy has been set.
+ translates from a host physical address range, to a device
+ local address range. Device-local address ranges are further
+ split into a 'ram' (volatile memory) range and 'pmem'
+ (persistent memory) range. The 'mode' attribute emits one of
+ 'ram', 'pmem', or 'none'. The 'none' indicates the decoder is
+ not actively decoding, or no DPA allocation policy has been
+ set.
'mode' can be written, when the decoder is in the 'disabled'
state, with either 'ram' or 'pmem' to set the boundaries for the
@@ -423,7 +440,7 @@ Date: May, 2023
KernelVersion: v6.5
Contact: linux-cxl@vger.kernel.org
Description:
- (RO) For CXL host platforms that support "QoS Telemmetry" this
+ (RO) For CXL host platforms that support "QoS Telemetry" this
root-decoder-only attribute conveys a platform specific cookie
that identifies a QoS performance class for the CXL Window.
This class-id can be compared against a similar "qos_class"
@@ -586,3 +603,15 @@ Description:
See Documentation/ABI/stable/sysfs-devices-node. access0 provides
the number to the closest initiator and access1 provides the
number to the closest CPU.
+
+
+What: /sys/bus/cxl/devices/nvdimm-bridge0/ndbusX/nmemY/cxl/dirty_shutdown
+Date: Feb, 2025
+KernelVersion: v6.15
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RO) The device dirty shutdown count value, which is the number
+ of times the device could have incurred potential data loss.
+ The count is persistent across power loss and wraps back to 0
+ upon overflow. If this file is not present, the device does not
+ have the necessary support for dirty tracking.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 25d366d452a5..722aa989baac 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -2268,7 +2268,7 @@ Description:
representing the sensor unique ID number.
What: /sys/bus/iio/devices/iio:deviceX/filter_type_available
-What: /sys/bus/iio/devices/iio:deviceX/in_voltage-voltage_filter_mode_available
+What: /sys/bus/iio/devices/iio:deviceX/in_voltage-voltage_filter_type_available
KernelVersion: 6.1
Contact: linux-iio@vger.kernel.org
Description:
@@ -2290,6 +2290,16 @@ Description:
* "sinc3+pf2" - Sinc3 + device specific Post Filter 2.
* "sinc3+pf3" - Sinc3 + device specific Post Filter 3.
* "sinc3+pf4" - Sinc3 + device specific Post Filter 4.
+ * "wideband" - filter with wideband low ripple passband
+ and sharp transition band.
+
+What: /sys/bus/iio/devices/iio:deviceX/filter_type
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY-voltageZ_filter_type
+KernelVersion: 6.1
+Contact: linux-iio@vger.kernel.org
+Description:
+ Specifies which filter type applies to the channel. The possible
+ values are given by the filter_type_available attribute.
What: /sys/.../events/in_proximity_thresh_either_runningperiod
KernelVersion: 6.6
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-adc-ad4130 b/Documentation/ABI/testing/sysfs-bus-iio-adc-ad4130
new file mode 100644
index 000000000000..d3fad27421d6
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-adc-ad4130
@@ -0,0 +1,20 @@
+What: /sys/bus/iio/devices/iio:deviceX/in_voltage-voltage_filter_mode_available
+KernelVersion: 6.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Reading returns a list with the possible filter modes.
+
+ This ABI is only kept for backwards compatibility and the values
+ returned are identical to the filter_type_available attribute
+ documented in Documentation/ABI/testing/sysfs-bus-iio. Please
+ use a filter_type_available-style ABI to provide filter options
+ for new drivers.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_voltageY-voltageZ_filter_mode
+KernelVersion: 6.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ This ABI is only kept for backwards compatibility and the values
+ returned are identical to the in_voltageY-voltageZ_filter_type
+ attribute documented in Documentation/ABI/testing/sysfs-bus-iio.
+ Please use in_voltageY-voltageZ_filter_type for new drivers.
diff --git a/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc b/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc
index c12316dfd973..a6e400364932 100644
--- a/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc
+++ b/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc
@@ -17,7 +17,7 @@ Description: Read only. Returns the firmware version of Intel MAX10
What: /sys/bus/.../drivers/intel-m10-bmc/.../mac_address
Date: January 2021
KernelVersion: 5.12
-Contact: Peter Colberg <peter.colberg@intel.com>
+Contact: Peter Colberg <peter.colberg@altera.com>
Description: Read only. Returns the first MAC address in a block
of sequential MAC addresses assigned to the board
that is managed by the Intel MAX10 BMC. It is stored in
@@ -28,7 +28,7 @@ Description: Read only. Returns the first MAC address in a block
What: /sys/bus/.../drivers/intel-m10-bmc/.../mac_count
Date: January 2021
KernelVersion: 5.12
-Contact: Peter Colberg <peter.colberg@intel.com>
+Contact: Peter Colberg <peter.colberg@altera.com>
Description: Read only. Returns the number of sequential MAC
addresses assigned to the board managed by the Intel
MAX10 BMC. This value is stored in FLASH and is mirrored
diff --git a/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc-sec-update b/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc-sec-update
index 9051695d2211..c69fd3894eb4 100644
--- a/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc-sec-update
+++ b/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc-sec-update
@@ -1,7 +1,7 @@
What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/sr_root_entry_hash
Date: Sep 2022
KernelVersion: 5.20
-Contact: Peter Colberg <peter.colberg@intel.com>
+Contact: Peter Colberg <peter.colberg@altera.com>
Description: Read only. Returns the root entry hash for the static
region if one is programmed, else it returns the
string: "hash not programmed". This file is only
@@ -11,7 +11,7 @@ Description: Read only. Returns the root entry hash for the static
What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/pr_root_entry_hash
Date: Sep 2022
KernelVersion: 5.20
-Contact: Peter Colberg <peter.colberg@intel.com>
+Contact: Peter Colberg <peter.colberg@altera.com>
Description: Read only. Returns the root entry hash for the partial
reconfiguration region if one is programmed, else it
returns the string: "hash not programmed". This file
@@ -21,7 +21,7 @@ Description: Read only. Returns the root entry hash for the partial
What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/bmc_root_entry_hash
Date: Sep 2022
KernelVersion: 5.20
-Contact: Peter Colberg <peter.colberg@intel.com>
+Contact: Peter Colberg <peter.colberg@altera.com>
Description: Read only. Returns the root entry hash for the BMC image
if one is programmed, else it returns the string:
"hash not programmed". This file is only visible if the
@@ -31,7 +31,7 @@ Description: Read only. Returns the root entry hash for the BMC image
What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/sr_canceled_csks
Date: Sep 2022
KernelVersion: 5.20
-Contact: Peter Colberg <peter.colberg@intel.com>
+Contact: Peter Colberg <peter.colberg@altera.com>
Description: Read only. Returns a list of indices for canceled code
signing keys for the static region. The standard bitmap
list format is used (e.g. "1,2-6,9").
@@ -39,7 +39,7 @@ Description: Read only. Returns a list of indices for canceled code
What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/pr_canceled_csks
Date: Sep 2022
KernelVersion: 5.20
-Contact: Peter Colberg <peter.colberg@intel.com>
+Contact: Peter Colberg <peter.colberg@altera.com>
Description: Read only. Returns a list of indices for canceled code
signing keys for the partial reconfiguration region. The
standard bitmap list format is used (e.g. "1,2-6,9").
@@ -47,7 +47,7 @@ Description: Read only. Returns a list of indices for canceled code
What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/bmc_canceled_csks
Date: Sep 2022
KernelVersion: 5.20
-Contact: Peter Colberg <peter.colberg@intel.com>
+Contact: Peter Colberg <peter.colberg@altera.com>
Description: Read only. Returns a list of indices for canceled code
signing keys for the BMC. The standard bitmap list format
is used (e.g. "1,2-6,9").
@@ -55,7 +55,7 @@ Description: Read only. Returns a list of indices for canceled code
What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/flash_count
Date: Sep 2022
KernelVersion: 5.20
-Contact: Peter Colberg <peter.colberg@intel.com>
+Contact: Peter Colberg <peter.colberg@altera.com>
Description: Read only. Returns number of times the secure update
staging area has been flashed.
Format: "%u".
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-cma b/Documentation/ABI/testing/sysfs-kernel-mm-cma
index dfd755201142..aaf2a5d8b13b 100644
--- a/Documentation/ABI/testing/sysfs-kernel-mm-cma
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-cma
@@ -29,3 +29,16 @@ Date: Feb 2024
Contact: Anshuman Khandual <anshuman.khandual@arm.com>
Description:
the number of pages CMA API succeeded to release
+
+What: /sys/kernel/mm/cma/<cma-heap-name>/total_pages
+Date: Jun 2024
+Contact: Frank van der Linden <fvdl@google.com>
+Description:
+ The size of the CMA area in pages.
+
+What: /sys/kernel/mm/cma/<cma-heap-name>/available_pages
+Date: Jun 2024
+Contact: Frank van der Linden <fvdl@google.com>
+Description:
+ The number of pages in the CMA area that are still
+ available for CMA allocation.
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-damon b/Documentation/ABI/testing/sysfs-kernel-mm-damon
index b057eddefbfc..293197f180ad 100644
--- a/Documentation/ABI/testing/sysfs-kernel-mm-damon
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-damon
@@ -91,6 +91,36 @@ Description: Writing a value to this file sets the update interval of the
DAMON context in microseconds as the value. Reading this file
returns the value.
+What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/monitoring_attrs/intervals/intrvals_goal/access_bp
+Date: Feb 2025
+Contact: SeongJae Park <sj@kernel.org>
+Description: Writing a value to this file sets the monitoring intervals
+ auto-tuning target DAMON-observed access events ratio within
+ the given time interval (aggrs in the same directory), in bp
+ (1/10,000). Reading this file returns the value.
+
+What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/monitoring_attrs/intervals/intrvals_goal/aggrs
+Date: Feb 2025
+Contact: SeongJae Park <sj@kernel.org>
+Description: Writing a value to this file sets the time interval within which
+ to achieve the monitoring intervals auto-tuning target
+ DAMON-observed access events ratio (access_bp in the same
+ directory). Reading this file returns the value.
+
+What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/monitoring_attrs/intervals/intrvals_goal/min_sample_us
+Date: Feb 2025
+Contact: SeongJae Park <sj@kernel.org>
+Description: Writing a value to this file sets the minimum value of
+ the auto-tuned sampling interval in microseconds. Reading this
+ file returns the value.
+
+What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/monitoring_attrs/intervals/intrvals_goal/max_sample_us
+Date: Feb 2025
+Contact: SeongJae Park <sj@kernel.org>
+Description: Writing a value to this file sets the maximum value of
+ the auto-tuned sampling interval in microseconds. Reading this
+ file returns the value.
+
What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/monitoring_attrs/nr_regions/min
Date: Mar 2022
@@ -345,6 +375,20 @@ Description: If 'addr' is written to the 'type' file, writing to or reading
from this file sets or gets the end address of the address
range for the filter.
+What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/filters/<F>/min
+Date: Feb 2025
+Contact: SeongJae Park <sj@kernel.org>
+Description: If 'hugepage_size' is written to the 'type' file, writing to
+ or reading from this file sets or gets the minimum size of the
+ hugepage for the filter.
+
+What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/filters/<F>/max
+Date: Feb 2025
+Contact: SeongJae Park <sj@kernel.org>
+Description: If 'hugepage_size' is written to the 'type' file, writing to
+ or reading from this file sets or gets the maximum size of the
+ hugepage for the filter.
+
What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/filters/<F>/target_idx
Date: Dec 2022
Contact: SeongJae Park <sj@kernel.org>
@@ -365,6 +409,22 @@ Description: Writing 'Y' or 'N' to this file sets whether to allow or reject
applying the scheme's action to the memory that satisfies the
'type' and the 'matching' of the directory.
+What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/core_filters
+Date: Feb 2025
+Contact: SeongJae Park <sj@kernel.org>
+Description: Directory for DAMON core layer-handled DAMOS filters. Files
+ under this directory work the same as those of the
+ /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/filters
+ directory.
+
+What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/ops_filters
+Date: Feb 2025
+Contact: SeongJae Park <sj@kernel.org>
+Description: Directory for DAMON operations set layer-handled DAMOS filters.
+ Files under this directory work the same as those of the
+ /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/filters
+ directory.
+
What: /sys/kernel/mm/damon/admin/kdamonds/<K>/contexts/<C>/schemes/<S>/stats/nr_tried
Date: Mar 2022
Contact: SeongJae Park <sj@kernel.org>
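[Editor's note: a userspace sketch, not part of the patch, of configuring the intervals auto-tuning goal described in the entries above. The kdamond/context index 0 and all four values are arbitrary examples; the sysfs paths are taken verbatim from the entries above::]

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    /* Hypothetical helper: write one value into a DAMON sysfs file. */
    static int damon_write(const char *path, const char *val)
    {
        int fd = open(path, O_WRONLY);
        ssize_t n;

        if (fd < 0)
            return -1;
        n = write(fd, val, strlen(val));
        close(fd);
        return n == (ssize_t)strlen(val) ? 0 : -1;
    }

    #define GOAL "/sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/" \
                 "monitoring_attrs/intervals/intrvals_goal/"

    int main(void)
    {
        int err = 0;

        err |= damon_write(GOAL "access_bp", "100");        /* 1%, in bp */
        err |= damon_write(GOAL "aggrs", "100");            /* goal window */
        err |= damon_write(GOAL "min_sample_us", "5000");
        err |= damon_write(GOAL "max_sample_us", "10000000");
        return err ? 1 : 0;
    }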
diff --git a/Documentation/ABI/testing/sysfs-kernel-reboot b/Documentation/ABI/testing/sysfs-kernel-reboot
index 837330fb2511..e117aba46be0 100644
--- a/Documentation/ABI/testing/sysfs-kernel-reboot
+++ b/Documentation/ABI/testing/sysfs-kernel-reboot
@@ -30,3 +30,11 @@ KernelVersion: 5.11
Contact: Matteo Croce <mcroce@microsoft.com>
Description: Don't wait for any other CPUs on reboot and
avoid anything that could hang.
+
+What: /sys/kernel/reboot/hw_protection
+Date: April 2025
+KernelVersion: 6.15
+Contact: Ahmad Fatoum <a.fatoum@pengutronix.de>
+Description: Hardware protection action taken on critical events like
+ overtemperature or imminent voltage loss.
+ Valid values are: reboot shutdown
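[Editor's note: a minimal sketch, not part of the patch, of selecting the shutdown action via the new file; the path and both valid values come from the entry above::]

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/sys/kernel/reboot/hw_protection", O_WRONLY);

        if (fd < 0)
            return 1;
        /* Switch the hardware-protection action from reboot to shutdown. */
        if (write(fd, "shutdown", 8) != 8) {
            close(fd);
            return 1;
        }
        close(fd);
        return 0;
    }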
diff --git a/Documentation/ABI/testing/sysfs-pps-gen-tio b/Documentation/ABI/testing/sysfs-pps-gen-tio
new file mode 100644
index 000000000000..3c34ff17a335
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-pps-gen-tio
@@ -0,0 +1,6 @@
+What: /sys/class/pps-gen/pps-genx/enable
+Date: April 2025
+KernelVersion: 6.15
+Contact: Subramanian Mohan <subramanian.mohan@intel.com>
+Description:
+ Enable or disable PPS TIO generator output.
diff --git a/Documentation/RCU/whatisRCU.rst b/Documentation/RCU/whatisRCU.rst
index 1ef5784c1b84..53faeed7c190 100644
--- a/Documentation/RCU/whatisRCU.rst
+++ b/Documentation/RCU/whatisRCU.rst
@@ -971,6 +971,16 @@ unfortunately any spinlock in a ``SLAB_TYPESAFE_BY_RCU`` object must be
initialized after each and every call to kmem_cache_alloc(), which renders
reference-free spinlock acquisition completely unsafe. Therefore, when
using ``SLAB_TYPESAFE_BY_RCU``, make proper use of a reference counter.
+If using refcount_t, the specialized refcount_{add|inc}_not_zero_acquire()
+and refcount_set_release() APIs should be used to ensure correct operation
+ordering when verifying object identity and when initializing newly
+allocated objects. The acquire fence in refcount_{add|inc}_not_zero_acquire()
+ensures that identity checks happen *after* the reference count is taken.
+refcount_set_release() should be called after a newly allocated object is
+fully initialized; the release fence ensures that the new values are visible
+*before* the refcount can be successfully taken by other users. Once
+refcount_set_release() is called, the object should be considered visible
+to other tasks.
(Those willing to initialize their locks in a kmem_cache constructor
may also use locking, including cache-friendly sequence locking.)
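[Editor's note: to make the ordering above concrete, a minimal sketch, not part of the patch, of a lookup against a SLAB_TYPESAFE_BY_RCU cache. The object layout and the lookup_candidate()/put_obj() helpers are hypothetical; the two refcount APIs are the ones named above::]

    struct obj {
        refcount_t ref;
        unsigned long key;
        /* ... payload ... */
    };

    /* Lookup side: take the reference first, then verify identity. */
    rcu_read_lock();
    obj = lookup_candidate(key);        /* may point at a recycled object */
    if (obj && refcount_inc_not_zero_acquire(&obj->ref)) {
        /* Acquire fence: this identity check runs after the ref is taken. */
        if (obj->key != key) {
            put_obj(obj);               /* lost a reuse race; caller retries */
            obj = NULL;
        }
    } else {
        obj = NULL;
    }
    rcu_read_unlock();

    /* Init side: publish the object only once it is fully initialized. */
    new->key = key;
    refcount_set_release(&new->ref, 1); /* release fence: fields visible first */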
diff --git a/Documentation/admin-guide/blockdev/zram.rst b/Documentation/admin-guide/blockdev/zram.rst
index 1576fb93f06c..9bdb30901a93 100644
--- a/Documentation/admin-guide/blockdev/zram.rst
+++ b/Documentation/admin-guide/blockdev/zram.rst
@@ -54,7 +54,7 @@ The list of possible return codes:
If you use 'echo', the returned value is set by the 'echo' utility,
and, in the general case, something like::
- echo 3 > /sys/block/zram0/max_comp_streams
+ echo foo > /sys/block/zram0/comp_algorithm
if [ $? -ne 0 ]; then
handle_error
fi
@@ -73,21 +73,7 @@ This creates 4 devices: /dev/zram{0,1,2,3}
num_devices parameter is optional and tells zram how many devices should be
pre-created. Default: 1.
-2) Set max number of compression streams
-========================================
-
-Regardless of the value passed to this attribute, ZRAM will always
-allocate multiple compression streams - one per online CPU - thus
-allowing several concurrent compression operations. The number of
-allocated compression streams goes down when some of the CPUs
-become offline. There is no single-compression-stream mode anymore,
-unless you are running a UP system or have only 1 CPU online.
-
-To find out how many streams are currently available::
-
- cat /sys/block/zram0/max_comp_streams
-
-3) Select compression algorithm
+2) Select compression algorithm
===============================
Using comp_algorithm device attribute one can see available and
@@ -107,7 +93,7 @@ Examples::
For the time being, the `comp_algorithm` content shows only compression
algorithms that are supported by zram.
-4) Set compression algorithm parameters: Optional
+3) Set compression algorithm parameters: Optional
=================================================
Compression algorithms may support specific parameters which can be
@@ -138,7 +124,7 @@ better the compression ratio, it even can take negatives values for some
algorithms), for other algorithms `level` is acceleration level (the higher
the value the lower the compression ratio).
-5) Set Disksize
+4) Set Disksize
===============
Set disk size by writing the value to sysfs node 'disksize'.
@@ -158,7 +144,7 @@ There is little point creating a zram of greater than twice the size of memory
since we expect a 2:1 compression ratio. Note that zram uses about 0.1% of the
size of the disk when not in use so a huge zram is wasteful.
-6) Set memory limit: Optional
+5) Set memory limit: Optional
=============================
Set memory limit by writing the value to sysfs node 'mem_limit'.
@@ -177,7 +163,7 @@ Examples::
# To disable memory limit
echo 0 > /sys/block/zram0/mem_limit
-7) Activate
+6) Activate
===========
::
@@ -188,7 +174,7 @@ Examples::
mkfs.ext4 /dev/zram1
mount /dev/zram1 /tmp
-8) Add/remove zram devices
+7) Add/remove zram devices
==========================
zram provides a control interface, which enables dynamic (on-demand) device
@@ -208,7 +194,7 @@ execute::
echo X > /sys/class/zram-control/hot_remove
-9) Stats
+8) Stats
========
Per-device statistics are exported as various nodes under /sys/block/zram<id>/
@@ -228,8 +214,6 @@ mem_limit WO specifies the maximum amount of memory ZRAM can
writeback_limit WO specifies the maximum amount of write IO zram
can write out to backing device as 4KB unit
writeback_limit_enable RW show and set writeback_limit feature
-max_comp_streams RW the number of possible concurrent compress
- operations
comp_algorithm RW show and change the compression algorithm
algorithm_params WO setup compression algorithm parameters
compact WO trigger memory compaction
@@ -310,7 +294,7 @@ a single line of text and contains the following stats separated by whitespace:
Unit: 4K bytes
============== =============================================================
-10) Deactivate
+9) Deactivate
==============
::
@@ -318,7 +302,7 @@ a single line of text and contains the following stats separated by whitespace:
swapoff /dev/zram0
umount /dev/zram1
-11) Reset
+10) Reset
=========
Write any positive value to 'reset' sysfs node::
diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst
index 02b8206a3594..d6b1db8cc7eb 100644
--- a/Documentation/admin-guide/cgroup-v1/memory.rst
+++ b/Documentation/admin-guide/cgroup-v1/memory.rst
@@ -610,6 +610,10 @@ memory.stat file includes following statistics:
'rss + mapped_file" will give you resident set size of cgroup.
+ Note that some kernel configurations might account complete larger
+ allocations (e.g., THP) towards 'rss' and 'mapped_file', even if
+ only some, but not all, of that memory is mapped.
+
(Note: file and shmem may be shared among other cgroups. In that case,
mapped_file is accounted only when the memory cgroup is owner of page
cache.)
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index f293a13b42ed..1a16ce68a4d7 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1445,7 +1445,10 @@ The following nested keys are defined.
anon
Amount of memory used in anonymous mappings such as
- brk(), sbrk(), and mmap(MAP_ANONYMOUS)
+ brk(), sbrk(), and mmap(MAP_ANONYMOUS). Note that
+ some kernel configurations might account complete larger
+ allocations (e.g., THP) even if only some, but not all,
+ of the memory of such an allocation is still mapped.
file
Amount of memory used to cache filesystem data,
@@ -1488,7 +1491,10 @@ The following nested keys are defined.
Amount of application memory swapped out to zswap.
file_mapped
- Amount of cached filesystem data mapped with mmap()
+ Amount of cached filesystem data mapped with mmap(). Note
+ that some kernel configurations might account complete
+ larger allocations (e.g., THP) even if only some, but
+ not all, of the memory of such an allocation is mapped.
file_dirty
Amount of cached filesystem data that was modified but
@@ -1560,6 +1566,12 @@ The following nested keys are defined.
workingset_nodereclaim
Number of times a shadow node has been reclaimed
+ pswpin (npn)
+ Number of pages swapped into memory
+
+ pswpout (npn)
+ Number of pages swapped out of memory
+
pgscan (npn)
Amount of scanned pages (in an inactive LRU list)
@@ -1575,6 +1587,9 @@ The following nested keys are defined.
pgscan_khugepaged (npn)
Amount of scanned pages by khugepaged (in an inactive LRU list)
+ pgscan_proactive (npn)
+ Amount of scanned pages proactively (in an inactive LRU list)
+
pgsteal_kswapd (npn)
Amount of reclaimed pages by kswapd
@@ -1584,6 +1599,9 @@ The following nested keys are defined.
pgsteal_khugepaged (npn)
Amount of reclaimed pages by khugepaged
+ pgsteal_proactive (npn)
+ Amount of reclaimed pages proactively
+
pgfault (npn)
Total number of page faults incurred
@@ -1661,6 +1679,9 @@ The following nested keys are defined.
pgdemote_khugepaged
Number of pages demoted by khugepaged.
+ pgdemote_proactive
+ Number of pages demoted proactively.
+
hugetlb
Amount of memory used by hugetlb pages. This metric only shows
up if hugetlb usage is accounted for in memory.current (i.e.
diff --git a/Documentation/admin-guide/device-mapper/dm-crypt.rst b/Documentation/admin-guide/device-mapper/dm-crypt.rst
index 9f8139ff97d6..4467f6d4b632 100644
--- a/Documentation/admin-guide/device-mapper/dm-crypt.rst
+++ b/Documentation/admin-guide/device-mapper/dm-crypt.rst
@@ -146,6 +146,11 @@ integrity:<bytes>:<type>
integrity for the encrypted device. The additional space is then
used for storing authentication tag (and persistent IV if needed).
+integrity_key_size:<bytes>
+ Optionally set the integrity key size if it differs from the digest size.
+ It allows the use of wrapped key algorithms where the key size is
+ independent of the cryptographic key size.
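+
+ For example, the optional-parameters part of a crypt target line using
+ this together with authenticated encryption could look as below (the
+ values are illustrative only; the count precedes the options)::
+
+ 3 integrity:28:aead integrity_key_size:32 sector_size:4096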
+
sector_size:<bytes>
Use <bytes> as the encryption unit instead of 512 bytes sectors.
This option can be in range 512 - 4096 bytes and must be power of two.
diff --git a/Documentation/admin-guide/device-mapper/dm-integrity.rst b/Documentation/admin-guide/device-mapper/dm-integrity.rst
index d8a5f14d0e3c..c2e18ecc065c 100644
--- a/Documentation/admin-guide/device-mapper/dm-integrity.rst
+++ b/Documentation/admin-guide/device-mapper/dm-integrity.rst
@@ -92,6 +92,11 @@ Target arguments:
allowed. This mode is useful for data recovery if the
device cannot be activated in any of the other standard
modes.
+ I - inline mode - in this mode, dm-integrity will store integrity
+ data directly in the underlying device sectors.
+ The underlying device must have an integrity profile that
+ allows storing user integrity data and provides enough
+ space for the selected integrity tag.
5. the number of additional arguments
diff --git a/Documentation/admin-guide/device-mapper/verity.rst b/Documentation/admin-guide/device-mapper/verity.rst
index a65c1602cb23..8c3f1f967a3c 100644
--- a/Documentation/admin-guide/device-mapper/verity.rst
+++ b/Documentation/admin-guide/device-mapper/verity.rst
@@ -87,6 +87,15 @@ panic_on_corruption
Panic the device when a corrupted block is discovered. This option is
not compatible with ignore_corruption and restart_on_corruption.
+restart_on_error
+ Restart the system when an I/O error is detected.
+ This option can be combined with the restart_on_corruption option.
+
+panic_on_error
+ Panic the device when an I/O error is detected. This option is
+ not compatible with the restart_on_error option but can be combined
+ with the panic_on_corruption option.
+
ignore_zero_blocks
Do not verify blocks that are expected to contain zeroes and always return
zeroes instead. This may be useful if the partition contains unused blocks
@@ -142,8 +151,15 @@ root_hash_sig_key_desc <key_description>
already in the secondary trusted keyring.
try_verify_in_tasklet
- If verity hashes are in cache, verify data blocks in kernel tasklet instead
- of workqueue. This option can reduce IO latency.
+ If verity hashes are in cache and the IO size does not exceed the limit,
+ verify data blocks in the bottom half instead of in a workqueue. This option
+ can reduce IO latency. The size limits can be configured via
+ reduce IO latency. The size limits can be configured via
+ /sys/module/dm_verity/parameters/use_bh_bytes. The four parameters
+ correspond to limits for IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT,
+ IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE in turn.
+ For example:
+ <none>,<rt>,<be>,<idle>
+ 4096,4096,4096,4096
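+
+ The limits can also be adjusted at runtime, e.g. as below (illustrative
+ values, assuming the module parameter is writable on the running kernel)::
+
+ echo 16384,16384,16384,0 > /sys/module/dm_verity/parameters/use_bh_bytes
+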
Theory of operation
===================
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 3435a062a208..f5af86b3c4a2 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1866,7 +1866,7 @@
hpet_mmap= [X86, HPET_MMAP] Allow userspace to mmap HPET
registers. Default set by CONFIG_HPET_MMAP_DEFAULT.
- hugepages= [HW] Number of HugeTLB pages to allocate at boot.
+ hugepages= [HW,EARLY] Number of HugeTLB pages to allocate at boot.
If this follows hugepagesz (below), it specifies
the number of pages of hugepagesz to be allocated.
If this is the first HugeTLB parameter on the command
@@ -1878,15 +1878,24 @@
<node>:<integer>[,<node>:<integer>]
hugepagesz=
- [HW] The size of the HugeTLB pages. This is used in
- conjunction with hugepages (above) to allocate huge
- pages of a specific size at boot. The pair
- hugepagesz=X hugepages=Y can be specified once for
- each supported huge page size. Huge page sizes are
- architecture dependent. See also
+ [HW,EARLY] The size of the HugeTLB pages. This is
+ used in conjunction with hugepages (above) to
+ allocate huge pages of a specific size at boot. The
+ pair hugepagesz=X hugepages=Y can be specified once
+ for each supported huge page size. Huge page sizes
+ are architecture dependent. See also
Documentation/admin-guide/mm/hugetlbpage.rst.
Format: size[KMG]
+ hugepage_alloc_threads=
+ [HW] The number of threads that should be used to
+ allocate hugepages during boot. This option can be
+ used to improve system bootup time when allocating
+ a large amount of huge pages.
+ The default value is 25% of the available hardware threads.
+
+ Note that this parameter only applies to non-gigantic huge pages.
+
hugetlb_cma= [HW,CMA,EARLY] The size of a CMA area used for allocation
of gigantic hugepages. Or using node format, the size
of a CMA area per node can be specified.
@@ -1897,6 +1906,13 @@
hugepages using the CMA allocator. If enabled, the
boot-time allocation of gigantic hugepages is skipped.
+ hugetlb_cma_only=
+ [HW,CMA,EARLY] When allocating new HugeTLB pages, only
+ try to allocate from the CMA areas.
+
+ This option does nothing if hugetlb_cma= is not also
+ specified.
+
hugetlb_free_vmemmap=
[KNL] Requires CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
enabled.
@@ -1938,6 +1954,12 @@
which allow the hypervisor to 'idle' the guest
on lock contention.
+ hw_protection= [HW]
+ Format: reboot | shutdown
+
+ Hardware protection action taken on critical events like
+ overtemperature or imminent voltage loss.
+
i2c_bus= [HW] Override the default board specific I2C bus speed
or register an additional I2C bus that is not
registered from board initialization code.
@@ -7266,6 +7288,8 @@
This is just one of many ways that can clear memory. Make sure your system
keeps the content of memory across reboots before relying on this option.
+ NB: Both the mapped address and size must be page aligned for the architecture.
+
See also Documentation/trace/debugging.rst
diff --git a/Documentation/admin-guide/mm/cma_debugfs.rst b/Documentation/admin-guide/mm/cma_debugfs.rst
index 7367e6294ef6..4120e9cb0cd5 100644
--- a/Documentation/admin-guide/mm/cma_debugfs.rst
+++ b/Documentation/admin-guide/mm/cma_debugfs.rst
@@ -12,10 +12,16 @@ its CMA name like below:
The structure of the files created under that directory is as follows:
- - [RO] base_pfn: The base PFN (Page Frame Number) of the zone.
+ - [RO] base_pfn: The base PFN (Page Frame Number) of the CMA area.
+ This is the same as ranges/0/base_pfn.
- [RO] count: Amount of memory in the CMA area.
- [RO] order_per_bit: Order of pages represented by one bit.
- - [RO] bitmap: The bitmap of page states in the zone.
+ - [RO] bitmap: The bitmap of allocated pages in the area.
+ This is the same as ranges/0/bitmap.
+ - [RO] ranges/N/base_pfn: The base PFN of contiguous range N
+ in the CMA area.
+ - [RO] ranges/N/bitmap: The bit map of allocated pages in
+ range N in the CMA area.
- [WO] alloc: Allocate N pages from that CMA area. For example::
echo 5 > <debugfs>/cma/<cma_name>/alloc
diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst
index 47a44bd348ab..ced2013db3df 100644
--- a/Documentation/admin-guide/mm/damon/usage.rst
+++ b/Documentation/admin-guide/mm/damon/usage.rst
@@ -64,6 +64,7 @@ comma (",").
│ │ │ │ :ref:`0 <sysfs_context>`/avail_operations,operations
│ │ │ │ │ :ref:`monitoring_attrs <sysfs_monitoring_attrs>`/
│ │ │ │ │ │ intervals/sample_us,aggr_us,update_us
+ │ │ │ │ │ │ │ intervals_goal/access_bp,aggrs,min_sample_us,max_sample_us
│ │ │ │ │ │ nr_regions/min,max
│ │ │ │ │ :ref:`targets <sysfs_targets>`/nr_targets
│ │ │ │ │ │ :ref:`0 <sysfs_target>`/pid_target
@@ -82,8 +83,8 @@ comma (",").
│ │ │ │ │ │ │ │ :ref:`goals <sysfs_schemes_quota_goals>`/nr_goals
│ │ │ │ │ │ │ │ │ 0/target_metric,target_value,current_value
│ │ │ │ │ │ │ :ref:`watermarks <sysfs_watermarks>`/metric,interval_us,high,mid,low
- │ │ │ │ │ │ │ :ref:`filters <sysfs_filters>`/nr_filters
- │ │ │ │ │ │ │ │ 0/type,matching,allow,memcg_path,addr_start,addr_end,target_idx
+ │ │ │ │ │ │ │ :ref:`{core_,ops_,}filters <sysfs_filters>`/nr_filters
+ │ │ │ │ │ │ │ │ 0/type,matching,allow,memcg_path,addr_start,addr_end,target_idx,min,max
│ │ │ │ │ │ │ :ref:`stats <sysfs_schemes_stats>`/nr_tried,sz_tried,nr_applied,sz_applied,sz_ops_filter_passed,qt_exceeds
│ │ │ │ │ │ │ :ref:`tried_regions <sysfs_schemes_tried_regions>`/total_bytes
│ │ │ │ │ │ │ │ 0/start,end,nr_accesses,age,sz_filter_passed
@@ -132,6 +133,11 @@ Users can write below commands for the kdamond to the ``state`` file.
- ``off``: Stop running.
- ``commit``: Read the user inputs in the sysfs files except ``state`` file
again.
+- ``update_tuned_intervals``: Update the contents of the ``sample_us`` and
+  ``aggr_us`` files of the kdamond with the auto-tuning applied ``sampling
+  interval`` and ``aggregation interval``. Please refer to the
+  :ref:`intervals_goal section <damon_usage_sysfs_monitoring_intervals_goal>`
+  for more details.
- ``commit_schemes_quota_goals``: Read the DAMON-based operation schemes'
:ref:`quota goals <sysfs_schemes_quota_goals>`.
- ``update_schemes_stats``: Update the contents of stats files for each
@@ -213,6 +219,25 @@ writing to and reading from the files.
For more details about the intervals and monitoring regions range, please refer
to the Design document (:doc:`/mm/damon/design`).
+.. _damon_usage_sysfs_monitoring_intervals_goal:
+
+contexts/<N>/monitoring_attrs/intervals/intervals_goal/
+-------------------------------------------------------
+
+Under the ``intervals`` directory, a directory for automated tuning of
+``sample_us`` and ``aggr_us``, namely the ``intervals_goal`` directory, also
+exists. Under it, four files for the auto-tuning control, namely
+``access_bp``, ``aggrs``, ``min_sample_us`` and ``max_sample_us``, exist.
+Please refer to the :ref:`design document of the feature
+<damon_design_monitoring_intervals_autotuning>` for the internals of the
+tuning mechanism. Reading and writing the four files under the
+``intervals_goal`` directory shows and updates the tuning parameters that are
+described in the :ref:`design doc
+<damon_design_monitoring_intervals_autotuning>` with the same names. The
+tuning starts with the user-set ``sample_us`` and ``aggr_us``. The
+tuning-applied current values of the two intervals can be read from the
+``sample_us`` and ``aggr_us`` files after writing ``update_tuned_intervals``
+to the ``state`` file.
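+
+For example, the tuning could be configured and its results read back as
+below (values are illustrative, assuming the default sysfs layout)::
+
+    # cd /sys/kernel/mm/damon/admin/kdamonds/0
+    # cd contexts/0/monitoring_attrs/intervals/intervals_goal
+    # echo 50 > access_bp
+    # echo 3 > aggrs
+    # echo 5000 > min_sample_us
+    # echo 10000000 > max_sample_us
+    # echo update_tuned_intervals > /sys/kernel/mm/damon/admin/kdamonds/0/state
+    # cat ../sample_us ../aggr_us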
+
.. _sysfs_targets:
contexts/<N>/targets/
@@ -282,9 +307,10 @@ to ``N-1``. Each directory represents each DAMON-based operation scheme.
schemes/<N>/
------------
-In each scheme directory, five directories (``access_pattern``, ``quotas``,
-``watermarks``, ``filters``, ``stats``, and ``tried_regions``) and three files
-(``action``, ``target_nid`` and ``apply_interval``) exist.
+In each scheme directory, eight directories (``access_pattern``, ``quotas``,
+``watermarks``, ``core_filters``, ``ops_filters``, ``filters``, ``stats``, and
+``tried_regions``) and three files (``action``, ``target_nid`` and
+``apply_interval``) exist.
The ``action`` file is for setting and getting the scheme's :ref:`action
<damon_design_damos_action>`. The keywords that can be written to and read
@@ -395,33 +421,43 @@ The ``interval`` should be written in microseconds unit.
.. _sysfs_filters:
-schemes/<N>/filters/
---------------------
+schemes/<N>/{core\_,ops\_,}filters/
+-----------------------------------
-The directory for the :ref:`filters <damon_design_damos_filters>` of the given
+Directories for :ref:`filters <damon_design_damos_filters>` of the given
DAMON-based operation scheme.
-In the beginning, this directory has only one file, ``nr_filters``. Writing a
+The ``core_filters`` and ``ops_filters`` directories are for the filters
+handled by the DAMON core layer and the operations set layer, respectively.
+The ``filters`` directory can be used for installing filters regardless of
+the layer that handles them. Filters requested via ``core_filters`` and
+``ops_filters`` will be installed before those of ``filters``. All three
+directories have the same files.
+
+Use of the ``filters`` directory can make the expected evaluation order of
+the given filters somewhat confusing. Users are hence recommended to use the
+``core_filters`` and ``ops_filters`` directories instead. The ``filters``
+directory could be deprecated in the future.
+
+In the beginning, each directory has only one file, ``nr_filters``. Writing a
number (``N``) to the file creates the number of child directories named ``0``
to ``N-1``. Each directory represents each filter. The filters are evaluated
in the numeric order.
-Each filter directory contains seven files, namely ``type``, ``matching``,
-``allow``, ``memcg_path``, ``addr_start``, ``addr_end``, and ``target_idx``.
-To ``type`` file, you can write one of five special keywords: ``anon`` for
-anonymous pages, ``memcg`` for specific memory cgroup, ``young`` for young
-pages, ``addr`` for specific address range (an open-ended interval), or
-``target`` for specific DAMON monitoring target filtering. Meaning of the
-types are same to the description on the :ref:`design doc
-<damon_design_damos_filters>`.
-
-In case of the memory cgroup filtering, you can specify the memory cgroup of
-the interest by writing the path of the memory cgroup from the cgroups mount
-point to ``memcg_path`` file. In case of the address range filtering, you can
-specify the start and end address of the range to ``addr_start`` and
-``addr_end`` files, respectively. For the DAMON monitoring target filtering,
-you can specify the index of the target between the list of the DAMON context's
-monitoring targets list to ``target_idx`` file.
+Each filter directory contains nine files, namely ``type``, ``matching``,
+``allow``, ``memcg_path``, ``addr_start``, ``addr_end``, ``min``, ``max``
+and ``target_idx``. To the ``type`` file, you can write the type of the
+filter. Refer to :ref:`the design doc <damon_design_damos_filters>` for the
+available type names, their meaning, and on what layer they are handled.
+
+For the ``memcg`` type, you can specify the memory cgroup of interest by
+writing the path of the memory cgroup from the cgroups mount point to the
+``memcg_path`` file. For the ``addr`` type, you can specify the start and
+end address of the range (an open-ended interval) to the ``addr_start`` and
+``addr_end`` files, respectively. For the ``hugepage_size`` type, you can
+specify the minimum and maximum size of the range (a closed interval) to the
+``min`` and ``max`` files, respectively. For the ``target`` type, you can
+specify the index of the target in the DAMON context's monitoring targets
+list to the ``target_idx`` file.
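+
+For example, a hypothetical ``hugepage_size`` filter matching only 2 MiB
+hugepages could be set up as below (sizes in bytes)::
+
+    # echo 1 > nr_filters
+    # echo hugepage_size > 0/type
+    # echo 2097152 > 0/min
+    # echo 2097152 > 0/max
+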
You can write ``Y`` or ``N`` to ``matching`` file to specify whether the filter
is for memory that matches the ``type``. You can write ``Y`` or ``N`` to
@@ -431,6 +467,7 @@ the ``type`` and ``matching`` should be allowed or not.
For example, below restricts a DAMOS action to be applied to only non-anonymous
pages of all memory cgroups except ``/having_care_already``.::
+ # cd ops_filters/
# echo 2 > nr_filters
# # disallow anonymous pages
echo anon > 0/type
diff --git a/Documentation/admin-guide/mm/hugetlbpage.rst b/Documentation/admin-guide/mm/hugetlbpage.rst
index f34a0d798d5b..67a941903fd2 100644
--- a/Documentation/admin-guide/mm/hugetlbpage.rst
+++ b/Documentation/admin-guide/mm/hugetlbpage.rst
@@ -145,7 +145,17 @@ hugepages
It will allocate 1 2M hugepage on node0 and 2 2M hugepages on node1.
If the node number is invalid, the parameter will be ignored.
+hugepage_alloc_threads
+ Specify the number of threads that should be used to allocate hugepages
+ during boot. This parameter can be used to improve system bootup time
+ when allocating a large amount of huge pages.
+ The default value is 25% of the available hardware threads.
+ Example to use 8 allocation threads::
+
+ hugepage_alloc_threads=8
+
+ Note that this parameter only applies to non-gigantic huge pages.
default_hugepagesz
Specify the default huge page size. This parameter can
only be specified once on the command line. default_hugepagesz can
diff --git a/Documentation/admin-guide/mm/pagemap.rst b/Documentation/admin-guide/mm/pagemap.rst
index caba0f52dd36..afce291649dd 100644
--- a/Documentation/admin-guide/mm/pagemap.rst
+++ b/Documentation/admin-guide/mm/pagemap.rst
@@ -21,7 +21,8 @@ There are four components to pagemap:
* Bit 56 page exclusively mapped (since 4.2)
* Bit 57 pte is uffd-wp write-protected (since 5.13) (see
Documentation/admin-guide/mm/userfaultfd.rst)
- * Bits 58-60 zero
+ * Bit 58 pte is a guard region (since 6.15) (see madvise(2) man page)
+ * Bits 59-60 zero
* Bit 61 page is file-page or shared-anon (since 3.5)
* Bit 62 page swapped
* Bit 63 page present
@@ -37,12 +38,28 @@ There are four components to pagemap:
precisely which pages are mapped (or in swap) and comparing mapped
pages between processes.
+ Traditionally, bit 56 indicates that a page is mapped exactly once and bit
+ 56 is clear when a page is mapped multiple times, even when mapped in the
+ same process multiple times. In some kernel configurations, the semantics
+ for pages part of a larger allocation (e.g., THP) can differ: bit 56 is set
+ if all pages part of the corresponding large allocation are *certainly*
+ mapped in the same process, even if the page is mapped multiple times in that
+ process. Bit 56 is clear when any page of the larger allocation
+ is *maybe* mapped in a different process. In some cases, a large allocation
+ might be treated as "maybe mapped by multiple processes" even though this
+ is no longer the case.
+
Efficient users of this interface will use ``/proc/pid/maps`` to
determine which areas of memory are actually mapped and llseek to
skip over unmapped regions.
* ``/proc/kpagecount``. This file contains a 64-bit count of the number of
- times each page is mapped, indexed by PFN.
+ times each page is mapped, indexed by PFN. Some kernel configurations do
+ not track the precise number of times a page that is part of a larger
+ allocation (e.g., THP) is mapped. In these configurations, the average
+ number of mappings per page in this larger allocation is returned instead.
+ However, if any page of the large allocation is mapped, the returned value
+ will be at least 1.
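+
+ For example, the entry for a single PFN can be read as below (a sketch;
+ the PFN is illustrative, each entry is 8 bytes, and root is required)::
+
+	# dd if=/proc/kpagecount bs=8 skip=$((0x1200)) count=1 2>/dev/null | od -An -td8
+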
The page-types tool in the tools/mm directory can be used to query the
number of times a page is mapped.
diff --git a/Documentation/admin-guide/mm/zswap.rst b/Documentation/admin-guide/mm/zswap.rst
index 3598dcd7dbe7..fd3370aa43fe 100644
--- a/Documentation/admin-guide/mm/zswap.rst
+++ b/Documentation/admin-guide/mm/zswap.rst
@@ -60,15 +60,13 @@ accessed. The compressed memory pool grows on demand and shrinks as compressed
pages are freed. The pool is not preallocated. By default, a zpool
of type selected in ``CONFIG_ZSWAP_ZPOOL_DEFAULT`` Kconfig option is created,
but it can be overridden at boot time by setting the ``zpool`` attribute,
-e.g. ``zswap.zpool=zbud``. It can also be changed at runtime using the sysfs
+e.g. ``zswap.zpool=zsmalloc``. It can also be changed at runtime using the sysfs
``zpool`` attribute, e.g.::
- echo zbud > /sys/module/zswap/parameters/zpool
+ echo zsmalloc > /sys/module/zswap/parameters/zpool
-The zbud type zpool allocates exactly 1 page to store 2 compressed pages, which
-means the compression ratio will always be 2:1 or worse (because of half-full
-zbud pages). The zsmalloc type zpool has a more complex compressed page
-storage method, and it can achieve greater storage densities.
+The zsmalloc type zpool has a complex compressed page storage method, and it
+can achieve great storage densities.
When a swap page is passed from swapout to zswap, zswap maintains a mapping
of the swap entry, a combination of the swap type and swap offset, to the zpool
diff --git a/Documentation/admin-guide/sysctl/fs.rst b/Documentation/admin-guide/sysctl/fs.rst
index 08e89e031714..6c54718c9d04 100644
--- a/Documentation/admin-guide/sysctl/fs.rst
+++ b/Documentation/admin-guide/sysctl/fs.rst
@@ -347,3 +347,28 @@ filesystems:
``/proc/sys/fs/fuse/max_pages_limit`` is a read/write file for
setting/getting the maximum number of pages that can be used for servicing
requests in FUSE.
+
+``/proc/sys/fs/fuse/default_request_timeout`` is a read/write file for
+setting/getting the default timeout (in seconds) for a fuse server to
+reply to a kernel-issued request when the server did not specify a
+timeout at mount. If the server set a timeout, then
+default_request_timeout will be ignored. The default value of
+"default_request_timeout" is 0, which indicates no default timeout.
+The maximum value that can be set is 65535.
+
+``/proc/sys/fs/fuse/max_request_timeout`` is a read/write file for
+setting/getting the maximum timeout (in seconds) for a fuse server to
+reply to a kernel-issued request. A value greater than 0 automatically opts
+the server into a timeout that will be set to at most "max_request_timeout",
+even if the server did not specify a timeout and default_request_timeout is
+set to 0. If max_request_timeout is greater than 0 and the server set a timeout
+greater than max_request_timeout or default_request_timeout is set to a value
+greater than max_request_timeout, the system will use max_request_timeout as the
+timeout. 0 indicates no max request timeout. The maximum value that can be set
+is 65535.
+
+For timeouts, if the server does not respond to the request by the time
+the set timeout elapses, then the connection to the fuse server will be aborted.
+Please note that the timeouts are not 100% precise (e.g. you may set 60 seconds
+but the timeout may kick in after 70 seconds). The upper margin of error for the
+timeout is roughly FUSE_TIMEOUT_TIMER_FREQ seconds.
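+
+For example, a hypothetical policy of a 10-minute default timeout capped at
+30 minutes could be set as below::
+
+    echo 600 > /proc/sys/fs/fuse/default_request_timeout
+    echo 1800 > /proc/sys/fs/fuse/max_request_timeout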
diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
index f48eaa98d22d..8290177b4f75 100644
--- a/Documentation/admin-guide/sysctl/vm.rst
+++ b/Documentation/admin-guide/sysctl/vm.rst
@@ -28,6 +28,7 @@ Currently, these files are in /proc/sys/vm:
- compact_memory
- compaction_proactiveness
- compact_unevictable_allowed
+- defrag_mode
- dirty_background_bytes
- dirty_background_ratio
- dirty_bytes
@@ -145,6 +146,14 @@ On CONFIG_PREEMPT_RT the default value is 0 in order to avoid a page fault, due
to compaction, which would block the task from becoming active until the fault
is resolved.
+defrag_mode
+===========
+
+When set to 1, the page allocator tries harder to avoid fragmentation
+and maintain the ability to produce huge pages / higher-order pages.
+
+It is recommended to enable this right after boot, as fragmentation,
+once it has occurred, can be long-lasting or even permanent.
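+
+For example, it can be enabled as below (equivalently, via a persistent
+``vm.defrag_mode = 1`` sysctl entry)::
+
+    echo 1 > /proc/sys/vm/defrag_mode
+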
dirty_background_bytes
======================
diff --git a/Documentation/arch/arm64/ptdump.rst b/Documentation/arch/arm64/ptdump.rst
index 5dcfc5d7cddf..51eb902ba41a 100644
--- a/Documentation/arch/arm64/ptdump.rst
+++ b/Documentation/arch/arm64/ptdump.rst
@@ -22,8 +22,6 @@ offlining of memory being accessed by the ptdump code.
In order to dump the kernel page tables, enable the following
configurations and mount debugfs::
- CONFIG_GENERIC_PTDUMP=y
- CONFIG_PTDUMP_CORE=y
CONFIG_PTDUMP_DEBUGFS=y
mount -t debugfs nodev /sys/kernel/debug
diff --git a/Documentation/arch/powerpc/cxl.rst b/Documentation/arch/powerpc/cxl.rst
deleted file mode 100644
index 778adda740d2..000000000000
--- a/Documentation/arch/powerpc/cxl.rst
+++ /dev/null
@@ -1,470 +0,0 @@
-====================================
-Coherent Accelerator Interface (CXL)
-====================================
-
-Introduction
-============
-
- The coherent accelerator interface is designed to allow the
- coherent connection of accelerators (FPGAs and other devices) to a
- POWER system. These devices need to adhere to the Coherent
- Accelerator Interface Architecture (CAIA).
-
- IBM refers to this as the Coherent Accelerator Processor Interface
- or CAPI. In the kernel it's referred to by the name CXL to avoid
- confusion with the ISDN CAPI subsystem.
-
- Coherent in this context means that the accelerator and CPUs can
- both access system memory directly and with the same effective
- addresses.
-
- **This driver is deprecated and will be removed in a future release.**
-
-Hardware overview
-=================
-
- ::
-
- POWER8/9 FPGA
- +----------+ +---------+
- | | | |
- | CPU | | AFU |
- | | | |
- | | | |
- | | | |
- +----------+ +---------+
- | PHB | | |
- | +------+ | PSL |
- | | CAPP |<------>| |
- +---+------+ PCIE +---------+
-
- The POWER8/9 chip has a Coherently Attached Processor Proxy (CAPP)
- unit which is part of the PCIe Host Bridge (PHB). This is managed
- by Linux by calls into OPAL. Linux doesn't directly program the
- CAPP.
-
- The FPGA (or coherently attached device) consists of two parts.
- The POWER Service Layer (PSL) and the Accelerator Function Unit
- (AFU). The AFU is used to implement specific functionality behind
- the PSL. The PSL, among other things, provides memory address
- translation services to allow each AFU direct access to userspace
- memory.
-
- The AFU is the core part of the accelerator (eg. the compression,
- crypto etc function). The kernel has no knowledge of the function
- of the AFU. Only userspace interacts directly with the AFU.
-
- The PSL provides the translation and interrupt services that the
- AFU needs. This is what the kernel interacts with. For example, if
- the AFU needs to read a particular effective address, it sends
- that address to the PSL, the PSL then translates it, fetches the
- data from memory and returns it to the AFU. If the PSL has a
- translation miss, it interrupts the kernel and the kernel services
- the fault. The context to which this fault is serviced is based on
- who owns that acceleration function.
-
- - POWER8 and PSL Version 8 are compliant to the CAIA Version 1.0.
- - POWER9 and PSL Version 9 are compliant to the CAIA Version 2.0.
-
- This PSL Version 9 provides new features such as:
-
- * Interaction with the nest MMU on the P9 chip.
- * Native DMA support.
- * Supports sending ASB_Notify messages for host thread wakeup.
- * Supports Atomic operations.
- * etc.
-
- Cards with a PSL9 won't work on a POWER8 system and cards with a
- PSL8 won't work on a POWER9 system.
-
-AFU Modes
-=========
-
- There are two programming modes supported by the AFU. Dedicated
- and AFU directed. AFU may support one or both modes.
-
- When using dedicated mode only one MMU context is supported. In
- this mode, only one userspace process can use the accelerator at
- time.
-
- When using AFU directed mode, up to 16K simultaneous contexts can
- be supported. This means up to 16K simultaneous userspace
- applications may use the accelerator (although specific AFUs may
- support fewer). In this mode, the AFU sends a 16 bit context ID
- with each of its requests. This tells the PSL which context is
- associated with each operation. If the PSL can't translate an
- operation, the ID can also be accessed by the kernel so it can
- determine the userspace context associated with an operation.
-
-
-MMIO space
-==========
-
- A portion of the accelerator MMIO space can be directly mapped
- from the AFU to userspace. Either the whole space can be mapped or
- just a per context portion. The hardware is self describing, hence
- the kernel can determine the offset and size of the per context
- portion.
-
-
-Interrupts
-==========
-
- AFUs may generate interrupts that are destined for userspace. These
- are received by the kernel as hardware interrupts and passed onto
- userspace by a read syscall documented below.
-
- Data storage faults and error interrupts are handled by the kernel
- driver.
-
-
-Work Element Descriptor (WED)
-=============================
-
- The WED is a 64-bit parameter passed to the AFU when a context is
- started. Its format is up to the AFU hence the kernel has no
- knowledge of what it represents. Typically it will be the
- effective address of a work queue or status block where the AFU
- and userspace can share control and status information.
-
-
-
-
-User API
-========
-
-1. AFU character devices
-^^^^^^^^^^^^^^^^^^^^^^^^
-
- For AFUs operating in AFU directed mode, two character device
- files will be created. /dev/cxl/afu0.0m will correspond to a
- master context and /dev/cxl/afu0.0s will correspond to a slave
- context. Master contexts have access to the full MMIO space an
- AFU provides. Slave contexts have access to only the per process
- MMIO space an AFU provides.
-
- For AFUs operating in dedicated process mode, the driver will
- only create a single character device per AFU called
- /dev/cxl/afu0.0d. This will have access to the entire MMIO space
- that the AFU provides (like master contexts in AFU directed).
-
- The types described below are defined in include/uapi/misc/cxl.h
-
- The following file operations are supported on both slave and
- master devices.
-
- A userspace library libcxl is available here:
-
- https://github.com/ibm-capi/libcxl
-
- This provides a C interface to this kernel API.
-
-open
-----
-
- Opens the device and allocates a file descriptor to be used with
- the rest of the API.
-
- A dedicated mode AFU only has one context and only allows the
- device to be opened once.
-
- An AFU directed mode AFU can have many contexts, the device can be
- opened once for each context that is available.
-
- When all available contexts are allocated the open call will fail
- and return -ENOSPC.
-
- Note:
- IRQs need to be allocated for each context, which may limit
- the number of contexts that can be created, and therefore
- how many times the device can be opened. The POWER8 CAPP
- supports 2040 IRQs and 3 are used by the kernel, so 2037 are
- left. If 1 IRQ is needed per context, then only 2037
- contexts can be allocated. If 4 IRQs are needed per context,
- then only 2037/4 = 509 contexts can be allocated.
-
-
-ioctl
------
-
- CXL_IOCTL_START_WORK:
- Starts the AFU context and associates it with the current
- process. Once this ioctl is successfully executed, all memory
- mapped into this process is accessible to this AFU context
- using the same effective addresses. No additional calls are
- required to map/unmap memory. The AFU memory context will be
- updated as userspace allocates and frees memory. This ioctl
- returns once the AFU context is started.
-
- Takes a pointer to a struct cxl_ioctl_start_work
-
- ::
-
- struct cxl_ioctl_start_work {
- __u64 flags;
- __u64 work_element_descriptor;
- __u64 amr;
- __s16 num_interrupts;
- __s16 reserved1;
- __s32 reserved2;
- __u64 reserved3;
- __u64 reserved4;
- __u64 reserved5;
- __u64 reserved6;
- };
-
- flags:
- Indicates which optional fields in the structure are
- valid.
-
- work_element_descriptor:
- The Work Element Descriptor (WED) is a 64-bit argument
- defined by the AFU. Typically this is an effective
- address pointing to an AFU specific structure
- describing what work to perform.
-
- amr:
- Authority Mask Register (AMR), same as the powerpc
- AMR. This field is only used by the kernel when the
- corresponding CXL_START_WORK_AMR value is specified in
- flags. If not specified the kernel will use a default
- value of 0.
-
- num_interrupts:
- Number of userspace interrupts to request. This field
- is only used by the kernel when the corresponding
- CXL_START_WORK_NUM_IRQS value is specified in flags.
- If not specified the minimum number required by the
- AFU will be allocated. The min and max number can be
- obtained from sysfs.
-
- reserved fields:
- For ABI padding and future extensions
-
- CXL_IOCTL_GET_PROCESS_ELEMENT:
- Get the current context id, also known as the process element.
- The value is returned from the kernel as a __u32.
-
-
-mmap
-----
-
- An AFU may have an MMIO space to facilitate communication with the
- AFU. If it does, the MMIO space can be accessed via mmap. The size
- and contents of this area are specific to the particular AFU. The
- size can be discovered via sysfs.
-
- In AFU directed mode, master contexts are allowed to map all of
- the MMIO space and slave contexts are allowed to only map the per
- process MMIO space associated with the context. In dedicated
- process mode the entire MMIO space can always be mapped.
-
- This mmap call must be done after the START_WORK ioctl.
-
- Care should be taken when accessing MMIO space. Only 32 and 64-bit
- accesses are supported by POWER8. Also, the AFU will be designed
- with a specific endianness, so all MMIO accesses should consider
- endianness (recommend endian(3) variants like: le64toh(),
- be64toh() etc). These endian issues equally apply to shared memory
- queues the WED may describe.
-
-
-read
-----
-
- Reads events from the AFU. Blocks if no events are pending
- (unless O_NONBLOCK is supplied). Returns -EIO in the case of an
- unrecoverable error or if the card is removed.
-
- read() will always return an integral number of events.
-
- The buffer passed to read() must be at least 4K bytes.
-
- The result of the read will be a buffer of one or more events,
- each event is of type struct cxl_event, of varying size::
-
- struct cxl_event {
- struct cxl_event_header header;
- union {
- struct cxl_event_afu_interrupt irq;
- struct cxl_event_data_storage fault;
- struct cxl_event_afu_error afu_error;
- };
- };
-
- The struct cxl_event_header is defined as
-
- ::
-
- struct cxl_event_header {
- __u16 type;
- __u16 size;
- __u16 process_element;
- __u16 reserved1;
- };
-
- type:
- This defines the type of event. The type determines how
- the rest of the event is structured. These types are
- described below and defined by enum cxl_event_type.
-
- size:
- This is the size of the event in bytes including the
- struct cxl_event_header. The start of the next event can
- be found at this offset from the start of the current
- event.
-
- process_element:
- Context ID of the event.
-
- reserved field:
- For future extensions and padding.
-
- If the event type is CXL_EVENT_AFU_INTERRUPT then the event
- structure is defined as
-
- ::
-
- struct cxl_event_afu_interrupt {
- __u16 flags;
- __u16 irq; /* Raised AFU interrupt number */
- __u32 reserved1;
- };
-
- flags:
- These flags indicate which optional fields are present
- in this struct. Currently all fields are mandatory.
-
- irq:
- The IRQ number sent by the AFU.
-
- reserved field:
- For future extensions and padding.
-
- If the event type is CXL_EVENT_DATA_STORAGE then the event
- structure is defined as
-
- ::
-
- struct cxl_event_data_storage {
- __u16 flags;
- __u16 reserved1;
- __u32 reserved2;
- __u64 addr;
- __u64 dsisr;
- __u64 reserved3;
- };
-
- flags:
- These flags indicate which optional fields are present in
- this struct. Currently all fields are mandatory.
-
- address:
- The address that the AFU unsuccessfully attempted to
- access. Valid accesses will be handled transparently by the
- kernel but invalid accesses will generate this event.
-
- dsisr:
- This field gives information on the type of fault. It is a
- copy of the DSISR from the PSL hardware when the address
- fault occurred. The form of the DSISR is as defined in the
- CAIA.
-
- reserved fields:
- For future extensions
-
- If the event type is CXL_EVENT_AFU_ERROR then the event structure
- is defined as
-
- ::
-
- struct cxl_event_afu_error {
- __u16 flags;
- __u16 reserved1;
- __u32 reserved2;
- __u64 error;
- };
-
- flags:
- These flags indicate which optional fields are present in
- this struct. Currently all fields are Mandatory.
-
- error:
- Error status from the AFU. Defined by the AFU.
-
- reserved fields:
- For future extensions and padding
-
-
-2. Card character device (powerVM guest only)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
- In a powerVM guest, an extra character device is created for the
- card. The device is only used to write (flash) a new image on the
- FPGA accelerator. Once the image is written and verified, the
- device tree is updated and the card is reset to reload the updated
- image.
-
-open
-----
-
- Opens the device and allocates a file descriptor to be used with
- the rest of the API. The device can only be opened once.
-
-ioctl
------
-
-CXL_IOCTL_DOWNLOAD_IMAGE / CXL_IOCTL_VALIDATE_IMAGE:
- Starts and controls flashing a new FPGA image. Partial
- reconfiguration is not supported (yet), so the image must contain
- a copy of the PSL and AFU(s). Since an image can be quite large,
- the caller may have to iterate, splitting the image in smaller
- chunks.
-
- Takes a pointer to a struct cxl_adapter_image::
-
- struct cxl_adapter_image {
- __u64 flags;
- __u64 data;
- __u64 len_data;
- __u64 len_image;
- __u64 reserved1;
- __u64 reserved2;
- __u64 reserved3;
- __u64 reserved4;
- };
-
- flags:
- These flags indicate which optional fields are present in
- this struct. Currently all fields are mandatory.
-
- data:
- Pointer to a buffer with part of the image to write to the
- card.
-
- len_data:
- Size of the buffer pointed to by data.
-
- len_image:
- Full size of the image.
-
-
-Sysfs Class
-===========
-
- A cxl sysfs class is added under /sys/class/cxl to facilitate
- enumeration and tuning of the accelerators. Its layout is
- described in Documentation/ABI/obsolete/sysfs-class-cxl
-
-
-Udev rules
-==========
-
- The following udev rules could be used to create a symlink to the
- most logical chardev to use in any programming mode (afuX.Yd for
- dedicated, afuX.Ys for afu directed), since the API is virtually
- identical for each::
-
- SUBSYSTEM=="cxl", ATTRS{mode}=="dedicated_process", SYMLINK="cxl/%b"
- SUBSYSTEM=="cxl", ATTRS{mode}=="afu_directed", \
- KERNEL=="afu[0-9]*.[0-9]*s", SYMLINK="cxl/%b"
diff --git a/Documentation/arch/powerpc/index.rst b/Documentation/arch/powerpc/index.rst
index 995268530f21..0560cbae5fa1 100644
--- a/Documentation/arch/powerpc/index.rst
+++ b/Documentation/arch/powerpc/index.rst
@@ -12,7 +12,6 @@ powerpc
bootwrapper
cpu_families
cpu_features
- cxl
dawr-power9
dexcr
dscr
diff --git a/Documentation/block/ublk.rst b/Documentation/block/ublk.rst
index 1e0e7358e14a..854f823b46c2 100644
--- a/Documentation/block/ublk.rst
+++ b/Documentation/block/ublk.rst
@@ -309,18 +309,35 @@ with specified IO tag in the command data:
``UBLK_IO_COMMIT_AND_FETCH_REQ`` to the server, ublkdrv needs to copy
the server buffer (pages) read to the IO request pages.
-Future development
-==================
-
Zero copy
---------
-Zero copy is a generic requirement for nbd, fuse or similar drivers. A
-problem [#xiaoguang]_ Xiaoguang mentioned is that pages mapped to userspace
-can't be remapped any more in kernel with existing mm interfaces. This can
-occurs when destining direct IO to ``/dev/ublkb*``. Also, he reported that
-big requests (IO size >= 256 KB) may benefit a lot from zero copy.
-
+ublk zero copy relies on io_uring's fixed kernel buffers, which provide
+two APIs: `io_buffer_register_bvec()` and `io_buffer_unregister_bvec()`.
+
+ublk adds the IO command `UBLK_IO_REGISTER_IO_BUF`, which calls
+`io_buffer_register_bvec()` so that the ublk server can register a client
+request buffer in the io_uring buffer table; the ublk server can then
+submit io_uring IOs with the registered buffer index. The IO command
+`UBLK_IO_UNREGISTER_IO_BUF` calls `io_buffer_unregister_bvec()` to
+unregister the buffer. The buffer is guaranteed to be live between the
+calls to `io_buffer_register_bvec()` and `io_buffer_unregister_bvec()`.
+Any io_uring operation which supports this kind of kernel buffer will
+grab one reference on the buffer until the operation is completed.
+
+A ublk server implementing zero copy or user copy has to have
+CAP_SYS_ADMIN and be trusted, because it is the ublk server's
+responsibility to make sure the IO buffer is filled with data when
+handling a READ command, and the result it returns to the ublk driver
+has to match the number of bytes filled into the IO buffer. Otherwise,
+an uninitialized kernel IO buffer would be exposed to the client
+application.
+
+The ublk server needs to align the parameter of `struct ublk_param_dma_align`
+with its backend for zero copy to work correctly.
+
+To reach the best IO performance, the ublk server should align its
+segment parameter of `struct ublk_param_segment` with its backend to
+avoid unnecessary IO splitting, which usually hurts io_uring performance.
References
==========
@@ -332,5 +349,3 @@ References
.. [#userspace_nbdublk] https://gitlab.com/rwmjones/libnbd/-/tree/nbdublk
.. [#userspace_readme] https://github.com/ming1/ubdsrv/blob/master/README
-
-.. [#xiaoguang] https://lore.kernel.org/linux-block/YoOr6jBfgVm8GvWg@stefanha-x1.localdomain/
diff --git a/Documentation/core-api/refcount-vs-atomic.rst b/Documentation/core-api/refcount-vs-atomic.rst
index 79a009ce11df..94e628c1eb49 100644
--- a/Documentation/core-api/refcount-vs-atomic.rst
+++ b/Documentation/core-api/refcount-vs-atomic.rst
@@ -86,7 +86,19 @@ Memory ordering guarantee changes:
* none (both fully unordered)
-case 2) - increment-based ops that return no value
+case 2) - non-"Read/Modify/Write" (RMW) ops with release ordering
+-----------------------------------------------------------------
+
+Function changes:
+
+ * atomic_set_release() --> refcount_set_release()
+
+Memory ordering guarantee changes:
+
+ * none (both provide RELEASE ordering)
+
+
+case 3) - increment-based ops that return no value
--------------------------------------------------
Function changes:
@@ -98,7 +110,7 @@ Memory ordering guarantee changes:
* none (both fully unordered)
-case 3) - decrement-based RMW ops that return no value
+case 4) - decrement-based RMW ops that return no value
------------------------------------------------------
Function changes:
@@ -110,7 +122,7 @@ Memory ordering guarantee changes:
* fully unordered --> RELEASE ordering
-case 4) - increment-based RMW ops that return a value
+case 5) - increment-based RMW ops that return a value
-----------------------------------------------------
Function changes:
@@ -126,7 +138,20 @@ Memory ordering guarantees changes:
result of obtaining pointer to the object!
-case 5) - generic dec/sub decrement-based RMW ops that return a value
+case 6) - increment-based RMW ops with acquire ordering that return a value
+---------------------------------------------------------------------------
+
+Function changes:
+
+ * atomic_inc_not_zero() --> refcount_inc_not_zero_acquire()
+ * no atomic counterpart --> refcount_add_not_zero_acquire()
+
+Memory ordering guarantees changes:
+
+ * fully ordered --> ACQUIRE ordering on success
+
+
+case 7) - generic dec/sub decrement-based RMW ops that return a value
---------------------------------------------------------------------
Function changes:
@@ -139,7 +164,7 @@ Memory ordering guarantees changes:
* fully ordered --> RELEASE ordering + ACQUIRE ordering on success
-case 6) other decrement-based RMW ops that return a value
+case 8) other decrement-based RMW ops that return a value
---------------------------------------------------------
Function changes:
@@ -154,7 +179,7 @@ Memory ordering guarantees changes:
.. note:: atomic_add_unless() only provides full order on success.
-case 7) - lock-based RMW
+case 9) - lock-based RMW
------------------------
Function changes:
diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst
index f6a3eef4fe7f..c6c91cbd0c3c 100644
--- a/Documentation/core-api/xarray.rst
+++ b/Documentation/core-api/xarray.rst
@@ -489,7 +489,19 @@ Storing ``NULL`` into any index of a multi-index entry will set the
entry at every index to ``NULL`` and dissolve the tie. A multi-index
entry can be split into entries occupying smaller ranges by calling
xas_split_alloc() without the xa_lock held, followed by taking the lock
-and calling xas_split().
+and calling xas_split(), or by calling xas_try_split() with the xa_lock held.
+The difference between xas_split_alloc()+xas_split() and xas_try_split() is
+that xas_split_alloc() + xas_split() split the entry from the original
+order to the new order in one shot uniformly, whereas xas_try_split()
+iteratively splits the entry containing the index non-uniformly.
+For example, to split an order-9 entry, which takes 2^(9-6)=8 slots,
+assuming ``XA_CHUNK_SHIFT`` is 6, xas_split_alloc() + xas_split() need
+8 xa_nodes. xas_try_split() splits the order-9 entry into
+2 order-8 entries, then splits one order-8 entry, based on the given index,
+into 2 order-7 entries, ..., and splits one order-1 entry into 2 order-0
+entries. When splitting the order-6 entry and a new xa_node is needed,
+xas_try_split() will try to allocate one if possible. As a result,
+xas_try_split() would only need 1 xa_node instead of 8.
Functions and structures
========================
diff --git a/Documentation/devicetree/bindings/arm/arm,coresight-tmc.yaml b/Documentation/devicetree/bindings/arm/arm,coresight-tmc.yaml
index cb8dceaca70e..4787d7c6bac2 100644
--- a/Documentation/devicetree/bindings/arm/arm,coresight-tmc.yaml
+++ b/Documentation/devicetree/bindings/arm/arm,coresight-tmc.yaml
@@ -101,6 +101,29 @@ properties:
and ETF configurations.
$ref: /schemas/graph.yaml#/properties/port
+ memory-region:
+ items:
+ - description: Reserved trace buffer memory for ETR and ETF sinks.
+ For ETR, this reserved memory region is used for trace data capture.
+ Same region is used for trace data retention as well after a panic
+ or watchdog reset.
+ This reserved memory region is used as trace buffer or used for trace
+ data retention only if specifically selected by the user in sysfs
+ interface.
+ The default memory usage models for ETR in sysfs/perf modes are
+ otherwise unaltered.
+
+ For ETF, this reserved memory region is used by default for
+ retention of trace data synced from internal SRAM after a panic
+ or watchdog reset.
+ - description: Reserved meta data memory. Used for ETR and ETF sinks
+ for storing metadata.
+
+ memory-region-names:
+ items:
+ - const: tracedata
+ - const: metadata
+
required:
- compatible
- reg
@@ -115,6 +138,9 @@ examples:
etr@20070000 {
compatible = "arm,coresight-tmc", "arm,primecell";
reg = <0x20070000 0x1000>;
+ memory-region = <&etr_trace_mem_reserved>,
+ <&etr_mdata_mem_reserved>;
+ memory-region-names = "tracedata", "metadata";
clocks = <&oscclk6a>;
clock-names = "apb_pclk";
diff --git a/Documentation/devicetree/bindings/arm/qcom,coresight-ctcu.yaml b/Documentation/devicetree/bindings/arm/qcom,coresight-ctcu.yaml
new file mode 100644
index 000000000000..843b52eaf872
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/qcom,coresight-ctcu.yaml
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/qcom,coresight-ctcu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: CoreSight TMC Control Unit
+
+maintainers:
+ - Yuanfang Zhang <quic_yuanfang@quicinc.com>
+ - Mao Jinlong <quic_jinlmao@quicinc.com>
+ - Jie Gan <quic_jiegan@quicinc.com>
+
+description: |
+ The Trace Memory Controller (TMC) is used for Embedded Trace Buffer (ETB),
+ Embedded Trace FIFO (ETF) and Embedded Trace Router (ETR) configurations.
+ The configuration mode (ETB, ETF, ETR) is discovered at boot time when
+ the device is probed.
+
+ The CoreSight TMC Control Unit controls various CoreSight behaviors.
+ It works as a helper device when connected to a TMC ETR device.
+ It is responsible for controlling the data filter function of the TMC
+ ETR device, based on the source device's Trace ID. The trace data with
+ that Trace ID can get into the ETR's buffer while other trace data gets
+ ignored.
+
+properties:
+ compatible:
+ enum:
+ - qcom,sa8775p-ctcu
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: apb
+
+ in-ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ patternProperties:
+ '^port(@[0-1])?$':
+ description: Input connections from CoreSight Trace bus
+ $ref: /schemas/graph.yaml#/properties/port
+
+required:
+ - compatible
+ - reg
+ - in-ports
+
+additionalProperties: false
+
+examples:
+ - |
+ ctcu@1001000 {
+ compatible = "qcom,sa8775p-ctcu";
+ reg = <0x1001000 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb";
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ ctcu_in_port0: endpoint {
+ remote-endpoint = <&etr0_out_port>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ ctcu_in_port1: endpoint {
+ remote-endpoint = <&etr1_out_port>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/arm/qcom,coresight-tpda.yaml b/Documentation/devicetree/bindings/arm/qcom,coresight-tpda.yaml
index 76163abed655..5ed40f21b8eb 100644
--- a/Documentation/devicetree/bindings/arm/qcom,coresight-tpda.yaml
+++ b/Documentation/devicetree/bindings/arm/qcom,coresight-tpda.yaml
@@ -55,8 +55,7 @@ properties:
- const: arm,primecell
reg:
- minItems: 1
- maxItems: 2
+ maxItems: 1
clocks:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/arm/qcom,coresight-tpdm.yaml b/Documentation/devicetree/bindings/arm/qcom,coresight-tpdm.yaml
index 8eec07d9d454..07d21a3617f5 100644
--- a/Documentation/devicetree/bindings/arm/qcom,coresight-tpdm.yaml
+++ b/Documentation/devicetree/bindings/arm/qcom,coresight-tpdm.yaml
@@ -41,8 +41,7 @@ properties:
- const: arm,primecell
reg:
- minItems: 1
- maxItems: 2
+ maxItems: 1
qcom,dsb-element-bits:
description:
diff --git a/Documentation/devicetree/bindings/dma/atmel,at91sam9g45-dma.yaml b/Documentation/devicetree/bindings/dma/atmel,at91sam9g45-dma.yaml
new file mode 100644
index 000000000000..a58dc407311b
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/atmel,at91sam9g45-dma.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/atmel,at91sam9g45-dma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atmel Direct Memory Access Controller (DMA)
+
+maintainers:
+ - Ludovic Desroches <ludovic.desroches@microchip.com>
+
+description:
+ The Atmel Direct Memory Access Controller (DMAC) transfers data from a source
+ peripheral to a destination peripheral over one or more AMBA buses. One channel
+ is required for each source/destination pair. In the most basic configuration,
+ the DMAC has one master interface and one channel. The master interface reads
+ the data from a source and writes it to a destination. Two AMBA transfers are
+ required for each DMAC data transfer. This is also known as a dual-access transfer.
+ The DMAC is programmed via the APB interface.
+
+properties:
+ compatible:
+ enum:
+ - atmel,at91sam9g45-dma
+ - atmel,at91sam9rl-dma
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ "#dma-cells":
+ description:
+ Must be <2>, used to represent the number of integer cells in the dmas
+ property of client devices. The two cells in order are
+ 1. The first cell represents the channel number.
+ 2. The second cell is 0 for RX and 1 for TX transfers.
+ const: 2
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: dma_clk
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#dma-cells"
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ dma-controller@ffffec00 {
+ compatible = "atmel,at91sam9g45-dma";
+ reg = <0xffffec00 0x200>;
+ interrupts = <21>;
+ #dma-cells = <2>;
+ clocks = <&pmc 2 20>;
+ clock-names = "dma_clk";
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/dma/atmel,sama5d4-dma.yaml b/Documentation/devicetree/bindings/dma/atmel,sama5d4-dma.yaml
index 9ca1c5d1f00f..73fc13b902b3 100644
--- a/Documentation/devicetree/bindings/dma/atmel,sama5d4-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/atmel,sama5d4-dma.yaml
@@ -32,6 +32,9 @@ properties:
- microchip,sam9x60-dma
- microchip,sam9x7-dma
- const: atmel,sama5d4-dma
+ - items:
+ - const: microchip,sama7d65-dma
+ - const: microchip,sama7g5-dma
"#dma-cells":
description: |
diff --git a/Documentation/devicetree/bindings/dma/atmel-dma.txt b/Documentation/devicetree/bindings/dma/atmel-dma.txt
deleted file mode 100644
index f69bcf5a6343..000000000000
--- a/Documentation/devicetree/bindings/dma/atmel-dma.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-* Atmel Direct Memory Access Controller (DMA)
-
-Required properties:
-- compatible: Should be "atmel,<chip>-dma".
-- reg: Should contain DMA registers location and length.
-- interrupts: Should contain DMA interrupt.
-- #dma-cells: Must be <2>, used to represent the number of integer cells in
-the dmas property of client devices.
-
-Example:
-
-dma0: dma@ffffec00 {
- compatible = "atmel,at91sam9g45-dma";
- reg = <0xffffec00 0x200>;
- interrupts = <21>;
- #dma-cells = <2>;
-};
-
-DMA clients connected to the Atmel DMA controller must use the format
-described in the dma.txt file, using a three-cell specifier for each channel:
-a phandle plus two integer cells.
-The three cells in order are:
-
-1. A phandle pointing to the DMA controller.
-2. The memory interface (16 most significant bits), the peripheral interface
-(16 less significant bits).
-3. Parameters for the at91 DMA configuration register which are device
-dependent:
- - bit 7-0: peripheral identifier for the hardware handshaking interface. The
- identifier can be different for tx and rx.
- - bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 2 for ASAP.
-
-Example:
-
-i2c0@i2c@f8010000 {
- compatible = "atmel,at91sam9x5-i2c";
- reg = <0xf8010000 0x100>;
- interrupts = <9 4 6>;
- dmas = <&dma0 1 7>,
- <&dma0 1 8>;
- dma-names = "tx", "rx";
-};
diff --git a/Documentation/devicetree/bindings/dma/fsl,edma.yaml b/Documentation/devicetree/bindings/dma/fsl,edma.yaml
index 4f925469533e..950e8fa4f4ab 100644
--- a/Documentation/devicetree/bindings/dma/fsl,edma.yaml
+++ b/Documentation/devicetree/bindings/dma/fsl,edma.yaml
@@ -28,6 +28,14 @@ properties:
- fsl,imx95-edma5
- nxp,s32g2-edma
- items:
+ - enum:
+ - fsl,imx94-edma3
+ - const: fsl,imx93-edma3
+ - items:
+ - enum:
+ - fsl,imx94-edma5
+ - const: fsl,imx95-edma5
+ - items:
- const: fsl,ls1028a-edma
- const: fsl,vf610-edma
- items:
diff --git a/Documentation/devicetree/bindings/dma/fsl,elo-dma.yaml b/Documentation/devicetree/bindings/dma/fsl,elo-dma.yaml
new file mode 100644
index 000000000000..92288d76d51b
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/fsl,elo-dma.yaml
@@ -0,0 +1,137 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/fsl,elo-dma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Elo DMA Controller
+
+maintainers:
+ - J. Neuschäfer <j.ne@posteo.net>
+
+description:
+ This is a little-endian 4-channel DMA controller, used in Freescale mpc83xx
+ series chips such as the mpc8315, mpc8349 and mpc8379.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - fsl,mpc8313-dma
+ - fsl,mpc8315-dma
+ - fsl,mpc8323-dma
+ - fsl,mpc8347-dma
+ - fsl,mpc8349-dma
+ - fsl,mpc8360-dma
+ - fsl,mpc8377-dma
+ - fsl,mpc8378-dma
+ - fsl,mpc8379-dma
+ - const: fsl,elo-dma
+
+ reg:
+ items:
+ - description:
+ DMA General Status Register (DGSR), which contains the status of
+ all four DMA channels.
+
+ cell-index:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Controller index. 0 for controller @ 0x8100.
+
+ ranges: true
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ interrupts:
+ maxItems: 1
+ description: Controller interrupt.
+
+required:
+ - compatible
+ - reg
+
+patternProperties:
+ "^dma-channel@[0-9a-f]+$":
+ type: object
+ additionalProperties: false
+
+ properties:
+ compatible:
+ oneOf:
+ # native DMA channel
+ - items:
+ - enum:
+ - fsl,mpc8315-dma-channel
+ - fsl,mpc8323-dma-channel
+ - fsl,mpc8347-dma-channel
+ - fsl,mpc8349-dma-channel
+ - fsl,mpc8360-dma-channel
+ - fsl,mpc8377-dma-channel
+ - fsl,mpc8378-dma-channel
+ - fsl,mpc8379-dma-channel
+ - const: fsl,elo-dma-channel
+
+ # audio DMA channel, see fsl,ssi.yaml
+ - const: fsl,ssi-dma-channel
+
+ reg:
+ maxItems: 1
+
+ cell-index:
+ description: DMA channel index starts at 0.
+
+ interrupts:
+ maxItems: 1
+ description:
+ Per-channel interrupt. Only necessary if no controller interrupt has
+ been provided.
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ dma@82a8 {
+ compatible = "fsl,mpc8349-dma", "fsl,elo-dma";
+ reg = <0x82a8 4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x8100 0x1a4>;
+ interrupts = <71 IRQ_TYPE_LEVEL_LOW>;
+ cell-index = <0>;
+
+ dma-channel@0 {
+ compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
+ reg = <0 0x80>;
+ cell-index = <0>;
+ interrupts = <71 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ dma-channel@80 {
+ compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupts = <71 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ dma-channel@100 {
+ compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupts = <71 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ dma-channel@180 {
+ compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupts = <71 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/dma/fsl,elo3-dma.yaml b/Documentation/devicetree/bindings/dma/fsl,elo3-dma.yaml
new file mode 100644
index 000000000000..0f5e475657a7
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/fsl,elo3-dma.yaml
@@ -0,0 +1,125 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/fsl,elo3-dma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Elo3 DMA Controller
+
+maintainers:
+ - J. Neuschäfer <j.ne@posteo.net>
+
+description:
+ DMA controller with the same function as EloPlus, except that Elo3 has 8
+ channels while EloPlus has only 4. It is used in Freescale Txxx and Bxxx
+ series chips, such as the t1040, t4240 and b4860.
+
+properties:
+ compatible:
+ const: fsl,elo3-dma
+
+ reg:
+ items:
+ - description:
+ DMA General Status Registers starting from DGSR0, for channels 1-4
+ - description:
+ DMA General Status Registers starting from DGSR1, for channels 5-8
+
+ ranges: true
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ interrupts:
+ maxItems: 1
+
+patternProperties:
+ "^dma-channel@[0-9a-f]+$":
+ type: object
+ additionalProperties: false
+
+ properties:
+ compatible:
+ enum:
+ # native DMA channel
+ - fsl,eloplus-dma-channel
+
+ # audio DMA channel, see fsl,ssi.yaml
+ - fsl,ssi-dma-channel
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+ description:
+ Per-channel interrupt. Only necessary if no controller interrupt has
+ been provided.
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ dma@100300 {
+ compatible = "fsl,elo3-dma";
+ reg = <0x100300 0x4>,
+ <0x100600 0x4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x100100 0x500>;
+
+ dma-channel@0 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ interrupts = <28 IRQ_TYPE_EDGE_FALLING 0 0>;
+ };
+
+ dma-channel@80 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ interrupts = <29 IRQ_TYPE_EDGE_FALLING 0 0>;
+ };
+
+ dma-channel@100 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ interrupts = <30 IRQ_TYPE_EDGE_FALLING 0 0>;
+ };
+
+ dma-channel@180 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ interrupts = <31 IRQ_TYPE_EDGE_FALLING 0 0>;
+ };
+
+ dma-channel@300 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x300 0x80>;
+ interrupts = <76 IRQ_TYPE_EDGE_FALLING 0 0>;
+ };
+
+ dma-channel@380 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x380 0x80>;
+ interrupts = <77 IRQ_TYPE_EDGE_FALLING 0 0>;
+ };
+
+ dma-channel@400 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x400 0x80>;
+ interrupts = <78 IRQ_TYPE_EDGE_FALLING 0 0>;
+ };
+
+ dma-channel@480 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x480 0x80>;
+ interrupts = <79 IRQ_TYPE_EDGE_FALLING 0 0>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/dma/fsl,eloplus-dma.yaml b/Documentation/devicetree/bindings/dma/fsl,eloplus-dma.yaml
new file mode 100644
index 000000000000..8992f244c4db
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/fsl,eloplus-dma.yaml
@@ -0,0 +1,132 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/fsl,eloplus-dma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale EloPlus DMA Controller
+
+maintainers:
+ - J. Neuschäfer <j.ne@posteo.net>
+
+description:
+ This is a 4-channel DMA controller with extended addresses and chaining,
+ mainly used in Freescale mpc85xx/86xx, Pxxx and BSC series chips, such as
+ the mpc8540, mpc8641, p4080 and bsc9131.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - fsl,mpc8540-dma
+ - fsl,mpc8541-dma
+ - fsl,mpc8548-dma
+ - fsl,mpc8555-dma
+ - fsl,mpc8560-dma
+ - fsl,mpc8572-dma
+ - fsl,mpc8641-dma
+ - const: fsl,eloplus-dma
+ - const: fsl,eloplus-dma
+
+ reg:
+ items:
+ - description:
+ DMA General Status Register (DGSR), which contains the
+ status of all four DMA channels
+
+ cell-index:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Controller index. 0 for controller @ 0x21000, 1 for controller @ 0xc000.
+
+ ranges: true
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ interrupts:
+ maxItems: 1
+ description: Controller interrupt.
+
+patternProperties:
+ "^dma-channel@[0-9a-f]+$":
+ type: object
+ additionalProperties: false
+
+ properties:
+ compatible:
+ oneOf:
+ # native DMA channel
+ - items:
+ - enum:
+ - fsl,mpc8540-dma-channel
+ - fsl,mpc8541-dma-channel
+ - fsl,mpc8548-dma-channel
+ - fsl,mpc8555-dma-channel
+ - fsl,mpc8560-dma-channel
+ - fsl,mpc8572-dma-channel
+ - const: fsl,eloplus-dma-channel
+
+ # audio DMA channel, see fsl,ssi.yaml
+ - const: fsl,ssi-dma-channel
+
+ reg:
+ maxItems: 1
+
+ cell-index:
+ description: DMA channel index starts at 0.
+
+ interrupts:
+ maxItems: 1
+ description:
+ Per-channel interrupt. Only necessary if no controller interrupt has
+ been provided.
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ dma@21300 {
+ compatible = "fsl,mpc8540-dma", "fsl,eloplus-dma";
+ reg = <0x21300 4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x21100 0x200>;
+ cell-index = <0>;
+
+ dma-channel@0 {
+ compatible = "fsl,mpc8540-dma-channel", "fsl,eloplus-dma-channel";
+ reg = <0 0x80>;
+ cell-index = <0>;
+ interrupts = <20 IRQ_TYPE_EDGE_FALLING>;
+ };
+
+ dma-channel@80 {
+ compatible = "fsl,mpc8540-dma-channel", "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupts = <21 IRQ_TYPE_EDGE_FALLING>;
+ };
+
+ dma-channel@100 {
+ compatible = "fsl,mpc8540-dma-channel", "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupts = <22 IRQ_TYPE_EDGE_FALLING>;
+ };
+
+ dma-channel@180 {
+ compatible = "fsl,mpc8540-dma-channel", "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupts = <23 IRQ_TYPE_EDGE_FALLING>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/dma/fsl,mxs-dma.yaml b/Documentation/devicetree/bindings/dma/fsl,mxs-dma.yaml
index a17cf2360dd4..75a7d9556699 100644
--- a/Documentation/devicetree/bindings/dma/fsl,mxs-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/fsl,mxs-dma.yaml
@@ -31,6 +31,12 @@ properties:
- fsl,imx6q-dma-apbh
- fsl,imx6sx-dma-apbh
- fsl,imx7d-dma-apbh
+ - fsl,imx8dxl-dma-apbh
+ - fsl,imx8mm-dma-apbh
+ - fsl,imx8mn-dma-apbh
+ - fsl,imx8mp-dma-apbh
+ - fsl,imx8mq-dma-apbh
+ - fsl,imx8qm-dma-apbh
- fsl,imx8qxp-dma-apbh
- const: fsl,imx28-dma-apbh
- enum:
diff --git a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml
index 525f5f3932f5..935735a59afd 100644
--- a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml
+++ b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml
@@ -59,6 +59,8 @@ properties:
minimum: 1
maximum: 8
+ dma-noncoherent: true
+
resets:
minItems: 1
maxItems: 2
diff --git a/Documentation/devicetree/bindings/eeprom/at24.yaml b/Documentation/devicetree/bindings/eeprom/at24.yaml
index c9e4afbdc448..0ac68646c077 100644
--- a/Documentation/devicetree/bindings/eeprom/at24.yaml
+++ b/Documentation/devicetree/bindings/eeprom/at24.yaml
@@ -130,10 +130,13 @@ properties:
- const: giantec,gt24c32a
- const: atmel,24c32
- items:
- - const: onnn,n24s64b
+ - enum:
+ - onnn,n24s64b
+ - puya,p24c64f
- const: atmel,24c64
- items:
- enum:
+ - giantec,gt24p128e
- giantec,gt24p128f
- renesas,r1ex24128
- samsung,s524ad0xd1
diff --git a/Documentation/devicetree/bindings/i2c/i2c-exynos5.yaml b/Documentation/devicetree/bindings/i2c/i2c-exynos5.yaml
index 70cc2ee9ee27..8d47b290b4ed 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-exynos5.yaml
+++ b/Documentation/devicetree/bindings/i2c/i2c-exynos5.yaml
@@ -30,6 +30,7 @@ properties:
- items:
- enum:
- samsung,exynos5433-hsi2c
+ - samsung,exynos7870-hsi2c
- tesla,fsd-hsi2c
- const: samsung,exynos7-hsi2c
- items:
diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml
index 1dcb9c78de3b..969030a6f82a 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml
@@ -26,6 +26,7 @@ properties:
- fsl,imx8qm-lpi2c
- fsl,imx8ulp-lpi2c
- fsl,imx93-lpi2c
+ - fsl,imx94-lpi2c
- fsl,imx95-lpi2c
- const: fsl,imx7ulp-lpi2c
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml b/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml
index a9dae5b52f28..8101afa6f146 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml
+++ b/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml
@@ -37,6 +37,7 @@ properties:
- rockchip,px30-i2c
- rockchip,rk3308-i2c
- rockchip,rk3328-i2c
+ - rockchip,rk3562-i2c
- rockchip,rk3568-i2c
- rockchip,rk3576-i2c
- rockchip,rk3588-i2c
diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-qup.yaml b/Documentation/devicetree/bindings/i2c/qcom,i2c-qup.yaml
index f43947514d48..758d8f6321e1 100644
--- a/Documentation/devicetree/bindings/i2c/qcom,i2c-qup.yaml
+++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-qup.yaml
@@ -40,6 +40,9 @@ properties:
- const: tx
- const: rx
+ interconnects:
+ maxItems: 1
+
interrupts:
maxItems: 1
@@ -52,9 +55,15 @@ properties:
- const: default
- const: sleep
+ power-domains:
+ maxItems: 1
+
reg:
maxItems: 1
+ required-opps:
+ maxItems: 1
+
required:
- compatible
- clock-names
@@ -67,7 +76,9 @@ unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/qcom,gcc-msm8998.h>
+ #include <dt-bindings/interconnect/qcom,msm8996.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/qcom-rpmpd.h>
i2c@c175000 {
compatible = "qcom,i2c-qup-v2.2.1";
@@ -82,6 +93,9 @@ examples:
pinctrl-names = "default", "sleep";
pinctrl-0 = <&blsp1_i2c1_default>;
pinctrl-1 = <&blsp1_i2c1_sleep>;
+ power-domains = <&rpmpd MSM8909_VDDCX>;
+ required-opps = <&rpmpd_opp_svs_krait>;
+ interconnects = <&pnoc MASTER_BLSP_1 &bimc SLAVE_EBI_CH0>;
clock-frequency = <400000>;
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/i2c/samsung,s3c2410-i2c.yaml b/Documentation/devicetree/bindings/i2c/samsung,s3c2410-i2c.yaml
index bbc568485627..6ba7d793504c 100644
--- a/Documentation/devicetree/bindings/i2c/samsung,s3c2410-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/samsung,s3c2410-i2c.yaml
@@ -22,6 +22,7 @@ properties:
- samsung,exynos5-sata-phy-i2c
- items:
- enum:
+ - samsung,exynos7870-i2c
- samsung,exynos7885-i2c
- samsung,exynos850-i2c
- const: samsung,s3c2440-i2c
diff --git a/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml b/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml
index e5d05263c45a..bc5d0fb5abfe 100644
--- a/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml
@@ -27,6 +27,11 @@ properties:
oneOf:
- description: Generic Synopsys DesignWare I2C controller
const: snps,designware-i2c
+ - description: Renesas RZ/N1D I2C controller
+ items:
+ - const: renesas,r9a06g032-i2c # RZ/N1D
+ - const: renesas,rzn1-i2c # RZ/N1
+ - const: snps,designware-i2c
- description: Microsemi Ocelot SoCs I2C controller
items:
- const: mscc,ocelot-i2c
diff --git a/Documentation/devicetree/bindings/i2c/spacemit,k1-i2c.yaml b/Documentation/devicetree/bindings/i2c/spacemit,k1-i2c.yaml
new file mode 100644
index 000000000000..3d6aefb0d0f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/spacemit,k1-i2c.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/spacemit,k1-i2c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: I2C controller embedded in SpacemiT's K1 SoC
+
+maintainers:
+ - Troy Mitchell <troymitchell988@gmail.com>
+
+properties:
+ compatible:
+ const: spacemit,k1-i2c
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: I2C Functional Clock
+ - description: APB Bus Clock
+
+ clock-names:
+ items:
+ - const: func
+ - const: bus
+
+ clock-frequency:
+ description: |
+ K1 supports three different modes, each running at a different frequency:
+ standard speed mode: up to 100000 (100 kHz)
+ fast speed mode : up to 400000 (400 kHz)
+ high speed mode : up to 3300000 (3.3 MHz)
+ default: 400000
+ maximum: 3300000
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c@d4010800 {
+ compatible = "spacemit,k1-i2c";
+ reg = <0xd4010800 0x38>;
+ interrupt-parent = <&plic>;
+ interrupts = <36>;
+ clocks = <&ccu 32>, <&ccu 84>;
+ clock-names = "func", "bus";
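+ /* standard speed mode (100 kHz); per the description above, the fast
+ (400000) and high speed (3300000) modes are also supported */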
+ clock-frequency = <100000>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/i2c/ti,omap4-i2c.yaml b/Documentation/devicetree/bindings/i2c/ti,omap4-i2c.yaml
index 8c2e35fabf5b..58d32ceeacfc 100644
--- a/Documentation/devicetree/bindings/i2c/ti,omap4-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/ti,omap4-i2c.yaml
@@ -47,6 +47,11 @@ properties:
$ref: /schemas/types.yaml#/definitions/string
deprecated: true
+ mux-states:
+ description:
+ mux controller node to route the I2C signals from SoC to clients.
+ maxItems: 1
+
required:
- compatible
- reg
@@ -87,4 +92,5 @@ examples:
interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
+ mux-states = <&i2c_mux 1>;
};
diff --git a/Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml b/Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml
index c56ff77677f1..4fbdcdac0aee 100644
--- a/Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml
+++ b/Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml
@@ -14,7 +14,9 @@ allOf:
properties:
compatible:
- const: silvaco,i3c-master-v1
+ enum:
+ - nuvoton,npcm845-i3c
+ - silvaco,i3c-master-v1
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml b/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml
index 4fc13e3c0f75..5f6467375811 100644
--- a/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml
+++ b/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml
@@ -34,6 +34,9 @@ properties:
interrupts:
maxItems: 1
+ power-domains:
+ maxItems: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad4030.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad4030.yaml
new file mode 100644
index 000000000000..54e7349317b7
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad4030.yaml
@@ -0,0 +1,110 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright 2024 Analog Devices Inc.
+# Copyright 2024 BayLibre, SAS.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/adi,ad4030.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices AD4030 and AD4630 ADC families
+
+maintainers:
+ - Michael Hennerich <michael.hennerich@analog.com>
+ - Nuno Sa <nuno.sa@analog.com>
+
+description: |
+ Analog Devices AD4030 single channel and AD4630/AD4632 dual channel precision
+ SAR ADC families
+
+ * https://www.analog.com/media/en/technical-documentation/data-sheets/ad4030-24-4032-24.pdf
+ * https://www.analog.com/media/en/technical-documentation/data-sheets/ad4630-24_ad4632-24.pdf
+ * https://www.analog.com/media/en/technical-documentation/data-sheets/ad4630-16-4632-16.pdf
+
+properties:
+ compatible:
+ enum:
+ - adi,ad4030-24
+ - adi,ad4032-24
+ - adi,ad4630-16
+ - adi,ad4630-24
+ - adi,ad4632-16
+ - adi,ad4632-24
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 102040816
+
+ spi-rx-bus-width:
+ enum: [1, 2, 4]
+
+ vdd-5v-supply: true
+ vdd-1v8-supply: true
+ vio-supply: true
+
+ ref-supply:
+ description:
+ Optional external unbuffered reference. Used when refin-supply is not
+ connected.
+
+ refin-supply:
+ description:
+ Internal buffered reference. Used when ref-supply is not connected.
+
+ cnv-gpios:
+ description:
+ The Convert Input (CNV). It initiates the sampling conversions.
+ maxItems: 1
+
+ reset-gpios:
+ description:
+ The Reset Input (/RST). Used for asynchronous device reset.
+ maxItems: 1
+
+ interrupts:
+ description:
+ The BUSY pin is used to signal that the conversion results are available
+ to be transferred when in SPI Clocking Mode. This node should be
+ connected to an interrupt that is triggered when the BUSY line goes low.
+ maxItems: 1
+
+ interrupt-names:
+ const: busy
+
+required:
+ - compatible
+ - reg
+ - vdd-5v-supply
+ - vdd-1v8-supply
+ - vio-supply
+ - cnv-gpios
+
+oneOf:
+ - required:
+ - ref-supply
+ - required:
+ - refin-supply
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@0 {
+ compatible = "adi,ad4030-24";
+ reg = <0>;
+ spi-max-frequency = <80000000>;
+ vdd-5v-supply = <&supply_5V>;
+ vdd-1v8-supply = <&supply_1_8V>;
+ vio-supply = <&supply_1_8V>;
+ ref-supply = <&supply_5V>;
+ cnv-gpios = <&gpio0 0 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio0 1 GPIO_ACTIVE_LOW>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml
index 7d2229dee444..cbde7a0505d2 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml
@@ -84,6 +84,10 @@ properties:
description: The Reset Input (RESET). Should be configured GPIO_ACTIVE_LOW.
maxItems: 1
+ pwms:
+ description: PWM signal connected to the CNV pin.
+ maxItems: 1
+
interrupts:
minItems: 1
items:
@@ -106,6 +110,15 @@ properties:
The first cell is the GPn number: 0 to 3.
The second cell takes standard GPIO flags.
+ '#trigger-source-cells':
+ description: |
+ First cell indicates the output signal: 0 = BUSY, 1 = ALERT.
+ Second cell indicates which GPn pin is used: 0, 2 or 3.
+
+ For convenience, macros for these values are available in
+ dt-bindings/iio/adc/adi,ad4695.h.
+ const: 2
+
"#address-cells":
const: 1
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad4851.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad4851.yaml
new file mode 100644
index 000000000000..c6676d91b4e6
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad4851.yaml
@@ -0,0 +1,153 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright 2024 Analog Devices Inc.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/adi,ad4851.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices AD485X family
+
+maintainers:
+ - Sergiu Cuciurean <sergiu.cuciurean@analog.com>
+ - Dragos Bogdan <dragos.bogdan@analog.com>
+ - Antoniu Miclaus <antoniu.miclaus@analog.com>
+
+description: |
+ Analog Devices AD485X fully buffered, 8-channel simultaneous sampling,
+ 16/20-bit, 1 MSPS data acquisition system (DAS) with differential, wide
+ common-mode range inputs.
+
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad4855.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad4856.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad4857.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad4858.pdf
+
+$ref: /schemas/spi/spi-peripheral-props.yaml#
+
+properties:
+ compatible:
+ enum:
+ - adi,ad4851
+ - adi,ad4852
+ - adi,ad4853
+ - adi,ad4854
+ - adi,ad4855
+ - adi,ad4856
+ - adi,ad4857
+ - adi,ad4858
+ - adi,ad4858i
+
+ reg:
+ maxItems: 1
+
+ vcc-supply: true
+
+ vee-supply: true
+
+ vdd-supply: true
+
+ vddh-supply: true
+
+ vddl-supply: true
+
+ vio-supply: true
+
+ vrefbuf-supply: true
+
+ vrefio-supply: true
+
+ pwms:
+ description: PWM connected to the CNV pin.
+ maxItems: 1
+
+ io-backends:
+ maxItems: 1
+
+ pd-gpios:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 25000000
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ "^channel(@[0-7])?$":
+ $ref: adc.yaml
+ type: object
+ description: Represents the channels which are connected to the ADC.
+
+ properties:
+ reg:
+ description:
+ The channel number, as specified in the datasheet (from 0 to 7).
+ minimum: 0
+ maximum: 7
+
+ diff-channels:
+ description:
+ Each channel can be configured as a bipolar differential channel, in
+ which case the ADC uses the channel's own positive and negative inputs.
+ This property must be set to the channel number ('reg') for both the
+ positive and negative inputs, i.e. diff-channels = <reg reg>. Since the
+ configuration is bipolar differential, the 'bipolar' property is
+ required.
+ items:
+ minimum: 0
+ maximum: 7
+
+ bipolar: true
+
+ required:
+ - reg
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - vcc-supply
+ - vee-supply
+ - vdd-supply
+ - vio-supply
+ - pwms
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@0{
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "adi,ad4858";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+ vcc-supply = <&vcc>;
+ vdd-supply = <&vdd>;
+ vee-supply = <&vee>;
+ vddh-supply = <&vddh>;
+ vddl-supply = <&vddl>;
+ vio-supply = <&vio>;
+ pwms = <&pwm_gen 0 0>;
+ io-backends = <&iio_backend>;
+
+ channel@0 {
+ reg = <0>;
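+ /* bipolar differential: both inputs use this channel's number,
+ per the diff-channels description above */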
+ diff-channels = <0 0>;
+ bipolar;
+ };
+
+ channel@1 {
+ reg = <1>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7191.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7191.yaml
new file mode 100644
index 000000000000..801ed319ee82
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7191.yaml
@@ -0,0 +1,149 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2025 Analog Devices Inc.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/adi,ad7191.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices AD7191 ADC
+
+maintainers:
+ - Alisa-Dariana Roman <alisa.roman@analog.com>
+
+description: |
+ Bindings for the Analog Devices AD7191 ADC device. Datasheet can be
+ found here:
+ https://www.analog.com/media/en/technical-documentation/data-sheets/AD7191.pdf
+ The device's PDOWN pin must be connected to the SPI controller's chip select
+ pin.
+
+properties:
+ compatible:
+ enum:
+ - adi,ad7191
+
+ reg:
+ maxItems: 1
+
+ spi-cpol: true
+
+ spi-cpha: true
+
+ clocks:
+ maxItems: 1
+ description:
+ Must be present when CLKSEL pin is tied HIGH to select external clock
+ source (either a crystal between MCLK1 and MCLK2 pins, or a
+ CMOS-compatible clock driving MCLK2 pin). Must be absent when CLKSEL pin
+ is tied LOW to use the internal 4.92 MHz clock.
+
+ interrupts:
+ maxItems: 1
+
+ avdd-supply:
+ description: AVdd voltage supply
+
+ dvdd-supply:
+ description: DVdd voltage supply
+
+ vref-supply:
+ description: Vref voltage supply
+
+ odr-gpios:
+ description:
+ ODR1 and ODR2 pins for output data rate selection. Should be defined if
+ adi,odr-value is absent.
+ minItems: 2
+ maxItems: 2
+
+ adi,odr-value:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Should be present if ODR pins are pin-strapped. Possible values:
+ 120 Hz (ODR1=0, ODR2=0)
+ 60 Hz (ODR1=0, ODR2=1)
+ 50 Hz (ODR1=1, ODR2=0)
+ 10 Hz (ODR1=1, ODR2=1)
+ If defined, odr-gpios must be absent.
+ enum: [120, 60, 50, 10]
+
+ pga-gpios:
+ description:
+ PGA1 and PGA2 pins for gain selection. Should be defined if adi,pga-value
+ is absent.
+ minItems: 2
+ maxItems: 2
+
+ adi,pga-value:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Should be present if PGA pins are pin-strapped. Possible values:
+ Gain 1 (PGA1=0, PGA2=0)
+ Gain 8 (PGA1=0, PGA2=1)
+ Gain 64 (PGA1=1, PGA2=0)
+ Gain 128 (PGA1=1, PGA2=1)
+ If defined, pga-gpios must be absent.
+ enum: [1, 8, 64, 128]
+
+ temp-gpios:
+ description: TEMP pin for temperature sensor enable.
+ maxItems: 1
+
+ chan-gpios:
+ description: CHAN pin for input channel selection.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - avdd-supply
+ - dvdd-supply
+ - vref-supply
+ - spi-cpol
+ - spi-cpha
+ - temp-gpios
+ - chan-gpios
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+ - oneOf:
+ - required:
+ - adi,odr-value
+ - required:
+ - odr-gpios
+ - oneOf:
+ - required:
+ - adi,pga-value
+ - required:
+ - pga-gpios
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@0 {
+ compatible = "adi,ad7191";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ spi-cpol;
+ spi-cpha;
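+ /* CLKSEL tied HIGH in this sketch: an external clock source is
+ given; omit clocks when CLKSEL is tied LOW (internal 4.92 MHz) */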
+ clocks = <&ad7191_mclk>;
+ interrupts = <25 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-parent = <&gpio>;
+ avdd-supply = <&avdd>;
+ dvdd-supply = <&dvdd>;
+ vref-supply = <&vref>;
+ adi,pga-value = <1>;
+ odr-gpios = <&gpio 23 GPIO_ACTIVE_HIGH>, <&gpio 24 GPIO_ACTIVE_HIGH>;
+ temp-gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
+ chan-gpios = <&gpio 27 GPIO_ACTIVE_HIGH>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml
index ada08005b3cd..ff4f5c21c548 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7380.yaml
@@ -27,6 +27,7 @@ description: |
* https://www.analog.com/en/products/ad7388-4.html
* https://www.analog.com/en/products/adaq4370-4.html
* https://www.analog.com/en/products/adaq4380-4.html
+ * https://www.analog.com/en/products/adaq4381-4.html
$ref: /schemas/spi/spi-peripheral-props.yaml#
@@ -50,6 +51,7 @@ properties:
- adi,ad7388-4
- adi,adaq4370-4
- adi,adaq4380-4
+ - adi,adaq4381-4
reg:
maxItems: 1
@@ -201,6 +203,7 @@ allOf:
- adi,ad7380-4
- adi,adaq4370-4
- adi,adaq4380-4
+ - adi,adaq4381-4
then:
properties:
refio-supply: false
@@ -218,6 +221,7 @@ allOf:
enum:
- adi,adaq4370-4
- adi,adaq4380-4
+ - adi,adaq4381-4
then:
required:
- vs-p-supply
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,axi-adc.yaml b/Documentation/devicetree/bindings/iio/adc/adi,axi-adc.yaml
index e1f450b80db2..cf74f84d6103 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,axi-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,axi-adc.yaml
@@ -17,13 +17,25 @@ description: |
interface for the actual ADC, while this IP core will interface
to the data-lines of the ADC and handle the streaming of data into
memory via DMA.
+ In some cases, the AXI ADC interface is used to perform specialized
+ operations for a particular ADC, e.g. accessing the physical bus through
+ specific registers to write ADC registers.
+ In this case, a different compatible is used, indicating the name of the
+ target IP core.
+ The following IP is currently supported:
+ - AXI AD7606x: specialized version of the IP core for all the chips from
+ the ad7606 family.
https://wiki.analog.com/resources/fpga/docs/axi_adc_ip
+ https://analogdevicesinc.github.io/hdl/library/axi_ad485x/index.html
+ http://analogdevicesinc.github.io/hdl/library/axi_ad7606x/index.html
properties:
compatible:
enum:
- adi,axi-adc-10.0.a
+ - adi,axi-ad7606x
+ - adi,axi-ad485x
reg:
maxItems: 1
@@ -47,17 +59,48 @@ properties:
'#io-backend-cells':
const: 0
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ "^adc@[0-9a-f]+$":
+ type: object
+ properties:
+ reg:
+ maxItems: 1
+ additionalProperties: true
+ required:
+ - compatible
+ - reg
+
required:
- compatible
- dmas
- reg
- clocks
+allOf:
+ - if:
+ properties:
+ compatible:
+ not:
+ contains:
+ const: adi,axi-ad7606x
+ then:
+ properties:
+ '#address-cells': false
+ '#size-cells': false
+ patternProperties:
+ "^adc@[0-9a-f]+$": false
+
additionalProperties: false
examples:
- |
- axi-adc@44a00000 {
+ adc@44a00000 {
compatible = "adi,axi-adc-10.0.a";
reg = <0x44a00000 0x10000>;
dmas = <&rx_dma 0>;
@@ -65,4 +108,31 @@ examples:
clocks = <&axi_clk>;
#io-backend-cells = <0>;
};
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ parallel_bus_controller@44a00000 {
+ compatible = "adi,axi-ad7606x";
+ reg = <0x44a00000 0x10000>;
+ dmas = <&rx_dma 0>;
+ dma-names = "rx";
+ clocks = <&ext_clk>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@0 {
+ compatible = "adi,ad7606b";
+ reg = <0>;
+ pwms = <&axi_pwm_gen 0 0>;
+ pwm-names = "convst1";
+ avcc-supply = <&adc_vref>;
+ vdrive-supply = <&vdd_supply>;
+ reset-gpios = <&gpio0 91 GPIO_ACTIVE_HIGH>;
+ standby-gpios = <&gpio0 90 GPIO_ACTIVE_LOW>;
+ adi,range-gpios = <&gpio0 89 GPIO_ACTIVE_HIGH>;
+ adi,oversampling-ratio-gpios = <&gpio0 88 GPIO_ACTIVE_HIGH
+ &gpio0 87 GPIO_ACTIVE_HIGH
+ &gpio0 86 GPIO_ACTIVE_HIGH>;
+ io-backends = <&parallel_bus_controller>;
+ };
+ };
...
diff --git a/Documentation/devicetree/bindings/iio/adc/nxp,imx93-adc.yaml b/Documentation/devicetree/bindings/iio/adc/nxp,imx93-adc.yaml
index dfc3f512918f..c2e5ff418920 100644
--- a/Documentation/devicetree/bindings/iio/adc/nxp,imx93-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/nxp,imx93-adc.yaml
@@ -19,7 +19,14 @@ description:
properties:
compatible:
- const: nxp,imx93-adc
+ oneOf:
+ - enum:
+ - nxp,imx93-adc
+ - items:
+ - enum:
+ - nxp,imx94-adc
+ - nxp,imx95-adc
+ - const: nxp,imx93-adc
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml
index fd93ed3991e0..41e0c56ef8e3 100644
--- a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml
@@ -15,6 +15,8 @@ properties:
- const: rockchip,saradc
- const: rockchip,rk3066-tsadc
- const: rockchip,rk3399-saradc
+ - const: rockchip,rk3528-saradc
+ - const: rockchip,rk3562-saradc
- const: rockchip,rk3588-saradc
- items:
- const: rockchip,rk3576-saradc
diff --git a/Documentation/devicetree/bindings/iio/adc/ti,ads7138.yaml b/Documentation/devicetree/bindings/iio/adc/ti,ads7138.yaml
new file mode 100644
index 000000000000..a51893e207d4
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/ti,ads7138.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/ti,ads7138.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments ADS7128/ADS7138 analog-to-digital converter (ADC)
+
+maintainers:
+ - Tobias Sperling <tobias.sperling@softing.com>
+
+description: |
+ The ADS7128 and ADS7138 chips are 12-bit, 8-channel analog-to-digital
+ converters (ADCs) with a built-in digital window comparator (DWC), using
+ the I2C interface.
+ The ADS7128 differs in the addition of further hardware features, such as
+ a root-mean-square (RMS) and a zero-crossing-detect (ZCD) module.
+
+ Datasheets:
+ https://www.ti.com/product/ADS7128
+ https://www.ti.com/product/ADS7138
+
+properties:
+ compatible:
+ enum:
+ - ti,ads7128
+ - ti,ads7138
+
+ reg:
+ maxItems: 1
+
+ avdd-supply:
+ description:
+ The regulator used as analog supply voltage as well as reference voltage.
+
+ interrupts:
+ description:
+ Interrupt on ALERT pin, triggers on low level.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - avdd-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@10 {
+ compatible = "ti,ads7138";
+ reg = <0x10>;
+ avdd-supply = <&reg_stb_3v3>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad5380.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad5380.yaml
index 9eb9928500e2..3e323f1a5458 100644
--- a/Documentation/devicetree/bindings/iio/dac/adi,ad5380.yaml
+++ b/Documentation/devicetree/bindings/iio/dac/adi,ad5380.yaml
@@ -55,18 +55,18 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
dac@0 {
- reg = <0>;
- compatible = "adi,ad5390-5";
- vref-supply = <&dacvref>;
+ reg = <0>;
+ compatible = "adi,ad5390-5";
+ vref-supply = <&dacvref>;
};
};
- |
i2c {
- #address-cells = <1>;
- #size-cells = <0>;
- dac@42 {
- reg = <0x42>;
- compatible = "adi,ad5380-3";
- };
+ #address-cells = <1>;
+ #size-cells = <0>;
+ dac@42 {
+ reg = <0x42>;
+ compatible = "adi,ad5380-3";
+ };
};
...
diff --git a/Documentation/devicetree/bindings/iio/frequency/adf4371.yaml b/Documentation/devicetree/bindings/iio/frequency/adf4371.yaml
index 1cb2adaf66f9..53d607441612 100644
--- a/Documentation/devicetree/bindings/iio/frequency/adf4371.yaml
+++ b/Documentation/devicetree/bindings/iio/frequency/adf4371.yaml
@@ -30,8 +30,9 @@ properties:
clock-names:
description:
- Must be "clkin"
- maxItems: 1
+ Must be "clkin" if the input reference is single-ended or "clkin-diff"
+ if the input reference is differential.
+ enum: [clkin, clkin-diff]
adi,mute-till-lock-en:
type: boolean
diff --git a/Documentation/devicetree/bindings/iio/humidity/sciosense,ens210.yaml b/Documentation/devicetree/bindings/iio/humidity/sciosense,ens210.yaml
index ed0ea938f7f8..1e25cf781cf1 100644
--- a/Documentation/devicetree/bindings/iio/humidity/sciosense,ens210.yaml
+++ b/Documentation/devicetree/bindings/iio/humidity/sciosense,ens210.yaml
@@ -43,13 +43,13 @@ additionalProperties: false
examples:
- |
i2c {
- #address-cells = <1>;
- #size-cells = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
- temperature-sensor@43 {
- compatible = "sciosense,ens210";
- reg = <0x43>;
- };
+ temperature-sensor@43 {
+ compatible = "sciosense,ens210";
+ reg = <0x43>;
+ };
};
...
diff --git a/Documentation/devicetree/bindings/iio/imu/adi,adis16550.yaml b/Documentation/devicetree/bindings/iio/imu/adi,adis16550.yaml
new file mode 100644
index 000000000000..a4c273c7a67f
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/imu/adi,adis16550.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/imu/adi,adis16550.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices ADIS16550 and similar IMUs
+
+maintainers:
+ - Nuno Sa <nuno.sa@analog.com>
+ - Ramona Gradinariu <ramona.gradinariu@analog.com>
+ - Antoniu Miclaus <antoniu.miclaus@analog.com>
+ - Robert Budai <robert.budai@analog.com>
+
+properties:
+ compatible:
+ enum:
+ - adi,adis16550
+
+ reg:
+ maxItems: 1
+
+ spi-cpha: true
+
+ spi-cpol: true
+
+ spi-max-frequency:
+ maximum: 15000000
+
+ vdd-supply: true
+
+ interrupts:
+ maxItems: 1
+
+ reset-gpios:
+ description:
+ Active low RESET pin.
+ maxItems: 1
+
+ clocks:
+ description: If not provided, then the internal clock is used.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - spi-cpha
+ - spi-cpol
+ - spi-max-frequency
+ - vdd-supply
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ imu@0 {
+ compatible = "adi,adis16550";
+ reg = <0>;
+ spi-max-frequency = <15000000>;
+ spi-cpol;
+ spi-cpha;
+ vdd-supply = <&vdd>;
+ interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-parent = <&gpio>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/light/brcm,apds9160.yaml b/Documentation/devicetree/bindings/iio/light/brcm,apds9160.yaml
new file mode 100644
index 000000000000..bb1cc4404a55
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/brcm,apds9160.yaml
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/light/brcm,apds9160.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom Combined Proximity & Ambient light sensor
+
+maintainers:
+ - Mikael Gonella-Bolduc <m.gonella.bolduc@gmail.com>
+
+description: |
+ Datasheet: https://docs.broadcom.com/docs/APDS-9160-003-DS
+
+properties:
+ compatible:
+ enum:
+ - brcm,apds9160
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ vdd-supply: true
+
+ ps-cancellation-duration:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Proximity sensor cancellation pulse duration in half clock cycles.
+ The cancellation is applied in the integration phase to cancel out
+ unwanted reflected light from very near objects such as tempered glass
+ in front of the sensor.
+ default: 0
+ maximum: 63
+
+ ps-cancellation-current-picoamp:
+ description:
+ Proximity sensor crosstalk cancellation current in picoamperes.
+ This parameter adjusts the current in steps of 2400 pA up to 276000 pA.
+ The provided value must be a multiple of 2400 and in one of these ranges
+ [60000 - 96000]
+ [120000 - 156000]
+ [180000 - 216000]
+ [240000 - 276000]
+ This parameter is used in conjunction with the cancellation duration.
+ minimum: 60000
+ maximum: 276000
+ multipleOf: 2400
+
+required:
+ - compatible
+ - reg
+ - vdd-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ light-sensor@53 {
+ compatible = "brcm,apds9160";
+ reg = <0x53>;
+ vdd-supply = <&vdd_reg>;
+ interrupts = <29 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-parent = <&pinctrl>;
+ ps-cancellation-duration = <10>;
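+ /* 62400 pA = 26 * 2400 pA, within the first allowed
+ range [60000 - 96000] */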
+ ps-cancellation-current-picoamp = <62400>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/light/dynaimage,al3010.yaml b/Documentation/devicetree/bindings/iio/light/dynaimage,al3010.yaml
index a3a979553e32..f1048c30e73e 100644
--- a/Documentation/devicetree/bindings/iio/light/dynaimage,al3010.yaml
+++ b/Documentation/devicetree/bindings/iio/light/dynaimage,al3010.yaml
@@ -4,14 +4,16 @@
$id: http://devicetree.org/schemas/iio/light/dynaimage,al3010.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Dyna-Image AL3010 sensor
+title: Dyna-Image AL3000a/AL3010 sensor
maintainers:
- David Heidelberg <david@ixit.cz>
properties:
compatible:
- const: dynaimage,al3010
+ enum:
+ - dynaimage,al3000a
+ - dynaimage,al3010
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/magnetometer/silabs,si7210.yaml b/Documentation/devicetree/bindings/iio/magnetometer/silabs,si7210.yaml
new file mode 100644
index 000000000000..d4a3f7981c36
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/magnetometer/silabs,si7210.yaml
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/magnetometer/silabs,si7210.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Si7210 magnetic position and temperature sensor
+
+maintainers:
+ - Antoni Pokusinski <apokusinski01@gmail.com>
+
+description: |
+ Silabs Si7210 I2C Hall effect magnetic position and temperature sensor.
+ https://www.silabs.com/documents/public/data-sheets/si7210-datasheet.pdf
+
+properties:
+ compatible:
+ const: silabs,si7210
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ vdd-supply:
+ description: Regulator that provides power to the sensor
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ magnetometer@30 {
+ compatible = "silabs,si7210";
+ reg = <0x30>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
+ vdd-supply = <&vdd_3v3_reg>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/temperature/maxim,max31865.yaml b/Documentation/devicetree/bindings/iio/temperature/maxim,max31865.yaml
index 7cc365e0ebc8..7c0c6ab6fc69 100644
--- a/Documentation/devicetree/bindings/iio/temperature/maxim,max31865.yaml
+++ b/Documentation/devicetree/bindings/iio/temperature/maxim,max31865.yaml
@@ -40,15 +40,15 @@ unevaluatedProperties: false
examples:
- |
spi {
- #address-cells = <1>;
- #size-cells = <0>;
-
- temperature-sensor@0 {
- compatible = "maxim,max31865";
- reg = <0>;
- spi-max-frequency = <400000>;
- spi-cpha;
- maxim,3-wire;
- };
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ temperature-sensor@0 {
+ compatible = "maxim,max31865";
+ reg = <0>;
+ spi-max-frequency = <400000>;
+ spi-cpha;
+ maxim,3-wire;
+ };
};
...
diff --git a/Documentation/devicetree/bindings/iio/temperature/ti,tmp117.yaml b/Documentation/devicetree/bindings/iio/temperature/ti,tmp117.yaml
index 58aa1542776b..fbba5e934861 100644
--- a/Documentation/devicetree/bindings/iio/temperature/ti,tmp117.yaml
+++ b/Documentation/devicetree/bindings/iio/temperature/ti,tmp117.yaml
@@ -44,8 +44,8 @@ examples:
#size-cells = <0>;
tmp117@48 {
- compatible = "ti,tmp117";
- reg = <0x48>;
- vcc-supply = <&pmic_reg_3v3>;
+ compatible = "ti,tmp117";
+ reg = <0x48>;
+ vcc-supply = <&pmic_reg_3v3>;
};
};
diff --git a/Documentation/devicetree/bindings/mfd/aspeed-lpc.yaml b/Documentation/devicetree/bindings/mfd/aspeed-lpc.yaml
index 5dfe77aca167..d88854e60b7f 100644
--- a/Documentation/devicetree/bindings/mfd/aspeed-lpc.yaml
+++ b/Documentation/devicetree/bindings/mfd/aspeed-lpc.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-# # Copyright (c) 2021 Aspeed Tehchnology Inc.
+# Copyright (c) 2021 Aspeed Technology Inc.
%YAML 1.2
---
$id: http://devicetree.org/schemas/mfd/aspeed-lpc.yaml#
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun50i-a64-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun50i-a64-usb-phy.yaml
index 21209126ed00..580c3296a18d 100644
--- a/Documentation/devicetree/bindings/phy/allwinner,sun50i-a64-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun50i-a64-usb-phy.yaml
@@ -20,7 +20,9 @@ properties:
- allwinner,sun20i-d1-usb-phy
- allwinner,sun50i-a64-usb-phy
- items:
- - const: allwinner,sun50i-a100-usb-phy
+ - enum:
+ - allwinner,sun50i-a100-usb-phy
+ - allwinner,sun55i-a523-usb-phy
- const: allwinner,sun20i-d1-usb-phy
reg:
diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-naneng-combphy.yaml b/Documentation/devicetree/bindings/phy/phy-rockchip-naneng-combphy.yaml
index 1b3de6678c08..888e6b2aac5a 100644
--- a/Documentation/devicetree/bindings/phy/phy-rockchip-naneng-combphy.yaml
+++ b/Documentation/devicetree/bindings/phy/phy-rockchip-naneng-combphy.yaml
@@ -12,6 +12,7 @@ maintainers:
properties:
compatible:
enum:
+ - rockchip,rk3562-naneng-combphy
- rockchip,rk3568-naneng-combphy
- rockchip,rk3576-naneng-combphy
- rockchip,rk3588-naneng-combphy
diff --git a/Documentation/devicetree/bindings/phy/qcom,ipq5332-uniphy-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,ipq5332-uniphy-pcie-phy.yaml
new file mode 100644
index 000000000000..e39168d55d23
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/qcom,ipq5332-uniphy-pcie-phy.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/qcom,ipq5332-uniphy-pcie-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm UNIPHY PCIe 28LP PHY
+
+maintainers:
+ - Nitheesh Sekar <quic_nsekar@quicinc.com>
+ - Varadarajan Narayanan <quic_varada@quicinc.com>
+
+description:
+ PCIe and USB combo PHY found in Qualcomm IPQ5332 SoC
+
+properties:
+ compatible:
+ enum:
+ - qcom,ipq5332-uniphy-pcie-phy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: pcie pipe clock
+ - description: pcie ahb clock
+
+ resets:
+ items:
+ - description: phy reset
+ - description: ahb reset
+ - description: cfg reset
+
+ "#phy-cells":
+ const: 0
+
+ "#clock-cells":
+ const: 0
+
+ num-lanes:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [1, 2]
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - resets
+ - "#phy-cells"
+ - "#clock-cells"
+ - num-lanes
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,ipq5332-gcc.h>
+
+ pcie0_phy: phy@4b0000 {
+ compatible = "qcom,ipq5332-uniphy-pcie-phy";
+ reg = <0x004b0000 0x800>;
+
+ clocks = <&gcc GCC_PCIE3X1_0_PIPE_CLK>,
+ <&gcc GCC_PCIE3X1_PHY_AHB_CLK>;
+
+ resets = <&gcc GCC_PCIE3X1_0_PHY_BCR>,
+ <&gcc GCC_PCIE3X1_PHY_AHB_CLK_ARES>,
+ <&gcc GCC_PCIE3X1_0_PHY_PHY_BCR>;
+
+ #clock-cells = <0>;
+
+ #phy-cells = <0>;
+
+ num-lanes = <1>;
+ };
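+
+ # Hypothetical consumer sketch (not part of the original patch): a PCIe
+ # controller referencing this PHY through the generic PHY bindings; the
+ # bare pcie node and the "pciephy" name are assumptions.
+ - |
+ pcie {
+ phys = <&pcie0_phy>;
+ phy-names = "pciephy";
+ };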
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
index 89391649e0b5..2c6c9296e4c0 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
@@ -17,6 +17,7 @@ properties:
compatible:
enum:
- qcom,qcs615-qmp-gen3x1-pcie-phy
+ - qcom,qcs8300-qmp-gen4x2-pcie-phy
- qcom,sa8775p-qmp-gen4x2-pcie-phy
- qcom,sa8775p-qmp-gen4x4-pcie-phy
- qcom,sar2130p-qmp-gen3x2-pcie-phy
@@ -45,6 +46,7 @@ properties:
- qcom,x1e80100-qmp-gen4x2-pcie-phy
- qcom,x1e80100-qmp-gen4x4-pcie-phy
- qcom,x1e80100-qmp-gen4x8-pcie-phy
+ - qcom,x1p42100-qmp-gen4x4-pcie-phy
reg:
minItems: 1
@@ -124,6 +126,7 @@ allOf:
enum:
- qcom,sc8280xp-qmp-gen3x4-pcie-phy
- qcom,x1e80100-qmp-gen4x4-pcie-phy
+ - qcom,x1p42100-qmp-gen4x4-pcie-phy
then:
properties:
reg:
@@ -180,6 +183,7 @@ allOf:
- qcom,x1e80100-qmp-gen4x2-pcie-phy
- qcom,x1e80100-qmp-gen4x4-pcie-phy
- qcom,x1e80100-qmp-gen4x8-pcie-phy
+ - qcom,x1p42100-qmp-gen4x4-pcie-phy
then:
properties:
clocks:
@@ -192,6 +196,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,qcs8300-qmp-gen4x2-pcie-phy
- qcom,sa8775p-qmp-gen4x2-pcie-phy
- qcom,sa8775p-qmp-gen4x4-pcie-phy
then:
@@ -217,12 +222,6 @@ allOf:
minItems: 2
reset-names:
minItems: 2
- else:
- properties:
- resets:
- maxItems: 1
- reset-names:
- maxItems: 1
- if:
properties:
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
index 72bed2933b03..a58370a6a5d3 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
@@ -44,6 +44,7 @@ properties:
- qcom,sm8475-qmp-ufs-phy
- qcom,sm8550-qmp-ufs-phy
- qcom,sm8650-qmp-ufs-phy
+ - qcom,sm8750-qmp-ufs-phy
reg:
maxItems: 1
@@ -111,6 +112,7 @@ allOf:
- qcom,sm8475-qmp-ufs-phy
- qcom,sm8550-qmp-ufs-phy
- qcom,sm8650-qmp-ufs-phy
+ - qcom,sm8750-qmp-ufs-phy
then:
properties:
clocks:
diff --git a/Documentation/devicetree/bindings/phy/rockchip,rk3588-hdptx-phy.yaml b/Documentation/devicetree/bindings/phy/rockchip,rk3588-hdptx-phy.yaml
index 84fe59dbcf48..7a307f45cdec 100644
--- a/Documentation/devicetree/bindings/phy/rockchip,rk3588-hdptx-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/rockchip,rk3588-hdptx-phy.yaml
@@ -11,8 +11,13 @@ maintainers:
properties:
compatible:
- enum:
- - rockchip,rk3588-hdptx-phy
+ oneOf:
+ - enum:
+ - rockchip,rk3588-hdptx-phy
+ - items:
+ - enum:
+ - rockchip,rk3576-hdptx-phy
+ - const: rockchip,rk3588-hdptx-phy
reg:
maxItems: 1
@@ -34,24 +39,12 @@ properties:
const: 0
resets:
- items:
- - description: PHY reset line
- - description: APB reset line
- - description: INIT reset line
- - description: CMN reset line
- - description: LANE reset line
- - description: ROPLL reset line
- - description: LCPLL reset line
+ minItems: 4
+ maxItems: 7
reset-names:
- items:
- - const: phy
- - const: apb
- - const: init
- - const: cmn
- - const: lane
- - const: ropll
- - const: lcpll
+ minItems: 4
+ maxItems: 7
rockchip,grf:
$ref: /schemas/types.yaml#/definitions/phandle
@@ -67,6 +60,39 @@ required:
- reset-names
- rockchip,grf
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - rockchip,rk3576-hdptx-phy
+ then:
+ properties:
+ resets:
+ minItems: 4
+ maxItems: 4
+ reset-names:
+ items:
+ - const: apb
+ - const: init
+ - const: cmn
+ - const: lane
+ else:
+ properties:
+ resets:
+ minItems: 7
+ maxItems: 7
+ reset-names:
+ items:
+ - const: phy
+ - const: apb
+ - const: init
+ - const: cmn
+ - const: lane
+ - const: ropll
+ - const: lcpll
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/phy/rockchip,rk3588-mipi-dcphy.yaml b/Documentation/devicetree/bindings/phy/rockchip,rk3588-mipi-dcphy.yaml
new file mode 100644
index 000000000000..c8ff5ba22a86
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/rockchip,rk3588-mipi-dcphy.yaml
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/rockchip,rk3588-mipi-dcphy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip MIPI D-/C-PHY with Samsung IP block
+
+maintainers:
+ - Guochun Huang <hero.huang@rock-chips.com>
+ - Heiko Stuebner <heiko@sntech.de>
+
+properties:
+ compatible:
+ enum:
+ - rockchip,rk3576-mipi-dcphy
+ - rockchip,rk3588-mipi-dcphy
+
+ reg:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 1
+ description: |
+ Argument is the mode to operate in. Supported modes are:
+ - PHY_TYPE_DPHY
+ - PHY_TYPE_CPHY
+ See include/dt-bindings/phy/phy.h for constants.
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: pclk
+ - const: ref
+
+ resets:
+ maxItems: 4
+
+ reset-names:
+ items:
+ - const: m_phy
+ - const: apb
+ - const: grf
+ - const: s_phy
+
+ rockchip,grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the syscon managing the 'mipi dcphy general register files'.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/rockchip,rk3588-cru.h>
+ #include <dt-bindings/reset/rockchip,rk3588-cru.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ phy@feda0000 {
+ compatible = "rockchip,rk3588-mipi-dcphy";
+ reg = <0x0 0xfeda0000 0x0 0x10000>;
+ clocks = <&cru PCLK_MIPI_DCPHY0>,
+ <&cru CLK_USBDPPHY_MIPIDCPPHY_REF>;
+ clock-names = "pclk", "ref";
+ resets = <&cru SRST_M_MIPI_DCPHY0>,
+ <&cru SRST_P_MIPI_DCPHY0>,
+ <&cru SRST_P_MIPI_DCPHY0_GRF>,
+ <&cru SRST_S_MIPI_DCPHY0>;
+ reset-names = "m_phy", "apb", "grf", "s_phy";
+ rockchip,grf = <&mipidcphy0_grf>;
+ #phy-cells = <1>;
+ };
+ };
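+
+ # Hypothetical consumer sketch (not part of the original patch): a display
+ # interface selecting D-PHY mode with PHY_TYPE_DPHY from
+ # include/dt-bindings/phy/phy.h; the bare dsi node and the phy label are
+ # assumptions.
+ - |
+ #include <dt-bindings/phy/phy.h>
+
+ dsi {
+ phys = <&mipidcphy0 PHY_TYPE_DPHY>;
+ phy-names = "dcphy";
+ };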
diff --git a/Documentation/devicetree/bindings/phy/samsung,ufs-phy.yaml b/Documentation/devicetree/bindings/phy/samsung,ufs-phy.yaml
index f402e31bf58d..d70ffeb6e824 100644
--- a/Documentation/devicetree/bindings/phy/samsung,ufs-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/samsung,ufs-phy.yaml
@@ -18,6 +18,7 @@ properties:
- google,gs101-ufs-phy
- samsung,exynos7-ufs-phy
- samsung,exynosautov9-ufs-phy
+ - samsung,exynosautov920-ufs-phy
- tesla,fsd-ufs-phy
reg:
diff --git a/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml b/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
index 16321cdd4919..27295acbba76 100644
--- a/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
@@ -83,14 +83,19 @@ properties:
pll-supply:
description: Power supply for the USB PLL.
+
dvdd-usb20-supply:
description: DVDD power supply for the USB 2.0 phy.
+
vddh-usb20-supply:
description: VDDh power supply for the USB 2.0 phy.
+
vdd33-usb20-supply:
description: 3.3V power supply for the USB 2.0 phy.
+
vdda-usbdp-supply:
description: VDDa power supply for the USB DP phy.
+
vddh-usbdp-supply:
description: VDDh power supply for the USB DP phy.
@@ -109,6 +114,8 @@ allOf:
contains:
const: google,gs101-usb31drd-phy
then:
+ $ref: /schemas/usb/usb-switch.yaml#
+
properties:
clocks:
items:
@@ -117,6 +124,7 @@ allOf:
- description: Gate of control interface AXI clock
- description: Gate of control interface APB clock
- description: Gate of SCL APB clock
+
clock-names:
items:
- const: phy
@@ -124,12 +132,17 @@ allOf:
- const: ctrl_aclk
- const: ctrl_pclk
- const: scl_pclk
+
reg:
minItems: 3
+
reg-names:
minItems: 3
+
required:
- reg-names
+ - orientation-switch
+ - port
- pll-supply
- dvdd-usb20-supply
- vddh-usb20-supply
@@ -149,6 +162,7 @@ allOf:
clocks:
minItems: 5
maxItems: 5
+
clock-names:
items:
- const: phy
@@ -156,8 +170,10 @@ allOf:
- const: phy_utmi
- const: phy_pipe
- const: itp
+
reg:
maxItems: 1
+
reg-names:
maxItems: 1
@@ -174,16 +190,19 @@ allOf:
clocks:
minItems: 2
maxItems: 2
+
clock-names:
items:
- const: phy
- const: ref
+
reg:
maxItems: 1
+
reg-names:
maxItems: 1
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/dma.txt b/Documentation/devicetree/bindings/powerpc/fsl/dma.txt
deleted file mode 100644
index c11ad5c6db21..000000000000
--- a/Documentation/devicetree/bindings/powerpc/fsl/dma.txt
+++ /dev/null
@@ -1,204 +0,0 @@
-* Freescale DMA Controllers
-
-** Freescale Elo DMA Controller
- This is a little-endian 4-channel DMA controller, used in Freescale mpc83xx
- series chips such as mpc8315, mpc8349, mpc8379 etc.
-
-Required properties:
-
-- compatible : must include "fsl,elo-dma"
-- reg : DMA General Status Register, i.e. DGSR which contains
- status for all the 4 DMA channels
-- ranges : describes the mapping between the address space of the
- DMA channels and the address space of the DMA controller
-- cell-index : controller index. 0 for controller @ 0x8100
-- interrupts : interrupt specifier for DMA IRQ
-
-- DMA channel nodes:
- - compatible : must include "fsl,elo-dma-channel"
- However, see note below.
- - reg : DMA channel specific registers
- - cell-index : DMA channel index starts at 0.
-
-Optional properties:
- - interrupts : interrupt specifier for DMA channel IRQ
- (on 83xx this is expected to be identical to
- the interrupts property of the parent node)
-
-Example:
- dma@82a8 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8349-dma", "fsl,elo-dma";
- reg = <0x82a8 4>;
- ranges = <0 0x8100 0x1a4>;
- interrupt-parent = <&ipic>;
- interrupts = <71 8>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
- cell-index = <0>;
- reg = <0 0x80>;
- interrupt-parent = <&ipic>;
- interrupts = <71 8>;
- };
- dma-channel@80 {
- compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
- cell-index = <1>;
- reg = <0x80 0x80>;
- interrupt-parent = <&ipic>;
- interrupts = <71 8>;
- };
- dma-channel@100 {
- compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
- cell-index = <2>;
- reg = <0x100 0x80>;
- interrupt-parent = <&ipic>;
- interrupts = <71 8>;
- };
- dma-channel@180 {
- compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
- cell-index = <3>;
- reg = <0x180 0x80>;
- interrupt-parent = <&ipic>;
- interrupts = <71 8>;
- };
- };
-
-** Freescale EloPlus DMA Controller
- This is a 4-channel DMA controller with extended addresses and chaining,
- mainly used in Freescale mpc85xx/86xx, Pxxx and BSC series chips, such as
- mpc8540, mpc8641 p4080, bsc9131 etc.
-
-Required properties:
-
-- compatible : must include "fsl,eloplus-dma"
-- reg : DMA General Status Register, i.e. DGSR which contains
- status for all the 4 DMA channels
-- cell-index : controller index. 0 for controller @ 0x21000,
- 1 for controller @ 0xc000
-- ranges : describes the mapping between the address space of the
- DMA channels and the address space of the DMA controller
-
-- DMA channel nodes:
- - compatible : must include "fsl,eloplus-dma-channel"
- However, see note below.
- - cell-index : DMA channel index starts at 0.
- - reg : DMA channel specific registers
- - interrupts : interrupt specifier for DMA channel IRQ
-
-Example:
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8540-dma", "fsl,eloplus-dma";
- reg = <0x21300 4>;
- ranges = <0 0x21100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,mpc8540-dma-channel", "fsl,eloplus-dma-channel";
- reg = <0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <20 2>;
- };
- dma-channel@80 {
- compatible = "fsl,mpc8540-dma-channel", "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <21 2>;
- };
- dma-channel@100 {
- compatible = "fsl,mpc8540-dma-channel", "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <22 2>;
- };
- dma-channel@180 {
- compatible = "fsl,mpc8540-dma-channel", "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <23 2>;
- };
- };
-
-** Freescale Elo3 DMA Controller
- DMA controller which has same function as EloPlus except that Elo3 has 8
- channels while EloPlus has only 4, it is used in Freescale Txxx and Bxxx
- series chips, such as t1040, t4240, b4860.
-
-Required properties:
-
-- compatible : must include "fsl,elo3-dma"
-- reg : contains two entries for DMA General Status Registers,
- i.e. DGSR0 which includes status for channel 1~4, and
- DGSR1 for channel 5~8
-- ranges : describes the mapping between the address space of the
- DMA channels and the address space of the DMA controller
-
-- DMA channel nodes:
- - compatible : must include "fsl,eloplus-dma-channel"
- - reg : DMA channel specific registers
- - interrupts : interrupt specifier for DMA channel IRQ
-
-Example:
-dma@100300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,elo3-dma";
- reg = <0x100300 0x4>,
- <0x100600 0x4>;
- ranges = <0x0 0x100100 0x500>;
- dma-channel@0 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- interrupts = <28 2 0 0>;
- };
- dma-channel@80 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- interrupts = <29 2 0 0>;
- };
- dma-channel@100 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- interrupts = <30 2 0 0>;
- };
- dma-channel@180 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- interrupts = <31 2 0 0>;
- };
- dma-channel@300 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x300 0x80>;
- interrupts = <76 2 0 0>;
- };
- dma-channel@380 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x380 0x80>;
- interrupts = <77 2 0 0>;
- };
- dma-channel@400 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x400 0x80>;
- interrupts = <78 2 0 0>;
- };
- dma-channel@480 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x480 0x80>;
- interrupts = <79 2 0 0>;
- };
-};
-
-Note on DMA channel compatible properties: The compatible property must say
-"fsl,elo-dma-channel" or "fsl,eloplus-dma-channel" to be used by the Elo DMA
-driver (fsldma). Any DMA channel used by fsldma cannot be used by another
-DMA driver, such as the SSI sound drivers for the MPC8610. Therefore, any DMA
-channel that should be used for another driver should not use
-"fsl,elo-dma-channel" or "fsl,eloplus-dma-channel". For the SSI drivers, for
-example, the compatible property should be "fsl,ssi-dma-channel". See ssi.txt
-for more information.
diff --git a/Documentation/devicetree/bindings/rtc/adi,max31335.yaml b/Documentation/devicetree/bindings/rtc/adi,max31335.yaml
index 0125cf6727cc..bce7558d0d87 100644
--- a/Documentation/devicetree/bindings/rtc/adi,max31335.yaml
+++ b/Documentation/devicetree/bindings/rtc/adi,max31335.yaml
@@ -18,7 +18,9 @@ allOf:
properties:
compatible:
- const: adi,max31335
+ enum:
+ - adi,max31331
+ - adi,max31335
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/rtc/nxp,pcf2127.yaml b/Documentation/devicetree/bindings/rtc/nxp,pcf2127.yaml
index 2d9fe5a75b06..11fcf0ca1ae0 100644
--- a/Documentation/devicetree/bindings/rtc/nxp,pcf2127.yaml
+++ b/Documentation/devicetree/bindings/rtc/nxp,pcf2127.yaml
@@ -8,6 +8,7 @@ title: NXP PCF2127 Real Time Clock
allOf:
- $ref: rtc.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
maintainers:
- Alexandre Belloni <alexandre.belloni@bootlin.com>
@@ -34,7 +35,7 @@ required:
- compatible
- reg
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml b/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml
index d274bb7a534b..68ef3208c886 100644
--- a/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml
@@ -50,6 +50,11 @@ properties:
items:
- const: offset
+ qcom,no-alarm:
+ type: boolean
+ description:
+ RTC alarm is not owned by the OS
+
wakeup-source: true
required:
diff --git a/Documentation/devicetree/bindings/serial/8250.yaml b/Documentation/devicetree/bindings/serial/8250.yaml
index 0bde2379e864..dc0d52920575 100644
--- a/Documentation/devicetree/bindings/serial/8250.yaml
+++ b/Documentation/devicetree/bindings/serial/8250.yaml
@@ -77,7 +77,6 @@ properties:
- altr,16550-FIFO64
- altr,16550-FIFO128
- fsl,16550-FIFO64
- - fsl,ns16550
- andestech,uart16550
- nxp,lpc1850-uart
- opencores,uart16550-rtlsvn105
@@ -86,6 +85,7 @@ properties:
- items:
- enum:
- ns16750
+ - fsl,ns16550
- cavium,octeon-3860-uart
- xlnx,xps-uart16550-2.00.b
- ralink,rt2880-uart
diff --git a/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml b/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml
index 3f9ace89dee9..c42261b5a80a 100644
--- a/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml
+++ b/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml
@@ -30,6 +30,7 @@ properties:
- items:
- enum:
- fsl,imx93-lpuart
+ - fsl,imx94-lpuart
- fsl,imx95-lpuart
- const: fsl,imx8ulp-lpuart
- const: fsl,imx7ulp-lpuart
diff --git a/Documentation/devicetree/bindings/serial/nvidia,tegra264-utc.yaml b/Documentation/devicetree/bindings/serial/nvidia,tegra264-utc.yaml
new file mode 100644
index 000000000000..572cc574da64
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/nvidia,tegra264-utc.yaml
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/serial/nvidia,tegra264-utc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra UTC (UART Trace Controller) client
+
+maintainers:
+ - Kartik Rajput <kkartik@nvidia.com>
+ - Thierry Reding <thierry.reding@gmail.com>
+ - Jonathan Hunter <jonathanh@nvidia.com>
+
+description:
+ Represents a client interface of the Tegra UTC (UART Trace Controller). The
+ Tegra UTC allows multiple clients within the Tegra SoC to share a physical
+ UART interface. It supports up to 16 clients. Each client operates as an
+ independent UART endpoint with a dedicated interrupt and 128-character TX/RX
+ FIFOs.
+
+ The Tegra UTC clients use an 8-N-1 configuration and operate at a baud
+ rate configured by the bootloader at the controller level.
+
+allOf:
+ - $ref: serial.yaml#
+
+properties:
+ compatible:
+ const: nvidia,tegra264-utc
+
+ reg:
+ items:
+ - description: TX region.
+ - description: RX region.
+
+ reg-names:
+ items:
+ - const: tx
+ - const: rx
+
+ interrupts:
+ maxItems: 1
+
+ tx-threshold:
+ minimum: 1
+ maximum: 128
+
+ rx-threshold:
+ minimum: 1
+ maximum: 128
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - tx-threshold
+ - rx-threshold
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ tegra_utc: serial@c4e0000 {
+ compatible = "nvidia,tegra264-utc";
+ reg = <0xc4e0000 0x8000>, <0xc4e8000 0x8000>;
+ reg-names = "tx", "rx";
+ interrupts = <GIC_SPI 514 IRQ_TYPE_LEVEL_HIGH>;
+ tx-threshold = <4>;
+ rx-threshold = <4>;
+ };
diff --git a/Documentation/devicetree/bindings/serial/pl011.yaml b/Documentation/devicetree/bindings/serial/pl011.yaml
index 9571041030b7..3fcf2d042372 100644
--- a/Documentation/devicetree/bindings/serial/pl011.yaml
+++ b/Documentation/devicetree/bindings/serial/pl011.yaml
@@ -92,6 +92,9 @@ properties:
3000ms.
default: 3000
+ power-domains:
+ maxItems: 1
+
resets:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/serial/samsung_uart.yaml b/Documentation/devicetree/bindings/serial/samsung_uart.yaml
index 070eba9f19d3..83d9986d8e98 100644
--- a/Documentation/devicetree/bindings/serial/samsung_uart.yaml
+++ b/Documentation/devicetree/bindings/serial/samsung_uart.yaml
@@ -42,6 +42,10 @@ properties:
- samsung,exynosautov9-uart
- samsung,exynosautov920-uart
- const: samsung,exynos850-uart
+ - items:
+ - enum:
+ - samsung,exynos7870-uart
+ - const: samsung,exynos8895-uart
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
index 1c163cb5dff1..1aa3480d8d81 100644
--- a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
@@ -16,6 +16,20 @@ allOf:
- if:
properties:
compatible:
+ items:
+ - enum:
+ - renesas,r9a06g032-uart
+ - renesas,r9a06g033-uart
+ - const: renesas,rzn1-uart
+ - const: snps,dw-apb-uart
+ then:
+ properties:
+ dmas: false
+ dma-names: false
+
+ - if:
+ properties:
+ compatible:
contains:
const: starfive,jh7110-uart
then:
@@ -35,6 +49,12 @@ properties:
- renesas,r9a06g032-uart
- renesas,r9a06g033-uart
- const: renesas,rzn1-uart
+ - const: snps,dw-apb-uart
+ - items:
+ - enum:
+ - renesas,r9a06g032-uart
+ - renesas,r9a06g033-uart
+ - const: renesas,rzn1-uart
- items:
- enum:
- brcm,bcm11351-dw-apb-uart
@@ -51,6 +71,7 @@ properties:
- rockchip,rk3368-uart
- rockchip,rk3399-uart
- rockchip,rk3528-uart
+ - rockchip,rk3562-uart
- rockchip,rk3568-uart
- rockchip,rk3576-uart
- rockchip,rk3588-uart
diff --git a/Documentation/devicetree/bindings/serial/sprd-uart.yaml b/Documentation/devicetree/bindings/serial/sprd-uart.yaml
index a2a5056eba04..5bf2656afcfd 100644
--- a/Documentation/devicetree/bindings/serial/sprd-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/sprd-uart.yaml
@@ -17,13 +17,18 @@ properties:
oneOf:
- items:
- enum:
- - sprd,sc9632-uart
+ - sprd,ums9632-uart
+ - const: sprd,sc9632-uart
+ - items:
+ - enum:
- sprd,sc9860-uart
- sprd,sc9863a-uart
- sprd,ums512-uart
- sprd,ums9620-uart
- const: sprd,sc9836-uart
- - const: sprd,sc9836-uart
+ - enum:
+ - sprd,sc9632-uart
+ - sprd,sc9836-uart
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml b/Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml
index dad8de900495..3e61689f6dd4 100644
--- a/Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml
+++ b/Documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml
@@ -142,38 +142,38 @@ unevaluatedProperties: false
examples:
- |
thermal-sensor@1f04000 {
- compatible = "allwinner,sun8i-a83t-ths";
- reg = <0x01f04000 0x100>;
- interrupts = <0 31 0>;
- nvmem-cells = <&ths_calibration>;
- nvmem-cell-names = "calibration";
- #thermal-sensor-cells = <1>;
+ compatible = "allwinner,sun8i-a83t-ths";
+ reg = <0x01f04000 0x100>;
+ interrupts = <0 31 0>;
+ nvmem-cells = <&ths_calibration>;
+ nvmem-cell-names = "calibration";
+ #thermal-sensor-cells = <1>;
};
- |
thermal-sensor@1c25000 {
- compatible = "allwinner,sun8i-h3-ths";
- reg = <0x01c25000 0x400>;
- clocks = <&ccu 0>, <&ccu 1>;
- clock-names = "bus", "mod";
- resets = <&ccu 2>;
- interrupts = <0 31 0>;
- nvmem-cells = <&ths_calibration>;
- nvmem-cell-names = "calibration";
- #thermal-sensor-cells = <0>;
+ compatible = "allwinner,sun8i-h3-ths";
+ reg = <0x01c25000 0x400>;
+ clocks = <&ccu 0>, <&ccu 1>;
+ clock-names = "bus", "mod";
+ resets = <&ccu 2>;
+ interrupts = <0 31 0>;
+ nvmem-cells = <&ths_calibration>;
+ nvmem-cell-names = "calibration";
+ #thermal-sensor-cells = <0>;
};
- |
thermal-sensor@5070400 {
- compatible = "allwinner,sun50i-h6-ths";
- reg = <0x05070400 0x100>;
- clocks = <&ccu 0>;
- clock-names = "bus";
- resets = <&ccu 2>;
- interrupts = <0 15 0>;
- nvmem-cells = <&ths_calibration>;
- nvmem-cell-names = "calibration";
- #thermal-sensor-cells = <1>;
+ compatible = "allwinner,sun50i-h6-ths";
+ reg = <0x05070400 0x100>;
+ clocks = <&ccu 0>;
+ clock-names = "bus";
+ resets = <&ccu 2>;
+ interrupts = <0 15 0>;
+ nvmem-cells = <&ths_calibration>;
+ nvmem-cell-names = "calibration";
+ #thermal-sensor-cells = <1>;
};
...
diff --git a/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.yaml b/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.yaml
index 081486b44382..2f62551a49c1 100644
--- a/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.yaml
+++ b/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.yaml
@@ -18,6 +18,7 @@ properties:
compatible:
items:
- enum:
+ - brcm,avs-tmon-bcm74110
- brcm,avs-tmon-bcm7216
- brcm,avs-tmon-bcm7445
- const: brcm,avs-tmon
diff --git a/Documentation/devicetree/bindings/thermal/imx-thermal.yaml b/Documentation/devicetree/bindings/thermal/imx-thermal.yaml
index 337560562337..949b154856c5 100644
--- a/Documentation/devicetree/bindings/thermal/imx-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/imx-thermal.yaml
@@ -80,19 +80,19 @@ examples:
#include <dt-bindings/interrupt-controller/arm-gic.h>
efuse@21bc000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,imx6sx-ocotp", "syscon";
- reg = <0x021bc000 0x4000>;
- clocks = <&clks IMX6SX_CLK_OCOTP>;
-
- tempmon_calib: calib@38 {
- reg = <0x38 4>;
- };
-
- tempmon_temp_grade: temp-grade@20 {
- reg = <0x20 4>;
- };
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,imx6sx-ocotp", "syscon";
+ reg = <0x021bc000 0x4000>;
+ clocks = <&clks IMX6SX_CLK_OCOTP>;
+
+ tempmon_calib: calib@38 {
+ reg = <0x38 4>;
+ };
+
+ tempmon_temp_grade: temp-grade@20 {
+ reg = <0x20 4>;
+ };
};
anatop@20c8000 {
@@ -103,12 +103,12 @@ examples:
<0 127 IRQ_TYPE_LEVEL_HIGH>;
tempmon {
- compatible = "fsl,imx6sx-tempmon";
- interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
- fsl,tempmon = <&anatop>;
- nvmem-cells = <&tempmon_calib>, <&tempmon_temp_grade>;
- nvmem-cell-names = "calib", "temp_grade";
- clocks = <&clks IMX6SX_CLK_PLL3_USB_OTG>;
- #thermal-sensor-cells = <0>;
+ compatible = "fsl,imx6sx-tempmon";
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ fsl,tempmon = <&anatop>;
+ nvmem-cells = <&tempmon_calib>, <&tempmon_temp_grade>;
+ nvmem-cell-names = "calib", "temp_grade";
+ clocks = <&clks IMX6SX_CLK_PLL3_USB_OTG>;
+ #thermal-sensor-cells = <0>;
};
};
diff --git a/Documentation/devicetree/bindings/thermal/imx8mm-thermal.yaml b/Documentation/devicetree/bindings/thermal/imx8mm-thermal.yaml
index bef0e95e7416..df6c7c5d519f 100644
--- a/Documentation/devicetree/bindings/thermal/imx8mm-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/imx8mm-thermal.yaml
@@ -63,10 +63,10 @@ examples:
#include <dt-bindings/clock/imx8mm-clock.h>
thermal-sensor@30260000 {
- compatible = "fsl,imx8mm-tmu";
- reg = <0x30260000 0x10000>;
- clocks = <&clk IMX8MM_CLK_TMU_ROOT>;
- #thermal-sensor-cells = <0>;
+ compatible = "fsl,imx8mm-tmu";
+ reg = <0x30260000 0x10000>;
+ clocks = <&clk IMX8MM_CLK_TMU_ROOT>;
+ #thermal-sensor-cells = <0>;
};
...
diff --git a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
index b9829bb22cc0..f9d8012c8cf5 100644
--- a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
+++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
@@ -75,6 +75,8 @@ properties:
- description: v2 of TSENS with combined interrupt
enum:
+ - qcom,ipq5332-tsens
+ - qcom,ipq5424-tsens
- qcom,ipq8074-tsens
- description: v2 of TSENS with combined interrupt
@@ -212,6 +214,18 @@ properties:
- const: s9_p2_backup
- const: s10_p1_backup
- const: s10_p2_backup
+ - minItems: 8
+ items:
+ - const: mode
+ - const: base0
+ - const: base1
+ - pattern: '^tsens_sens[0-9]+_off$'
+ - pattern: '^tsens_sens[0-9]+_off$'
+ - pattern: '^tsens_sens[0-9]+_off$'
+ - pattern: '^tsens_sens[0-9]+_off$'
+ - pattern: '^tsens_sens[0-9]+_off$'
+ - pattern: '^tsens_sens[0-9]+_off$'
+ - pattern: '^tsens_sens[0-9]+_off$'
"#qcom,sensors":
description:
@@ -271,6 +285,8 @@ allOf:
compatible:
contains:
enum:
+ - qcom,ipq5332-tsens
+ - qcom,ipq5424-tsens
- qcom,ipq8074-tsens
then:
properties:
@@ -286,6 +302,8 @@ allOf:
compatible:
contains:
enum:
+ - qcom,ipq5332-tsens
+ - qcom,ipq5424-tsens
- qcom,ipq8074-tsens
- qcom,tsens-v0_1
- qcom,tsens-v1
diff --git a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
index 0f435be1dbd8..0de0a9757ccc 100644
--- a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
+++ b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
@@ -82,9 +82,8 @@ patternProperties:
$ref: /schemas/types.yaml#/definitions/string
description: |
The action the OS should perform after the critical temperature is reached.
- By default the system will shutdown as a safe action to prevent damage
- to the hardware, if the property is not set.
- The shutdown action should be always the default and preferred one.
+ If the property is not set, it is up to the system to select the correct
+ action. The recommended and preferred default is shutdown.
Choose 'reboot' with care, as the hardware may be in thermal stress,
thus leading to infinite reboots that may cause damage to the hardware.
Make sure the firmware/bootloader will act as the last resort and take
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index 8704b0dfc9c0..8da408107e55 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -193,6 +193,8 @@ properties:
- maxim,max20751
# mCube 3-axis 8-bit digital accelerometer
- mcube,mc3230
+ # mCube 3-axis 8-bit digital accelerometer
+ - mcube,mc3510c
# Measurement Specialities I2C temperature and humidity sensor
- meas,htu21
# Measurement Specialities I2C temperature and humidity sensor
diff --git a/Documentation/devicetree/bindings/usb/generic-xhci.yaml b/Documentation/devicetree/bindings/usb/generic-xhci.yaml
index 6ceafa4af292..a2b94a138999 100644
--- a/Documentation/devicetree/bindings/usb/generic-xhci.yaml
+++ b/Documentation/devicetree/bindings/usb/generic-xhci.yaml
@@ -51,6 +51,8 @@ properties:
- const: core
- const: reg
+ dma-coherent: true
+
power-domains:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/usb/microchip,usb2514.yaml b/Documentation/devicetree/bindings/usb/microchip,usb2514.yaml
index b14e6f37b298..4e3901efed3f 100644
--- a/Documentation/devicetree/bindings/usb/microchip,usb2514.yaml
+++ b/Documentation/devicetree/bindings/usb/microchip,usb2514.yaml
@@ -9,16 +9,19 @@ title: Microchip USB2514 Hub Controller
maintainers:
- Fabio Estevam <festevam@gmail.com>
-allOf:
- - $ref: usb-device.yaml#
-
properties:
compatible:
- enum:
- - usb424,2412
- - usb424,2417
- - usb424,2514
- - usb424,2517
+ oneOf:
+ - enum:
+ - usb424,2412
+ - usb424,2417
+ - usb424,2514
+ - usb424,2517
+ - items:
+ - enum:
+ - usb424,2512
+ - usb424,2513
+ - const: usb424,2514
reg: true
@@ -28,6 +31,9 @@ properties:
vdd-supply:
description: 3.3V power supply.
+ vdda-supply:
+ description: 3.3V analog power supply.
+
clocks:
description: External 24MHz clock connected to the CLKIN pin.
maxItems: 1
@@ -43,6 +49,18 @@ patternProperties:
$ref: /schemas/usb/usb-device.yaml
additionalProperties: true
+allOf:
+ - $ref: usb-device.yaml#
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ const: usb424,2514
+ then:
+ properties:
+ vdda-supply: false
+
unevaluatedProperties: false
examples:
@@ -60,6 +78,7 @@ examples:
clocks = <&clks IMX6QDL_CLK_CKO>;
reset-gpios = <&gpio7 12 GPIO_ACTIVE_LOW>;
vdd-supply = <&reg_3v3_hub>;
+ vdda-supply = <&reg_3v3a_hub>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/usb/parade,ps8830.yaml b/Documentation/devicetree/bindings/usb/parade,ps8830.yaml
new file mode 100644
index 000000000000..935d57f5d26f
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/parade,ps8830.yaml
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/parade,ps8830.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Parade PS883x USB and DisplayPort Retimer
+
+maintainers:
+ - Abel Vesa <abel.vesa@linaro.org>
+
+properties:
+ compatible:
+ enum:
+ - parade,ps8830
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: XO Clock
+
+ reset-gpios:
+ maxItems: 1
+
+ vdd-supply:
+ description: power supply (1.07V)
+
+ vdd33-supply:
+ description: power supply (3.3V)
+
+ vdd33-cap-supply:
+ description: power supply (3.3V)
+
+ vddar-supply:
+ description: power supply (1.07V)
+
+ vddat-supply:
+ description: power supply (1.07V)
+
+ vddio-supply:
+ description: power supply (1.2V or 1.8V)
+
+ orientation-switch: true
+ retimer-switch: true
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Super Speed (SS) Output endpoint to the Type-C connector
+
+ port@1:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ description: Super Speed (SS) Input endpoint from the Super-Speed PHY
+ unevaluatedProperties: false
+
+ port@2:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Sideband Use (SBU) AUX lines endpoint to the Type-C connector for the purpose of
+ handling altmode muxing and orientation switching.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - reset-gpios
+ - vdd-supply
+ - vdd33-supply
+ - vdd33-cap-supply
+ - vddat-supply
+ - vddio-supply
+ - orientation-switch
+ - retimer-switch
+
+allOf:
+ - $ref: usb-switch.yaml#
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ typec-mux@8 {
+ compatible = "parade,ps8830";
+ reg = <0x8>;
+
+ clocks = <&clk_rtmr_xo>;
+
+ vdd-supply = <&vreg_rtmr_1p15>;
+ vdd33-supply = <&vreg_rtmr_3p3>;
+ vdd33-cap-supply = <&vreg_rtmr_3p3>;
+ vddar-supply = <&vreg_rtmr_1p15>;
+ vddat-supply = <&vreg_rtmr_1p15>;
+ vddio-supply = <&vreg_rtmr_1p8>;
+
+ reset-gpios = <&tlmm 10 GPIO_ACTIVE_LOW>;
+
+ retimer-switch;
+ orientation-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ endpoint {
+ remote-endpoint = <&typec_con_ss>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ endpoint {
+ remote-endpoint = <&usb_phy_ss>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ endpoint {
+ remote-endpoint = <&typec_dp_aux>;
+ };
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
index a2b3cf625e5b..64137c1619a6 100644
--- a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
@@ -404,6 +404,7 @@ allOf:
minItems: 2
maxItems: 3
interrupt-names:
+ minItems: 2
items:
- const: pwr_event
- const: qusb2_phy
@@ -425,6 +426,7 @@ allOf:
minItems: 3
maxItems: 4
interrupt-names:
+ minItems: 3
items:
- const: pwr_event
- const: qusb2_phy
diff --git a/Documentation/devicetree/bindings/usb/richtek,rt1711h.yaml b/Documentation/devicetree/bindings/usb/richtek,rt1711h.yaml
index 8da4d2ad1a91..ae611f7e57ca 100644
--- a/Documentation/devicetree/bindings/usb/richtek,rt1711h.yaml
+++ b/Documentation/devicetree/bindings/usb/richtek,rt1711h.yaml
@@ -30,6 +30,9 @@ properties:
interrupts:
maxItems: 1
+ vbus-supply:
+ description: VBUS power supply
+
wakeup-source:
type: boolean
diff --git a/Documentation/devicetree/bindings/usb/rockchip,dwc3.yaml b/Documentation/devicetree/bindings/usb/rockchip,dwc3.yaml
index a21cc098542d..fba2cb05ecba 100644
--- a/Documentation/devicetree/bindings/usb/rockchip,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/rockchip,dwc3.yaml
@@ -26,6 +26,7 @@ select:
contains:
enum:
- rockchip,rk3328-dwc3
+ - rockchip,rk3562-dwc3
- rockchip,rk3568-dwc3
- rockchip,rk3576-dwc3
- rockchip,rk3588-dwc3
@@ -37,6 +38,7 @@ properties:
items:
- enum:
- rockchip,rk3328-dwc3
+ - rockchip,rk3562-dwc3
- rockchip,rk3568-dwc3
- rockchip,rk3576-dwc3
- rockchip,rk3588-dwc3
@@ -72,6 +74,7 @@ properties:
- enum:
- grf_clk
- utmi
+ - pipe
- const: pipe
power-domains:
@@ -115,6 +118,22 @@ allOf:
properties:
compatible:
contains:
+ const: rockchip,rk3562-dwc3
+ then:
+ properties:
+ clocks:
+ minItems: 4
+ maxItems: 4
+ clock-names:
+ items:
+ - const: ref_clk
+ - const: suspend_clk
+ - const: bus_clk
+ - const: pipe
+ - if:
+ properties:
+ compatible:
+ contains:
enum:
- rockchip,rk3568-dwc3
- rockchip,rk3576-dwc3
diff --git a/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml b/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml
index 2b3430cebe99..256bee2a03ca 100644
--- a/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml
@@ -11,12 +11,17 @@ maintainers:
properties:
compatible:
- enum:
- - google,gs101-dwusb3
- - samsung,exynos5250-dwusb3
- - samsung,exynos5433-dwusb3
- - samsung,exynos7-dwusb3
- - samsung,exynos850-dwusb3
+ oneOf:
+ - enum:
+ - google,gs101-dwusb3
+ - samsung,exynos5250-dwusb3
+ - samsung,exynos5433-dwusb3
+ - samsung,exynos7-dwusb3
+ - samsung,exynos7870-dwusb3
+ - samsung,exynos850-dwusb3
+ - items:
+ - const: samsung,exynos990-dwusb3
+ - const: samsung,exynos850-dwusb3
'#address-cells':
const: 1
@@ -52,7 +57,6 @@ required:
- clock-names
- ranges
- '#size-cells'
- - vdd10-supply
- vdd33-supply
allOf:
@@ -72,6 +76,8 @@ allOf:
- const: susp_clk
- const: link_aclk
- const: link_pclk
+ required:
+ - vdd10-supply
- if:
properties:
@@ -86,6 +92,8 @@ allOf:
clock-names:
items:
- const: usbdrd30
+ required:
+ - vdd10-supply
- if:
properties:
@@ -103,6 +111,8 @@ allOf:
- const: susp_clk
- const: phyclk
- const: pipe_pclk
+ required:
+ - vdd10-supply
- if:
properties:
@@ -119,6 +129,24 @@ allOf:
- const: usbdrd30
- const: usbdrd30_susp_clk
- const: usbdrd30_axius_clk
+ required:
+ - vdd10-supply
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos7870-dwusb3
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ maxItems: 3
+ clock-names:
+ items:
+ - const: bus_early
+ - const: ref
+ - const: ctrl
- if:
properties:
@@ -134,6 +162,8 @@ allOf:
items:
- const: bus_early
- const: ref
+ required:
+ - vdd10-supply
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/usb/snps,dwc3-common.yaml b/Documentation/devicetree/bindings/usb/snps,dwc3-common.yaml
index c956053fd036..71249b6ba616 100644
--- a/Documentation/devicetree/bindings/usb/snps,dwc3-common.yaml
+++ b/Documentation/devicetree/bindings/usb/snps,dwc3-common.yaml
@@ -65,6 +65,17 @@ properties:
mode.
type: boolean
+ snps,reserved-endpoints:
+ description:
+ Reserve endpoints for other needs, e.g., for tracing control and output.
+ When set, the driver will avoid using them for regular USB transfers.
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ minItems: 1
+ maxItems: 30
+ items:
+ minimum: 2
+ maximum: 31
+
snps,dis-start-transfer-quirk:
description:
When set, disable isoc START TRANSFER command failure SW work-around
diff --git a/Documentation/devicetree/bindings/usb/usb-device.yaml b/Documentation/devicetree/bindings/usb/usb-device.yaml
index da890ee60ce6..c67695681033 100644
--- a/Documentation/devicetree/bindings/usb/usb-device.yaml
+++ b/Documentation/devicetree/bindings/usb/usb-device.yaml
@@ -39,8 +39,10 @@ properties:
reg:
description: the number of the USB hub port or the USB host-controller
- port to which this device is attached. The range is 1-255.
- maxItems: 1
+ port to which this device is attached.
+ items:
+ - minimum: 1
+ maximum: 255
"#address-cells":
description: should be 1 for hub nodes with device nodes,
diff --git a/Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml b/Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml
index 64c8f7393809..b35ac03d5172 100644
--- a/Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/allwinner,sun4i-a10-wdt.yaml
@@ -32,6 +32,7 @@ properties:
- items:
- const: allwinner,sun20i-d1-wdt-reset
- const: allwinner,sun20i-d1-wdt
+ - const: allwinner,sun55i-a523-wdt
reg:
maxItems: 1
@@ -60,6 +61,7 @@ if:
- allwinner,sun20i-d1-wdt-reset
- allwinner,sun50i-r329-wdt
- allwinner,sun50i-r329-wdt-reset
+ - allwinner,sun55i-a523-wdt
then:
properties:
diff --git a/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml b/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml
index a09686b3030d..6ec391b9723a 100644
--- a/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml
@@ -22,6 +22,10 @@ properties:
- const: fsl,imx8ulp-wdt
- const: fsl,imx7ulp-wdt
- const: fsl,imx93-wdt
+ - items:
+ - enum:
+ - fsl,imx94-wdt
+ - const: fsl,imx93-wdt
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
index 29ada89fdcdc..3e0a8747a357 100644
--- a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
@@ -75,6 +75,10 @@ properties:
- renesas,r8a779h0-wdt # R-Car V4M
- const: renesas,rcar-gen4-wdt # R-Car Gen4
+ - items:
+ - const: renesas,r9a09g047-wdt # RZ/G3E
+ - const: renesas,r9a09g057-wdt # RZ/V2H(P)
+
- const: renesas,r9a09g057-wdt # RZ/V2H(P)
reg:
diff --git a/Documentation/driver-api/cxl/maturity-map.rst b/Documentation/driver-api/cxl/maturity-map.rst
index df8e2ac2a320..a2288f9df658 100644
--- a/Documentation/driver-api/cxl/maturity-map.rst
+++ b/Documentation/driver-api/cxl/maturity-map.rst
@@ -130,7 +130,7 @@ Mailbox commands
* [0] Switch CCI
* [3] Timestamp
* [1] PMEM labels
-* [0] PMEM GPF / Dirty Shutdown
+* [3] PMEM GPF / Dirty Shutdown
* [0] Scan Media
PMU
diff --git a/Documentation/driver-api/phy/phy.rst b/Documentation/driver-api/phy/phy.rst
index 81785c084f3e..719a2b3fd2ab 100644
--- a/Documentation/driver-api/phy/phy.rst
+++ b/Documentation/driver-api/phy/phy.rst
@@ -198,8 +198,7 @@ pm_runtime_get_sync of PHY provider device because of parent-child relationship.
It should also be noted that phy_power_on and phy_power_off performs
phy_pm_runtime_get_sync and phy_pm_runtime_put respectively.
There are exported APIs like phy_pm_runtime_get, phy_pm_runtime_get_sync,
-phy_pm_runtime_put, phy_pm_runtime_put_sync, phy_pm_runtime_allow and
-phy_pm_runtime_forbid for performing PM operations.
+phy_pm_runtime_put and phy_pm_runtime_put_sync for performing PM operations.
PHY Mappings
============
diff --git a/Documentation/driver-api/pps.rst b/Documentation/driver-api/pps.rst
index 71ad04c82d6c..598729f9cd27 100644
--- a/Documentation/driver-api/pps.rst
+++ b/Documentation/driver-api/pps.rst
@@ -206,8 +206,7 @@ To do so the class pps-gen has been added. PPS generators can be
registered in the kernel by defining a struct pps_gen_source_info as
follows::
- static struct pps_gen_source_info pps_gen_dummy_info = {
- .name = "dummy",
+ static const struct pps_gen_source_info pps_gen_dummy_info = {
.use_system_clock = true,
.get_time = pps_gen_dummy_get_time,
.enable = pps_gen_dummy_enable,
@@ -286,3 +285,27 @@ delay between assert and clear edge as small as possible to reduce system
latencies. But if it is too small slave won't be able to capture clear edge
transition. The default of 30us should be good enough in most situations.
The delay can be selected using 'delay' pps_gen_parport module parameter.
+
+
+Intel Timed I/O PPS signal generator
+------------------------------------
+
+Intel Timed I/O is a high-precision device, present on 2019 and newer Intel
+CPUs, that can generate PPS signals.
+
+Timed I/O and system time are both driven by the same hardware clock. The
+signal is generated with a precision of ~20 nanoseconds. The generated PPS
+signal is used to synchronize an external device with the system clock. For
+example, it can be used to share your clock with a device that receives the
+PPS signal generated by the Timed I/O device. Dedicated Timed I/O pins
+deliver the PPS signal to the external device.
+
+Usage of Intel Timed I/O as PPS generator:
+
+Start generating PPS signal::
+
+ $ echo 1 > /sys/class/pps-gen/pps-genx/enable
+
+Stop generating PPS signal::
+
+ $ echo 0 > /sys/class/pps-gen/pps-genx/enable
diff --git a/Documentation/driver-api/serial/driver.rst b/Documentation/driver-api/serial/driver.rst
index 84b43061c11b..fa1ebfcd4472 100644
--- a/Documentation/driver-api/serial/driver.rst
+++ b/Documentation/driver-api/serial/driver.rst
@@ -101,6 +101,6 @@ Modem control lines via GPIO
Some helpers are provided in order to set/get modem control lines via GPIO.
.. kernel-doc:: drivers/tty/serial/serial_mctrl_gpio.c
- :identifiers: mctrl_gpio_init mctrl_gpio_free mctrl_gpio_to_gpiod
+ :identifiers: mctrl_gpio_init mctrl_gpio_to_gpiod
mctrl_gpio_set mctrl_gpio_get mctrl_gpio_enable_ms
- mctrl_gpio_disable_ms
+ mctrl_gpio_disable_ms_sync mctrl_gpio_disable_ms_no_sync
diff --git a/Documentation/driver-api/soundwire/bra.rst b/Documentation/driver-api/soundwire/bra.rst
new file mode 100644
index 000000000000..8500253fa3e8
--- /dev/null
+++ b/Documentation/driver-api/soundwire/bra.rst
@@ -0,0 +1,336 @@
+==========================
+Bulk Register Access (BRA)
+==========================
+
+Conventions
+-----------
+
+Capitalized words used in this documentation are intentional and refer
+to concepts of the SoundWire 1.x specification.
+
+Introduction
+------------
+
+The SoundWire 1.x specification provides a mechanism to speed-up
+command/control transfers by reclaiming parts of the audio
+bandwidth. The Bulk Register Access (BRA) protocol is a standard
+solution based on the Bulk Payload Transport (BPT) definitions.
+
+The regular control channel uses Column 0 and can only send/retrieve
+one byte per frame with write/read commands. With a typical 48kHz
+frame rate, only 48kB/s can be transferred.
+
+The optional Bulk Register Access capability can transmit up to 12
+Mbits/s and reduce transfer times by several orders of magnitude, but
+has multiple design constraints:
+
+ (1) Each frame can only support a read or a write transfer, with a
+ 10-byte overhead per frame (header and footer response).
+
+ (2) The read/writes SHALL be from/to contiguous register addresses
+ in the same frame. A fragmented register space decreases the
+ efficiency of the protocol by requiring multiple BRA transfers
+ scheduled in different frames.
+
+ (3) The targeted Peripheral device SHALL support the optional Data
+ Port 0, and likewise the Manager SHALL expose audio-like Ports
+ to insert BRA packets in the audio payload using the concepts of
+ Sample Interval, HSTART, HSTOP, etc.
+
+ (4) The BRA transport efficiency depends on the available
+ bandwidth. If there are no ongoing audio transfers, the entire
+ frame minus Column 0 can be reclaimed for BRA. The frame shape
+ also impacts efficiency: since Column 0 cannot be used for
+ BPT/BRA, the frame should rely on a large number of columns and
+ minimize the number of rows. The bus clock should be as high as
+ possible.
+
+ (5) The number of bits transferred per frame SHALL be a multiple of
+ 8 bits. Padding bits SHALL be inserted if necessary at the end
+ of the data.
+
+ (6) The regular read/write commands can be issued in parallel with
+ BRA transfers. This is convenient to e.g. deal with alerts, jack
+ detection or change the volume during firmware download, but
+ accessing the same address with two independent protocols must
+ be avoided to prevent undefined behavior.
+
+ (7) Some implementations may not be capable of handling the
+ bandwidth of the BRA protocol, e.g. in the case of a slow I2C
+ bus behind the SoundWire IP. In this case, the transfers may
+ need to be spaced in time or flow-controlled.
+
+ (8) Each BRA packet SHALL be marked as 'Active' when valid data is
+ to be transmitted. This allows software to allocate a BRA
+ stream but defer transmitting, or discard data, while processing
+ the results or preparing the next batch of data, or to let the
+ peripheral deal with the previous transfer. In addition, a BRA
+ transfer can be started early without the data being ready.
+
+ (9) Up to 470 bytes may be transmitted per frame.
+
+ (10) The address is represented with 32 bits and does not rely on
+ the paging registers used for the regular command/control
+ protocol in Column 0.
+
+
+Error checking
+--------------
+
+Firmware download is one of the key usages of the Bulk Register Access
+protocol. To make sure the binary data integrity is not compromised by
+transmission or programming errors, each BRA packet provides:
+
+ (1) A CRC on the 7-byte header. This CRC helps the Peripheral Device
+ check if it is addressed and set the start address and number of
+ bytes. The Peripheral Device provides a response in Byte 7.
+
+ (2) A CRC on the data block (header excluded). This CRC is
+ transmitted as the last-but-one byte in the packet, prior to the
+ footer response.
+
+The header response can be one of:
+ (a) Ack
+ (b) Nak
+ (c) Not Ready
+
+The footer response can be one of:
+ (1) Ack
+ (2) Nak (CRC failure)
+ (3) Good (operation completed)
+ (4) Bad (operation failed)
+
+Example frame
+-------------
+
+The example below is not to scale and makes simplifying assumptions
+for clarity. The different chunks in the BRA packets are not required
+to start on a new SoundWire Row, and the scale of data may vary.
+
+ ::
+
+ +---+--------------------------------------------+
+ + | |
+ + | BRA HEADER |
+ + | |
+ + +--------------------------------------------+
+ + C | HEADER CRC |
+ + O +--------------------------------------------+
+ + M | HEADER RESPONSE |
+ + M +--------------------------------------------+
+ + A | |
+ + N | |
+ + D | DATA |
+ + | |
+ + | |
+ + | |
+ + +--------------------------------------------+
+ + | DATA CRC |
+ + +--------------------------------------------+
+ + | FOOTER RESPONSE |
+ +---+--------------------------------------------+
+
+
+Assuming the frame uses N columns, the configuration shown above can
+be programmed by setting the DP0 registers as:
+
+ - HSTART = 1
+ - HSTOP = N - 1
+ - Sampling Interval = N
+ - WordLength = N - 1
+
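+As an illustration, the derivation can be captured in a small helper.
+This is a sketch only: the structure and function are hypothetical, and
+writing the values to the standard DP0 registers is left to
+Manager-specific code.
+
+.. code-block:: c
+
+   struct sdw_bra_dp0_cfg {
+           unsigned int hstart;
+           unsigned int hstop;
+           unsigned int sample_interval;
+           unsigned int word_length;
+   };
+
+   /* Derive DP0 framing for a full-frame BRA payload, n_cols = N */
+   static void sdw_bra_dp0_cfg_init(struct sdw_bra_dp0_cfg *cfg,
+                                    unsigned int n_cols)
+   {
+           cfg->hstart = 1;                /* Column 0 carries commands */
+           cfg->hstop = n_cols - 1;        /* last column of the frame */
+           cfg->sample_interval = n_cols;  /* one packet chunk per row */
+           cfg->word_length = n_cols - 1;  /* all columns except Column 0 */
+   }
+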
+Addressing restrictions
+-----------------------
+
+The Device Number specified in the Header follows the SoundWire
+definitions, and broadcast and group addressing are permitted. For now
+the Linux implementation only allows for a single BPT transfer to a
+single device at a time. This might be revisited at a later point as
+an optimization to send the same firmware to multiple devices, but
+this would only be beneficial for single-link solutions.
+
+In the case of multiple Peripheral devices attached to different
+Managers, the broadcast and group addressing is not supported by the
+SoundWire specification. Each device must be handled with separate BRA
+streams, possibly in parallel - the links are really independent.
+
+Unsupported features
+--------------------
+
+The Bulk Register Access specification provides a number of
+capabilities that are not supported in known implementations, such as:
+
+ (1) Transfers initiated by a Peripheral Device. The BRA Initiator is
+ always the Manager Device.
+
+ (2) Flow-control capabilities and retransmission based on the
+ 'NotReady' header response require extra buffering in the
+ SoundWire IP and are not implemented.
+
+Bi-directional handling
+-----------------------
+
+The BRA protocol can handle writes as well as reads, and in each
+packet the header and footer response are provided by the Peripheral
+Target device. On the Peripheral device, the BRA protocol is handled
+by a single DP0 data port, and at the low level the bus ownership
+will change for the header/footer responses as well as the data
+transmitted during a read.
+
+On the host side, most implementations rely on a Port-like concept,
+with two FIFOs consuming/generating data transfers in parallel
+(Host->Peripheral and Peripheral->Host). The amount of data
+consumed/produced by these FIFOs is not symmetrical; as a result,
+hardware typically inserts markers to help software and hardware
+interpret the raw data.
+
+Each packet will typically have:
+
+ (1) a 'Start of Packet' indicator.
+
+ (2) an 'End of Packet' indicator.
+
+ (3) a packet identifier to correlate the data requested and
+ transmitted, and the error status for each frame.
+
+Hardware implementations can check errors at the frame level, and
+retry a transfer in case of errors. However, as for the flow-control
+case, this requires extra buffering and intelligence in the
+hardware. The Linux support assumes that the entire transfer is
+cancelled if a single error is detected in one of the responses.
+
+Abstraction required
+~~~~~~~~~~~~~~~~~~~~
+
+There are no standard registers or mandatory implementation at the
+Manager level, so the low-level BPT/BRA details must be hidden in
+Manager-specific code. For example, the Cadence IP format described in
+bra_cadence.rst is not known to the codec drivers.
+
+Likewise, codec drivers should not have to know the frame size. The
+computation of CRC and handling of responses is handled in helpers and
+Manager-specific code.
+
+The host BRA driver may also have restrictions on pages allocated for
+DMA, or other host-DSP communication protocols. The codec driver
+should not be aware of any of these restrictions, since it might be
+reused in combination with different implementations of Manager IPs.
+
+Concurrency between BRA and regular read/write
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The existing 'nread/nwrite' API already relies on a notion of start
+address and number of bytes, so it would be possible to extend this
+API with a 'hint' requesting BPT/BRA be used.
+
+However BRA transfers could be quite long, and the use of a single
+mutex for regular read/write and BRA is a show-stopper. Independent
+operation of the control/command and BRA transfers is a fundamental
+requirement, e.g. to change the volume level with the existing regmap
+interface while downloading firmware. The integration must however
+ensure that there are no concurrent accesses to the same address with
+the command/control protocol and the BRA protocol.
+
+In addition, the 'sdw_msg' structure hard-codes support for 16-bit
+addresses and paging registers which are irrelevant for BPT/BRA
+support based on native 32-bit addresses. A separate API with
+'sdw_bpt_msg' makes more sense.
+
+One possible strategy to speed up all initialization tasks would be to
+start a BRA transfer for firmware download, then deal with all the
+"regular" read/writes in parallel on the command channel, and finally
+wait for the BRA transfers to complete. This would allow for a
+degree of overlap instead of a purely sequential solution. As such,
+the BRA API must support async transfers and expose a separate wait
+function.
+
+
+Peripheral/bus interface
+------------------------
+
+The bus interface for BPT/BRA is made of two functions:
+
+ - sdw_bpt_send_async(bpt_message)
+
+ This function sends the data using the Manager
+ implementation-defined capabilities (typically DMA or IPC
+ protocol).
+
+ Queueing is currently not supported; the caller
+ needs to wait for completion of the requested transfer.
+
+ - sdw_bpt_wait()
+
+ This function waits for the entire message provided by the
+ codec driver in the 'send_async' stage. Intermediate status for
+ smaller chunks will not be provided back to the codec driver,
+ only a return code will be provided.
+
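+The intended call flow is sketched below. The prototypes and the
+'sdw_bpt_msg' field names are simplified assumptions for illustration,
+not the authoritative API.
+
+.. code-block:: c
+
+   /* Firmware download over BRA; the command channel stays usable */
+   static int codec_fw_download(struct sdw_slave *slave, u32 addr,
+                                u8 *buf, size_t len)
+   {
+           struct sdw_bpt_msg msg = {};
+           int ret;
+
+           msg.addr = addr;        /* native 32-bit address, no paging */
+           msg.len = len;
+           msg.buf = buf;
+           msg.flags = SDW_MSG_FLAG_WRITE;
+
+           ret = sdw_bpt_send_async(slave->bus, slave, &msg);
+           if (ret)
+                   return ret;
+
+           /* regular read/writes, e.g. volume updates, may happen here */
+
+           return sdw_bpt_wait(slave->bus, slave, &msg);
+   }
+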
+Regmap use
+~~~~~~~~~~
+
+Existing codec drivers rely on regmap to download firmware to
+Peripherals. regmap exposes an async interface similar to the
+send/wait API suggested above, so at a high level it would seem
+natural to combine BRA and regmap. The regmap layer could check if BRA
+is available or not, and use a regular read-write command channel in
+the latter case.
+
+The regmap integration will be handled in a second step.
+
+BRA stream model
+----------------
+
+For regular audio transfers, the machine driver exposes a dailink
+connecting CPU DAI(s) and Codec DAI(s).
+
+This model is not required for BRA support:
+
+ (1) The SoundWire DAIs are mainly wrappers for SoundWire Data
+ Ports, with possibly some analog or audio conversion
+ capabilities bolted behind the Data Port. In the context of
+ BRA, the DP0 is the destination. DP0 registers are standard and
+ can be programmed blindly without knowing what Peripheral is
+ connected to each link. In addition, if there are multiple
+ Peripherals on a link and some of them do not support DP0, the
+ write commands to program DP0 registers will generate harmless
+ COMMAND_IGNORED responses that will be wired-ORed with
+ responses from Peripherals which support DP0. In other words,
+ the DP0 programming can be done with broadcast commands, and
+ the information on the Target device can be added only in the
+ BRA Header.
+
+ (2) At the CPU level, the DAI concept is not useful for BRA; the
+ machine driver will not create a dailink relying on DP0. The
+ only concept that is needed is the notion of port.
+
+ (3) The stream concept relies on a set of master_rt and slave_rt
+ concepts. All of these entities represent ports and not DAIs.
+
+ (4) With the assumption that a single BRA stream is used per link,
+ that stream can connect master ports as well as all peripheral
+ DP0 ports.
+
+ (5) BRA transfers only make sense in the context of one
+ Manager/Link, so the BRA stream handling does not rely on the
+ concept of multi-link aggregation allowed by regular DAI links.
+
+Audio DMA support
+-----------------
+
+Some DMAs, such as HDaudio, require an audio format field to be
+set. This format is in turn used to define acceptable bursts. BPT/BRA
+support is not fully compatible with these definitions in that the
+format and bandwidth may vary between read and write commands.
+
+In addition, on Intel HDaudio platforms the DMAs need to be
+programmed with a PCM format matching the bandwidth of the BPT/BRA
+transfer. The format is based on 192kHz 32-bit samples, and the number
+of channels varies to adjust the bandwidth. The notion of channel is
+purely conventional here, since the data is not typical audio
+PCM. Programming such channels helps reserve enough bandwidth and adjust
+FIFO sizes to avoid xruns.
+
+Alignment requirements are currently not enforced at the core level
+but at the platform level; e.g., for Intel the data sizes must be
+multiples of 32 bytes.
diff --git a/Documentation/driver-api/soundwire/bra_cadence.rst b/Documentation/driver-api/soundwire/bra_cadence.rst
new file mode 100644
index 000000000000..4560752e8f47
--- /dev/null
+++ b/Documentation/driver-api/soundwire/bra_cadence.rst
@@ -0,0 +1,66 @@
+Cadence IP BRA support
+----------------------
+
+Format requirements
+~~~~~~~~~~~~~~~~~~~
+
+The Cadence IP relies on PDI0 for TX and PDI1 for RX. The data needs
+to be formatted with the following conventions:
+
+ (1) all Data is stored in bits 15..0 of the 32-bit PDI FIFOs.
+
+ (2) the start of packet is BIT(31).
+
+ (3) the end of packet is BIT(30).
+
+ (4) A packet ID is stored in bits 19..16. This packet ID is
+ determined by software and is typically a rolling counter.
+
+ (5) Padding shall be inserted as needed so that the Header CRC,
+ Header response, Footer CRC, Footer response are always in
+ Byte0. Padding is inserted by software for writes, and on reads
+ software shall discard the padding added by the hardware.
+
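+The conventions above boil down to a simple bit layout. The helper below
+is a sketch (the function name is hypothetical; the Linux Cadence driver
+keeps its own internal equivalents):
+
+.. code-block:: c
+
+   /* Pack two payload bytes into one 32-bit PDI0 word */
+   static u32 cdns_bra_pack_word(u8 byte0, u8 byte1, u8 pkt_id,
+                                 bool sop, bool eop)
+   {
+           u32 word = byte0 | (byte1 << 8);   /* Data in bits 15..0 */
+
+           word |= (pkt_id & 0xf) << 16;      /* packet ID in bits 19..16 */
+           if (sop)
+                   word |= BIT(31);           /* start of packet */
+           if (eop)
+                   word |= BIT(30);           /* end of packet */
+
+           return word;
+   }
+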
+Example format
+~~~~~~~~~~~~~~
+
+The following table represents the sequence provided to PDI0 for a
+write command followed by a read command.
+
+::
+
+ +---+---+--------+---------------+---------------+
+ + 1 | 0 | ID = 0 | WR HDR[1] | WR HDR[0] |
+ + | | | WR HDR[3] | WR HDR[2] |
+ + | | | WR HDR[5] | WR HDR[4] |
+ + | | | pad | WR HDR CRC |
+ + | | | WR Data[1] | WR Data[0] |
+ + | | | WR Data[3] | WR Data[2] |
+ + | | | WR Data[n-2] | WR Data[n-3] |
+ + | | | pad | WR Data[n-1] |
+ + 0 | 1 | | pad | WR Data CRC |
+ +---+---+--------+---------------+---------------+
+ + 1 | 0 | ID = 1 | RD HDR[1] | RD HDR[0] |
+ + | | | RD HDR[3] | RD HDR[2] |
+ + | | | RD HDR[5] | RD HDR[4] |
+ + 0 | 1 | | pad | RD HDR CRC |
+ +---+---+--------+---------------+---------------+
+
+
+The table below represents the data received on PDI1 for the same
+write command followed by a read command.
+
+::
+
+ +---+---+--------+---------------+---------------+
+ + 1 | 0 | ID = 0 | pad | WR Hdr Rsp |
+ + 0 | 1 | | pad | WR Ftr Rsp |
+ +---+---+--------+---------------+---------------+
+ + 1 | 0 | ID = 0 | pad | Rd Hdr Rsp |
+ + | | | RD Data[1] | RD Data[0] |
+ + | | | RD Data[3] | RD Data[2] |
+ | | | RD Data[n-2] | RD Data[n-3] |
+ + | | | pad | RD Data[n-1] |
+ + | | | pad | RD Data CRC |
+ + 0 | 1 | | pad | RD Ftr Rsp |
+ +---+---+--------+---------------+---------------+
diff --git a/Documentation/driver-api/soundwire/index.rst b/Documentation/driver-api/soundwire/index.rst
index 234911a0db99..ef8d90dfbdde 100644
--- a/Documentation/driver-api/soundwire/index.rst
+++ b/Documentation/driver-api/soundwire/index.rst
@@ -9,6 +9,8 @@ SoundWire Documentation
stream
error_handling
locking
+ bra
+ bra_cadence
.. only:: subproject and html
diff --git a/Documentation/driver-api/soundwire/stream.rst b/Documentation/driver-api/soundwire/stream.rst
index 2a794484f62c..d66201299633 100644
--- a/Documentation/driver-api/soundwire/stream.rst
+++ b/Documentation/driver-api/soundwire/stream.rst
@@ -291,7 +291,7 @@ per stream. From ASoC DPCM framework, this stream state maybe linked to
.. code-block:: c
- int sdw_alloc_stream(char * stream_name);
+ struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name,
+ enum sdw_stream_type type);
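+
+A usage sketch follows; the BPT enumerator name is an assumption here,
+check the enum sdw_stream_type definition for the authoritative value.
+
+.. code-block:: c
+
+   /* allocate a stream runtime dedicated to BPT/BRA transfers */
+   stream = sdw_alloc_stream("bpt-fw-download", SDW_STREAM_BPT);
+   if (!stream)
+           return -ENOMEM;
+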
The SoundWire core provides a sdw_startup_stream() helper function,
typically called during a dailink .startup() callback, which performs
diff --git a/Documentation/driver-api/soundwire/summary.rst b/Documentation/driver-api/soundwire/summary.rst
index 01dcb954f6d7..df78053743b5 100644
--- a/Documentation/driver-api/soundwire/summary.rst
+++ b/Documentation/driver-api/soundwire/summary.rst
@@ -184,14 +184,6 @@ function that provides capabilities information. Bus needs to know a set of
Slave capabilities to program Slave registers and to control the Bus
reconfigurations.
-Future enhancements to be done
-==============================
-
- (1) Bulk Register Access (BRA) transfers.
-
-
- (2) Multiple data lane support.
-
Links
=====
diff --git a/Documentation/driver-api/thermal/sysfs-api.rst b/Documentation/driver-api/thermal/sysfs-api.rst
index c803b89b7248..f73de211bdce 100644
--- a/Documentation/driver-api/thermal/sysfs-api.rst
+++ b/Documentation/driver-api/thermal/sysfs-api.rst
@@ -413,18 +413,21 @@ This function serves as an arbitrator to set the state of a cooling
device. It sets the cooling device to the deepest cooling state if
possible.
-5. thermal_emergency_poweroff
-=============================
+5. Critical Events
+==================
+
+On an event of critical trip temperature crossing, the thermal framework
+will trigger a hardware protection power-off (shutdown) or reboot,
+depending on configuration.
-On an event of critical trip temperature crossing the thermal framework
-shuts down the system by calling hw_protection_shutdown(). The
-hw_protection_shutdown() first attempts to perform an orderly shutdown
-but accepts a delay after which it proceeds doing a forced power-off
-or as last resort an emergency_restart.
+The kernel first attempts an orderly power-off or reboot; if that does
+not complete within a configured delay, it proceeds with a forced
+power-off or reboot, respectively. If this fails, ``emergency_restart()``
+is invoked as a last resort.
The delay should be carefully profiled so as to give adequate time for
-orderly poweroff.
+orderly power-off or reboot.
-If the delay is set to 0 emergency poweroff will not be supported. So a
-carefully profiled non-zero positive value is a must for emergency
-poweroff to be triggered.
+If the delay is set to 0, the emergency action will not be supported. So a
+carefully profiled non-zero positive value is a must for the emergency
+action to be triggered.
diff --git a/Documentation/driver-api/tty/tty_driver.rst b/Documentation/driver-api/tty/tty_driver.rst
index cc529f863406..7138222a70f2 100644
--- a/Documentation/driver-api/tty/tty_driver.rst
+++ b/Documentation/driver-api/tty/tty_driver.rst
@@ -25,6 +25,8 @@ freed.
For reference, both allocation and deallocation functions are explained here in
detail:
+.. kernel-doc:: include/linux/tty_driver.h
+ :identifiers: tty_alloc_driver
.. kernel-doc:: drivers/tty/tty_io.c
:identifiers: __tty_alloc_driver tty_driver_kref_put
@@ -35,7 +37,7 @@ Here comes the documentation of flags accepted by tty_alloc_driver() (or
__tty_alloc_driver()):
.. kernel-doc:: include/linux/tty_driver.h
- :doc: TTY Driver Flags
+ :identifiers: tty_driver_flag
----
diff --git a/Documentation/driver-api/tty/tty_struct.rst b/Documentation/driver-api/tty/tty_struct.rst
index c72f5a4293b2..29caf1c1ca5f 100644
--- a/Documentation/driver-api/tty/tty_struct.rst
+++ b/Documentation/driver-api/tty/tty_struct.rst
@@ -72,7 +72,7 @@ TTY Struct Flags
================
.. kernel-doc:: include/linux/tty.h
- :doc: TTY Struct Flags
+ :identifiers: tty_struct_flags
TTY Struct Reference
====================
diff --git a/Documentation/driver-api/usb/writing_musb_glue_layer.rst b/Documentation/driver-api/usb/writing_musb_glue_layer.rst
index e755c8551bba..0bb96ecdf527 100644
--- a/Documentation/driver-api/usb/writing_musb_glue_layer.rst
+++ b/Documentation/driver-api/usb/writing_musb_glue_layer.rst
@@ -613,7 +613,7 @@ endpoints configuration from the hardware, so we use line 12 instruction
to bypass reading the configuration from silicon, and rely on a
hard-coded table that describes the endpoints configuration instead::
- static struct musb_fifo_cfg jz4740_musb_fifo_cfg[] = {
+ static const struct musb_fifo_cfg jz4740_musb_fifo_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 64, },
diff --git a/Documentation/features/core/mseal_sys_mappings/arch-support.txt b/Documentation/features/core/mseal_sys_mappings/arch-support.txt
new file mode 100644
index 000000000000..c6cab9760d57
--- /dev/null
+++ b/Documentation/features/core/mseal_sys_mappings/arch-support.txt
@@ -0,0 +1,30 @@
+#
+# Feature name: mseal-system-mappings
+# Kconfig: ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
+# description: arch supports mseal system mappings
+#
+ -----------------------
+ | arch |status|
+ -----------------------
+ | alpha: | TODO |
+ | arc: | N/A |
+ | arm: | N/A |
+ | arm64: | ok |
+ | csky: | N/A |
+ | hexagon: | N/A |
+ | loongarch: | TODO |
+ | m68k: | N/A |
+ | microblaze: | N/A |
+ | mips: | TODO |
+ | nios2: | N/A |
+ | openrisc: | N/A |
+ | parisc: | TODO |
+ | powerpc: | TODO |
+ | riscv: | TODO |
+ | s390: | ok |
+ | sh: | N/A |
+ | sparc: | TODO |
+ | um: | TODO |
+ | x86: | ok |
+ | xtensa: | N/A |
+ -----------------------
diff --git a/Documentation/filesystems/9p.rst b/Documentation/filesystems/9p.rst
index 3078f3c9256a..be3504ca034a 100644
--- a/Documentation/filesystems/9p.rst
+++ b/Documentation/filesystems/9p.rst
@@ -40,7 +40,7 @@ For remote file server::
mount -t 9p 10.10.1.2 /mnt/9
-For Plan 9 From User Space applications (http://swtch.com/plan9)::
+For Plan 9 From User Space applications (https://9fans.github.io/plan9port/)::
mount -t 9p `namespace`/acme /mnt/9 -o trans=unix,uname=$USER
@@ -165,8 +165,8 @@ Options
do not necessarily validate cached values on the server. In other
words changes on the server are not guaranteed to be reflected
on the client system. Only use this mode of operation if you
- have an exclusive mount and the server will modify the filesystem
- underneath you.
+ have an exclusive mount and the server will not modify the
+ filesystem underneath you.
debug=n specifies debug level. The debug level is a bitmask.
diff --git a/Documentation/filesystems/dax.rst b/Documentation/filesystems/dax.rst
index 719e90f1988e..08dd5e254cc5 100644
--- a/Documentation/filesystems/dax.rst
+++ b/Documentation/filesystems/dax.rst
@@ -207,7 +207,6 @@ implement direct_access.
These block devices may be used for inspiration:
- brd: RAM backed block device driver
-- dcssblk: s390 dcss block device driver
- pmem: NVDIMM persistent memory driver
diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst
index 09f0aed5a08b..2a17865dfe39 100644
--- a/Documentation/filesystems/proc.rst
+++ b/Documentation/filesystems/proc.rst
@@ -128,6 +128,16 @@ process running on the system, which is named after the process ID (PID).
The link 'self' points to the process reading the file system. Each process
subdirectory has the entries listed in Table 1-1.
+A process can read its own information from /proc/PID/* with no extra
+permissions. When reading /proc/PID/* information for other processes, the
+reading process is required to have either the CAP_SYS_PTRACE capability
+with PTRACE_MODE_READ access permissions, or, alternatively, the
+CAP_PERFMON capability. This applies to all read-only information like
+`maps`, `environ`, `pagemap`, etc. The only exception is the `mem` file,
+which, due to its read-write nature, requires the CAP_SYS_PTRACE
+capability with the more elevated PTRACE_MODE_ATTACH permissions; the
+CAP_PERFMON capability does not grant access to /proc/PID/mem for other
+processes.
+
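+As a quick check (an unprivileged shell is assumed, and PID 1 is used only
+as an example of another process), a process can always read its own
+information, while reading the same file of another process requires the
+capabilities described above::
+
+    $ cat /proc/self/maps > /dev/null
+    $ cat /proc/1/maps
+    cat: /proc/1/maps: Permission denied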
Note that an open file descriptor to /proc/<pid> or to any of its
contained files or subdirectories does not prevent <pid> being reused
for some other process in the event that <pid> exits. Operations on
@@ -502,9 +512,25 @@ process, its PSS will be 1500. "Pss_Dirty" is the portion of PSS which
consists of dirty pages. ("Pss_Clean" is not included, but it can be
calculated by subtracting "Pss_Dirty" from "Pss".)
-Note that even a page which is part of a MAP_SHARED mapping, but has only
-a single pte mapped, i.e. is currently used by only one process, is accounted
-as private and not as shared.
+Traditionally, a page is accounted as "private" if it is mapped exactly once,
+and a page is accounted as "shared" when mapped multiple times, even when
+mapped in the same process multiple times. Note that this accounting is
+independent of MAP_SHARED.
+
+In some kernel configurations, the semantics of pages that are part of a
+larger allocation (e.g., THP) can differ: a page is accounted as "private"
+if all pages of the corresponding large allocation are *certainly* mapped
+in the same process, even if the page is mapped multiple times in that
+process. A page is accounted as "shared" if any page of the larger
+allocation is *maybe* mapped in a different process. In some cases, a
+large allocation might be treated as "maybe mapped by multiple processes"
+even though this is no longer the case.
+
+Some kernel configurations do not track the precise number of times a page
+that is part of a larger allocation is mapped. In this case, when
+calculating the PSS, the
+average number of mappings per page in this larger allocation might be used
+as an approximation for the number of mappings of a page. The PSS calculation
+will be imprecise in this case.
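+As a hypothetical example of this approximation: if a 512-page THP is mapped
+1024 times in total, an average of two mappings per page may be assumed for
+every page of that THP, so each page would contribute half its size to PSS,
+regardless of how many times that individual page is actually mapped.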
"Referenced" indicates the amount of memory currently marked as referenced or
accessed.
@@ -686,6 +712,11 @@ Where:
node locality page counters (N0 == node0, N1 == node1, ...) and the kernel page
size, in KB, that is backing the mapping up.
+Note that some kernel configurations do not track the precise number of times
+a page that is part of a larger allocation (e.g., THP) is mapped. In these
+configurations, "mapmax" might correspond to the average number of mappings
+per page in such a larger allocation instead.
+
1.2 Kernel data
---------------
@@ -1060,6 +1091,8 @@ Example output. You may not have all of these fields.
FilePmdMapped: 0 kB
CmaTotal: 0 kB
CmaFree: 0 kB
+ Unaccepted: 0 kB
+ Balloon: 0 kB
HugePages_Total: 0
HugePages_Free: 0
HugePages_Rsvd: 0
@@ -1132,9 +1165,15 @@ Dirty
Writeback
Memory which is actively being written back to the disk
AnonPages
- Non-file backed pages mapped into userspace page tables
+ Non-file backed pages mapped into userspace page tables. Note that
+ some kernel configurations might consider all pages part of a
+ larger allocation (e.g., THP) as "mapped", as soon as a single
+ page is mapped.
Mapped
- files which have been mmapped, such as libraries
+ files which have been mmapped, such as libraries. Note that some
+ kernel configurations might consider all pages part of a larger
+ allocation (e.g., THP) as "mapped", as soon as a single page is
+ mapped.
Shmem
Total memory used by shared memory (shmem) and tmpfs
KReclaimable
@@ -1228,6 +1267,10 @@ CmaTotal
Memory reserved for the Contiguous Memory Allocator (CMA)
CmaFree
Free remaining memory in the CMA reserves
+Unaccepted
+ Memory that has not been accepted by the guest
+Balloon
+ Memory returned to Host by VM Balloon Drivers
HugePages_Total, HugePages_Free, HugePages_Rsvd, HugePages_Surp, Hugepagesize, Hugetlb
See Documentation/admin-guide/mm/hugetlbpage.rst.
DirectMap4k, DirectMap2M, DirectMap1G
diff --git a/Documentation/iio/ad4030.rst b/Documentation/iio/ad4030.rst
new file mode 100644
index 000000000000..b57424b650a8
--- /dev/null
+++ b/Documentation/iio/ad4030.rst
@@ -0,0 +1,180 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+=============
+AD4030 driver
+=============
+
+ADC driver for Analog Devices Inc. AD4030 and similar devices. The module name
+is ``ad4030``.
+
+
+Supported devices
+=================
+
+The following chips are supported by this driver:
+
+* `AD4030-24 <https://www.analog.com/AD4030-24>`_
+* `AD4032-24 <https://www.analog.com/AD4032-24>`_
+* `AD4630-16 <https://www.analog.com/AD4630-16>`_
+* `AD4630-24 <https://www.analog.com/AD4630-24>`_
+* `AD4632-16 <https://www.analog.com/AD4632-16>`_
+* `AD4632-24 <https://www.analog.com/AD4632-24>`_
+
+IIO channels
+============
+
+Each "hardware" channel as described in the datasheet is split into 2 IIO
+channels:
+
+- One channel for the differential data
+- One channel for the common byte.
+
+The possible IIO channels, depending on the number of "hardware" channels, are:
+
++------------------------------------+------------------------------------+
+| 1 channel ADC | 2 channels ADC |
++====================================+====================================+
+| - voltage0-voltage1 (differential) | - voltage0-voltage1 (differential) |
+| - voltage2 (common-mode) | - voltage2-voltage3 (differential) |
+| | - voltage4 (common-mode) |
+| | - voltage5 (common-mode) |
++------------------------------------+------------------------------------+
+
+Labels
+------
+
+For ease of use, the IIO channels provide a label. For a differential channel,
+the label is ``differentialN`` where ``N`` is the "hardware" channel id. For a
+common-mode channel, the label is ``common-modeN`` where ``N`` is the
+"hardware" channel id.
+
+The possible labels are:
+
++-----------------+-----------------+
+| 1 channel ADC | 2 channels ADC |
++=================+=================+
+| - differential0 | - differential0 |
+| - common-mode0 | - differential1 |
+| | - common-mode0 |
+| | - common-mode1 |
++-----------------+-----------------+
+
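+For example, a channel label can be read back through sysfs (the device
+index and the exact attribute name below are assumptions based on the
+labels above):
+
+.. code-block:: bash
+
+    root:/sys/bus/iio/devices/iio:device0> cat in_voltage0-voltage1_label
+    differential0
+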
+Supported features
+==================
+
+SPI wiring modes
+----------------
+
+The driver currently supports the following SPI wiring configurations:
+
+One lane mode
+^^^^^^^^^^^^^
+
+In this mode, each channel has its own SDO line to send the conversion results.
+At the moment this mode can only be used on the AD4030, which has one channel,
+so only one SDO line is used.
+
+.. code-block::
+
+ +-------------+ +-------------+
+ | ADC | | HOST |
+ | | | |
+ | CNV |<--------| CNV |
+ | CS |<--------| CS |
+ | SDI |<--------| SDO |
+ | SDO0 |-------->| SDI |
+ | SCLK |<--------| SCLK |
+ +-------------+ +-------------+
+
+Interleaved mode
+^^^^^^^^^^^^^^^^
+
+In this mode, both channels' conversion results are bit-interleaved on one SDO
+line. As such, the wiring is the same as in `One lane mode`_.
+
+SPI Clock mode
+--------------
+
+Only the SPI clocking mode is supported.
+
+Output modes
+------------
+
+There are more exposed IIO channels than channels described in the device's
+datasheet. This is due to the `Differential data + common-mode`_ mode encoding
+2 types of information in one conversion result. As such, a "device" channel
+provides 2 IIO channels, one for the differential data and one for the common
+byte.
+
+Differential data
+^^^^^^^^^^^^^^^^^
+
+This mode is selected when:
+
+- Only differential channels are enabled in a buffered read
+- Oversampling attribute is set to 1
+
+Differential data + common-mode
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This mode is selected when:
+
+- Differential and common-mode channels are enabled in a buffered read
+- Oversampling attribute is set to 1
+
+For the 24-bit chips, this mode is also available with 16-bit differential
+data but is not selectable yet.
+
+Averaged differential data
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This mode is selected when:
+
+- Only differential channels are enabled in a buffered read
+- Oversampling attribute is greater than 1
+
+Digital Gain and Offset
+-----------------------
+
+Each differential data channel has a 16-bit unsigned configurable hardware
+gain applied to it. By default it's equal to 1. Note that applying gain can
+cause numerical saturation.
+
+Each differential data channel has a signed configurable hardware offset.
+For the ADCs ending in ``-24``, the offset is encoded on 24 bits.
+Likewise, the ADCs ending in ``-16`` have an offset encoded on 16 bits. Note
+that applying an offset can cause numerical saturation.
+
+The final differential data returned by the ADC is computed by first applying
+the gain, then the offset.
+
+The gain is controlled by the ``calibscale`` IIO attribute while the offset is
+controlled by the ``calibbias`` attribute.
+
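+As a hypothetical sysfs session (the device index, channel name, and values
+below are examples only; the attribute naming is an assumption following the
+usual IIO pattern for differential channels), the gain and offset could be
+adjusted like this:
+
+.. code-block:: bash
+
+    root:/sys/bus/iio/devices/iio:device0> echo 2 > in_voltage0-voltage1_calibscale
+    root:/sys/bus/iio/devices/iio:device0> echo 100 > in_voltage0-voltage1_calibbias
+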
+Reference voltage
+-----------------
+
+The chip supports an external reference voltage via the ``REF`` input or an
+internal buffered reference voltage via the ``REFIN`` input. The driver looks
+at the device tree to determine which is being used. If ``ref-supply`` is
+present, then the external reference voltage is used and the internal buffer is
+disabled. If ``refin-supply`` is present, then the internal buffered reference
+voltage is used.
+
+Reset
+-----
+
+Both hardware and software reset are supported. The driver first looks at the
+device tree to see if the ``reset-gpio`` property is populated.
+If not present, the driver falls back to a software reset by writing to the
+device's registers.
+
+Unimplemented features
+----------------------
+
+- ``BUSY`` indication
+- Additional wiring modes
+- Additional clock modes
+- Differential data 16-bits + common-mode for 24-bits chips
+- Overrange events
+- Test patterns
diff --git a/Documentation/iio/ad4695.rst b/Documentation/iio/ad4695.rst
index 9ec8bf466c15..f40593bcc37d 100644
--- a/Documentation/iio/ad4695.rst
+++ b/Documentation/iio/ad4695.rst
@@ -47,6 +47,36 @@ In this mode, CNV and CS are tied together and there is a single SDO line.
To use this mode, in the device tree, omit the ``cnv-gpios`` and
``spi-rx-bus-width`` properties.
+SPI offload wiring
+^^^^^^^^^^^^^^^^^^
+
+When used with a SPI offload, the supported wiring configuration is:
+
+.. code-block::
+
+ +-------------+ +-------------+
+ | GP0/BUSY |-------->| TRIGGER |
+ | CS |<--------| CS |
+ | | | |
+ | ADC | | SPI |
+ | | | |
+ | SDI |<--------| SDO |
+ | SDO |-------->| SDI |
+ | SCLK |<--------| SCLK |
+ | | | |
+ | | +-------------+
+ | CNV |<-----+--| PWM |
+ | | +--| GPIO |
+ +-------------+ +-------------+
+
+In this case, both the ``cnv-gpios`` and ``pwms`` properties are required.
+The ``#trigger-source-cells = <2>`` property is also required to connect back
+to the SPI offload. The SPI offload will have a ``trigger-sources`` property
+with cells to indicate the busy signal and which GPx pin is used, e.g.
+``<&ad4695 AD4695_TRIGGER_EVENT_BUSY AD4695_TRIGGER_PIN_GP0>``.
+
+.. seealso:: `SPI offload support`_
+
Channel configuration
---------------------
@@ -149,15 +179,62 @@ Gain/offset calibration
System calibration is supported using the channel gain and offset registers via
the ``calibscale`` and ``calibbias`` attributes respectively.
+Oversampling
+------------
+
+The chip supports per-channel oversampling when SPI offload is being used, with
+available oversampling ratios (OSR) of 1 (default), 4, 16, and 64. Enabling
+oversampling on a channel raises the effective number of bits of sampled data to
+17 (OSR == 4), 18 (16), or 19 (64), respectively. This can be set via the
+``oversampling_ratio`` attribute.
+
+Setting the oversampling ratio for a channel also changes the sample rate for
+that channel, since it requires multiple conversions per sample. Specifically,
+the new sampling frequency is the PWM sampling frequency divided by the
+particular OSR. This is set automatically by the driver when setting the
+``oversampling_ratio`` attribute. For example, if the device's current
+``sampling_frequency`` is 10000 and an OSR of 4 is set on channel ``voltage0``,
+the new reported sampling rate for that channel will be 2500 (ignoring PWM API
+rounding), while all others will remain at 10000. Subsequently setting the
+sampling frequency to a higher value on that channel will adjust the CNV trigger
+period for all channels, e.g. if ``voltage0``'s sampling frequency is adjusted
+from 2500 (with an OSR of 4) to 10000, the value reported by
+``in_voltage0_sampling_frequency`` will be 10000, but all other channels will
+now report 40000.
+
+For simplicity, the sampling frequency of the device should be set (considering
+the highest desired OSR value to be used) first, before configuring oversampling
+for specific channels.
+
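+As a hypothetical session reproducing the numbers above (the device index and
+the per-channel attribute names are assumptions):
+
+.. code-block:: bash
+
+    root:/sys/bus/iio/devices/iio:device0> echo 10000 > in_voltage0_sampling_frequency
+    root:/sys/bus/iio/devices/iio:device0> echo 4 > in_voltage0_oversampling_ratio
+    root:/sys/bus/iio/devices/iio:device0> cat in_voltage0_sampling_frequency
+    2500
+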
Unimplemented features
----------------------
- Additional wiring modes
- Threshold events
-- Oversampling
- GPIO support
- CRC support
+SPI offload support
+===================
+
+To be able to achieve the maximum sample rate, the driver can be used with the
+`AXI SPI Engine`_ to provide SPI offload support.
+
+.. _AXI SPI Engine: http://analogdevicesinc.github.io/hdl/projects/ad469x_fmc/index.html
+
+.. seealso:: `SPI offload wiring`_
+
+When SPI offload is being used, some attributes will be different.
+
+* ``trigger`` directory is removed.
+* ``in_voltage0_sampling_frequency`` attributes are added for setting the sample
+ rate.
+* ``in_voltage0_sampling_frequency_available`` attributes are added for querying
+ the max sample rate.
+* ``timestamp`` channel is removed.
+* Buffer data format may be different compared to when offload is not used,
+ e.g. the ``buffer0/in_voltage0_type`` attribute.
+
Device buffers
==============
@@ -165,3 +242,28 @@ This driver supports hardware triggered buffers. This uses the "advanced
sequencer" feature of the chip to trigger a burst of conversions.
Also see :doc:`iio_devbuf` for more general information.
+
+Effective sample rate for buffered reads
+----------------------------------------
+
+When SPI offload is not used, the sample rate is determined by the trigger that
+is manually configured in userspace. All enabled channels will be read in a
+burst when the trigger is received.
+
+When SPI offload is used, the sample rate is configured per channel. All
+channels will have the same rate, so only one ``in_voltageY_sampling_frequency``
+attribute needs to be set. Since this rate determines the delay between each
+individual conversion, the effective period for each sample is actually
+the sum of the periods of each enabled channel in a buffered read. In other
+words, the effective sample rate is the value of the
+``in_voltageY_sampling_frequency`` attribute divided by the number of enabled
+channels. So if 4 channels are enabled, with
+the ``in_voltageY_sampling_frequency`` attributes set to 1 MHz, the effective
+sample rate is 250 kHz.
+
+With oversampling enabled, the effective sample rate also depends on the OSR
+assigned to each channel. For example, if one of the 4 channels mentioned in the
+previous case is configured with an OSR of 4, the effective sample rate for that
+channel becomes (1 MHz / 4) = 250 kHz. The effective sample rate for all
+four channels is then 1 / ((3 / 1 MHz) + (1 / 250 kHz)) ~= 142.9 kHz. Note
+that in this case "sample" refers to one read of all enabled channels (i.e. one
+full cycle through the auto-sequencer).
diff --git a/Documentation/iio/ad7191.rst b/Documentation/iio/ad7191.rst
new file mode 100644
index 000000000000..977d4fea14b0
--- /dev/null
+++ b/Documentation/iio/ad7191.rst
@@ -0,0 +1,119 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+=============
+AD7191 driver
+=============
+
+Device driver for Analog Devices AD7191 ADC.
+
+Supported devices
+=================
+
+* `AD7191 <https://www.analog.com/AD7191>`_
+
+The AD7191 is a high precision, low noise, 24-bit Σ-Δ ADC with integrated PGA.
+It features two differential input channels, an internal temperature sensor, and
+configurable sampling rates.
+
+Devicetree
+==========
+
+Pin Configuration
+-----------------
+
+The driver supports both pin-strapped and GPIO-controlled configurations for ODR
+(Output Data Rate) and PGA (Programmable Gain Amplifier) settings. These
+configurations are mutually exclusive - you must use either pin-strapped or GPIO
+control for each setting, not both.
+
+ODR Configuration
+^^^^^^^^^^^^^^^^^
+
+The ODR can be configured either through GPIO control or pin-strapping:
+
+- When using GPIO control, specify the "odr-gpios" property in the device tree
+- For pin-strapped configuration, specify the "adi,odr-value" property in the
+ device tree
+
+Available ODR settings:
+
+ - 120 Hz (ODR1=0, ODR2=0)
+ - 60 Hz (ODR1=0, ODR2=1)
+ - 50 Hz (ODR1=1, ODR2=0)
+ - 10 Hz (ODR1=1, ODR2=1)
+
+PGA Configuration
+^^^^^^^^^^^^^^^^^
+
+The PGA can be configured either through GPIO control or pin-strapping:
+
+- When using GPIO control, specify the "pga-gpios" property in the device tree
+- For pin-strapped configuration, specify the "adi,pga-value" property in the
+ device tree
+
+Available PGA gain settings:
+
+ - 1x (PGA1=0, PGA2=0)
+ - 8x (PGA1=0, PGA2=1)
+ - 64x (PGA1=1, PGA2=0)
+ - 128x (PGA1=1, PGA2=1)
+
+Clock Configuration
+-------------------
+
+The AD7191 supports both internal and external clock sources:
+
+- When CLKSEL pin is tied LOW: Uses internal 4.92MHz clock (no clock property
+ needed)
+- When CLKSEL pin is tied HIGH: Requires external clock source
+
+  - Can be a crystal between MCLK1 and MCLK2 pins
+  - Or a CMOS-compatible clock driving MCLK2 pin
+  - Must specify the "clocks" property in device tree when using external clock
+
+SPI Interface Requirements
+--------------------------
+
+The AD7191 has specific SPI interface requirements:
+
+- The DOUT/RDY output is dual-purpose and requires SPI bus locking
+- DOUT/RDY must be connected to an interrupt-capable GPIO
+- The SPI controller's chip select must be connected to the PDOWN pin of the ADC
+- When CS (PDOWN) is high, the device powers down and resets internal circuitry
+- SPI mode 3 operation (CPOL=1, CPHA=1) is required
+
+Power Supply Requirements
+-------------------------
+
+The device requires the following power supplies:
+
+- AVdd: Analog power supply
+- DVdd: Digital power supply
+- Vref: Reference voltage supply (external)
+
+All power supplies must be specified in the device tree.
+
+Channel Configuration
+=====================
+
+The device provides three channels:
+
+1. Temperature Sensor
+ - 24-bit unsigned
+ - Internal temperature measurement
+ - Temperature in millidegrees Celsius
+
+2. Differential Input (AIN1-AIN2)
+ - 24-bit unsigned
+ - Differential voltage measurement
+ - Configurable gain via PGA
+
+3. Differential Input (AIN3-AIN4)
+ - 24-bit unsigned
+ - Differential voltage measurement
+ - Configurable gain via PGA
+
+Buffer Support
+==============
+
+This driver supports IIO triggered buffers. See Documentation/iio/iio_devbuf.rst
+for more information about IIO triggered buffers.
diff --git a/Documentation/iio/ad7380.rst b/Documentation/iio/ad7380.rst
index c46127700e14..24a92a1c4371 100644
--- a/Documentation/iio/ad7380.rst
+++ b/Documentation/iio/ad7380.rst
@@ -29,6 +29,7 @@ The following chips are supported by this driver:
* `AD7388-4 <https://www.analog.com/en/products/ad7388-4.html>`_
* `ADAQ4370-4 <https://www.analog.com/en/products/adaq4370-4.html>`_
* `ADAQ4380-4 <https://www.analog.com/en/products/adaq4380-4.html>`_
+* `ADAQ4381-4 <https://www.analog.com/en/products/adaq4381-4.html>`_
Supported features
@@ -52,8 +53,8 @@ declared in the device tree as ``refin-supply``.
ADAQ devices
~~~~~~~~~~~~
-adaq4370-4 and adaq4380-4 don't have an external reference, but use a 3.3V
-internal reference derived from one of its supplies (``refin-supply``)
+ADAQ devices don't have an external reference, but use a 3.3V internal reference
+derived from one of their supplies (``refin-supply``).
All other devices from ad738x family
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -92,6 +93,38 @@ must restart iiod using the following command:
root:~# systemctl restart iiod
+Alert
+-----
+
+2-channel variants of the ad738x family can use the SDOB line as an alert pin
+when configured in 1 SDO line mode. 4-channel variants can use SDOD as an
+alert pin when configured in 1 or 2 SDO line(s) mode, although only 1 SDO line
+mode is currently supported by the driver (see `SPI wiring modes`_).
+
+At the end of a conversion, the active-low alert pin gets asserted if the
+conversion result exceeds the alert high limit or falls below the alert low
+limit. It is cleared on a falling edge of CS. The alert pin is common to all
+channels.
+
+Users can enable the alert using the regular iio events attribute:
+
+.. code-block:: bash
+
+ events/thresh_either_en
+
+The high and low thresholds are common to all channels and can also be set using
+regular iio events attributes:
+
+.. code-block:: bash
+
+ events/in_thresh_falling_value
+ events/in_thresh_rising_value
+
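+For instance (hypothetical raw threshold values):
+
+.. code-block:: bash
+
+    root:/sys/bus/iio/devices/iio:device0> echo 3000 > events/in_thresh_rising_value
+    root:/sys/bus/iio/devices/iio:device0> echo 1000 > events/in_thresh_falling_value
+    root:/sys/bus/iio/devices/iio:device0> echo 1 > events/thresh_either_en
+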
+If debugfs is available, users can read the ALERT register to determine the
+faulty channel and direction.
+
+In most use cases, users will hardwire the alert pin to trigger a shutdown.
+
Channel selection and sequencer (single-end chips only)
-------------------------------------------------------
@@ -144,8 +177,25 @@ Unimplemented features
- Rolling average oversampling
- Power down mode
- CRC indication
-- Alert
+SPI offload support
+===================
+
+To be able to achieve the maximum sample rate, the driver can be used with the
+`AXI SPI Engine`_ to provide SPI offload support.
+
+.. _AXI SPI Engine: http://analogdevicesinc.github.io/hdl/projects/pulsar_adc/index.html
+
+When SPI offload is being used, some attributes will be different.
+
+* ``trigger`` directory is removed.
+* ``in_voltage0_sampling_frequency`` attribute is added for setting the sample
+ rate.
+* ``in_voltage0_sampling_frequency_available`` attribute is added for querying
+ the max sample rate.
+* ``timestamp`` channel is removed.
+* Buffer data format may be different compared to when offload is not used,
+ e.g. the ``in_voltage0_type`` attribute.
Device buffers
==============
diff --git a/Documentation/iio/ad7944.rst b/Documentation/iio/ad7944.rst
index 0d26e56aba88..e6dbe4d7f58c 100644
--- a/Documentation/iio/ad7944.rst
+++ b/Documentation/iio/ad7944.rst
@@ -46,6 +46,8 @@ CS mode, 3-wire, without busy indicator
To select this mode in the device tree, set the ``adi,spi-mode`` property to
``"single"`` and omit the ``cnv-gpios`` property.
+This is the only wiring configuration supported when using `SPI offload support`_.
+
CS mode, 4-wire, without busy indicator
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -106,7 +108,6 @@ Unimplemented features
----------------------
- ``BUSY`` indication
-- ``TURBO`` mode
Device attributes
@@ -147,6 +148,27 @@ AD7986 is a fully-differential ADC and has the following attributes:
In "chain" mode, additional chips will appear as additional voltage input
channels, e.g. ``in_voltage2-voltage3_raw``.
+SPI offload support
+===================
+
+To be able to achieve the maximum sample rate, the driver can be used with the
+`AXI SPI Engine`_ to provide SPI offload support.
+
+.. _AXI SPI Engine: http://analogdevicesinc.github.io/hdl/projects/pulsar_adc/index.html
+
+When SPI offload is being used, some attributes will be different.
+
+* ``trigger`` directory is removed.
+* ``in_voltage0_sampling_frequency`` attribute is added for setting the sample
+ rate.
+* ``in_voltage0_sampling_frequency_available`` attribute is added for querying
+ the max sample rate.
+* ``timestamp`` channel is removed.
+* Buffer data format may be different compared to when offload is not used,
+ e.g. the ``in_voltage0_type`` attribute.
+
+If the ``turbo-gpios`` property is present in the device tree, the driver will
+turn on TURBO during buffered reads and turn it off otherwise.
Device buffers
==============
diff --git a/Documentation/iio/adis16550.rst b/Documentation/iio/adis16550.rst
new file mode 100644
index 000000000000..25db7b8060c4
--- /dev/null
+++ b/Documentation/iio/adis16550.rst
@@ -0,0 +1,376 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+================
+ADIS16550 driver
+================
+
+This driver supports Analog Devices' IMUs on the SPI bus.
+
+1. Supported devices
+====================
+
+* `ADIS16550 <https://www.analog.com/ADIS16550>`_
+
+The ADIS16550 is a complete inertial system that includes a triaxis gyroscope
+and a triaxis accelerometer. The factory calibration characterizes each sensor for
+sensitivity, bias, and alignment. As a result, each sensor has its own dynamic
+compensation formulas that provide accurate sensor measurements.
+
+2. Device attributes
+====================
+
+Accelerometer and gyroscope measurements are always provided. Furthermore, the
+driver offers the capability to retrieve the delta angle and the delta velocity
+measurements computed by the device.
+
+The delta angle measurements represent a calculation of angular displacement
+between each sample update, while the delta velocity measurements represent a
+calculation of linear velocity change between each sample update.
+
+Finally, temperature data are provided which show a coarse measurement of
+the temperature inside of the IMU device. This data is most useful for
+monitoring relative changes in the thermal environment.
+
+Each IIO device has a device folder under ``/sys/bus/iio/devices/iio:deviceX``,
+where X is the IIO index of the device. Under these folders reside a set of
+device files, depending on the characteristics and features of the hardware
+device in question. These files are consistently generalized and documented in
+the IIO ABI documentation.
+
+The following tables show the adis16550 related device files, found in the
+specific device folder path ``/sys/bus/iio/devices/iio:deviceX``.
+
++-------------------------------------------+----------------------------------------------------------+
+| 3-Axis Accelerometer related device files | Description |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_scale | Scale for the accelerometer channels. |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_filter_low_pass_3db_frequency | Bandwidth for the accelerometer channels. |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_x_calibbias | Calibration offset for the X-axis accelerometer channel. |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_x_calibscale | Calibration scale for the X-axis accelerometer channel. |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_x_raw | Raw X-axis accelerometer channel value. |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_y_calibbias | Calibration offset for the Y-axis accelerometer channel. |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_y_calibscale | Calibration scale for the Y-axis accelerometer channel. |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_y_raw | Raw Y-axis accelerometer channel value. |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_z_calibbias | Calibration offset for the Z-axis accelerometer channel. |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_z_calibscale | Calibration scale for the Z-axis accelerometer channel. |
++-------------------------------------------+----------------------------------------------------------+
+| in_accel_z_raw | Raw Z-axis accelerometer channel value. |
++-------------------------------------------+----------------------------------------------------------+
+| in_deltavelocity_scale | Scale for delta velocity channels. |
++-------------------------------------------+----------------------------------------------------------+
+| in_deltavelocity_x_raw | Raw X-axis delta velocity channel value. |
++-------------------------------------------+----------------------------------------------------------+
+| in_deltavelocity_y_raw | Raw Y-axis delta velocity channel value. |
++-------------------------------------------+----------------------------------------------------------+
+| in_deltavelocity_z_raw | Raw Z-axis delta velocity channel value. |
++-------------------------------------------+----------------------------------------------------------+
+
++--------------------------------------------+------------------------------------------------------+
+| 3-Axis Gyroscope related device files | Description |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_scale | Scale for the gyroscope channels. |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_filter_low_pass_3db_frequency   | Bandwidth for the gyroscope channels.                |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_x_calibbias | Calibration offset for the X-axis gyroscope channel. |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_x_calibscale | Calibration scale for the X-axis gyroscope channel. |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_x_raw | Raw X-axis gyroscope channel value. |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_y_calibbias | Calibration offset for the Y-axis gyroscope channel. |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_y_calibscale | Calibration scale for the Y-axis gyroscope channel. |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_y_raw | Raw Y-axis gyroscope channel value. |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_z_calibbias | Calibration offset for the Z-axis gyroscope channel. |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_z_calibscale | Calibration scale for the Z-axis gyroscope channel. |
++--------------------------------------------+------------------------------------------------------+
+| in_anglvel_z_raw | Raw Z-axis gyroscope channel value. |
++--------------------------------------------+------------------------------------------------------+
+| in_deltaangl_scale | Scale for delta angle channels. |
++--------------------------------------------+------------------------------------------------------+
+| in_deltaangl_x_raw | Raw X-axis delta angle channel value. |
++--------------------------------------------+------------------------------------------------------+
+| in_deltaangl_y_raw | Raw Y-axis delta angle channel value. |
++--------------------------------------------+------------------------------------------------------+
+| in_deltaangl_z_raw | Raw Z-axis delta angle channel value. |
++--------------------------------------------+------------------------------------------------------+
+
++----------------------------------+-------------------------------------------+
+| Temperature sensor related files | Description |
++----------------------------------+-------------------------------------------+
+| in_temp0_raw | Raw temperature channel value. |
++----------------------------------+-------------------------------------------+
+| in_temp0_offset | Offset for the temperature sensor channel.|
++----------------------------------+-------------------------------------------+
+| in_temp0_scale | Scale for the temperature sensor channel. |
++----------------------------------+-------------------------------------------+
+
++----------------------------+--------------------------------------------------------------------------------+
+| Miscellaneous device files | Description |
++----------------------------+--------------------------------------------------------------------------------+
+| name | Name of the IIO device. |
++----------------------------+--------------------------------------------------------------------------------+
+| sampling_frequency | Currently selected sample rate. |
++----------------------------+--------------------------------------------------------------------------------+
+
+The following table shows the adis16550 related device debug files, found in the
+specific device debug folder path ``/sys/kernel/debug/iio/iio:deviceX``.
+
++----------------------+-------------------------------------------------------------------------+
+| Debugfs device files | Description |
++----------------------+-------------------------------------------------------------------------+
+| serial_number | The serial number of the chip in hexadecimal format. |
++----------------------+-------------------------------------------------------------------------+
+| product_id | Chip specific product id (16550). |
++----------------------+-------------------------------------------------------------------------+
+| flash_count | The number of flash writes performed on the device. |
++----------------------+-------------------------------------------------------------------------+
+| firmware_revision | String containing the firmware revision in the following format ##.##. |
++----------------------+-------------------------------------------------------------------------+
+| firmware_date | String containing the firmware date in the following format mm-dd-yyyy. |
++----------------------+-------------------------------------------------------------------------+
+
+Channels processed values
+-------------------------
+
+A channel value can be read from its _raw attribute. The value returned is the
+raw value as reported by the device. To get the processed value of the channel,
+apply the following formula:
+
+.. code-block:: bash
+
+ processed value = (_raw + _offset) * _scale
+
+Where _offset and _scale are device attributes. If no _offset attribute is
+present, simply assume its value is 0.
+
+The adis16550 driver offers data for 5 types of channels, the table below shows
+the measurement units for the processed value, which are defined by the IIO
+framework:
+
++--------------------------------------+---------------------------+
+| Channel type | Measurement unit |
++--------------------------------------+---------------------------+
+| Acceleration on X, Y, and Z axis | Meters per Second squared |
++--------------------------------------+---------------------------+
+| Angular velocity on X, Y and Z axis | Radians per second |
++--------------------------------------+---------------------------+
+| Delta velocity on X, Y, and Z axis   | Meters per Second         |
++--------------------------------------+---------------------------+
+| Delta angle on X, Y, and Z axis | Radians |
++--------------------------------------+---------------------------+
+| Temperature | Millidegrees Celsius |
++--------------------------------------+---------------------------+
+
+Usage examples
+--------------
+
+Show device name:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat name
+ adis16550
+
+Show accelerometer channels value:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_x_raw
+ 6903851
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_y_raw
+ 5650550
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_z_raw
+ 104873530
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_scale
+ 0.000000095
+
+- X-axis acceleration = in_accel_x_raw * in_accel_scale = 0.655865845 m/s^2
+- Y-axis acceleration = in_accel_y_raw * in_accel_scale = 0.53680225 m/s^2
+- Z-axis acceleration = in_accel_z_raw * in_accel_scale = 9.96298535 m/s^2
+
+Show gyroscope channels value:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat in_anglvel_x_raw
+ 193309
+ root:/sys/bus/iio/devices/iio:device0> cat in_anglvel_y_raw
+ -763676
+ root:/sys/bus/iio/devices/iio:device0> cat in_anglvel_z_raw
+ -358108
+ root:/sys/bus/iio/devices/iio:device0> cat in_anglvel_scale
+ 0.000000003
+
+- X-axis angular velocity = in_anglvel_x_raw * in_anglvel_scale = 0.000579927 rad/s
+- Y-axis angular velocity = in_anglvel_y_raw * in_anglvel_scale = -0.002291028 rad/s
+- Z-axis angular velocity = in_anglvel_z_raw * in_anglvel_scale = -0.001074324 rad/s
+
+Set calibration offset for accelerometer channels:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_x_calibbias
+ 0
+
+ root:/sys/bus/iio/devices/iio:device0> echo 5000 > in_accel_x_calibbias
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_x_calibbias
+ 5000
+
+Set calibration offset for gyroscope channels:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat in_anglvel_y_calibbias
+ 0
+
+ root:/sys/bus/iio/devices/iio:device0> echo -5000 > in_anglvel_y_calibbias
+ root:/sys/bus/iio/devices/iio:device0> cat in_anglvel_y_calibbias
+ -5000
+
+Set sampling frequency:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat sampling_frequency
+ 4000.000000
+
+ root:/sys/bus/iio/devices/iio:device0> echo 1000 > sampling_frequency
+ root:/sys/bus/iio/devices/iio:device0> cat sampling_frequency
+ 1000.000000
+
+Set bandwidth for accelerometer channels:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_filter_low_pass_3db_frequency
+ 0
+
+ root:/sys/bus/iio/devices/iio:device0> echo 100 > in_accel_filter_low_pass_3db_frequency
+ root:/sys/bus/iio/devices/iio:device0> cat in_accel_filter_low_pass_3db_frequency
+ 100
+
+Show serial number:
+
+.. code-block:: bash
+
+ root:/sys/kernel/debug/iio/iio:device0> cat serial_number
+ 0x000000b6
+
+Show product id:
+
+.. code-block:: bash
+
+ root:/sys/kernel/debug/iio/iio:device0> cat product_id
+ 16550
+
+Show flash count:
+
+.. code-block:: bash
+
+ root:/sys/kernel/debug/iio/iio:device0> cat flash_count
+ 13
+
+Show firmware revision:
+
+.. code-block:: bash
+
+ root:/sys/kernel/debug/iio/iio:device0> cat firmware_revision
+ 1.5
+
+Show firmware date:
+
+.. code-block:: bash
+
+ root:/sys/kernel/debug/iio/iio:device0> cat firmware_date
+ 28-04-2021
+
+3. Device buffers
+=================
+
+This driver supports IIO buffers.
+
+The device supports retrieving the raw acceleration, gyroscope, delta velocity,
+delta angle and temperature measurements using buffers.
+
+However, when retrieving acceleration or gyroscope data using buffers, delta
+readings will not be available, and vice versa. This is because the device only
+allows reading either acceleration and gyroscope data or delta velocity and
+delta angle data at a time, and switching between these two burst data selection
+modes is time consuming.
+
+Usage examples
+--------------
+
+Set device trigger in current_trigger, if not already set:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> cat trigger/current_trigger
+
+ root:/sys/bus/iio/devices/iio:device0> echo adis16550-dev0 > trigger/current_trigger
+ root:/sys/bus/iio/devices/iio:device0> cat trigger/current_trigger
+ adis16550-dev0
+
+Select channels for buffer read:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> echo 1 > scan_elements/in_deltavelocity_x_en
+ root:/sys/bus/iio/devices/iio:device0> echo 1 > scan_elements/in_deltavelocity_y_en
+ root:/sys/bus/iio/devices/iio:device0> echo 1 > scan_elements/in_deltavelocity_z_en
+ root:/sys/bus/iio/devices/iio:device0> echo 1 > scan_elements/in_temp0_en
+
+Set the number of samples to be stored in the buffer:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> echo 10 > buffer/length
+
+Enable buffer readings:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> echo 1 > buffer/enable
+
+Obtain buffered data:
+
+.. code-block:: bash
+
+ root:/sys/bus/iio/devices/iio:device0> hexdump -C /dev/iio\:device0
+ ...
+ 0000cdf0 00 00 0d 2f 00 00 08 43 00 00 09 09 00 00 a4 5f |.../...C......._|
+ 0000ce00 00 00 0d 2f 00 00 07 de 00 00 08 db 00 00 a4 4b |.../...........K|
+ 0000ce10 00 00 0d 2f 00 00 07 58 00 00 08 a3 00 00 a4 55 |.../...X.......U|
+ 0000ce20 00 00 0d 2f 00 00 06 d6 00 00 08 5c 00 00 a4 62 |.../.......\...b|
+ 0000ce30 00 00 0d 2f 00 00 06 45 00 00 08 37 00 00 a4 47 |.../...E...7...G|
+ 0000ce40 00 00 0d 2f 00 00 05 d4 00 00 08 30 00 00 a3 fa |.../.......0....|
+ 0000ce50 00 00 0d 2f 00 00 05 d0 00 00 08 12 00 00 a3 d3 |.../............|
+ 0000ce60 00 00 0d 2f 00 00 05 dd 00 00 08 2e 00 00 a3 e9 |.../............|
+ 0000ce70 00 00 0d 2f 00 00 05 cc 00 00 08 51 00 00 a3 d5 |.../.......Q....|
+ 0000ce80 00 00 0d 2f 00 00 05 ba 00 00 08 22 00 00 a3 9a |.../......."....|
+ 0000ce90 00 00 0d 2f 00 00 05 9c 00 00 07 d9 00 00 a3 40 |.../...........@|
+ 0000cea0 00 00 0d 2f 00 00 05 68 00 00 07 94 00 00 a2 e4 |.../...h........|
+ 0000ceb0 00 00 0d 2f 00 00 05 25 00 00 07 8d 00 00 a2 ce |.../...%........|
+ ...
+
+See ``Documentation/iio/iio_devbuf.rst`` for more information about how buffered
+data is structured.
+
+4. IIO Interfacing Tools
+========================
+
+See ``Documentation/iio/iio_tools.rst`` for the description of the available IIO
+interfacing tools.
diff --git a/Documentation/iio/adxl380.rst b/Documentation/iio/adxl380.rst
index 376dee5fe1dd..66c8a4d4f767 100644
--- a/Documentation/iio/adxl380.rst
+++ b/Documentation/iio/adxl380.rst
@@ -94,7 +94,7 @@ apply the following formula:
Where _offset and _scale are device attributes. If no _offset attribute is
present, simply assume its value is 0.
-The adis16475 driver offers data for 2 types of channels, the table below shows
+The ADXL380 driver offers data for 2 types of channels, the table below shows
the measurement units for the processed value, which are defined by the IIO
framework:
diff --git a/Documentation/iio/iio_adc.rst b/Documentation/iio/iio_adc.rst
new file mode 100644
index 000000000000..f2f19a691907
--- /dev/null
+++ b/Documentation/iio/iio_adc.rst
@@ -0,0 +1,305 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+=========================
+IIO Abstractions for ADCs
+=========================
+
+1. Overview
+===========
+
+The IIO subsystem supports many Analog to Digital Converters (ADCs). Some ADCs
+have features and characteristics that are supported in specific ways by IIO
+device drivers. This documentation describes common ADC features and explains
+how they are supported by the IIO subsystem.
+
+2. ADC Channel Types
+====================
+
+ADCs can have distinct types of inputs, each of them measuring analog voltages
+in a slightly different way. An ADC digitizes the analog input voltage over a
+span that is often given by the provided voltage reference, the input type, and
+the input polarity. The input range allowed to an ADC channel is needed to
+determine the scale factor and offset required to obtain the measured value in
+real-world units (millivolts for voltage measurement, milliamps for current
+measurement, etc.).
+
+Elaborate designs may have nonlinear characteristics or integrated components
+(such as amplifiers and reference buffers) that might also have to be considered
+to derive the allowed input range for an ADC. For clarity, the sections below
+assume the input range only depends on the provided voltage references, input
+type, and input polarity.
+
+There are three general types of ADC inputs (single-ended, differential,
+pseudo-differential) and two possible polarities (unipolar, bipolar). The input
+type (single-ended, differential, pseudo-differential) is one channel
+characteristic, and is completely independent of the polarity (unipolar,
+bipolar) aspect. A comprehensive article about ADC input types (on which this
+doc is heavily based) can be found at
+https://www.analog.com/en/resources/technical-articles/sar-adc-input-types.html.
+
+2.1 Single-ended channels
+-------------------------
+
+Single-ended channels digitize the analog input voltage relative to ground and
+can be either unipolar or bipolar.
+
+2.1.1 Single-ended Unipolar Channels
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+::
+
+ ---------- VREF -------------
+ ´ ` ´ ` _____________
+ / \ / \ / |
+ / \ / \ --- < IN ADC |
+ \ / \ / \ |
+ `-´ `-´ \ VREF |
+ -------- GND (0V) ----------- +-----------+
+ ^
+ |
+ External VREF
+
+The input voltage to a **single-ended unipolar** channel is allowed to swing
+from GND to VREF (where VREF is a voltage reference with electrical potential
+higher than system ground). The maximum input voltage is also called VFS
+(Voltage input Full-Scale), with VFS being determined by VREF. The voltage
+reference may be provided from an external supply or derived from the chip power
+source.
+
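+As a worked example (hypothetical converter), a 12-bit single-ended unipolar
+channel referenced to an external 2.5 V supply digitizes inputs from 0 V to
+2.5 V into codes 0 to 4095, giving a scale factor of 2500 mV / 4096, about
+0.61 mV per code, and an offset of 0.
+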
+A single-ended unipolar channel could be described in device tree like the
+following example::
+
+ adc@0 {
+ ...
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@0 {
+ reg = <0>;
+ };
+ };
+
+One is always allowed to include ADC channel nodes in the device tree. However,
+if the device has a uniform set of inputs (e.g. all inputs are single-ended),
+then declaring the channel nodes is optional.
+
+One caveat for devices that support mixed single-ended and differential channels
+is that single-ended channel nodes also need to provide a ``single-channel``
+property when ``reg`` is an arbitrary number that doesn't match the input pin
+number.
+
+See ``Documentation/devicetree/bindings/iio/adc/adc.yaml`` for the complete
+documentation of ADC specific device tree properties.
+
+
+2.1.2 Single-ended Bipolar Channels
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+::
+
+ ---------- +VREF ------------
+ ´ ` ´ ` _____________________
+ / \ / \ / |
+ / \ / \ --- < IN ADC |
+ \ / \ / \ |
+ `-´ `-´ \ +VREF -VREF |
+ ---------- -VREF ------------ +-------------------+
+ ^ ^
+ | |
+ External +VREF ------+ External -VREF
+
+For a **single-ended bipolar** channel, the analog voltage input can go from
+-VREF to +VREF (where -VREF is the voltage reference that has the lower
+electrical potential while +VREF is the reference with the higher one). Some ADC
+chips derive the lower reference from +VREF, others get it from a separate
+input. Often, +VREF and -VREF are symmetric but they don't need to be so. When
+-VREF is lower than system ground, these inputs are also called single-ended
+true bipolar. Also, while there is a relevant difference between bipolar and
+true bipolar from the electrical perspective, IIO makes no explicit distinction
+between them.
+
+Here's an example device tree description of a single-ended bipolar channel::
+
+ adc@0 {
+ ...
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@0 {
+ reg = <0>;
+ bipolar;
+ };
+ };
+
+2.2 Differential channels
+-------------------------
+
+A differential voltage measurement digitizes the voltage level at the positive
+input (IN+) relative to the negative input (IN-) over the -VREF to +VREF span.
+In other words, a differential channel measures the potential difference between
+IN+ and IN-, which is often denoted by the IN+ - IN- formula.
+
+2.2.1 Differential Bipolar Channels
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+::
+
+ -------- +VREF ------ +-------------------+
+ ´ ` ´ ` / |
+ / \ / \ / --- < IN+ |
+ `-´ `-´ | |
+ -------- -VREF ------ | |
+ | ADC |
+ -------- +VREF ------ | |
+ ´ ` ´ ` | |
+ \ / \ / \ --- < IN- |
+ `-´ `-´ \ +VREF -VREF |
+ -------- -VREF ------ +-------------------+
+ ^ ^
+ | +---- External -VREF
+ External +VREF
+
+The analog signals to **differential bipolar** inputs are also allowed to swing
+from -VREF to +VREF. The bipolar part of the name means that the resulting value
+of the difference (IN+ - IN-) can be positive or negative. If -VREF is below
+system GND, these are also called differential true bipolar inputs.
+
+Device tree example of a differential bipolar channel::
+
+ adc@0 {
+ ...
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@0 {
+ reg = <0>;
+ bipolar;
+ diff-channels = <0 1>;
+ };
+ };
+
+In the ADC driver, ``differential = 1`` is set into ``struct iio_chan_spec`` for
+the channel. Even though there are three general input types, ``differential``
+is only used to distinguish between differential and non-differential (either
+single-ended or pseudo-differential) input types. See
+``include/linux/iio/iio.h`` for more information.
+
+2.2.2 Differential Unipolar Channels
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+For **differential unipolar** channels, the analog voltage at the positive input
+must also be higher than the voltage at the negative input. Thus, the actual
+input range allowed to a differential unipolar channel is IN- to +VREF. Because
+IN+ is allowed to swing with the measured analog signal and the input setup must
+guarantee IN+ will not go below IN- (nor IN- rise above IN+), most
+differential unipolar channel setups have IN- fixed to a known voltage that does
+not fall within the voltage range expected for the measured signal. That leads
+to a setup that is equivalent to a pseudo-differential channel. Thus,
+differential unipolar setups can often be supported as pseudo-differential
+unipolar channels.
+
+2.3 Pseudo-differential Channels
+--------------------------------
+
+There is a third ADC input type which is called pseudo-differential or
+single-ended to differential configuration. A pseudo-differential channel is
+similar to a differential channel in that it also measures IN+ relative to IN-.
+However, unlike bipolar differential channels, the negative input is limited to
+a narrow voltage range (taken as a constant voltage) while only IN+ is allowed
+to swing. A pseudo-differential channel can be made out of a differential pair
+of inputs by restricting the negative input to a known voltage while allowing
+only the positive input to swing. Sometimes, the input provided to IN- is called
+common-mode voltage. Besides, some parts have a COM pin that allows single-ended
+inputs to be referenced to a common-mode voltage, making them
+pseudo-differential channels. Often, the common mode input voltage can be
+described in the device tree as a voltage regulator (e.g. ``com-supply``) since
+it is basically a constant voltage source.
+
+2.3.1 Pseudo-differential Unipolar Channels
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+::
+
+ -------- +VREF ------ +-------------------+
+ ´ ` ´ ` / |
+ / \ / \ / --- < IN+ |
+ `-´ `-´ | |
+ --------- IN- ------- | ADC |
+ | |
+ Common-mode voltage --> --- < IN- |
+ \ +VREF -VREF |
+ +-------------------+
+ ^ ^
+ | +---- External -VREF
+ External +VREF
+
+A **pseudo-differential unipolar** input has the limitations a differential
+unipolar channel would have, meaning the analog voltage to the positive input
+IN+ must stay within IN- to +VREF. The fixed voltage to IN- is often called
+common-mode voltage and it must be within -VREF to +VREF as would be expected
+from the signal to any differential channel negative input.
+
+The voltage measured from IN+ is relative to IN- but, unlike differential
+channels, pseudo-differential setups are intended to gauge single-ended input
+signals. To enable applications to calculate IN+ voltage with respect to system
+ground, the IIO channel may provide an ``_offset`` sysfs attribute to be added
+to ADC output when converting raw data to voltage units. In many setups, the
+common-mode voltage input is at GND level and the ``_offset`` attribute is
+omitted due to being always zero.
+
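+As a worked example, assume a hypothetical 12-bit conversion of IN+ - IN-
+with +VREF at 2.5 V and a 1.25 V common-mode voltage (all sysfs values below
+are illustrative)::
+
+    $ cat in_voltage0_raw
+    819
+    $ cat in_voltage0_offset
+    4096
+    $ cat in_voltage0_scale
+    0.305175781
+    # voltage relative to ground: (819 + 4096) * 0.305175781 mV ~= 1500 mV
+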
+Device tree example for pseudo-differential unipolar channel::
+
+ adc@0 {
+ ...
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@0 {
+ reg = <0>;
+ single-channel = <0>;
+ common-mode-channel = <1>;
+ };
+ };
+
+Do not set ``differential`` in the channel ``iio_chan_spec`` struct of
+pseudo-differential channels.
+
+2.3.2 Pseudo-differential Bipolar Channels
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+::
+
+ -------- +VREF ------ +-------------------+
+ ´ ` ´ ` / |
+ / \ / \ / --- < IN+ |
+ `-´ `-´ | |
+ -------- -VREF ------ | ADC |
+ | |
+ Common-mode voltage --> --- < IN- |
+ \ +VREF -VREF |
+ +-------------------+
+ ^ ^
+ | +---- External -VREF
+ External +VREF
+
+A **pseudo-differential bipolar** input is not limited by the level at IN-, but
+it will be limited to -VREF or to GND on the lower end of the input range,
+depending on the particular ADC. Similar to their unipolar counterparts,
+pseudo-differential bipolar channels ought to declare an ``_offset`` attribute
+to enable the conversion of raw ADC data to voltage units. For the setup with
+IN- connected to GND, ``_offset`` is often omitted.
+
+Device tree example for pseudo-differential bipolar channel::
+
+ adc@0 {
+ ...
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@0 {
+ reg = <0>;
+ bipolar;
+ single-channel = <0>;
+ common-mode-channel = <1>;
+ };
+ };
diff --git a/Documentation/iio/index.rst b/Documentation/iio/index.rst
index 5710f5b9e958..bbb2edce8272 100644
--- a/Documentation/iio/index.rst
+++ b/Documentation/iio/index.rst
@@ -7,6 +7,7 @@ Industrial I/O
.. toctree::
:maxdepth: 1
+ iio_adc
iio_configfs
iio_devbuf
iio_dmabuf_api
@@ -19,13 +20,16 @@ Industrial I/O Kernel Drivers
:maxdepth: 1
ad4000
+ ad4030
ad4695
+ ad7191
ad7380
ad7606
ad7625
ad7944
adis16475
adis16480
+ adis16550
adxl380
bno055
ep93xx_adc
diff --git a/Documentation/mm/balance.rst b/Documentation/mm/balance.rst
index abaa78561c31..c4962c89a7f5 100644
--- a/Documentation/mm/balance.rst
+++ b/Documentation/mm/balance.rst
@@ -81,7 +81,7 @@ Page stealing from process memory and shm is done if stealing the page would
alleviate memory pressure on any zone in the page's node that has fallen below
its watermark.
-watemark[WMARK_MIN/WMARK_LOW/WMARK_HIGH]/low_on_memory/zone_wake_kswapd: These
+watermark[WMARK_MIN/WMARK_LOW/WMARK_HIGH]/low_on_memory/zone_wake_kswapd: These
are per-zone fields, used to determine when a zone needs to be balanced. When
the number of pages falls below watermark[WMARK_MIN], the hysteric field
low_on_memory gets set. This stays set till the number of free pages becomes
diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst
index e28c6a1b40ae..f12d33749329 100644
--- a/Documentation/mm/damon/design.rst
+++ b/Documentation/mm/damon/design.rst
@@ -313,6 +313,10 @@ sufficient for the given purpose, it shouldn't be unnecessarily further
lowered. It is recommended to be set proportional to ``aggregation interval``.
By default, the ratio is set as ``1/20``, and it is still recommended.
+Based on the manual tuning guide, DAMON provides a more intuitive knob-based
+intervals auto-tuning mechanism. Please refer to :ref:`the design document of
+the feature <damon_design_monitoring_intervals_autotuning>` for details.
+
Refer to below documents for an example tuning based on the above guide.
.. toctree::
@@ -321,6 +325,52 @@ Refer to below documents for an example tuning based on the above guide.
monitoring_intervals_tuning_example
+.. _damon_design_monitoring_intervals_autotuning:
+
+Monitoring Intervals Auto-tuning
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+DAMON provides automatic tuning of the ``sampling interval`` and ``aggregation
+interval`` based on :ref:`the tuning guide idea
+<damon_design_monitoring_params_tuning_guide>`. The tuning mechanism allows
+users to set the desired amount of access events to observe via DAMON within
+a given time interval. The target can be specified by the user as a ratio of
+DAMON-observed access events to the theoretical maximum amount of the events
+(``access_bp``), as measured within a given number of aggregations
+(``aggrs``).
+
+The DAMON-observed access events are calculated at byte granularity based on
+the DAMON :ref:`region assumption <damon_design_region_based_sampling>`. For
+example, if a region of size ``X`` bytes with ``Y`` ``nr_accesses`` is found,
+it means ``X * Y`` access events were observed by DAMON. The theoretical
+maximum number of access events for the region is calculated in the same way,
+but with ``Y`` replaced by the theoretical maximum ``nr_accesses``, which can
+be calculated as ``aggregation interval / sampling interval``.
+
+The mechanism calculates the ratio of access events for ``aggrs`` aggregations,
+and increases or decreases the ``sampling interval`` and ``aggregation
+interval`` by the same ratio, if the observed access ratio is lower or higher
+than the target, respectively. The ratio of the intervals change is decided in
+proportion to the distance between the current sample ratio and the target
+ratio.
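+
+As a rough sketch of the arithmetic described above (illustrative C-like
+pseudocode with hypothetical names, not the kernel's implementation)::
+
+    /* once per tuning window, i.e., every 'aggrs' aggregations */
+    max_nr_accesses = aggregation_us / sampling_us;
+    for_each_region(r) {
+            observed += size_of(r) * nr_accesses_of(r);
+            theoretical_max += size_of(r) * max_nr_accesses;
+    }
+    access_bp = observed * 10000 / theoretical_max;
+    /* lower observed ratio -> longer intervals, and vice versa */
+    sampling_us = sampling_us * target_access_bp / access_bp;
+    sampling_us = clamp(sampling_us, min_sample_us, max_sample_us);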
+
+The user can further set the minimum and maximum ``sampling interval`` that can
+be set by the tuning mechanism, using two parameters (``min_sample_us`` and
+``max_sample_us``). Because the tuning mechanism always changes ``sampling
+interval`` and ``aggregation interval`` by the same ratio, these parameters
+effectively bound the ``aggregation interval`` after each tuning change as
+well.
+
+The tuning is turned off by default, and needs to be enabled explicitly by the
+user. As a rule of thumb based on the Pareto principle, a 4% access samples
+ratio target is recommended. Note that the Pareto principle (80/20 rule) is
+applied twice. That is, a 4% (20% of 20%) DAMON-observed access events ratio
+(source) is assumed to capture 64% (80% multiplied by 80%) of the real access
+events (outcomes).
+
+To know how user-space can use this feature via :ref:`DAMON sysfs interface
+<sysfs_interface>`, refer to :ref:`intervals_goal <sysfs_scheme>` part of
+the documentation.
+
+
.. _damon_design_damos:
Operation Schemes
@@ -569,11 +619,22 @@ number of filters for each scheme. Each filter specifies
- whether it is to allow (include) or reject (exclude) applying
the scheme's action to the memory (``allow``).
-When multiple filters are installed, each filter is evaluated in the installed
-order. If a part of memory is matched to one of the filter, next filters are
-ignored. If the memory passes through the filters evaluation stage because it
-is not matched to any of the filters, applying the scheme's action to it is
-allowed, same to the behavior when no filter exists.
+For efficient handling of filters, some types of filters are handled by the
+core layer, while others are handled by the operations set. In the latter
+case, support for those filter types hence depends on the DAMON operations
+set. For core layer-handled filters, memory regions excluded by a filter are
+not counted as regions the scheme has tried to apply its action to. In
+contrast, if a memory region is filtered out by an operations set
+layer-handled filter, it is counted as tried. This difference affects the
+statistics.
+
+When multiple filters are installed, the group of filters handled by the core
+layer is evaluated first. After that, the group of filters handled by the
+operations layer is evaluated. Filters in each group are evaluated in the
+order of installation. If a part of memory matches one of the filters, the
+remaining filters are ignored. If the part passes through the filter
+evaluation stage without matching any of the filters, whether the scheme's
+action is applied to it depends on the last filter's allowance type: if the
+last filter was an allow filter, the part of memory is rejected, and vice
+versa.
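+
+The evaluation order and the no-match default can be sketched as below. This
+is illustrative pseudocode only; all names are hypothetical and the kernel's
+actual implementation differs::
+
+    /* return whether the scheme's action may be applied to a part of memory */
+    bool allowed(part)
+    {
+            /* core layer-handled filters first, then operations layer ones */
+            for_each_installed_filter(f) {
+                    if (matches(f, part))
+                            return f->allow;        /* first match decides */
+            }
+            /* no match: the last filter's allowance type decides */
+            return !last_filter->allow;
+    }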
For example, let's assume 1) a filter for allowing anonymous pages and 2)
another filter for rejecting young pages are installed in the order. If a page
@@ -585,39 +646,29 @@ second reject-filter blocks it. If the page is neither anonymous nor young,
the page will pass through the filters evaluation stage since there is no
matching filter, and the action will be applied to the page.
-Note that the action can equally be applied to memory that either explicitly
-filter-allowed or filters evaluation stage passed. It means that installing
-allow-filters at the end of the list makes no practical change but only
-filters-checking overhead.
-
-For efficient handling of filters, some types of filters are handled by the
-core layer, while others are handled by operations set. In the latter case,
-hence, support of the filter types depends on the DAMON operations set. In
-case of the core layer-handled filters, the memory regions that excluded by the
-filter are not counted as the scheme has tried to the region. In contrast, if
-a memory regions is filtered by an operations set layer-handled filter, it is
-counted as the scheme has tried. This difference affects the statistics.
-
Below ``type`` of filters are currently supported.
-- anonymous page
- - Applied to pages that containing data that not stored in files.
- - Handled by operations set layer. Supported by only ``paddr`` set.
-- memory cgroup
- - Applied to pages that belonging to a given cgroup.
- - Handled by operations set layer. Supported by only ``paddr`` set.
-- young page
- - Applied to pages that are accessed after the last access check from the
- scheme.
- - Handled by operations set layer. Supported by only ``paddr`` set.
-- address range
- - Applied to pages that belonging to a given address range.
- - Handled by the core logic.
-- DAMON monitoring target
- - Applied to pages that belonging to a given DAMON monitoring target.
- - Handled by the core logic.
-
-To know how user-space can set the watermarks via :ref:`DAMON sysfs interface
+- Core layer handled
+  - addr
+    - Applied to pages belonging to a given address range.
+  - target
+    - Applied to pages belonging to a given DAMON monitoring target.
+- Operations layer handled, supported only by the ``paddr`` operations set.
+  - anon
+    - Applied to pages containing data that is not stored in files.
+  - active
+    - Applied to active pages.
+  - memcg
+    - Applied to pages belonging to a given cgroup.
+  - young
+    - Applied to pages that are accessed after the last access check from the
+      scheme.
+  - hugepage_size
+    - Applied to pages that are managed in a given size range.
+  - unmapped
+    - Applied to pages that are unmapped.
+
+To know how user-space can set the filters via :ref:`DAMON sysfs interface
<sysfs_interface>`, refer to :ref:`filters <sysfs_filters>` part of the
documentation.
diff --git a/Documentation/mm/damon/monitoring_intervals_tuning_example.rst b/Documentation/mm/damon/monitoring_intervals_tuning_example.rst
index 334a854efb40..7207cbed591f 100644
--- a/Documentation/mm/damon/monitoring_intervals_tuning_example.rst
+++ b/Documentation/mm/damon/monitoring_intervals_tuning_example.rst
@@ -36,7 +36,7 @@ Then, list the DAMON-found regions of different access patterns, sorted by the
"access temperature". "Access temperature" is a metric representing the
access-hotness of a region. It is calculated as a weighted sum of the access
frequency and the age of the region. If the access frequency is 0 %, the
-temperature is multipled by minus one. That is, if a region is not accessed,
+temperature is multiplied by minus one. That is, if a region is not accessed,
it gets minus temperature and it gets lower as not accessed for longer time.
The sorting is in temperature-ascendint order, so the region at the top of the
list is the coldest, and the one at the bottom is the hottest one. ::
@@ -58,11 +58,11 @@ list is the coldest, and the one at the bottom is the hottest one. ::
The list shows not seemingly hot regions, and only minimum access pattern
diversity. Every region has zero access frequency. The number of region is
10, which is the default ``min_nr_regions value``. Size of each region is also
-nearly idential. We can suspect this is because “adaptive regions adjustment”
+nearly identical. We can suspect this is because “adaptive regions adjustment”
mechanism was not well working. As the guide suggested, we can get relative
hotness of regions using ``age`` as the recency information. That would be
better than nothing, but given the fact that the longest age is only about 6
-seconds while we waited about ten minuts, it is unclear how useful this will
+seconds while we waited about ten minutes, it is unclear how useful this will
be.
The temperature ranges to total size of regions of each range histogram
@@ -190,7 +190,7 @@ for sampling and aggregation intervals, respectively). ::
The number of regions having different access patterns has significantly
increased. Size of each region is also more varied. Total size of non-zero
access frequency regions is also significantly increased. Maybe this is already
-good enough to make some meaningful memory management efficieny changes.
+good enough to make some meaningful memory management efficiency changes.
800ms/16s intervals: Another bias
=================================
diff --git a/Documentation/mm/hmm.rst b/Documentation/mm/hmm.rst
index f6d53c37a2ca..7d61b7a8b65b 100644
--- a/Documentation/mm/hmm.rst
+++ b/Documentation/mm/hmm.rst
@@ -400,7 +400,7 @@ Exclusive access memory
Some devices have features such as atomic PTE bits that can be used to implement
atomic access to system memory. To support atomic operations to a shared virtual
memory page such a device needs access to that page which is exclusive of any
-userspace access from the CPU. The ``make_device_exclusive_range()`` function
+userspace access from the CPU. The ``make_device_exclusive()`` function
can be used to make a memory range inaccessible from userspace.
This replaces all mappings for pages in the given range with special swap
diff --git a/Documentation/mm/index.rst b/Documentation/mm/index.rst
index 0be1c7503a01..d3ada3e45e10 100644
--- a/Documentation/mm/index.rst
+++ b/Documentation/mm/index.rst
@@ -62,5 +62,4 @@ documentation, or deleted if it has served its purpose.
unevictable-lru
vmalloced-kernel-stacks
vmemmap_dedup
- z3fold
zsmalloc
diff --git a/Documentation/mm/physical_memory.rst b/Documentation/mm/physical_memory.rst
index 71fd4a6acf42..d3ac106e6b14 100644
--- a/Documentation/mm/physical_memory.rst
+++ b/Documentation/mm/physical_memory.rst
@@ -338,10 +338,272 @@ Statistics
Zones
=====
+As we have mentioned, each zone in memory is described by a ``struct zone``
+which is an element of the ``node_zones`` array of the node it belongs to.
+``struct zone`` is the core data structure of the page allocator. A zone
+represents a range of physical memory and may have holes.
+
+The page allocator uses the GFP flags (see :ref:`mm-api-gfp-flags`) specified by
+a memory allocation to determine the highest zone in a node from which the
+memory allocation can allocate memory. The page allocator first allocates memory
+from that zone; if it can't allocate the requested amount of memory from that
+zone, it will allocate memory from the next lower zone in the node, and the
+process continues down to and including the lowest zone. For example, if
+a node contains ``ZONE_DMA32``, ``ZONE_NORMAL`` and ``ZONE_MOVABLE`` and the
+highest zone of a memory allocation is ``ZONE_MOVABLE``, the order of the zones
+from which the page allocator allocates memory is ``ZONE_MOVABLE`` >
+``ZONE_NORMAL`` > ``ZONE_DMA32``.
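+
+A conceptual sketch of this fallback (illustrative only; ``try_alloc()`` is a
+hypothetical helper, and the real allocator iterates a precomputed zonelist
+rather than raw zone indexes)::
+
+    struct page *alloc_from_node(pg_data_t *node, int highest_zoneidx,
+                                 unsigned int order)
+    {
+            int i;
+
+            for (i = highest_zoneidx; i >= 0; i--) {
+                    struct page *page = try_alloc(&node->node_zones[i], order);
+
+                    if (page)
+                            return page;
+            }
+            return NULL;    /* all eligible zones exhausted */
+    }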
+
+At runtime, free pages in a zone are in the Per-CPU Pagesets (PCP) or free areas
+of the zone. The Per-CPU Pagesets are a vital mechanism in the kernel's memory
+management system. By handling most frequent allocations and frees locally on
+each CPU, the Per-CPU Pagesets improve performance and scalability, especially
+on systems with many cores. The page allocator in the kernel employs a two-step
+strategy for memory allocation, starting with the Per-CPU Pagesets before
+falling back to the buddy allocator. Pages are transferred between the Per-CPU
+Pagesets and the global free areas (managed by the buddy allocator) in batches.
+This minimizes the overhead of frequent interactions with the global buddy
+allocator.
+
+Architecture-specific code calls free_area_init() to initialize zones.
+
+Zone structure
+--------------
+The zone structure, ``struct zone``, is defined in ``include/linux/mmzone.h``.
+Here we briefly describe the fields of this structure:
-.. admonition:: Stub
+General
+~~~~~~~
- This section is incomplete. Please list and describe the appropriate fields.
+``_watermark``
+  The watermarks for this zone. When the amount of free pages in a zone is below
+  the min watermark, boosting is ignored and an allocation may trigger direct
+  reclaim and direct compaction; the min watermark is also used to throttle
+  direct reclaim. When the amount of free pages in a zone is below the low
+  watermark, kswapd is woken up. When the amount of free pages in a zone is
+  above the high watermark, kswapd stops reclaiming (the zone is balanced) when
+  the ``NUMA_BALANCING_MEMORY_TIERING`` bit of ``sysctl_numa_balancing_mode``
+  is not set. The promo watermark is used for memory tiering and NUMA
+  balancing. When the amount of free pages in a zone is above the promo
+  watermark, kswapd stops reclaiming when the ``NUMA_BALANCING_MEMORY_TIERING``
+  bit of ``sysctl_numa_balancing_mode`` is set. The watermarks are set by
+  ``__setup_per_zone_wmarks()``. The min watermark is calculated according to
+  the ``vm.min_free_kbytes`` sysctl. The other three watermarks are set at
+  fixed steps above it; the step distance itself is calculated taking the
+  ``vm.watermark_scale_factor`` sysctl into account.
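+
+  Roughly, as a simplified sketch of ``__setup_per_zone_wmarks()`` (the real
+  code treats highmem zones and watermark boosting specially)::
+
+      min = min_free_pages * zone_managed_pages(zone) / total_managed_pages;
+      step = max(min / 4,
+                 zone_managed_pages(zone) * watermark_scale_factor / 10000);
+      low = min + step;
+      high = min + 2 * step;
+      promo = min + 3 * step;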
+
+``watermark_boost``
+  The number of pages used to boost watermarks in order to increase reclaim
+  pressure, reduce the likelihood of future fallbacks, and wake kswapd
+  immediately, as the node may be balanced overall and kswapd would not wake
+  naturally.
+
+``nr_reserved_highatomic``
+ The number of pages which are reserved for high-order atomic allocations.
+
+``nr_free_highatomic``
+  The number of free pages in reserved highatomic pageblocks.
+
+``lowmem_reserve``
+ The array of the amounts of the memory reserved in this zone for memory
+ allocations. For example, if the highest zone a memory allocation can
+ allocate memory from is ``ZONE_MOVABLE``, the amount of memory reserved in
+ this zone for this allocation is ``lowmem_reserve[ZONE_MOVABLE]`` when
+ attempting to allocate memory from this zone. This is a mechanism the page
+ allocator uses to prevent allocations which could use ``highmem`` from using
+ too much ``lowmem``. For some specialised workloads on ``highmem`` machines,
+ it is dangerous for the kernel to allow process memory to be allocated from
+ the ``lowmem`` zone. This is because that memory could then be pinned via the
+ ``mlock()`` system call, or by unavailability of swapspace.
+  The ``vm.lowmem_reserve_ratio`` sysctl determines how aggressive the kernel is in
+ defending these lower zones. This array is recalculated by
+ ``setup_per_zone_lowmem_reserve()`` at runtime if ``vm.lowmem_reserve_ratio``
+ sysctl changes.
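+
+  As an illustrative sketch (with hypothetical names) of how the reserve
+  factors into the zone watermark check::
+
+      /* allocating from 'zone' for a request whose highest usable zone
+       * index is 'highest_zoneidx' */
+      if (free_pages - zone->lowmem_reserve[highest_zoneidx] < watermark)
+              goto try_next_zone;     /* this zone is off-limits */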
+
+``node``
+ The index of the node this zone belongs to. Available only when
+  ``CONFIG_NUMA`` is enabled because there is only one node in a UMA system.
+
+``zone_pgdat``
+ Pointer to the ``struct pglist_data`` of the node this zone belongs to.
+
+``per_cpu_pageset``
+ Pointer to the Per-CPU Pagesets (PCP) allocated and initialized by
+ ``setup_zone_pageset()``. By handling most frequent allocations and frees
+ locally on each CPU, PCP improves performance and scalability on systems with
+ many cores.
+
+``pageset_high_min``
+ Copied to the ``high_min`` of the Per-CPU Pagesets for faster access.
+
+``pageset_high_max``
+ Copied to the ``high_max`` of the Per-CPU Pagesets for faster access.
+
+``pageset_batch``
+ Copied to the ``batch`` of the Per-CPU Pagesets for faster access. The
+ ``batch``, ``high_min`` and ``high_max`` of the Per-CPU Pagesets are used to
+ calculate the number of elements the Per-CPU Pagesets obtain from the buddy
+ allocator under a single hold of the lock for efficiency. They are also used
+  to decide if the Per-CPU Pagesets return pages to the buddy allocator in the
+  page free process.
+
+``pageblock_flags``
+  The pointer to the flags for the pageblocks in the zone (see
+  ``include/linux/pageblock-flags.h`` for the list of flags). The memory is
+  allocated in ``setup_usemap()``. Each pageblock occupies
+  ``NR_PAGEBLOCK_BITS`` bits. Defined only when ``CONFIG_FLATMEM`` is enabled.
+  The flags are stored in ``mem_section`` when ``CONFIG_SPARSEMEM`` is enabled.
+
+``zone_start_pfn``
+ The start pfn of the zone. It is initialized by
+ ``calculate_node_totalpages()``.
+
+``managed_pages``
+  The present pages managed by the buddy system, which is calculated as:
+  ``managed_pages`` = ``present_pages`` - ``reserved_pages``, where
+  ``reserved_pages`` includes pages allocated by the memblock allocator. It
+  should be used by the page allocator and VM scanner to calculate all kinds
+  of watermarks and thresholds. It is accessed using ``atomic_long_xxx()``
+  functions. It is initialized in ``free_area_init_core()`` and then
+  reinitialized when the memblock allocator frees pages into the buddy system.
+
+``spanned_pages``
+ The total pages spanned by the zone, including holes, which is calculated as:
+ ``spanned_pages`` = ``zone_end_pfn`` - ``zone_start_pfn``. It is initialized
+ by ``calculate_node_totalpages()``.
+
+``present_pages``
+ The physical pages existing within the zone, which is calculated as:
+ ``present_pages`` = ``spanned_pages`` - ``absent_pages`` (pages in holes). It
+ may be used by memory hotplug or memory power management logic to figure out
+ unmanaged pages by checking (``present_pages`` - ``managed_pages``). Write
+ access to ``present_pages`` at runtime should be protected by
+  ``mem_hotplug_begin/done()``. Any reader who can't tolerate drift of
+  ``present_pages`` should use ``get_online_mems()`` to get a stable value. It
+ is initialized by ``calculate_node_totalpages()``.
+
+``present_early_pages``
+ The present pages existing within the zone located on memory available since
+ early boot, excluding hotplugged memory. Defined only when
+ ``CONFIG_MEMORY_HOTPLUG`` is enabled and initialized by
+ ``calculate_node_totalpages()``.
+
+``cma_pages``
+ The pages reserved for CMA use. These pages behave like ``ZONE_MOVABLE`` when
+ they are not used for CMA. Defined only when ``CONFIG_CMA`` is enabled.
+
+``name``
+ The name of the zone. It is a pointer to the corresponding element of
+ the ``zone_names`` array.
+
+``nr_isolate_pageblock``
+  Number of isolated pageblocks. It is used to solve the incorrect freepage
+  counting problem caused by racy retrieval of a pageblock's migratetype.
+  Protected by ``zone->lock``. Defined only when ``CONFIG_MEMORY_ISOLATION``
+  is enabled.
+
+``span_seqlock``
+ The seqlock to protect ``zone_start_pfn`` and ``spanned_pages``. It is a
+ seqlock because it has to be read outside of ``zone->lock``, and it is done in
+ the main allocator path. However, the seqlock is written quite infrequently.
+ Defined only when ``CONFIG_MEMORY_HOTPLUG`` is enabled.
+
+``initialized``
+ The flag indicating if the zone is initialized. Set by
+ ``init_currently_empty_zone()`` during boot.
+
+``free_area``
+ The array of free areas, where each element corresponds to a specific order
+  free memory efficiently. When allocating, it tries to find the smallest
+  sufficient block; if the smallest sufficient block is larger than the
+  requested size, it will be recursively split into the next smaller blocks
+  until the required size is reached. When a page is freed, it may be merged
+  with its buddy to form a larger block.
+ with its buddy to form a larger block. It is initialized by
+ ``zone_init_free_lists()``.
+
+``unaccepted_pages``
+  The list of pages to be accepted. All pages on the list are of order
+  ``MAX_PAGE_ORDER``.
+ Defined only when ``CONFIG_UNACCEPTED_MEMORY`` is enabled.
+
+``flags``
+  The zone flags. The least significant three bits are used and defined by
+ ``enum zone_flags``. ``ZONE_BOOSTED_WATERMARK`` (bit 0): zone recently boosted
+ watermarks. Cleared when kswapd is woken. ``ZONE_RECLAIM_ACTIVE`` (bit 1):
+ kswapd may be scanning the zone. ``ZONE_BELOW_HIGH`` (bit 2): zone is below
+ high watermark.
+
+``lock``
+ The main lock that protects the internal data structures of the page allocator
+ specific to the zone, especially protects ``free_area``.
+
+``percpu_drift_mark``
+ When free pages are below this point, additional steps are taken when reading
+ the number of free pages to avoid per-cpu counter drift allowing watermarks
+ to be breached. It is updated in ``refresh_zone_stat_thresholds()``.
+
+Compaction control
+~~~~~~~~~~~~~~~~~~
+
+``compact_cached_free_pfn``
+ The PFN where compaction free scanner should start in the next scan.
+
+``compact_cached_migrate_pfn``
+ The PFNs where compaction migration scanner should start in the next scan.
+ This array has two elements: the first one is used in ``MIGRATE_ASYNC`` mode,
+ and the other one is used in ``MIGRATE_SYNC`` mode.
+
+``compact_init_migrate_pfn``
+ The initial migration PFN which is initialized to 0 at boot time, and to the
+ first pageblock with migratable pages in the zone after a full compaction
+ finishes. It is used to check if a scan is a whole zone scan or not.
+
+``compact_init_free_pfn``
+ The initial free PFN which is initialized to 0 at boot time and to the last
+ pageblock with free ``MIGRATE_MOVABLE`` pages in the zone. It is used to check
+ if it is the start of a scan.
+
+``compact_considered``
+ The number of compactions attempted since last failure. It is reset in
+ ``defer_compaction()`` when a compaction fails to result in a page allocation
+ success. It is increased by 1 in ``compaction_deferred()`` when a compaction
+ should be skipped. ``compaction_deferred()`` is called before
+  ``compact_zone()`` is called; ``compaction_defer_reset()`` is called when
+  ``compact_zone()`` returns ``COMPACT_SUCCESS``; ``defer_compaction()`` is
+ called when ``compact_zone()`` returns ``COMPACT_PARTIAL_SKIPPED`` or
+ ``COMPACT_COMPLETE``.
+
+``compact_defer_shift``
+ The number of compactions skipped before trying again is
+ ``1<<compact_defer_shift``. It is increased by 1 in ``defer_compaction()``.
+ It is reset in ``compaction_defer_reset()`` when a direct compaction results
+ in a page allocation success. Its maximum value is ``COMPACT_MAX_DEFER_SHIFT``.
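+
+  A simplified sketch of the resulting back-off check (illustrative only)::
+
+      /* in compaction_deferred() */
+      if (++zone->compact_considered < (1UL << zone->compact_defer_shift))
+              return true;    /* defer, i.e. skip this compaction attempt */
+      return false;           /* enough attempts skipped; try compaction */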
+
+``compact_order_failed``
+ The minimum compaction failed order. It is set in ``compaction_defer_reset()``
+ when a compaction succeeds and in ``defer_compaction()`` when a compaction
+ fails to result in a page allocation success.
+
+``compact_blockskip_flush``
+  Set to true when the compaction migration scanner and free scanner meet,
+  which means the ``PB_migrate_skip`` bits should be cleared.
+
+``contiguous``
+  Set to true when the zone is contiguous (in other words, it has no holes).
+
+Statistics
+~~~~~~~~~~
+
+``vm_stat``
+ VM statistics for the zone. The items tracked are defined by
+ ``enum zone_stat_item``.
+
+``vm_numa_event``
+ VM NUMA event statistics for the zone. The items tracked are defined by
+ ``enum numa_stat_item``.
+
+``per_cpu_zonestats``
+ Per-CPU VM statistics for the zone. It records VM statistics and VM NUMA event
+ statistics on a per-CPU basis. It reduces updates to the global ``vm_stat``
+ and ``vm_numa_event`` fields of the zone to improve performance.
.. _pages:
diff --git a/Documentation/mm/process_addrs.rst b/Documentation/mm/process_addrs.rst
index 81417fa2ed20..e6756e78b476 100644
--- a/Documentation/mm/process_addrs.rst
+++ b/Documentation/mm/process_addrs.rst
@@ -716,9 +716,14 @@ calls :c:func:`!rcu_read_lock` to ensure that the VMA is looked up in an RCU
critical section, then attempts to VMA lock it via :c:func:`!vma_start_read`,
before releasing the RCU lock via :c:func:`!rcu_read_unlock`.
-VMA read locks hold the read lock on the :c:member:`!vma->vm_lock` semaphore for
-their duration and the caller of :c:func:`!lock_vma_under_rcu` must release it
-via :c:func:`!vma_end_read`.
+In cases where the user already holds the mmap read lock, :c:func:`!vma_start_read_locked`
+and :c:func:`!vma_start_read_locked_nested` can be used. These functions do not
+fail due to lock contention, but the caller should still check their return
+values in case they fail for other reasons.
+
+VMA read locks increment :c:member:`!vma.vm_refcnt` reference counter for their
+duration and the caller of :c:func:`!lock_vma_under_rcu` must drop it via
+:c:func:`!vma_end_read`.
VMA **write** locks are acquired via :c:func:`!vma_start_write` in instances where a
VMA is about to be modified, unlike :c:func:`!vma_start_read` the lock is always
@@ -726,9 +731,9 @@ acquired. An mmap write lock **must** be held for the duration of the VMA write
lock, releasing or downgrading the mmap write lock also releases the VMA write
lock so there is no :c:func:`!vma_end_write` function.
-Note that a semaphore write lock is not held across a VMA lock. Rather, a
-sequence number is used for serialisation, and the write semaphore is only
-acquired at the point of write lock to update this.
+Note that when write-locking a VMA lock, the :c:member:`!vma.vm_refcnt` is
+temporarily modified so that readers can detect the presence of a writer. The
+reference counter is restored once the vma sequence number used for
+serialisation is updated.
This ensures the semantics we require - VMA write locks provide exclusive write
access to the VMA.
@@ -738,7 +743,7 @@ Implementation details
The VMA lock mechanism is designed to be a lightweight means of avoiding the use
of the heavily contended mmap lock. It is implemented using a combination of a
-read/write semaphore and sequence numbers belonging to the containing
+reference counter and sequence numbers belonging to the containing
:c:struct:`!struct mm_struct` and the VMA.
Read locks are acquired via :c:func:`!vma_start_read`, which is an optimistic
@@ -779,28 +784,31 @@ release of any VMA locks on its release makes sense, as you would never want to
keep VMAs locked across entirely separate write operations. It also maintains
correct lock ordering.
-Each time a VMA read lock is acquired, we acquire a read lock on the
-:c:member:`!vma->vm_lock` read/write semaphore and hold it, while checking that
-the sequence count of the VMA does not match that of the mm.
+Each time a VMA read lock is acquired, we increment :c:member:`!vma.vm_refcnt`
+reference counter and check that the sequence count of the VMA does not match
+that of the mm.
-If it does, the read lock fails. If it does not, we hold the lock, excluding
-writers, but permitting other readers, who will also obtain this lock under RCU.
+If it does, the read lock fails and :c:member:`!vma.vm_refcnt` is dropped.
+If it does not, we keep the reference counter raised, excluding writers, but
+permitting other readers, who can also obtain this lock under RCU.
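+
+As a rough sketch of this read-side fast path (illustrative pseudocode with
+hypothetical helper names, not the kernel's exact code)::
+
+    vma = mas_walk(&mas);                   /* RCU-safe maple tree lookup */
+    if (!refcnt_inc_unless_writer(&vma->vm_refcnt))
+            return NULL;                    /* a writer holds the VMA lock */
+    if (vma_sequence_matches_mm(vma, mm)) {
+            refcnt_dec(&vma->vm_refcnt);    /* write-locked; read lock fails */
+            return NULL;
+    }
+    return vma;                             /* read lock held via vm_refcnt */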
Importantly, maple tree operations performed in :c:func:`!lock_vma_under_rcu`
are also RCU safe, so the whole read lock operation is guaranteed to function
correctly.
-On the write side, we acquire a write lock on the :c:member:`!vma->vm_lock`
-read/write semaphore, before setting the VMA's sequence number under this lock,
-also simultaneously holding the mmap write lock.
+On the write side, we set a bit in :c:member:`!vma.vm_refcnt` which can't be
+modified by readers and wait for all readers to drop their reference count.
+Once there are no readers, the VMA's sequence number is set to match that of
+the mm. During this entire operation the mmap write lock is held.
This way, if any read locks are in effect, :c:func:`!vma_start_write` will sleep
until these are finished and mutual exclusion is achieved.
-After setting the VMA's sequence number, the lock is released, avoiding
-complexity with a long-term held write lock.
+After setting the VMA's sequence number, the bit in :c:member:`!vma.vm_refcnt`
+indicating a writer is cleared. From this point on, the VMA's sequence number
+will indicate the VMA's write-locked state until the mmap write lock is
+dropped or downgraded.
-This clever combination of a read/write semaphore and sequence count allows for
+This clever combination of a reference counter and sequence count allows for
fast RCU-based per-VMA lock acquisition (especially on page fault, though
utilised elsewhere) with minimal complexity around lock ordering.
diff --git a/Documentation/mm/transhuge.rst b/Documentation/mm/transhuge.rst
index a2cd8800d527..0e7f8e4cd2e3 100644
--- a/Documentation/mm/transhuge.rst
+++ b/Documentation/mm/transhuge.rst
@@ -116,14 +116,27 @@ pages:
succeeds on tail pages.
- map/unmap of a PMD entry for the whole THP increment/decrement
- folio->_entire_mapcount, increment/decrement folio->_large_mapcount
- and also increment/decrement folio->_nr_pages_mapped by ENTIRELY_MAPPED
- when _entire_mapcount goes from -1 to 0 or 0 to -1.
+ folio->_entire_mapcount and folio->_large_mapcount.
+
+ We also maintain the two slots for tracking MM owners (MM ID and
+ corresponding mapcount), and the current status ("maybe mapped shared" vs.
+ "mapped exclusively").
+
+ With CONFIG_PAGE_MAPCOUNT, we also increment/decrement
+ folio->_nr_pages_mapped by ENTIRELY_MAPPED when _entire_mapcount goes
+ from -1 to 0 or 0 to -1.
- map/unmap of individual pages with PTE entry increment/decrement
- page->_mapcount, increment/decrement folio->_large_mapcount and also
- increment/decrement folio->_nr_pages_mapped when page->_mapcount goes
- from -1 to 0 or 0 to -1 as this counts the number of pages mapped by PTE.
+ folio->_large_mapcount.
+
+ We also maintain the two slots for tracking MM owners (MM ID and
+ corresponding mapcount), and the current status ("maybe mapped shared" vs.
+ "mapped exclusively").
+
+ With CONFIG_PAGE_MAPCOUNT, we also increment/decrement
+ page->_mapcount and increment/decrement folio->_nr_pages_mapped when
+ page->_mapcount goes from -1 to 0 or 0 to -1 as this counts the number
+ of pages mapped by PTE.
split_huge_page internally has to distribute the refcounts in the head
page to the tail pages before clearing all PG_head/tail bits from the page
@@ -151,8 +164,8 @@ clear where references should go after split: it will stay on the head page.
Note that split_huge_pmd() doesn't have any limitations on refcounting:
pmd can be split at any point and never fails.
-Partial unmap and deferred_split_folio()
-========================================
+Partial unmap and deferred_split_folio() (anon THP only)
+========================================================
Unmapping part of THP (with munmap() or other way) is not going to free
memory immediately. Instead, we detect that a subpage of THP is not in use
@@ -167,3 +180,13 @@ a THP crosses a VMA boundary.
The function deferred_split_folio() is used to queue a folio for splitting.
The splitting itself will happen when we get memory pressure via shrinker
interface.
+
+With CONFIG_PAGE_MAPCOUNT, we reliably detect partial mappings based on
+folio->_nr_pages_mapped.
+
+With CONFIG_NO_PAGE_MAPCOUNT, we detect partial mappings based on the
+average per-page mapcount in a THP: if the average is < 1, an anon THP is
+certainly partially mapped. As long as only a single process maps a THP,
+this detection is reliable. With long-running child processes, there can
+be scenarios where partial mappings can currently not be detected, and
+might need asynchronous detection during memory reclaim in the future.
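+
+As a sketch of this heuristic (illustrative only; reliable while a single
+process maps the folio)::
+
+    /* average per-page mapcount < 1 <=> mapcount sum < number of pages */
+    partially_mapped = folio_large_mapcount(folio) < folio_nr_pages(folio);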
diff --git a/Documentation/mm/z3fold.rst b/Documentation/mm/z3fold.rst
deleted file mode 100644
index 25b5935d06c7..000000000000
--- a/Documentation/mm/z3fold.rst
+++ /dev/null
@@ -1,28 +0,0 @@
-======
-z3fold
-======
-
-z3fold is a special purpose allocator for storing compressed pages.
-It is designed to store up to three compressed pages per physical page.
-It is a zbud derivative which allows for higher compression
-ratio keeping the simplicity and determinism of its predecessor.
-
-The main differences between z3fold and zbud are:
-
-* unlike zbud, z3fold allows for up to PAGE_SIZE allocations
-* z3fold can hold up to 3 compressed pages in its page
-* z3fold doesn't export any API itself and is thus intended to be used
- via the zpool API.
-
-To keep the determinism and simplicity, z3fold, just like zbud, always
-stores an integral number of compressed pages per page, but it can store
-up to 3 pages unlike zbud which can store at most 2. Therefore the
-compression ratio goes to around 2.7x while zbud's one is around 1.7x.
-
-Unlike zbud (but like zsmalloc for that matter) z3fold_alloc() does not
-return a dereferenceable pointer. Instead, it returns an unsigned long
-handle which encodes actual location of the allocated object.
-
-Keeping effective compression ratio close to zsmalloc's, z3fold doesn't
-depend on MMU enabled and provides more predictable reclaim behavior
-which makes it a better fit for small and response-critical systems.
diff --git a/Documentation/mm/zsmalloc.rst b/Documentation/mm/zsmalloc.rst
index 76902835e68e..d2bbecd78e14 100644
--- a/Documentation/mm/zsmalloc.rst
+++ b/Documentation/mm/zsmalloc.rst
@@ -27,9 +27,8 @@ Instead, it returns an opaque handle (unsigned long) which encodes actual
location of the allocated object. The reason for this indirection is that
zsmalloc does not keep zspages permanently mapped since that would cause
issues on 32-bit systems where the VA region for kernel space mappings
-is very small. So, before using the allocating memory, the object has to
-be mapped using zs_map_object() to get a usable pointer and subsequently
-unmapped using zs_unmap_object().
+is very small. So, the allocated memory should be used through the
+proper handle-based APIs.
stat
====
diff --git a/Documentation/rust/arch-support.rst b/Documentation/rust/arch-support.rst
index 54be7ddf3e57..6e6a515d0899 100644
--- a/Documentation/rust/arch-support.rst
+++ b/Documentation/rust/arch-support.rst
@@ -15,6 +15,7 @@ support corresponds to ``S`` values in the ``MAINTAINERS`` file.
============= ================ ==============================================
Architecture Level of support Constraints
============= ================ ==============================================
+``arm`` Maintained ARMv7 Little Endian only.
``arm64`` Maintained Little Endian only.
``loongarch`` Maintained \-
``riscv`` Maintained ``riscv64`` and LLVM/Clang only.
diff --git a/Documentation/subsystem-apis.rst b/Documentation/subsystem-apis.rst
index b52ad5b969d4..ff4fe8c936c8 100644
--- a/Documentation/subsystem-apis.rst
+++ b/Documentation/subsystem-apis.rst
@@ -71,6 +71,7 @@ Other subsystems
accounting/index
cpu-freq/index
+ edac/index
fpga/index
i2c/index
iio/index
diff --git a/Documentation/trace/coresight/coresight.rst b/Documentation/trace/coresight/coresight.rst
index d4f93d6a2d63..806699871b80 100644
--- a/Documentation/trace/coresight/coresight.rst
+++ b/Documentation/trace/coresight/coresight.rst
@@ -462,44 +462,35 @@ queried by the perf command line tool:
cs_etm// [Kernel PMU event]
- linaro@linaro-nano:~$
-
Regardless of the number of tracers available in a system (usually equal to the
amount of processor cores), the "cs_etm" PMU will be listed only once.
A Coresight PMU works the same way as any other PMU, i.e the name of the PMU is
-listed along with configuration options within forward slashes '/'. Since a
-Coresight system will typically have more than one sink, the name of the sink to
-work with needs to be specified as an event option.
-On newer kernels the available sinks are listed in sysFS under
+provided along with configuration options within forward slashes '/' (see
+`Config option formats`_).
+
+Advanced Perf framework usage
+-----------------------------
+
+Sink selection
+~~~~~~~~~~~~~~
+
+An appropriate sink will be selected automatically for use with Perf, but since
+there will typically be more than one sink, the name of the sink to use may be
+specified as a special config option prefixed with '@'.
+
+The available sinks are listed in sysFS under
($SYSFS)/bus/event_source/devices/cs_etm/sinks/::
root@localhost:/sys/bus/event_source/devices/cs_etm/sinks# ls
tmc_etf0 tmc_etr0 tpiu0
-On older kernels, this may need to be found from the list of coresight devices,
-available under ($SYSFS)/bus/coresight/devices/::
-
- root:~# ls /sys/bus/coresight/devices/
- etm0 etm1 etm2 etm3 etm4 etm5 funnel0
- funnel1 funnel2 replicator0 stm0 tmc_etf0 tmc_etr0 tpiu0
root@linaro-nano:~# perf record -e cs_etm/@tmc_etr0/u --per-thread program
-As mentioned above in section "Device Naming scheme", the names of the devices could
-look different from what is used in the example above. One must use the device names
-as it appears under the sysFS.
-
-The syntax within the forward slashes '/' is important. The '@' character
-tells the parser that a sink is about to be specified and that this is the sink
-to use for the trace session.
-
More information on the above and other example on how to use Coresight with
the perf tools can be found in the "HOWTO.md" file of the openCSD gitHub
repository [#third]_.
-Advanced perf framework usage
------------------------------
-
AutoFDO analysis using the perf tools
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -508,7 +499,7 @@ perf can be used to record and analyze trace of programs.
Execution can be recorded using 'perf record' with the cs_etm event,
specifying the name of the sink to record to, e.g::
- perf record -e cs_etm/@tmc_etr0/u --per-thread
+ perf record -e cs_etm//u --per-thread
The 'perf report' and 'perf script' commands can be used to analyze execution,
synthesizing instruction and branch events from the instruction trace.
@@ -572,7 +563,7 @@ sort example is from the AutoFDO tutorial (https://gcc.gnu.org/wiki/AutoFDO/Tuto
Bubble sorting array of 30000 elements
5910 ms
- $ perf record -e cs_etm/@tmc_etr0/u --per-thread taskset -c 2 ./sort
+ $ perf record -e cs_etm//u --per-thread taskset -c 2 ./sort
Bubble sorting array of 30000 elements
12543 ms
[ perf record: Woken up 35 times to write data ]
diff --git a/Documentation/trace/coresight/panic.rst b/Documentation/trace/coresight/panic.rst
new file mode 100644
index 000000000000..a58aa914c241
--- /dev/null
+++ b/Documentation/trace/coresight/panic.rst
@@ -0,0 +1,362 @@
+===================================================
+Using Coresight for Kernel panic and Watchdog reset
+===================================================
+
+Introduction
+------------
+This documentation is about using Linux coresight trace support to
+debug kernel panic and watchdog reset scenarios.
+
+Coresight trace during Kernel panic
+-----------------------------------
+From the coresight driver point of view, addressing the kernel panic
+situation has four main requirements.
+
+a. Support for allocation of trace buffer pages from a reserved memory area.
+   Platforms can advertise this using a new device tree property added to
+   relevant coresight nodes.
+
+b. Support for stopping coresight blocks at the time of panic
+
+c. Saving required metadata in the specified format
+
+d. Support for reading trace data captured at the time of panic
+
+Allocation of trace buffer pages from reserved RAM
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+A new optional device tree property "memory-region" is added to the
+Coresight TMC device nodes, which gives the base address and size of the
+trace buffer.
+
+Static allocation of trace buffers would ensure that both IOMMU enabled
+and disabled cases are handled. Also, platforms that support persistent
+RAM will allow users to read trace data in the subsequent boot without
+booting the crashdump kernel.
+
+Note:
+For ETR sink devices, this reserved region will be used for both trace
+capture and trace data retrieval.
+For ETF sink devices, internal SRAM would be used for trace capture,
+and it would be synced to the reserved region for retrieval.
+
+
+Disabling coresight blocks at the time of panic
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+To avoid losing relevant trace data after a kernel panic, it is
+desirable to stop the coresight blocks at the time of panic.
+
+This can be achieved by configuring the comparator, CTI and sink
+devices as below::
+
+ Trigger on panic
+ Comparator --->External out --->CTI -->External In---->ETR/ETF stop
+
+Saving metadata at the time of kernel panic
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Coresight metadata comprises all additional data that is required for a
+successful trace decode in addition to the trace data. This includes
+ETR/ETF/ETB register snapshots etc.
+
+A new optional device property "memory-region" is added to
+the ETR/ETF/ETB device nodes for this.
+
+Reading trace data captured at the time of panic
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Trace data captured at the time of panic can be read from the rebooted kernel
+or from the crashdump kernel using a special device file /dev/crash_tmc_xxx.
+This device file is created only when valid crash data is available.
+
+General flow of trace capture and decode in case of kernel panic
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+1. Enable source and sink on all the cores using the sysfs interface.
+ ETR sinks should have trace buffers allocated from reserved memory,
+ by selecting "resrv" buffer mode from sysfs.
+
+2. Run relevant tests.
+
+3. On a kernel panic, all coresight blocks are disabled, and the necessary
+   metadata is synced by the kernel panic handler.
+
+   The system would eventually reboot or boot a crashdump kernel.
+
+4. For platforms that support a crashdump kernel, raw trace data can be
+ dumped using the coresight sysfs interface from the crashdump kernel
+ itself. Persistent RAM is not a requirement in this case.
+
+5. For platforms that support persistent RAM, trace data can be dumped
+   using the coresight sysfs interface in the subsequent Linux boot.
+   A crashdump kernel is not a requirement in this case. Persistent RAM
+ ensures that trace data is intact across reboot.
+
+Coresight trace during Watchdog reset
+-------------------------------------
+The main differences between addressing the watchdog reset and the kernel
+panic case are below:
+
+a. Saving coresight metadata needs to be taken care of by the
+   SCP (system control processor) firmware in the specified format,
+   instead of by the kernel.
+
+b. The reserved memory region given by firmware for trace buffer and metadata
+   has to be in persistent RAM.
+   Note: This is a requirement for the watchdog reset case but optional
+   in the kernel panic case.
+
+Watchdog reset can be supported only on platforms that meet the above
+two requirements.
+
+Sample commands for testing a Kernel panic case with ETR sink
+-------------------------------------------------------------
+
+1. Boot the Linux kernel with "crash_kexec_post_notifiers" added to the kernel
+   bootargs. This is mandatory if the user would like to read the trace data
+   from the crashdump kernel.
+
+2. Enable the preloaded ETM configuration::
+
+ #echo 1 > /sys/kernel/config/cs-syscfg/configurations/panicstop/enable
+
+3. Configure CTI using sysfs interface::
+
+ #./cti_setup.sh
+
+ #cat cti_setup.sh
+
+
+ cd /sys/bus/coresight/devices/
+
+ ap_cti_config () {
+ #ETM trig out[0] trigger to Channel 0
+ echo 0 4 > channels/trigin_attach
+ }
+
+ etf_cti_config () {
+ #ETF Flush in trigger from Channel 0
+ echo 0 1 > channels/trigout_attach
+ echo 1 > channels/trig_filter_enable
+ }
+
+ etr_cti_config () {
+ #ETR Flush in from Channel 0
+ echo 0 1 > channels/trigout_attach
+ echo 1 > channels/trig_filter_enable
+ }
+
+ ctidevs=`find . -name "cti*"`
+
+ for i in $ctidevs
+ do
+ cd $i
+
+ connection=`find . -name "ete*"`
+ if [ ! -z "$connection" ]
+ then
+ echo "AP CTI config for $i"
+ ap_cti_config
+ fi
+
+ connection=`find . -name "tmc_etf*"`
+ if [ ! -z "$connection" ]
+ then
+ echo "ETF CTI config for $i"
+ etf_cti_config
+ fi
+
+ connection=`find . -name "tmc_etr*"`
+ if [ ! -z "$connection" ]
+ then
+ echo "ETR CTI config for $i"
+ etr_cti_config
+ fi
+
+ cd ..
+ done
+
+Note: CTI connections are SOC specific and hence the above script is
+added just for reference.
+
+4. Choose reserved buffer mode for ETR buffer::
+
+ #echo "resrv" > /sys/bus/coresight/devices/tmc_etr0/buf_mode_preferred
+
+5. Enable stop on flush trigger configuration::
+
+ #echo 1 > /sys/bus/coresight/devices/tmc_etr0/stop_on_flush
+
+6. Start Coresight tracing on cores 1 and 2 using sysfs interface
+
+7. Run some application on core 1::
+
+ #taskset -c 1 dd if=/dev/urandom of=/dev/null &
+
+8. Invoke kernel panic on core 2::
+
+ #echo 1 > /proc/sys/kernel/panic
+ #taskset -c 2 echo c > /proc/sysrq-trigger
+
+9. From rebooted kernel or crashdump kernel, read crashdata::
+
+ #dd if=/dev/crash_tmc_etr0 of=/trace/cstrace.bin
+
+10. Run opencsd decoder tools/scripts to generate the instruction trace.
+
+Sample instruction trace dump
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Core1 dump::
+
+ A etm4_enable_hw: ffff800008ae1dd4
+ CONTEXT EL2 etm4_enable_hw: ffff800008ae1dd4
+ I etm4_enable_hw: ffff800008ae1dd4:
+ d503201f nop
+ I etm4_enable_hw: ffff800008ae1dd8:
+ d503201f nop
+ I etm4_enable_hw: ffff800008ae1ddc:
+ d503201f nop
+ I etm4_enable_hw: ffff800008ae1de0:
+ d503201f nop
+ I etm4_enable_hw: ffff800008ae1de4:
+ d503201f nop
+ I etm4_enable_hw: ffff800008ae1de8:
+ d503233f paciasp
+ I etm4_enable_hw: ffff800008ae1dec:
+ a9be7bfd stp x29, x30, [sp, #-32]!
+ I etm4_enable_hw: ffff800008ae1df0:
+ 910003fd mov x29, sp
+ I etm4_enable_hw: ffff800008ae1df4:
+ a90153f3 stp x19, x20, [sp, #16]
+ I etm4_enable_hw: ffff800008ae1df8:
+ 2a0003f4 mov w20, w0
+ I etm4_enable_hw: ffff800008ae1dfc:
+ 900085b3 adrp x19, ffff800009b95000 <reserved_mem+0xc48>
+ I etm4_enable_hw: ffff800008ae1e00:
+ 910f4273 add x19, x19, #0x3d0
+ I etm4_enable_hw: ffff800008ae1e04:
+ f8747a60 ldr x0, [x19, x20, lsl #3]
+ E etm4_enable_hw: ffff800008ae1e08:
+ b4000140 cbz x0, ffff800008ae1e30 <etm4_starting_cpu+0x50>
+ I 149.039572921 etm4_enable_hw: ffff800008ae1e30:
+ a94153f3 ldp x19, x20, [sp, #16]
+ I 149.039572921 etm4_enable_hw: ffff800008ae1e34:
+ 52800000 mov w0, #0x0 // #0
+ I 149.039572921 etm4_enable_hw: ffff800008ae1e38:
+ a8c27bfd ldp x29, x30, [sp], #32
+
+ ..snip
+
+ 149.052324811 chacha_block_generic: ffff800008642d80:
+ 9100a3e0 add x0,
+ I 149.052324811 chacha_block_generic: ffff800008642d84:
+ b86178a2 ldr w2, [x5, x1, lsl #2]
+ I 149.052324811 chacha_block_generic: ffff800008642d88:
+ 8b010803 add x3, x0, x1, lsl #2
+ I 149.052324811 chacha_block_generic: ffff800008642d8c:
+ b85fc063 ldur w3, [x3, #-4]
+ I 149.052324811 chacha_block_generic: ffff800008642d90:
+ 0b030042 add w2, w2, w3
+ I 149.052324811 chacha_block_generic: ffff800008642d94:
+ b8217882 str w2, [x4, x1, lsl #2]
+ I 149.052324811 chacha_block_generic: ffff800008642d98:
+ 91000421 add x1, x1, #0x1
+ I 149.052324811 chacha_block_generic: ffff800008642d9c:
+ f100443f cmp x1, #0x11
+
+
+Core 2 dump::
+
+ A etm4_enable_hw: ffff800008ae1dd4
+ CONTEXT EL2 etm4_enable_hw: ffff800008ae1dd4
+ I etm4_enable_hw: ffff800008ae1dd4:
+ d503201f nop
+ I etm4_enable_hw: ffff800008ae1dd8:
+ d503201f nop
+ I etm4_enable_hw: ffff800008ae1ddc:
+ d503201f nop
+ I etm4_enable_hw: ffff800008ae1de0:
+ d503201f nop
+ I etm4_enable_hw: ffff800008ae1de4:
+ d503201f nop
+ I etm4_enable_hw: ffff800008ae1de8:
+ d503233f paciasp
+ I etm4_enable_hw: ffff800008ae1dec:
+ a9be7bfd stp x29, x30, [sp, #-32]!
+ I etm4_enable_hw: ffff800008ae1df0:
+ 910003fd mov x29, sp
+ I etm4_enable_hw: ffff800008ae1df4:
+ a90153f3 stp x19, x20, [sp, #16]
+ I etm4_enable_hw: ffff800008ae1df8:
+ 2a0003f4 mov w20, w0
+ I etm4_enable_hw: ffff800008ae1dfc:
+ 900085b3 adrp x19, ffff800009b95000 <reserved_mem+0xc48>
+ I etm4_enable_hw: ffff800008ae1e00:
+ 910f4273 add x19, x19, #0x3d0
+ I etm4_enable_hw: ffff800008ae1e04:
+ f8747a60 ldr x0, [x19, x20, lsl #3]
+ E etm4_enable_hw: ffff800008ae1e08:
+ b4000140 cbz x0, ffff800008ae1e30 <etm4_starting_cpu+0x50>
+ I 149.046243445 etm4_enable_hw: ffff800008ae1e30:
+ a94153f3 ldp x19, x20, [sp, #16]
+ I 149.046243445 etm4_enable_hw: ffff800008ae1e34:
+ 52800000 mov w0, #0x0 // #0
+ I 149.046243445 etm4_enable_hw: ffff800008ae1e38:
+ a8c27bfd ldp x29, x30, [sp], #32
+ I 149.046243445 etm4_enable_hw: ffff800008ae1e3c:
+ d50323bf autiasp
+ E 149.046243445 etm4_enable_hw: ffff800008ae1e40:
+ d65f03c0 ret
+ A ete_sysreg_write: ffff800008adfa18
+
+ ..snip
+
+ I 149.05422547 panic: ffff800008096300:
+ a90363f7 stp x23, x24, [sp, #48]
+ I 149.05422547 panic: ffff800008096304:
+ 6b00003f cmp w1, w0
+ I 149.05422547 panic: ffff800008096308:
+ 3a411804 ccmn w0, #0x1, #0x4, ne // ne = any
+ N 149.05422547 panic: ffff80000809630c:
+ 540001e0 b.eq ffff800008096348 <panic+0xe0> // b.none
+ I 149.05422547 panic: ffff800008096310:
+ f90023f9 str x25, [sp, #64]
+ E 149.05422547 panic: ffff800008096314:
+ 97fe44ef bl ffff8000080276d0 <panic_smp_self_stop>
+ A panic: ffff80000809634c
+ I 149.05422547 panic: ffff80000809634c:
+ 910102d5 add x21, x22, #0x40
+ I 149.05422547 panic: ffff800008096350:
+ 52800020 mov w0, #0x1 // #1
+ E 149.05422547 panic: ffff800008096354:
+ 94166b8b bl ffff800008631180 <bust_spinlocks>
+ N 149.054225518 bust_spinlocks: ffff800008631180:
+ 340000c0 cbz w0, ffff800008631198 <bust_spinlocks+0x18>
+ I 149.054225518 bust_spinlocks: ffff800008631184:
+ f000a321 adrp x1, ffff800009a98000 <pbufs.0+0xbb8>
+ I 149.054225518 bust_spinlocks: ffff800008631188:
+ b9405c20 ldr w0, [x1, #92]
+ I 149.054225518 bust_spinlocks: ffff80000863118c:
+ 11000400 add w0, w0, #0x1
+ I 149.054225518 bust_spinlocks: ffff800008631190:
+ b9005c20 str w0, [x1, #92]
+ E 149.054225518 bust_spinlocks: ffff800008631194:
+ d65f03c0 ret
+ A panic: ffff800008096358
+
+Perf based testing
+------------------
+
+Starting perf session
+~~~~~~~~~~~~~~~~~~~~~
+ETF::
+
+ perf record -e cs_etm/panicstop,@tmc_etf1/ -C 1
+ perf record -e cs_etm/panicstop,@tmc_etf2/ -C 2
+
+ETR::
+
+ perf record -e cs_etm/panicstop,@tmc_etr0/ -C 1,2
+
+Reading trace data after panic
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The same sysfs-based method explained above can be used to retrieve and
+decode the trace data after the reboot on kernel panic.
diff --git a/Documentation/trace/debugging.rst b/Documentation/trace/debugging.rst
index 54fb16239d70..d54bc500af80 100644
--- a/Documentation/trace/debugging.rst
+++ b/Documentation/trace/debugging.rst
@@ -136,6 +136,8 @@ kernel, so only the same kernel is guaranteed to work if the mapping is
preserved. Switching to a different kernel version may find a different
layout and mark the buffer as invalid.
+NB: Both the mapped address and size must be page aligned for the architecture.
+
Using trace_printk() in the boot instance
-----------------------------------------
By default, the content of trace_printk() goes into the top level tracing
diff --git a/Documentation/translations/zh_CN/mm/hmm.rst b/Documentation/translations/zh_CN/mm/hmm.rst
index 0669f947d0bc..22c210f4e94f 100644
--- a/Documentation/translations/zh_CN/mm/hmm.rst
+++ b/Documentation/translations/zh_CN/mm/hmm.rst
@@ -326,7 +326,7 @@ devm_memunmap_pages() 和 devm_release_mem_region() 当资源可以绑定到 ``s
一些设备具有诸如原子PTE位的功能,可以用来实现对系统内存的原子访问。为了支持对一
个共享的虚拟内存页的原子操作,这样的设备需要对该页的访问是排他的,而不是来自CPU
-的任何用户空间访问。 ``make_device_exclusive_range()`` 函数可以用来使一
+的任何用户空间访问。 ``make_device_exclusive()`` 函数可以用来使一
个内存范围不能从用户空间访问。
这将用特殊的交换条目替换给定范围内的所有页的映射。任何试图访问交换条目的行为都会
diff --git a/Documentation/translations/zh_CN/mm/index.rst b/Documentation/translations/zh_CN/mm/index.rst
index c8726bce8f74..a71116be058f 100644
--- a/Documentation/translations/zh_CN/mm/index.rst
+++ b/Documentation/translations/zh_CN/mm/index.rst
@@ -58,7 +58,6 @@ Linux内存管理文档
remap_file_pages
split_page_table_lock
vmalloced-kernel-stacks
- z3fold
zsmalloc
TODOLIST:
diff --git a/Documentation/translations/zh_CN/mm/z3fold.rst b/Documentation/translations/zh_CN/mm/z3fold.rst
deleted file mode 100644
index 9569a6d88270..000000000000
--- a/Documentation/translations/zh_CN/mm/z3fold.rst
+++ /dev/null
@@ -1,31 +0,0 @@
-:Original: Documentation/mm/z3fold.rst
-
-:翻译:
-
- 司延腾 Yanteng Si <siyanteng@loongson.cn>
-
-:校译:
-
-
-======
-z3fold
-======
-
-z3fold是一个专门用于存储压缩页的分配器。它被设计为每个物理页最多可以存储三个压缩页。
-它是zbud的衍生物,允许更高的压缩率,保持其前辈的简单性和确定性。
-
-z3fold和zbud的主要区别是:
-
-* 与zbud不同的是,z3fold允许最大的PAGE_SIZE分配。
-* z3fold在其页面中最多可以容纳3个压缩页面
-* z3fold本身没有输出任何API,因此打算通过zpool的API来使用
-
-为了保持确定性和简单性,z3fold,就像zbud一样,总是在每页存储一个整数的压缩页,但是
-它最多可以存储3页,不像zbud最多可以存储2页。因此压缩率达到2.7倍左右,而zbud的压缩
-率是1.7倍左右。
-
-不像zbud(但也像zsmalloc),z3fold_alloc()那样不返回一个可重复引用的指针。相反,它
-返回一个无符号长句柄,它编码了被分配对象的实际位置。
-
-保持有效的压缩率接近于zsmalloc,z3fold不依赖于MMU的启用,并提供更可预测的回收行
-为,这使得它更适合于小型和反应迅速的系统。
diff --git a/Documentation/usb/CREDITS b/Documentation/usb/CREDITS
index 81ea3eb29e96..ce6450a6ed7c 100644
--- a/Documentation/usb/CREDITS
+++ b/Documentation/usb/CREDITS
@@ -161,7 +161,7 @@ THANKS file in Inaky's driver):
- The people at the linux-usb mailing list, for reading so
many messages :) Ok, no more kidding; for all your advises!
- - All the people at the USB Implementors Forum for their
+ - All the people at the USB Implementers Forum for their
help and assistance.
- Nathan Myers <ncm@cantrip.org>, for his advice! (hope you
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index 3d1cd7ad9d67..7a1409ecc238 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -376,9 +376,9 @@ Code Seq# Include File Comments
0xB8 all uapi/linux/mshv.h Microsoft Hyper-V /dev/mshv driver
<mailto:linux-hyperv@vger.kernel.org>
0xC0 00-0F linux/usb/iowarrior.h
-0xCA 00-0F uapi/misc/cxl.h
+0xCA 00-0F uapi/misc/cxl.h Dead since 6.15
0xCA 10-2F uapi/misc/ocxl.h
-0xCA 80-BF uapi/scsi/cxlflash_ioctl.h Dead since 6.14
+0xCA 80-BF uapi/scsi/cxlflash_ioctl.h Dead since 6.15
0xCB 00-1F CBM serial IEC bus in development:
<mailto:michael.klein@puffin.lb.shuttle.de>
0xCC 00-0F drivers/misc/ibmvmc.h pseries VMC driver
diff --git a/Documentation/userspace-api/iommufd.rst b/Documentation/userspace-api/iommufd.rst
index 70289d6815d2..b0df15865dec 100644
--- a/Documentation/userspace-api/iommufd.rst
+++ b/Documentation/userspace-api/iommufd.rst
@@ -63,6 +63,13 @@ Following IOMMUFD objects are exposed to userspace:
space usually has mappings from guest-level I/O virtual addresses to guest-
level physical addresses.
+- IOMMUFD_OBJ_FAULT, representing a software queue for an HWPT to report IO
+ page faults using the IOMMU HW's PRI (Page Request Interface). This queue
+ object provides user space with an FD to poll for page fault events and to
+ respond to them. A FAULT object must be created first to get a fault_id,
+ which can then be used to allocate a fault-enabled HWPT via the
+ IOMMU_HWPT_ALLOC command by setting the IOMMU_HWPT_FAULT_ID_VALID bit in
+ its flags field (see the sketch below).
+
- IOMMUFD_OBJ_VIOMMU, representing a slice of the physical IOMMU instance,
passed to or shared with a VM. It may be some HW-accelerated virtualization
features and some SW resources used by the VM. For examples:
@@ -109,6 +116,14 @@ Following IOMMUFD objects are exposed to userspace:
vIOMMU, which is a separate ioctl call from attaching the same device to an
HWPT_PAGING that the vIOMMU holds.
+- IOMMUFD_OBJ_VEVENTQ, representing a software queue for a vIOMMU to report
+ its events, such as translation faults that occurred on a nested stage-1
+ (excluding I/O page faults, which should go through IOMMUFD_OBJ_FAULT) and
+ HW-specific events. This queue object provides user space with an FD to
+ poll/read the vIOMMU events. A vIOMMU object must be created first to get
+ its viommu_id, which can then be used to allocate a vEVENTQ. Each vIOMMU
+ can support multiple event types, but is confined to one vEVENTQ per type.
+
All user-visible objects are destroyed via the IOMMU_DESTROY uAPI.
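
A hedged sketch of the FAULT-then-HWPT flow described above. The struct
and ioctl names follow include/uapi/linux/iommufd.h at the time of
writing; dev_id and pt_id are placeholders for IDs obtained earlier, and
a vEVENTQ is allocated and polled in the same spirit::

    #include <sys/ioctl.h>
    #include <linux/iommufd.h>

    /* Returns the pollable fault FD, or -1 on error. */
    static int alloc_fault_enabled_hwpt(int iommufd, __u32 dev_id,
                                        __u32 pt_id, __u32 *out_hwpt_id)
    {
            struct iommu_fault_alloc fault = { .size = sizeof(fault) };
            struct iommu_hwpt_alloc hwpt = { .size = sizeof(hwpt) };

            /* 1. create the FAULT object to obtain a fault_id */
            if (ioctl(iommufd, IOMMU_FAULT_QUEUE_ALLOC, &fault))
                    return -1;

            /* 2. allocate a fault-enabled HWPT referencing that fault_id */
            hwpt.flags = IOMMU_HWPT_FAULT_ID_VALID;
            hwpt.dev_id = dev_id;
            hwpt.pt_id = pt_id;
            hwpt.fault_id = fault.out_fault_id;
            if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &hwpt))
                    return -1;

            *out_hwpt_id = hwpt.out_hwpt_id;
            /* 3. poll()/read() this FD for page fault events */
            return fault.out_fault_fd;
    }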
The diagrams below show relationships between user-visible objects and kernel
@@ -251,8 +266,10 @@ User visible objects are backed by following datastructures:
- iommufd_device for IOMMUFD_OBJ_DEVICE.
- iommufd_hwpt_paging for IOMMUFD_OBJ_HWPT_PAGING.
- iommufd_hwpt_nested for IOMMUFD_OBJ_HWPT_NESTED.
+- iommufd_fault for IOMMUFD_OBJ_FAULT.
- iommufd_viommu for IOMMUFD_OBJ_VIOMMU.
- iommufd_vdevice for IOMMUFD_OBJ_VDEVICE.
+- iommufd_veventq for IOMMUFD_OBJ_VEVENTQ.
Several terminologies when looking at these datastructures:
diff --git a/Documentation/userspace-api/mseal.rst b/Documentation/userspace-api/mseal.rst
index 41102f74c5e2..1dabfc29be0d 100644
--- a/Documentation/userspace-api/mseal.rst
+++ b/Documentation/userspace-api/mseal.rst
@@ -130,6 +130,27 @@ Use cases
- Chrome browser: protect some security sensitive data structures.
+- System mappings:
+ The system mappings are created by the kernel and include vdso, vvar,
+ vvar_vclock, vectors (arm compat-mode), sigpage (arm compat-mode), and
+ uprobes.
+
+ Those system mappings are read-only or execute-only; memory sealing
+ prevents them from ever being made writable, unmapped, or remapped with
+ different attributes. This is useful to mitigate memory corruption issues
+ where a corrupted pointer is passed to a memory management system.
+
+ If supported by an architecture (CONFIG_ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS),
+ CONFIG_MSEAL_SYSTEM_MAPPINGS seals all system mappings of that
+ architecture.
+
+ The following architectures currently support this feature: x86-64, arm64,
+ and s390.
+
+ WARNING: This feature breaks programs which rely on relocating
+ or unmapping system mappings. Known broken software at the time
+ of writing includes CHECKPOINT_RESTORE, UML, gVisor, and rr. Therefore
+ this config can't be enabled universally.
+
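
A minimal hedged sketch of sealing from userspace. glibc may not wrap
mseal(2) yet, so the syscall is invoked directly; __NR_mseal requires
recent kernel headers, and the flags argument must currently be 0::

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            size_t len = (size_t)sysconf(_SC_PAGESIZE);
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED) { perror("mmap"); return 1; }

            strcpy(p, "immutable");
            if (mprotect(p, len, PROT_READ)) { perror("mprotect"); return 1; }

            /* seal: further mprotect()/munmap() on [p, p+len) now fail */
            if (syscall(__NR_mseal, p, len, 0)) { perror("mseal"); return 1; }
            return 0;
    }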
When not to use mseal
=====================
Applications can apply sealing to any virtual memory region from userspace,
diff --git a/Documentation/userspace-api/perf_ring_buffer.rst b/Documentation/userspace-api/perf_ring_buffer.rst
index bde9d8cbc106..dc71544532ce 100644
--- a/Documentation/userspace-api/perf_ring_buffer.rst
+++ b/Documentation/userspace-api/perf_ring_buffer.rst
@@ -627,7 +627,7 @@ regular ring buffer.
AUX events and AUX trace data are two different things. Let's see an
example::
- perf record -a -e cycles -e cs_etm/@tmc_etr0/ -- sleep 2
+ perf record -a -e cycles -e cs_etm// -- sleep 2
The above command enables two events: one is the event *cycles* from PMU
and another is the AUX event *cs_etm* from Arm CoreSight, both are saved
@@ -766,7 +766,7 @@ only record AUX trace data at a specific time point which users are
interested in. E.g. below gives an example of how to take snapshots
with 1 second interval with Arm CoreSight::
- perf record -e cs_etm/@tmc_etr0/u -S -a program &
+ perf record -e cs_etm//u -S -a program &
PERFPID=$!
while true; do
kill -USR2 $PERFPID
diff --git a/MAINTAINERS b/MAINTAINERS
index 957f70ee85ec..02d3c8795673 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1317,11 +1317,23 @@ F: Documentation/devicetree/bindings/iio/adc/adi,ad4000.yaml
F: Documentation/iio/ad4000.rst
F: drivers/iio/adc/ad4000.c
+AD4030 ADC DRIVER (AD4030-24/AD4630-16/AD4630-24/AD4632-16/AD4632-24)
+M: Michael Hennerich <michael.hennerich@analog.com>
+M: Nuno Sá <nuno.sa@analog.com>
+R: Esteban Blanc <eblanc@baylibre.com>
+L: linux-iio@vger.kernel.org
+S: Supported
+W: https://ez.analog.com/linux-software-drivers
+F: Documentation/devicetree/bindings/iio/adc/adi,ad4030.yaml
+F: Documentation/iio/ad4030.rst
+F: drivers/iio/adc/ad4030.c
+
ANALOG DEVICES INC AD4130 DRIVER
M: Cosmin Tanislav <cosmin.tanislav@analog.com>
L: linux-iio@vger.kernel.org
S: Supported
W: https://ez.analog.com/linux-software-drivers
+F: Documentation/ABI/testing/sysfs-bus-iio-adc-ad4130
F: Documentation/devicetree/bindings/iio/adc/adi,ad4130.yaml
F: drivers/iio/adc/ad4130.c
@@ -1345,6 +1357,15 @@ W: http://ez.analog.com/community/linux-device-drivers
F: Documentation/devicetree/bindings/iio/adc/adi,ad7091r*
F: drivers/iio/adc/ad7091r*
+ANALOG DEVICES INC AD7191 DRIVER
+M: Alisa-Dariana Roman <alisa.roman@analog.com>
+L: linux-iio@vger.kernel.org
+S: Supported
+W: https://ez.analog.com/linux-software-drivers
+F: Documentation/devicetree/bindings/iio/adc/adi,ad7191.yaml
+F: Documentation/iio/ad7191.rst
+F: drivers/iio/adc/ad7191.c
+
ANALOG DEVICES INC AD7192 DRIVER
M: Alisa-Dariana Roman <alisa.roman@analog.com>
L: linux-iio@vger.kernel.org
@@ -1496,6 +1517,16 @@ W: https://ez.analog.com/linux-software-drivers
F: Documentation/devicetree/bindings/iio/imu/adi,adis16475.yaml
F: drivers/iio/imu/adis16475.c
+ANALOG DEVICES INC ADIS16550 DRIVER
+M: Nuno Sa <nuno.sa@analog.com>
+M: Ramona Gradinariu <ramona.gradinariu@analog.com>
+M: Antoniu Miclaus <antoniu.miclaus@analog.com>
+M: Robert Budai <robert.budai@analog.com>
+L: linux-iio@vger.kernel.org
+S: Supported
+W: https://ez.analog.com/linux-software-drivers
+F: Documentation/devicetree/bindings/iio/imu/adi,adis16550.yaml
+
ANALOG DEVICES INC ADM1177 DRIVER
M: Michael Hennerich <Michael.Hennerich@analog.com>
L: linux-hwmon@vger.kernel.org
@@ -4485,6 +4516,13 @@ F: kernel/bpf/stackmap.c
F: kernel/trace/bpf_trace.c
F: lib/buildid.c
+BROADCOM APDS9160 AMBIENT LIGHT SENSOR AND PROXIMITY DRIVER
+M: Mikael Gonella-Bolduc <m.gonella.bolduc@gmail.com>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/iio/light/brcm,apds9160.yaml
+F: drivers/iio/light/apds9160.c
+
BROADCOM ASP 2.0 ETHERNET DRIVER
M: Justin Chen <justin.chen@broadcom.com>
M: Florian Fainelli <florian.fainelli@broadcom.com>
@@ -5929,9 +5967,10 @@ S: Maintained
F: Documentation/security/snp-tdx-threat-model.rst
CONFIGFS
-M: Joel Becker <jlbec@evilplan.org>
+M: Andreas Hindborg <a.hindborg@kernel.org>
+R: Breno Leitao <leitao@debian.org>
S: Supported
-T: git git://git.infradead.org/users/hch/configfs.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/a.hindborg/linux.git configfs-next
F: fs/configfs/
F: include/linux/configfs.h
F: samples/configfs/
@@ -6397,18 +6436,6 @@ S: Maintained
W: http://www.chelsio.com
F: drivers/net/ethernet/chelsio/cxgb4vf/
-CXL (IBM Coherent Accelerator Processor Interface CAPI) DRIVER
-M: Frederic Barrat <fbarrat@linux.ibm.com>
-M: Andrew Donnellan <ajd@linux.ibm.com>
-L: linuxppc-dev@lists.ozlabs.org
-S: Obsolete
-F: Documentation/ABI/obsolete/sysfs-class-cxl
-F: Documentation/arch/powerpc/cxl.rst
-F: arch/powerpc/platforms/powernv/pci-cxl.c
-F: drivers/misc/cxl/
-F: include/misc/cxl*
-F: include/uapi/misc/cxl.h
-
CYBERPRO FB DRIVER
M: Russell King <linux@armlinux.org.uk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -7193,15 +7220,17 @@ F: include/linux/component.h
DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-R: "Rafael J. Wysocki" <rafael@kernel.org>
-R: Danilo Krummrich <dakr@kernel.org>
+M: "Rafael J. Wysocki" <rafael@kernel.org>
+M: Danilo Krummrich <dakr@kernel.org>
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
F: Documentation/core-api/kobject.rst
F: drivers/base/
F: fs/debugfs/
F: fs/sysfs/
+F: include/linux/device/
F: include/linux/debugfs.h
+F: include/linux/device.h
F: include/linux/fwnode.h
F: include/linux/kobj*
F: include/linux/property.h
@@ -9078,7 +9107,7 @@ F: include/linux/iomap.h
FILESYSTEMS [NETFS LIBRARY]
M: David Howells <dhowells@redhat.com>
-R: Jeff Layton <jlayton@kernel.org>
+M: Paulo Alcantara <pc@manguebit.com>
L: netfs@lists.linux.dev
L: linux-fsdevel@vger.kernel.org
S: Supported
@@ -10933,6 +10962,8 @@ F: fs/hugetlbfs/
F: include/linux/hugetlb.h
F: include/trace/events/hugetlbfs.h
F: mm/hugetlb.c
+F: mm/hugetlb_cma.c
+F: mm/hugetlb_cma.h
F: mm/hugetlb_vmemmap.c
F: mm/hugetlb_vmemmap.h
F: tools/testing/selftests/cgroup/test_hugetlb_memcg.c
@@ -11223,6 +11254,7 @@ F: drivers/i3c/master/dw*
I3C SUBSYSTEM
M: Alexandre Belloni <alexandre.belloni@bootlin.com>
+R: Frank Li <Frank.Li@nxp.com>
L: linux-i3c@lists.infradead.org (moderated for non-subscribers)
S: Maintained
C: irc://chat.freenode.net/linux-i3c
@@ -11926,7 +11958,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
F: drivers/idle/intel_idle.c
INTEL IDXD DRIVER
-M: Fenghua Yu <fenghua.yu@intel.com>
+M: Vinicius Costa Gomes <vinicius.gomes@intel.com>
R: Dave Jiang <dave.jiang@intel.com>
L: dmaengine@vger.kernel.org
S: Supported
@@ -12075,7 +12107,7 @@ F: drivers/mfd/intel-m10-bmc*
F: include/linux/mfd/intel-m10-bmc.h
INTEL MAX10 BMC SECURE UPDATES
-M: Peter Colberg <peter.colberg@intel.com>
+M: Peter Colberg <peter.colberg@altera.com>
L: linux-fpga@vger.kernel.org
S: Maintained
F: Documentation/ABI/testing/sysfs-driver-intel-m10-bmc-sec-update
@@ -14177,8 +14209,8 @@ F: include/linux/maple_tree.h
F: include/trace/events/maple_tree.h
F: lib/maple_tree.c
F: lib/test_maple_tree.c
-F: tools/testing/radix-tree/linux/maple_tree.h
F: tools/testing/radix-tree/maple.c
+F: tools/testing/shared/linux/maple_tree.h
MARDUK (CREATOR CI40) DEVICE TREE SUPPORT
M: Rahul Bedarkar <rahulbedarkar89@gmail.com>
@@ -15455,6 +15487,45 @@ F: tools/mm/
F: tools/testing/selftests/mm/
N: include/linux/page[-_]*
+MEMORY MANAGEMENT - EXECMEM
+M: Andrew Morton <akpm@linux-foundation.org>
+M: Mike Rapoport <rppt@kernel.org>
+L: linux-mm@kvack.org
+S: Maintained
+F: include/linux/execmem.h
+F: mm/execmem.c
+
+MEMORY MANAGEMENT - NUMA MEMBLOCKS AND NUMA EMULATION
+M: Andrew Morton <akpm@linux-foundation.org>
+M: Mike Rapoport <rppt@kernel.org>
+L: linux-mm@kvack.org
+S: Maintained
+F: include/linux/numa_memblks.h
+F: mm/numa.c
+F: mm/numa_emulation.c
+F: mm/numa_memblks.c
+
+MEMORY MANAGEMENT - SECRETMEM
+M: Andrew Morton <akpm@linux-foundation.org>
+M: Mike Rapoport <rppt@kernel.org>
+L: linux-mm@kvack.org
+S: Maintained
+F: include/linux/secretmem.h
+F: mm/secretmem.c
+
+MEMORY MANAGEMENT - USERFAULTFD
+M: Andrew Morton <akpm@linux-foundation.org>
+R: Peter Xu <peterx@redhat.com>
+L: linux-mm@kvack.org
+S: Maintained
+F: Documentation/admin-guide/mm/userfaultfd.rst
+F: fs/userfaultfd.c
+F: include/asm-generic/pgtable_uffd.h
+F: include/linux/userfaultfd_k.h
+F: include/uapi/linux/userfaultfd.h
+F: mm/userfaultfd.c
+F: tools/testing/selftests/mm/uffd-*.[ch]
+
MEMORY MAPPING
M: Andrew Morton <akpm@linux-foundation.org>
M: Liam R. Howlett <Liam.Howlett@oracle.com>
@@ -15606,7 +15677,7 @@ M: Ludovic Desroches <ludovic.desroches@microchip.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: dmaengine@vger.kernel.org
S: Supported
-F: Documentation/devicetree/bindings/dma/atmel-dma.txt
+F: Documentation/devicetree/bindings/dma/atmel,at91sam9g45-dma.yaml
F: drivers/dma/at_hdmac.c
F: drivers/dma/at_xdmac.c
F: include/dt-bindings/dma/at91.h
@@ -15901,6 +15972,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-iio@vger.kernel.org
S: Maintained
F: drivers/counter/microchip-tcb-capture.c
+F: include/uapi/linux/counter/microchip-tcb-capture.h
MICROCHIP USB251XB DRIVER
M: Richard Leitner <richard.leitner@skidata.com>
@@ -16517,6 +16589,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux.git nand/next
F: drivers/mtd/nand/
F: include/linux/mtd/*nand*.h
+NATIONAL INSTRUMENTS SERIAL DRIVER
+M: Chaitanya Vadrevu <chaitanya.vadrevu@emerson.com>
+L: linux-serial@vger.kernel.org
+S: Maintained
+F: drivers/tty/serial/8250/8250_ni.c
+
NATIVE INSTRUMENTS USB SOUND INTERFACE DRIVER
M: Daniel Mack <zonque@gmail.com>
L: linux-sound@vger.kernel.org
@@ -16762,6 +16840,13 @@ F: net/ethtool/mm.c
F: tools/testing/selftests/drivers/net/hw/ethtool_mm.sh
K: ethtool_mm
+NETWORKING [ETHTOOL PHY TOPOLOGY]
+M: Maxime Chevallier <maxime.chevallier@bootlin.com>
+F: Documentation/networking/phy-link-topology.rst
+F: drivers/net/phy/phy_link_topology.c
+F: include/linux/phy_link_topology.h
+F: net/ethtool/phy.c
+
NETWORKING [GENERAL]
M: "David S. Miller" <davem@davemloft.net>
M: Eric Dumazet <edumazet@google.com>
@@ -18800,6 +18885,7 @@ F: mm/percpu*.c
PER-TASK DELAY ACCOUNTING
M: Balbir Singh <bsingharora@gmail.com>
+M: Yang Yang <yang.yang29@zte.com.cn>
S: Maintained
F: include/linux/delayacct.h
F: kernel/delayacct.c
@@ -19213,6 +19299,7 @@ S: Maintained
W: http://wiki.enneenne.com/index.php/LinuxPPS_support
F: Documentation/ABI/testing/sysfs-pps
F: Documentation/ABI/testing/sysfs-pps-gen
+F: Documentation/ABI/testing/sysfs-pps-gen-tio
F: Documentation/devicetree/bindings/pps/pps-gpio.yaml
F: Documentation/driver-api/pps.rst
F: drivers/pps/
@@ -22061,8 +22148,9 @@ F: drivers/video/fbdev/sm712*
SILVACO I3C DUAL-ROLE MASTER
M: Miquel Raynal <miquel.raynal@bootlin.com>
-M: Conor Culhane <conor.culhane@silvaco.com>
+M: Frank Li <Frank.Li@nxp.com>
L: linux-i3c@lists.infradead.org (moderated for non-subscribers)
+L: imx@lists.linux.dev
S: Maintained
F: Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml
F: drivers/i3c/master/svc-i3c-master.c
@@ -22151,7 +22239,7 @@ M: Joonsoo Kim <iamjoonsoo.kim@lge.com>
M: Andrew Morton <akpm@linux-foundation.org>
M: Vlastimil Babka <vbabka@suse.cz>
R: Roman Gushchin <roman.gushchin@linux.dev>
-R: Hyeonggon Yoo <42.hyeyoo@gmail.com>
+R: Harry Yoo <harry.yoo@oracle.com>
L: linux-mm@kvack.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab.git
@@ -22852,7 +22940,6 @@ STAGING - SEPS525 LCD CONTROLLER DRIVERS
M: Michael Hennerich <michael.hennerich@analog.com>
L: linux-fbdev@vger.kernel.org
S: Supported
-F: Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
F: drivers/staging/fbtft/fb_seps525.c
STAGING - SILICON MOTION SM750 FRAME BUFFER DRIVER
@@ -23975,7 +24062,7 @@ F: drivers/thunderbolt/dma_test.c
THUNDERBOLT DRIVER
M: Andreas Noever <andreas.noever@gmail.com>
M: Michael Jamet <michael.jamet@intel.com>
-M: Mika Westerberg <mika.westerberg@linux.intel.com>
+M: Mika Westerberg <westeri@kernel.org>
M: Yehezkel Bernat <YehezkelShB@gmail.com>
L: linux-usb@vger.kernel.org
S: Maintained
@@ -23986,7 +24073,7 @@ F: include/linux/thunderbolt.h
THUNDERBOLT NETWORK DRIVER
M: Michael Jamet <michael.jamet@intel.com>
-M: Mika Westerberg <mika.westerberg@linux.intel.com>
+M: Mika Westerberg <westeri@kernel.org>
M: Yehezkel Bernat <YehezkelShB@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
@@ -24292,6 +24379,7 @@ F: drivers/hwmon/tmp513.c
TMPFS (SHMEM FILESYSTEM)
M: Hugh Dickins <hughd@google.com>
+R: Baolin Wang <baolin.wang@linux.alibaba.com>
L: linux-mm@kvack.org
S: Maintained
F: include/linux/shmem_fs.h
@@ -25650,7 +25738,6 @@ F: tools/testing/vsock/
VMALLOC
M: Andrew Morton <akpm@linux-foundation.org>
R: Uladzislau Rezki <urezki@gmail.com>
-R: Christoph Hellwig <hch@infradead.org>
L: linux-mm@kvack.org
S: Maintained
W: http://www.linux-mm.org
@@ -26539,13 +26626,6 @@ S: Maintained
F: Documentation/input/devices/yealink.rst
F: drivers/input/misc/yealink.*
-Z3FOLD COMPRESSED PAGE ALLOCATOR
-M: Vitaly Wool <vitaly.wool@konsulko.com>
-R: Miaohe Lin <linmiaohe@huawei.com>
-L: linux-mm@kvack.org
-S: Maintained
-F: mm/z3fold.c
-
Z8530 DRIVER FOR AX.25
M: Joerg Reuter <jreuter@yaina.de>
L: linux-hams@vger.kernel.org
@@ -26556,13 +26636,6 @@ F: Documentation/networking/device_drivers/hamradio/z8530drv.rst
F: drivers/net/hamradio/*scc.c
F: drivers/net/hamradio/z8530.h
-ZBUD COMPRESSED PAGE ALLOCATOR
-M: Seth Jennings <sjenning@redhat.com>
-M: Dan Streetman <ddstreet@ieee.org>
-L: linux-mm@kvack.org
-S: Maintained
-F: mm/zbud.c
-
ZD1211RW WIRELESS DRIVER
L: linux-wireless@vger.kernel.org
S: Orphan
diff --git a/arch/alpha/kernel/srmcons.c b/arch/alpha/kernel/srmcons.c
index 3e61073f4b30..b9cd364e814e 100644
--- a/arch/alpha/kernel/srmcons.c
+++ b/arch/alpha/kernel/srmcons.c
@@ -196,40 +196,44 @@ static const struct tty_operations srmcons_ops = {
static int __init
srmcons_init(void)
{
+ struct tty_driver *driver;
+ int err;
+
timer_setup(&srmcons_singleton.timer, srmcons_receive_chars, 0);
- if (srm_is_registered_console) {
- struct tty_driver *driver;
- int err;
-
- driver = tty_alloc_driver(MAX_SRM_CONSOLE_DEVICES, 0);
- if (IS_ERR(driver))
- return PTR_ERR(driver);
-
- tty_port_init(&srmcons_singleton.port);
-
- driver->driver_name = "srm";
- driver->name = "srm";
- driver->major = 0; /* dynamic */
- driver->minor_start = 0;
- driver->type = TTY_DRIVER_TYPE_SYSTEM;
- driver->subtype = SYSTEM_TYPE_SYSCONS;
- driver->init_termios = tty_std_termios;
- tty_set_operations(driver, &srmcons_ops);
- tty_port_link_device(&srmcons_singleton.port, driver, 0);
- err = tty_register_driver(driver);
- if (err) {
- tty_driver_kref_put(driver);
- tty_port_destroy(&srmcons_singleton.port);
- return err;
- }
- srmcons_driver = driver;
- }
- return -ENODEV;
+ if (!srm_is_registered_console)
+ return -ENODEV;
+
+ driver = tty_alloc_driver(MAX_SRM_CONSOLE_DEVICES, 0);
+ if (IS_ERR(driver))
+ return PTR_ERR(driver);
+
+ tty_port_init(&srmcons_singleton.port);
+
+ driver->driver_name = "srm";
+ driver->name = "srm";
+ driver->major = 0; /* dynamic */
+ driver->minor_start = 0;
+ driver->type = TTY_DRIVER_TYPE_SYSTEM;
+ driver->subtype = SYSTEM_TYPE_SYSCONS;
+ driver->init_termios = tty_std_termios;
+ tty_set_operations(driver, &srmcons_ops);
+ tty_port_link_device(&srmcons_singleton.port, driver, 0);
+ err = tty_register_driver(driver);
+ if (err)
+ goto err_free_drv;
+
+ srmcons_driver = driver;
+
+ return 0;
+err_free_drv:
+ tty_driver_kref_put(driver);
+ tty_port_destroy(&srmcons_singleton.port);
+
+ return err;
}
device_initcall(srmcons_init);
-
/*
* The console driver
*/
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 61c2198b1359..2d491b8cdab9 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -273,14 +273,6 @@ srm_paging_stop (void)
}
#endif
-void __init
-mem_init(void)
-{
- set_max_mapnr(max_low_pfn);
- high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
- memblock_free_all();
-}
-
static const pgprot_t protection_map[16] = {
[VM_NONE] = _PAGE_P(_PAGE_FOE | _PAGE_FOW |
_PAGE_FOR),
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 6a71b23f1383..a73cc94f806e 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -150,41 +150,18 @@ void __init setup_arch_memory(void)
*/
max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
- high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
-
arch_pfn_offset = min(min_low_pfn, min_high_pfn);
kmap_init();
-
-#else /* CONFIG_HIGHMEM */
- /* pfn_valid() uses this when FLATMEM=y and HIGHMEM=n */
- max_mapnr = max_low_pfn - min_low_pfn;
-
#endif /* CONFIG_HIGHMEM */
free_area_init(max_zone_pfn);
}
-static void __init highmem_init(void)
+void __init arch_mm_preinit(void)
{
#ifdef CONFIG_HIGHMEM
- unsigned long tmp;
-
memblock_phys_free(high_mem_start, high_mem_sz);
- for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++)
- free_highmem_page(pfn_to_page(tmp));
#endif
-}
-
-/*
- * mem_init - initializes memory
- *
- * Frees up bootmem
- * Calculates and displays memory available/used
- */
-void __init mem_init(void)
-{
- memblock_free_all();
- highmem_init();
BUILD_BUG_ON((PTRS_PER_PGD * sizeof(pgd_t)) > PAGE_SIZE);
BUILD_BUG_ON((PTRS_PER_PUD * sizeof(pud_t)) > PAGE_SIZE);
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
index b07004d53267..fd8897a0e52c 100644
--- a/arch/arc/mm/ioremap.c
+++ b/arch/arc/mm/ioremap.c
@@ -32,7 +32,7 @@ void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
return (void __iomem *)(u32)paddr;
return ioremap_prot(paddr, size,
- pgprot_val(pgprot_noncached(PAGE_KERNEL)));
+ pgprot_noncached(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap);
@@ -44,10 +44,8 @@ EXPORT_SYMBOL(ioremap);
* might need finer access control (R/W/X)
*/
void __iomem *ioremap_prot(phys_addr_t paddr, size_t size,
- unsigned long flags)
+ pgprot_t prot)
{
- pgprot_t prot = __pgprot(flags);
-
/* force uncached */
return generic_ioremap_prot(paddr, size, pgprot_noncached(prot));
}
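
Several hunks in this series switch ioremap_prot() from an unsigned long
protection value to a pgprot_t. A hedged kernel-style sketch of a caller
after the conversion -- map_device_regs() and its arguments are
placeholders, not an API from this series::

    #include <linux/io.h>
    #include <linux/pgtable.h>

    static void __iomem *map_device_regs(phys_addr_t base, size_t size)
    {
            /* old: ioremap_prot(base, size,
             *                   pgprot_val(pgprot_noncached(PAGE_KERNEL))); */
            return ioremap_prot(base, size, pgprot_noncached(PAGE_KERNEL));
    }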
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 835b5f100e92..25ed6f1a7c7a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -121,7 +121,7 @@ config ARM
select HAVE_KERNEL_XZ
select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M
select HAVE_KRETPROBES if HAVE_KPROBES
- select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if (LD_VERSION >= 23600 || LD_IS_LLD)
+ select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if (LD_VERSION >= 23600 || LD_CAN_USE_KEEP_IN_OVERLAY)
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_OPTPROBES if !THUMB2_KERNEL
@@ -133,6 +133,7 @@ config ARM
select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RSEQ
+ select HAVE_RUST if CPU_LITTLE_ENDIAN && CPU_32v7
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 00ca7886b18e..4808d3ed98e4 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -150,6 +150,7 @@ endif
KBUILD_CPPFLAGS +=$(cpp-y)
KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) -Wa,$(arch-y) $(tune-y) -include asm/unified.h -msoft-float
+KBUILD_RUSTFLAGS += --target=arm-unknown-linux-gnueabi
CHECKFLAGS += -D__arm__
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index be91e376df79..6b986ef6042f 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -19,14 +19,13 @@ extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr) (empty_zero_page)
#endif
-#ifndef CONFIG_MMU
-
#include <asm-generic/pgtable-nopud.h>
+
+#ifndef CONFIG_MMU
#include <asm/pgtable-nommu.h>
#else
-#include <asm-generic/pgtable-nopud.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
diff --git a/arch/arm/include/asm/vmlinux.lds.h b/arch/arm/include/asm/vmlinux.lds.h
index d60f6e83a9f7..0341973e30e1 100644
--- a/arch/arm/include/asm/vmlinux.lds.h
+++ b/arch/arm/include/asm/vmlinux.lds.h
@@ -19,7 +19,7 @@
#endif
#ifdef CONFIG_MMU
-#define ARM_MMU_KEEP(x) x
+#define ARM_MMU_KEEP(x) KEEP(x)
#define ARM_MMU_DISCARD(x)
#else
#define ARM_MMU_KEEP(x)
@@ -34,6 +34,12 @@
#define NOCROSSREFS
#endif
+#ifdef CONFIG_LD_CAN_USE_KEEP_IN_OVERLAY
+#define OVERLAY_KEEP(x) KEEP(x)
+#else
+#define OVERLAY_KEEP(x) x
+#endif
+
/* Set start/end symbol names to the LMA for the section */
#define ARM_LMA(sym, section) \
sym##_start = LOADADDR(section); \
@@ -125,13 +131,13 @@
__vectors_lma = .; \
OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) { \
.vectors { \
- *(.vectors) \
+ OVERLAY_KEEP(*(.vectors)) \
} \
.vectors.bhb.loop8 { \
- *(.vectors.bhb.loop8) \
+ OVERLAY_KEEP(*(.vectors.bhb.loop8)) \
} \
.vectors.bhb.bpiall { \
- *(.vectors.bhb.bpiall) \
+ OVERLAY_KEEP(*(.vectors.bhb.bpiall)) \
} \
} \
ARM_LMA(__vectors, .vectors); \
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 3431c0553f45..50999886a8b5 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -551,7 +551,8 @@ void show_ipi_list(struct seq_file *p, int prec)
if (!ipi_desc[i])
continue;
- seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
+ seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
+ prec >= 4 ? " " : "");
for_each_online_cpu(cpu)
seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index 5eddb75a7174..f2e8d4fac068 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -63,7 +63,7 @@ SECTIONS
. = ALIGN(4);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
- ARM_MMU_KEEP(KEEP(*(__ex_table)))
+ ARM_MMU_KEEP(*(__ex_table))
__stop___ex_table = .;
}
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index de373c6c2ae8..d592a203f9c6 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -74,7 +74,7 @@ SECTIONS
. = ALIGN(4);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
- ARM_MMU_KEEP(KEEP(*(__ex_table)))
+ ARM_MMU_KEEP(*(__ex_table))
__stop___ex_table = .;
}
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 5345d218899a..54bdca025c9f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -237,56 +237,17 @@ static inline void poison_init_mem(void *s, size_t count)
*p++ = 0xe7fddef0;
}
-static void __init free_highpages(void)
-{
-#ifdef CONFIG_HIGHMEM
- unsigned long max_low = max_low_pfn;
- phys_addr_t range_start, range_end;
- u64 i;
-
- /* set highmem page free */
- for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
- &range_start, &range_end, NULL) {
- unsigned long start = PFN_UP(range_start);
- unsigned long end = PFN_DOWN(range_end);
-
- /* Ignore complete lowmem entries */
- if (end <= max_low)
- continue;
-
- /* Truncate partial highmem entries */
- if (start < max_low)
- start = max_low;
-
- for (; start < end; start++)
- free_highmem_page(pfn_to_page(start));
- }
-#endif
-}
-
-/*
- * mem_init() marks the free areas in the mem_map and tells us how much
- * memory is free. This is done after various parts of the system have
- * claimed their memory after the kernel image.
- */
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
#ifdef CONFIG_ARM_LPAE
swiotlb_init(max_pfn > arm_dma_pfn_limit, SWIOTLB_VERBOSE);
#endif
- set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
-
- /* this will put all unused low memory onto the freelists */
- memblock_free_all();
-
#ifdef CONFIG_SA1111
/* now that our DMA memory is actually so designated, we can free it */
- free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
+ memblock_phys_free(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
- free_highpages();
-
/*
* Check boundaries twice: Some fundamental inconsistencies can
* be detected at build time already.
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 62dc903ecc7f..a182295e6f08 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -38,9 +38,11 @@ config ARM64
select ARCH_HAS_KEEPINITRD
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_MEM_ENCRYPT
+ select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_NONLEAF_PMD_YOUNG if ARM64_HAFT
+ select ARCH_HAS_PTDUMP
select ARCH_HAS_PTE_DEVMAP
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_HW_PTE_YOUNG
@@ -157,7 +159,6 @@ config ARM64
select GENERIC_IRQ_SHOW_LEVEL
select GENERIC_LIB_DEVMEM_IS_ALLOWED
select GENERIC_PCI_IOMAP
- select GENERIC_PTDUMP
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index c607e0bf5e0b..d1cc0571798b 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -132,6 +132,7 @@
#define FUJITSU_CPU_PART_A64FX 0x001
#define HISI_CPU_PART_TSV110 0xD01
+#define HISI_CPU_PART_HIP09 0xD02
#define APPLE_CPU_PART_M1_ICESTORM 0x022
#define APPLE_CPU_PART_M1_FIRESTORM 0x023
@@ -218,6 +219,7 @@
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
+#define MIDR_HISI_HIP09 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_HIP09)
#define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM)
#define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM)
#define MIDR_APPLE_M1_ICESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_PRO)
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 76ebbdc6ffdd..9b96840fb979 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -270,9 +270,9 @@ int arm64_ioremap_prot_hook_register(const ioremap_prot_hook_t hook);
#define _PAGE_IOREMAP PROT_DEVICE_nGnRE
#define ioremap_wc(addr, size) \
- ioremap_prot((addr), (size), PROT_NORMAL_NC)
+ ioremap_prot((addr), (size), __pgprot(PROT_NORMAL_NC))
#define ioremap_np(addr, size) \
- ioremap_prot((addr), (size), PROT_DEVICE_nGnRnE)
+ ioremap_prot((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
/*
* io{read,write}{16,32,64}be() macros
@@ -293,7 +293,7 @@ static inline void __iomem *ioremap_cache(phys_addr_t addr, size_t size)
if (pfn_is_map_memory(__phys_to_pfn(addr)))
return (void __iomem *)__phys_to_virt(addr);
- return ioremap_prot(addr, size, PROT_NORMAL);
+ return ioremap_prot(addr, size, __pgprot(PROT_NORMAL));
}
/*
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 84f05f781a70..d3b538be1500 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -633,11 +633,6 @@ static inline pud_t pud_mkhuge(pud_t pud)
#define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
-#define pud_special(pte) pte_special(pud_pte(pud))
-#define pud_mkspecial(pte) pte_pud(pte_mkspecial(pud_pte(pud)))
-#endif
-
#define pmd_pgprot pmd_pgprot
static inline pgprot_t pmd_pgprot(pmd_t pmd)
{
diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h
index 6cf4aae05219..b2931d1ae0fb 100644
--- a/arch/arm64/include/asm/ptdump.h
+++ b/arch/arm64/include/asm/ptdump.h
@@ -7,7 +7,7 @@
#include <linux/ptdump.h>
-#ifdef CONFIG_PTDUMP_CORE
+#ifdef CONFIG_PTDUMP
#include <linux/mm_types.h>
#include <linux/seq_file.h>
@@ -70,6 +70,6 @@ static inline void ptdump_debugfs_register(struct ptdump_info *info,
#else
static inline void note_page(struct ptdump_state *pt_st, unsigned long addr,
int level, u64 val) { }
-#endif /* CONFIG_PTDUMP_CORE */
+#endif /* CONFIG_PTDUMP */
#endif /* __ASM_PTDUMP_H */
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 8104aee4f9a0..eba1a98657f1 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -322,13 +322,6 @@ static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
return true;
}
-static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
- struct mm_struct *mm,
- unsigned long uaddr)
-{
- __flush_tlb_page_nosync(mm, uaddr);
-}
-
/*
* If mprotect/munmap/etc occurs during TLB batched flushing, we need to
* synchronise all the TLBI issued with a DSB to avoid the race mentioned in
@@ -450,7 +443,7 @@ static inline bool __flush_tlb_range_limit_excess(unsigned long start,
return false;
}
-static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
+static inline void __flush_tlb_range_nosync(struct mm_struct *mm,
unsigned long start, unsigned long end,
unsigned long stride, bool last_level,
int tlb_level)
@@ -462,12 +455,12 @@ static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
pages = (end - start) >> PAGE_SHIFT;
if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
- flush_tlb_mm(vma->vm_mm);
+ flush_tlb_mm(mm);
return;
}
dsb(ishst);
- asid = ASID(vma->vm_mm);
+ asid = ASID(mm);
if (last_level)
__flush_tlb_range_op(vale1is, start, pages, stride, asid,
@@ -476,7 +469,7 @@ static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
__flush_tlb_range_op(vae1is, start, pages, stride, asid,
tlb_level, true, lpa2_is_enabled());
- mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
+ mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
}
static inline void __flush_tlb_range(struct vm_area_struct *vma,
@@ -484,7 +477,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
unsigned long stride, bool last_level,
int tlb_level)
{
- __flush_tlb_range_nosync(vma, start, end, stride,
+ __flush_tlb_range_nosync(vma->vm_mm, start, end, stride,
last_level, tlb_level);
dsb(ish);
}
@@ -535,6 +528,12 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
dsb(ish);
isb();
}
+
+static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+ struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+ __flush_tlb_range_nosync(mm, start, end, PAGE_SIZE, true, 3);
+}
#endif
#endif
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index d780d1bd2eac..82cf1f879c61 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -109,10 +109,9 @@ static inline void arm64_mops_reset_regs(struct user_pt_regs *regs, unsigned lon
int dstreg = ESR_ELx_MOPS_ISS_DESTREG(esr);
int srcreg = ESR_ELx_MOPS_ISS_SRCREG(esr);
int sizereg = ESR_ELx_MOPS_ISS_SIZEREG(esr);
- unsigned long dst, src, size;
+ unsigned long dst, size;
dst = regs->regs[dstreg];
- src = regs->regs[srcreg];
size = regs->regs[sizereg];
/*
@@ -129,6 +128,7 @@ static inline void arm64_mops_reset_regs(struct user_pt_regs *regs, unsigned lon
}
} else {
/* CPY* instruction */
+ unsigned long src = regs->regs[srcreg];
if (!(option_a ^ wrong_option)) {
/* Format is from Option B */
if (regs->pstate & PSR_N_BIT) {
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index e6f66491fbe9..b9a66fc146c9 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -379,7 +379,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
prot = __acpi_get_writethrough_mem_attribute();
}
}
- return ioremap_prot(phys, size, pgprot_val(prot));
+ return ioremap_prot(phys, size, prot);
}
/*
diff --git a/arch/arm64/kernel/compat_alignment.c b/arch/arm64/kernel/compat_alignment.c
index deff21bfa680..b68e1d328d4c 100644
--- a/arch/arm64/kernel/compat_alignment.c
+++ b/arch/arm64/kernel/compat_alignment.c
@@ -368,6 +368,8 @@ int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs)
return 1;
}
+ if (!handler)
+ return 1;
type = handler(addr, instr, regs);
if (type == TYPE_ERROR || type == TYPE_FAULT)
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index d5d11fd11549..b198dde79e59 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -901,6 +901,7 @@ static u8 spectre_bhb_loop_affected(void)
MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD),
+ MIDR_ALL_VERSIONS(MIDR_HISI_HIP09),
{},
};
static const struct midr_range spectre_bhb_k11_list[] = {
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 887ac0b05961..78ddf6bdecad 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -130,7 +130,8 @@ static int __setup_additional_pages(enum vdso_abi abi,
mm->context.vdso = (void *)vdso_base;
ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
VM_READ|VM_EXEC|gp_flags|
- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
+ VM_SEALED_SYSMAP,
vdso_info[abi].cm);
if (IS_ERR(ret))
goto up_fail;
@@ -256,7 +257,8 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
*/
ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
VM_READ | VM_EXEC |
- VM_MAYREAD | VM_MAYEXEC,
+ VM_MAYREAD | VM_MAYEXEC |
+ VM_SEALED_SYSMAP,
&aarch32_vdso_maps[AA32_MAP_VECTORS]);
return PTR_ERR_OR_ZERO(ret);
@@ -279,7 +281,8 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm)
*/
ret = _install_special_mapping(mm, addr, PAGE_SIZE,
VM_READ | VM_EXEC | VM_MAYREAD |
- VM_MAYWRITE | VM_MAYEXEC,
+ VM_MAYWRITE | VM_MAYEXEC |
+ VM_SEALED_SYSMAP,
&aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
if (IS_ERR(ret))
goto out;
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index ead632ad01b4..096e45acadb2 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -71,8 +71,8 @@ config PTDUMP_STAGE2_DEBUGFS
depends on KVM
depends on DEBUG_KERNEL
depends on DEBUG_FS
- depends on GENERIC_PTDUMP
- select PTDUMP_CORE
+ depends on ARCH_HAS_PTDUMP
+ select PTDUMP
default n
help
Say Y here if you want to show the stage-2 kernel pagetables
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index fc92170a8f37..c26489cf96cd 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -5,7 +5,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
context.o proc.o pageattr.o fixmap.o
obj-$(CONFIG_ARM64_CONTPTE) += contpte.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
+obj-$(CONFIG_PTDUMP) += ptdump.o
obj-$(CONFIG_PTDUMP_DEBUGFS) += ptdump_debugfs.o
obj-$(CONFIG_TRANS_TABLE) += trans_pgd.o
obj-$(CONFIG_TRANS_TABLE) += trans_pgd-asm.o
diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
index 55107d27d3f8..bcac4f55f9c1 100644
--- a/arch/arm64/mm/contpte.c
+++ b/arch/arm64/mm/contpte.c
@@ -335,7 +335,7 @@ int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
* eliding the trailing DSB applies here.
*/
addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
- __flush_tlb_range_nosync(vma, addr, addr + CONT_PTE_SIZE,
+ __flush_tlb_range_nosync(vma->vm_mm, addr, addr + CONT_PTE_SIZE,
PAGE_SIZE, true, 3);
}
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index ccdef53872a0..b99bf3980fc6 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -98,21 +98,19 @@ static void __init arch_reserve_crashkernel(void)
{
unsigned long long low_size = 0;
unsigned long long crash_base, crash_size;
- char *cmdline = boot_command_line;
bool high = false;
int ret;
if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
return;
- ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
+ ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
&crash_size, &crash_base,
&low_size, &high);
if (ret)
return;
- reserve_crashkernel_generic(cmdline, crash_size, crash_base,
- low_size, high);
+ reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
}
static phys_addr_t __init max_zone_phys(phys_addr_t zone_limit)
@@ -309,8 +307,6 @@ void __init arm64_memblock_init(void)
}
early_init_fdt_scan_reserved_mem();
-
- high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
}
void __init bootmem_init(void)
@@ -359,12 +355,7 @@ void __init bootmem_init(void)
memblock_dump_all();
}
-/*
- * mem_init() marks the free areas in the mem_map and tells us how much memory
- * is free. This is done after various parts of the system have claimed their
- * memory after the kernel image.
- */
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
unsigned int flags = SWIOTLB_VERBOSE;
bool swiotlb = max_pfn > PFN_DOWN(arm64_dma_phys_limit);
@@ -388,9 +379,6 @@ void __init mem_init(void)
swiotlb_init(swiotlb, flags);
swiotlb_update_mem_attributes();
- /* this will put all unused low memory onto the freelists */
- memblock_free_all();
-
/*
* Check boundaries twice: Some fundamental inconsistencies can be
* detected at build time already.
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index 6cc0b7e7eb03..10e246f11271 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -15,10 +15,9 @@ int arm64_ioremap_prot_hook_register(ioremap_prot_hook_t hook)
}
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
- unsigned long prot)
+ pgprot_t pgprot)
{
unsigned long last_addr = phys_addr + size - 1;
- pgprot_t pgprot = __pgprot(prot);
/* Don't allow outside PHYS_MASK */
if (last_addr & ~PHYS_MASK)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index b98f89420713..ea6695d53fb9 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1361,7 +1361,8 @@ int arch_add_memory(int nid, u64 start, u64 size,
__remove_pgd_mapping(swapper_pg_dir,
__phys_to_virt(start), size);
else {
- max_pfn = PFN_UP(start + size);
+ /* Hotplugged memory can start below the current max_pfn */
+ max_pfn = max(max_pfn, PFN_UP(start + size));
max_low_pfn = max_pfn;
}
diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h
index ed53f0b47388..536d3bf32ff1 100644
--- a/arch/csky/include/asm/io.h
+++ b/arch/csky/include/asm/io.h
@@ -36,7 +36,7 @@
*/
#define ioremap_wc(addr, size) \
ioremap_prot((addr), (size), \
- (_PAGE_IOREMAP & ~_CACHE_MASK) | _CACHE_UNCACHED)
+ __pgprot((_PAGE_IOREMAP & ~_CACHE_MASK) | _CACHE_UNCACHED))
#include <asm-generic/io.h>
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index bf8400c28b5a..11055c574968 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -61,11 +61,8 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
return ret;
}
-#define __pte_free_tlb(tlb, pte, address) \
-do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc(tlb, page_ptdesc(pte)); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, address) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
extern void pagetable_init(void);
extern void mmu_init(unsigned long min_pfn, unsigned long max_pfn);
diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c
index fe715b707fd0..e0d6ca86ea8c 100644
--- a/arch/csky/kernel/setup.c
+++ b/arch/csky/kernel/setup.c
@@ -12,6 +12,45 @@
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
+#ifdef CONFIG_BLK_DEV_INITRD
+static void __init setup_initrd(void)
+{
+ unsigned long size;
+
+ if (initrd_start >= initrd_end) {
+ pr_err("initrd not found or empty");
+ goto disable;
+ }
+
+ if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
+ pr_err("initrd extends beyond end of memory");
+ goto disable;
+ }
+
+ size = initrd_end - initrd_start;
+
+ if (memblock_is_region_reserved(__pa(initrd_start), size)) {
+ pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region",
+ __pa(initrd_start), size);
+ goto disable;
+ }
+
+ memblock_reserve(__pa(initrd_start), size);
+
+ pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
+ (void *)(initrd_start), size);
+
+ initrd_below_start_ok = 1;
+
+ return;
+
+disable:
+ initrd_start = initrd_end = 0;
+
+ pr_err(" - disabling initrd\n");
+}
+#endif
+
static void __init csky_memblock_init(void)
{
unsigned long lowmem_size = PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
@@ -40,6 +79,10 @@ static void __init csky_memblock_init(void)
max_low_pfn = min_low_pfn + sseg_size;
}
+#ifdef CONFIG_BLK_DEV_INITRD
+ setup_initrd();
+#endif
+
max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
mmu_init(min_low_pfn, max_low_pfn);
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index bde7cabd23df..573da66b2543 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -42,73 +42,6 @@ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
-#ifdef CONFIG_BLK_DEV_INITRD
-static void __init setup_initrd(void)
-{
- unsigned long size;
-
- if (initrd_start >= initrd_end) {
- pr_err("initrd not found or empty");
- goto disable;
- }
-
- if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
- pr_err("initrd extends beyond end of memory");
- goto disable;
- }
-
- size = initrd_end - initrd_start;
-
- if (memblock_is_region_reserved(__pa(initrd_start), size)) {
- pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region",
- __pa(initrd_start), size);
- goto disable;
- }
-
- memblock_reserve(__pa(initrd_start), size);
-
- pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
- (void *)(initrd_start), size);
-
- initrd_below_start_ok = 1;
-
- return;
-
-disable:
- initrd_start = initrd_end = 0;
-
- pr_err(" - disabling initrd\n");
-}
-#endif
-
-void __init mem_init(void)
-{
-#ifdef CONFIG_HIGHMEM
- unsigned long tmp;
-
- set_max_mapnr(highend_pfn - ARCH_PFN_OFFSET);
-#else
- set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
-#endif
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
-
-#ifdef CONFIG_BLK_DEV_INITRD
- setup_initrd();
-#endif
-
- memblock_free_all();
-
-#ifdef CONFIG_HIGHMEM
- for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
- struct page *page = pfn_to_page(tmp);
-
- /* FIXME not sure about */
- if (!memblock_is_reserved(tmp << PAGE_SHIFT))
- free_highmem_page(page);
- }
-#endif
-}
-
void free_initmem(void)
{
free_initmem_default(-1);
diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h
index 1ee5f5f157ca..937a11ef4c33 100644
--- a/arch/hexagon/include/asm/pgalloc.h
+++ b/arch/hexagon/include/asm/pgalloc.h
@@ -87,10 +87,7 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
max_kernel_seg = pmdindex;
}
-#define __pte_free_tlb(tlb, pte, addr) \
-do { \
- pagetable_dtor((page_ptdesc(pte))); \
- tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, addr) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#endif
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
index 3458f39ca2ac..34eb9d424b96 100644
--- a/arch/hexagon/mm/init.c
+++ b/arch/hexagon/mm/init.c
@@ -43,32 +43,6 @@ DEFINE_SPINLOCK(kmap_gen_lock);
/* checkpatch says don't init this to 0. */
unsigned long long kmap_generation;
-/*
- * mem_init - initializes memory
- *
- * Frees up bootmem
- * Fixes up more stuff for HIGHMEM
- * Calculates and displays memory available/used
- */
-void __init mem_init(void)
-{
- /* No idea where this is actually declared. Seems to evade LXR. */
- memblock_free_all();
-
- /*
- * To-Do: someone somewhere should wipe out the bootmem map
- * after we're done?
- */
-
- /*
- * This can be moved to some more virtual-memory-specific
- * initialization hook at some point. Set the init_mm
- * descriptors "context" value to point to the initial
- * kernel segment table's physical address.
- */
- init_mm.context.ptbase = __pa(init_mm.pgd);
-}
-
void sync_icache_dcache(pte_t pte)
{
unsigned long addr;
@@ -104,10 +78,10 @@ static void __init paging_init(void)
free_area_init(max_zone_pfn); /* sets up the zonelists and mem_map */
/*
- * Start of high memory area. Will probably need something more
- * fancy if we... get more fancy.
+ * Set the init_mm descriptors "context" value to point to the
+ * initial kernel segment table's physical address.
*/
- high_memory = (void *)((bootmem_lastpg + 1) << PAGE_SHIFT);
+ init_mm.context.ptbase = __pa(init_mm.pgd);
}
#ifndef DMA_RESERVE
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 687502917ae2..067c0b994648 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -30,6 +30,7 @@ config LOONGARCH
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAS_UBSAN
select ARCH_HAS_VDSO_ARCH_DATA
select ARCH_INLINE_READ_LOCK if !PREEMPTION
select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
@@ -177,7 +178,7 @@ config LOONGARCH
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_TIF_NOHZ
- select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
+ select HAVE_VIRT_CPU_ACCOUNTING_GEN
select IRQ_FORCED_THREADING
select IRQ_LOONGARCH_CPU
select LOCK_MM_AND_FIND_VMA
@@ -387,8 +388,8 @@ config CMDLINE_BOOTLOADER
config CMDLINE_EXTEND
bool "Use built-in to extend bootloader kernel arguments"
help
- The command-line arguments provided during boot will be
- appended to the built-in command line. This is useful in
+ The built-in command line will be appended to the command-line
+ arguments provided during boot. This is useful in
cases where the provided arguments are insufficient and
you don't want to or cannot modify them.
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index 3c240afe5aed..90f21dfe22b1 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -24,9 +24,9 @@ CONFIG_NUMA_BALANCING=y
CONFIG_MEMCG=y
CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
-CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_RDMA=y
+CONFIG_CGROUP_DMEM=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
CONFIG_CPUSETS=y
@@ -109,8 +109,7 @@ CONFIG_BINFMT_MISC=m
CONFIG_ZPOOL=y
CONFIG_ZSWAP=y
CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y
-CONFIG_ZBUD=y
-CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC=y
# CONFIG_COMPAT_BRK is not set
CONFIG_MEMORY_HOTPLUG=y
# CONFIG_MHP_DEFAULT_ONLINE_TYPE_OFFLINE is not set
@@ -666,6 +665,10 @@ CONFIG_RTW88_8723DE=m
CONFIG_RTW88_8723DU=m
CONFIG_RTW88_8821CE=m
CONFIG_RTW88_8821CU=m
+CONFIG_RTW88_8821AU=m
+CONFIG_RTW88_8812AU=m
+CONFIG_RTW88_8814AE=m
+CONFIG_RTW88_8814AU=m
CONFIG_RTW89=m
CONFIG_RTW89_8851BE=m
CONFIG_RTW89_8852AE=m
@@ -749,6 +752,7 @@ CONFIG_MEDIA_PCI_SUPPORT=y
CONFIG_VIDEO_BT848=m
CONFIG_DVB_BT8XX=m
CONFIG_DRM=y
+CONFIG_DRM_LOAD_EDID_FIRMWARE=y
CONFIG_DRM_RADEON=m
CONFIG_DRM_RADEON_USERPTR=y
CONFIG_DRM_AMDGPU=m
@@ -762,6 +766,7 @@ CONFIG_DRM_LOONGSON=y
CONFIG_FB=y
CONFIG_FB_EFI=y
CONFIG_FB_RADEON=y
+CONFIG_FIRMWARE_EDID=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_PLATFORM=m
# CONFIG_VGA_CONSOLE is not set
@@ -844,6 +849,9 @@ CONFIG_TYPEC_TCPCI=m
CONFIG_TYPEC_UCSI=m
CONFIG_UCSI_ACPI=m
CONFIG_INFINIBAND=m
+CONFIG_EDAC=y
+# CONFIG_EDAC_LEGACY_SYSFS is not set
+CONFIG_EDAC_LOONGSON=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_EFI=y
CONFIG_RTC_DRV_LOONGSON=y
diff --git a/arch/loongarch/include/asm/cache.h b/arch/loongarch/include/asm/cache.h
index 1b6d09617199..aa622c754414 100644
--- a/arch/loongarch/include/asm/cache.h
+++ b/arch/loongarch/include/asm/cache.h
@@ -8,6 +8,8 @@
#define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define ARCH_DMA_MINALIGN (16)
+
#define __read_mostly __section(".data..read_mostly")
#endif /* _ASM_CACHE_H */
diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h
index e77a56eaf906..eaff72b38dc8 100644
--- a/arch/loongarch/include/asm/io.h
+++ b/arch/loongarch/include/asm/io.h
@@ -23,9 +23,9 @@ extern void __init early_iounmap(void __iomem *addr, unsigned long size);
#ifdef CONFIG_ARCH_IOREMAP
static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
- unsigned long prot_val)
+ pgprot_t prot)
{
- switch (prot_val & _CACHE_MASK) {
+ switch (pgprot_val(prot) & _CACHE_MASK) {
case _CACHE_CC:
return (void __iomem *)(unsigned long)(CACHE_BASE + offset);
case _CACHE_SUC:
@@ -38,7 +38,7 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
}
#define ioremap(offset, size) \
- ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_SUC))
+ ioremap_prot((offset), (size), PAGE_KERNEL_SUC)
#define iounmap(addr) ((void)(addr))
@@ -55,10 +55,10 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
*/
#define ioremap_wc(offset, size) \
ioremap_prot((offset), (size), \
- pgprot_val(wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC))
+ wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC)
#define ioremap_cache(offset, size) \
- ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL))
+ ioremap_prot((offset), (size), PAGE_KERNEL)
#define mmiowb() wmb()
diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h
index a0ca84da8541..12bd15578c33 100644
--- a/arch/loongarch/include/asm/irq.h
+++ b/arch/loongarch/include/asm/irq.h
@@ -53,7 +53,7 @@ void spurious_interrupt(void);
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu);
-#define MAX_IO_PICS 2
+#define MAX_IO_PICS 8
#define NR_IRQS (64 + NR_VECTORS * (NR_CPUS + MAX_IO_PICS))
struct acpi_vector_group {
diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h
index 7211dff8c969..b58f587f0f0a 100644
--- a/arch/loongarch/include/asm/pgalloc.h
+++ b/arch/loongarch/include/asm/pgalloc.h
@@ -55,11 +55,8 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
return pte;
}
-#define __pte_free_tlb(tlb, pte, address) \
-do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, address) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#ifndef __PAGETABLE_PMD_FOLDED
diff --git a/arch/loongarch/include/asm/stacktrace.h b/arch/loongarch/include/asm/stacktrace.h
index f23adb15f418..fc8b64773794 100644
--- a/arch/loongarch/include/asm/stacktrace.h
+++ b/arch/loongarch/include/asm/stacktrace.h
@@ -8,6 +8,7 @@
#include <asm/asm.h>
#include <asm/ptrace.h>
#include <asm/loongarch.h>
+#include <asm/unwind_hints.h>
#include <linux/stringify.h>
enum stack_type {
@@ -43,6 +44,7 @@ int get_stack_info(unsigned long stack, struct task_struct *task, struct stack_i
static __always_inline void prepare_frametrace(struct pt_regs *regs)
{
__asm__ __volatile__(
+ UNWIND_HINT_SAVE
/* Save $ra */
STORE_ONE_REG(1)
/* Use $ra to save PC */
@@ -80,6 +82,7 @@ static __always_inline void prepare_frametrace(struct pt_regs *regs)
STORE_ONE_REG(29)
STORE_ONE_REG(30)
STORE_ONE_REG(31)
+ UNWIND_HINT_RESTORE
: "=m" (regs->csr_era)
: "r" (regs->regs)
: "memory");
diff --git a/arch/loongarch/include/asm/unwind_hints.h b/arch/loongarch/include/asm/unwind_hints.h
index a01086ad9dde..2c68bc72736c 100644
--- a/arch/loongarch/include/asm/unwind_hints.h
+++ b/arch/loongarch/include/asm/unwind_hints.h
@@ -23,6 +23,14 @@
UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_CALL
.endm
-#endif /* __ASSEMBLY__ */
+#else /* !__ASSEMBLY__ */
+
+#define UNWIND_HINT_SAVE \
+ UNWIND_HINT(UNWIND_HINT_TYPE_SAVE, 0, 0, 0)
+
+#define UNWIND_HINT_RESTORE \
+ UNWIND_HINT(UNWIND_HINT_TYPE_RESTORE, 0, 0, 0)
+
+#endif /* !__ASSEMBLY__ */
#endif /* _ASM_LOONGARCH_UNWIND_HINTS_H */
diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c
index 2f1f5b08638f..27144de5c5fe 100644
--- a/arch/loongarch/kernel/env.c
+++ b/arch/loongarch/kernel/env.c
@@ -68,6 +68,8 @@ static int __init fdt_cpu_clk_init(void)
return -ENODEV;
clk = of_clk_get(np, 0);
+ of_node_put(np);
+
if (IS_ERR(clk))
return -ENODEV;
diff --git a/arch/loongarch/kernel/kgdb.c b/arch/loongarch/kernel/kgdb.c
index 445c452d72a7..7be5b4c0c900 100644
--- a/arch/loongarch/kernel/kgdb.c
+++ b/arch/loongarch/kernel/kgdb.c
@@ -8,6 +8,7 @@
#include <linux/hw_breakpoint.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
+#include <linux/objtool.h>
#include <linux/processor.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
@@ -224,13 +225,13 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
regs->csr_era = pc;
}
-void arch_kgdb_breakpoint(void)
+noinline void arch_kgdb_breakpoint(void)
{
__asm__ __volatile__ ( \
".globl kgdb_breakinst\n\t" \
- "nop\n" \
"kgdb_breakinst:\tbreak 2\n\t"); /* BRK_KDB = 2 */
}
+STACK_FRAME_NON_STANDARD(arch_kgdb_breakpoint);
/*
* Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
index 84fe7f854820..30a72fd528c0 100644
--- a/arch/loongarch/kernel/numa.c
+++ b/arch/loongarch/kernel/numa.c
@@ -387,12 +387,6 @@ void __init paging_init(void)
free_area_init(zones_size);
}
-void __init mem_init(void)
-{
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
- memblock_free_all();
-}
-
int pcibus_to_node(struct pci_bus *bus)
{
return dev_to_node(&bus->dev);
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 90cb3ca96f08..b99fbb388fe0 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -259,18 +259,17 @@ static void __init arch_reserve_crashkernel(void)
int ret;
unsigned long long low_size = 0;
unsigned long long crash_base, crash_size;
- char *cmdline = boot_command_line;
bool high = false;
if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
return;
- ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
+ ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
&crash_size, &crash_base, &low_size, &high);
if (ret)
return;
- reserve_crashkernel_generic(cmdline, crash_size, crash_base, low_size, high);
+ reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
}
static void __init fdt_setup(void)
diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
index ca5aa5f46a9f..fdb7f73ad160 100644
--- a/arch/loongarch/mm/init.c
+++ b/arch/loongarch/mm/init.c
@@ -75,14 +75,6 @@ void __init paging_init(void)
free_area_init(max_zone_pfns);
}
-
-void __init mem_init(void)
-{
- max_mapnr = max_low_pfn;
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
-
- memblock_free_all();
-}
#endif /* !CONFIG_NUMA */
void __ref free_initmem(void)
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index ea357a3edc09..fa1500d4aa3e 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -142,6 +142,8 @@ static void build_prologue(struct jit_ctx *ctx)
*/
if (seen_tail_call(ctx) && seen_call(ctx))
move_reg(ctx, TCC_SAVED, REG_TCC);
+ else
+ emit_insn(ctx, nop);
ctx->stack_size = stack_adjust;
}
@@ -905,7 +907,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
move_addr(ctx, t1, func_addr);
emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0);
- move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
+
+ if (insn->src_reg != BPF_PSEUDO_CALL)
+ move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
+
break;
/* tail call */
@@ -930,7 +935,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
{
const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
- move_imm(ctx, dst, imm64, is32);
+ if (bpf_pseudo_func(insn))
+ move_addr(ctx, dst, imm64);
+ else
+ move_imm(ctx, dst, imm64, is32);
return 1;
}
diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h
index 68586338ecf8..f9c569f53949 100644
--- a/arch/loongarch/net/bpf_jit.h
+++ b/arch/loongarch/net/bpf_jit.h
@@ -27,6 +27,11 @@ struct jit_data {
struct jit_ctx ctx;
};
+static inline void emit_nop(union loongarch_instruction *insn)
+{
+ insn->word = INSN_NOP;
+}
+
#define emit_insn(ctx, func, ...) \
do { \
if (ctx->image != NULL) { \
diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
index 1c26147aff70..ccd2c5e135c6 100644
--- a/arch/loongarch/vdso/Makefile
+++ b/arch/loongarch/vdso/Makefile
@@ -36,8 +36,7 @@ endif
# VDSO linker flags.
ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
- $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \
- --hash-style=sysv --build-id -T
+ $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared --build-id -T
#
# Shared build commands.
diff --git a/arch/loongarch/vdso/vgetrandom-chacha.S b/arch/loongarch/vdso/vgetrandom-chacha.S
index c2733e6c3a8d..c4dd2bab8825 100644
--- a/arch/loongarch/vdso/vgetrandom-chacha.S
+++ b/arch/loongarch/vdso/vgetrandom-chacha.S
@@ -58,9 +58,7 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
#define copy0 t5
#define copy1 t6
#define copy2 t7
-
-/* Reuse i as copy3 */
-#define copy3 i
+#define copy3 t8
/* Packs to be used with OP_4REG */
#define line0 state0, state1, state2, state3
@@ -99,6 +97,7 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
li.w copy0, 0x61707865
li.w copy1, 0x3320646e
li.w copy2, 0x79622d32
+ li.w copy3, 0x6b206574
ld.w cnt_lo, counter, 0
ld.w cnt_hi, counter, 4
@@ -108,7 +107,7 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
move state0, copy0
move state1, copy1
move state2, copy2
- li.w state3, 0x6b206574
+ move state3, copy3
/* state[4,5,..,11] = key */
ld.w state4, key, 0
@@ -167,12 +166,6 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
addi.w i, i, -1
bnez i, .Lpermute
- /*
- * copy[3] = "expa", materialize it here because copy[3] shares the
- * same register with i which just became dead.
- */
- li.w copy3, 0x6b206574
-
/* output[0,1,2,3] = copy[0,1,2,3] + state[0,1,2,3] */
OP_4REG add.w line0, copy
st.w state0, output, 0
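For reference, the four li.w immediates are the standard ChaCha20 constant, the little-endian ASCII words of "expand 32-byte k"; the change simply keeps the fourth word live in t8 from the start instead of rematerializing it after the loop counter dies. In C terms:

	/* ChaCha20 sigma constant, little-endian ASCII "expand 32-byte k" */
	static const u32 chacha_const[4] = {
		0x61707865, 0x3320646e, 0x79622d32, 0x6b206574,
	};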
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index 80afc3a18724..1e21c758b774 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -17,11 +17,8 @@
extern const char bad_pmd_string[];
-#define __pte_free_tlb(tlb, pte, addr) \
-do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, addr) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 8b11d0d545aa..488411af1b3f 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -121,7 +121,5 @@ static inline void init_pointer_tables(void)
void __init mem_init(void)
{
- /* this will put all memory onto the freelists */
- memblock_free_all();
init_pointer_tables();
}
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 4520c5741579..31d475cdb1c5 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -52,19 +52,6 @@ static void __init highmem_init(void)
map_page(PKMAP_BASE, 0, 0); /* XXX gross */
pkmap_page_table = virt_to_kpte(PKMAP_BASE);
}
-
-static void __meminit highmem_setup(void)
-{
- unsigned long pfn;
-
- for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
- struct page *page = pfn_to_page(pfn);
-
- /* FIXME not sure about */
- if (!memblock_is_reserved(pfn << PAGE_SHIFT))
- free_highmem_page(page);
- }
-}
#endif /* CONFIG_HIGHMEM */
/*
@@ -104,17 +91,13 @@ void __init setup_memory(void)
*
* min_low_pfn - the first page (mm/bootmem.c - node_boot_start)
* max_low_pfn
- * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn)
*/
/* memory start is from the kernel end (aligned) to higher addr */
min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */
- /* RAM is assumed contiguous */
- max_mapnr = memory_size >> PAGE_SHIFT;
max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;
- pr_info("%s: max_mapnr: %#lx\n", __func__, max_mapnr);
pr_info("%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);
@@ -124,14 +107,6 @@ void __init setup_memory(void)
void __init mem_init(void)
{
- high_memory = (void *)__va(memory_start + lowmem_size - 1);
-
- /* this will put all memory onto the freelists */
- memblock_free_all();
-#ifdef CONFIG_HIGHMEM
- highmem_setup();
-#endif
-
mem_init_done = 1;
}
@@ -143,7 +118,7 @@ int page_is_ram(unsigned long pfn)
/*
* Check for command-line options that affect what MMU_init will do.
*/
-static void mm_cmdline_setup(void)
+static void __init mm_cmdline_setup(void)
{
unsigned long maxmem = 0;
char *p = cmd_line;
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 78c6573f91f2..980187a83053 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -115,7 +115,7 @@ static inline unsigned long isa_virt_to_bus(volatile void *address)
}
void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
- unsigned long prot_val);
+ pgprot_t prot);
void iounmap(const volatile void __iomem *addr);
/*
@@ -130,7 +130,7 @@ void iounmap(const volatile void __iomem *addr);
* address.
*/
#define ioremap(offset, size) \
- ioremap_prot((offset), (size), _CACHE_UNCACHED)
+ ioremap_prot((offset), (size), __pgprot(_CACHE_UNCACHED))
/*
* ioremap_cache - map bus memory into CPU space
@@ -148,7 +148,7 @@ void iounmap(const volatile void __iomem *addr);
* memory-like regions on I/O busses.
*/
#define ioremap_cache(offset, size) \
- ioremap_prot((offset), (size), _page_cachable_default)
+ ioremap_prot((offset), (size), __pgprot(_page_cachable_default))
/*
* ioremap_wc - map bus memory into CPU space
@@ -169,7 +169,7 @@ void iounmap(const volatile void __iomem *addr);
* _CACHE_UNCACHED option (see cpu_probe() method).
*/
#define ioremap_wc(offset, size) \
- ioremap_prot((offset), (size), boot_cpu_data.writecombine)
+ ioremap_prot((offset), (size), __pgprot(boot_cpu_data.writecombine))
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
#define war_io_reorder_wmb() wmb()
diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h
index 14226ea42036..602a21aee9d4 100644
--- a/arch/mips/include/asm/mmzone.h
+++ b/arch/mips/include/asm/mmzone.h
@@ -20,6 +20,4 @@
#define nid_to_addrbase(nid) 0
#endif
-extern void setup_zero_pages(void);
-
#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index 26c7a6ede983..bbca420c96d3 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -48,11 +48,8 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
extern void pgd_init(void *addr);
extern pgd_t *pgd_alloc(struct mm_struct *mm);
-#define __pte_free_tlb(tlb, pte, address) \
-do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, address) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#ifndef __PAGETABLE_PMD_FOLDED
diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c
index 8388400d052f..95d5f553ce19 100644
--- a/arch/mips/loongson64/numa.c
+++ b/arch/mips/loongson64/numa.c
@@ -164,13 +164,6 @@ void __init paging_init(void)
free_area_init(zones_size);
}
-void __init mem_init(void)
-{
- high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
- memblock_free_all();
- setup_zero_pages(); /* This comes from node 0 */
-}
-
/* All PCI device belongs to logical Node-0 */
int pcibus_to_node(struct pci_bus *bus)
{
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 4583d1a2a73e..a673d3d68254 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -59,24 +59,16 @@ EXPORT_SYMBOL(zero_page_mask);
/*
* Not static inline because used by IP27 special magic initialization code
*/
-void setup_zero_pages(void)
+static void __init setup_zero_pages(void)
{
- unsigned int order, i;
- struct page *page;
+ unsigned int order;
if (cpu_has_vce)
order = 3;
else
order = 0;
- empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (!empty_zero_page)
- panic("Oh boy, that early out of memory?");
-
- page = virt_to_page((void *)empty_zero_page);
- split_page(page, order);
- for (i = 0; i < (1 << order); i++, page++)
- mark_page_reserved(page);
+ empty_zero_page = (unsigned long)memblock_alloc_or_panic(PAGE_SIZE << order, PAGE_SIZE);
zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
@@ -423,17 +415,8 @@ void __init paging_init(void)
" %ldk highmem ignored\n",
(highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
-
- max_mapnr = max_low_pfn;
- } else if (highend_pfn) {
- max_mapnr = highend_pfn;
- } else {
- max_mapnr = max_low_pfn;
}
-#else
- max_mapnr = max_low_pfn;
#endif
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
free_area_init(max_zone_pfns);
}
@@ -442,26 +425,7 @@ void __init paging_init(void)
static struct kcore_list kcore_kseg0;
#endif
-static inline void __init mem_init_free_highmem(void)
-{
-#ifdef CONFIG_HIGHMEM
- unsigned long tmp;
-
- if (cpu_has_dc_aliases)
- return;
-
- for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
- struct page *page = pfn_to_page(tmp);
-
- if (!memblock_is_memory(PFN_PHYS(tmp)))
- SetPageReserved(page);
- else
- free_highmem_page(page);
- }
-#endif
-}
-
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
/*
* When PFN_PTE_SHIFT is greater than PAGE_SHIFT we won't have enough PTE
@@ -470,9 +434,7 @@ void __init mem_init(void)
BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT));
maar_init();
- memblock_free_all();
setup_zero_pages(); /* Setup zeroed pages. */
- mem_init_free_highmem();
#ifdef CONFIG_64BIT
if ((unsigned long) &_text > (unsigned long) CKSEG0)
@@ -482,6 +444,11 @@ void __init mem_init(void)
0x80000000 - 4, KCORE_TEXT);
#endif
}
+#else /* CONFIG_NUMA */
+void __init arch_mm_preinit(void)
+{
+ setup_zero_pages(); /* This comes from node 0 */
+}
#endif /* !CONFIG_NUMA */
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
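With the rename to arch_mm_preinit() this code now runs before memblock_free_all() hands memory to the buddy allocator, which is presumably why the zero-page allocation above switched from __get_free_pages()/split_page()/mark_page_reserved() to a plain memblock allocation: memory that memblock never releases stays reserved without per-page marking. The allocation reduces to:

	/* too early for the page allocator: take the pages from memblock */
	empty_zero_page = (unsigned long)memblock_alloc_or_panic(PAGE_SIZE << order,
								 PAGE_SIZE);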
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index d8243d61ef32..c6c4576cd4a8 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -44,9 +44,9 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
* ioremap_prot gives the caller control over cache coherency attributes (CCA)
*/
void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
- unsigned long prot_val)
+ pgprot_t prot)
{
- unsigned long flags = prot_val & _CACHE_MASK;
+ unsigned long flags = pgprot_val(prot) & _CACHE_MASK;
unsigned long offset, pfn, last_pfn;
struct vm_struct *area;
phys_addr_t last_addr;
diff --git a/arch/mips/mm/ioremap64.c b/arch/mips/mm/ioremap64.c
index 15e7820d6a5f..acc03ba20098 100644
--- a/arch/mips/mm/ioremap64.c
+++ b/arch/mips/mm/ioremap64.c
@@ -3,9 +3,9 @@
#include <ioremap.h>
void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
- unsigned long prot_val)
+ pgprot_t prot)
{
- unsigned long flags = prot_val & _CACHE_MASK;
+ unsigned long flags = pgprot_val(prot) & _CACHE_MASK;
u64 base = (flags == _CACHE_UNCACHED ? IO_BASE : UNCAC_BASE);
void __iomem *addr;
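Across the MIPS, parisc and powerpc hunks in this series, ioremap_prot() now takes a pgprot_t rather than a raw unsigned long, so callers wrap their cacheability bits in __pgprot() and implementations unwrap them with pgprot_val(); the wrapper type keeps untyped flag values from slipping through. A caller-side sketch (map_uncached() is illustrative, not a kernel API):

	static void __iomem *map_uncached(phys_addr_t pa, unsigned long size)
	{
		/* wrap the raw CCA bits; the callee unwraps via pgprot_val() */
		return ioremap_prot(pa, size, __pgprot(_CACHE_UNCACHED));
	}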
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 1963313f55d8..2b3e46e2e607 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -406,8 +406,6 @@ void __init prom_meminit(void)
}
}
-extern void setup_zero_pages(void);
-
void __init paging_init(void)
{
unsigned long zones_size[MAX_NR_ZONES] = {0, };
@@ -416,10 +414,3 @@ void __init paging_init(void)
zones_size[ZONE_NORMAL] = max_low_pfn;
free_area_init(zones_size);
}
-
-void __init mem_init(void)
-{
- high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
- memblock_free_all();
- setup_zero_pages(); /* This comes from node 0 */
-}
diff --git a/arch/nios2/include/asm/pgalloc.h b/arch/nios2/include/asm/pgalloc.h
index 12a536b7bfbd..db122b093a8b 100644
--- a/arch/nios2/include/asm/pgalloc.h
+++ b/arch/nios2/include/asm/pgalloc.h
@@ -28,10 +28,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
extern pgd_t *pgd_alloc(struct mm_struct *mm);
-#define __pte_free_tlb(tlb, pte, addr) \
- do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
- } while (0)
+#define __pte_free_tlb(tlb, pte, addr) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#endif /* _ASM_NIOS2_PGALLOC_H */
diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
index da122a5fa43b..2a40150142c3 100644
--- a/arch/nios2/kernel/setup.c
+++ b/arch/nios2/kernel/setup.c
@@ -149,6 +149,8 @@ void __init setup_arch(char **cmdline_p)
memory_start = memblock_start_of_DRAM();
memory_end = memblock_end_of_DRAM();
+ pr_debug("%s: start=%lx, end=%lx\n", __func__, memory_start, memory_end);
+
setup_initial_init_mm(_stext, _etext, _edata, _end);
init_task.thread.kregs = &fake_regs;
@@ -156,7 +158,6 @@ void __init setup_arch(char **cmdline_p)
*cmdline_p = boot_command_line;
find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);
- max_mapnr = max_low_pfn;
memblock_reserve(__pa_symbol(_stext), _end - _stext);
#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
index a2278485de19..94efa3de3933 100644
--- a/arch/nios2/mm/init.c
+++ b/arch/nios2/mm/init.c
@@ -51,7 +51,7 @@ void __init paging_init(void)
pagetable_init();
pgd_current = swapper_pg_dir;
- max_zone_pfn[ZONE_NORMAL] = max_mapnr;
+ max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
/* pass the memory from the bootmem allocator to the main allocator */
free_area_init(max_zone_pfn);
@@ -60,20 +60,6 @@ void __init paging_init(void)
(unsigned long)empty_zero_page + PAGE_SIZE);
}
-void __init mem_init(void)
-{
- unsigned long end_mem = memory_end; /* this must not include
- kernel stack at top */
-
- pr_debug("mem_init: start=%lx, end=%lx\n", memory_start, memory_end);
-
- end_mem &= PAGE_MASK;
- high_memory = __va(end_mem);
-
- /* this will put all memory onto the freelists */
- memblock_free_all();
-}
-
void __init mmu_init(void)
{
flush_tlb_all();
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
index 3372f4e6ab4b..3f110931d8f6 100644
--- a/arch/openrisc/include/asm/pgalloc.h
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -64,10 +64,7 @@ extern inline pgd_t *pgd_alloc(struct mm_struct *mm)
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
-#define __pte_free_tlb(tlb, pte, addr) \
-do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, addr) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#endif
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index d0cb1a0126f9..be1c2eb8bb94 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -193,15 +193,9 @@ void __init mem_init(void)
{
BUG_ON(!mem_map);
- max_mapnr = max_low_pfn;
- high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
-
/* clear the zero-page */
memset((void *)empty_zero_page, 0, PAGE_SIZE);
- /* this will put all low memory onto the freelists */
- memblock_free_all();
-
printk("mem_init_done ...........................................\n");
mem_init_done = 1;
return;
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index 325ae693c20e..f01ad3ad60b5 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -131,7 +131,7 @@ static inline void gsc_writeq(unsigned long long val, unsigned long addr)
_PAGE_ACCESSED | _PAGE_NO_CACHE)
#define ioremap_wc(addr, size) \
- ioremap_prot((addr), (size), _PAGE_IOREMAP)
+ ioremap_prot((addr), (size), __pgprot(_PAGE_IOREMAP))
#define pci_iounmap pci_iounmap
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 61c0a2477072..14270715d754 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -562,10 +562,6 @@ void __init mem_init(void)
BUILD_BUG_ON(TMPALIAS_MAP_START >= 0x80000000);
#endif
- high_memory = __va((max_pfn << PAGE_SHIFT));
- set_max_mapnr(max_low_pfn);
- memblock_free_all();
-
#ifdef CONFIG_PA11
if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
index fd996472dfe7..0b65c4b3baee 100644
--- a/arch/parisc/mm/ioremap.c
+++ b/arch/parisc/mm/ioremap.c
@@ -14,7 +14,7 @@
#include <linux/mm.h>
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
- unsigned long prot)
+ pgprot_t prot)
{
#ifdef CONFIG_EISA
unsigned long end = phys_addr + size - 1;
@@ -41,6 +41,6 @@ void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
}
}
- return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
+ return generic_ioremap_prot(phys_addr, size, prot);
}
EXPORT_SYMBOL(ioremap_prot);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b5630f8ad436..6722625a406a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -148,6 +148,7 @@ config PPC
select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_PMEM_API
select ARCH_HAS_PREEMPT_LAZY
+ select ARCH_HAS_PTDUMP
select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
@@ -207,7 +208,6 @@ config PPC
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
select GENERIC_PCI_IOMAP if PCI
- select GENERIC_PTDUMP
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
select GENERIC_VDSO_DATA_STORE
@@ -716,6 +716,9 @@ config ARCH_SUPPORTS_CRASH_HOTPLUG
def_bool y
depends on PPC64
+config ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
+ def_bool CRASH_RESERVE
+
config FA_DUMP
bool "Firmware-assisted dump"
depends on CRASH_DUMP && PPC64 && (PPC_RTAS || PPC_POWERNV)
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
index 89da51d724fb..9bc2758a6a9a 100644
--- a/arch/powerpc/configs/mpc885_ads_defconfig
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
@@ -77,4 +77,4 @@ CONFIG_DEBUG_VM_PGTABLE=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_BDI_SWITCH=y
CONFIG_PPC_EARLY_DEBUG=y
-CONFIG_GENERIC_PTDUMP=y
+CONFIG_PTDUMP_DEBUGFS=y
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index 1eb446452fc0..3086c4a12d6d 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -78,7 +78,6 @@ CONFIG_VIRTIO_BLK=m
CONFIG_BLK_DEV_NVME=m
CONFIG_NVME_MULTIPATH=y
CONFIG_EEPROM_AT24=m
-# CONFIG_CXL is not set
# CONFIG_OCXL is not set
CONFIG_BLK_DEV_SD=m
CONFIG_BLK_DEV_SR=m
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index f0bba9c5f9c3..bb786694dd26 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -94,4 +94,10 @@ static inline int check_and_get_huge_psize(int shift)
return mmu_psize;
}
+#define arch_has_huge_bootmem_alloc arch_has_huge_bootmem_alloc
+
+static inline bool arch_has_huge_bootmem_alloc(void)
+{
+ return (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled());
+}
#endif
diff --git a/arch/powerpc/include/asm/copro.h b/arch/powerpc/include/asm/copro.h
index fd2e166ea02a..81bd176203ab 100644
--- a/arch/powerpc/include/asm/copro.h
+++ b/arch/powerpc/include/asm/copro.h
@@ -18,10 +18,4 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb);
-
-#ifdef CONFIG_PPC_COPRO_BASE
-void copro_flush_all_slbs(struct mm_struct *mm);
-#else
-static inline void copro_flush_all_slbs(struct mm_struct *mm) {}
-#endif
#endif /* _ASM_POWERPC_COPRO_H */
diff --git a/arch/powerpc/include/asm/crash_reserve.h b/arch/powerpc/include/asm/crash_reserve.h
new file mode 100644
index 000000000000..6467ce29b1fa
--- /dev/null
+++ b/arch/powerpc/include/asm/crash_reserve.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_CRASH_RESERVE_H
+#define _ASM_POWERPC_CRASH_RESERVE_H
+
+/* crash kernel regions are page-size aligned */
+#define CRASH_ALIGN PAGE_SIZE
+
+#endif /* _ASM_POWERPC_CRASH_RESERVE_H */
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 47ed639f3b8f..a4dc27655b3e 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -38,9 +38,6 @@ struct dev_archdata {
#ifdef CONFIG_FAIL_IOMMU
int fail_iommu;
#endif
-#ifdef CONFIG_CXL_BASE
- struct cxl_context *cxl_ctx;
-#endif
#ifdef CONFIG_PCI_IOV
void *iov_data;
#endif
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 492e8855e00f..7a89754842d6 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -826,7 +826,7 @@ void __iomem *ioremap_wt(phys_addr_t address, unsigned long size);
void __iomem *ioremap_coherent(phys_addr_t address, unsigned long size);
#define ioremap_cache(addr, size) \
- ioremap_prot((addr), (size), pgprot_val(PAGE_KERNEL))
+ ioremap_prot((addr), (size), PAGE_KERNEL)
#define iounmap iounmap
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 601e569303e1..70f2f0517509 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -94,8 +94,10 @@ int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long
int arch_kimage_file_post_load_cleanup(struct kimage *image);
#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
-int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf);
-#define arch_kexec_locate_mem_hole arch_kexec_locate_mem_hole
+int arch_check_excluded_range(struct kimage *image, unsigned long start,
+ unsigned long end);
+#define arch_check_excluded_range arch_check_excluded_range
+
int load_crashdump_segments_ppc64(struct kimage *image,
struct kexec_buf *kbuf);
@@ -112,9 +114,9 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, struct crash_mem
#ifdef CONFIG_CRASH_RESERVE
int __init overlaps_crashkernel(unsigned long start, unsigned long size);
-extern void reserve_crashkernel(void);
+extern void arch_reserve_crashkernel(void);
#else
-static inline void reserve_crashkernel(void) {}
+static inline void arch_reserve_crashkernel(void) {}
static inline int overlaps_crashkernel(unsigned long start, unsigned long size) { return 0; }
#endif
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
index 8afc92860dbb..7e9a479951a3 100644
--- a/arch/powerpc/include/asm/pnv-pci.h
+++ b/arch/powerpc/include/asm/pnv-pci.h
@@ -10,7 +10,6 @@
#include <linux/pci_hotplug.h>
#include <linux/irq.h>
#include <linux/of.h>
-#include <misc/cxl-base.h>
#include <asm/opal-api.h>
#define PCI_SLOT_ID_PREFIX (1UL << 63)
@@ -25,25 +24,9 @@ extern int pnv_pci_get_power_state(uint64_t id, uint8_t *state);
extern int pnv_pci_set_power_state(uint64_t id, uint8_t state,
struct opal_msg *msg);
-extern int pnv_pci_set_tunnel_bar(struct pci_dev *dev, uint64_t addr,
- int enable);
-int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode);
-int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
- unsigned int virq);
-int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num);
-void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num);
-int pnv_cxl_get_irq_count(struct pci_dev *dev);
-struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev);
int64_t pnv_opal_pci_msi_eoi(struct irq_data *d);
bool is_pnv_opal_msi(struct irq_chip *chip);
-#ifdef CONFIG_CXL_BASE
-int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
- struct pci_dev *dev, int num);
-void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
- struct pci_dev *dev);
-#endif
-
struct pnv_php_slot {
struct hotplug_slot slot;
uint64_t id;
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index e0059842a1c6..9ed9dde7d231 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -860,7 +860,7 @@ void __init early_init_devtree(void *params)
*/
if (fadump_reserve_mem() == 0)
#endif
- reserve_crashkernel();
+ arch_reserve_crashkernel();
early_reserve_mem();
if (memory_limit > memblock_phys_mem_size())
diff --git a/arch/powerpc/kernel/secvar-sysfs.c b/arch/powerpc/kernel/secvar-sysfs.c
index fbeb1cbac01b..afb690a172b4 100644
--- a/arch/powerpc/kernel/secvar-sysfs.c
+++ b/arch/powerpc/kernel/secvar-sysfs.c
@@ -52,7 +52,7 @@ static ssize_t size_show(struct kobject *kobj, struct kobj_attribute *attr,
}
static ssize_t data_read(struct file *filep, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
+ const struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
char *data;
@@ -85,7 +85,7 @@ data_fail:
}
static ssize_t update_write(struct file *filep, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
+ const struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
int rc;
@@ -104,11 +104,11 @@ static struct kobj_attribute format_attr = __ATTR_RO(format);
static struct kobj_attribute size_attr = __ATTR_RO(size);
-static struct bin_attribute data_attr = __BIN_ATTR_RO(data, 0);
+static struct bin_attribute data_attr __ro_after_init = __BIN_ATTR_RO(data, 0);
-static struct bin_attribute update_attr = __BIN_ATTR_WO(update, 0);
+static struct bin_attribute update_attr __ro_after_init = __BIN_ATTR_WO(update, 0);
-static struct bin_attribute *secvar_bin_attrs[] = {
+static const struct bin_attribute *const secvar_bin_attrs[] = {
&data_attr,
&update_attr,
NULL,
@@ -121,7 +121,7 @@ static struct attribute *secvar_attrs[] = {
static const struct attribute_group secvar_attr_group = {
.attrs = secvar_attrs,
- .bin_attrs = secvar_bin_attrs,
+ .bin_attrs_new = secvar_bin_attrs,
};
__ATTRIBUTE_GROUPS(secvar_attr);
@@ -130,7 +130,7 @@ static const struct kobj_type secvar_ktype = {
.default_groups = secvar_attr_groups,
};
-static int update_kobj_size(void)
+static __init int update_kobj_size(void)
{
u64 varsize;
@@ -145,7 +145,7 @@ static int update_kobj_size(void)
return 0;
}
-static int secvar_sysfs_config(struct kobject *kobj)
+static __init int secvar_sysfs_config(struct kobject *kobj)
{
struct attribute_group config_group = {
.name = "config",
@@ -158,7 +158,7 @@ static int secvar_sysfs_config(struct kobject *kobj)
return 0;
}
-static int add_var(const char *name)
+static __init int add_var(const char *name)
{
struct kobject *kobj;
int rc;
@@ -181,7 +181,7 @@ static int add_var(const char *name)
return 0;
}
-static int secvar_sysfs_load(void)
+static __init int secvar_sysfs_load(void)
{
u64 namesize = 0;
char *name;
@@ -209,7 +209,7 @@ static int secvar_sysfs_load(void)
return rc;
}
-static int secvar_sysfs_load_static(void)
+static __init int secvar_sysfs_load_static(void)
{
const char * const *name_ptr = secvar_ops->var_names;
int rc;
@@ -224,7 +224,7 @@ static int secvar_sysfs_load_static(void)
return 0;
}
-static int secvar_sysfs_init(void)
+static __init int secvar_sysfs_init(void)
{
u64 max_size;
int rc;
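This conversion (repeated below for hv-24x7 and the OPAL drivers) moves to the const-capable sysfs binary-attribute interface: the .read_new/.write_new callbacks take a const struct bin_attribute *, and groups expose the attributes via .bin_attrs_new, which lets the arrays and often the attributes themselves be const. A minimal sketch of the new-style wiring, mirroring the fields used in these hunks:

	static ssize_t blob_read(struct file *filp, struct kobject *kobj,
				 const struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
	{
		return 0;	/* illustrative no-op body */
	}

	static const struct bin_attribute blob_attr = {
		.attr	  = { .name = "blob", .mode = 0400 },
		.read_new = blob_read,
	};

	static const struct bin_attribute *const demo_bin_attrs[] = {
		&blob_attr,
		NULL,
	};

	static const struct attribute_group demo_group = {
		.bin_attrs_new = demo_bin_attrs,
	};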
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index a08b0ede4e64..68d47c53876c 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -957,8 +957,6 @@ void __init setup_arch(char **cmdline_p)
/* Parse memory topology */
mem_topology_setup();
- /* Set max_mapnr before paging_init() */
- set_max_mapnr(max_pfn);
high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
/*
diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
index 58a930a47422..00e9c267b912 100644
--- a/arch/powerpc/kexec/core.c
+++ b/arch/powerpc/kexec/core.c
@@ -58,38 +58,20 @@ void machine_kexec(struct kimage *image)
}
#ifdef CONFIG_CRASH_RESERVE
-void __init reserve_crashkernel(void)
-{
- unsigned long long crash_size, crash_base, total_mem_sz;
- int ret;
- total_mem_sz = memory_limit ? memory_limit : memblock_phys_mem_size();
- /* use common parsing */
- ret = parse_crashkernel(boot_command_line, total_mem_sz,
- &crash_size, &crash_base, NULL, NULL);
- if (ret == 0 && crash_size > 0) {
- crashk_res.start = crash_base;
- crashk_res.end = crash_base + crash_size - 1;
- }
-
- if (crashk_res.end == crashk_res.start) {
- crashk_res.start = crashk_res.end = 0;
- return;
- }
-
- /* We might have got these values via the command line or the
- * device tree, either way sanitise them now. */
-
- crash_size = resource_size(&crashk_res);
+static unsigned long long __init get_crash_base(unsigned long long crash_base)
+{
#ifndef CONFIG_NONSTATIC_KERNEL
- if (crashk_res.start != KDUMP_KERNELBASE)
+ if (crash_base != KDUMP_KERNELBASE)
printk("Crash kernel location must be 0x%x\n",
KDUMP_KERNELBASE);
- crashk_res.start = KDUMP_KERNELBASE;
+ return KDUMP_KERNELBASE;
#else
- if (!crashk_res.start) {
+ unsigned long long crash_base_align;
+
+ if (!crash_base) {
#ifdef CONFIG_PPC64
/*
* On the LPAR platform place the crash kernel to mid of
@@ -101,53 +83,51 @@ void __init reserve_crashkernel(void)
* kernel starts at 128MB offset on other platforms.
*/
if (firmware_has_feature(FW_FEATURE_LPAR))
- crashk_res.start = min_t(u64, ppc64_rma_size / 2, SZ_512M);
+ crash_base = min_t(u64, ppc64_rma_size / 2, SZ_512M);
else
- crashk_res.start = min_t(u64, ppc64_rma_size / 2, SZ_128M);
+ crash_base = min_t(u64, ppc64_rma_size / 2, SZ_128M);
#else
- crashk_res.start = KDUMP_KERNELBASE;
+ crash_base = KDUMP_KERNELBASE;
#endif
}
- crash_base = PAGE_ALIGN(crashk_res.start);
- if (crash_base != crashk_res.start) {
- printk("Crash kernel base must be aligned to 0x%lx\n",
- PAGE_SIZE);
- crashk_res.start = crash_base;
- }
+ crash_base_align = PAGE_ALIGN(crash_base);
+ if (crash_base != crash_base_align)
+ pr_warn("Crash kernel base must be aligned to 0x%lx\n", PAGE_SIZE);
+ return crash_base_align;
#endif
- crash_size = PAGE_ALIGN(crash_size);
- crashk_res.end = crashk_res.start + crash_size - 1;
+}
- /* The crash region must not overlap the current kernel */
- if (overlaps_crashkernel(__pa(_stext), _end - _stext)) {
- printk(KERN_WARNING
- "Crash kernel can not overlap current kernel\n");
- crashk_res.start = crashk_res.end = 0;
+void __init arch_reserve_crashkernel(void)
+{
+ unsigned long long crash_size, crash_base, crash_end;
+ unsigned long long kernel_start, kernel_size;
+ unsigned long long total_mem_sz;
+ int ret;
+
+ total_mem_sz = memory_limit ? memory_limit : memblock_phys_mem_size();
+
+ /* use common parsing */
+ ret = parse_crashkernel(boot_command_line, total_mem_sz, &crash_size,
+ &crash_base, NULL, NULL);
+
+ if (ret)
return;
- }
- /* Crash kernel trumps memory limit */
- if (memory_limit && memory_limit <= crashk_res.end) {
- memory_limit = crashk_res.end + 1;
- total_mem_sz = memory_limit;
- printk("Adjusted memory limit for crashkernel, now 0x%llx\n",
- memory_limit);
- }
+ crash_base = get_crash_base(crash_base);
+ crash_end = crash_base + crash_size - 1;
- printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
- "for crashkernel (System RAM: %ldMB)\n",
- (unsigned long)(crash_size >> 20),
- (unsigned long)(crashk_res.start >> 20),
- (unsigned long)(total_mem_sz >> 20));
+ kernel_start = __pa(_stext);
+ kernel_size = _end - _stext;
- if (!memblock_is_region_memory(crashk_res.start, crash_size) ||
- memblock_reserve(crashk_res.start, crash_size)) {
- pr_err("Failed to reserve memory for crashkernel!\n");
- crashk_res.start = crashk_res.end = 0;
+ /* The crash region must not overlap the current kernel */
+ if ((kernel_start + kernel_size > crash_base) && (kernel_start <= crash_end)) {
+ pr_warn("Crash kernel can not overlap current kernel\n");
return;
}
+
+ reserve_crashkernel_generic(crash_size, crash_base, 0, false);
}
int __init overlaps_crashkernel(unsigned long start, unsigned long size)
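powerpc thereby joins the generic crashkernel reservation flow (note ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION in the Kconfig hunk above, and that reserve_crashkernel_generic() no longer takes the cmdline, matching the loongarch change earlier): parse the command line, let the arch sanitise the base, reject kernel overlap, then hand off. Condensed:

	ret = parse_crashkernel(boot_command_line, total_mem_sz,
				&crash_size, &crash_base, NULL, NULL);
	if (ret)
		return;
	crash_base = get_crash_base(crash_base);	/* arch-specific fixup */
	/* ... reject overlap with the running kernel's [_stext, _end) ... */
	reserve_crashkernel_generic(crash_size, crash_base, 0, false);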
diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
index dc65c1391157..e7ef8b2a2554 100644
--- a/arch/powerpc/kexec/file_load_64.c
+++ b/arch/powerpc/kexec/file_load_64.c
@@ -49,201 +49,18 @@ const struct kexec_file_ops * const kexec_file_loaders[] = {
NULL
};
-/**
- * __locate_mem_hole_top_down - Looks top down for a large enough memory hole
- * in the memory regions between buf_min & buf_max
- * for the buffer. If found, sets kbuf->mem.
- * @kbuf: Buffer contents and memory parameters.
- * @buf_min: Minimum address for the buffer.
- * @buf_max: Maximum address for the buffer.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int __locate_mem_hole_top_down(struct kexec_buf *kbuf,
- u64 buf_min, u64 buf_max)
-{
- int ret = -EADDRNOTAVAIL;
- phys_addr_t start, end;
- u64 i;
-
- for_each_mem_range_rev(i, &start, &end) {
- /*
- * memblock uses [start, end) convention while it is
- * [start, end] here. Fix the off-by-one to have the
- * same convention.
- */
- end -= 1;
-
- if (start > buf_max)
- continue;
-
- /* Memory hole not found */
- if (end < buf_min)
- break;
-
- /* Adjust memory region based on the given range */
- if (start < buf_min)
- start = buf_min;
- if (end > buf_max)
- end = buf_max;
-
- start = ALIGN(start, kbuf->buf_align);
- if (start < end && (end - start + 1) >= kbuf->memsz) {
- /* Suitable memory range found. Set kbuf->mem */
- kbuf->mem = ALIGN_DOWN(end - kbuf->memsz + 1,
- kbuf->buf_align);
- ret = 0;
- break;
- }
- }
-
- return ret;
-}
-
-/**
- * locate_mem_hole_top_down_ppc64 - Skip special memory regions to find a
- * suitable buffer with top down approach.
- * @kbuf: Buffer contents and memory parameters.
- * @buf_min: Minimum address for the buffer.
- * @buf_max: Maximum address for the buffer.
- * @emem: Exclude memory ranges.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int locate_mem_hole_top_down_ppc64(struct kexec_buf *kbuf,
- u64 buf_min, u64 buf_max,
- const struct crash_mem *emem)
+int arch_check_excluded_range(struct kimage *image, unsigned long start,
+ unsigned long end)
{
- int i, ret = 0, err = -EADDRNOTAVAIL;
- u64 start, end, tmin, tmax;
-
- tmax = buf_max;
- for (i = (emem->nr_ranges - 1); i >= 0; i--) {
- start = emem->ranges[i].start;
- end = emem->ranges[i].end;
-
- if (start > tmax)
- continue;
-
- if (end < tmax) {
- tmin = (end < buf_min ? buf_min : end + 1);
- ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
- if (!ret)
- return 0;
- }
-
- tmax = start - 1;
-
- if (tmax < buf_min) {
- ret = err;
- break;
- }
- ret = 0;
- }
-
- if (!ret) {
- tmin = buf_min;
- ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
- }
- return ret;
-}
-
-/**
- * __locate_mem_hole_bottom_up - Looks bottom up for a large enough memory hole
- * in the memory regions between buf_min & buf_max
- * for the buffer. If found, sets kbuf->mem.
- * @kbuf: Buffer contents and memory parameters.
- * @buf_min: Minimum address for the buffer.
- * @buf_max: Maximum address for the buffer.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int __locate_mem_hole_bottom_up(struct kexec_buf *kbuf,
- u64 buf_min, u64 buf_max)
-{
- int ret = -EADDRNOTAVAIL;
- phys_addr_t start, end;
- u64 i;
-
- for_each_mem_range(i, &start, &end) {
- /*
- * memblock uses [start, end) convention while it is
- * [start, end] here. Fix the off-by-one to have the
- * same convention.
- */
- end -= 1;
-
- if (end < buf_min)
- continue;
-
- /* Memory hole not found */
- if (start > buf_max)
- break;
-
- /* Adjust memory region based on the given range */
- if (start < buf_min)
- start = buf_min;
- if (end > buf_max)
- end = buf_max;
-
- start = ALIGN(start, kbuf->buf_align);
- if (start < end && (end - start + 1) >= kbuf->memsz) {
- /* Suitable memory range found. Set kbuf->mem */
- kbuf->mem = start;
- ret = 0;
- break;
- }
- }
-
- return ret;
-}
-
-/**
- * locate_mem_hole_bottom_up_ppc64 - Skip special memory regions to find a
- * suitable buffer with bottom up approach.
- * @kbuf: Buffer contents and memory parameters.
- * @buf_min: Minimum address for the buffer.
- * @buf_max: Maximum address for the buffer.
- * @emem: Exclude memory ranges.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf,
- u64 buf_min, u64 buf_max,
- const struct crash_mem *emem)
-{
- int i, ret = 0, err = -EADDRNOTAVAIL;
- u64 start, end, tmin, tmax;
-
- tmin = buf_min;
- for (i = 0; i < emem->nr_ranges; i++) {
- start = emem->ranges[i].start;
- end = emem->ranges[i].end;
-
- if (end < tmin)
- continue;
-
- if (start > tmin) {
- tmax = (start > buf_max ? buf_max : start - 1);
- ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
- if (!ret)
- return 0;
- }
-
- tmin = end + 1;
+ struct crash_mem *emem;
+ int i;
- if (tmin > buf_max) {
- ret = err;
- break;
- }
- ret = 0;
- }
+ emem = image->arch.exclude_ranges;
+ for (i = 0; i < emem->nr_ranges; i++)
+ if (start < emem->ranges[i].end && end > emem->ranges[i].start)
+ return 1;
- if (!ret) {
- tmax = buf_max;
- ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
- }
- return ret;
+ return 0;
}
#ifdef CONFIG_CRASH_DUMP
@@ -1005,64 +822,6 @@ out:
}
/**
- * arch_kexec_locate_mem_hole - Skip special memory regions like rtas, opal,
- * tce-table, reserved-ranges & such (exclude
- * memory ranges) as they can't be used for kexec
- * segment buffer. Sets kbuf->mem when a suitable
- * memory hole is found.
- * @kbuf: Buffer contents and memory parameters.
- *
- * Assumes minimum of PAGE_SIZE alignment for kbuf->memsz & kbuf->buf_align.
- *
- * Returns 0 on success, negative errno on error.
- */
-int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
-{
- struct crash_mem **emem;
- u64 buf_min, buf_max;
- int ret;
-
- /* Look up the exclude ranges list while locating the memory hole */
- emem = &(kbuf->image->arch.exclude_ranges);
- if (!(*emem) || ((*emem)->nr_ranges == 0)) {
- pr_warn("No exclude range list. Using the default locate mem hole method\n");
- return kexec_locate_mem_hole(kbuf);
- }
-
- buf_min = kbuf->buf_min;
- buf_max = kbuf->buf_max;
- /* Segments for kdump kernel should be within crashkernel region */
- if (IS_ENABLED(CONFIG_CRASH_DUMP) && kbuf->image->type == KEXEC_TYPE_CRASH) {
- buf_min = (buf_min < crashk_res.start ?
- crashk_res.start : buf_min);
- buf_max = (buf_max > crashk_res.end ?
- crashk_res.end : buf_max);
- }
-
- if (buf_min > buf_max) {
- pr_err("Invalid buffer min and/or max values\n");
- return -EINVAL;
- }
-
- if (kbuf->top_down)
- ret = locate_mem_hole_top_down_ppc64(kbuf, buf_min, buf_max,
- *emem);
- else
- ret = locate_mem_hole_bottom_up_ppc64(kbuf, buf_min, buf_max,
- *emem);
-
- /* Add the buffer allocated to the exclude list for the next lookup */
- if (!ret) {
- add_mem_range(emem, kbuf->mem, kbuf->memsz);
- sort_memory_ranges(*emem, true);
- } else {
- pr_err("Failed to locate memory buffer of size %lu\n",
- kbuf->memsz);
- }
- return ret;
-}
-
-/**
* arch_kexec_kernel_image_probe - Does additional handling needed to setup
* kexec segments.
* @image: kexec image being loaded.
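The hundreds of lines of bespoke hole-finding above reduce to this one test because the generic kexec allocator now consults arch_check_excluded_range() while walking candidate holes, so the hook only needs the classic interval-overlap check: two ranges intersect exactly when each starts before the other ends, e.g. (assuming half-open [start, end) bounds, as the strict comparisons suggest):

	static bool ranges_overlap(u64 s1, u64 e1, u64 s2, u64 e2)
	{
		return s1 < e2 && s2 < e1;	/* non-empty intersection */
	}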
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 0fe2f085c05a..8c1582b2987d 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -15,5 +15,5 @@ obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
obj-$(CONFIG_PPC_COPRO_BASE) += copro_fault.o
-obj-$(CONFIG_PTDUMP_CORE) += ptdump/
+obj-$(CONFIG_PTDUMP) += ptdump/
obj-$(CONFIG_KASAN) += kasan/
diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
index 430d1d935a7c..e9e2dd70c060 100644
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -27,8 +27,6 @@
#include <asm/ppc-opcode.h>
#include <asm/feature-fixups.h>
-#include <misc/cxl-base.h>
-
#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
@@ -217,11 +215,9 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
static inline void tlbie(unsigned long vpn, int psize, int apsize,
int ssize, int local)
{
- unsigned int use_local;
+ unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
- use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();
-
if (use_local)
use_local = mmu_psize_defs[psize].tlbiel;
if (lock_tlbie && !use_local)
@@ -789,10 +785,6 @@ static void native_flush_hash_range(unsigned long number, int local)
unsigned long psize = batch->psize;
int ssize = batch->ssize;
int i;
- unsigned int use_local;
-
- use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
- mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
local_irq_save(flags);
@@ -827,7 +819,8 @@ static void native_flush_hash_range(unsigned long number, int local)
} pte_iterate_hashed_end();
}
- if (use_local) {
+ if (mmu_has_feature(MMU_FTR_TLBIEL) &&
+ mmu_psize_defs[psize].tlbiel && local) {
asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) {
vpn = batch->vpn[i];
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 734610052cf4..5158aefe4873 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -56,7 +56,7 @@
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
-#include <asm/copro.h>
+#include <asm/spu.h>
#include <asm/udbg.h>
#include <asm/text-patching.h>
#include <asm/fadump.h>
@@ -1600,7 +1600,9 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
return;
slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
- copro_flush_all_slbs(mm);
+#ifdef CONFIG_SPU_BASE
+ spu_flush_all_slbs(mm);
+#endif
if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) {
copy_mm_to_paca(mm);
@@ -1869,7 +1871,9 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
"to 4kB pages because of "
"non-cacheable mapping\n");
psize = mmu_vmalloc_psize = MMU_PAGE_4K;
- copro_flush_all_slbs(mm);
+#ifdef CONFIG_SPU_BASE
+ spu_flush_all_slbs(mm);
+#endif
}
}
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index c0c45d033cba..8f7d41ce2ca1 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -10,7 +10,6 @@
#include <linux/pkeys.h>
#include <linux/debugfs.h>
#include <linux/proc_fs.h>
-#include <misc/cxl-base.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
diff --git a/arch/powerpc/mm/book3s64/slice.c b/arch/powerpc/mm/book3s64/slice.c
index bc9a39821d1c..28bec5bc7879 100644
--- a/arch/powerpc/mm/book3s64/slice.c
+++ b/arch/powerpc/mm/book3s64/slice.c
@@ -22,7 +22,7 @@
#include <linux/security.h>
#include <asm/mman.h>
#include <asm/mmu.h>
-#include <asm/copro.h>
+#include <asm/spu.h>
#include <asm/hugetlb.h>
#include <asm/mmu_context.h>
@@ -248,7 +248,9 @@ static void slice_convert(struct mm_struct *mm,
spin_unlock_irqrestore(&slice_convert_lock, flags);
- copro_flush_all_slbs(mm);
+#ifdef CONFIG_SPU_BASE
+ spu_flush_all_slbs(mm);
+#endif
}
/*
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index f49fd873df8d..f5f8692e2c69 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -12,8 +12,6 @@
#include <linux/export.h>
#include <asm/reg.h>
#include <asm/copro.h>
-#include <asm/spu.h>
-#include <misc/cxl-base.h>
/*
* This ought to be kept in sync with the powerpc specific do_page_fault
@@ -135,13 +133,4 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
return 0;
}
EXPORT_SYMBOL_GPL(copro_calculate_slb);
-
-void copro_flush_all_slbs(struct mm_struct *mm)
-{
-#ifdef CONFIG_SPU_BASE
- spu_flush_all_slbs(mm);
-#endif
- cxl_slbia(mm);
-}
-EXPORT_SYMBOL_GPL(copro_flush_all_slbs);
#endif
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 6b043180220a..d3c1b749dcfc 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -113,6 +113,7 @@ static int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
gpage_freearray[nr_gpages] = 0;
list_add(&m->list, &huge_boot_pages[0]);
m->hstate = hstate;
+ m->flags = 0;
return 1;
}
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d96bbc001e73..b6f3ae03ca9e 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -41,6 +41,7 @@
#include <linux/libfdt.h>
#include <linux/memremap.h>
#include <linux/memory.h>
+#include <linux/bootmem_info.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
@@ -386,10 +387,13 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
}
#endif
+
+#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
void register_page_bootmem_memmap(unsigned long section_nr,
struct page *start_page, unsigned long size)
{
}
+#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
index 70b08bf3dd1f..4b4feba9873b 100644
--- a/arch/powerpc/mm/ioremap.c
+++ b/arch/powerpc/mm/ioremap.c
@@ -34,9 +34,9 @@ void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
return __ioremap_caller(addr, size, prot, caller);
}
-void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long flags)
+void __iomem *ioremap_prot(phys_addr_t addr, size_t size, pgprot_t prot)
{
- pte_t pte = __pte(flags);
+ pte_t pte = __pte(pgprot_val(prot));
void *caller = __builtin_return_address(0);
/* writeable implies dirty for kernel addresses */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 34806c858e54..3ddbfdbfa941 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -273,7 +273,7 @@ void __init paging_init(void)
mark_nonram_nosave();
}
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
/*
* book3s is limited to 16 page sizes due to encoding this in
@@ -295,22 +295,6 @@ void __init mem_init(void)
kasan_late_init();
- memblock_free_all();
-
-#ifdef CONFIG_HIGHMEM
- {
- unsigned long pfn, highmem_mapnr;
-
- highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
- for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
- phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
- struct page *page = pfn_to_page(pfn);
- if (memblock_is_memory(paddr) && !memblock_is_reserved(paddr))
- free_highmem_page(page);
- }
- }
-#endif /* CONFIG_HIGHMEM */
-
#if defined(CONFIG_PPC_E500) && !defined(CONFIG_SMP)
/*
* If smp is enabled, next_tlbcam_idx is initialized in the cpu up
@@ -354,7 +338,7 @@ static int __init add_system_ram_resources(void)
*/
res->end = end - 1;
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
- WARN_ON(request_resource(&iomem_resource, res) < 0);
+ WARN_ON(insert_resource(&iomem_resource, res) < 0);
}
}
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index d400fa391c27..b0768f3d2893 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -998,7 +998,7 @@ e_out:
}
static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
long hret;
@@ -1108,14 +1108,14 @@ PAGE_0_ATTR(catalog_version, "%lld\n",
(unsigned long long)be64_to_cpu(page_0->version));
PAGE_0_ATTR(catalog_len, "%lld\n",
(unsigned long long)be32_to_cpu(page_0->length) * 4096);
-static BIN_ATTR_RO(catalog, 0/* real length varies */);
+static const BIN_ATTR_RO(catalog, 0/* real length varies */);
static DEVICE_ATTR_RO(domains);
static DEVICE_ATTR_RO(sockets);
static DEVICE_ATTR_RO(chipspersocket);
static DEVICE_ATTR_RO(coresperchip);
static DEVICE_ATTR_RO(cpumask);
-static struct bin_attribute *if_bin_attrs[] = {
+static const struct bin_attribute *const if_bin_attrs[] = {
&bin_attr_catalog,
NULL,
};
@@ -1141,7 +1141,7 @@ static struct attribute *if_attrs[] = {
static const struct attribute_group if_group = {
.name = "interface",
- .bin_attrs = if_bin_attrs,
+ .bin_attrs_new = if_bin_attrs,
.attrs = if_attrs,
};
diff --git a/arch/powerpc/platforms/cell/spufs/gang.c b/arch/powerpc/platforms/cell/spufs/gang.c
index 827d338deaf4..2c2999de6bfa 100644
--- a/arch/powerpc/platforms/cell/spufs/gang.c
+++ b/arch/powerpc/platforms/cell/spufs/gang.c
@@ -25,6 +25,7 @@ struct spu_gang *alloc_spu_gang(void)
mutex_init(&gang->aff_mutex);
INIT_LIST_HEAD(&gang->list);
INIT_LIST_HEAD(&gang->aff_list_head);
+ gang->alive = 1;
out:
return gang;
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 70236d1df3d3..9f9e4b871627 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -192,13 +192,32 @@ static int spufs_fill_dir(struct dentry *dir,
return -ENOMEM;
ret = spufs_new_file(dir->d_sb, dentry, files->ops,
files->mode & mode, files->size, ctx);
- if (ret)
+ if (ret) {
+ dput(dentry);
return ret;
+ }
files++;
}
return 0;
}
+static void unuse_gang(struct dentry *dir)
+{
+ struct inode *inode = dir->d_inode;
+ struct spu_gang *gang = SPUFS_I(inode)->i_gang;
+
+ if (gang) {
+ bool dead;
+
+ inode_lock(inode); // exclusion with spufs_create_context()
+ dead = !--gang->alive;
+ inode_unlock(inode);
+
+ if (dead)
+ simple_recursive_removal(dir, NULL);
+ }
+}
+
static int spufs_dir_close(struct inode *inode, struct file *file)
{
struct inode *parent;
@@ -213,6 +232,7 @@ static int spufs_dir_close(struct inode *inode, struct file *file)
inode_unlock(parent);
WARN_ON(ret);
+ unuse_gang(dir->d_parent);
return dcache_dir_close(inode, file);
}
@@ -405,7 +425,7 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
{
int ret;
int affinity;
- struct spu_gang *gang;
+ struct spu_gang *gang = SPUFS_I(inode)->i_gang;
struct spu_context *neighbor;
struct path path = {.mnt = mnt, .dentry = dentry};
@@ -420,11 +440,15 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
return -ENODEV;
- gang = NULL;
+ if (gang) {
+ if (!gang->alive)
+ return -ENOENT;
+ gang->alive++;
+ }
+
neighbor = NULL;
affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU);
if (affinity) {
- gang = SPUFS_I(inode)->i_gang;
if (!gang)
return -EINVAL;
mutex_lock(&gang->aff_mutex);
@@ -436,8 +460,11 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
}
ret = spufs_mkdir(inode, dentry, flags, mode & 0777);
- if (ret)
+ if (ret) {
+ if (neighbor)
+ put_spu_context(neighbor);
goto out_aff_unlock;
+ }
if (affinity) {
spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx,
@@ -453,6 +480,8 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
out_aff_unlock:
if (affinity)
mutex_unlock(&gang->aff_mutex);
+ if (ret && gang)
+ gang->alive--; // can't reach 0
return ret;
}
@@ -482,6 +511,7 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
inode->i_fop = &simple_dir_operations;
d_instantiate(dentry, inode);
+ dget(dentry);
inc_nlink(dir);
inc_nlink(d_inode(dentry));
return ret;
@@ -492,6 +522,21 @@ out:
return ret;
}
+static int spufs_gang_close(struct inode *inode, struct file *file)
+{
+ unuse_gang(file->f_path.dentry);
+ return dcache_dir_close(inode, file);
+}
+
+static const struct file_operations spufs_gang_fops = {
+ .open = dcache_dir_open,
+ .release = spufs_gang_close,
+ .llseek = dcache_dir_lseek,
+ .read = generic_read_dir,
+ .iterate_shared = dcache_readdir,
+ .fsync = noop_fsync,
+};
+
static int spufs_gang_open(const struct path *path)
{
int ret;
@@ -511,7 +556,7 @@ static int spufs_gang_open(const struct path *path)
return PTR_ERR(filp);
}
- filp->f_op = &simple_dir_operations;
+ filp->f_op = &spufs_gang_fops;
fd_install(ret, filp);
return ret;
}
@@ -526,10 +571,8 @@ static int spufs_create_gang(struct inode *inode,
ret = spufs_mkgang(inode, dentry, mode & 0777);
if (!ret) {
ret = spufs_gang_open(&path);
- if (ret < 0) {
- int err = simple_rmdir(inode, dentry);
- WARN_ON(err);
- }
+ if (ret < 0)
+ unuse_gang(dentry);
}
return ret;
}
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 84958487f696..d33787c57c39 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -151,6 +151,8 @@ struct spu_gang {
int aff_flags;
struct spu *aff_ref_spu;
atomic_t aff_sched_count;
+
+ int alive;
};
/* Flag bits for spu_gang aff_flags */
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index 19f0fc5c6f1b..9e5d0c847ee2 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -21,7 +21,6 @@ obj-$(CONFIG_PRESERVE_FA_DUMP) += opal-fadump.o
obj-$(CONFIG_OPAL_CORE) += opal-core.o
obj-$(CONFIG_PCI) += pci.o pci-ioda.o pci-ioda-tce.o
obj-$(CONFIG_PCI_IOV) += pci-sriov.o
-obj-$(CONFIG_CXL_BASE) += pci-cxl.o
obj-$(CONFIG_EEH) += eeh-powernv.o
obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o
obj-$(CONFIG_OPAL_PRD) += opal-prd.o
diff --git a/arch/powerpc/platforms/powernv/opal-core.c b/arch/powerpc/platforms/powernv/opal-core.c
index a379ff86c120..e652da8f986f 100644
--- a/arch/powerpc/platforms/powernv/opal-core.c
+++ b/arch/powerpc/platforms/powernv/opal-core.c
@@ -159,7 +159,7 @@ static Elf64_Word *__init auxv_to_elf64_notes(Elf64_Word *buf,
* Returns number of bytes read on success, -errno on failure.
*/
static ssize_t read_opalcore(struct file *file, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *to,
+ const struct bin_attribute *bin_attr, char *to,
loff_t pos, size_t count)
{
struct opalcore *m;
@@ -206,9 +206,9 @@ static ssize_t read_opalcore(struct file *file, struct kobject *kobj,
return (tpos - pos);
}
-static struct bin_attribute opal_core_attr = {
+static struct bin_attribute opal_core_attr __ro_after_init = {
.attr = {.name = "core", .mode = 0400},
- .read = read_opalcore
+ .read_new = read_opalcore
};
/*
@@ -599,7 +599,7 @@ static struct attribute *mpipl_attr[] = {
NULL,
};
-static struct bin_attribute *mpipl_bin_attr[] = {
+static const struct bin_attribute *const mpipl_bin_attr[] = {
&opal_core_attr,
NULL,
@@ -607,7 +607,7 @@ static struct bin_attribute *mpipl_bin_attr[] = {
static const struct attribute_group mpipl_group = {
.attrs = mpipl_attr,
- .bin_attrs = mpipl_bin_attr,
+ .bin_attrs_new = mpipl_bin_attr,
};
static int __init opalcore_init(void)
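
Annotation: the opal-core hunks are part of the sysfs bin_attribute constification — callbacks that take a const attribute are wired up through the transitional .read_new/.write_new members, and attribute groups through .bin_attrs_new. A minimal sketch of the new-style callback, with illustrative names not taken from the patch:

        #include <linux/sysfs.h>

        static const char example_data[] = "example\n";

        /* note the const-qualified bin_attribute in the signature */
        static ssize_t example_read(struct file *file, struct kobject *kobj,
                                    const struct bin_attribute *attr,
                                    char *buf, loff_t pos, size_t count)
        {
                return memory_read_from_buffer(buf, count, &pos,
                                               example_data,
                                               sizeof(example_data));
        }

        static const struct bin_attribute example_attr = {
                .attr = { .name = "example", .mode = 0400 },
                .read_new = example_read,
        };
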
diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
index 608e4b68c5ea..27e25693cf39 100644
--- a/arch/powerpc/platforms/powernv/opal-dump.c
+++ b/arch/powerpc/platforms/powernv/opal-dump.c
@@ -286,7 +286,7 @@ out:
}
static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
ssize_t rc;
@@ -342,7 +342,7 @@ static void create_dump_obj(uint32_t id, size_t size, uint32_t type)
dump->dump_attr.attr.name = "dump";
dump->dump_attr.attr.mode = 0400;
dump->dump_attr.size = size;
- dump->dump_attr.read = dump_attr_read;
+ dump->dump_attr.read_new = dump_attr_read;
dump->id = id;
dump->size = size;
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index 5db1e733143b..de33f354e9fd 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -156,7 +156,7 @@ static const struct kobj_type elog_ktype = {
#define OPAL_MAX_ERRLOG_SIZE 16384
static ssize_t raw_attr_read(struct file *filep, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
int opal_rc;
@@ -203,7 +203,7 @@ static void create_elog_obj(uint64_t id, size_t size, uint64_t type)
elog->raw_attr.attr.name = "raw";
elog->raw_attr.attr.mode = 0400;
elog->raw_attr.size = size;
- elog->raw_attr.read = raw_attr_read;
+ elog->raw_attr.read_new = raw_attr_read;
elog->id = id;
elog->size = size;
diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c
index d5ea04e8e4c5..fd8c8621e973 100644
--- a/arch/powerpc/platforms/powernv/opal-flash.c
+++ b/arch/powerpc/platforms/powernv/opal-flash.c
@@ -432,7 +432,7 @@ static int alloc_image_buf(char *buffer, size_t count)
* and pre-allocate required memory.
*/
static ssize_t image_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
int rc;
@@ -493,7 +493,7 @@ out:
static const struct bin_attribute image_data_attr = {
.attr = {.name = "image", .mode = 0200},
.size = MAX_IMAGE_SIZE, /* Limit image size */
- .write = image_data_write,
+ .write_new = image_data_write,
};
static struct kobj_attribute validate_attribute =
diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c
index 22d6efe17b0d..f1988d0ab45c 100644
--- a/arch/powerpc/platforms/powernv/opal-msglog.c
+++ b/arch/powerpc/platforms/powernv/opal-msglog.c
@@ -94,15 +94,15 @@ ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count)
}
static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *to,
+ const struct bin_attribute *bin_attr, char *to,
loff_t pos, size_t count)
{
return opal_msglog_copy(to, pos, count);
}
-static struct bin_attribute opal_msglog_attr = {
+static struct bin_attribute opal_msglog_attr __ro_after_init = {
.attr = {.name = "msglog", .mode = 0400},
- .read = opal_msglog_read
+ .read_new = opal_msglog_read
};
struct memcons *__init memcons_init(struct device_node *node, const char *mc_prop_name)
diff --git a/arch/powerpc/platforms/powernv/pci-cxl.c b/arch/powerpc/platforms/powernv/pci-cxl.c
deleted file mode 100644
index 7e419de71db8..000000000000
--- a/arch/powerpc/platforms/powernv/pci-cxl.c
+++ /dev/null
@@ -1,153 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014-2016 IBM Corp.
- */
-
-#include <linux/module.h>
-#include <misc/cxl-base.h>
-#include <asm/pnv-pci.h>
-#include <asm/opal.h>
-
-#include "pci.h"
-
-int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- struct pnv_ioda_pe *pe;
- int rc;
-
- pe = pnv_ioda_get_pe(dev);
- if (!pe)
- return -ENODEV;
-
- pe_info(pe, "Switching PHB to CXL\n");
-
- rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number);
- if (rc == OPAL_UNSUPPORTED)
- dev_err(&dev->dev, "Required cxl mode not supported by firmware - update skiboot\n");
- else if (rc)
- dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc);
-
- return rc;
-}
-EXPORT_SYMBOL(pnv_phb_to_cxl_mode);
-
-/* Find PHB for cxl dev and allocate MSI hwirqs?
- * Returns the absolute hardware IRQ number
- */
-int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num);
-
- if (hwirq < 0) {
- dev_warn(&dev->dev, "Failed to find a free MSI\n");
- return -ENOSPC;
- }
-
- return phb->msi_base + hwirq;
-}
-EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs);
-
-void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
-
- msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num);
-}
-EXPORT_SYMBOL(pnv_cxl_release_hwirqs);
-
-void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
- struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- int i, hwirq;
-
- for (i = 1; i < CXL_IRQ_RANGES; i++) {
- if (!irqs->range[i])
- continue;
- pr_devel("cxl release irq range 0x%x: offset: 0x%lx limit: %ld\n",
- i, irqs->offset[i],
- irqs->range[i]);
- hwirq = irqs->offset[i] - phb->msi_base;
- msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq,
- irqs->range[i]);
- }
-}
-EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges);
-
-int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
- struct pci_dev *dev, int num)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- int i, hwirq, try;
-
- memset(irqs, 0, sizeof(struct cxl_irq_ranges));
-
- /* 0 is reserved for the multiplexed PSL DSI interrupt */
- for (i = 1; i < CXL_IRQ_RANGES && num; i++) {
- try = num;
- while (try) {
- hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try);
- if (hwirq >= 0)
- break;
- try /= 2;
- }
- if (!try)
- goto fail;
-
- irqs->offset[i] = phb->msi_base + hwirq;
- irqs->range[i] = try;
- pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx limit: %li\n",
- i, irqs->offset[i], irqs->range[i]);
- num -= try;
- }
- if (num)
- goto fail;
-
- return 0;
-fail:
- pnv_cxl_release_hwirq_ranges(irqs, dev);
- return -ENOSPC;
-}
-EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges);
-
-int pnv_cxl_get_irq_count(struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
-
- return phb->msi_bmp.irq_count;
-}
-EXPORT_SYMBOL(pnv_cxl_get_irq_count);
-
-int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
- unsigned int virq)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- unsigned int xive_num = hwirq - phb->msi_base;
- struct pnv_ioda_pe *pe;
- int rc;
-
- if (!(pe = pnv_ioda_get_pe(dev)))
- return -ENODEV;
-
- /* Assign XIVE to PE */
- rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
- if (rc) {
- pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x "
- "hwirq 0x%x XIVE 0x%x PE\n",
- pci_name(dev), rc, phb->msi_base, hwirq, xive_num);
- return -EIO;
- }
- pnv_set_msi_irq_chip(phb, virq);
-
- return 0;
-}
-EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index b0a14e48175c..d2a8e0287811 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -39,8 +39,6 @@
#include <asm/mmzone.h>
#include <asm/xive.h>
-#include <misc/cxl-base.h>
-
#include "powernv.h"
#include "pci.h"
#include "../../../../drivers/pci/pci.h"
@@ -1636,47 +1634,6 @@ int64_t pnv_opal_pci_msi_eoi(struct irq_data *d)
return opal_pci_msi_eoi(phb->opal_id, d->parent_data->hwirq);
}
-/*
- * The IRQ data is mapped in the XICS domain, with OPAL HW IRQ numbers
- */
-static void pnv_ioda2_msi_eoi(struct irq_data *d)
-{
- int64_t rc;
- unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
- struct pci_controller *hose = irq_data_get_irq_chip_data(d);
- struct pnv_phb *phb = hose->private_data;
-
- rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
- WARN_ON_ONCE(rc);
-
- icp_native_eoi(d);
-}
-
-/* P8/CXL only */
-void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
-{
- struct irq_data *idata;
- struct irq_chip *ichip;
-
- /* The MSI EOI OPAL call is only needed on PHB3 */
- if (phb->model != PNV_PHB_MODEL_PHB3)
- return;
-
- if (!phb->ioda.irq_chip_init) {
- /*
- * First time we setup an MSI IRQ, we need to setup the
- * corresponding IRQ chip to route correctly.
- */
- idata = irq_get_irq_data(virq);
- ichip = irq_data_get_irq_chip(idata);
- phb->ioda.irq_chip_init = 1;
- phb->ioda.irq_chip = *ichip;
- phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
- }
- irq_set_chip(virq, &phb->ioda.irq_chip);
- irq_set_chip_data(virq, phb->hose);
-}
-
static struct irq_chip pnv_pci_msi_irq_chip;
/*
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 35f566aa0424..b2c1da025410 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -14,7 +14,6 @@
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>
-#include <linux/sched/mm.h>
#include <asm/sections.h>
#include <asm/io.h>
@@ -33,8 +32,6 @@
#include "powernv.h"
#include "pci.h"
-static DEFINE_MUTEX(tunnel_mutex);
-
int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
{
struct device_node *node = np;
@@ -744,64 +741,6 @@ struct iommu_table *pnv_pci_table_alloc(int nid)
return tbl;
}
-struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
-
- return of_node_get(hose->dn);
-}
-EXPORT_SYMBOL(pnv_pci_get_phb_node);
-
-int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable)
-{
- struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);
- u64 tunnel_bar;
- __be64 val;
- int rc;
-
- if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR))
- return -ENXIO;
- if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR))
- return -ENXIO;
-
- mutex_lock(&tunnel_mutex);
- rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val);
- if (rc != OPAL_SUCCESS) {
- rc = -EIO;
- goto out;
- }
- tunnel_bar = be64_to_cpu(val);
- if (enable) {
- /*
- * Only one device per PHB can use atomics.
- * Our policy is first-come, first-served.
- */
- if (tunnel_bar) {
- if (tunnel_bar != addr)
- rc = -EBUSY;
- else
- rc = 0; /* Setting same address twice is ok */
- goto out;
- }
- } else {
- /*
- * The device that owns atomics and wants to release
- * them must pass the same address with enable == 0.
- */
- if (tunnel_bar != addr) {
- rc = -EPERM;
- goto out;
- }
- addr = 0x0ULL;
- }
- rc = opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr);
- rc = opal_error_code(rc);
-out:
- mutex_unlock(&tunnel_mutex);
- return rc;
-}
-EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar);
-
void pnv_pci_shutdown(void)
{
struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 93fba1f8661f..42075501663b 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -163,7 +163,6 @@ struct pnv_phb {
unsigned int *io_segmap;
/* IRQ chip */
- int irq_chip_init;
struct irq_chip irq_chip;
/* Sorted list of used PE's based
@@ -281,7 +280,6 @@ extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
extern struct pnv_ioda_pe *pnv_pci_bdfn_to_pe(struct pnv_phb *phb, u16 bdfn);
extern struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev);
-extern void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq);
extern unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
__u64 window_size, __u32 levels);
extern int pnv_eeh_post_init(void);
diff --git a/arch/powerpc/platforms/powernv/ultravisor.c b/arch/powerpc/platforms/powernv/ultravisor.c
index 67c8c4b2d8b1..157d9a8134e4 100644
--- a/arch/powerpc/platforms/powernv/ultravisor.c
+++ b/arch/powerpc/platforms/powernv/ultravisor.c
@@ -32,15 +32,15 @@ int __init early_init_dt_scan_ultravisor(unsigned long node, const char *uname,
static struct memcons *uv_memcons;
static ssize_t uv_msglog_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *to,
+ const struct bin_attribute *bin_attr, char *to,
loff_t pos, size_t count)
{
return memcons_copy(uv_memcons, to, pos, count);
}
-static struct bin_attribute uv_msglog_attr = {
+static struct bin_attribute uv_msglog_attr __ro_after_init = {
.attr = {.name = "msglog", .mode = 0400},
- .read = uv_msglog_read
+ .read_new = uv_msglog_read
};
static int __init uv_init(void)
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
index 4a2520ec6d7f..61b37c9400b2 100644
--- a/arch/powerpc/platforms/ps3/spu.c
+++ b/arch/powerpc/platforms/ps3/spu.c
@@ -190,10 +190,10 @@ static void spu_unmap(struct spu *spu)
static int __init setup_areas(struct spu *spu)
{
struct table {char* name; unsigned long addr; unsigned long size;};
- unsigned long shadow_flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL_RO));
spu_pdata(spu)->shadow = ioremap_prot(spu_pdata(spu)->shadow_addr,
- sizeof(struct spe_shadow), shadow_flags);
+ sizeof(struct spe_shadow),
+ pgprot_noncached_wc(PAGE_KERNEL_RO));
if (!spu_pdata(spu)->shadow) {
pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__);
goto fail_ioremap;
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 10116f68569d..e776fb35667e 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -45,6 +45,7 @@ config RISCV
select ARCH_HAS_PMEM_API
select ARCH_HAS_PREEMPT_LAZY
select ARCH_HAS_PREPARE_SYNC_CORE_CMD
+ select ARCH_HAS_PTDUMP if MMU
select ARCH_HAS_PTE_DEVMAP if 64BIT && MMU
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SET_DIRECT_MAP if MMU
@@ -115,7 +116,6 @@ config RISCV
select GENERIC_LIB_DEVMEM_IS_ALLOWED
select GENERIC_PENDING_IRQ if SMP
select GENERIC_PCI_IOMAP
- select GENERIC_PTDUMP if MMU
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL if MMU && 64BIT
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index 0257f4aa7ff4..a0e51840b9db 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -137,7 +137,7 @@ __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw())
#ifdef CONFIG_MMU
#define arch_memremap_wb(addr, size, flags) \
- ((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL))
+ ((__force void *)ioremap_prot((addr), (size), __pgprot(_PAGE_KERNEL)))
#endif
#endif /* _ASM_RISCV_IO_H */
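
Annotation: ioremap_prot() now takes a pgprot_t rather than raw protection bits, which is why arch_memremap_wb() grows a __pgprot() wrap here. A caller-side sketch of the old versus new convention (function name is illustrative):

        #include <linux/io.h>

        /* old: ioremap_prot(paddr, size, pgprot_val(PAGE_KERNEL));
         * new: pgprot_t values pass through unmodified, and raw bit
         * masks such as _PAGE_KERNEL need an explicit __pgprot() wrap */
        static void __iomem *map_regs_example(phys_addr_t paddr, size_t size)
        {
                return ioremap_prot(paddr, size, __pgprot(_PAGE_KERNEL));
        }
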
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index 3e2aebea6312..770ce18a7328 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -15,24 +15,6 @@
#define __HAVE_ARCH_PUD_FREE
#include <asm-generic/pgalloc.h>
-/*
- * While riscv platforms with riscv_ipi_for_rfence as true require an IPI to
- * perform TLB shootdown, some platforms with riscv_ipi_for_rfence as false use
- * SBI to perform TLB shootdown. To keep software pagetable walkers safe in this
- * case we switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the
- * comment below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h
- * for more details.
- */
-static inline void riscv_tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
-{
- if (riscv_use_sbi_for_rfence()) {
- tlb_remove_ptdesc(tlb, pt);
- } else {
- pagetable_dtor(pt);
- tlb_remove_page_ptdesc(tlb, pt);
- }
-}
-
static inline void pmd_populate_kernel(struct mm_struct *mm,
pmd_t *pmd, pte_t *pte)
{
@@ -108,14 +90,14 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long addr)
{
if (pgtable_l4_enabled)
- riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud));
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud));
}
static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
unsigned long addr)
{
if (pgtable_l5_enabled)
- riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
}
#endif /* __PAGETABLE_PMD_FOLDED */
@@ -143,7 +125,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long addr)
{
- riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd));
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd));
}
#endif /* __PAGETABLE_PMD_FOLDED */
@@ -151,7 +133,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long addr)
{
- riscv_tlb_remove_ptdesc(tlb, page_ptdesc(pte));
+ tlb_remove_ptdesc(tlb, page_ptdesc(pte));
}
#endif /* CONFIG_MMU */
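
Annotation: the riscv_tlb_remove_ptdesc() wrapper (and its rationale comment) can go because, per this series, the generic tlb_remove_ptdesc() now covers both paths itself: with MMU_GATHER_RCU_TABLE_FREE it defers the free through RCU, otherwise it runs the ptdesc destructor and frees immediately. Every table level collapses to a one-liner, as in the hunks above — sketch:

        /* pagetable_dtor() is now invoked by the core, not by the arch */
        static inline void __pmd_free_tlb_example(struct mmu_gather *tlb,
                                                  pmd_t *pmd,
                                                  unsigned long addr)
        {
                tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd));
        }
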
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 72e559934952..ce0dd0fed764 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -60,8 +60,7 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
bool arch_tlbbatch_should_defer(struct mm_struct *mm);
void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
- struct mm_struct *mm,
- unsigned long uaddr);
+ struct mm_struct *mm, unsigned long start, unsigned long end);
void arch_flush_tlb_batched_pending(struct mm_struct *mm);
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
diff --git a/arch/riscv/kernel/acpi.c b/arch/riscv/kernel/acpi.c
index 2fd29695a788..3f6d5a6789e8 100644
--- a/arch/riscv/kernel/acpi.c
+++ b/arch/riscv/kernel/acpi.c
@@ -305,7 +305,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
}
}
- return ioremap_prot(phys, size, pgprot_val(prot));
+ return ioremap_prot(phys, size, prot);
}
#ifdef CONFIG_PCI
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index cbe4d775ef56..b916a68d324a 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -19,7 +19,7 @@ obj-y += context.o
obj-y += pmem.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
+obj-$(CONFIG_PTDUMP) += ptdump.o
obj-$(CONFIG_KASAN) += kasan_init.o
ifdef CONFIG_KASAN
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 15b2eda4c364..66ee5ee42aa8 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -171,7 +171,7 @@ static void __init print_vm_layout(void)
static void print_vm_layout(void) { }
#endif /* CONFIG_DEBUG_VM */
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
bool swiotlb = max_pfn > PFN_DOWN(dma32_phys_limit);
#ifdef CONFIG_FLATMEM
@@ -192,7 +192,6 @@ void __init mem_init(void)
}
swiotlb_init(swiotlb, SWIOTLB_VERBOSE);
- memblock_free_all();
print_vm_layout();
}
@@ -295,10 +294,8 @@ static void __init setup_bootmem(void)
phys_ram_end = memblock_end_of_DRAM();
min_low_pfn = PFN_UP(phys_ram_base);
max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
- high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
- set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
reserve_initrd_mem();
@@ -1396,21 +1393,19 @@ static void __init arch_reserve_crashkernel(void)
{
unsigned long long low_size = 0;
unsigned long long crash_base, crash_size;
- char *cmdline = boot_command_line;
bool high = false;
int ret;
if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
return;
- ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
+ ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
&crash_size, &crash_base,
&low_size, &high);
if (ret)
return;
- reserve_crashkernel_generic(cmdline, crash_size, crash_base,
- low_size, high);
+ reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
}
void __init paging_init(void)
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 9b6e86ce3867..74dd9307fbf1 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -186,8 +186,7 @@ bool arch_tlbbatch_should_defer(struct mm_struct *mm)
}
void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
- struct mm_struct *mm,
- unsigned long uaddr)
+ struct mm_struct *mm, unsigned long start, unsigned long end)
{
cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}
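
Annotation: the batched-unmap hook changes from a single faulting address to a [start, end) range. riscv only accumulates a cpumask, so the range is ignored here, but the wider signature lets range-aware architectures batch more precisely. A hypothetical range-aware variant, for contrast with the cpumask-only implementation above:

        void arch_tlbbatch_add_pending_example(
                        struct arch_tlbflush_unmap_batch *batch,
                        struct mm_struct *mm,
                        unsigned long start, unsigned long end)
        {
                cpumask_or(&batch->cpumask, &batch->cpumask,
                           mm_cpumask(mm));
                /* a range-tracking arch could also record [start, end)
                 * and later issue ranged invalidations rather than a
                 * full flush */
        }
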
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 6412e39a795d..b8fa367c1fc9 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -92,6 +92,7 @@ config S390
select ARCH_HAS_MEM_ENCRYPT
select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
select ARCH_HAS_PREEMPT_LAZY
+ select ARCH_HAS_PTDUMP
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SCALED_CPUTIME
select ARCH_HAS_SET_DIRECT_MAP
@@ -136,6 +137,7 @@ config S390
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
select ARCH_SUPPORTS_HUGETLBFS
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && CC_IS_CLANG
+ select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_SUPPORTS_PER_VMA_LOCK
select ARCH_USE_BUILTIN_BSWAP
@@ -159,7 +161,6 @@ config S390
select GENERIC_CPU_VULNERABILITIES
select GENERIC_ENTRY
select GENERIC_GETTIMEOFDAY
- select GENERIC_PTDUMP
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
select GENERIC_VDSO_DATA_STORE
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 8b825e3578d8..6f2c9ce1b154 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -92,7 +92,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
-CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
+CONFIG_ZSMALLOC=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_SLAB_BUCKETS=y
CONFIG_SLUB_STATS=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 8392f8a5ad6d..f18a7d97ac21 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -86,7 +86,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
-CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
+CONFIG_ZSMALLOC=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_SLAB_BUCKETS=y
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/s390/hypfs/hypfs_diag_fs.c b/arch/s390/hypfs/hypfs_diag_fs.c
index 1e17e288cee4..ede951dc0085 100644
--- a/arch/s390/hypfs/hypfs_diag_fs.c
+++ b/arch/s390/hypfs/hypfs_diag_fs.c
@@ -209,6 +209,8 @@ static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info)
snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_get_info_type(),
cpu_info));
cpu_dir = hypfs_mkdir(cpus_dir, buffer);
+ if (IS_ERR(cpu_dir))
+ return PTR_ERR(cpu_dir);
rc = hypfs_create_u64(cpu_dir, "mgmtime",
cpu_info__acc_time(diag204_get_info_type(), cpu_info) -
cpu_info__lp_time(diag204_get_info_type(), cpu_info));
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 251e0372ccbd..faddb9aef3b8 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -33,7 +33,7 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL)
#define ioremap_wc(addr, size) \
- ioremap_prot((addr), (size), pgprot_val(pgprot_writecombine(PAGE_KERNEL)))
+ ioremap_prot((addr), (size), pgprot_writecombine(PAGE_KERNEL))
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 70c8f9ad13cd..430feb1a5013 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -80,7 +80,7 @@ static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
vdso_text_start = vvar_start + VDSO_NR_PAGES * PAGE_SIZE;
/* VM_MAYWRITE for COW so gdb can set breakpoints */
vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
- VM_READ|VM_EXEC|
+ VM_READ|VM_EXEC|VM_SEALED_SYSMAP|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
vdso_mapping);
if (IS_ERR(vma)) {
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index f6c2db7a8669..9726b91fe7e4 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -9,6 +9,6 @@ obj-y += page-states.o pageattr.o pgtable.o pgalloc.o extable.o
obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_PTDUMP_CORE) += dump_pagetables.o
+obj-$(CONFIG_PTDUMP) += dump_pagetables.o
obj-$(CONFIG_PGSTE) += gmap.o
obj-$(CONFIG_PFAULT) += pfault.o
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index f4ac69506608..afa085e8186c 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -74,8 +74,6 @@ static void __init setup_zero_pages(void)
{
unsigned long total_pages = memblock_estimated_nr_free_pages();
unsigned int order;
- struct page *page;
- int i;
/* Latest machines require a mapping granularity of 512KB */
order = 7;
@@ -84,16 +82,7 @@ static void __init setup_zero_pages(void)
while (order > 2 && (total_pages >> 10) < (1UL << order))
order--;
- empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (!empty_zero_page)
- panic("Out of memory in setup_zero_pages");
-
- page = virt_to_page((void *) empty_zero_page);
- split_page(page, order);
- for (i = 1 << order; i > 0; i--) {
- mark_page_reserved(page);
- page++;
- }
+ empty_zero_page = (unsigned long)memblock_alloc_or_panic(PAGE_SIZE << order, PAGE_SIZE);
zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
@@ -166,18 +155,13 @@ static void pv_init(void)
swiotlb_update_mem_attributes();
}
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
cpumask_set_cpu(0, mm_cpumask(&init_mm));
- set_max_mapnr(max_low_pfn);
- high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
-
pv_init();
- /* this will put all low memory onto the freelists */
- memblock_free_all();
setup_zero_pages(); /* Setup zeroed pages. */
}
@@ -239,16 +223,13 @@ struct s390_cma_mem_data {
static int s390_cma_check_range(struct cma *cma, void *data)
{
struct s390_cma_mem_data *mem_data;
- unsigned long start, end;
mem_data = data;
- start = cma_get_base(cma);
- end = start + cma_get_size(cma);
- if (end < mem_data->start)
- return 0;
- if (start >= mem_data->end)
- return 0;
- return -EBUSY;
+
+ if (cma_intersects(cma, mem_data->start, mem_data->end))
+ return -EBUSY;
+
+ return 0;
}
static int s390_cma_mem_notifier(struct notifier_block *nb,
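
Annotation: two simplifications land in s390/mm/init.c — setup_zero_pages() allocates straight from memblock, and the open-coded CMA overlap test collapses into cma_intersects(). memblock_alloc_or_panic() returns zeroed memory or panics, so the split_page()/mark_page_reserved() loop and the explicit out-of-memory check both disappear. Sketch of the allocation idiom:

        #include <linux/memblock.h>

        /* grab a zeroed, page-aligned block early in boot; no error
         * path is needed because the helper panics on failure */
        static void __init alloc_zero_area_example(unsigned int order)
        {
                empty_zero_page = (unsigned long)
                        memblock_alloc_or_panic(PAGE_SIZE << order,
                                                PAGE_SIZE);
        }
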
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 054240c6798f..5bbdc4190b8b 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -255,7 +255,7 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
}
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
- unsigned long prot)
+ pgprot_t prot)
{
/*
* When PCI MIO instructions are unavailable the "physical" address
@@ -265,7 +265,7 @@ void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
if (!static_branch_unlikely(&have_mio))
return (void __iomem *)phys_addr;
- return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
+ return generic_ioremap_prot(phys_addr, size, prot);
}
EXPORT_SYMBOL(ioremap_prot);
diff --git a/arch/sh/boards/mach-landisk/setup.c b/arch/sh/boards/mach-landisk/setup.c
index 2c44b94f82fb..1b3f43c3ac46 100644
--- a/arch/sh/boards/mach-landisk/setup.c
+++ b/arch/sh/boards/mach-landisk/setup.c
@@ -58,7 +58,7 @@ static int __init landisk_devices_setup(void)
/* open I/O area window */
paddrbase = virt_to_phys((void *)PA_AREA5_IO);
prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16);
- cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, pgprot_val(prot));
+ cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, prot);
if (!cf_ide_base) {
printk("allocate_cf_area : can't open CF I/O window!\n");
return -ENOMEM;
diff --git a/arch/sh/boards/mach-lboxre2/setup.c b/arch/sh/boards/mach-lboxre2/setup.c
index 20d01b430f2a..e95bde207adb 100644
--- a/arch/sh/boards/mach-lboxre2/setup.c
+++ b/arch/sh/boards/mach-lboxre2/setup.c
@@ -53,7 +53,7 @@ static int __init lboxre2_devices_setup(void)
paddrbase = virt_to_phys((void*)PA_AREA5_IO);
psize = PAGE_SIZE;
prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16);
- cf0_io_base = (u32)ioremap_prot(paddrbase, psize, pgprot_val(prot));
+ cf0_io_base = (u32)ioremap_prot(paddrbase, psize, prot);
if (!cf0_io_base) {
printk(KERN_ERR "%s : can't open CF I/O window!\n" , __func__ );
return -ENOMEM;
diff --git a/arch/sh/boards/mach-sh03/setup.c b/arch/sh/boards/mach-sh03/setup.c
index 3901b6031ad5..5c9312f334d3 100644
--- a/arch/sh/boards/mach-sh03/setup.c
+++ b/arch/sh/boards/mach-sh03/setup.c
@@ -75,7 +75,7 @@ static int __init sh03_devices_setup(void)
/* open I/O area window */
paddrbase = virt_to_phys((void *)PA_AREA5_IO);
prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16);
- cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, pgprot_val(prot));
+ cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, prot);
if (!cf_ide_base) {
printk("allocate_cf_area : can't open CF I/O window!\n");
return -ENOMEM;
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 0f663ebec700..6d282b253815 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -279,7 +279,7 @@ unsigned long long poke_real_address_q(unsigned long long addr,
#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL_NOCACHE)
#define ioremap_cache(addr, size) \
- ioremap_prot((addr), (size), pgprot_val(PAGE_KERNEL))
+ ioremap_prot((addr), (size), PAGE_KERNEL)
#endif /* CONFIG_MMU */
#include <asm-generic/io.h>
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 96d938fdf224..6fe7123d38fa 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -32,10 +32,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
-#define __pte_free_tlb(tlb, pte, addr) \
-do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, addr) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#endif /* __ASM_SH_PGALLOC_H */
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 289a2fecebef..99e302eeeec1 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -290,7 +290,6 @@ void __init paging_init(void)
*/
max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
- set_max_mapnr(max_low_pfn - min_low_pfn);
nodes_clear(node_online_map);
@@ -331,15 +330,6 @@ unsigned int mem_init_done = 0;
void __init mem_init(void)
{
- pg_data_t *pgdat;
-
- high_memory = NULL;
- for_each_online_pgdat(pgdat)
- high_memory = max_t(void *, high_memory,
- __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));
-
- memblock_free_all();
-
/* Set this up early, so we can take care of the zero page */
cpu_cache_init();
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index 33d20f34560f..5bbde53fb32d 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -73,10 +73,9 @@ __ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
#endif /* CONFIG_29BIT */
void __iomem __ref *ioremap_prot(phys_addr_t phys_addr, size_t size,
- unsigned long prot)
+ pgprot_t pgprot)
{
void __iomem *mapped;
- pgprot_t pgprot = __pgprot(prot);
mapped = __ioremap_trapped(phys_addr, size);
if (mapped)
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 2b7f358762c1..dc28f2c4eee3 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -936,7 +936,6 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned int nr)
{
- arch_enter_lazy_mmu_mode();
for (;;) {
__set_pte_at(mm, addr, ptep, pte, 0);
if (--nr == 0)
@@ -945,7 +944,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
pte_val(pte) += PAGE_SIZE;
addr += PAGE_SIZE;
}
- arch_leave_lazy_mmu_mode();
}
#define set_ptes set_ptes
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index d96a14ffceeb..fdc93dd12c3e 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -232,19 +232,7 @@ static void __init taint_real_pages(void)
}
}
-static void map_high_region(unsigned long start_pfn, unsigned long end_pfn)
-{
- unsigned long tmp;
-
-#ifdef CONFIG_DEBUG_HIGHMEM
- printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn);
-#endif
-
- for (tmp = start_pfn; tmp < end_pfn; tmp++)
- free_highmem_page(pfn_to_page(tmp));
-}
-
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
int i;
@@ -274,23 +262,6 @@ void __init mem_init(void)
memset(sparc_valid_addr_bitmap, 0, i << 2);
taint_real_pages();
-
- max_mapnr = last_valid_pfn - pfn_base;
- high_memory = __va(max_low_pfn << PAGE_SHIFT);
- memblock_free_all();
-
- for (i = 0; sp_banks[i].num_bytes != 0; i++) {
- unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
- unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
-
- if (end_pfn <= highstart_pfn)
- continue;
-
- if (start_pfn < highstart_pfn)
- start_pfn = highstart_pfn;
-
- map_high_region(start_pfn, end_pfn);
- }
}
void sparc_flush_page_to_ram(struct page *page)
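
Annotation: same pattern as the riscv and s390 hunks earlier — mem_init() becomes arch_mm_preinit(), and the removed lines (high_memory, max_mapnr, memblock_free_all(), the free_highmem_page() loop) are absorbed by the generic mm init path. Sketch of the resulting division of labor, hedged since the generic side is not shown in this diff:

        /* the generic mm core now appears to call memblock_free_all()
         * and derive high_memory/max_mapnr itself, so the arch hook
         * keeps only the work that must run beforehand */
        void __init arch_mm_preinit(void)
        {
                /* e.g. validate RAM banks, init swiotlb, taint bad pages */
        }
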
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 05882bca5b73..760818950464 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2505,10 +2505,6 @@ static void __init register_page_bootmem_info(void)
}
void __init mem_init(void)
{
- high_memory = __va(last_valid_pfn << PAGE_SHIFT);
-
- memblock_free_all();
-
/*
* Must be done after boot memory is put on freelist, because here we
* might set fields in deferred struct pages that have not yet been
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 8648a50afe88..a35ddcca5e76 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -52,8 +52,10 @@ out:
void arch_enter_lazy_mmu_mode(void)
{
- struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
+ struct tlb_batch *tb;
+ preempt_disable();
+ tb = this_cpu_ptr(&tlb_batch);
tb->active = 1;
}
@@ -64,6 +66,7 @@ void arch_leave_lazy_mmu_mode(void)
if (tb->tlb_nr)
flush_tlb_pending();
tb->active = 0;
+ preempt_enable();
}
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
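
Annotation: the per-CPU tlb_batch must not move CPUs between enter and leave, so arch_enter_lazy_mmu_mode() now disables preemption and arch_leave_lazy_mmu_mode() re-enables it. Lazy MMU sections are short and must not nest, which is also why set_ptes() above loses its inner enter/leave pair. Caller-side sketch of the invariant:

        static void update_range_example(struct mm_struct *mm,
                                         unsigned long addr, pte_t *ptep,
                                         pte_t pte, unsigned int nr)
        {
                arch_enter_lazy_mmu_mode();     /* disables preemption */
                set_ptes(mm, addr, ptep, pte, nr);
                arch_leave_lazy_mmu_mode();     /* flushes, re-enables */
        }
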
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 18051b1cfce0..79509c7f39de 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -12,6 +12,7 @@ config UML
select ARCH_HAS_KCOV
select ARCH_HAS_STRNCPY_FROM_USER
select ARCH_HAS_STRNLEN_USER
+ select ARCH_HAS_STRICT_KERNEL_RWX
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_KASAN if X86_64
select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
diff --git a/arch/um/drivers/Kconfig b/arch/um/drivers/Kconfig
index ede40a160c5e..9cb196070614 100644
--- a/arch/um/drivers/Kconfig
+++ b/arch/um/drivers/Kconfig
@@ -345,16 +345,20 @@ config UML_RTC
by providing a fake RTC clock that causes a wakeup at the right
time.
-config UML_PCI_OVER_VIRTIO
- bool "Enable PCI over VIRTIO device simulation"
- # in theory, just VIRTIO is enough, but that causes recursion
- depends on VIRTIO_UML
+config UML_PCI
+ bool
select FORCE_PCI
select UML_IOMEM_EMULATION
select UML_DMA_EMULATION
select PCI_MSI
select PCI_LOCKLESS_CONFIG
+config UML_PCI_OVER_VIRTIO
+ bool "Enable PCI over VIRTIO device simulation"
+ # in theory, just VIRTIO is enough, but that causes recursion
+ depends on VIRTIO_UML
+ select UML_PCI
+
config UML_PCI_OVER_VIRTIO_DEVICE_ID
int "set the virtio device ID for PCI emulation"
default -1
diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile
index 57882e6bc215..0a5820343ad3 100644
--- a/arch/um/drivers/Makefile
+++ b/arch/um/drivers/Makefile
@@ -60,7 +60,8 @@ obj-$(CONFIG_BLK_DEV_COW_COMMON) += cow_user.o
obj-$(CONFIG_UML_RANDOM) += random.o
obj-$(CONFIG_VIRTIO_UML) += virtio_uml.o
obj-$(CONFIG_UML_RTC) += rtc.o
-obj-$(CONFIG_UML_PCI_OVER_VIRTIO) += virt-pci.o
+obj-$(CONFIG_UML_PCI) += virt-pci.o
+obj-$(CONFIG_UML_PCI_OVER_VIRTIO) += virtio_pcidev.o
# pcap_user.o must be added explicitly.
USER_OBJS := fd.o null.o pty.o tty.o xterm.o slip_common.o vde_user.o vector_user.o
diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c
index da985e0dc69a..ca08c91f47a3 100644
--- a/arch/um/drivers/random.c
+++ b/arch/um/drivers/random.c
@@ -79,7 +79,7 @@ static int __init rng_init (void)
if (err < 0)
goto err_out_cleanup_hw;
- sigio_broken(random_fd);
+ sigio_broken();
hwrng.name = RNG_MODULE_NAME;
hwrng.read = rng_dev_read;
diff --git a/arch/um/drivers/rtc_user.c b/arch/um/drivers/rtc_user.c
index 7c3cec4c68cf..51e79f3148cd 100644
--- a/arch/um/drivers/rtc_user.c
+++ b/arch/um/drivers/rtc_user.c
@@ -39,7 +39,7 @@ int uml_rtc_start(bool timetravel)
}
/* apparently timerfd won't send SIGIO, use workaround */
- sigio_broken(uml_rtc_irq_fds[0]);
+ sigio_broken();
err = add_sigio_fd(uml_rtc_irq_fds[0]);
if (err < 0) {
close(uml_rtc_irq_fds[0]);
diff --git a/arch/um/drivers/ubd.h b/arch/um/drivers/ubd.h
index f016fe15499f..2985c14661f4 100644
--- a/arch/um/drivers/ubd.h
+++ b/arch/um/drivers/ubd.h
@@ -7,8 +7,10 @@
#ifndef __UM_UBD_USER_H
#define __UM_UBD_USER_H
-extern int start_io_thread(unsigned long sp, int *fds_out);
-extern int io_thread(void *arg);
+#include <os.h>
+
+int start_io_thread(struct os_helper_thread **td_out, int *fd_out);
+void *io_thread(void *arg);
extern int kernel_fd;
extern int ubd_read_poll(int timeout);
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 0b1e61f72fb3..4de6613e7468 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -474,12 +474,12 @@ static irqreturn_t ubd_intr(int irq, void *dev)
}
/* Only changed by ubd_init, which is an initcall. */
-static int io_pid = -1;
+static struct os_helper_thread *io_td;
static void kill_io_thread(void)
{
- if(io_pid != -1)
- os_kill_process(io_pid, 1);
+ if (io_td)
+ os_kill_helper_thread(io_td);
}
__uml_exitcall(kill_io_thread);
@@ -1104,8 +1104,8 @@ static int __init ubd_init(void)
late_initcall(ubd_init);
-static int __init ubd_driver_init(void){
- unsigned long stack;
+static int __init ubd_driver_init(void)
+{
int err;
/* Set by CONFIG_BLK_DEV_UBD_SYNC or ubd=sync.*/
@@ -1114,13 +1114,11 @@ static int __init ubd_driver_init(void){
/* Letting ubd=sync be like using ubd#s= instead of ubd#= is
* enough. So use anyway the io thread. */
}
- stack = alloc_stack(0, 0);
- io_pid = start_io_thread(stack + PAGE_SIZE, &thread_fd);
- if(io_pid < 0){
+ err = start_io_thread(&io_td, &thread_fd);
+ if (err < 0) {
printk(KERN_ERR
"ubd : Failed to start I/O thread (errno = %d) - "
- "falling back to synchronous I/O\n", -io_pid);
- io_pid = -1;
+ "falling back to synchronous I/O\n", -err);
return 0;
}
err = um_request_irq(UBD_IRQ, thread_fd, IRQ_READ, ubd_intr,
@@ -1496,12 +1494,11 @@ int kernel_fd = -1;
/* Only changed by the io thread. XXX: currently unused. */
static int io_count;
-int io_thread(void *arg)
+void *io_thread(void *arg)
{
int n, count, written, res;
- os_set_pdeathsig();
- os_fix_helper_signals();
+ os_fix_helper_thread_signals();
while(1){
n = bulk_req_safe_read(
@@ -1543,5 +1540,5 @@ int io_thread(void *arg)
} while (written < n);
}
- return 0;
+ return NULL;
}
diff --git a/arch/um/drivers/ubd_user.c b/arch/um/drivers/ubd_user.c
index b4f8b8e60564..c5e6545f6fcf 100644
--- a/arch/um/drivers/ubd_user.c
+++ b/arch/um/drivers/ubd_user.c
@@ -25,9 +25,9 @@
static struct pollfd kernel_pollfd;
-int start_io_thread(unsigned long sp, int *fd_out)
+int start_io_thread(struct os_helper_thread **td_out, int *fd_out)
{
- int pid, fds[2], err;
+ int fds[2], err;
err = os_pipe(fds, 1, 1);
if(err < 0){
@@ -47,14 +47,14 @@ int start_io_thread(unsigned long sp, int *fd_out)
goto out_close;
}
- pid = clone(io_thread, (void *) sp, CLONE_FILES | CLONE_VM, NULL);
- if(pid < 0){
- err = -errno;
- printk("start_io_thread - clone failed : errno = %d\n", errno);
+ err = os_run_helper_thread(td_out, io_thread, NULL);
+ if (err < 0) {
+ printk("%s - failed to run helper thread, err = %d\n",
+ __func__, -err);
goto out_close;
}
- return(pid);
+ return 0;
out_close:
os_close_file(fds[0]);
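
Annotation: with the raw clone()-plus-hand-allocated-stack thread gone, the I/O thread runs on the UML helper-thread API. The sketch below relies only on the signatures visible in ubd.h above; names are illustrative:

        #include <os.h>

        static struct os_helper_thread *example_td;

        static void *example_thread(void *arg)
        {
                os_fix_helper_thread_signals();
                /* ... service requests until killed ... */
                return NULL;
        }

        static int example_start(void)
        {
                return os_run_helper_thread(&example_td, example_thread,
                                            NULL);
        }

        static void example_stop(void)
        {
                if (example_td)
                        os_kill_helper_thread(example_td);
        }
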
diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c
index dd5580f975cc..b83b5a765d4e 100644
--- a/arch/um/drivers/virt-pci.c
+++ b/arch/um/drivers/virt-pci.c
@@ -5,52 +5,19 @@
*/
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/virtio.h>
-#include <linux/virtio_config.h>
#include <linux/logic_iomem.h>
#include <linux/of_platform.h>
#include <linux/irqdomain.h>
-#include <linux/virtio_pcidev.h>
-#include <linux/virtio-uml.h>
-#include <linux/delay.h>
#include <linux/msi.h>
#include <linux/unaligned.h>
#include <irq_kern.h>
+#include "virt-pci.h"
+
#define MAX_DEVICES 8
#define MAX_MSI_VECTORS 32
#define CFG_SPACE_SIZE 4096
-/* for MSI-X we have a 32-bit payload */
-#define MAX_IRQ_MSG_SIZE (sizeof(struct virtio_pcidev_msg) + sizeof(u32))
-#define NUM_IRQ_MSGS 10
-
-struct um_pci_message_buffer {
- struct virtio_pcidev_msg hdr;
- u8 data[8];
-};
-
-struct um_pci_device {
- struct virtio_device *vdev;
-
- /* for now just standard BARs */
- u8 resptr[PCI_STD_NUM_BARS];
-
- struct virtqueue *cmd_vq, *irq_vq;
-
-#define UM_PCI_WRITE_BUFS 20
- struct um_pci_message_buffer bufs[UM_PCI_WRITE_BUFS + 1];
- void *extra_ptrs[UM_PCI_WRITE_BUFS + 1];
- DECLARE_BITMAP(used_bufs, UM_PCI_WRITE_BUFS);
-
-#define UM_PCI_STAT_WAITING 0
- unsigned long status;
-
- int irq;
-
- bool platform;
-};
-
struct um_pci_device_reg {
struct um_pci_device *dev;
void __iomem *iomem;
@@ -65,179 +32,15 @@ static struct irq_domain *um_pci_inner_domain;
static struct irq_domain *um_pci_msi_domain;
static unsigned long um_pci_msi_used[BITS_TO_LONGS(MAX_MSI_VECTORS)];
-static unsigned int um_pci_max_delay_us = 40000;
-module_param_named(max_delay_us, um_pci_max_delay_us, uint, 0644);
-
-static int um_pci_get_buf(struct um_pci_device *dev, bool *posted)
-{
- int i;
-
- for (i = 0; i < UM_PCI_WRITE_BUFS; i++) {
- if (!test_and_set_bit(i, dev->used_bufs))
- return i;
- }
-
- *posted = false;
- return UM_PCI_WRITE_BUFS;
-}
-
-static void um_pci_free_buf(struct um_pci_device *dev, void *buf)
-{
- int i;
-
- if (buf == &dev->bufs[UM_PCI_WRITE_BUFS]) {
- kfree(dev->extra_ptrs[UM_PCI_WRITE_BUFS]);
- dev->extra_ptrs[UM_PCI_WRITE_BUFS] = NULL;
- return;
- }
-
- for (i = 0; i < UM_PCI_WRITE_BUFS; i++) {
- if (buf == &dev->bufs[i]) {
- kfree(dev->extra_ptrs[i]);
- dev->extra_ptrs[i] = NULL;
- WARN_ON(!test_and_clear_bit(i, dev->used_bufs));
- return;
- }
- }
-
- WARN_ON(1);
-}
-
-static int um_pci_send_cmd(struct um_pci_device *dev,
- struct virtio_pcidev_msg *cmd,
- unsigned int cmd_size,
- const void *extra, unsigned int extra_size,
- void *out, unsigned int out_size)
-{
- struct scatterlist out_sg, extra_sg, in_sg;
- struct scatterlist *sgs_list[] = {
- [0] = &out_sg,
- [1] = extra ? &extra_sg : &in_sg,
- [2] = extra ? &in_sg : NULL,
- };
- struct um_pci_message_buffer *buf;
- int delay_count = 0;
- bool bounce_out;
- int ret, len;
- int buf_idx;
- bool posted;
-
- if (WARN_ON(cmd_size < sizeof(*cmd) || cmd_size > sizeof(*buf)))
- return -EINVAL;
-
- switch (cmd->op) {
- case VIRTIO_PCIDEV_OP_CFG_WRITE:
- case VIRTIO_PCIDEV_OP_MMIO_WRITE:
- case VIRTIO_PCIDEV_OP_MMIO_MEMSET:
- /* in PCI, writes are posted, so don't wait */
- posted = !out;
- WARN_ON(!posted);
- break;
- default:
- posted = false;
- break;
- }
-
- bounce_out = !posted && cmd_size <= sizeof(*cmd) &&
- out && out_size <= sizeof(buf->data);
-
- buf_idx = um_pci_get_buf(dev, &posted);
- buf = &dev->bufs[buf_idx];
- memcpy(buf, cmd, cmd_size);
-
- if (posted && extra && extra_size > sizeof(buf) - cmd_size) {
- dev->extra_ptrs[buf_idx] = kmemdup(extra, extra_size,
- GFP_ATOMIC);
-
- if (!dev->extra_ptrs[buf_idx]) {
- um_pci_free_buf(dev, buf);
- return -ENOMEM;
- }
- extra = dev->extra_ptrs[buf_idx];
- } else if (extra && extra_size <= sizeof(buf) - cmd_size) {
- memcpy((u8 *)buf + cmd_size, extra, extra_size);
- cmd_size += extra_size;
- extra_size = 0;
- extra = NULL;
- cmd = (void *)buf;
- } else {
- cmd = (void *)buf;
- }
-
- sg_init_one(&out_sg, cmd, cmd_size);
- if (extra)
- sg_init_one(&extra_sg, extra, extra_size);
- /* allow stack for small buffers */
- if (bounce_out)
- sg_init_one(&in_sg, buf->data, out_size);
- else if (out)
- sg_init_one(&in_sg, out, out_size);
-
- /* add to internal virtio queue */
- ret = virtqueue_add_sgs(dev->cmd_vq, sgs_list,
- extra ? 2 : 1,
- out ? 1 : 0,
- cmd, GFP_ATOMIC);
- if (ret) {
- um_pci_free_buf(dev, buf);
- return ret;
- }
-
- if (posted) {
- virtqueue_kick(dev->cmd_vq);
- return 0;
- }
-
- /* kick and poll for getting a response on the queue */
- set_bit(UM_PCI_STAT_WAITING, &dev->status);
- virtqueue_kick(dev->cmd_vq);
- ret = 0;
-
- while (1) {
- void *completed = virtqueue_get_buf(dev->cmd_vq, &len);
-
- if (completed == buf)
- break;
-
- if (completed)
- um_pci_free_buf(dev, completed);
-
- if (WARN_ONCE(virtqueue_is_broken(dev->cmd_vq) ||
- ++delay_count > um_pci_max_delay_us,
- "um virt-pci delay: %d", delay_count)) {
- ret = -EIO;
- break;
- }
- udelay(1);
- }
- clear_bit(UM_PCI_STAT_WAITING, &dev->status);
-
- if (bounce_out)
- memcpy(out, buf->data, out_size);
-
- um_pci_free_buf(dev, buf);
-
- return ret;
-}
-
static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset,
int size)
{
struct um_pci_device_reg *reg = priv;
struct um_pci_device *dev = reg->dev;
- struct virtio_pcidev_msg hdr = {
- .op = VIRTIO_PCIDEV_OP_CFG_READ,
- .size = size,
- .addr = offset,
- };
- /* max 8, we might not use it all */
- u8 data[8];
if (!dev)
return ULONG_MAX;
- memset(data, 0xff, sizeof(data));
-
switch (size) {
case 1:
case 2:
@@ -251,23 +54,7 @@ static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset,
return ULONG_MAX;
}
- if (um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, size))
- return ULONG_MAX;
-
- switch (size) {
- case 1:
- return data[0];
- case 2:
- return le16_to_cpup((void *)data);
- case 4:
- return le32_to_cpup((void *)data);
-#ifdef CONFIG_64BIT
- case 8:
- return le64_to_cpup((void *)data);
-#endif
- default:
- return ULONG_MAX;
- }
+ return dev->ops->cfgspace_read(dev, offset, size);
}
static void um_pci_cfgspace_write(void *priv, unsigned int offset, int size,
@@ -275,42 +62,24 @@ static void um_pci_cfgspace_write(void *priv, unsigned int offset, int size,
{
struct um_pci_device_reg *reg = priv;
struct um_pci_device *dev = reg->dev;
- struct {
- struct virtio_pcidev_msg hdr;
- /* maximum size - we may only use parts of it */
- u8 data[8];
- } msg = {
- .hdr = {
- .op = VIRTIO_PCIDEV_OP_CFG_WRITE,
- .size = size,
- .addr = offset,
- },
- };
if (!dev)
return;
switch (size) {
case 1:
- msg.data[0] = (u8)val;
- break;
case 2:
- put_unaligned_le16(val, (void *)msg.data);
- break;
case 4:
- put_unaligned_le32(val, (void *)msg.data);
- break;
#ifdef CONFIG_64BIT
case 8:
- put_unaligned_le64(val, (void *)msg.data);
- break;
#endif
+ break;
default:
WARN(1, "invalid config space write size %d\n", size);
return;
}
- WARN_ON(um_pci_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0));
+ dev->ops->cfgspace_write(dev, offset, size, val);
}
static const struct logic_iomem_ops um_pci_device_cfgspace_ops = {
@@ -318,30 +87,14 @@ static const struct logic_iomem_ops um_pci_device_cfgspace_ops = {
.write = um_pci_cfgspace_write,
};
-static void um_pci_bar_copy_from(void *priv, void *buffer,
- unsigned int offset, int size)
+static unsigned long um_pci_bar_read(void *priv, unsigned int offset,
+ int size)
{
u8 *resptr = priv;
struct um_pci_device *dev = container_of(resptr - *resptr,
struct um_pci_device,
resptr[0]);
- struct virtio_pcidev_msg hdr = {
- .op = VIRTIO_PCIDEV_OP_MMIO_READ,
- .bar = *resptr,
- .size = size,
- .addr = offset,
- };
-
- memset(buffer, 0xff, size);
-
- um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, buffer, size);
-}
-
-static unsigned long um_pci_bar_read(void *priv, unsigned int offset,
- int size)
-{
- /* 8 is maximum size - we may only use parts of it */
- u8 data[8];
+ u8 bar = *resptr;
switch (size) {
case 1:
@@ -352,72 +105,60 @@ static unsigned long um_pci_bar_read(void *priv, unsigned int offset,
#endif
break;
default:
- WARN(1, "invalid config space read size %d\n", size);
+ WARN(1, "invalid bar read size %d\n", size);
return ULONG_MAX;
}
- um_pci_bar_copy_from(priv, data, offset, size);
+ return dev->ops->bar_read(dev, bar, offset, size);
+}
+
+static void um_pci_bar_write(void *priv, unsigned int offset, int size,
+ unsigned long val)
+{
+ u8 *resptr = priv;
+ struct um_pci_device *dev = container_of(resptr - *resptr,
+ struct um_pci_device,
+ resptr[0]);
+ u8 bar = *resptr;
switch (size) {
case 1:
- return data[0];
case 2:
- return le16_to_cpup((void *)data);
case 4:
- return le32_to_cpup((void *)data);
#ifdef CONFIG_64BIT
case 8:
- return le64_to_cpup((void *)data);
#endif
+ break;
default:
- return ULONG_MAX;
+ WARN(1, "invalid bar write size %d\n", size);
+ return;
}
+
+ dev->ops->bar_write(dev, bar, offset, size, val);
}
-static void um_pci_bar_copy_to(void *priv, unsigned int offset,
- const void *buffer, int size)
+static void um_pci_bar_copy_from(void *priv, void *buffer,
+ unsigned int offset, int size)
{
u8 *resptr = priv;
struct um_pci_device *dev = container_of(resptr - *resptr,
struct um_pci_device,
resptr[0]);
- struct virtio_pcidev_msg hdr = {
- .op = VIRTIO_PCIDEV_OP_MMIO_WRITE,
- .bar = *resptr,
- .size = size,
- .addr = offset,
- };
+ u8 bar = *resptr;
- um_pci_send_cmd(dev, &hdr, sizeof(hdr), buffer, size, NULL, 0);
+ dev->ops->bar_copy_from(dev, bar, buffer, offset, size);
}
-static void um_pci_bar_write(void *priv, unsigned int offset, int size,
- unsigned long val)
+static void um_pci_bar_copy_to(void *priv, unsigned int offset,
+ const void *buffer, int size)
{
- /* maximum size - we may only use parts of it */
- u8 data[8];
-
- switch (size) {
- case 1:
- data[0] = (u8)val;
- break;
- case 2:
- put_unaligned_le16(val, (void *)data);
- break;
- case 4:
- put_unaligned_le32(val, (void *)data);
- break;
-#ifdef CONFIG_64BIT
- case 8:
- put_unaligned_le64(val, (void *)data);
- break;
-#endif
- default:
- WARN(1, "invalid config space write size %d\n", size);
- return;
- }
+ u8 *resptr = priv;
+ struct um_pci_device *dev = container_of(resptr - *resptr,
+ struct um_pci_device,
+ resptr[0]);
+ u8 bar = *resptr;
- um_pci_bar_copy_to(priv, offset, data, size);
+ dev->ops->bar_copy_to(dev, bar, offset, buffer, size);
}
static void um_pci_bar_set(void *priv, unsigned int offset, u8 value, int size)
@@ -426,20 +167,9 @@ static void um_pci_bar_set(void *priv, unsigned int offset, u8 value, int size)
struct um_pci_device *dev = container_of(resptr - *resptr,
struct um_pci_device,
resptr[0]);
- struct {
- struct virtio_pcidev_msg hdr;
- u8 data;
- } msg = {
- .hdr = {
- .op = VIRTIO_PCIDEV_OP_CFG_WRITE,
- .bar = *resptr,
- .size = size,
- .addr = offset,
- },
- .data = value,
- };
+ u8 bar = *resptr;
- um_pci_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0);
+ dev->ops->bar_set(dev, bar, offset, value, size);
}
static const struct logic_iomem_ops um_pci_device_bar_ops = {
@@ -486,76 +216,6 @@ static void um_pci_rescan(void)
pci_unlock_rescan_remove();
}
-static void um_pci_irq_vq_addbuf(struct virtqueue *vq, void *buf, bool kick)
-{
- struct scatterlist sg[1];
-
- sg_init_one(sg, buf, MAX_IRQ_MSG_SIZE);
- if (virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC))
- kfree(buf);
- else if (kick)
- virtqueue_kick(vq);
-}
-
-static void um_pci_handle_irq_message(struct virtqueue *vq,
- struct virtio_pcidev_msg *msg)
-{
- struct virtio_device *vdev = vq->vdev;
- struct um_pci_device *dev = vdev->priv;
-
- if (!dev->irq)
- return;
-
- /* we should properly chain interrupts, but on ARCH=um we don't care */
-
- switch (msg->op) {
- case VIRTIO_PCIDEV_OP_INT:
- generic_handle_irq(dev->irq);
- break;
- case VIRTIO_PCIDEV_OP_MSI:
- /* our MSI message is just the interrupt number */
- if (msg->size == sizeof(u32))
- generic_handle_irq(le32_to_cpup((void *)msg->data));
- else
- generic_handle_irq(le16_to_cpup((void *)msg->data));
- break;
- case VIRTIO_PCIDEV_OP_PME:
- /* nothing to do - we already woke up due to the message */
- break;
- default:
- dev_err(&vdev->dev, "unexpected virt-pci message %d\n", msg->op);
- break;
- }
-}
-
-static void um_pci_cmd_vq_cb(struct virtqueue *vq)
-{
- struct virtio_device *vdev = vq->vdev;
- struct um_pci_device *dev = vdev->priv;
- void *cmd;
- int len;
-
- if (test_bit(UM_PCI_STAT_WAITING, &dev->status))
- return;
-
- while ((cmd = virtqueue_get_buf(vq, &len)))
- um_pci_free_buf(dev, cmd);
-}
-
-static void um_pci_irq_vq_cb(struct virtqueue *vq)
-{
- struct virtio_pcidev_msg *msg;
- int len;
-
- while ((msg = virtqueue_get_buf(vq, &len))) {
- if (len >= sizeof(*msg))
- um_pci_handle_irq_message(vq, msg);
-
- /* recycle the message buffer */
- um_pci_irq_vq_addbuf(vq, msg, true);
- }
-}
-
#ifdef CONFIG_OF
/* Copied from arch/x86/kernel/devicetree.c */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
@@ -577,200 +237,6 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
}
#endif
-static int um_pci_init_vqs(struct um_pci_device *dev)
-{
- struct virtqueue_info vqs_info[] = {
- { "cmd", um_pci_cmd_vq_cb },
- { "irq", um_pci_irq_vq_cb },
- };
- struct virtqueue *vqs[2];
- int err, i;
-
- err = virtio_find_vqs(dev->vdev, 2, vqs, vqs_info, NULL);
- if (err)
- return err;
-
- dev->cmd_vq = vqs[0];
- dev->irq_vq = vqs[1];
-
- virtio_device_ready(dev->vdev);
-
- for (i = 0; i < NUM_IRQ_MSGS; i++) {
- void *msg = kzalloc(MAX_IRQ_MSG_SIZE, GFP_KERNEL);
-
- if (msg)
- um_pci_irq_vq_addbuf(dev->irq_vq, msg, false);
- }
-
- virtqueue_kick(dev->irq_vq);
-
- return 0;
-}
-
-static void __um_pci_virtio_platform_remove(struct virtio_device *vdev,
- struct um_pci_device *dev)
-{
- virtio_reset_device(vdev);
- vdev->config->del_vqs(vdev);
-
- mutex_lock(&um_pci_mtx);
- um_pci_platform_device = NULL;
- mutex_unlock(&um_pci_mtx);
-
- kfree(dev);
-}
-
-static int um_pci_virtio_platform_probe(struct virtio_device *vdev,
- struct um_pci_device *dev)
-{
- int ret;
-
- dev->platform = true;
-
- mutex_lock(&um_pci_mtx);
-
- if (um_pci_platform_device) {
- mutex_unlock(&um_pci_mtx);
- ret = -EBUSY;
- goto out_free;
- }
-
- ret = um_pci_init_vqs(dev);
- if (ret) {
- mutex_unlock(&um_pci_mtx);
- goto out_free;
- }
-
- um_pci_platform_device = dev;
-
- mutex_unlock(&um_pci_mtx);
-
- ret = of_platform_default_populate(vdev->dev.of_node, NULL, &vdev->dev);
- if (ret)
- __um_pci_virtio_platform_remove(vdev, dev);
-
- return ret;
-
-out_free:
- kfree(dev);
- return ret;
-}
-
-static int um_pci_virtio_probe(struct virtio_device *vdev)
-{
- struct um_pci_device *dev;
- int i, free = -1;
- int err = -ENOSPC;
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- dev->vdev = vdev;
- vdev->priv = dev;
-
- if (of_device_is_compatible(vdev->dev.of_node, "simple-bus"))
- return um_pci_virtio_platform_probe(vdev, dev);
-
- mutex_lock(&um_pci_mtx);
- for (i = 0; i < MAX_DEVICES; i++) {
- if (um_pci_devices[i].dev)
- continue;
- free = i;
- break;
- }
-
- if (free < 0)
- goto error;
-
- err = um_pci_init_vqs(dev);
- if (err)
- goto error;
-
- dev->irq = irq_alloc_desc(numa_node_id());
- if (dev->irq < 0) {
- err = dev->irq;
- goto err_reset;
- }
- um_pci_devices[free].dev = dev;
- vdev->priv = dev;
-
- mutex_unlock(&um_pci_mtx);
-
- device_set_wakeup_enable(&vdev->dev, true);
-
- /*
- * In order to do suspend-resume properly, don't allow VQs
- * to be suspended.
- */
- virtio_uml_set_no_vq_suspend(vdev, true);
-
- um_pci_rescan();
- return 0;
-err_reset:
- virtio_reset_device(vdev);
- vdev->config->del_vqs(vdev);
-error:
- mutex_unlock(&um_pci_mtx);
- kfree(dev);
- return err;
-}
-
-static void um_pci_virtio_remove(struct virtio_device *vdev)
-{
- struct um_pci_device *dev = vdev->priv;
- int i;
-
- if (dev->platform) {
- of_platform_depopulate(&vdev->dev);
- __um_pci_virtio_platform_remove(vdev, dev);
- return;
- }
-
- device_set_wakeup_enable(&vdev->dev, false);
-
- mutex_lock(&um_pci_mtx);
- for (i = 0; i < MAX_DEVICES; i++) {
- if (um_pci_devices[i].dev != dev)
- continue;
-
- um_pci_devices[i].dev = NULL;
- irq_free_desc(dev->irq);
-
- break;
- }
- mutex_unlock(&um_pci_mtx);
-
- if (i < MAX_DEVICES) {
- struct pci_dev *pci_dev;
-
- pci_dev = pci_get_slot(bridge->bus, i);
- if (pci_dev)
- pci_stop_and_remove_bus_device_locked(pci_dev);
- }
-
- /* Stop all virtqueues */
- virtio_reset_device(vdev);
- dev->cmd_vq = NULL;
- dev->irq_vq = NULL;
- vdev->config->del_vqs(vdev);
-
- kfree(dev);
-}
-
-static struct virtio_device_id id_table[] = {
- { CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID, VIRTIO_DEV_ANY_ID },
- { 0 },
-};
-MODULE_DEVICE_TABLE(virtio, id_table);
-
-static struct virtio_driver um_pci_virtio_driver = {
- .driver.name = "virtio-pci",
- .id_table = id_table,
- .probe = um_pci_virtio_probe,
- .remove = um_pci_virtio_remove,
-};
-
static struct resource virt_cfgspace_resource = {
.name = "PCI config space",
.start = 0xf0000000 - MAX_DEVICES * CFG_SPACE_SIZE,
@@ -889,7 +355,7 @@ static void um_pci_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
}
static struct irq_chip um_pci_msi_bottom_irq_chip = {
- .name = "UM virtio MSI",
+ .name = "UM virtual MSI",
.irq_compose_msi_msg = um_pci_compose_msi_msg,
};
@@ -939,7 +405,7 @@ static const struct irq_domain_ops um_pci_inner_domain_ops = {
};
static struct irq_chip um_pci_msi_irq_chip = {
- .name = "UM virtio PCIe MSI",
+ .name = "UM virtual PCIe MSI",
.irq_mask = pci_msi_mask_irq,
.irq_unmask = pci_msi_unmask_irq,
};
@@ -998,6 +464,78 @@ static struct resource virt_platform_resource = {
.flags = IORESOURCE_MEM,
};
+int um_pci_device_register(struct um_pci_device *dev)
+{
+ int i, free = -1;
+ int err = 0;
+
+ mutex_lock(&um_pci_mtx);
+ for (i = 0; i < MAX_DEVICES; i++) {
+ if (um_pci_devices[i].dev)
+ continue;
+ free = i;
+ break;
+ }
+
+ if (free < 0) {
+ err = -ENOSPC;
+ goto out;
+ }
+
+ dev->irq = irq_alloc_desc(numa_node_id());
+ if (dev->irq < 0) {
+ err = dev->irq;
+ goto out;
+ }
+
+ um_pci_devices[free].dev = dev;
+
+out:
+ mutex_unlock(&um_pci_mtx);
+ if (!err)
+ um_pci_rescan();
+ return err;
+}
+
+void um_pci_device_unregister(struct um_pci_device *dev)
+{
+ int i;
+
+ mutex_lock(&um_pci_mtx);
+ for (i = 0; i < MAX_DEVICES; i++) {
+ if (um_pci_devices[i].dev != dev)
+ continue;
+ um_pci_devices[i].dev = NULL;
+ irq_free_desc(dev->irq);
+ break;
+ }
+ mutex_unlock(&um_pci_mtx);
+
+ if (i < MAX_DEVICES) {
+ struct pci_dev *pci_dev;
+
+ pci_dev = pci_get_slot(bridge->bus, i);
+ if (pci_dev)
+ pci_stop_and_remove_bus_device_locked(pci_dev);
+ }
+}
+
+int um_pci_platform_device_register(struct um_pci_device *dev)
+{
+ guard(mutex)(&um_pci_mtx);
+ if (um_pci_platform_device)
+ return -EBUSY;
+ um_pci_platform_device = dev;
+ return 0;
+}
+
+void um_pci_platform_device_unregister(struct um_pci_device *dev)
+{
+ guard(mutex)(&um_pci_mtx);
+ if (um_pci_platform_device == dev)
+ um_pci_platform_device = NULL;
+}
+
static int __init um_pci_init(void)
{
struct irq_domain_info inner_domain_info = {
@@ -1014,10 +552,6 @@ static int __init um_pci_init(void)
WARN_ON(logic_iomem_add_region(&virt_platform_resource,
&um_pci_platform_ops));
- if (WARN(CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID < 0,
- "No virtio device ID configured for PCI - no PCI support\n"))
- return 0;
-
bridge = pci_alloc_host_bridge(0);
if (!bridge) {
err = -ENOMEM;
@@ -1065,10 +599,8 @@ static int __init um_pci_init(void)
if (err)
goto free;
- err = register_virtio_driver(&um_pci_virtio_driver);
- if (err)
- goto free;
return 0;
+
free:
if (!IS_ERR_OR_NULL(um_pci_inner_domain))
irq_domain_remove(um_pci_inner_domain);
@@ -1080,11 +612,10 @@ free:
}
return err;
}
-module_init(um_pci_init);
+device_initcall(um_pci_init);
static void __exit um_pci_exit(void)
{
- unregister_virtio_driver(&um_pci_virtio_driver);
irq_domain_remove(um_pci_msi_domain);
irq_domain_remove(um_pci_inner_domain);
pci_free_resource_list(&bridge->windows);
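
The new platform-device helpers in this file use guard(mutex) from linux/cleanup.h, which takes the lock at declaration and releases it automatically on every exit path. For comparison, a minimal open-coded equivalent of um_pci_platform_device_unregister() (a sketch, not part of the patch):

static void um_pci_platform_device_unregister_open_coded(struct um_pci_device *dev)
{
	mutex_lock(&um_pci_mtx);
	if (um_pci_platform_device == dev)
		um_pci_platform_device = NULL;
	mutex_unlock(&um_pci_mtx);	/* guard() emits this implicitly */
}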
diff --git a/arch/um/drivers/virt-pci.h b/arch/um/drivers/virt-pci.h
new file mode 100644
index 000000000000..b20d1475d1eb
--- /dev/null
+++ b/arch/um/drivers/virt-pci.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __UM_VIRT_PCI_H
+#define __UM_VIRT_PCI_H
+
+#include <linux/pci.h>
+
+struct um_pci_device {
+ const struct um_pci_ops *ops;
+
+ /* for now just standard BARs */
+ u8 resptr[PCI_STD_NUM_BARS];
+
+ int irq;
+};
+
+struct um_pci_ops {
+ unsigned long (*cfgspace_read)(struct um_pci_device *dev,
+ unsigned int offset, int size);
+ void (*cfgspace_write)(struct um_pci_device *dev, unsigned int offset,
+ int size, unsigned long val);
+
+ unsigned long (*bar_read)(struct um_pci_device *dev, int bar,
+ unsigned int offset, int size);
+ void (*bar_write)(struct um_pci_device *dev, int bar,
+ unsigned int offset, int size, unsigned long val);
+
+ void (*bar_copy_from)(struct um_pci_device *dev, int bar, void *buffer,
+ unsigned int offset, int size);
+ void (*bar_copy_to)(struct um_pci_device *dev, int bar,
+ unsigned int offset, const void *buffer, int size);
+ void (*bar_set)(struct um_pci_device *dev, int bar,
+ unsigned int offset, u8 value, int size);
+};
+
+int um_pci_device_register(struct um_pci_device *dev);
+void um_pci_device_unregister(struct um_pci_device *dev);
+
+int um_pci_platform_device_register(struct um_pci_device *dev);
+void um_pci_platform_device_unregister(struct um_pci_device *dev);
+
+#endif /* __UM_VIRT_PCI_H */
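
This header is the seam between the generic UML PCI bridge and its backends: a backend fills in um_pci_ops and registers a um_pci_device. A minimal sketch of a hypothetical alternative backend (names are illustrative; only cfgspace_read is shown, a real backend must supply the remaining hooks):

#include <linux/init.h>
#include "virt-pci.h"

/* Fake config space: little-endian vendor 0x1234, device 0x5678 */
static const u8 demo_cfg[4] = { 0x34, 0x12, 0x78, 0x56 };

static unsigned long demo_cfgspace_read(struct um_pci_device *dev,
					unsigned int offset, int size)
{
	unsigned long val = 0;
	int i;

	/* Unimplemented bytes read as 0xff, per PCI convention */
	for (i = 0; i < size; i++)
		val |= (unsigned long)(offset + i < sizeof(demo_cfg) ?
				       demo_cfg[offset + i] : 0xff) << (8 * i);
	return val;
}

static const struct um_pci_ops demo_ops = {
	.cfgspace_read	= demo_cfgspace_read,
	/* .cfgspace_write and the .bar_* hooks omitted for brevity */
};

static struct um_pci_device demo_pdev = { .ops = &demo_ops };

static int __init demo_backend_init(void)
{
	/* Takes a slot in um_pci_devices[] and triggers a bus rescan */
	return um_pci_device_register(&demo_pdev);
}
/* late_initcall, like the virtio backend, so um_pci_init() runs first */
late_initcall(demo_backend_init);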
diff --git a/arch/um/drivers/virtio_pcidev.c b/arch/um/drivers/virtio_pcidev.c
new file mode 100644
index 000000000000..3c4c4c928fdd
--- /dev/null
+++ b/arch/um/drivers/virtio_pcidev.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Intel Corporation
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/logic_iomem.h>
+#include <linux/of_platform.h>
+#include <linux/irqdomain.h>
+#include <linux/virtio_pcidev.h>
+#include <linux/virtio-uml.h>
+#include <linux/delay.h>
+#include <linux/msi.h>
+#include <linux/unaligned.h>
+#include <irq_kern.h>
+
+#include "virt-pci.h"
+
+#define to_virtio_pcidev(_pdev) \
+ container_of(_pdev, struct virtio_pcidev_device, pdev)
+
+/* for MSI-X we have a 32-bit payload */
+#define MAX_IRQ_MSG_SIZE (sizeof(struct virtio_pcidev_msg) + sizeof(u32))
+#define NUM_IRQ_MSGS 10
+
+struct virtio_pcidev_message_buffer {
+ struct virtio_pcidev_msg hdr;
+ u8 data[8];
+};
+
+struct virtio_pcidev_device {
+ struct um_pci_device pdev;
+ struct virtio_device *vdev;
+
+ struct virtqueue *cmd_vq, *irq_vq;
+
+#define VIRTIO_PCIDEV_WRITE_BUFS 20
+ struct virtio_pcidev_message_buffer bufs[VIRTIO_PCIDEV_WRITE_BUFS + 1];
+ void *extra_ptrs[VIRTIO_PCIDEV_WRITE_BUFS + 1];
+ DECLARE_BITMAP(used_bufs, VIRTIO_PCIDEV_WRITE_BUFS);
+
+#define UM_PCI_STAT_WAITING 0
+ unsigned long status;
+
+ bool platform;
+};
+
+static unsigned int virtio_pcidev_max_delay_us = 40000;
+module_param_named(max_delay_us, virtio_pcidev_max_delay_us, uint, 0644);
+
+static int virtio_pcidev_get_buf(struct virtio_pcidev_device *dev, bool *posted)
+{
+ int i;
+
+ for (i = 0; i < VIRTIO_PCIDEV_WRITE_BUFS; i++) {
+ if (!test_and_set_bit(i, dev->used_bufs))
+ return i;
+ }
+
+ *posted = false;
+ return VIRTIO_PCIDEV_WRITE_BUFS;
+}
+
+static void virtio_pcidev_free_buf(struct virtio_pcidev_device *dev, void *buf)
+{
+ int i;
+
+ if (buf == &dev->bufs[VIRTIO_PCIDEV_WRITE_BUFS]) {
+ kfree(dev->extra_ptrs[VIRTIO_PCIDEV_WRITE_BUFS]);
+ dev->extra_ptrs[VIRTIO_PCIDEV_WRITE_BUFS] = NULL;
+ return;
+ }
+
+ for (i = 0; i < VIRTIO_PCIDEV_WRITE_BUFS; i++) {
+ if (buf == &dev->bufs[i]) {
+ kfree(dev->extra_ptrs[i]);
+ dev->extra_ptrs[i] = NULL;
+ WARN_ON(!test_and_clear_bit(i, dev->used_bufs));
+ return;
+ }
+ }
+
+ WARN_ON(1);
+}
+
+static int virtio_pcidev_send_cmd(struct virtio_pcidev_device *dev,
+ struct virtio_pcidev_msg *cmd,
+ unsigned int cmd_size,
+ const void *extra, unsigned int extra_size,
+ void *out, unsigned int out_size)
+{
+ struct scatterlist out_sg, extra_sg, in_sg;
+ struct scatterlist *sgs_list[] = {
+ [0] = &out_sg,
+ [1] = extra ? &extra_sg : &in_sg,
+ [2] = extra ? &in_sg : NULL,
+ };
+ struct virtio_pcidev_message_buffer *buf;
+ int delay_count = 0;
+ bool bounce_out;
+ int ret, len;
+ int buf_idx;
+ bool posted;
+
+ if (WARN_ON(cmd_size < sizeof(*cmd) || cmd_size > sizeof(*buf)))
+ return -EINVAL;
+
+ switch (cmd->op) {
+ case VIRTIO_PCIDEV_OP_CFG_WRITE:
+ case VIRTIO_PCIDEV_OP_MMIO_WRITE:
+ case VIRTIO_PCIDEV_OP_MMIO_MEMSET:
+ /* in PCI, writes are posted, so don't wait */
+ posted = !out;
+ WARN_ON(!posted);
+ break;
+ default:
+ posted = false;
+ break;
+ }
+
+ bounce_out = !posted && cmd_size <= sizeof(*cmd) &&
+ out && out_size <= sizeof(buf->data);
+
+ buf_idx = virtio_pcidev_get_buf(dev, &posted);
+ buf = &dev->bufs[buf_idx];
+ memcpy(buf, cmd, cmd_size);
+
+ if (posted && extra && extra_size > sizeof(buf) - cmd_size) {
+ dev->extra_ptrs[buf_idx] = kmemdup(extra, extra_size,
+ GFP_ATOMIC);
+
+ if (!dev->extra_ptrs[buf_idx]) {
+ virtio_pcidev_free_buf(dev, buf);
+ return -ENOMEM;
+ }
+ extra = dev->extra_ptrs[buf_idx];
+ } else if (extra && extra_size <= sizeof(buf) - cmd_size) {
+ memcpy((u8 *)buf + cmd_size, extra, extra_size);
+ cmd_size += extra_size;
+ extra_size = 0;
+ extra = NULL;
+ cmd = (void *)buf;
+ } else {
+ cmd = (void *)buf;
+ }
+
+ sg_init_one(&out_sg, cmd, cmd_size);
+ if (extra)
+ sg_init_one(&extra_sg, extra, extra_size);
+ /* allow stack for small buffers */
+ if (bounce_out)
+ sg_init_one(&in_sg, buf->data, out_size);
+ else if (out)
+ sg_init_one(&in_sg, out, out_size);
+
+ /* add to internal virtio queue */
+ ret = virtqueue_add_sgs(dev->cmd_vq, sgs_list,
+ extra ? 2 : 1,
+ out ? 1 : 0,
+ cmd, GFP_ATOMIC);
+ if (ret) {
+ virtio_pcidev_free_buf(dev, buf);
+ return ret;
+ }
+
+ if (posted) {
+ virtqueue_kick(dev->cmd_vq);
+ return 0;
+ }
+
+ /* kick and poll for getting a response on the queue */
+ set_bit(UM_PCI_STAT_WAITING, &dev->status);
+ virtqueue_kick(dev->cmd_vq);
+ ret = 0;
+
+ while (1) {
+ void *completed = virtqueue_get_buf(dev->cmd_vq, &len);
+
+ if (completed == buf)
+ break;
+
+ if (completed)
+ virtio_pcidev_free_buf(dev, completed);
+
+ if (WARN_ONCE(virtqueue_is_broken(dev->cmd_vq) ||
+ ++delay_count > virtio_pcidev_max_delay_us,
+ "um virt-pci delay: %d", delay_count)) {
+ ret = -EIO;
+ break;
+ }
+ udelay(1);
+ }
+ clear_bit(UM_PCI_STAT_WAITING, &dev->status);
+
+ if (bounce_out)
+ memcpy(out, buf->data, out_size);
+
+ virtio_pcidev_free_buf(dev, buf);
+
+ return ret;
+}
+
+static unsigned long virtio_pcidev_cfgspace_read(struct um_pci_device *pdev,
+ unsigned int offset, int size)
+{
+ struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev);
+ struct virtio_pcidev_msg hdr = {
+ .op = VIRTIO_PCIDEV_OP_CFG_READ,
+ .size = size,
+ .addr = offset,
+ };
+ /* max 8, we might not use it all */
+ u8 data[8];
+
+ memset(data, 0xff, sizeof(data));
+
+ /* size has been checked in um_pci_cfgspace_read() */
+ if (virtio_pcidev_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, size))
+ return ULONG_MAX;
+
+ switch (size) {
+ case 1:
+ return data[0];
+ case 2:
+ return le16_to_cpup((void *)data);
+ case 4:
+ return le32_to_cpup((void *)data);
+#ifdef CONFIG_64BIT
+ case 8:
+ return le64_to_cpup((void *)data);
+#endif
+ default:
+ return ULONG_MAX;
+ }
+}
+
+static void virtio_pcidev_cfgspace_write(struct um_pci_device *pdev,
+ unsigned int offset, int size,
+ unsigned long val)
+{
+ struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev);
+ struct {
+ struct virtio_pcidev_msg hdr;
+ /* maximum size - we may only use parts of it */
+ u8 data[8];
+ } msg = {
+ .hdr = {
+ .op = VIRTIO_PCIDEV_OP_CFG_WRITE,
+ .size = size,
+ .addr = offset,
+ },
+ };
+
+ /* size has been checked in um_pci_cfgspace_write() */
+ switch (size) {
+ case 1:
+ msg.data[0] = (u8)val;
+ break;
+ case 2:
+ put_unaligned_le16(val, (void *)msg.data);
+ break;
+ case 4:
+ put_unaligned_le32(val, (void *)msg.data);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ put_unaligned_le64(val, (void *)msg.data);
+ break;
+#endif
+ }
+
+ WARN_ON(virtio_pcidev_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0));
+}
+
+static void virtio_pcidev_bar_copy_from(struct um_pci_device *pdev,
+ int bar, void *buffer,
+ unsigned int offset, int size)
+{
+ struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev);
+ struct virtio_pcidev_msg hdr = {
+ .op = VIRTIO_PCIDEV_OP_MMIO_READ,
+ .bar = bar,
+ .size = size,
+ .addr = offset,
+ };
+
+ memset(buffer, 0xff, size);
+
+ virtio_pcidev_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, buffer, size);
+}
+
+static unsigned long virtio_pcidev_bar_read(struct um_pci_device *pdev, int bar,
+ unsigned int offset, int size)
+{
+ /* 8 is maximum size - we may only use parts of it */
+ u8 data[8];
+
+ /* size has been checked in um_pci_bar_read() */
+ virtio_pcidev_bar_copy_from(pdev, bar, data, offset, size);
+
+ switch (size) {
+ case 1:
+ return data[0];
+ case 2:
+ return le16_to_cpup((void *)data);
+ case 4:
+ return le32_to_cpup((void *)data);
+#ifdef CONFIG_64BIT
+ case 8:
+ return le64_to_cpup((void *)data);
+#endif
+ default:
+ return ULONG_MAX;
+ }
+}
+
+static void virtio_pcidev_bar_copy_to(struct um_pci_device *pdev,
+ int bar, unsigned int offset,
+ const void *buffer, int size)
+{
+ struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev);
+ struct virtio_pcidev_msg hdr = {
+ .op = VIRTIO_PCIDEV_OP_MMIO_WRITE,
+ .bar = bar,
+ .size = size,
+ .addr = offset,
+ };
+
+ virtio_pcidev_send_cmd(dev, &hdr, sizeof(hdr), buffer, size, NULL, 0);
+}
+
+static void virtio_pcidev_bar_write(struct um_pci_device *pdev, int bar,
+ unsigned int offset, int size,
+ unsigned long val)
+{
+ /* maximum size - we may only use parts of it */
+ u8 data[8];
+
+ /* size has been checked in um_pci_bar_write() */
+ switch (size) {
+ case 1:
+ data[0] = (u8)val;
+ break;
+ case 2:
+ put_unaligned_le16(val, (void *)data);
+ break;
+ case 4:
+ put_unaligned_le32(val, (void *)data);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ put_unaligned_le64(val, (void *)data);
+ break;
+#endif
+ }
+
+ virtio_pcidev_bar_copy_to(pdev, bar, offset, data, size);
+}
+
+static void virtio_pcidev_bar_set(struct um_pci_device *pdev, int bar,
+ unsigned int offset, u8 value, int size)
+{
+ struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev);
+ struct {
+ struct virtio_pcidev_msg hdr;
+ u8 data;
+ } msg = {
+ .hdr = {
+ .op = VIRTIO_PCIDEV_OP_CFG_WRITE,
+ .bar = bar,
+ .size = size,
+ .addr = offset,
+ },
+ .data = value,
+ };
+
+ virtio_pcidev_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0);
+}
+
+static const struct um_pci_ops virtio_pcidev_um_pci_ops = {
+ .cfgspace_read = virtio_pcidev_cfgspace_read,
+ .cfgspace_write = virtio_pcidev_cfgspace_write,
+ .bar_read = virtio_pcidev_bar_read,
+ .bar_write = virtio_pcidev_bar_write,
+ .bar_copy_from = virtio_pcidev_bar_copy_from,
+ .bar_copy_to = virtio_pcidev_bar_copy_to,
+ .bar_set = virtio_pcidev_bar_set,
+};
+
+static void virtio_pcidev_irq_vq_addbuf(struct virtqueue *vq, void *buf, bool kick)
+{
+ struct scatterlist sg[1];
+
+ sg_init_one(sg, buf, MAX_IRQ_MSG_SIZE);
+ if (virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC))
+ kfree(buf);
+ else if (kick)
+ virtqueue_kick(vq);
+}
+
+static void virtio_pcidev_handle_irq_message(struct virtqueue *vq,
+ struct virtio_pcidev_msg *msg)
+{
+ struct virtio_device *vdev = vq->vdev;
+ struct virtio_pcidev_device *dev = vdev->priv;
+
+ if (!dev->pdev.irq)
+ return;
+
+ /* we should properly chain interrupts, but on ARCH=um we don't care */
+
+ switch (msg->op) {
+ case VIRTIO_PCIDEV_OP_INT:
+ generic_handle_irq(dev->pdev.irq);
+ break;
+ case VIRTIO_PCIDEV_OP_MSI:
+ /* our MSI message is just the interrupt number */
+ if (msg->size == sizeof(u32))
+ generic_handle_irq(le32_to_cpup((void *)msg->data));
+ else
+ generic_handle_irq(le16_to_cpup((void *)msg->data));
+ break;
+ case VIRTIO_PCIDEV_OP_PME:
+ /* nothing to do - we already woke up due to the message */
+ break;
+ default:
+ dev_err(&vdev->dev, "unexpected virt-pci message %d\n", msg->op);
+ break;
+ }
+}
+
+static void virtio_pcidev_cmd_vq_cb(struct virtqueue *vq)
+{
+ struct virtio_device *vdev = vq->vdev;
+ struct virtio_pcidev_device *dev = vdev->priv;
+ void *cmd;
+ int len;
+
+ if (test_bit(UM_PCI_STAT_WAITING, &dev->status))
+ return;
+
+ while ((cmd = virtqueue_get_buf(vq, &len)))
+ virtio_pcidev_free_buf(dev, cmd);
+}
+
+static void virtio_pcidev_irq_vq_cb(struct virtqueue *vq)
+{
+ struct virtio_pcidev_msg *msg;
+ int len;
+
+ while ((msg = virtqueue_get_buf(vq, &len))) {
+ if (len >= sizeof(*msg))
+ virtio_pcidev_handle_irq_message(vq, msg);
+
+ /* recycle the message buffer */
+ virtio_pcidev_irq_vq_addbuf(vq, msg, true);
+ }
+}
+
+static int virtio_pcidev_init_vqs(struct virtio_pcidev_device *dev)
+{
+ struct virtqueue_info vqs_info[] = {
+ { "cmd", virtio_pcidev_cmd_vq_cb },
+ { "irq", virtio_pcidev_irq_vq_cb },
+ };
+ struct virtqueue *vqs[2];
+ int err, i;
+
+ err = virtio_find_vqs(dev->vdev, 2, vqs, vqs_info, NULL);
+ if (err)
+ return err;
+
+ dev->cmd_vq = vqs[0];
+ dev->irq_vq = vqs[1];
+
+ virtio_device_ready(dev->vdev);
+
+ for (i = 0; i < NUM_IRQ_MSGS; i++) {
+ void *msg = kzalloc(MAX_IRQ_MSG_SIZE, GFP_KERNEL);
+
+ if (msg)
+ virtio_pcidev_irq_vq_addbuf(dev->irq_vq, msg, false);
+ }
+
+ virtqueue_kick(dev->irq_vq);
+
+ return 0;
+}
+
+static void __virtio_pcidev_virtio_platform_remove(struct virtio_device *vdev,
+ struct virtio_pcidev_device *dev)
+{
+ um_pci_platform_device_unregister(&dev->pdev);
+
+ virtio_reset_device(vdev);
+ vdev->config->del_vqs(vdev);
+
+ kfree(dev);
+}
+
+static int virtio_pcidev_virtio_platform_probe(struct virtio_device *vdev,
+ struct virtio_pcidev_device *dev)
+{
+ int err;
+
+ dev->platform = true;
+
+ err = virtio_pcidev_init_vqs(dev);
+ if (err)
+ goto err_free;
+
+ err = um_pci_platform_device_register(&dev->pdev);
+ if (err)
+ goto err_reset;
+
+ err = of_platform_default_populate(vdev->dev.of_node, NULL, &vdev->dev);
+ if (err)
+ goto err_unregister;
+
+ return 0;
+
+err_unregister:
+ um_pci_platform_device_unregister(&dev->pdev);
+err_reset:
+ virtio_reset_device(vdev);
+ vdev->config->del_vqs(vdev);
+err_free:
+ kfree(dev);
+ return err;
+}
+
+static int virtio_pcidev_virtio_probe(struct virtio_device *vdev)
+{
+ struct virtio_pcidev_device *dev;
+ int err;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->vdev = vdev;
+ vdev->priv = dev;
+
+ dev->pdev.ops = &virtio_pcidev_um_pci_ops;
+
+ if (of_device_is_compatible(vdev->dev.of_node, "simple-bus"))
+ return virtio_pcidev_virtio_platform_probe(vdev, dev);
+
+ err = virtio_pcidev_init_vqs(dev);
+ if (err)
+ goto err_free;
+
+ err = um_pci_device_register(&dev->pdev);
+ if (err)
+ goto err_reset;
+
+ device_set_wakeup_enable(&vdev->dev, true);
+
+ /*
+ * In order to do suspend-resume properly, don't allow VQs
+ * to be suspended.
+ */
+ virtio_uml_set_no_vq_suspend(vdev, true);
+
+ return 0;
+
+err_reset:
+ virtio_reset_device(vdev);
+ vdev->config->del_vqs(vdev);
+err_free:
+ kfree(dev);
+ return err;
+}
+
+static void virtio_pcidev_virtio_remove(struct virtio_device *vdev)
+{
+ struct virtio_pcidev_device *dev = vdev->priv;
+
+ if (dev->platform) {
+ of_platform_depopulate(&vdev->dev);
+ __virtio_pcidev_virtio_platform_remove(vdev, dev);
+ return;
+ }
+
+ device_set_wakeup_enable(&vdev->dev, false);
+
+ um_pci_device_unregister(&dev->pdev);
+
+ /* Stop all virtqueues */
+ virtio_reset_device(vdev);
+ dev->cmd_vq = NULL;
+ dev->irq_vq = NULL;
+ vdev->config->del_vqs(vdev);
+
+ kfree(dev);
+}
+
+static struct virtio_device_id id_table[] = {
+ { CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+static struct virtio_driver virtio_pcidev_virtio_driver = {
+ .driver.name = "virtio-pci",
+ .id_table = id_table,
+ .probe = virtio_pcidev_virtio_probe,
+ .remove = virtio_pcidev_virtio_remove,
+};
+
+static int __init virtio_pcidev_init(void)
+{
+ if (WARN(CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID < 0,
+ "No virtio device ID configured for PCI - no PCI support\n"))
+ return 0;
+
+ return register_virtio_driver(&virtio_pcidev_virtio_driver);
+}
+late_initcall(virtio_pcidev_init);
+
+static void __exit virtio_pcidev_exit(void)
+{
+ unregister_virtio_driver(&virtio_pcidev_virtio_driver);
+}
+module_exit(virtio_pcidev_exit);
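
For reference, virtio_pcidev_handle_irq_message() above expects the device side to send a virtio_pcidev_msg header whose payload is the interrupt number in little-endian form. A sketch of the matching encoder, as a hypothetical device-side helper (not part of the patch):

static size_t demo_encode_msi(struct virtio_pcidev_msg *msg, u32 irq)
{
	msg->op = VIRTIO_PCIDEV_OP_MSI;
	msg->size = sizeof(u32);		/* a 16-bit payload is also accepted */
	put_unaligned_le32(irq, msg->data);	/* fits within MAX_IRQ_MSG_SIZE */
	return sizeof(*msg) + sizeof(u32);
}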
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 428f2c5158c2..04ab3b653a48 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -13,6 +13,7 @@ generic-y += irq_work.h
generic-y += kdebug.h
generic-y += mcs_spinlock.h
generic-y += mmiowb.h
+generic-y += module.h
generic-y += module.lds.h
generic-y += param.h
generic-y += parport.h
diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h
index f0af23c3aeb2..826ec44b58cd 100644
--- a/arch/um/include/asm/pgalloc.h
+++ b/arch/um/include/asm/pgalloc.h
@@ -25,27 +25,18 @@
*/
extern pgd_t *pgd_alloc(struct mm_struct *);
-#define __pte_free_tlb(tlb, pte, address) \
-do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, address) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#if CONFIG_PGTABLE_LEVELS > 2
-#define __pmd_free_tlb(tlb, pmd, address) \
-do { \
- pagetable_dtor(virt_to_ptdesc(pmd)); \
- tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pmd)); \
-} while (0)
+#define __pmd_free_tlb(tlb, pmd, address) \
+ tlb_remove_ptdesc((tlb), virt_to_ptdesc(pmd))
#if CONFIG_PGTABLE_LEVELS > 3
-#define __pud_free_tlb(tlb, pud, address) \
-do { \
- pagetable_dtor(virt_to_ptdesc(pud)); \
- tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pud)); \
-} while (0)
+#define __pud_free_tlb(tlb, pud, address) \
+ tlb_remove_ptdesc((tlb), virt_to_ptdesc(pud))
#endif
#endif
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index 5d6356eafffe..8a789c17acd8 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -31,6 +31,8 @@ struct thread_struct {
} thread;
} request;
+ void *segv_continue;
+
/* Contains variable sized FP registers */
struct pt_regs regs;
};
diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h
index 1d4b6bbc1b65..3a08f9029a3f 100644
--- a/arch/um/include/asm/uaccess.h
+++ b/arch/um/include/asm/uaccess.h
@@ -9,6 +9,7 @@
#include <asm/elf.h>
#include <linux/unaligned.h>
+#include <sysdep/faultinfo.h>
#define __under_task_size(addr, size) \
(((unsigned long) (addr) < TASK_SIZE) && \
@@ -44,19 +45,28 @@ static inline int __access_ok(const void __user *ptr, unsigned long size)
__access_ok_vsyscall(addr, size));
}
-/* no pagefaults for kernel addresses in um */
#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
- *((type *)dst) = get_unaligned((type *)(src)); \
- if (0) /* make sure the label looks used to the compiler */ \
+ int __faulted; \
+ \
+ ___backtrack_faulted(__faulted); \
+ if (__faulted) { \
+ *((type *)dst) = (type) 0; \
goto err_label; \
+ } \
+ *((type *)dst) = get_unaligned((type *)(src)); \
+ current->thread.segv_continue = NULL; \
} while (0)
#define __put_kernel_nofault(dst, src, type, err_label) \
do { \
- put_unaligned(*((type *)src), (type *)(dst)); \
- if (0) /* make sure the label looks used to the compiler */ \
+ int __faulted; \
+ \
+ ___backtrack_faulted(__faulted); \
+ if (__faulted) \
goto err_label; \
+ put_unaligned(*((type *)src), (type *)(dst)); \
+ current->thread.segv_continue = NULL; \
} while (0)
#endif
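
With the fault-recovery form above, probing arbitrary kernel addresses no longer needs the mincore()-based copy_from_kernel_nofault_allowed() check that this series deletes from arch/um/kernel/maccess.c. An illustrative caller of the generic API (nothing new to this patch):

static long demo_read_kernel_long(const void *addr, long *out)
{
	/* Returns -EFAULT instead of crashing if addr is unmapped */
	return copy_from_kernel_nofault(out, addr, sizeof(*out));
}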
diff --git a/arch/um/include/linux/time-internal.h b/arch/um/include/linux/time-internal.h
index b22226634ff6..138908b999d7 100644
--- a/arch/um/include/linux/time-internal.h
+++ b/arch/um/include/linux/time-internal.h
@@ -83,6 +83,8 @@ extern void time_travel_not_configured(void);
#define time_travel_del_event(...) time_travel_not_configured()
#endif /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
+extern unsigned long tt_extra_sched_jiffies;
+
/*
* Without CONFIG_UML_TIME_TRAVEL_SUPPORT this is a linker error if used,
* which is intentional since we really shouldn't link it in that case.
diff --git a/arch/um/include/shared/arch.h b/arch/um/include/shared/arch.h
index 880ee42a3329..cc398a21ad96 100644
--- a/arch/um/include/shared/arch.h
+++ b/arch/um/include/shared/arch.h
@@ -12,4 +12,6 @@ extern void arch_check_bugs(void);
extern int arch_fixup(unsigned long address, struct uml_pt_regs *regs);
extern void arch_examine_signal(int sig, struct uml_pt_regs *regs);
+void mc_set_rip(void *_mc, void *target);
+
#endif
diff --git a/arch/um/include/shared/as-layout.h b/arch/um/include/shared/as-layout.h
index ea65f151bf48..4f44dcce8a7c 100644
--- a/arch/um/include/shared/as-layout.h
+++ b/arch/um/include/shared/as-layout.h
@@ -50,7 +50,7 @@ extern int linux_main(int argc, char **argv, char **envp);
extern void uml_finishsetup(void);
struct siginfo;
-extern void (*sig_info[])(int, struct siginfo *si, struct uml_pt_regs *);
+extern void (*sig_info[])(int, struct siginfo *si, struct uml_pt_regs *, void *);
#endif
diff --git a/arch/um/include/shared/irq_user.h b/arch/um/include/shared/irq_user.h
index da0f6eea30d0..88835b52ae2b 100644
--- a/arch/um/include/shared/irq_user.h
+++ b/arch/um/include/shared/irq_user.h
@@ -15,7 +15,8 @@ enum um_irq_type {
};
struct siginfo;
-extern void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs);
+extern void sigio_handler(int sig, struct siginfo *unused_si,
+ struct uml_pt_regs *regs, void *mc);
void sigio_run_timetravel_handlers(void);
extern void free_irq_by_fd(int fd);
extern void deactivate_fd(int fd, int irqnum);
diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h
index f21dc8517538..00ca3e12fd9a 100644
--- a/arch/um/include/shared/kern_util.h
+++ b/arch/um/include/shared/kern_util.h
@@ -24,10 +24,12 @@ extern void free_stack(unsigned long stack, int order);
struct pt_regs;
extern void do_signal(struct pt_regs *regs);
extern void interrupt_end(void);
-extern void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs);
+extern void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs,
+ void *mc);
extern unsigned long segv(struct faultinfo fi, unsigned long ip,
- int is_user, struct uml_pt_regs *regs);
+ int is_user, struct uml_pt_regs *regs,
+ void *mc);
extern int handle_page_fault(unsigned long address, unsigned long ip,
int is_write, int is_user, int *code_out);
@@ -59,8 +61,10 @@ extern unsigned long from_irq_stack(int nested);
extern int singlestepping(void);
-extern void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs);
-extern void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs);
+extern void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs,
+ void *mc);
+extern void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs,
+ void *mc);
extern void fatal_sigsegv(void) __attribute__ ((noreturn));
void um_idle_sleep(void);
diff --git a/arch/um/include/shared/mem_user.h b/arch/um/include/shared/mem_user.h
index adfa08062f88..d4727efcf23d 100644
--- a/arch/um/include/shared/mem_user.h
+++ b/arch/um/include/shared/mem_user.h
@@ -47,7 +47,6 @@ extern int iomem_size;
#define ROUND_4M(n) ((((unsigned long) (n)) + (1 << 22)) & ~((1 << 22) - 1))
extern unsigned long find_iomem(char *driver, unsigned long *len_out);
-extern void mem_total_pages(unsigned long physmem, unsigned long iomem);
extern void setup_physmem(unsigned long start, unsigned long usable,
unsigned long len);
extern void map_memory(unsigned long virt, unsigned long phys,
diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
index 5babad8c5f75..152a60080d5b 100644
--- a/arch/um/include/shared/os.h
+++ b/arch/um/include/shared/os.h
@@ -213,7 +213,6 @@ extern int os_protect_memory(void *addr, unsigned long len,
extern int os_unmap_memory(void *addr, int len);
extern int os_drop_memory(void *addr, int length);
extern int can_drop_memory(void);
-extern int os_mincore(void *addr, unsigned long len);
void os_set_pdeathsig(void);
@@ -225,6 +224,11 @@ extern int run_helper_thread(int (*proc)(void *), void *arg,
unsigned int flags, unsigned long *stack_out);
extern int helper_wait(int pid);
+struct os_helper_thread;
+int os_run_helper_thread(struct os_helper_thread **td_out,
+ void *(*routine)(void *), void *arg);
+void os_kill_helper_thread(struct os_helper_thread *td);
+void os_fix_helper_thread_signals(void);
/* umid.c */
extern int umid_file_name(char *name, char *buf, int len);
@@ -310,7 +314,7 @@ extern void um_irqs_resume(void);
extern int add_sigio_fd(int fd);
extern int ignore_sigio_fd(int fd);
extern void maybe_sigio_broken(int fd);
-extern void sigio_broken(int fd);
+extern void sigio_broken(void);
/*
* unlocked versions for IRQ controller code.
*
diff --git a/arch/um/include/shared/sigio.h b/arch/um/include/shared/sigio.h
index e60c8b227844..c6c2edce1f6d 100644
--- a/arch/um/include/shared/sigio.h
+++ b/arch/um/include/shared/sigio.h
@@ -6,7 +6,6 @@
#ifndef __SIGIO_H__
#define __SIGIO_H__
-extern int write_sigio_irq(int fd);
extern void sigio_lock(void);
extern void sigio_unlock(void);
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index f8567b933ffa..4df1cd0d2017 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -17,7 +17,7 @@ extra-y := vmlinux.lds
obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
physmem.o process.o ptrace.o reboot.o sigio.o \
signal.o sysrq.o time.o tlb.o trap.o \
- um_arch.o umid.o maccess.o kmsg_dump.o capflags.o skas/
+ um_arch.o umid.o kmsg_dump.o capflags.o skas/
obj-y += load_file.o
obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index a4991746f5ea..abe8f30a521c 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -236,7 +236,8 @@ static void _sigio_handler(struct uml_pt_regs *regs,
free_irqs();
}
-void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
+void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs,
+ void *mc)
{
preempt_disable();
_sigio_handler(regs, irqs_suspended);
diff --git a/arch/um/kernel/maccess.c b/arch/um/kernel/maccess.c
deleted file mode 100644
index 8ccd56813f68..000000000000
--- a/arch/um/kernel/maccess.c
+++ /dev/null
@@ -1,19 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2013 Richard Weinberger <richrd@nod.at>
- */
-
-#include <linux/uaccess.h>
-#include <linux/kernel.h>
-#include <os.h>
-
-bool copy_from_kernel_nofault_allowed(const void *src, size_t size)
-{
- void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE);
-
- if ((unsigned long)src < PAGE_SIZE || size <= 0)
- return false;
- if (os_mincore(psrc, size + src - psrc) <= 0)
- return false;
- return true;
-}
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index befed230aac2..76bec7de81b5 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -9,6 +9,8 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
+#include <linux/init.h>
+#include <asm/sections.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <as-layout.h>
@@ -54,7 +56,7 @@ int kmalloc_ok = 0;
/* Used during early boot */
static unsigned long brk_end;
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
/* clear the zero-page */
memset(empty_zero_page, 0, PAGE_SIZE);
@@ -66,10 +68,12 @@ void __init mem_init(void)
map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
memblock_free((void *)brk_end, uml_reserved - brk_end);
uml_reserved = brk_end;
-
- /* this will put all low memory onto the freelists */
- memblock_free_all();
+ min_low_pfn = PFN_UP(__pa(uml_reserved));
max_pfn = max_low_pfn;
+}
+
+void __init mem_init(void)
+{
kmalloc_ok = 1;
}
@@ -241,3 +245,11 @@ static const pgprot_t protection_map[16] = {
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED
};
DECLARE_VM_GET_PAGE_PROT
+
+void mark_rodata_ro(void)
+{
+ unsigned long rodata_start = PFN_ALIGN(__start_rodata);
+ unsigned long rodata_end = PFN_ALIGN(__end_rodata);
+
+ os_protect_memory((void *)rodata_start, rodata_end - rodata_start, 1, 0, 0);
+}
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
index a74f17b033c4..af02b5f9911d 100644
--- a/arch/um/kernel/physmem.c
+++ b/arch/um/kernel/physmem.c
@@ -22,18 +22,6 @@ static int physmem_fd = -1;
unsigned long high_physmem;
EXPORT_SYMBOL(high_physmem);
-void __init mem_total_pages(unsigned long physmem, unsigned long iomem)
-{
- unsigned long phys_pages, iomem_pages, total_pages;
-
- phys_pages = physmem >> PAGE_SHIFT;
- iomem_pages = iomem >> PAGE_SHIFT;
-
- total_pages = phys_pages + iomem_pages;
-
- max_mapnr = total_pages;
-}
-
void map_memory(unsigned long virt, unsigned long phys, unsigned long len,
int r, int w, int x)
{
diff --git a/arch/um/kernel/sigio.c b/arch/um/kernel/sigio.c
index 5085a50c3b8c..4fc04742048a 100644
--- a/arch/um/kernel/sigio.c
+++ b/arch/um/kernel/sigio.c
@@ -8,32 +8,6 @@
#include <os.h>
#include <sigio.h>
-/* Protected by sigio_lock() called from write_sigio_workaround */
-static int sigio_irq_fd = -1;
-
-static irqreturn_t sigio_interrupt(int irq, void *data)
-{
- char c;
-
- os_read_file(sigio_irq_fd, &c, sizeof(c));
- return IRQ_HANDLED;
-}
-
-int write_sigio_irq(int fd)
-{
- int err;
-
- err = um_request_irq(SIGIO_WRITE_IRQ, fd, IRQ_READ, sigio_interrupt,
- 0, "write sigio", NULL);
- if (err < 0) {
- printk(KERN_ERR "write_sigio_irq : um_request_irq failed, "
- "err = %d\n", err);
- return -1;
- }
- sigio_irq_fd = fd;
- return 0;
-}
-
/* These are called from os-Linux/sigio.c to protect its pollfds arrays. */
static DEFINE_MUTEX(sigio_mutex);
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index b09e85279d2b..a5beaea2967e 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -31,6 +31,17 @@ void handle_syscall(struct uml_pt_regs *r)
goto out;
syscall = UPT_SYSCALL_NR(r);
+
+ /*
+ * If no time passes, then sched_yield may not actually yield, causing
+ * broken spinlock implementations in userspace (ASAN) to hang for long
+ * periods of time.
+ */
+ if ((time_travel_mode == TT_MODE_INFCPU ||
+ time_travel_mode == TT_MODE_EXTERNAL) &&
+ syscall == __NR_sched_yield)
+ tt_extra_sched_jiffies += 1;
+
if (syscall >= 0 && syscall < __NR_syscalls) {
unsigned long ret = EXECUTE_SYSCALL(syscall, regs);
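
The failure mode addressed by the hunk above, seen from guest userspace: a yield-based spin loop (ASAN's locks behave like this) never completes if sched_yield() consumes no virtual time, since the lock owner is never scheduled. An illustrative shape of such a loop, assuming a simple test-and-set lock:

static void demo_spin_lock(volatile char *lock)
{
	while (__atomic_test_and_set(lock, __ATOMIC_ACQUIRE))
		sched_yield();	/* progress requires virtual time to advance */
}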
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index cdaee3e94273..ce073150dc20 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -16,6 +16,7 @@
#include <kern_util.h>
#include <os.h>
#include <skas.h>
+#include <arch.h>
/*
* Note this is constrained to return 0, -EFAULT, -EACCES, -ENOMEM by
@@ -175,12 +176,14 @@ void fatal_sigsegv(void)
* @sig: the signal number
* @unused_si: the signal info struct; unused in this handler
* @regs: the ptrace register information
+ * @mc: the mcontext of the signal
*
* The handler first extracts the faultinfo from the UML ptrace regs struct.
 * If the fault did not happen in a UML userspace process, bad_segv is called.
 * Otherwise the fault happened in a cloned userspace process, so handle it.
*/
-void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
+void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs,
+ void *mc)
{
struct faultinfo * fi = UPT_FAULTINFO(regs);
@@ -189,7 +192,7 @@ void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
bad_segv(*fi, UPT_IP(regs));
return;
}
- segv(*fi, UPT_IP(regs), UPT_IS_USER(regs), regs);
+ segv(*fi, UPT_IP(regs), UPT_IS_USER(regs), regs, mc);
}
/*
@@ -199,7 +202,7 @@ void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
* give us bad data!
*/
unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
- struct uml_pt_regs *regs)
+ struct uml_pt_regs *regs, void *mc)
{
int si_code;
int err;
@@ -223,6 +226,19 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
goto out;
}
else if (current->mm == NULL) {
+ if (current->pagefault_disabled) {
+ if (!mc) {
+ show_regs(container_of(regs, struct pt_regs, regs));
+ panic("Segfault with pagefaults disabled but no mcontext");
+ }
+ if (!current->thread.segv_continue) {
+ show_regs(container_of(regs, struct pt_regs, regs));
+ panic("Segfault without recovery target");
+ }
+ mc_set_rip(mc, current->thread.segv_continue);
+ current->thread.segv_continue = NULL;
+ goto out;
+ }
show_regs(container_of(regs, struct pt_regs, regs));
panic("Segfault with no mm");
}
@@ -274,7 +290,8 @@ out:
return 0;
}
-void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs)
+void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs,
+ void *mc)
{
int code, err;
if (!UPT_IS_USER(regs)) {
@@ -302,7 +319,8 @@ void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs)
}
}
-void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
+void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs,
+ void *mc)
{
do_IRQ(WINCH_IRQ, regs);
}
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 8be91974e786..d4b3b6742ec8 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -12,6 +12,7 @@
#include <linux/panic_notifier.h>
#include <linux/seq_file.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
@@ -78,7 +79,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "model name\t: UML\n");
seq_printf(m, "mode\t\t: skas\n");
seq_printf(m, "host\t\t: %s\n", host_info);
- seq_printf(m, "fpu\t\t: %s\n", cpu_has(&boot_cpu_data, X86_FEATURE_FPU) ? "yes" : "no");
+ seq_printf(m, "fpu\t\t: %s\n", str_yes_no(cpu_has(&boot_cpu_data, X86_FEATURE_FPU)));
seq_printf(m, "flags\t\t:");
for (i = 0; i < 32*NCAPINTS; i++)
if (cpu_has(&boot_cpu_data, i) && (x86_cap_flags[i] != NULL))
@@ -385,7 +386,6 @@ int __init linux_main(int argc, char **argv, char **envp)
high_physmem = uml_physmem + physmem_size;
end_iomem = high_physmem + iomem_size;
- high_memory = (void *) end_iomem;
start_vm = VMALLOC_START;
@@ -419,7 +419,6 @@ void __init setup_arch(char **cmdline_p)
stack_protections((unsigned long) init_task.stack);
setup_physmem(uml_physmem, uml_reserved, physmem_size);
- mem_total_pages(physmem_size, iomem_size);
uml_dtb_init();
read_initrd();
diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
index 3cb8ac63be6e..89c2ad2a4e3a 100644
--- a/arch/um/os-Linux/helper.c
+++ b/arch/um/os-Linux/helper.c
@@ -8,6 +8,7 @@
#include <unistd.h>
#include <errno.h>
#include <sched.h>
+#include <pthread.h>
#include <linux/limits.h>
#include <sys/socket.h>
#include <sys/wait.h>
@@ -121,6 +122,10 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
unsigned long stack, sp;
int pid, status, err;
+ /* To share memory space, use os_run_helper_thread() instead. */
+ if (flags & CLONE_VM)
+ return -EINVAL;
+
stack = alloc_stack(0, __uml_cant_sleep());
if (stack == 0)
return -ENOMEM;
@@ -167,3 +172,65 @@ int helper_wait(int pid)
} else
return 0;
}
+
+struct os_helper_thread {
+ pthread_t handle;
+};
+
+int os_run_helper_thread(struct os_helper_thread **td_out,
+ void *(*routine)(void *), void *arg)
+{
+ struct os_helper_thread *td;
+ sigset_t sigset, oset;
+ int err, flags;
+
+ flags = __uml_cant_sleep() ? UM_GFP_ATOMIC : UM_GFP_KERNEL;
+ td = uml_kmalloc(sizeof(*td), flags);
+ if (!td)
+ return -ENOMEM;
+
+ sigfillset(&sigset);
+ if (sigprocmask(SIG_SETMASK, &sigset, &oset) < 0) {
+ err = -errno;
+ kfree(td);
+ return err;
+ }
+
+ err = pthread_create(&td->handle, NULL, routine, arg);
+
+ if (sigprocmask(SIG_SETMASK, &oset, NULL) < 0)
+ panic("Failed to restore the signal mask: %d", errno);
+
+ if (err != 0)
+ kfree(td);
+ else
+ *td_out = td;
+
+ return -err;
+}
+
+void os_kill_helper_thread(struct os_helper_thread *td)
+{
+ pthread_cancel(td->handle);
+ pthread_join(td->handle, NULL);
+ kfree(td);
+}
+
+void os_fix_helper_thread_signals(void)
+{
+ sigset_t sigset;
+
+ sigemptyset(&sigset);
+
+ sigaddset(&sigset, SIGWINCH);
+ sigaddset(&sigset, SIGPIPE);
+ sigaddset(&sigset, SIGPROF);
+ sigaddset(&sigset, SIGINT);
+ sigaddset(&sigset, SIGTERM);
+ sigaddset(&sigset, SIGCHLD);
+ sigaddset(&sigset, SIGALRM);
+ sigaddset(&sigset, SIGIO);
+ sigaddset(&sigset, SIGUSR1);
+
+ pthread_sigmask(SIG_SETMASK, &sigset, NULL);
+}
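
Usage pattern for the new pthread-based helper API, mirroring what the sigio rework later in this series does (names here are illustrative):

static struct os_helper_thread *demo_td;

static void *demo_thread(void *arg)
{
	/* Keep UML's async signals blocked in this thread */
	os_fix_helper_thread_signals();

	for (;;)
		pause();
	return NULL;
}

static int demo_start(void)
{
	return os_run_helper_thread(&demo_td, demo_thread, NULL);
}

static void demo_stop(void)
{
	os_kill_helper_thread(demo_td);	/* cancel, join, free */
	demo_td = NULL;
}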
diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
index 9f086f939420..184566edeee9 100644
--- a/arch/um/os-Linux/process.c
+++ b/arch/um/os-Linux/process.c
@@ -142,57 +142,6 @@ out:
return ok;
}
-static int os_page_mincore(void *addr)
-{
- char vec[2];
- int ret;
-
- ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
- if (ret < 0) {
- if (errno == ENOMEM || errno == EINVAL)
- return 0;
- else
- return -errno;
- }
-
- return vec[0] & 1;
-}
-
-int os_mincore(void *addr, unsigned long len)
-{
- char *vec;
- int ret, i;
-
- if (len <= UM_KERN_PAGE_SIZE)
- return os_page_mincore(addr);
-
- vec = calloc(1, (len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE);
- if (!vec)
- return -ENOMEM;
-
- ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
- if (ret < 0) {
- if (errno == ENOMEM || errno == EINVAL)
- ret = 0;
- else
- ret = -errno;
-
- goto out;
- }
-
- for (i = 0; i < ((len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE); i++) {
- if (!(vec[i] & 1)) {
- ret = 0;
- goto out;
- }
- }
-
- ret = 1;
-out:
- free(vec);
- return ret;
-}
-
void init_new_thread_signals(void)
{
set_handler(SIGSEGV);
diff --git a/arch/um/os-Linux/sigio.c b/arch/um/os-Linux/sigio.c
index 9aac8def4d63..a05a6ecee756 100644
--- a/arch/um/os-Linux/sigio.c
+++ b/arch/um/os-Linux/sigio.c
@@ -11,6 +11,7 @@
#include <sched.h>
#include <signal.h>
#include <string.h>
+#include <sys/epoll.h>
#include <kern_util.h>
#include <init.h>
#include <os.h>
@@ -21,184 +22,51 @@
* Protected by sigio_lock(), also used by sigio_cleanup, which is an
* exitcall.
*/
-static int write_sigio_pid = -1;
-static unsigned long write_sigio_stack;
+static struct os_helper_thread *write_sigio_td;
-/*
- * These arrays are initialized before the sigio thread is started, and
- * the descriptors closed after it is killed. So, it can't see them change.
- * On the UML side, they are changed under the sigio_lock.
- */
-#define SIGIO_FDS_INIT {-1, -1}
-
-static int write_sigio_fds[2] = SIGIO_FDS_INIT;
-static int sigio_private[2] = SIGIO_FDS_INIT;
+static int epollfd = -1;
-struct pollfds {
- struct pollfd *poll;
- int size;
- int used;
-};
+#define MAX_EPOLL_EVENTS 64
-/*
- * Protected by sigio_lock(). Used by the sigio thread, but the UML thread
- * synchronizes with it.
- */
-static struct pollfds current_poll;
-static struct pollfds next_poll;
-static struct pollfds all_sigio_fds;
+static struct epoll_event epoll_events[MAX_EPOLL_EVENTS];
-static int write_sigio_thread(void *unused)
+static void *write_sigio_thread(void *unused)
{
- struct pollfds *fds, tmp;
- struct pollfd *p;
- int i, n, respond_fd;
- char c;
-
- os_set_pdeathsig();
- os_fix_helper_signals();
- fds = &current_poll;
+ int pid = getpid();
+ int r;
+
+ os_fix_helper_thread_signals();
+
while (1) {
- n = poll(fds->poll, fds->used, -1);
- if (n < 0) {
+ r = epoll_wait(epollfd, epoll_events, MAX_EPOLL_EVENTS, -1);
+ if (r < 0) {
if (errno == EINTR)
continue;
- printk(UM_KERN_ERR "write_sigio_thread : poll returned "
- "%d, errno = %d\n", n, errno);
+ printk(UM_KERN_ERR "%s: epoll_wait failed, errno = %d\n",
+ __func__, errno);
}
- for (i = 0; i < fds->used; i++) {
- p = &fds->poll[i];
- if (p->revents == 0)
- continue;
- if (p->fd == sigio_private[1]) {
- CATCH_EINTR(n = read(sigio_private[1], &c,
- sizeof(c)));
- if (n != sizeof(c))
- printk(UM_KERN_ERR
- "write_sigio_thread : "
- "read on socket failed, "
- "err = %d\n", errno);
- tmp = current_poll;
- current_poll = next_poll;
- next_poll = tmp;
- respond_fd = sigio_private[1];
- }
- else {
- respond_fd = write_sigio_fds[1];
- fds->used--;
- memmove(&fds->poll[i], &fds->poll[i + 1],
- (fds->used - i) * sizeof(*fds->poll));
- }
-
- CATCH_EINTR(n = write(respond_fd, &c, sizeof(c)));
- if (n != sizeof(c))
- printk(UM_KERN_ERR "write_sigio_thread : "
- "write on socket failed, err = %d\n",
- errno);
- }
- }
- return 0;
-}
-
-static int need_poll(struct pollfds *polls, int n)
-{
- struct pollfd *new;
-
- if (n <= polls->size)
- return 0;
-
- new = uml_kmalloc(n * sizeof(struct pollfd), UM_GFP_ATOMIC);
- if (new == NULL) {
- printk(UM_KERN_ERR "need_poll : failed to allocate new "
- "pollfds\n");
- return -ENOMEM;
+ CATCH_EINTR(r = tgkill(pid, pid, SIGIO));
+ if (r < 0)
+ printk(UM_KERN_ERR "%s: tgkill failed, errno = %d\n",
+ __func__, errno);
}
- memcpy(new, polls->poll, polls->used * sizeof(struct pollfd));
- kfree(polls->poll);
-
- polls->poll = new;
- polls->size = n;
- return 0;
-}
-
-/*
- * Must be called with sigio_lock held, because it's needed by the marked
- * critical section.
- */
-static void update_thread(void)
-{
- unsigned long flags;
- int n;
- char c;
-
- flags = um_set_signals_trace(0);
- CATCH_EINTR(n = write(sigio_private[0], &c, sizeof(c)));
- if (n != sizeof(c)) {
- printk(UM_KERN_ERR "update_thread : write failed, err = %d\n",
- errno);
- goto fail;
- }
-
- CATCH_EINTR(n = read(sigio_private[0], &c, sizeof(c)));
- if (n != sizeof(c)) {
- printk(UM_KERN_ERR "update_thread : read failed, err = %d\n",
- errno);
- goto fail;
- }
-
- um_set_signals_trace(flags);
- return;
- fail:
- /* Critical section start */
- if (write_sigio_pid != -1) {
- os_kill_process(write_sigio_pid, 1);
- free_stack(write_sigio_stack, 0);
- }
- write_sigio_pid = -1;
- close(sigio_private[0]);
- close(sigio_private[1]);
- close(write_sigio_fds[0]);
- close(write_sigio_fds[1]);
- /* Critical section end */
- um_set_signals_trace(flags);
+ return NULL;
}
int __add_sigio_fd(int fd)
{
- struct pollfd *p;
- int err, i, n;
-
- for (i = 0; i < all_sigio_fds.used; i++) {
- if (all_sigio_fds.poll[i].fd == fd)
- break;
- }
- if (i == all_sigio_fds.used)
- return -ENOSPC;
-
- p = &all_sigio_fds.poll[i];
-
- for (i = 0; i < current_poll.used; i++) {
- if (current_poll.poll[i].fd == fd)
- return 0;
- }
-
- n = current_poll.used;
- err = need_poll(&next_poll, n + 1);
- if (err)
- return err;
-
- memcpy(next_poll.poll, current_poll.poll,
- current_poll.used * sizeof(struct pollfd));
- next_poll.poll[n] = *p;
- next_poll.used = n + 1;
- update_thread();
-
- return 0;
+ struct epoll_event event = {
+ .data.fd = fd,
+ .events = EPOLLIN | EPOLLET,
+ };
+ int r;
+
+ CATCH_EINTR(r = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &event));
+ return r < 0 ? -errno : 0;
}
-
int add_sigio_fd(int fd)
{
int err;
@@ -212,38 +80,11 @@ int add_sigio_fd(int fd)
int __ignore_sigio_fd(int fd)
{
- struct pollfd *p;
- int err, i, n = 0;
-
- /*
- * This is called from exitcalls elsewhere in UML - if
- * sigio_cleanup has already run, then update_thread will hang
- * or fail because the thread is no longer running.
- */
- if (write_sigio_pid == -1)
- return -EIO;
-
- for (i = 0; i < current_poll.used; i++) {
- if (current_poll.poll[i].fd == fd)
- break;
- }
- if (i == current_poll.used)
- return -ENOENT;
-
- err = need_poll(&next_poll, current_poll.used - 1);
- if (err)
- return err;
-
- for (i = 0; i < current_poll.used; i++) {
- p = &current_poll.poll[i];
- if (p->fd != fd)
- next_poll.poll[n++] = *p;
- }
- next_poll.used = current_poll.used - 1;
-
- update_thread();
+ struct epoll_event event;
+ int r;
- return 0;
+ CATCH_EINTR(r = epoll_ctl(epollfd, EPOLL_CTL_DEL, fd, &event));
+ return r < 0 ? -errno : 0;
}
int ignore_sigio_fd(int fd)
@@ -257,125 +98,37 @@ int ignore_sigio_fd(int fd)
return err;
}
-static struct pollfd *setup_initial_poll(int fd)
-{
- struct pollfd *p;
-
- p = uml_kmalloc(sizeof(struct pollfd), UM_GFP_KERNEL);
- if (p == NULL) {
- printk(UM_KERN_ERR "setup_initial_poll : failed to allocate "
- "poll\n");
- return NULL;
- }
- *p = ((struct pollfd) { .fd = fd,
- .events = POLLIN,
- .revents = 0 });
- return p;
-}
-
static void write_sigio_workaround(void)
{
- struct pollfd *p;
int err;
- int l_write_sigio_fds[2];
- int l_sigio_private[2];
- int l_write_sigio_pid;
- /* We call this *tons* of times - and most ones we must just fail. */
sigio_lock();
- l_write_sigio_pid = write_sigio_pid;
- sigio_unlock();
-
- if (l_write_sigio_pid != -1)
- return;
+ if (write_sigio_td)
+ goto out;
- err = os_pipe(l_write_sigio_fds, 1, 1);
- if (err < 0) {
- printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 1 failed, "
- "err = %d\n", -err);
- return;
+ epollfd = epoll_create(MAX_EPOLL_EVENTS);
+ if (epollfd < 0) {
+ printk(UM_KERN_ERR "%s: epoll_create failed, errno = %d\n",
+ __func__, errno);
+ goto out;
}
- err = os_pipe(l_sigio_private, 1, 1);
+
+ err = os_run_helper_thread(&write_sigio_td, write_sigio_thread, NULL);
if (err < 0) {
- printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 2 failed, "
- "err = %d\n", -err);
- goto out_close1;
+ printk(UM_KERN_ERR "%s: os_run_helper_thread failed, errno = %d\n",
+ __func__, -err);
+ close(epollfd);
+ epollfd = -1;
+ goto out;
}
- p = setup_initial_poll(l_sigio_private[1]);
- if (!p)
- goto out_close2;
-
- sigio_lock();
-
- /*
- * Did we race? Don't try to optimize this, please, it's not so likely
- * to happen, and no more than once at the boot.
- */
- if (write_sigio_pid != -1)
- goto out_free;
-
- current_poll = ((struct pollfds) { .poll = p,
- .used = 1,
- .size = 1 });
-
- if (write_sigio_irq(l_write_sigio_fds[0]))
- goto out_clear_poll;
-
- memcpy(write_sigio_fds, l_write_sigio_fds, sizeof(l_write_sigio_fds));
- memcpy(sigio_private, l_sigio_private, sizeof(l_sigio_private));
-
- write_sigio_pid = run_helper_thread(write_sigio_thread, NULL,
- CLONE_FILES | CLONE_VM,
- &write_sigio_stack);
-
- if (write_sigio_pid < 0)
- goto out_clear;
-
- sigio_unlock();
- return;
-
-out_clear:
- write_sigio_pid = -1;
- write_sigio_fds[0] = -1;
- write_sigio_fds[1] = -1;
- sigio_private[0] = -1;
- sigio_private[1] = -1;
-out_clear_poll:
- current_poll = ((struct pollfds) { .poll = NULL,
- .size = 0,
- .used = 0 });
-out_free:
+out:
sigio_unlock();
- kfree(p);
-out_close2:
- close(l_sigio_private[0]);
- close(l_sigio_private[1]);
-out_close1:
- close(l_write_sigio_fds[0]);
- close(l_write_sigio_fds[1]);
}
-void sigio_broken(int fd)
+void sigio_broken(void)
{
- int err;
-
write_sigio_workaround();
-
- sigio_lock();
- err = need_poll(&all_sigio_fds, all_sigio_fds.used + 1);
- if (err) {
- printk(UM_KERN_ERR "maybe_sigio_broken - failed to add pollfd "
- "for descriptor %d\n", fd);
- goto out;
- }
-
- all_sigio_fds.poll[all_sigio_fds.used++] =
- ((struct pollfd) { .fd = fd,
- .events = POLLIN,
- .revents = 0 });
-out:
- sigio_unlock();
}
/* Changed during early boot */
@@ -389,17 +142,16 @@ void maybe_sigio_broken(int fd)
if (pty_output_sigio)
return;
- sigio_broken(fd);
+ sigio_broken();
}
static void sigio_cleanup(void)
{
- if (write_sigio_pid == -1)
+ if (!write_sigio_td)
return;
- os_kill_process(write_sigio_pid, 1);
- free_stack(write_sigio_stack, 0);
- write_sigio_pid = -1;
+ os_kill_helper_thread(write_sigio_td);
+ write_sigio_td = NULL;
}
__uml_exitcall(sigio_cleanup);
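
Reduced to a standalone host-side sketch, the new mechanism is: one epoll instance collects readiness for every SIGIO-broken descriptor, and a helper thread converts each wakeup into a SIGIO aimed at the UML kernel thread (illustrative, minus error handling):

#include <signal.h>
#include <unistd.h>
#include <sys/epoll.h>

static void demo_sigio_pump(int epfd)
{
	struct epoll_event ev;
	pid_t pid = getpid();

	for (;;) {
		if (epoll_wait(epfd, &ev, 1, -1) < 0)
			continue;		/* e.g. EINTR */
		tgkill(pid, pid, SIGIO);	/* wake the main thread's SIGIO handler */
	}
}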
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index 9ea7269ffb77..e71e5b4878d1 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -21,7 +21,7 @@
#include <sys/ucontext.h>
#include <timetravel.h>
-void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
+void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *, void *mc) = {
[SIGTRAP] = relay_signal,
[SIGFPE] = relay_signal,
[SIGILL] = relay_signal,
@@ -47,7 +47,7 @@ static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
if ((sig != SIGIO) && (sig != SIGWINCH))
unblock_signals_trace();
- (*sig_info[sig])(sig, si, &r);
+ (*sig_info[sig])(sig, si, &r, mc);
errno = save_errno;
}
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index e2f8f156402f..ae2aea062f06 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -166,7 +166,7 @@ static void get_skas_faultinfo(int pid, struct faultinfo *fi)
static void handle_segv(int pid, struct uml_pt_regs *regs)
{
get_skas_faultinfo(pid, &regs->faultinfo);
- segv(regs->faultinfo, 0, 1, NULL);
+ segv(regs->faultinfo, 0, 1, NULL, NULL);
}
static void handle_trap(int pid, struct uml_pt_regs *regs)
@@ -525,7 +525,7 @@ void userspace(struct uml_pt_regs *regs)
get_skas_faultinfo(pid,
&regs->faultinfo);
(*sig_info[SIGSEGV])(SIGSEGV, (struct siginfo *)&si,
- regs);
+ regs, NULL);
}
else handle_segv(pid, regs);
break;
@@ -533,7 +533,7 @@ void userspace(struct uml_pt_regs *regs)
handle_trap(pid, regs);
break;
case SIGTRAP:
- relay_signal(SIGTRAP, (struct siginfo *)&si, regs);
+ relay_signal(SIGTRAP, (struct siginfo *)&si, regs, NULL);
break;
case SIGALRM:
break;
@@ -543,7 +543,7 @@ void userspace(struct uml_pt_regs *regs)
case SIGFPE:
case SIGWINCH:
block_signals_trace();
- (*sig_info[sig])(sig, (struct siginfo *)&si, regs);
+ (*sig_info[sig])(sig, (struct siginfo *)&si, regs, NULL);
unblock_signals_trace();
break;
default:
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 15f346f02af0..85ba2e187571 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -26,6 +26,8 @@ config X86_64
depends on 64BIT
# Options that are inherently 64-bit kernel only:
select ARCH_HAS_GIGANTIC_PAGE
+ select ARCH_HAS_PTDUMP
+ select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
select ARCH_SUPPORTS_PER_VMA_LOCK
select ARCH_SUPPORTS_HUGE_PFNMAP if TRANSPARENT_HUGEPAGE
@@ -148,6 +150,7 @@ config X86
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANT_OPTIMIZE_DAX_VMEMMAP if X86_64
select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP if X86_64
+ select ARCH_WANT_HUGETLB_VMEMMAP_PREINIT if X86_64
select ARCH_WANTS_THP_SWAP if X86_64
select ARCH_HAS_PARANOID_L1D_FLUSH
select BUILDTIME_TABLE_SORT
@@ -176,7 +179,6 @@ config X86
select GENERIC_IRQ_RESERVATION_MODE
select GENERIC_IRQ_SHOW
select GENERIC_PENDING_IRQ if SMP
- select GENERIC_PTDUMP
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
select GENERIC_GETTIMEOFDAY
@@ -888,6 +890,7 @@ config INTEL_TDX_GUEST
depends on X86_64 && CPU_SUP_INTEL
depends on X86_X2APIC
depends on EFI_STUB
+ depends on PARAVIRT
select ARCH_HAS_CC_PLATFORM
select X86_MEM_ENCRYPT
select X86_MCE
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 1eb4d23cdaae..c95c3aaadf97 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -59,7 +59,7 @@ config EARLY_PRINTK_USB_XDBC
config EFI_PGT_DUMP
bool "Dump the EFI pagetable"
depends on EFI
- select PTDUMP_CORE
+ select PTDUMP
help
Enable this if you want to dump the EFI page table before
enabling virtual mode. This can be used to debug miscellaneous
diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
index a46b1397ad01..c86cbd9cbba3 100644
--- a/arch/x86/Makefile.um
+++ b/arch/x86/Makefile.um
@@ -7,12 +7,13 @@ core-y += arch/x86/crypto/
# GCC versions < 11. See:
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99652
#
-ifeq ($(CONFIG_CC_IS_CLANG),y)
-KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
-KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
+ifeq ($(call gcc-min-version, 110000)$(CONFIG_CC_IS_CLANG),y)
+KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2
endif
+KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
+
ifeq ($(CONFIG_X86_32),y)
START := 0x8048000
diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index 7772b01ab738..edab6d6049be 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -14,6 +14,7 @@
#include <asm/ia32.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
+#include <asm/paravirt_types.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/traps.h>
@@ -392,13 +393,21 @@ static int handle_halt(struct ve_info *ve)
{
const bool irq_disabled = irqs_disabled();
+ /*
+ * HLT with IRQs enabled is unsafe, as an IRQ that is intended to be a
+ * wake event may be consumed before requesting HLT emulation, leaving
+ * the vCPU blocking indefinitely.
+ */
+ if (WARN_ONCE(!irq_disabled, "HLT emulation with IRQs enabled"))
+ return -EIO;
+
if (__halt(irq_disabled))
return -EIO;
return ve_instr_len(ve);
}
-void __cpuidle tdx_safe_halt(void)
+void __cpuidle tdx_halt(void)
{
const bool irq_disabled = false;
@@ -409,6 +418,16 @@ void __cpuidle tdx_safe_halt(void)
WARN_ONCE(1, "HLT instruction emulation failed\n");
}
+static void __cpuidle tdx_safe_halt(void)
+{
+ tdx_halt();
+ /*
+ * "__cpuidle" section doesn't support instrumentation, so stick
+ * with raw_* variant that avoids tracing hooks.
+ */
+ raw_local_irq_enable();
+}
+
static int read_msr(struct pt_regs *regs, struct ve_info *ve)
{
struct tdx_module_args args = {
@@ -1110,6 +1129,19 @@ void __init tdx_early_init(void)
x86_platform.guest.enc_kexec_finish = tdx_kexec_finish;
/*
+ * Avoid "sti;hlt" execution in TDX guests as HLT induces a #VE that
+ * will enable interrupts before HLT TDCALL invocation if executed
+ * in STI-shadow, possibly resulting in missed wakeup events.
+ *
+ * Modify all possible HLT execution paths to use TDX specific routines
+ * that directly execute TDCALL and toggle the interrupt state as
+	 * needed after TDCALL completion. Besides making halt execution
+	 * reliable, this also reduces the number of HLT-induced #VEs.
+ */
+ pv_ops.irq.safe_halt = tdx_safe_halt;
+ pv_ops.irq.halt = tdx_halt;
+
+ /*
* TDX intercepts the RDMSR to read the X2APIC ID in the parallel
* bringup low level code. That raises #VE which cannot be handled
* there.
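The STI-shadow hazard that motivates these overrides is easiest to see against
the bare-metal path. A minimal illustrative sketch (not part of the patch; the
helper name is hypothetical):

    /*
     * On bare metal the one-instruction STI shadow makes "sti; hlt" atomic
     * with respect to wakeup IRQs: no interrupt can be delivered between
     * the two instructions.
     */
    static __always_inline void safe_halt_sketch(void)
    {
            asm volatile("sti; hlt" ::: "memory");
    }

In a TDX guest the HLT instead raises a #VE, and by the time the #VE handler
issues the HLT TDCALL the STI shadow is long gone: a wakeup IRQ can be consumed
in between, leaving the vCPU halted with no pending wake event. tdx_safe_halt()
therefore performs the TDCALL with IRQs disabled and only enables them
afterwards.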
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 9518bf1ddf35..adb299d3b6a1 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -162,7 +162,8 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
text_start,
image->size,
VM_READ|VM_EXEC|
- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
+ VM_SEALED_SYSMAP,
&vdso_mapping);
if (IS_ERR(vma)) {
@@ -181,7 +182,7 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
VDSO_VCLOCK_PAGES_START(addr),
VDSO_NR_VCLOCK_PAGES * PAGE_SIZE,
VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
- VM_PFNMAP,
+ VM_PFNMAP|VM_SEALED_SYSMAP,
&vvar_vclock_mapping);
if (IS_ERR(vma)) {
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
index b5982b94bdba..cbc6157f0b4b 100644
--- a/arch/x86/include/asm/arch_hweight.h
+++ b/arch/x86/include/asm/arch_hweight.h
@@ -16,7 +16,8 @@ static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
unsigned int res;
- asm_inline (ALTERNATIVE("call __sw_hweight32",
+ asm_inline (ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE
+ "call __sw_hweight32",
"popcntl %[val], %[cnt]", X86_FEATURE_POPCNT)
: [cnt] "=" REG_OUT (res), ASM_CALL_CONSTRAINT
: [val] REG_IN (w));
@@ -45,7 +46,8 @@ static __always_inline unsigned long __arch_hweight64(__u64 w)
{
unsigned long res;
- asm_inline (ALTERNATIVE("call __sw_hweight64",
+ asm_inline (ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE
+ "call __sw_hweight64",
"popcntq %[val], %[cnt]", X86_FEATURE_POPCNT)
: [cnt] "=" REG_OUT (res), ASM_CALL_CONSTRAINT
: [val] REG_IN (w));
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 731ee7cc40a5..585bdadba47d 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -69,9 +69,6 @@ extern unsigned long highstart_pfn, highend_pfn;
arch_flush_lazy_mmu_mode(); \
} while (0)
-extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
- unsigned long end_pfn);
-
#endif /* __KERNEL__ */
#endif /* _ASM_X86_HIGHMEM_H */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 1a0dc2b2bf5b..e889c3bab5a2 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -170,7 +170,7 @@ extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
#define ioremap_uc ioremap_uc
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
#define ioremap_cache ioremap_cache
-extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, unsigned long prot_val);
+extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, pgprot_t prot);
#define ioremap_prot ioremap_prot
extern void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size);
#define ioremap_encrypted ioremap_encrypted
diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h
index af7541c11821..8ace6559d399 100644
--- a/arch/x86/include/asm/iosf_mbi.h
+++ b/arch/x86/include/asm/iosf_mbi.h
@@ -168,13 +168,6 @@ void iosf_mbi_unblock_punit_i2c_access(void);
int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb);
/**
- * iosf_mbi_register_pmic_bus_access_notifier - Unregister PMIC bus notifier
- *
- * @nb: notifier_block to unregister
- */
-int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb);
-
-/**
* iosf_mbi_unregister_pmic_bus_access_notifier_unlocked - Unregister PMIC bus
* notifier, unlocked
*
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index abb8374c9ff7..9a9b21b78905 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -76,6 +76,28 @@ static __always_inline void native_local_irq_restore(unsigned long flags)
#endif
+#ifndef CONFIG_PARAVIRT
+#ifndef __ASSEMBLY__
+/*
+ * Used in the idle loop; sti takes one instruction cycle
+ * to complete:
+ */
+static __always_inline void arch_safe_halt(void)
+{
+ native_safe_halt();
+}
+
+/*
+ * Used when interrupts are already enabled or to
+ * shutdown the processor:
+ */
+static __always_inline void halt(void)
+{
+ native_halt();
+}
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_PARAVIRT */
+
#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
@@ -98,24 +120,6 @@ static __always_inline void arch_local_irq_enable(void)
}
/*
- * Used in the idle loop; sti takes one instruction cycle
- * to complete:
- */
-static __always_inline void arch_safe_halt(void)
-{
- native_safe_halt();
-}
-
-/*
- * Used when interrupts are already enabled or to
- * shutdown the processor:
- */
-static __always_inline void halt(void)
-{
- native_halt();
-}
-
-/*
* For spinlocks, etc:
*/
static __always_inline unsigned long arch_local_irq_save(void)
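For reference, the native_*() helpers that both the paravirt and non-paravirt
variants wrap are conventionally defined as single instructions; a sketch (the
exact definitions live elsewhere in this header):

    static __always_inline void native_safe_halt(void)
    {
            asm volatile("sti; hlt" ::: "memory");  /* STI shadow covers the HLT */
    }

    static __always_inline void native_halt(void)
    {
            asm volatile("hlt" ::: "memory");
    }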
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index 5469d7a7c40f..53ba39ce010c 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -41,10 +41,6 @@ static inline int numa_cpu_node(int cpu)
}
#endif /* CONFIG_NUMA */
-#ifdef CONFIG_X86_32
-# include <asm/numa_32.h>
-#endif
-
#ifdef CONFIG_NUMA
extern void numa_set_node(int cpu, int node);
extern void numa_clear_node(int cpu);
diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h
deleted file mode 100644
index 9c8e9e85be77..000000000000
--- a/arch/x86/include/asm/numa_32.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_NUMA_32_H
-#define _ASM_X86_NUMA_32_H
-
-#ifdef CONFIG_HIGHMEM
-extern void set_highmem_pages_init(void);
-#else
-static inline void set_highmem_pages_init(void)
-{
-}
-#endif
-
-#endif /* _ASM_X86_NUMA_32_H */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index bed346bfac89..c4c23190925c 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -102,6 +102,16 @@ static inline void notify_page_enc_status_changed(unsigned long pfn,
PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc);
}
+static __always_inline void arch_safe_halt(void)
+{
+ PVOP_VCALL0(irq.safe_halt);
+}
+
+static inline void halt(void)
+{
+ PVOP_VCALL0(irq.halt);
+}
+
#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
@@ -165,16 +175,6 @@ static inline void __write_cr4(unsigned long x)
PVOP_VCALL1(cpu.write_cr4, x);
}
-static __always_inline void arch_safe_halt(void)
-{
- PVOP_VCALL0(irq.safe_halt);
-}
-
-static inline void halt(void)
-{
- PVOP_VCALL0(irq.halt);
-}
-
static inline u64 paravirt_read_msr(unsigned msr)
{
return PVOP_CALL1(u64, cpu.read_msr, msr);
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 62912023b46f..631c306ce1ff 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -120,10 +120,9 @@ struct pv_irq_ops {
struct paravirt_callee_save save_fl;
struct paravirt_callee_save irq_disable;
struct paravirt_callee_save irq_enable;
-
+#endif
void (*safe_halt)(void);
void (*halt)(void);
-#endif
} __no_randomize_layout;
struct pv_mmu_ops {
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 105db2d33c7b..5fe314a2e73e 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -63,10 +63,14 @@
unsigned long tcp_ptr__ = raw_cpu_read_long(this_cpu_off); \
\
tcp_ptr__ += (__force unsigned long)(_ptr); \
- (typeof(*(_ptr)) __kernel __force *)tcp_ptr__; \
+ (TYPEOF_UNQUAL(*(_ptr)) __force __kernel *)tcp_ptr__; \
})
#else
-#define arch_raw_cpu_ptr(_ptr) ({ BUILD_BUG(); (typeof(_ptr))0; })
+#define arch_raw_cpu_ptr(_ptr) \
+({ \
+ BUILD_BUG(); \
+ (TYPEOF_UNQUAL(*(_ptr)) __force __kernel *)0; \
+})
#endif
#define PER_CPU_VAR(var) %__percpu_seg:(var)__percpu_rel
@@ -81,9 +85,18 @@
#endif /* CONFIG_SMP */
-#define __my_cpu_type(var) typeof(var) __percpu_seg_override
-#define __my_cpu_ptr(ptr) (__my_cpu_type(*(ptr))*)(__force uintptr_t)(ptr)
-#define __my_cpu_var(var) (*__my_cpu_ptr(&(var)))
+#if defined(CONFIG_USE_X86_SEG_SUPPORT) && defined(USE_TYPEOF_UNQUAL)
+# define __my_cpu_type(var) typeof(var)
+# define __my_cpu_ptr(ptr) (ptr)
+# define __my_cpu_var(var) (var)
+
+# define __percpu_qual __percpu_seg_override
+#else
+# define __my_cpu_type(var) typeof(var) __percpu_seg_override
+# define __my_cpu_ptr(ptr) (__my_cpu_type(*(ptr))*)(__force uintptr_t)(ptr)
+# define __my_cpu_var(var) (*__my_cpu_ptr(&(var)))
+#endif
+
#define __percpu_arg(x) __percpu_prefix "%" #x
#define __force_percpu_arg(x) __force_percpu_prefix "%" #x
@@ -150,7 +163,7 @@ do { \
__pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \
\
if (0) { \
- typeof(_var) pto_tmp__; \
+ TYPEOF_UNQUAL(_var) pto_tmp__; \
pto_tmp__ = (_val); \
(void)pto_tmp__; \
} \
@@ -191,7 +204,7 @@ do { \
__pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \
\
if (0) { \
- typeof(_var) pto_tmp__; \
+ TYPEOF_UNQUAL(_var) pto_tmp__; \
pto_tmp__ = (_val); \
(void)pto_tmp__; \
} \
@@ -212,7 +225,7 @@ do { \
(val) == (typeof(val))-1)) ? (int)(val) : 0; \
\
if (0) { \
- typeof(var) pao_tmp__; \
+ TYPEOF_UNQUAL(var) pao_tmp__; \
pao_tmp__ = (val); \
(void)pao_tmp__; \
} \
@@ -245,7 +258,7 @@ do { \
*/
#define raw_percpu_xchg_op(_var, _nval) \
({ \
- typeof(_var) pxo_old__ = raw_cpu_read(_var); \
+ TYPEOF_UNQUAL(_var) pxo_old__ = raw_cpu_read(_var); \
\
raw_cpu_write(_var, _nval); \
\
@@ -259,7 +272,7 @@ do { \
*/
#define this_percpu_xchg_op(_var, _nval) \
({ \
- typeof(_var) pxo_old__ = this_cpu_read(_var); \
+ TYPEOF_UNQUAL(_var) pxo_old__ = this_cpu_read(_var); \
\
do { } while (!this_cpu_try_cmpxchg(_var, &pxo_old__, _nval)); \
\
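TYPEOF_UNQUAL() resolves to C23 typeof_unqual (or the compiler's
__typeof_unqual__ builtin) when available and falls back to plain typeof
otherwise. What it buys the accessors above, in a minimal sketch assuming a
compiler with typeof_unqual support:

    volatile int counter;

    typeof(counter) a;          /* volatile int: qualifiers are inherited   */
    typeof_unqual(counter) b;   /* int: qualifiers are stripped, including
                                 * named-address-space qualifiers such as
                                 * the __seg_gs/__seg_fs used for percpu    */

Without the stripping, a temporary declared from a __seg_gs-qualified percpu
variable would itself carry the segment qualifier, which is not valid for an
ordinary automatic variable.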
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index daea94c2993c..55a5e656e4b9 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -16,23 +16,23 @@
#ifdef __ASSEMBLER__
#define ASM_CLAC \
- ALTERNATIVE "", "clac", X86_FEATURE_SMAP
+ ALTERNATIVE __stringify(ANNOTATE_IGNORE_ALTERNATIVE), "clac", X86_FEATURE_SMAP
#define ASM_STAC \
- ALTERNATIVE "", "stac", X86_FEATURE_SMAP
+ ALTERNATIVE __stringify(ANNOTATE_IGNORE_ALTERNATIVE), "stac", X86_FEATURE_SMAP
#else /* __ASSEMBLER__ */
static __always_inline void clac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative("", "clac", X86_FEATURE_SMAP);
+ alternative(ANNOTATE_IGNORE_ALTERNATIVE "", "clac", X86_FEATURE_SMAP);
}
static __always_inline void stac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative("", "stac", X86_FEATURE_SMAP);
+ alternative(ANNOTATE_IGNORE_ALTERNATIVE "", "stac", X86_FEATURE_SMAP);
}
static __always_inline unsigned long smap_save(void)
@@ -40,7 +40,8 @@ static __always_inline unsigned long smap_save(void)
unsigned long flags;
asm volatile ("# smap_save\n\t"
- ALTERNATIVE("", "pushf; pop %0; " "clac" "\n\t",
+ ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE
+ "", "pushf; pop %0; clac",
X86_FEATURE_SMAP)
: "=rm" (flags) : : "memory", "cc");
@@ -50,16 +51,22 @@ static __always_inline unsigned long smap_save(void)
static __always_inline void smap_restore(unsigned long flags)
{
asm volatile ("# smap_restore\n\t"
- ALTERNATIVE("", "push %0; popf\n\t",
+ ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE
+ "", "push %0; popf",
X86_FEATURE_SMAP)
: : "g" (flags) : "memory", "cc");
}
/* These macros can be used in asm() statements */
#define ASM_CLAC \
- ALTERNATIVE("", "clac", X86_FEATURE_SMAP)
+ ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "", "clac", X86_FEATURE_SMAP)
#define ASM_STAC \
- ALTERNATIVE("", "stac", X86_FEATURE_SMAP)
+ ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "", "stac", X86_FEATURE_SMAP)
+
+#define ASM_CLAC_UNSAFE \
+ ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "clac", X86_FEATURE_SMAP)
+#define ASM_STAC_UNSAFE \
+ ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "stac", X86_FEATURE_SMAP)
#endif /* __ASSEMBLER__ */
diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
index 65394aa9b49f..4a1922ec80cf 100644
--- a/arch/x86/include/asm/tdx.h
+++ b/arch/x86/include/asm/tdx.h
@@ -58,7 +58,7 @@ void tdx_get_ve_info(struct ve_info *ve);
bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve);
-void tdx_safe_halt(void);
+void tdx_halt(void);
bool tdx_early_handle_ve(struct pt_regs *regs);
@@ -72,7 +72,7 @@ void __init tdx_dump_td_ctls(u64 td_ctls);
#else
static inline void tdx_early_init(void) { };
-static inline void tdx_safe_halt(void) { };
+static inline void tdx_halt(void) { };
static inline bool tdx_early_handle_ve(struct pt_regs *regs) { return false; }
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index a9af8759de34..e9b81876ebe4 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -348,8 +348,7 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
}
static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
- struct mm_struct *mm,
- unsigned long uaddr)
+ struct mm_struct *mm, unsigned long start, unsigned long end)
{
inc_mm_tlb_gen(mm);
cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 97771b9d33af..59a62c3780a2 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -231,14 +231,12 @@ static __always_inline void __xen_stac(void)
* Suppress objtool seeing the STAC/CLAC and getting confused about it
* calling random code with AC=1.
*/
- asm volatile(ANNOTATE_IGNORE_ALTERNATIVE
- ASM_STAC ::: "memory", "flags");
+ asm volatile(ASM_STAC_UNSAFE ::: "memory", "flags");
}
static __always_inline void __xen_clac(void)
{
- asm volatile(ANNOTATE_IGNORE_ALTERNATIVE
- ASM_CLAC ::: "memory", "flags");
+ asm volatile(ASM_CLAC_UNSAFE ::: "memory", "flags");
}
static inline long
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index a9088250770f..bd0fc69a10a7 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -72,18 +72,10 @@ enum xen_lazy_mode {
};
DECLARE_PER_CPU(enum xen_lazy_mode, xen_lazy_mode);
-DECLARE_PER_CPU(unsigned int, xen_lazy_nesting);
static inline void enter_lazy(enum xen_lazy_mode mode)
{
- enum xen_lazy_mode old_mode = this_cpu_read(xen_lazy_mode);
-
- if (mode == old_mode) {
- this_cpu_inc(xen_lazy_nesting);
- return;
- }
-
- BUG_ON(old_mode != XEN_LAZY_NONE);
+ BUG_ON(this_cpu_read(xen_lazy_mode) != XEN_LAZY_NONE);
this_cpu_write(xen_lazy_mode, mode);
}
@@ -92,10 +84,7 @@ static inline void leave_lazy(enum xen_lazy_mode mode)
{
BUG_ON(this_cpu_read(xen_lazy_mode) != mode);
- if (this_cpu_read(xen_lazy_nesting) == 0)
- this_cpu_write(xen_lazy_mode, XEN_LAZY_NONE);
- else
- this_cpu_dec(xen_lazy_nesting);
+ this_cpu_write(xen_lazy_mode, XEN_LAZY_NONE);
}
enum xen_lazy_mode xen_get_lazy_mode(void);
diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c
index dac4d64dfb2a..2235a7477436 100644
--- a/arch/x86/kernel/cpu/mce/severity.c
+++ b/arch/x86/kernel/cpu/mce/severity.c
@@ -300,13 +300,12 @@ static noinstr int error_context(struct mce *m, struct pt_regs *regs)
copy_user = is_copy_from_user(regs);
instrumentation_end();
- switch (fixup_type) {
- case EX_TYPE_UACCESS:
- if (!copy_user)
- return IN_KERNEL;
- m->kflags |= MCE_IN_KERNEL_COPYIN;
- fallthrough;
+ if (copy_user) {
+ m->kflags |= MCE_IN_KERNEL_COPYIN | MCE_IN_KERNEL_RECOV;
+ return IN_KERNEL_RECOV;
+ }
+ switch (fixup_type) {
case EX_TYPE_FAULT_MCE_SAFE:
case EX_TYPE_DEFAULT_MCE_SAFE:
m->kflags |= MCE_IN_KERNEL_RECOV;
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index c44c5b496355..eaae99602b61 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -403,6 +403,11 @@ static inline struct rdt_hw_resource *resctrl_to_arch_res(struct rdt_resource *r
extern struct mutex rdtgroup_mutex;
+static inline const char *rdt_kn_name(const struct kernfs_node *kn)
+{
+ return rcu_dereference_check(kn->name, lockdep_is_held(&rdtgroup_mutex));
+}
+
extern struct rdt_hw_resource rdt_resources_all[];
extern struct rdtgroup rdtgroup_default;
extern struct dentry *debugfs_resctrl;
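rdt_kn_name() is the standard lockdep-checked RCU accessor pattern: the pointer
may be read inside an RCU read-side critical section or while holding the lock
that writers take to replace it. The generic shape of the idiom, sketched with
hypothetical names:

    /* Safe from either rcu_read_lock() context or with obj_lock held. */
    static const char *obj_name(const struct obj *o)
    {
            return rcu_dereference_check(o->name, lockdep_is_held(&obj_lock));
    }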
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index 01fa7890b43f..92ea1472bde9 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -52,7 +52,8 @@ static char *pseudo_lock_devnode(const struct device *dev, umode_t *mode)
rdtgrp = dev_get_drvdata(dev);
if (mode)
*mode = 0600;
- return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdtgrp->kn->name);
+ guard(mutex)(&rdtgroup_mutex);
+ return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdt_kn_name(rdtgrp->kn));
}
static const struct class pseudo_lock_class = {
@@ -1298,6 +1299,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
struct task_struct *thread;
unsigned int new_minor;
struct device *dev;
+ char *kn_name __free(kfree) = NULL;
int ret;
ret = pseudo_lock_region_alloc(plr);
@@ -1309,6 +1311,11 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
ret = -EINVAL;
goto out_region;
}
+ kn_name = kstrdup(rdt_kn_name(rdtgrp->kn), GFP_KERNEL);
+ if (!kn_name) {
+ ret = -ENOMEM;
+ goto out_cstates;
+ }
plr->thread_done = 0;
@@ -1353,8 +1360,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
mutex_unlock(&rdtgroup_mutex);
if (!IS_ERR_OR_NULL(debugfs_resctrl)) {
- plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name,
- debugfs_resctrl);
+ plr->debugfs_dir = debugfs_create_dir(kn_name, debugfs_resctrl);
if (!IS_ERR_OR_NULL(plr->debugfs_dir))
debugfs_create_file("pseudo_lock_measure", 0200,
plr->debugfs_dir, rdtgrp,
@@ -1363,7 +1369,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
dev = device_create(&pseudo_lock_class, NULL,
MKDEV(pseudo_lock_major, new_minor),
- rdtgrp, "%s", rdtgrp->kn->name);
+ rdtgrp, "%s", kn_name);
mutex_lock(&rdtgroup_mutex);
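The fix leans on the scope-based cleanup helpers from <linux/cleanup.h>:
guard(mutex) releases the lock and __free(kfree) frees the buffer automatically
when the enclosing scope exits. A minimal sketch of the idiom (the consumer
function is hypothetical):

    static int example(struct kernfs_node *kn)
    {
            char *name __free(kfree) = NULL;   /* kfree(name) on every return */

            guard(mutex)(&rdtgroup_mutex);     /* mutex_unlock() on scope exit */

            name = kstrdup(rdt_kn_name(kn), GFP_KERNEL);
            if (!name)
                    return -ENOMEM;            /* no explicit unlock/free */

            return consume_name(name);         /* hypothetical consumer */
    }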
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index c6274d40b217..93ec829015f1 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -944,14 +944,14 @@ int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns,
continue;
seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "",
- rdtg->kn->name);
+ rdt_kn_name(rdtg->kn));
seq_puts(s, "mon:");
list_for_each_entry(crg, &rdtg->mon.crdtgrp_list,
mon.crdtgrp_list) {
if (!resctrl_arch_match_rmid(tsk, crg->mon.parent->closid,
crg->mon.rmid))
continue;
- seq_printf(s, "%s", crg->kn->name);
+ seq_printf(s, "%s", rdt_kn_name(crg->kn));
break;
}
seq_putc(s, '\n');
@@ -984,10 +984,20 @@ static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
return 0;
}
+static void *rdt_kn_parent_priv(struct kernfs_node *kn)
+{
+ /*
+ * The parent pointer is only valid within RCU section since it can be
+ * replaced.
+ */
+ guard(rcu)();
+ return rcu_dereference(kn->__parent)->priv;
+}
+
static int rdt_num_closids_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v)
{
- struct resctrl_schema *s = of->kn->parent->priv;
+ struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
seq_printf(seq, "%u\n", s->num_closid);
return 0;
@@ -996,7 +1006,7 @@ static int rdt_num_closids_show(struct kernfs_open_file *of,
static int rdt_default_ctrl_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v)
{
- struct resctrl_schema *s = of->kn->parent->priv;
+ struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
struct rdt_resource *r = s->res;
seq_printf(seq, "%x\n", resctrl_get_default_ctrl(r));
@@ -1006,7 +1016,7 @@ static int rdt_default_ctrl_show(struct kernfs_open_file *of,
static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v)
{
- struct resctrl_schema *s = of->kn->parent->priv;
+ struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
struct rdt_resource *r = s->res;
seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
@@ -1016,7 +1026,7 @@ static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
static int rdt_shareable_bits_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v)
{
- struct resctrl_schema *s = of->kn->parent->priv;
+ struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
struct rdt_resource *r = s->res;
seq_printf(seq, "%x\n", r->cache.shareable_bits);
@@ -1040,7 +1050,7 @@ static int rdt_shareable_bits_show(struct kernfs_open_file *of,
static int rdt_bit_usage_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v)
{
- struct resctrl_schema *s = of->kn->parent->priv;
+ struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
/*
* Use unsigned long even though only 32 bits are used to ensure
* test_bit() is used safely.
@@ -1122,7 +1132,7 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
static int rdt_min_bw_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v)
{
- struct resctrl_schema *s = of->kn->parent->priv;
+ struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
struct rdt_resource *r = s->res;
seq_printf(seq, "%u\n", r->membw.min_bw);
@@ -1132,7 +1142,7 @@ static int rdt_min_bw_show(struct kernfs_open_file *of,
static int rdt_num_rmids_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v)
{
- struct rdt_resource *r = of->kn->parent->priv;
+ struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
seq_printf(seq, "%d\n", r->num_rmid);
@@ -1142,7 +1152,7 @@ static int rdt_num_rmids_show(struct kernfs_open_file *of,
static int rdt_mon_features_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v)
{
- struct rdt_resource *r = of->kn->parent->priv;
+ struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
struct mon_evt *mevt;
list_for_each_entry(mevt, &r->evt_list, list) {
@@ -1157,7 +1167,7 @@ static int rdt_mon_features_show(struct kernfs_open_file *of,
static int rdt_bw_gran_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v)
{
- struct resctrl_schema *s = of->kn->parent->priv;
+ struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
struct rdt_resource *r = s->res;
seq_printf(seq, "%u\n", r->membw.bw_gran);
@@ -1167,7 +1177,7 @@ static int rdt_bw_gran_show(struct kernfs_open_file *of,
static int rdt_delay_linear_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v)
{
- struct resctrl_schema *s = of->kn->parent->priv;
+ struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
struct rdt_resource *r = s->res;
seq_printf(seq, "%u\n", r->membw.delay_linear);
@@ -1185,7 +1195,7 @@ static int max_threshold_occ_show(struct kernfs_open_file *of,
static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v)
{
- struct resctrl_schema *s = of->kn->parent->priv;
+ struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
struct rdt_resource *r = s->res;
switch (r->membw.throttle_mode) {
@@ -1259,7 +1269,7 @@ static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
static int rdt_has_sparse_bitmasks_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v)
{
- struct resctrl_schema *s = of->kn->parent->priv;
+ struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
struct rdt_resource *r = s->res;
seq_printf(seq, "%u\n", r->cache.arch_has_sparse_bitmasks);
@@ -1670,7 +1680,7 @@ static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid
static int mbm_total_bytes_config_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v)
{
- struct rdt_resource *r = of->kn->parent->priv;
+ struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
mbm_config_show(seq, r, QOS_L3_MBM_TOTAL_EVENT_ID);
@@ -1680,7 +1690,7 @@ static int mbm_total_bytes_config_show(struct kernfs_open_file *of,
static int mbm_local_bytes_config_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v)
{
- struct rdt_resource *r = of->kn->parent->priv;
+ struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
mbm_config_show(seq, r, QOS_L3_MBM_LOCAL_EVENT_ID);
@@ -1787,7 +1797,7 @@ static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of,
char *buf, size_t nbytes,
loff_t off)
{
- struct rdt_resource *r = of->kn->parent->priv;
+ struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
int ret;
/* Valid input requires a trailing newline */
@@ -1813,7 +1823,7 @@ static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of,
char *buf, size_t nbytes,
loff_t off)
{
- struct rdt_resource *r = of->kn->parent->priv;
+ struct rdt_resource *r = rdt_kn_parent_priv(of->kn);
int ret;
/* Valid input requires a trailing newline */
@@ -2513,12 +2523,13 @@ static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
* resource. "info" and its subdirectories don't
* have rdtgroup structures, so return NULL here.
*/
- if (kn == kn_info || kn->parent == kn_info)
+ if (kn == kn_info ||
+ rcu_access_pointer(kn->__parent) == kn_info)
return NULL;
else
return kn->priv;
} else {
- return kn->parent->priv;
+ return rdt_kn_parent_priv(kn);
}
}
@@ -3752,7 +3763,7 @@ out_unlock:
*/
static bool is_mon_groups(struct kernfs_node *kn, const char *name)
{
- return (!strcmp(kn->name, "mon_groups") &&
+ return (!strcmp(rdt_kn_name(kn), "mon_groups") &&
strcmp(name, "mon_groups"));
}
@@ -3867,9 +3878,18 @@ static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
return 0;
}
+static struct kernfs_node *rdt_kn_parent(struct kernfs_node *kn)
+{
+ /*
+	 * Valid within the RCU section in which it was obtained, or while
+	 * rdtgroup_mutex is held.
+ */
+ return rcu_dereference_check(kn->__parent, lockdep_is_held(&rdtgroup_mutex));
+}
+
static int rdtgroup_rmdir(struct kernfs_node *kn)
{
- struct kernfs_node *parent_kn = kn->parent;
+ struct kernfs_node *parent_kn;
struct rdtgroup *rdtgrp;
cpumask_var_t tmpmask;
int ret = 0;
@@ -3882,6 +3902,7 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
ret = -EPERM;
goto out;
}
+ parent_kn = rdt_kn_parent(kn);
/*
* If the rdtgroup is a ctrl_mon group and parent directory
@@ -3899,7 +3920,7 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
ret = rdtgroup_rmdir_ctrl(rdtgrp, tmpmask);
}
} else if (rdtgrp->type == RDTMON_GROUP &&
- is_mon_groups(parent_kn, kn->name)) {
+ is_mon_groups(parent_kn, rdt_kn_name(kn))) {
ret = rdtgroup_rmdir_mon(rdtgrp, tmpmask);
} else {
ret = -EPERM;
@@ -3950,6 +3971,7 @@ static void mongrp_reparent(struct rdtgroup *rdtgrp,
static int rdtgroup_rename(struct kernfs_node *kn,
struct kernfs_node *new_parent, const char *new_name)
{
+ struct kernfs_node *kn_parent;
struct rdtgroup *new_prdtgrp;
struct rdtgroup *rdtgrp;
cpumask_var_t tmpmask;
@@ -3984,8 +4006,9 @@ static int rdtgroup_rename(struct kernfs_node *kn,
goto out;
}
- if (rdtgrp->type != RDTMON_GROUP || !kn->parent ||
- !is_mon_groups(kn->parent, kn->name)) {
+ kn_parent = rdt_kn_parent(kn);
+ if (rdtgrp->type != RDTMON_GROUP || !kn_parent ||
+ !is_mon_groups(kn_parent, rdt_kn_name(kn))) {
rdt_last_cmd_puts("Source must be a MON group\n");
ret = -EPERM;
goto out;
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 9c9faa1634fb..102641fd2172 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -655,7 +655,7 @@ void kgdb_arch_late(void)
if (breakinfo[i].pev)
continue;
breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL);
- if (IS_ERR((void * __force)breakinfo[i].pev)) {
+ if (IS_ERR_PCPU(breakinfo[i].pev)) {
printk(KERN_ERR "kgdb: Could not allocate hw"
"breakpoints\nDisabling the kernel debugger\n");
breakinfo[i].pev = NULL;
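IS_ERR_PCPU() and its PTR_ERR_PCPU()/ERR_PTR_PCPU() relatives test and extract
error codes encoded in __percpu pointers without the open-coded
`(void * __force)` cast that sparse otherwise requires. Sketched usage (the
allocator is hypothetical):

    struct perf_event * __percpu *evts;

    evts = alloc_events_percpu();      /* hypothetical; may return encoded errno */
    if (IS_ERR_PCPU(evts))
            return PTR_ERR_PCPU(evts); /* extracts the errno, no __force cast */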
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 97925632c28e..1ccd05d8999f 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -75,6 +75,11 @@ void paravirt_set_sched_clock(u64 (*func)(void))
static_call_update(pv_sched_clock, func);
}
+static noinstr void pv_native_safe_halt(void)
+{
+ native_safe_halt();
+}
+
#ifdef CONFIG_PARAVIRT_XXL
static noinstr void pv_native_write_cr2(unsigned long val)
{
@@ -100,11 +105,6 @@ static noinstr void pv_native_set_debugreg(int regno, unsigned long val)
{
native_set_debugreg(regno, val);
}
-
-static noinstr void pv_native_safe_halt(void)
-{
- native_safe_halt();
-}
#endif
struct pv_info pv_info = {
@@ -161,9 +161,11 @@ struct paravirt_patch_template pv_ops = {
.irq.save_fl = __PV_IS_CALLEE_SAVE(pv_native_save_fl),
.irq.irq_disable = __PV_IS_CALLEE_SAVE(pv_native_irq_disable),
.irq.irq_enable = __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
+#endif /* CONFIG_PARAVIRT_XXL */
+
+ /* Irq HLT ops. */
.irq.safe_halt = pv_native_safe_halt,
.irq.halt = native_halt,
-#endif /* CONFIG_PARAVIRT_XXL */
/* Mmu ops. */
.mmu.flush_tlb_user = native_flush_tlb_local,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 91f6ff618852..962c3ce39323 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -939,7 +939,7 @@ void __init select_idle_routine(void)
static_call_update(x86_idle, mwait_idle);
} else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
pr_info("using TDX aware idle routine\n");
- static_call_update(x86_idle, tdx_safe_halt);
+ static_call_update(x86_idle, tdx_halt);
} else {
static_call_update(x86_idle, default_idle);
}
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c7164a8de983..9d2a13b37833 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -578,14 +578,13 @@ static void __init memblock_x86_reserve_range_setup_data(void)
static void __init arch_reserve_crashkernel(void)
{
unsigned long long crash_base, crash_size, low_size = 0;
- char *cmdline = boot_command_line;
bool high = false;
int ret;
if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
return;
- ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
+ ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
&crash_size, &crash_base,
&low_size, &high);
if (ret)
@@ -596,8 +595,7 @@ static void __init arch_reserve_crashkernel(void)
return;
}
- reserve_crashkernel_generic(cmdline, crash_size, crash_base,
- low_size, high);
+ reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
}
static struct resource standard_io_resources[] = {
@@ -1031,8 +1029,6 @@ void __init setup_arch(char **cmdline_p)
max_low_pfn = e820__end_of_low_ram_pfn();
else
max_low_pfn = max_pfn;
-
- high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
#endif
/* Find and reserve MPTABLE area */
@@ -1166,8 +1162,10 @@ void __init setup_arch(char **cmdline_p)
initmem_init();
dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
- if (boot_cpu_has(X86_FEATURE_GBPAGES))
+ if (boot_cpu_has(X86_FEATURE_GBPAGES)) {
hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
+ hugetlb_bootmem_alloc();
+ }
/*
* Reserve memory for crash kernel after SRAT is parsed so that it
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index aa8c341b2441..06296eb69fd4 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -77,6 +77,24 @@ SYM_FUNC_START(rep_movs_alternative)
_ASM_EXTABLE_UA( 0b, 1b)
.Llarge_movsq:
+ /* Do the first possibly unaligned word */
+0: movq (%rsi),%rax
+1: movq %rax,(%rdi)
+
+ _ASM_EXTABLE_UA( 0b, .Lcopy_user_tail)
+ _ASM_EXTABLE_UA( 1b, .Lcopy_user_tail)
+
+ /* What would be the offset to the aligned destination? */
+ leaq 8(%rdi),%rax
+ andq $-8,%rax
+ subq %rdi,%rax
+
+ /* .. and update pointers and count to match */
+ addq %rax,%rdi
+ addq %rax,%rsi
+ subq %rax,%rcx
+
+ /* make %rcx contain the number of words, %rax the remainder */
movq %rcx,%rax
shrq $3,%rcx
andl $7,%eax
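The new prologue's pointer arithmetic is easier to follow in C; a sketch of the
equivalent computation (variable names hypothetical):

    /* One possibly-unaligned 8-byte word has already been copied to dst. */
    size_t adv = (((uintptr_t)dst + 8) & ~7ul) - (uintptr_t)dst;  /* 1..8 */

    dst += adv;   /* dst is now 8-byte aligned; the 'adv' bytes skipped  */
    src += adv;   /* here were already covered by the word copy above   */
    len -= adv;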
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 690fbf48e853..32035d5be5a0 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -39,11 +39,9 @@ CFLAGS_fault.o := -I $(src)/../include/asm/trace
obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_PTDUMP_CORE) += dump_pagetables.o
+obj-$(CONFIG_PTDUMP) += dump_pagetables.o
obj-$(CONFIG_PTDUMP_DEBUGFS) += debug_pagetables.o
-obj-$(CONFIG_HIGHMEM) += highmem_32.o
-
KASAN_SANITIZE_kasan_init_$(BITS).o := n
obj-$(CONFIG_KASAN) += kasan_init_$(BITS).o
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
deleted file mode 100644
index d9efa35711ee..000000000000
--- a/arch/x86/mm/highmem_32.c
+++ /dev/null
@@ -1,34 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/highmem.h>
-#include <linux/export.h>
-#include <linux/swap.h> /* for totalram_pages */
-#include <linux/memblock.h>
-#include <asm/numa.h>
-
-void __init set_highmem_pages_init(void)
-{
- struct zone *zone;
- int nid;
-
- /*
- * Explicitly reset zone->managed_pages because set_highmem_pages_init()
- * is invoked before memblock_free_all()
- */
- reset_all_zones_managed_pages();
- for_each_zone(zone) {
- unsigned long zone_start_pfn, zone_end_pfn;
-
- if (!is_highmem(zone))
- continue;
-
- zone_start_pfn = zone->zone_start_pfn;
- zone_end_pfn = zone_start_pfn + zone->spanned_pages;
-
- nid = zone_to_nid(zone);
- printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
- zone->name, nid, zone_start_pfn, zone_end_pfn);
-
- add_highpages_with_active_regions(nid, zone_start_pfn,
- zone_end_pfn);
- }
-}
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index f288aad8dc74..ad662cc4605c 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -394,23 +394,6 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
pkmap_page_table = virt_to_kpte(vaddr);
}
-
-void __init add_highpages_with_active_regions(int nid,
- unsigned long start_pfn, unsigned long end_pfn)
-{
- phys_addr_t start, end;
- u64 i;
-
- for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) {
- unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
- start_pfn, end_pfn);
- unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
- start_pfn, end_pfn);
- for ( ; pfn < e_pfn; pfn++)
- if (pfn_valid(pfn))
- free_highmem_page(pfn_to_page(pfn));
- }
-}
#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
@@ -645,9 +628,6 @@ void __init initmem_init(void)
memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
-#ifdef CONFIG_FLATMEM
- max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn;
-#endif
__vmalloc_start_set = true;
printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
@@ -709,27 +689,17 @@ static void __init test_wp_bit(void)
panic("Linux doesn't support CPUs with broken WP.");
}
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
pci_iommu_alloc();
#ifdef CONFIG_FLATMEM
BUG_ON(!mem_map);
#endif
- /*
- * With CONFIG_DEBUG_PAGEALLOC initialization of highmem pages has to
- * be done before memblock_free_all(). Memblock use free low memory for
- * temporary data (see find_range_array()) and for this purpose can use
- * pages that was already passed to the buddy allocator, hence marked as
- * not accessible in the page tables when compiled with
- * CONFIG_DEBUG_PAGEALLOC. Otherwise order of initialization is not
- * important here.
- */
- set_highmem_pages_init();
-
- /* this will put all low memory onto the freelists */
- memblock_free_all();
+}
+void __init mem_init(void)
+{
after_bootmem = 1;
x86_init.hyper.init_after_bootmem();
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 519aa53114fa..7c4f6f591f2b 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -959,9 +959,18 @@ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
ret = __add_pages(nid, start_pfn, nr_pages, params);
WARN_ON_ONCE(ret);
- /* update max_pfn, max_low_pfn and high_memory */
- update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
- nr_pages << PAGE_SHIFT);
+ /*
+ * Special case: add_pages() is called by memremap_pages() for adding device
+ * private pages. Do not bump up max_pfn in the device private path,
+ * because max_pfn changes affect dma_addressing_limited().
+ *
+	 * dma_addressing_limited() returning true when max_pfn exceeds the
+	 * device's addressable memory can force device drivers to use bounce
+	 * buffers and negatively impact their performance.
+ */
+ if (!params->pgmap)
+ /* update max_pfn, max_low_pfn and high_memory */
+ update_end_of_memory_vars(start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
return ret;
}
@@ -1340,14 +1349,15 @@ failed:
panic("Failed to pre-allocate %s pages for vmalloc area\n", lvl);
}
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
pci_iommu_alloc();
+}
+void __init mem_init(void)
+{
/* clear_bss() already clear the empty_zero_page */
- /* this will put all memory onto the freelists */
- memblock_free_all();
after_bootmem = 1;
x86_init.hyper.init_after_bootmem();
@@ -1591,11 +1601,14 @@ void register_page_bootmem_memmap(unsigned long section_nr,
}
get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);
- if (!boot_cpu_has(X86_FEATURE_PSE)) {
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd)) {
+ next = (addr + PAGE_SIZE) & PAGE_MASK;
+ continue;
+ }
+
+ if (!boot_cpu_has(X86_FEATURE_PSE) || !pmd_leaf(*pmd)) {
next = (addr + PAGE_SIZE) & PAGE_MASK;
- pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd))
- continue;
get_page_bootmem(section_nr, pmd_page(*pmd),
MIX_SECTION_INFO);
@@ -1606,12 +1619,7 @@ void register_page_bootmem_memmap(unsigned long section_nr,
SECTION_INFO);
} else {
next = pmd_addr_end(addr, end);
-
- pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd))
- continue;
-
- nr_pmd_pages = 1 << get_order(PMD_SIZE);
+ nr_pmd_pages = (next - addr) >> PAGE_SHIFT;
page = pmd_page(*pmd);
while (nr_pmd_pages--)
get_page_bootmem(section_nr, page++,
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 42c90b420773..331e101bf801 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -440,10 +440,10 @@ void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
EXPORT_SYMBOL(ioremap_cache);
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
- unsigned long prot_val)
+ pgprot_t prot)
{
return __ioremap_caller(phys_addr, size,
- pgprot2cachemode(__pgprot(prot_val)),
+ pgprot2cachemode(prot),
__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_prot);
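With the prototype taking pgprot_t directly, the __pgprot()/pgprot_val()
round-trip at every call boundary disappears; before and after, sketched:

    /* before */
    p = ioremap_prot(paddr, size, pgprot_val(pgprot_noncached(PAGE_KERNEL)));
    /* after */
    p = ioremap_prot(paddr, size, pgprot_noncached(PAGE_KERNEL));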
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 72405d315b41..def3d9284254 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -2274,6 +2274,7 @@ int set_mce_nospec(unsigned long pfn)
pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
return rc;
}
+EXPORT_SYMBOL_GPL(set_mce_nospec);
/* Restore full speculative operation to the pfn. */
int clear_mce_nospec(unsigned long pfn)
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index cec321fb74f2..a05fcddfc811 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -20,7 +20,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm)
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
paravirt_release_pte(page_to_pfn(pte));
- tlb_remove_table(tlb, page_ptdesc(pte));
+ tlb_remove_ptdesc(tlb, page_ptdesc(pte));
}
#if CONFIG_PGTABLE_LEVELS > 2
@@ -34,21 +34,21 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
#ifdef CONFIG_X86_PAE
tlb->need_flush_all = 1;
#endif
- tlb_remove_table(tlb, virt_to_ptdesc(pmd));
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd));
}
#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
- tlb_remove_table(tlb, virt_to_ptdesc(pud));
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud));
}
#if CONFIG_PGTABLE_LEVELS > 4
void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
{
paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
- tlb_remove_table(tlb, virt_to_ptdesc(p4d));
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
}
#endif /* CONFIG_PGTABLE_LEVELS > 4 */
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c
index c81cea208c2c..40ae94db20d8 100644
--- a/arch/x86/platform/intel/iosf_mbi.c
+++ b/arch/x86/platform/intel/iosf_mbi.c
@@ -422,19 +422,6 @@ int iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
}
EXPORT_SYMBOL(iosf_mbi_unregister_pmic_bus_access_notifier_unlocked);
-int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
-{
- int ret;
-
- /* Wait for the bus to go inactive before unregistering */
- iosf_mbi_punit_acquire();
- ret = iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(nb);
- iosf_mbi_punit_release();
-
- return ret;
-}
-EXPORT_SYMBOL(iosf_mbi_unregister_pmic_bus_access_notifier);
-
void iosf_mbi_assert_punit_acquired(void)
{
WARN_ON(iosf_mbi_pmic_punit_access_count == 0);
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 63230ff8cf4f..08e76a5ca155 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -27,6 +27,7 @@
#include <asm/mmu_context.h>
#include <asm/cpu_device_id.h>
#include <asm/microcode.h>
+#include <asm/fred.h>
#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
@@ -231,6 +232,19 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
*/
#ifdef CONFIG_X86_64
wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+
+ /*
+ * Reinitialize FRED to ensure the FRED MSRs contain the same values
+ * as before hibernation.
+ *
+ * Note, the setup of FRED RSPs requires access to percpu data
+ * structures. Therefore, FRED reinitialization can only occur after
+ * the percpu access pointer (i.e., MSR_GS_BASE) is restored.
+ */
+ if (ctxt->cr4 & X86_CR4_FRED) {
+ cpu_init_fred_exceptions();
+ cpu_init_fred_rsps();
+ }
#else
loadsegment(fs, __KERNEL_PERCPU);
#endif
diff --git a/arch/x86/tools/insn_decoder_test.c b/arch/x86/tools/insn_decoder_test.c
index 6c2986d2ad11..08cd913cbd4e 100644
--- a/arch/x86/tools/insn_decoder_test.c
+++ b/arch/x86/tools/insn_decoder_test.c
@@ -12,8 +12,6 @@
#include <stdarg.h>
#include <linux/kallsyms.h>
-#define unlikely(cond) (cond)
-
#include <asm/insn.h>
#include <inat.c>
#include <insn.c>
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index 4da336965698..b51aefd6ec2b 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -12,9 +12,9 @@
*/
#ifdef CONFIG_X86_32
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#define mb() alternative("lock addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else /* CONFIG_X86_32 */
diff --git a/arch/x86/um/asm/module.h b/arch/x86/um/asm/module.h
deleted file mode 100644
index a3b061d66082..000000000000
--- a/arch/x86/um/asm/module.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __UM_MODULE_H
-#define __UM_MODULE_H
-
-/* UML is simple */
-struct mod_arch_specific
-{
-};
-
-#ifdef CONFIG_X86_32
-
-#define Elf_Shdr Elf32_Shdr
-#define Elf_Sym Elf32_Sym
-#define Elf_Ehdr Elf32_Ehdr
-
-#else
-
-#define Elf_Shdr Elf64_Shdr
-#define Elf_Sym Elf64_Sym
-#define Elf_Ehdr Elf64_Ehdr
-
-#endif
-
-#endif
diff --git a/arch/x86/um/os-Linux/mcontext.c b/arch/x86/um/os-Linux/mcontext.c
index e80ab7d28117..37decaa74761 100644
--- a/arch/x86/um/os-Linux/mcontext.c
+++ b/arch/x86/um/os-Linux/mcontext.c
@@ -4,6 +4,7 @@
#include <asm/ptrace.h>
#include <sysdep/ptrace.h>
#include <sysdep/mcontext.h>
+#include <arch.h>
void get_regs_from_mc(struct uml_pt_regs *regs, mcontext_t *mc)
{
@@ -27,7 +28,17 @@ void get_regs_from_mc(struct uml_pt_regs *regs, mcontext_t *mc)
COPY(RIP);
COPY2(EFLAGS, EFL);
COPY2(CS, CSGSFS);
- regs->gp[CS / sizeof(unsigned long)] &= 0xffff;
- regs->gp[CS / sizeof(unsigned long)] |= 3;
+ regs->gp[SS / sizeof(unsigned long)] = mc->gregs[REG_CSGSFS] >> 48;
+#endif
+}
+
+void mc_set_rip(void *_mc, void *target)
+{
+ mcontext_t *mc = _mc;
+
+#ifdef __i386__
+ mc->gregs[REG_EIP] = (unsigned long)target;
+#else
+ mc->gregs[REG_RIP] = (unsigned long)target;
#endif
}
diff --git a/arch/x86/um/shared/sysdep/faultinfo_32.h b/arch/x86/um/shared/sysdep/faultinfo_32.h
index b6f2437ec29c..ab5c8e47049c 100644
--- a/arch/x86/um/shared/sysdep/faultinfo_32.h
+++ b/arch/x86/um/shared/sysdep/faultinfo_32.h
@@ -29,4 +29,16 @@ struct faultinfo {
#define PTRACE_FULL_FAULTINFO 0
+#define ___backtrack_faulted(_faulted) \
+ asm volatile ( \
+ "mov $0, %0\n" \
+ "movl $__get_kernel_nofault_faulted_%=,%1\n" \
+ "jmp _end_%=\n" \
+ "__get_kernel_nofault_faulted_%=:\n" \
+ "mov $1, %0;" \
+ "_end_%=:" \
+ : "=r" (_faulted), \
+ "=m" (current->thread.segv_continue) :: \
+ )
+
#endif
diff --git a/arch/x86/um/shared/sysdep/faultinfo_64.h b/arch/x86/um/shared/sysdep/faultinfo_64.h
index ee88f88974ea..26fb4835d3e9 100644
--- a/arch/x86/um/shared/sysdep/faultinfo_64.h
+++ b/arch/x86/um/shared/sysdep/faultinfo_64.h
@@ -29,4 +29,16 @@ struct faultinfo {
#define PTRACE_FULL_FAULTINFO 1
+#define ___backtrack_faulted(_faulted) \
+ asm volatile ( \
+ "mov $0, %0\n" \
+ "movq $__get_kernel_nofault_faulted_%=,%1\n" \
+ "jmp _end_%=\n" \
+ "__get_kernel_nofault_faulted_%=:\n" \
+ "mov $1, %0;" \
+ "_end_%=:" \
+ : "=r" (_faulted), \
+ "=m" (current->thread.segv_continue) :: \
+ )
+
#endif
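Sketch of how the macro is intended to be consumed (control flow per the asm
above; the surrounding helper is hypothetical): the asm clears the flag and
arms current->thread.segv_continue with a recovery label; a faulting access is
then redirected to that label by the SEGV handler via mc_set_rip(), and the
label sets the flag before falling through.

    int faulted;

    ___backtrack_faulted(faulted);     /* faulted = 0; record recovery label */
    if (!faulted)
            val = *(unsigned long *)addr;  /* may fault: the handler resumes
                                            * at the label, faulted becomes 1,
                                            * and this check runs again       */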
diff --git a/arch/x86/um/vdso/vma.c b/arch/x86/um/vdso/vma.c
index f238f7b33cdd..dc8dfb2abd80 100644
--- a/arch/x86/um/vdso/vma.c
+++ b/arch/x86/um/vdso/vma.c
@@ -12,33 +12,22 @@
static unsigned int __read_mostly vdso_enabled = 1;
unsigned long um_vdso_addr;
+static struct page *um_vdso;
extern unsigned long task_size;
extern char vdso_start[], vdso_end[];
-static struct page **vdsop;
-
static int __init init_vdso(void)
{
- struct page *um_vdso;
-
BUG_ON(vdso_end - vdso_start > PAGE_SIZE);
um_vdso_addr = task_size - PAGE_SIZE;
- vdsop = kmalloc(sizeof(struct page *), GFP_KERNEL);
- if (!vdsop)
- goto oom;
-
um_vdso = alloc_page(GFP_KERNEL);
- if (!um_vdso) {
- kfree(vdsop);
-
+ if (!um_vdso)
goto oom;
- }
copy_page(page_address(um_vdso), vdso_start);
- *vdsop = um_vdso;
return 0;
@@ -56,6 +45,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
struct mm_struct *mm = current->mm;
static struct vm_special_mapping vdso_mapping = {
.name = "[vdso]",
+ .pages = &um_vdso,
};
if (!vdso_enabled)
@@ -64,7 +54,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (mmap_write_lock_killable(mm))
return -EINTR;
- vdso_mapping.pages = vdsop;
vma = _install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index dcc2041f8e61..846b5737d320 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -138,7 +138,6 @@ struct tls_descs {
};
DEFINE_PER_CPU(enum xen_lazy_mode, xen_lazy_mode) = XEN_LAZY_NONE;
-DEFINE_PER_CPU(unsigned int, xen_lazy_nesting);
enum xen_lazy_mode xen_get_lazy_mode(void)
{
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
index 934e58399c8c..7cdcc2deab3e 100644
--- a/arch/xtensa/include/asm/io.h
+++ b/arch/xtensa/include/asm/io.h
@@ -29,7 +29,7 @@
* I/O memory mapping functions.
*/
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
- unsigned long prot);
+ pgprot_t prot);
#define ioremap_prot ioremap_prot
#define iounmap iounmap
@@ -40,7 +40,7 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
else
return ioremap_prot(offset, size,
- pgprot_val(pgprot_noncached(PAGE_KERNEL)));
+ pgprot_noncached(PAGE_KERNEL));
}
#define ioremap ioremap
@@ -51,7 +51,7 @@ static inline void __iomem *ioremap_cache(unsigned long offset,
&& offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
else
- return ioremap_prot(offset, size, pgprot_val(PAGE_KERNEL));
+ return ioremap_prot(offset, size, PAGE_KERNEL);
}
#define ioremap_cache ioremap_cache
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index b2587a1a7c46..cc52733a0649 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -66,59 +66,8 @@ void __init bootmem_init(void)
memblock_dump_all();
}
-
-void __init zones_init(void)
+static void __init print_vm_layout(void)
{
- /* All pages are DMA-able, so we put them all in the DMA zone. */
- unsigned long max_zone_pfn[MAX_NR_ZONES] = {
- [ZONE_NORMAL] = max_low_pfn,
-#ifdef CONFIG_HIGHMEM
- [ZONE_HIGHMEM] = max_pfn,
-#endif
- };
- free_area_init(max_zone_pfn);
-}
-
-static void __init free_highpages(void)
-{
-#ifdef CONFIG_HIGHMEM
- unsigned long max_low = max_low_pfn;
- phys_addr_t range_start, range_end;
- u64 i;
-
- /* set highmem page free */
- for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
- &range_start, &range_end, NULL) {
- unsigned long start = PFN_UP(range_start);
- unsigned long end = PFN_DOWN(range_end);
-
- /* Ignore complete lowmem entries */
- if (end <= max_low)
- continue;
-
- /* Truncate partial highmem entries */
- if (start < max_low)
- start = max_low;
-
- for (; start < end; start++)
- free_highmem_page(pfn_to_page(start));
- }
-#endif
-}
-
-/*
- * Initialize memory pages.
- */
-
-void __init mem_init(void)
-{
- free_highpages();
-
- max_mapnr = max_pfn - ARCH_PFN_OFFSET;
- high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
-
- memblock_free_all();
-
pr_info("virtual kernel memory layout:\n"
#ifdef CONFIG_KASAN
" kasan : 0x%08lx - 0x%08lx (%5lu MB)\n"
@@ -167,6 +116,19 @@ void __init mem_init(void)
(unsigned long)(__bss_stop - __bss_start) >> 10);
}
+void __init zones_init(void)
+{
+ /* All pages are DMA-able, so we put them all in the DMA zone. */
+ unsigned long max_zone_pfn[MAX_NR_ZONES] = {
+ [ZONE_NORMAL] = max_low_pfn,
+#ifdef CONFIG_HIGHMEM
+ [ZONE_HIGHMEM] = max_pfn,
+#endif
+ };
+ free_area_init(max_zone_pfn);
+ print_vm_layout();
+}
+
static void __init parse_memmap_one(char *p)
{
char *oldp;
diff --git a/arch/xtensa/mm/ioremap.c b/arch/xtensa/mm/ioremap.c
index 8ca660b7ab49..26f238fa9d0d 100644
--- a/arch/xtensa/mm/ioremap.c
+++ b/arch/xtensa/mm/ioremap.c
@@ -11,12 +11,12 @@
#include <asm/io.h>
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
- unsigned long prot)
+ pgprot_t prot)
{
unsigned long pfn = __phys_to_pfn((phys_addr));
WARN_ON(pfn_valid(pfn));
- return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
+ return generic_ioremap_prot(phys_addr, size, prot);
}
EXPORT_SYMBOL(ioremap_prot);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ae8494d88897..c2697db59109 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2965,8 +2965,7 @@ static bool blk_mq_attempt_bio_merge(struct request_queue *q,
static struct request *blk_mq_get_new_requests(struct request_queue *q,
struct blk_plug *plug,
- struct bio *bio,
- unsigned int nsegs)
+ struct bio *bio)
{
struct blk_mq_alloc_data data = {
.q = q,
@@ -3125,7 +3124,7 @@ new_request:
if (rq) {
blk_mq_use_cached_rq(rq, plug, bio);
} else {
- rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+ rq = blk_mq_get_new_requests(q, plug, bio);
if (unlikely(!rq)) {
if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
@@ -4465,14 +4464,12 @@ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
return NULL;
}
-static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
- struct request_queue *q)
+static void __blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
+ struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
unsigned long i, j;
- /* protect against switching io scheduler */
- mutex_lock(&q->elevator_lock);
for (i = 0; i < set->nr_hw_queues; i++) {
int old_node;
int node = blk_mq_get_hctx_node(set, i);
@@ -4505,7 +4502,19 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
xa_for_each_start(&q->hctx_table, j, hctx, j)
blk_mq_exit_hctx(q, set, hctx, j);
- mutex_unlock(&q->elevator_lock);
+}
+
+static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
+ struct request_queue *q, bool lock)
+{
+ if (lock) {
+ /* protect against switching io scheduler */
+ mutex_lock(&q->elevator_lock);
+ __blk_mq_realloc_hw_ctxs(set, q);
+ mutex_unlock(&q->elevator_lock);
+ } else {
+ __blk_mq_realloc_hw_ctxs(set, q);
+ }
/* unregister cpuhp callbacks for exited hctxs */
blk_mq_remove_hw_queues_cpuhp(q);
@@ -4537,7 +4546,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
xa_init(&q->hctx_table);
- blk_mq_realloc_hw_ctxs(set, q);
+ blk_mq_realloc_hw_ctxs(set, q, false);
if (!q->nr_hw_queues)
goto err_hctxs;
@@ -5033,7 +5042,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
fallback:
blk_mq_update_queue_map(set);
list_for_each_entry(q, &set->tag_list, tag_set_list) {
- blk_mq_realloc_hw_ctxs(set, q);
+ blk_mq_realloc_hw_ctxs(set, q, true);
if (q->nr_hw_queues != set->nr_hw_queues) {
int i = prev_nr_hw_queues;
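The new `lock` flag encodes the two callers' locking contexts:

    /* Queue creation: the queue is not yet visible, so no I/O scheduler
     * switch can race with us - skip elevator_lock. */
    blk_mq_realloc_hw_ctxs(set, q, false);

    /* Runtime nr_hw_queues update: the queue is live, take elevator_lock
     * to serialize against I/O scheduler switches. */
    blk_mq_realloc_hw_ctxs(set, q, true);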
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index d294c5948b67..abd609d4c8ef 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -302,13 +302,6 @@ struct test_sg_division {
* @key_offset_relative_to_alignmask: if true, add the algorithm's alignmask to
* the @key_offset
* @finalization_type: what finalization function to use for hashes
- * @multibuffer: test with multibuffer
- * @multibuffer_index: random number used to generate the message index to use
- * for multibuffer.
- * @multibuffer_uneven: test with multibuffer using uneven lengths
- * @multibuffer_lens: random lengths to make chained request uneven
- * @multibuffer_count: random number used to generate the num_msgs parameter
- * for multibuffer
* @nosimd: execute with SIMD disabled? Requires !CRYPTO_TFM_REQ_MAY_SLEEP.
* This applies to the parts of the operation that aren't controlled
* individually by @nosimd_setkey or @src_divs[].nosimd.
@@ -328,11 +321,6 @@ struct testvec_config {
enum finalization_type finalization_type;
bool nosimd;
bool nosimd_setkey;
- bool multibuffer;
- unsigned int multibuffer_index;
- unsigned int multibuffer_count;
- bool multibuffer_uneven;
- unsigned int multibuffer_lens[MAX_MB_MSGS];
};
#define TESTVEC_CONFIG_NAMELEN 192
@@ -572,7 +560,6 @@ struct test_sglist {
char *bufs[XBUFSIZE];
struct scatterlist sgl[XBUFSIZE];
struct scatterlist sgl_saved[XBUFSIZE];
- struct scatterlist full_sgl[XBUFSIZE];
struct scatterlist *sgl_ptr;
unsigned int nents;
};
@@ -686,11 +673,6 @@ static int build_test_sglist(struct test_sglist *tsgl,
sg_mark_end(&tsgl->sgl[tsgl->nents - 1]);
tsgl->sgl_ptr = tsgl->sgl;
memcpy(tsgl->sgl_saved, tsgl->sgl, tsgl->nents * sizeof(tsgl->sgl[0]));
-
- sg_init_table(tsgl->full_sgl, XBUFSIZE);
- for (i = 0; i < XBUFSIZE; i++)
- sg_set_buf(tsgl->full_sgl, tsgl->bufs[i], PAGE_SIZE * 2);
-
return 0;
}
@@ -1167,27 +1149,6 @@ static void generate_random_testvec_config(struct rnd_state *rng,
break;
}
- if (prandom_bool(rng)) {
- int i;
-
- cfg->multibuffer = true;
- cfg->multibuffer_count = prandom_u32_state(rng);
- cfg->multibuffer_count %= MAX_MB_MSGS;
- if (cfg->multibuffer_count++) {
- cfg->multibuffer_index = prandom_u32_state(rng);
- cfg->multibuffer_index %= cfg->multibuffer_count;
- }
-
- cfg->multibuffer_uneven = prandom_bool(rng);
- for (i = 0; i < MAX_MB_MSGS; i++)
- cfg->multibuffer_lens[i] =
- generate_random_length(rng, PAGE_SIZE * 2 * XBUFSIZE);
-
- p += scnprintf(p, end - p, " multibuffer(%d/%d%s)",
- cfg->multibuffer_index, cfg->multibuffer_count,
- cfg->multibuffer_uneven ? "/uneven" : "");
- }
-
if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP)) {
if (prandom_bool(rng)) {
cfg->nosimd = true;
@@ -1492,7 +1453,6 @@ static int do_ahash_op(int (*op)(struct ahash_request *req),
struct ahash_request *req,
struct crypto_wait *wait, bool nosimd)
{
- struct ahash_request *r2;
int err;
if (nosimd)
@@ -1503,15 +1463,7 @@ static int do_ahash_op(int (*op)(struct ahash_request *req),
if (nosimd)
crypto_reenable_simd_for_test();
- err = crypto_wait_req(err, wait);
- if (err)
- return err;
-
- list_for_each_entry(r2, &req->base.list, base.list)
- if (r2->base.err)
- return r2->base.err;
-
- return 0;
+ return crypto_wait_req(err, wait);
}
static int check_nonfinal_ahash_op(const char *op, int err,
@@ -1532,65 +1484,20 @@ static int check_nonfinal_ahash_op(const char *op, int err,
return 0;
}
-static void setup_ahash_multibuffer(
- struct ahash_request *reqs[MAX_MB_MSGS],
- const struct testvec_config *cfg,
- struct test_sglist *tsgl)
-{
- struct scatterlist *sg = tsgl->full_sgl;
- static u8 trash[HASH_MAX_DIGESTSIZE];
- struct ahash_request *req = reqs[0];
- unsigned int num_msgs;
- unsigned int msg_idx;
- int i;
-
- if (!cfg->multibuffer)
- return;
-
- num_msgs = cfg->multibuffer_count;
- if (num_msgs == 1)
- return;
-
- msg_idx = cfg->multibuffer_index;
- for (i = 1; i < num_msgs; i++) {
- struct ahash_request *r2 = reqs[i];
- unsigned int nbytes = req->nbytes;
-
- if (cfg->multibuffer_uneven)
- nbytes = cfg->multibuffer_lens[i];
-
- ahash_request_set_callback(r2, req->base.flags, NULL, NULL);
- ahash_request_set_crypt(r2, sg, trash, nbytes);
- ahash_request_chain(r2, req);
- }
-
- if (msg_idx) {
- reqs[msg_idx]->src = req->src;
- reqs[msg_idx]->nbytes = req->nbytes;
- reqs[msg_idx]->result = req->result;
- req->src = sg;
- if (cfg->multibuffer_uneven)
- req->nbytes = cfg->multibuffer_lens[0];
- req->result = trash;
- }
-}
-
/* Test one hash test vector in one configuration, using the ahash API */
static int test_ahash_vec_cfg(const struct hash_testvec *vec,
const char *vec_name,
const struct testvec_config *cfg,
- struct ahash_request *reqs[MAX_MB_MSGS],
+ struct ahash_request *req,
struct test_sglist *tsgl,
u8 *hashstate)
{
- struct ahash_request *req = reqs[0];
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
const unsigned int digestsize = crypto_ahash_digestsize(tfm);
const unsigned int statesize = crypto_ahash_statesize(tfm);
const char *driver = crypto_ahash_driver_name(tfm);
const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags;
const struct test_sg_division *divs[XBUFSIZE];
- struct ahash_request *reqi = req;
DECLARE_CRYPTO_WAIT(wait);
unsigned int i;
struct scatterlist *pending_sgl;
@@ -1598,9 +1505,6 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec,
u8 result[HASH_MAX_DIGESTSIZE + TESTMGR_POISON_LEN];
int err;
- if (cfg->multibuffer)
- reqi = reqs[cfg->multibuffer_index];
-
/* Set the key, if specified */
if (vec->ksize) {
err = do_setkey(crypto_ahash_setkey, tfm, vec->key, vec->ksize,
@@ -1630,7 +1534,7 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec,
/* Do the actual hashing */
- testmgr_poison(reqi->__ctx, crypto_ahash_reqsize(tfm));
+ testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm));
testmgr_poison(result, digestsize + TESTMGR_POISON_LEN);
if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST ||
@@ -1639,7 +1543,6 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec,
ahash_request_set_callback(req, req_flags, crypto_req_done,
&wait);
ahash_request_set_crypt(req, tsgl->sgl, result, vec->psize);
- setup_ahash_multibuffer(reqs, cfg, tsgl);
err = do_ahash_op(crypto_ahash_digest, req, &wait, cfg->nosimd);
if (err) {
if (err == vec->digest_error)
@@ -1661,7 +1564,6 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec,
ahash_request_set_callback(req, req_flags, crypto_req_done, &wait);
ahash_request_set_crypt(req, NULL, result, 0);
- setup_ahash_multibuffer(reqs, cfg, tsgl);
err = do_ahash_op(crypto_ahash_init, req, &wait, cfg->nosimd);
err = check_nonfinal_ahash_op("init", err, result, digestsize,
driver, vec_name, cfg);
@@ -1678,7 +1580,6 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec,
crypto_req_done, &wait);
ahash_request_set_crypt(req, pending_sgl, result,
pending_len);
- setup_ahash_multibuffer(reqs, cfg, tsgl);
err = do_ahash_op(crypto_ahash_update, req, &wait,
divs[i]->nosimd);
err = check_nonfinal_ahash_op("update", err,
@@ -1693,7 +1594,7 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec,
/* Test ->export() and ->import() */
testmgr_poison(hashstate + statesize,
TESTMGR_POISON_LEN);
- err = crypto_ahash_export(reqi, hashstate);
+ err = crypto_ahash_export(req, hashstate);
err = check_nonfinal_ahash_op("export", err,
result, digestsize,
driver, vec_name, cfg);
@@ -1706,8 +1607,8 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec,
return -EOVERFLOW;
}
- testmgr_poison(reqi->__ctx, crypto_ahash_reqsize(tfm));
- err = crypto_ahash_import(reqi, hashstate);
+ testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm));
+ err = crypto_ahash_import(req, hashstate);
err = check_nonfinal_ahash_op("import", err,
result, digestsize,
driver, vec_name, cfg);
@@ -1721,7 +1622,6 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec,
ahash_request_set_callback(req, req_flags, crypto_req_done, &wait);
ahash_request_set_crypt(req, pending_sgl, result, pending_len);
- setup_ahash_multibuffer(reqs, cfg, tsgl);
if (cfg->finalization_type == FINALIZATION_TYPE_FINAL) {
/* finish with update() and final() */
err = do_ahash_op(crypto_ahash_update, req, &wait, cfg->nosimd);
@@ -1753,7 +1653,7 @@ result_ready:
static int test_hash_vec_cfg(const struct hash_testvec *vec,
const char *vec_name,
const struct testvec_config *cfg,
- struct ahash_request *reqs[MAX_MB_MSGS],
+ struct ahash_request *req,
struct shash_desc *desc,
struct test_sglist *tsgl,
u8 *hashstate)
@@ -1773,12 +1673,11 @@ static int test_hash_vec_cfg(const struct hash_testvec *vec,
return err;
}
- return test_ahash_vec_cfg(vec, vec_name, cfg, reqs, tsgl, hashstate);
+ return test_ahash_vec_cfg(vec, vec_name, cfg, req, tsgl, hashstate);
}
static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
- struct ahash_request *reqs[MAX_MB_MSGS],
- struct shash_desc *desc,
+ struct ahash_request *req, struct shash_desc *desc,
struct test_sglist *tsgl, u8 *hashstate)
{
char vec_name[16];
@@ -1790,7 +1689,7 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++) {
err = test_hash_vec_cfg(vec, vec_name,
&default_hash_testvec_configs[i],
- reqs, desc, tsgl, hashstate);
+ req, desc, tsgl, hashstate);
if (err)
return err;
}
@@ -1807,7 +1706,7 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
generate_random_testvec_config(&rng, &cfg, cfgname,
sizeof(cfgname));
err = test_hash_vec_cfg(vec, vec_name, &cfg,
- reqs, desc, tsgl, hashstate);
+ req, desc, tsgl, hashstate);
if (err)
return err;
cond_resched();
@@ -1866,12 +1765,11 @@ done:
*/
static int test_hash_vs_generic_impl(const char *generic_driver,
unsigned int maxkeysize,
- struct ahash_request *reqs[MAX_MB_MSGS],
+ struct ahash_request *req,
struct shash_desc *desc,
struct test_sglist *tsgl,
u8 *hashstate)
{
- struct ahash_request *req = reqs[0];
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
const unsigned int digestsize = crypto_ahash_digestsize(tfm);
const unsigned int blocksize = crypto_ahash_blocksize(tfm);
@@ -1969,7 +1867,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
sizeof(cfgname));
err = test_hash_vec_cfg(&vec, vec_name, cfg,
- reqs, desc, tsgl, hashstate);
+ req, desc, tsgl, hashstate);
if (err)
goto out;
cond_resched();
@@ -1987,7 +1885,7 @@ out:
#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int test_hash_vs_generic_impl(const char *generic_driver,
unsigned int maxkeysize,
- struct ahash_request *reqs[MAX_MB_MSGS],
+ struct ahash_request *req,
struct shash_desc *desc,
struct test_sglist *tsgl,
u8 *hashstate)
@@ -2034,8 +1932,8 @@ static int __alg_test_hash(const struct hash_testvec *vecs,
u32 type, u32 mask,
const char *generic_driver, unsigned int maxkeysize)
{
- struct ahash_request *reqs[MAX_MB_MSGS] = {};
struct crypto_ahash *atfm = NULL;
+ struct ahash_request *req = NULL;
struct crypto_shash *stfm = NULL;
struct shash_desc *desc = NULL;
struct test_sglist *tsgl = NULL;
@@ -2059,14 +1957,12 @@ static int __alg_test_hash(const struct hash_testvec *vecs,
}
driver = crypto_ahash_driver_name(atfm);
- for (i = 0; i < MAX_MB_MSGS; i++) {
- reqs[i] = ahash_request_alloc(atfm, GFP_KERNEL);
- if (!reqs[i]) {
- pr_err("alg: hash: failed to allocate request for %s\n",
- driver);
- err = -ENOMEM;
- goto out;
- }
+ req = ahash_request_alloc(atfm, GFP_KERNEL);
+ if (!req) {
+ pr_err("alg: hash: failed to allocate request for %s\n",
+ driver);
+ err = -ENOMEM;
+ goto out;
}
/*
@@ -2102,12 +1998,12 @@ static int __alg_test_hash(const struct hash_testvec *vecs,
if (fips_enabled && vecs[i].fips_skip)
continue;
- err = test_hash_vec(&vecs[i], i, reqs, desc, tsgl, hashstate);
+ err = test_hash_vec(&vecs[i], i, req, desc, tsgl, hashstate);
if (err)
goto out;
cond_resched();
}
- err = test_hash_vs_generic_impl(generic_driver, maxkeysize, reqs,
+ err = test_hash_vs_generic_impl(generic_driver, maxkeysize, req,
desc, tsgl, hashstate);
out:
kfree(hashstate);
@@ -2117,12 +2013,7 @@ out:
}
kfree(desc);
crypto_free_shash(stfm);
- if (reqs[0]) {
- ahash_request_set_callback(reqs[0], 0, NULL, NULL);
- for (i = 1; i < MAX_MB_MSGS && reqs[i]; i++)
- ahash_request_chain(reqs[i], reqs[0]);
- ahash_request_free(reqs[0]);
- }
+ ahash_request_free(req);
crypto_free_ahash(atfm);
return err;
}
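
Note: with the chained multibuffer path removed, do_ahash_op() goes back to a single crypto_wait_req() and no longer walks req->base.list for per-request errors. For reference, the surviving single-request wait pattern looks roughly like this (kernel-style sketch; sgl, result and nbytes are placeholders, error handling trimmed):

	DECLARE_CRYPTO_WAIT(wait);
	int err;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, sgl, result, nbytes);
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
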
diff --git a/drivers/accel/habanalabs/common/command_submission.c b/drivers/accel/habanalabs/common/command_submission.c
index 59823e3c3bf7..dee487724918 100644
--- a/drivers/accel/habanalabs/common/command_submission.c
+++ b/drivers/accel/habanalabs/common/command_submission.c
@@ -2586,7 +2586,7 @@ int hl_cs_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv)
cs_seq = args->in.seq;
timeout = flags & HL_CS_FLAGS_CUSTOM_TIMEOUT
- ? msecs_to_jiffies(args->in.timeout * 1000)
+ ? secs_to_jiffies(args->in.timeout)
: hpriv->hdev->timeout_jiffies;
switch (cs_type) {
diff --git a/drivers/accel/habanalabs/common/debugfs.c b/drivers/accel/habanalabs/common/debugfs.c
index ca7677293a55..4b391807e5f2 100644
--- a/drivers/accel/habanalabs/common/debugfs.c
+++ b/drivers/accel/habanalabs/common/debugfs.c
@@ -1403,7 +1403,7 @@ static ssize_t hl_timeout_locked_write(struct file *f, const char __user *buf,
return rc;
if (value)
- hdev->timeout_jiffies = msecs_to_jiffies(value * 1000);
+ hdev->timeout_jiffies = secs_to_jiffies(value);
else
hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
index 30277ae410d4..68eebed3b050 100644
--- a/drivers/accel/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c
@@ -2091,7 +2091,7 @@ int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask)
dev_dbg(hdev->dev, "Device is going to be hard-reset in %u sec unless being released\n",
hdev->device_release_watchdog_timeout_sec);
schedule_delayed_work(&hdev->device_release_watchdog_work.reset_work,
- msecs_to_jiffies(hdev->device_release_watchdog_timeout_sec * 1000));
+ secs_to_jiffies(hdev->device_release_watchdog_timeout_sec));
hdev->reset_info.watchdog_active = 1;
out:
spin_unlock(&hdev->reset_info.lock);
diff --git a/drivers/accel/habanalabs/common/habanalabs_drv.c b/drivers/accel/habanalabs/common/habanalabs_drv.c
index 596c52e8aa26..0035748f3228 100644
--- a/drivers/accel/habanalabs/common/habanalabs_drv.c
+++ b/drivers/accel/habanalabs/common/habanalabs_drv.c
@@ -386,7 +386,7 @@ static int fixup_device_params(struct hl_device *hdev)
hdev->fw_comms_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC;
if (tmp_timeout)
- hdev->timeout_jiffies = msecs_to_jiffies(tmp_timeout * MSEC_PER_SEC);
+ hdev->timeout_jiffies = secs_to_jiffies(tmp_timeout);
else
hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
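
Note: the habanalabs conversions above (and the libata one further down) replace open-coded msecs_to_jiffies(x * 1000) with secs_to_jiffies(x). Besides reading better, this avoids the caller-side "* 1000", which can wrap for large second counts in a 32-bit type. A minimal userspace model, not the kernel implementation (the HZ value and the model_* helpers are assumptions for illustration):

	#include <stdint.h>
	#include <stdio.h>

	#define HZ 250UL

	static unsigned long model_msecs_to_jiffies(uint32_t msecs)
	{
		return (unsigned long)msecs * HZ / 1000;
	}

	static unsigned long model_secs_to_jiffies(uint32_t secs)
	{
		return (unsigned long)secs * HZ;
	}

	int main(void)
	{
		uint32_t timeout_sec = 5000000;	/* ~58 days */

		/* timeout_sec * 1000 wraps in 32 bits before conversion */
		printf("via msecs: %lu\n",
		       model_msecs_to_jiffies(timeout_sec * 1000));
		printf("via secs:  %lu\n",
		       model_secs_to_jiffies(timeout_sec));
		return 0;
	}
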
diff --git a/drivers/accel/habanalabs/common/sysfs.c b/drivers/accel/habanalabs/common/sysfs.c
index e9f8ccc0bbf9..9d58efa2ff38 100644
--- a/drivers/accel/habanalabs/common/sysfs.c
+++ b/drivers/accel/habanalabs/common/sysfs.c
@@ -368,7 +368,7 @@ out:
}
static ssize_t eeprom_read_handler(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t offset,
+ const struct bin_attribute *attr, char *buf, loff_t offset,
size_t max_size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -443,10 +443,10 @@ static DEVICE_ATTR_RO(security_enabled);
static DEVICE_ATTR_RO(module_id);
static DEVICE_ATTR_RO(parent_device);
-static struct bin_attribute bin_attr_eeprom = {
+static const struct bin_attribute bin_attr_eeprom = {
.attr = {.name = "eeprom", .mode = (0444)},
.size = PAGE_SIZE,
- .read = eeprom_read_handler
+ .read_new = eeprom_read_handler
};
static struct attribute *hl_dev_attrs[] = {
@@ -472,14 +472,14 @@ static struct attribute *hl_dev_attrs[] = {
NULL,
};
-static struct bin_attribute *hl_dev_bin_attrs[] = {
+static const struct bin_attribute *const hl_dev_bin_attrs[] = {
&bin_attr_eeprom,
NULL
};
static struct attribute_group hl_dev_attr_group = {
.attrs = hl_dev_attrs,
- .bin_attrs = hl_dev_bin_attrs,
+ .bin_attrs_new = hl_dev_bin_attrs,
};
static struct attribute_group hl_dev_clks_attr_group;
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index 435ec60a9682..4ad88187dc7a 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -353,8 +353,10 @@ static bool acpi_pnp_match(const char *idstr, const struct acpi_device_id **matc
* device represented by it.
*/
static const struct acpi_device_id acpi_nonpnp_device_ids[] = {
+ {"INT3F0D"},
{"INTC1080"},
{"INTC1081"},
+ {"INTC1099"},
{""},
};
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index efdadc74e3f4..103f29661576 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -649,6 +649,13 @@ acpi_video_device_EDID(struct acpi_video_device *device, void **edid, int length
obj = buffer.pointer;
+ /*
+ * Some buggy implementations incorrectly return the EDID buffer in an ACPI package.
+ * In this case, extract the buffer from the package.
+ */
+ if (obj && obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 1)
+ obj = &obj->package.elements[0];
+
if (obj && obj->type == ACPI_TYPE_BUFFER) {
*edid = kmemdup(obj->buffer.pointer, obj->buffer.length, GFP_KERNEL);
ret = *edid ? obj->buffer.length : -ENOMEM;
@@ -658,7 +665,7 @@ acpi_video_device_EDID(struct acpi_video_device *device, void **edid, int length
ret = -EFAULT;
}
- kfree(obj);
+ kfree(buffer.pointer);
return ret;
}
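
Note: the switch to kfree(buffer.pointer) matters because obj may now point into the middle of the returned object (a package element) rather than at the allocation itself. A standalone model of the hazard (the struct is hypothetical):

	#include <stdlib.h>

	struct obj {
		int type;		/* 0 = BUFFER, 1 = PACKAGE */
		struct obj *elements;	/* valid when type == PACKAGE */
	};

	int main(void)
	{
		struct obj *top = calloc(1, sizeof(*top));
		struct obj *obj = top;

		if (obj && obj->type == 1)	/* unwrap one-element package */
			obj = &obj->elements[0];

		/*
		 * free(obj) here could hand an interior pointer to the
		 * allocator; free the original allocation instead, as
		 * kfree(buffer.pointer) now does.
		 */
		free(top);
		return 0;
	}
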
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index b72772494655..289e365f84b2 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -674,6 +674,105 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
schedule_work(&entry->work);
}
+/* Room for 8 entries */
+#define CXL_CPER_PROT_ERR_FIFO_DEPTH 8
+static DEFINE_KFIFO(cxl_cper_prot_err_fifo, struct cxl_cper_prot_err_work_data,
+ CXL_CPER_PROT_ERR_FIFO_DEPTH);
+
+/* Synchronize schedule_work() with cxl_cper_prot_err_work changes */
+static DEFINE_SPINLOCK(cxl_cper_prot_err_work_lock);
+struct work_struct *cxl_cper_prot_err_work;
+
+static void cxl_cper_post_prot_err(struct cxl_cper_sec_prot_err *prot_err,
+ int severity)
+{
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+ struct cxl_cper_prot_err_work_data wd;
+ u8 *dvsec_start, *cap_start;
+
+ if (!(prot_err->valid_bits & PROT_ERR_VALID_AGENT_ADDRESS)) {
+ pr_err_ratelimited("CXL CPER invalid agent type\n");
+ return;
+ }
+
+ if (!(prot_err->valid_bits & PROT_ERR_VALID_ERROR_LOG)) {
+ pr_err_ratelimited("CXL CPER invalid protocol error log\n");
+ return;
+ }
+
+ if (prot_err->err_len != sizeof(struct cxl_ras_capability_regs)) {
+ pr_err_ratelimited("CXL CPER invalid RAS Cap size (%u)\n",
+ prot_err->err_len);
+ return;
+ }
+
+ if (!(prot_err->valid_bits & PROT_ERR_VALID_SERIAL_NUMBER))
+ pr_warn(FW_WARN "CXL CPER no device serial number\n");
+
+ guard(spinlock_irqsave)(&cxl_cper_prot_err_work_lock);
+
+ if (!cxl_cper_prot_err_work)
+ return;
+
+ switch (prot_err->agent_type) {
+ case RCD:
+ case DEVICE:
+ case LD:
+ case FMLD:
+ case RP:
+ case DSP:
+ case USP:
+ memcpy(&wd.prot_err, prot_err, sizeof(wd.prot_err));
+
+ dvsec_start = (u8 *)(prot_err + 1);
+ cap_start = dvsec_start + prot_err->dvsec_len;
+
+ memcpy(&wd.ras_cap, cap_start, sizeof(wd.ras_cap));
+ wd.severity = cper_severity_to_aer(severity);
+ break;
+ default:
+ pr_err_ratelimited("CXL CPER invalid agent type: %d\n",
+ prot_err->agent_type);
+ return;
+ }
+
+ if (!kfifo_put(&cxl_cper_prot_err_fifo, wd)) {
+ pr_err_ratelimited("CXL CPER kfifo overflow\n");
+ return;
+ }
+
+ schedule_work(cxl_cper_prot_err_work);
+#endif
+}
+
+int cxl_cper_register_prot_err_work(struct work_struct *work)
+{
+ if (cxl_cper_prot_err_work)
+ return -EINVAL;
+
+ guard(spinlock)(&cxl_cper_prot_err_work_lock);
+ cxl_cper_prot_err_work = work;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_register_prot_err_work, "CXL");
+
+int cxl_cper_unregister_prot_err_work(struct work_struct *work)
+{
+ if (cxl_cper_prot_err_work != work)
+ return -EINVAL;
+
+ guard(spinlock)(&cxl_cper_prot_err_work_lock);
+ cxl_cper_prot_err_work = NULL;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_prot_err_work, "CXL");
+
+int cxl_cper_prot_err_kfifo_get(struct cxl_cper_prot_err_work_data *wd)
+{
+ return kfifo_get(&cxl_cper_prot_err_fifo, wd);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_prot_err_kfifo_get, "CXL");
+
/* Room for 8 entries for each of the 4 event log queues */
#define CXL_CPER_FIFO_DEPTH 32
DEFINE_KFIFO(cxl_cper_fifo, struct cxl_cper_work_data, CXL_CPER_FIFO_DEPTH);
@@ -777,6 +876,10 @@ static bool ghes_do_proc(struct ghes *ghes,
}
else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
queued = ghes_handle_arm_hw_error(gdata, sev, sync);
+ } else if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR)) {
+ struct cxl_cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata);
+
+ cxl_cper_post_prot_err(prot_err, gdata->error_severity);
} else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) {
struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata);
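
Note: the new CXL protocol-error plumbing mirrors the existing event FIFOs below it: GHES produces into a fixed-depth kfifo and schedules a work_struct that the CXL driver registered earlier, and entries are dropped when the FIFO is full. A single-threaded userspace model of that handshake (names illustrative, all locking omitted):

	#include <stdbool.h>
	#include <stdio.h>

	#define FIFO_DEPTH 8u	/* power of two, like the kfifo */

	struct work_data { int severity; };

	static struct work_data fifo[FIFO_DEPTH];
	static unsigned int head, tail;

	/* consumer hook; NULL means no driver registered yet */
	static void (*prot_err_work)(void);

	static bool fifo_put(struct work_data wd)
	{
		if (head - tail == FIFO_DEPTH)
			return false;	/* full: drop, as the GHES path does */
		fifo[head++ % FIFO_DEPTH] = wd;
		return true;
	}

	static bool fifo_get(struct work_data *wd)
	{
		if (head == tail)
			return false;
		*wd = fifo[tail++ % FIFO_DEPTH];
		return true;
	}

	static void drain(void)	/* the registered work function */
	{
		struct work_data wd;

		while (fifo_get(&wd))
			printf("handle severity %d\n", wd.severity);
	}

	int main(void)
	{
		prot_err_work = drain;	/* cxl_cper_register_prot_err_work() */

		if (fifo_put((struct work_data){ .severity = 1 }) && prot_err_work)
			prot_err_work();	/* schedule_work() stand-in */

		prot_err_work = NULL;	/* cxl_cper_unregister_prot_err_work() */
		return 0;
	}
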
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index a5d47819b3a4..ae035b93da08 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -485,7 +485,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
cmd_mask = nd_desc->cmd_mask;
if (cmd == ND_CMD_CALL && call_pkg->nd_family) {
family = call_pkg->nd_family;
- if (family > NVDIMM_BUS_FAMILY_MAX ||
+ if (call_pkg->nd_family > NVDIMM_BUS_FAMILY_MAX ||
!test_bit(family, &nd_desc->bus_family_mask))
return -EINVAL;
family = array_index_nospec(family,
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index bfbb08b1e6af..9d9052258e92 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -108,6 +108,45 @@ static struct memory_target *find_mem_target(unsigned int mem_pxm)
return NULL;
}
+/**
+ * hmat_get_extended_linear_cache_size - Retrieve the extended linear cache size
+ * @backing_res: resource from the backing media
+ * @nid: node id for the memory region
+ * @cache_size: (Output) size of extended linear cache.
+ *
+ * Return: 0 on success. Errno on failure.
+ *
+ */
+int hmat_get_extended_linear_cache_size(struct resource *backing_res, int nid,
+ resource_size_t *cache_size)
+{
+ unsigned int pxm = node_to_pxm(nid);
+ struct memory_target *target;
+ struct target_cache *tcache;
+ struct resource *res;
+
+ target = find_mem_target(pxm);
+ if (!target)
+ return -ENOENT;
+
+ list_for_each_entry(tcache, &target->caches, node) {
+ if (tcache->cache_attrs.address_mode !=
+ NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR)
+ continue;
+
+ res = &target->memregions;
+ if (!resource_contains(res, backing_res))
+ continue;
+
+ *cache_size = tcache->cache_attrs.size;
+ return 0;
+ }
+
+ *cache_size = 0;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(hmat_get_extended_linear_cache_size, "CXL");
+
static struct memory_target *acpi_find_genport_target(u32 uid)
{
struct memory_target *target;
@@ -506,6 +545,11 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
case ACPI_HMAT_CA_DIRECT_MAPPED:
tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
+ /* Extended Linear mode is only valid if cache is direct mapped */
+ if (cache->address_mode == ACPI_HMAT_CACHE_MODE_EXTENDED_LINEAR) {
+ tcache->cache_attrs.address_mode =
+ NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR;
+ }
break;
case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
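
Note: hmat_get_extended_linear_cache_size() only reports a size when a cache is in extended-linear mode and the backing resource sits entirely inside the target's memory region; when no such cache exists it still returns 0, with *cache_size set to 0, reserving errno for a missing target. The containment test is essentially the following (simplified sketch of the semantics; the kernel's resource_contains() additionally checks resource flags):

	#include <stdio.h>

	struct range { unsigned long start, end; };

	/* r2 must lie fully inside r1 */
	static int range_contains(const struct range *r1, const struct range *r2)
	{
		return r1->start <= r2->start && r1->end >= r2->end;
	}

	int main(void)
	{
		struct range region  = { 0x1000, 0x8fff };
		struct range backing = { 0x2000, 0x2fff };

		printf("%d\n", range_contains(&region, &backing));	/* 1 */
		return 0;
	}
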
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index ce815d7cb8f6..0a725e46d017 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -18,6 +18,7 @@
#include <linux/nodemask.h>
#include <linux/topology.h>
#include <linux/numa_memblks.h>
+#include <linux/string_choices.h>
static nodemask_t nodes_found_map = NODE_MASK_NONE;
@@ -188,8 +189,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
pr_debug("SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
p->apic_id, p->local_sapic_eid,
p->proximity_domain_lo,
- (p->flags & ACPI_SRAT_CPU_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_CPU_ENABLED));
}
break;
@@ -201,8 +201,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
(unsigned long long)p->base_address,
(unsigned long long)p->length,
p->proximity_domain,
- (p->flags & ACPI_SRAT_MEM_ENABLED) ?
- "enabled" : "disabled",
+ str_enabled_disabled(p->flags & ACPI_SRAT_MEM_ENABLED),
(p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
" hot-pluggable" : "",
(p->flags & ACPI_SRAT_MEM_NON_VOLATILE) ?
@@ -217,8 +216,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
pr_debug("SRAT Processor (x2apicid[0x%08x]) in proximity domain %d %s\n",
p->apic_id,
p->proximity_domain,
- (p->flags & ACPI_SRAT_CPU_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_CPU_ENABLED));
}
break;
@@ -229,8 +227,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n",
p->acpi_processor_uid,
p->proximity_domain,
- (p->flags & ACPI_SRAT_GICC_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_GICC_ENABLED));
}
break;
@@ -248,8 +245,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
*(u16 *)(&p->device_handle[0]),
*(u16 *)(&p->device_handle[2]),
p->proximity_domain,
- (p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED));
} else {
/*
* In this case we can rely on the device having a
@@ -259,8 +255,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
(char *)(&p->device_handle[0]),
(char *)(&p->device_handle[8]),
p->proximity_domain,
- (p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED));
}
}
break;
@@ -272,8 +267,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n",
p->acpi_processor_uid,
p->proximity_domain,
- (p->flags & ACPI_SRAT_RINTC_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_RINTC_ENABLED));
}
break;
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
index 671407fc2bd4..ffbfd32f4cf1 100644
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -245,7 +245,8 @@ static const struct class platform_profile_class = {
/**
* _aggregate_choices - Aggregate the available profile choices
* @dev: The device
- * @arg: struct aggregate_choices_data
+ * @arg: struct aggregate_choices_data, with its aggregate member bitmap
+ * initially filled with ones
*
* Return: 0 on success, -errno on failure
*/
@@ -256,12 +257,10 @@ static int _aggregate_choices(struct device *dev, void *arg)
struct platform_profile_handler *handler;
lockdep_assert_held(&profile_lock);
+
handler = to_pprof_handler(dev);
bitmap_or(tmp, handler->choices, handler->hidden_choices, PLATFORM_PROFILE_LAST);
- if (test_bit(PLATFORM_PROFILE_LAST, data->aggregate))
- bitmap_copy(data->aggregate, tmp, PLATFORM_PROFILE_LAST);
- else
- bitmap_and(data->aggregate, tmp, data->aggregate, PLATFORM_PROFILE_LAST);
+ bitmap_and(data->aggregate, tmp, data->aggregate, PLATFORM_PROFILE_LAST);
data->count++;
return 0;
@@ -305,7 +304,6 @@ static ssize_t platform_profile_choices_show(struct kobject *kobj,
};
int err;
- set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
err = class_for_each_device(&platform_profile_class, NULL,
&data, _aggregate_choices);
@@ -422,7 +420,7 @@ static ssize_t platform_profile_store(struct kobject *kobj,
i = sysfs_match_string(profile_names, buf);
if (i < 0 || i == PLATFORM_PROFILE_CUSTOM)
return -EINVAL;
- set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
+
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
ret = class_for_each_device(&platform_profile_class, NULL,
&data, _aggregate_choices);
@@ -502,7 +500,6 @@ int platform_profile_cycle(void)
enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
int err;
- set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
err = class_for_each_device(&platform_profile_class, NULL,
&profile, _aggregate_profiles);
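
Note: the aggregation previously abused a sentinel bit (PLATFORM_PROFILE_LAST) so the first handler copied its choices while later ones AND-ed theirs. Pre-filling the aggregate bitmap with ones lets every handler be AND-ed uniformly, which is why the set_bit() calls at the call sites disappear. The same idea with plain masks (the mask values are arbitrary examples):

	#include <stdio.h>

	#define PROFILE_BITS 5u
	#define ALL_PROFILES ((1u << PROFILE_BITS) - 1)

	int main(void)
	{
		unsigned int handlers[] = { 0x0b, 0x1a };  /* per-handler choices */
		unsigned int aggregate = ALL_PROFILES;     /* bitmap_fill() stand-in */

		for (unsigned int i = 0; i < 2; i++)
			aggregate &= handlers[i];          /* _aggregate_choices() */

		printf("common choices: %#x\n", aggregate);	/* 0xa */
		return 0;
	}
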
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 586cc7d1d8aa..b181f7fc2090 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -268,6 +268,10 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
pr->power.states[ACPI_STATE_C3].address);
+ if (!pr->power.states[ACPI_STATE_C2].address &&
+ !pr->power.states[ACPI_STATE_C3].address)
+ return -ENODEV;
+
return 0;
}
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index b4cd14e7fa76..14c7bac4100b 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -441,6 +441,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
+ /* Asus Vivobook X1404VAP */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "X1404VAP"),
+ },
+ },
+ {
/* Asus Vivobook X1504VAP */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 068c1612660b..4ee30c2897a2 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -374,7 +374,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
- ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
/* Medion Lifetab S10346 */
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index e4eb8357989c..6a66c9769c6c 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -3,7 +3,6 @@
#ifndef _LINUX_BINDER_INTERNAL_H
#define _LINUX_BINDER_INTERNAL_H
-#include <linux/export.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 4b83b517caec..799531218ea2 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -160,8 +160,7 @@ void zpodd_on_suspend(struct ata_device *dev)
return;
}
- expires = zpodd->last_ready +
- msecs_to_jiffies(zpodd_poweroff_delay * 1000);
+ expires = zpodd->last_ready + secs_to_jiffies(zpodd_poweroff_delay);
if (time_before(jiffies, expires))
return;
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 6b9e65a42cd2..5ea3b03af9ba 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -1291,7 +1291,7 @@ EXPORT_SYMBOL_GPL(subsys_system_register);
* @groups: default attributes for the root device
*
* All 'virtual' subsystems have a /sys/devices/system/<name> root device
- * with the name of the subystem. The root device can carry subsystem-wide
+ * with the name of the subsystem. The root device can carry subsystem-wide
* attributes. All registered devices are below this single root device.
* There's no restriction on device naming. This is for kernel software
* constructs which need sysfs interface.
diff --git a/drivers/base/component.c b/drivers/base/component.c
index a482708566bc..abe60eb45c55 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -87,17 +87,17 @@ static int component_devices_show(struct seq_file *s, void *data)
size_t i;
mutex_lock(&component_mutex);
- seq_printf(s, "%-40s %20s\n", "aggregate_device name", "status");
- seq_puts(s, "-------------------------------------------------------------\n");
- seq_printf(s, "%-40s %20s\n\n",
+ seq_printf(s, "%-50s %20s\n", "aggregate_device name", "status");
+ seq_puts(s, "-----------------------------------------------------------------------\n");
+ seq_printf(s, "%-50s %20s\n\n",
dev_name(m->parent), m->bound ? "bound" : "not bound");
- seq_printf(s, "%-40s %20s\n", "device name", "status");
- seq_puts(s, "-------------------------------------------------------------\n");
+ seq_printf(s, "%-50s %20s\n", "device name", "status");
+ seq_puts(s, "-----------------------------------------------------------------------\n");
for (i = 0; i < match->num; i++) {
struct component *component = match->compare[i].component;
- seq_printf(s, "%-40s %20s\n",
+ seq_printf(s, "%-50s %20s\n",
component ? dev_name(component->dev) : "(unknown)",
component ? (component->bound ? "bound" : "not bound") : "not registered");
}
diff --git a/drivers/base/faux.c b/drivers/base/faux.c
index 531e9d789ee0..407c1d1aad50 100644
--- a/drivers/base/faux.c
+++ b/drivers/base/faux.c
@@ -102,7 +102,9 @@ static void faux_device_release(struct device *dev)
*
* Note, when this function is called, the functions specified in struct
* faux_ops can be called before the function returns, so be prepared for
- * everything to be properly initialized before that point in time.
+ * everything to be properly initialized before that point in time. If the
+ * probe callback (when one is present) does NOT succeed, the creation of the
+ * device will fail and NULL will be returned.
*
* Return:
* * NULL if an error happened with creating the device
@@ -147,6 +149,17 @@ struct faux_device *faux_device_create_with_groups(const char *name,
return NULL;
}
+ /*
+	 * Verify that we did bind the driver to the device (i.e. probe worked);
+	 * if not, fail the creation, as it is almost impossible for the
+	 * caller to determine whether probe succeeded.
+ */
+ if (!dev->driver) {
+ dev_err(dev, "probe did not succeed, tearing down the device\n");
+ faux_device_destroy(faux_dev);
+ faux_dev = NULL;
+ }
+
return faux_dev;
}
EXPORT_SYMBOL_GPL(faux_device_create_with_groups);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 348c5dbbfa68..8f3a41d9bfaa 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -455,7 +455,7 @@ static ssize_t valid_zones_show(struct device *dev,
struct memory_group *group = mem->group;
struct zone *default_zone;
int nid = mem->nid;
- int len = 0;
+ int len;
/*
* Check the existing zone. Make sure that we do that only on the
@@ -466,22 +466,18 @@ static ssize_t valid_zones_show(struct device *dev,
* If !mem->zone, the memory block spans multiple zones and
* cannot get offlined.
*/
- default_zone = mem->zone;
- if (!default_zone)
- return sysfs_emit(buf, "%s\n", "none");
- len += sysfs_emit_at(buf, len, "%s", default_zone->name);
- goto out;
+ return sysfs_emit(buf, "%s\n",
+ mem->zone ? mem->zone->name : "none");
}
default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group,
start_pfn, nr_pages);
- len += sysfs_emit_at(buf, len, "%s", default_zone->name);
+ len = sysfs_emit(buf, "%s", default_zone->name);
len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
MMOP_ONLINE_KERNEL, default_zone);
len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
MMOP_ONLINE_MOVABLE, default_zone);
-out:
len += sysfs_emit_at(buf, len, "\n");
return len;
}
@@ -822,18 +818,17 @@ static int add_memory_block(unsigned long block_id, unsigned long state,
static int __init add_boot_memory_block(unsigned long base_section_nr)
{
- int section_count = 0;
unsigned long nr;
- for (nr = base_section_nr; nr < base_section_nr + sections_per_block;
- nr++)
- if (present_section_nr(nr))
- section_count++;
+ for_each_present_section_nr(base_section_nr, nr) {
+ if (nr >= (base_section_nr + sections_per_block))
+ break;
- if (section_count == 0)
- return 0;
- return add_memory_block(memory_block_id(base_section_nr),
- MEM_ONLINE, NULL, NULL);
+ return add_memory_block(memory_block_id(base_section_nr),
+ MEM_ONLINE, NULL, NULL);
+ }
+
+ return 0;
}
static int add_hotplug_memory_block(unsigned long block_id,
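
Note: add_boot_memory_block() now registers the block as soon as it finds one present section inside it, rather than counting all present sections first; one present section is sufficient. The loop shape, modelled in userspace (present_section() is a made-up stand-in for the kernel's for_each_present_section_nr()):

	#include <stdbool.h>
	#include <stdio.h>

	#define SECTIONS_PER_BLOCK 4UL

	static bool present_section(unsigned long nr)
	{
		return nr == 6;		/* pretend only section 6 is present */
	}

	static int add_boot_memory_block(unsigned long base)
	{
		for (unsigned long nr = base; nr < base + SECTIONS_PER_BLOCK; nr++) {
			if (!present_section(nr))
				continue;
			printf("register block containing section %lu\n", nr);
			return 0;	/* one registration per block */
		}
		return 0;		/* fully absent block: nothing to do */
	}

	int main(void)
	{
		add_boot_memory_block(4);
		return 0;
	}
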
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 0ea653fa3433..cd13ef287011 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -244,12 +244,14 @@ CACHE_ATTR(size, "%llu")
CACHE_ATTR(line_size, "%u")
CACHE_ATTR(indexing, "%u")
CACHE_ATTR(write_policy, "%u")
+CACHE_ATTR(address_mode, "%#x")
static struct attribute *cache_attrs[] = {
&dev_attr_indexing.attr,
&dev_attr_size.attr,
&dev_attr_line_size.attr,
&dev_attr_write_policy.attr,
+ &dev_attr_address_mode.attr,
NULL,
};
ATTRIBUTE_GROUPS(cache);
diff --git a/drivers/base/physical_location.c b/drivers/base/physical_location.c
index 5db06e825c94..a5539e294d4d 100644
--- a/drivers/base/physical_location.c
+++ b/drivers/base/physical_location.c
@@ -7,6 +7,7 @@
#include <linux/acpi.h>
#include <linux/sysfs.h>
+#include <linux/string_choices.h>
#include "physical_location.h"
@@ -116,7 +117,7 @@ static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%s\n",
- dev->physical_location->dock ? "yes" : "no");
+ str_yes_no(dev->physical_location->dock));
}
static DEVICE_ATTR_RO(dock);
@@ -124,7 +125,7 @@ static ssize_t lid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%s\n",
- dev->physical_location->lid ? "yes" : "no");
+ str_yes_no(dev->physical_location->lid));
}
static DEVICE_ATTR_RO(lid);
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index c060da409ed8..2fd05c1bd30b 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -74,13 +74,30 @@
#define UBLK_PARAM_TYPE_ALL \
(UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD | \
UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED | \
- UBLK_PARAM_TYPE_DMA_ALIGN)
+ UBLK_PARAM_TYPE_DMA_ALIGN | UBLK_PARAM_TYPE_SEGMENT)
struct ublk_rq_data {
struct kref ref;
};
struct ublk_uring_cmd_pdu {
+ /*
+	 * Store requests in the same batch temporarily for queuing them to
+	 * the daemon context.
+	 *
+	 * This could have been stored in the request payload, but we want
+	 * to avoid the extra pre-allocation, and the uring_cmd payload is
+	 * always free for us.
+ */
+ union {
+ struct request *req;
+ struct request *req_list;
+ };
+
+ /*
+	 * The following two are valid for this cmd's whole lifetime, and
+	 * are set up in the ublk uring_cmd handler
+ */
struct ublk_queue *ubq;
u16 tag;
};
@@ -141,10 +158,8 @@ struct ublk_queue {
unsigned long flags;
struct task_struct *ubq_daemon;
- char *io_cmd_buf;
+ struct ublksrv_io_desc *io_cmd_buf;
- unsigned long io_addr; /* mapped vm address */
- unsigned int max_io_sz;
bool force_abort;
bool timeout;
bool canceling;
@@ -582,6 +597,18 @@ static int ublk_validate_params(const struct ublk_device *ub)
return -EINVAL;
}
+ if (ub->params.types & UBLK_PARAM_TYPE_SEGMENT) {
+ const struct ublk_param_segment *p = &ub->params.seg;
+
+ if (!is_power_of_2(p->seg_boundary_mask + 1))
+ return -EINVAL;
+
+ if (p->seg_boundary_mask + 1 < UBLK_MIN_SEGMENT_SIZE)
+ return -EINVAL;
+ if (p->max_segment_size < UBLK_MIN_SEGMENT_SIZE)
+ return -EINVAL;
+ }
+
return 0;
}
@@ -598,6 +625,11 @@ static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
return ubq->flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY);
}
+static inline bool ublk_need_map_io(const struct ublk_queue *ubq)
+{
+ return !ublk_support_user_copy(ubq);
+}
+
static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
{
/*
@@ -674,11 +706,11 @@ static inline bool ublk_rq_has_data(const struct request *rq)
static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
int tag)
{
- return (struct ublksrv_io_desc *)
- &(ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]);
+ return &ubq->io_cmd_buf[tag];
}
-static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
+static inline struct ublksrv_io_desc *
+ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
{
return ublk_get_queue(ub, q_id)->io_cmd_buf;
}
@@ -925,7 +957,7 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
{
const unsigned int rq_bytes = blk_rq_bytes(req);
- if (ublk_support_user_copy(ubq))
+ if (!ublk_need_map_io(ubq))
return rq_bytes;
/*
@@ -949,7 +981,7 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
{
const unsigned int rq_bytes = blk_rq_bytes(req);
- if (ublk_support_user_copy(ubq))
+ if (!ublk_need_map_io(ubq))
return rq_bytes;
if (ublk_need_unmap_req(req)) {
@@ -1037,7 +1069,7 @@ static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
struct io_uring_cmd *ioucmd)
{
- return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
+ return io_uring_cmd_to_pdu(ioucmd, struct ublk_uring_cmd_pdu);
}
static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
@@ -1155,14 +1187,11 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
blk_mq_end_request(rq, BLK_STS_IOERR);
}
-static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd,
- unsigned int issue_flags)
+static void ublk_dispatch_req(struct ublk_queue *ubq,
+ struct request *req,
+ unsigned int issue_flags)
{
- struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
- struct ublk_queue *ubq = pdu->ubq;
- int tag = pdu->tag;
- struct request *req = blk_mq_tag_to_rq(
- ubq->dev->tag_set.tags[ubq->q_id], tag);
+ int tag = req->tag;
struct ublk_io *io = &ubq->ios[tag];
unsigned int mapped_bytes;
@@ -1237,11 +1266,49 @@ static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd,
ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
}
+static void ublk_cmd_tw_cb(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+ struct ublk_queue *ubq = pdu->ubq;
+
+ ublk_dispatch_req(ubq, pdu->req, issue_flags);
+}
+
static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
{
- struct ublk_io *io = &ubq->ios[rq->tag];
+ struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd;
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+
+ pdu->req = rq;
+ io_uring_cmd_complete_in_task(cmd, ublk_cmd_tw_cb);
+}
+
+static void ublk_cmd_list_tw_cb(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+ struct request *rq = pdu->req_list;
+ struct ublk_queue *ubq = pdu->ubq;
+ struct request *next;
+
+ do {
+ next = rq->rq_next;
+ rq->rq_next = NULL;
+ ublk_dispatch_req(ubq, rq, issue_flags);
+ rq = next;
+ } while (rq);
+}
+
+static void ublk_queue_cmd_list(struct ublk_queue *ubq, struct rq_list *l)
+{
+ struct request *rq = rq_list_peek(l);
+ struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd;
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
- io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
+ pdu->req_list = rq;
+ rq_list_init(l);
+ io_uring_cmd_complete_in_task(cmd, ublk_cmd_list_tw_cb);
}
static enum blk_eh_timer_return ublk_timeout(struct request *rq)
@@ -1282,21 +1349,12 @@ static enum blk_eh_timer_return ublk_timeout(struct request *rq)
return BLK_EH_RESET_TIMER;
}
-static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
+static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq)
{
- struct ublk_queue *ubq = hctx->driver_data;
- struct request *rq = bd->rq;
blk_status_t res;
- if (unlikely(ubq->fail_io)) {
+ if (unlikely(ubq->fail_io))
return BLK_STS_TARGET;
- }
-
- /* fill iod to slot in io cmd buffer */
- res = ublk_setup_iod(ubq, rq);
- if (unlikely(res != BLK_STS_OK))
- return BLK_STS_IOERR;
/* With recovery feature enabled, force_abort is set in
* ublk_stop_dev() before calling del_gendisk(). We have to
@@ -1310,17 +1368,68 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
return BLK_STS_IOERR;
+ if (unlikely(ubq->canceling))
+ return BLK_STS_IOERR;
+
+ /* fill iod to slot in io cmd buffer */
+ res = ublk_setup_iod(ubq, rq);
+ if (unlikely(res != BLK_STS_OK))
+ return BLK_STS_IOERR;
+
+ blk_mq_start_request(rq);
+ return BLK_STS_OK;
+}
+
+static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct ublk_queue *ubq = hctx->driver_data;
+ struct request *rq = bd->rq;
+ blk_status_t res;
+
+ res = ublk_prep_req(ubq, rq);
+ if (res != BLK_STS_OK)
+ return res;
+
+ /*
+ * ->canceling has to be handled after ->force_abort and ->fail_io
+	 * are dealt with, otherwise this request may not be failed in case
+	 * of recovery, and can cause a hang when deleting the disk
+ */
if (unlikely(ubq->canceling)) {
__ublk_abort_rq(ubq, rq);
return BLK_STS_OK;
}
- blk_mq_start_request(bd->rq);
ublk_queue_cmd(ubq, rq);
-
return BLK_STS_OK;
}
+static void ublk_queue_rqs(struct rq_list *rqlist)
+{
+ struct rq_list requeue_list = { };
+ struct rq_list submit_list = { };
+ struct ublk_queue *ubq = NULL;
+ struct request *req;
+
+ while ((req = rq_list_pop(rqlist))) {
+ struct ublk_queue *this_q = req->mq_hctx->driver_data;
+
+ if (ubq && ubq != this_q && !rq_list_empty(&submit_list))
+ ublk_queue_cmd_list(ubq, &submit_list);
+ ubq = this_q;
+
+ if (ublk_prep_req(ubq, req) == BLK_STS_OK)
+ rq_list_add_tail(&submit_list, req);
+ else
+ rq_list_add_tail(&requeue_list, req);
+ }
+
+ if (ubq && !rq_list_empty(&submit_list))
+ ublk_queue_cmd_list(ubq, &submit_list);
+ *rqlist = requeue_list;
+}
+
static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
unsigned int hctx_idx)
{
@@ -1333,6 +1442,7 @@ static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
static const struct blk_mq_ops ublk_mq_ops = {
.queue_rq = ublk_queue_rq,
+ .queue_rqs = ublk_queue_rqs,
.init_hctx = ublk_init_hctx,
.timeout = ublk_timeout,
};
@@ -1446,17 +1556,27 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
}
}
+/* Must be called when queue is frozen */
+static bool ublk_mark_queue_canceling(struct ublk_queue *ubq)
+{
+ bool canceled;
+
+ spin_lock(&ubq->cancel_lock);
+ canceled = ubq->canceling;
+ if (!canceled)
+ ubq->canceling = true;
+ spin_unlock(&ubq->cancel_lock);
+
+ return canceled;
+}
+
static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
{
+ bool was_canceled = ubq->canceling;
struct gendisk *disk;
- spin_lock(&ubq->cancel_lock);
- if (ubq->canceling) {
- spin_unlock(&ubq->cancel_lock);
+ if (was_canceled)
return false;
- }
- ubq->canceling = true;
- spin_unlock(&ubq->cancel_lock);
spin_lock(&ub->lock);
disk = ub->ub_disk;
@@ -1468,14 +1588,23 @@ static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
if (!disk)
return false;
- /* Now we are serialized with ublk_queue_rq() */
+ /*
+ * Now we are serialized with ublk_queue_rq()
+ *
+ * Make sure that ubq->canceling is set when queue is frozen,
+	 * because ublk_queue_rq() has to rely on this flag to avoid
+	 * touching a completed uring_cmd
+ */
blk_mq_quiesce_queue(disk->queue);
- /* abort queue is for making forward progress */
- ublk_abort_queue(ub, ubq);
+ was_canceled = ublk_mark_queue_canceling(ubq);
+ if (!was_canceled) {
+ /* abort queue is for making forward progress */
+ ublk_abort_queue(ub, ubq);
+ }
blk_mq_unquiesce_queue(disk->queue);
put_device(disk_to_dev(disk));
- return true;
+ return !was_canceled;
}
static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io,
@@ -1845,7 +1974,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
goto out;
- if (!ublk_support_user_copy(ubq)) {
+ if (ublk_need_map_io(ubq)) {
/*
* FETCH_RQ has to provide IO buffer if NEED GET
* DATA is not enabled
@@ -1867,7 +1996,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
goto out;
- if (!ublk_support_user_copy(ubq)) {
+ if (ublk_need_map_io(ubq)) {
/*
* COMMIT_AND_FETCH_REQ has to provide IO buffer if
* NEED GET DATA is not enabled or it is Read IO.
@@ -2343,6 +2472,12 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
if (ub->params.types & UBLK_PARAM_TYPE_DMA_ALIGN)
lim.dma_alignment = ub->params.dma.alignment;
+ if (ub->params.types & UBLK_PARAM_TYPE_SEGMENT) {
+ lim.seg_boundary_mask = ub->params.seg.seg_boundary_mask;
+ lim.max_segment_size = ub->params.seg.max_segment_size;
+ lim.max_segments = ub->params.seg.max_segments;
+ }
+
if (wait_for_completion_interruptible(&ub->completion) != 0)
return -EINTR;
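
Note: the new ->queue_rqs() path batches requests per ublk queue: prepared requests stay linked through the uring_cmd pdu (pdu->req_list) and a single task-work callback dispatches the whole run, instead of one io_uring_cmd_complete_in_task() per request. A userspace model of the grouping (struct req and the helpers are illustrative):

	#include <stdio.h>

	struct req {
		int queue_id;
		struct req *next;
	};

	static void dispatch_batch(struct req *head)	/* ublk_cmd_list_tw_cb() */
	{
		for (struct req *r = head, *n; r; r = n) {
			n = r->next;
			r->next = NULL;
			printf("dispatch req on queue %d\n", r->queue_id);
		}
	}

	static void queue_rqs(struct req *list)
	{
		struct req *batch = NULL, **batch_tail = &batch;
		int cur_q = -1;

		while (list) {
			struct req *r = list;

			list = list->next;
			r->next = NULL;

			/* flush when the run switches to another queue */
			if (cur_q != -1 && r->queue_id != cur_q && batch) {
				dispatch_batch(batch);
				batch = NULL;
				batch_tail = &batch;
			}
			cur_q = r->queue_id;

			*batch_tail = r;	/* rq_list_add_tail() stand-in */
			batch_tail = &r->next;
		}
		if (batch)
			dispatch_batch(batch);
	}

	int main(void)
	{
		struct req c = { 1, NULL }, b = { 0, &c }, a = { 0, &b };

		queue_rqs(&a);		/* two dispatches: {a, b} then {c} */
		return 0;
	}
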
diff --git a/drivers/block/zram/backend_zstd.c b/drivers/block/zram/backend_zstd.c
index 1184c0036f44..22c8067536f3 100644
--- a/drivers/block/zram/backend_zstd.c
+++ b/drivers/block/zram/backend_zstd.c
@@ -24,19 +24,10 @@ struct zstd_params {
/*
* For C/D dictionaries we need to provide zstd with zstd_custom_mem,
* which zstd uses internally to allocate/free memory when needed.
- *
- * This means that allocator.customAlloc() can be called from zcomp_compress()
- * under local-lock (per-CPU compression stream), in which case we must use
- * GFP_ATOMIC.
- *
- * Another complication here is that we can be configured as a swap device.
*/
static void *zstd_custom_alloc(void *opaque, size_t size)
{
- if (!preemptible())
- return kvzalloc(size, GFP_ATOMIC);
-
- return kvzalloc(size, __GFP_KSWAPD_RECLAIM | __GFP_NOWARN);
+ return kvzalloc(size, GFP_NOIO | __GFP_NOWARN);
}
static void zstd_custom_free(void *opaque, void *address)
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index bb514403e305..d26a58c67e95 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -6,8 +6,7 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
-#include <linux/cpu.h>
-#include <linux/crypto.h>
+#include <linux/cpuhotplug.h>
#include <linux/vmalloc.h>
#include "zcomp.h"
@@ -46,6 +45,7 @@ static const struct zcomp_ops *backends[] = {
static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
{
comp->ops->destroy_ctx(&zstrm->ctx);
+ vfree(zstrm->local_copy);
vfree(zstrm->buffer);
zstrm->buffer = NULL;
}
@@ -58,12 +58,13 @@ static int zcomp_strm_init(struct zcomp *comp, struct zcomp_strm *zstrm)
if (ret)
return ret;
+ zstrm->local_copy = vzalloc(PAGE_SIZE);
/*
* allocate 2 pages. 1 for compressed data, plus 1 extra for the
* case when compressed size is larger than the original one
*/
zstrm->buffer = vzalloc(2 * PAGE_SIZE);
- if (!zstrm->buffer) {
+ if (!zstrm->buffer || !zstrm->local_copy) {
zcomp_strm_free(comp, zstrm);
return -ENOMEM;
}
@@ -109,13 +110,29 @@ ssize_t zcomp_available_show(const char *comp, char *buf)
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
- local_lock(&comp->stream->lock);
- return this_cpu_ptr(comp->stream);
+ for (;;) {
+ struct zcomp_strm *zstrm = raw_cpu_ptr(comp->stream);
+
+ /*
+ * Inspired by zswap
+ *
+		 * The stream is returned with its ->lock mutex held, which
+		 * prevents cpu_dead() from releasing this stream under us;
+		 * however, there is still a race window between raw_cpu_ptr()
+		 * and mutex_lock(), during which we could have been migrated
+		 * away from a CPU that has already destroyed its stream. If
+		 * so, unlock and retry on the current CPU.
+ */
+ mutex_lock(&zstrm->lock);
+ if (likely(zstrm->buffer))
+ return zstrm;
+ mutex_unlock(&zstrm->lock);
+ }
}
-void zcomp_stream_put(struct zcomp *comp)
+void zcomp_stream_put(struct zcomp_strm *zstrm)
{
- local_unlock(&comp->stream->lock);
+ mutex_unlock(&zstrm->lock);
}
int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
@@ -129,6 +146,7 @@ int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
};
int ret;
+ might_sleep();
ret = comp->ops->compress(comp->params, &zstrm->ctx, &req);
if (!ret)
*dst_len = req.dst_len;
@@ -145,18 +163,16 @@ int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm,
.dst_len = PAGE_SIZE,
};
+ might_sleep();
return comp->ops->decompress(comp->params, &zstrm->ctx, &req);
}
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
{
struct zcomp *comp = hlist_entry(node, struct zcomp, node);
- struct zcomp_strm *zstrm;
+ struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu);
int ret;
- zstrm = per_cpu_ptr(comp->stream, cpu);
- local_lock_init(&zstrm->lock);
-
ret = zcomp_strm_init(comp, zstrm);
if (ret)
pr_err("Can't allocate a compression stream\n");
@@ -166,16 +182,17 @@ int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
struct zcomp *comp = hlist_entry(node, struct zcomp, node);
- struct zcomp_strm *zstrm;
+ struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu);
- zstrm = per_cpu_ptr(comp->stream, cpu);
+ mutex_lock(&zstrm->lock);
zcomp_strm_free(comp, zstrm);
+ mutex_unlock(&zstrm->lock);
return 0;
}
static int zcomp_init(struct zcomp *comp, struct zcomp_params *params)
{
- int ret;
+ int ret, cpu;
comp->stream = alloc_percpu(struct zcomp_strm);
if (!comp->stream)
@@ -186,6 +203,9 @@ static int zcomp_init(struct zcomp *comp, struct zcomp_params *params)
if (ret)
goto cleanup;
+ for_each_possible_cpu(cpu)
+ mutex_init(&per_cpu_ptr(comp->stream, cpu)->lock);
+
ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
if (ret < 0)
goto cleanup;
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index ad5762813842..25339ed1e07e 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -3,7 +3,7 @@
#ifndef _ZCOMP_H_
#define _ZCOMP_H_
-#include <linux/local_lock.h>
+#include <linux/mutex.h>
#define ZCOMP_PARAM_NO_LEVEL INT_MIN
@@ -31,9 +31,11 @@ struct zcomp_ctx {
};
struct zcomp_strm {
- local_lock_t lock;
+ struct mutex lock;
/* compression buffer */
void *buffer;
+ /* local copy of handle memory */
+ void *local_copy;
struct zcomp_ctx ctx;
};
@@ -77,7 +79,7 @@ struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params);
void zcomp_destroy(struct zcomp *comp);
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp);
-void zcomp_stream_put(struct zcomp *comp);
+void zcomp_stream_put(struct zcomp_strm *zstrm);
int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
const void *src, unsigned int *dst_len);
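
Note: with per-CPU streams now guarded by a mutex instead of a local lock, zcomp_stream_get() has to tolerate migration between raw_cpu_ptr() and mutex_lock(): it re-checks ->buffer under the lock and retries on the (new) current CPU if hotplug tore down the old stream. A userspace sketch of the retry loop (pthreads stand in for kernel mutexes; current_cpu() is a made-up helper):

	#include <pthread.h>
	#include <stddef.h>

	struct strm {
		pthread_mutex_t lock;
		void *buffer;	/* NULL once cpu_dead() freed the stream */
	};

	static char buf0, buf1;
	static struct strm streams[2] = {
		{ PTHREAD_MUTEX_INITIALIZER, &buf0 },
		{ PTHREAD_MUTEX_INITIALIZER, &buf1 },
	};

	static unsigned int current_cpu(void) { return 0; }

	static struct strm *stream_get(void)
	{
		for (;;) {
			struct strm *z = &streams[current_cpu()];

			pthread_mutex_lock(&z->lock);
			if (z->buffer)	/* still alive on this CPU */
				return z;
			pthread_mutex_unlock(&z->lock);
			/* migrated off a dying CPU: retry on the current one */
		}
	}

	static void stream_put(struct strm *z)
	{
		pthread_mutex_unlock(&z->lock);
	}

	int main(void)
	{
		stream_put(stream_get());
		return 0;
	}
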
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 9f5020b077c5..fda7d8624889 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -44,6 +44,8 @@ static DEFINE_MUTEX(zram_index_mutex);
static int zram_major;
static const char *default_compressor = CONFIG_ZRAM_DEF_COMP;
+#define ZRAM_MAX_ALGO_NAME_SZ 128
+
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
/*
@@ -58,19 +60,56 @@ static void zram_free_page(struct zram *zram, size_t index);
static int zram_read_from_zspool(struct zram *zram, struct page *page,
u32 index);
-static int zram_slot_trylock(struct zram *zram, u32 index)
+#define slot_dep_map(zram, index) (&(zram)->table[(index)].dep_map)
+
+static void zram_slot_lock_init(struct zram *zram, u32 index)
+{
+ static struct lock_class_key __key;
+
+ lockdep_init_map(slot_dep_map(zram, index), "zram->table[index].lock",
+ &__key, 0);
+}
+
+/*
+ * entry locking rules:
+ *
+ * 1) Lock is exclusive
+ *
+ * 2) lock() function can sleep waiting for the lock
+ *
+ * 3) Lock owner can sleep
+ *
+ * 4) Use TRY lock variant when in atomic context
+ * - must check return value and handle locking failures
+ */
+static __must_check bool zram_slot_trylock(struct zram *zram, u32 index)
{
- return spin_trylock(&zram->table[index].lock);
+ unsigned long *lock = &zram->table[index].flags;
+
+ if (!test_and_set_bit_lock(ZRAM_ENTRY_LOCK, lock)) {
+ mutex_acquire(slot_dep_map(zram, index), 0, 1, _RET_IP_);
+ lock_acquired(slot_dep_map(zram, index), _RET_IP_);
+ return true;
+ }
+
+ return false;
}
static void zram_slot_lock(struct zram *zram, u32 index)
{
- spin_lock(&zram->table[index].lock);
+ unsigned long *lock = &zram->table[index].flags;
+
+ mutex_acquire(slot_dep_map(zram, index), 0, 0, _RET_IP_);
+ wait_on_bit_lock(lock, ZRAM_ENTRY_LOCK, TASK_UNINTERRUPTIBLE);
+ lock_acquired(slot_dep_map(zram, index), _RET_IP_);
}
static void zram_slot_unlock(struct zram *zram, u32 index)
{
- spin_unlock(&zram->table[index].lock);
+ unsigned long *lock = &zram->table[index].flags;
+
+ mutex_release(slot_dep_map(zram, index), _RET_IP_);
+ clear_and_wake_up_bit(ZRAM_ENTRY_LOCK, lock);
}
static inline bool init_done(struct zram *zram)
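
Note: the hunk above replaces the per-entry spinlock with an open-coded sleeping bit lock: ZRAM_ENTRY_LOCK lives in the entry's flags word, trylock is test_and_set_bit_lock(), and contended lockers sleep in wait_on_bit_lock() until clear_and_wake_up_bit() releases them; the lockdep map exists purely for annotation. A userspace model of the bit lock itself (spin-yield instead of sleeping, illustrative only):

	#include <sched.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	#define ENTRY_LOCK_BIT 0

	static _Atomic unsigned long flags;

	static bool slot_trylock(void)
	{
		unsigned long old;

		old = atomic_fetch_or_explicit(&flags, 1UL << ENTRY_LOCK_BIT,
					       memory_order_acquire);
		return !(old & (1UL << ENTRY_LOCK_BIT));
	}

	static void slot_lock(void)
	{
		while (!slot_trylock())
			sched_yield();	/* the kernel version sleeps instead */
	}

	static void slot_unlock(void)
	{
		atomic_fetch_and_explicit(&flags, ~(1UL << ENTRY_LOCK_BIT),
					  memory_order_release);
	}

	int main(void)
	{
		slot_lock();
		slot_unlock();
		return 0;
	}
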
@@ -93,7 +132,6 @@ static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
zram->table[index].handle = handle;
}
-/* flag operations require table entry bit_spin_lock() being held */
static bool zram_test_flag(struct zram *zram, u32 index,
enum zram_pageflags flag)
{
@@ -257,15 +295,24 @@ static void release_pp_ctl(struct zram *zram, struct zram_pp_ctl *ctl)
kfree(ctl);
}
-static void place_pp_slot(struct zram *zram, struct zram_pp_ctl *ctl,
- struct zram_pp_slot *pps)
+static bool place_pp_slot(struct zram *zram, struct zram_pp_ctl *ctl,
+ u32 index)
{
- u32 idx;
+ struct zram_pp_slot *pps;
+ u32 bid;
+
+ pps = kmalloc(sizeof(*pps), GFP_NOIO | __GFP_NOWARN);
+ if (!pps)
+ return false;
+
+ INIT_LIST_HEAD(&pps->entry);
+ pps->index = index;
- idx = zram_get_obj_size(zram, pps->index) / PP_BUCKET_SIZE_RANGE;
- list_add(&pps->entry, &ctl->pp_buckets[idx]);
+ bid = zram_get_obj_size(zram, pps->index) / PP_BUCKET_SIZE_RANGE;
+ list_add(&pps->entry, &ctl->pp_buckets[bid]);
zram_set_flag(zram, pps->index, ZRAM_PP_SLOT);
+ return true;
}
static struct zram_pp_slot *select_pp_slot(struct zram_pp_ctl *ctl)
@@ -699,15 +746,8 @@ static int scan_slots_for_writeback(struct zram *zram, u32 mode,
unsigned long index,
struct zram_pp_ctl *ctl)
{
- struct zram_pp_slot *pps = NULL;
-
for (; nr_pages != 0; index++, nr_pages--) {
- if (!pps)
- pps = kmalloc(sizeof(*pps), GFP_KERNEL);
- if (!pps)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&pps->entry);
+ bool ok = true;
zram_slot_lock(zram, index);
if (!zram_allocated(zram, index))
@@ -727,14 +767,13 @@ static int scan_slots_for_writeback(struct zram *zram, u32 mode,
!zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
goto next;
- pps->index = index;
- place_pp_slot(zram, ctl, pps);
- pps = NULL;
+ ok = place_pp_slot(zram, ctl, index);
next:
zram_slot_unlock(zram, index);
+ if (!ok)
+ break;
}
- kfree(pps);
return 0;
}
@@ -748,7 +787,7 @@ static ssize_t writeback_store(struct device *dev,
unsigned long index = 0;
struct bio bio;
struct bio_vec bio_vec;
- struct page *page;
+ struct page *page = NULL;
ssize_t ret = len;
int mode, err;
unsigned long blk_idx = 0;
@@ -890,8 +929,10 @@ next:
if (blk_idx)
free_block_bdev(zram, blk_idx);
- __free_page(page);
+
release_init_lock:
+ if (page)
+ __free_page(page);
release_pp_ctl(zram, ctl);
atomic_set(&zram->pp_in_progress, 0);
up_read(&zram->init_lock);
@@ -1065,27 +1106,6 @@ static void zram_debugfs_register(struct zram *zram) {};
static void zram_debugfs_unregister(struct zram *zram) {};
#endif
-/*
- * We switched to per-cpu streams and this attr is not needed anymore.
- * However, we will keep it around for some time, because:
- * a) we may revert per-cpu streams in the future
- * b) it's visible to user space and we need to follow our 2 years
- * retirement rule; but we already have a number of 'soon to be
- * altered' attrs, so max_comp_streams need to wait for the next
- * layoff cycle.
- */
-static ssize_t max_comp_streams_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
-}
-
-static ssize_t max_comp_streams_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- return len;
-}
-
static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)
{
/* Do not free statically defined compression algorithms */
@@ -1112,7 +1132,7 @@ static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
size_t sz;
sz = strlen(buf);
- if (sz >= CRYPTO_MAX_ALG_NAME)
+ if (sz >= ZRAM_MAX_ALGO_NAME_SZ)
return -E2BIG;
compressor = kstrdup(buf, GFP_KERNEL);
@@ -1420,9 +1440,8 @@ static ssize_t debug_stat_show(struct device *dev,
down_read(&zram->init_lock);
ret = scnprintf(buf, PAGE_SIZE,
- "version: %d\n%8llu %8llu\n",
+ "version: %d\n0 %8llu\n",
version,
- (u64)atomic64_read(&zram->stats.writestall),
(u64)atomic64_read(&zram->stats.miss_free));
up_read(&zram->init_lock);
@@ -1473,15 +1492,11 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
huge_class_size = zs_huge_class_size(zram->mem_pool);
for (index = 0; index < num_pages; index++)
- spin_lock_init(&zram->table[index].lock);
+ zram_slot_lock_init(zram, index);
+
return true;
}
-/*
- * To protect concurrent access to the same index entry,
- * caller should hold this table index entry's bit_spinlock to
- * indicate this index entry is accessing.
- */
static void zram_free_page(struct zram *zram, size_t index)
{
unsigned long handle;
@@ -1548,11 +1563,11 @@ static int read_incompressible_page(struct zram *zram, struct page *page,
void *src, *dst;
handle = zram_get_handle(zram, index);
- src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
+ src = zs_obj_read_begin(zram->mem_pool, handle, NULL);
dst = kmap_local_page(page);
copy_page(dst, src);
kunmap_local(dst);
- zs_unmap_object(zram->mem_pool, handle);
+ zs_obj_read_end(zram->mem_pool, handle, src);
return 0;
}
@@ -1570,12 +1585,12 @@ static int read_compressed_page(struct zram *zram, struct page *page, u32 index)
prio = zram_get_priority(zram, index);
zstrm = zcomp_stream_get(zram->comps[prio]);
- src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
+ src = zs_obj_read_begin(zram->mem_pool, handle, zstrm->local_copy);
dst = kmap_local_page(page);
ret = zcomp_decompress(zram->comps[prio], zstrm, src, size, dst);
kunmap_local(dst);
- zs_unmap_object(zram->mem_pool, handle);
- zcomp_stream_put(zram->comps[prio]);
+ zs_obj_read_end(zram->mem_pool, handle, src);
+ zcomp_stream_put(zstrm);
return ret;
}
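The zs_obj_read_begin()/zs_obj_read_end() pair used above replaces zs_map_object()/zs_unmap_object(): begin may return a direct pointer into the pool or, for an object crossing a page boundary, a copy placed in the caller-supplied buffer (NULL for whole-page reads, the stream's local_copy for compressed reads). A sketch of the read pattern, with zs_read_into() being a hypothetical helper name:

static void zs_read_into(struct zs_pool *pool, unsigned long handle,
			 void *dst, size_t size, void *local_copy)
{
	/* Returns either a mapping or local_copy filled with the object */
	void *src = zs_obj_read_begin(pool, handle, local_copy);

	memcpy(dst, src, size);
	/* Pass back whatever _begin returned so it can unmap if needed */
	zs_obj_read_end(pool, handle, src);
}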
@@ -1670,7 +1685,7 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
u32 index)
{
unsigned long handle;
- void *src, *dst;
+ void *src;
/*
* This function is called from preemptible context so we don't need
@@ -1678,7 +1693,8 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
* like we do for compressible pages.
*/
handle = zs_malloc(zram->mem_pool, PAGE_SIZE,
- GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE);
+ GFP_NOIO | __GFP_NOWARN |
+ __GFP_HIGHMEM | __GFP_MOVABLE);
if (IS_ERR_VALUE(handle))
return PTR_ERR((void *)handle);
@@ -1687,11 +1703,9 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
return -ENOMEM;
}
- dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
src = kmap_local_page(page);
- memcpy(dst, src, PAGE_SIZE);
+ zs_obj_write(zram->mem_pool, handle, src, PAGE_SIZE);
kunmap_local(src);
- zs_unmap_object(zram->mem_pool, handle);
zram_slot_lock(zram, index);
zram_set_flag(zram, index, ZRAM_HUGE);
@@ -1710,11 +1724,11 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
static int zram_write_page(struct zram *zram, struct page *page, u32 index)
{
int ret = 0;
- unsigned long handle = -ENOMEM;
- unsigned int comp_len = 0;
- void *dst, *mem;
+ unsigned long handle;
+ unsigned int comp_len;
+ void *mem;
struct zcomp_strm *zstrm;
- unsigned long element = 0;
+ unsigned long element;
bool same_filled;
/* First, free memory allocated to this slot (if any) */
@@ -1728,7 +1742,6 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
if (same_filled)
return write_same_filled_page(zram, element, index);
-compress_again:
zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
mem = kmap_local_page(page);
ret = zcomp_compress(zram->comps[ZRAM_PRIMARY_COMP], zstrm,
@@ -1736,59 +1749,32 @@ compress_again:
kunmap_local(mem);
if (unlikely(ret)) {
- zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+ zcomp_stream_put(zstrm);
pr_err("Compression failed! err=%d\n", ret);
- zs_free(zram->mem_pool, handle);
return ret;
}
if (comp_len >= huge_class_size) {
- zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+ zcomp_stream_put(zstrm);
return write_incompressible_page(zram, page, index);
}
- /*
- * handle allocation has 2 paths:
- * a) fast path is executed with preemption disabled (for
- * per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
- * since we can't sleep;
- * b) slow path enables preemption and attempts to allocate
- * the page with __GFP_DIRECT_RECLAIM bit set. we have to
- * put per-cpu compression stream and, thus, to re-do
- * the compression once handle is allocated.
- *
- * if we have a 'non-null' handle here then we are coming
- * from the slow path and handle has already been allocated.
- */
- if (IS_ERR_VALUE(handle))
- handle = zs_malloc(zram->mem_pool, comp_len,
- __GFP_KSWAPD_RECLAIM |
- __GFP_NOWARN |
- __GFP_HIGHMEM |
- __GFP_MOVABLE);
+ handle = zs_malloc(zram->mem_pool, comp_len,
+ GFP_NOIO | __GFP_NOWARN |
+ __GFP_HIGHMEM | __GFP_MOVABLE);
if (IS_ERR_VALUE(handle)) {
- zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
- atomic64_inc(&zram->stats.writestall);
- handle = zs_malloc(zram->mem_pool, comp_len,
- GFP_NOIO | __GFP_HIGHMEM |
- __GFP_MOVABLE);
- if (IS_ERR_VALUE(handle))
- return PTR_ERR((void *)handle);
-
- goto compress_again;
+ zcomp_stream_put(zstrm);
+ return PTR_ERR((void *)handle);
}
if (!zram_can_store_page(zram)) {
- zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+ zcomp_stream_put(zstrm);
zs_free(zram->mem_pool, handle);
return -ENOMEM;
}
- dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
-
- memcpy(dst, zstrm->buffer, comp_len);
- zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
- zs_unmap_object(zram->mem_pool, handle);
+ zs_obj_write(zram->mem_pool, handle, zstrm->buffer, comp_len);
+ zcomp_stream_put(zstrm);
zram_slot_lock(zram, index);
zram_set_handle(zram, index, handle);
@@ -1835,20 +1821,14 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
#define RECOMPRESS_IDLE (1 << 0)
#define RECOMPRESS_HUGE (1 << 1)
-static int scan_slots_for_recompress(struct zram *zram, u32 mode,
+static int scan_slots_for_recompress(struct zram *zram, u32 mode, u32 prio_max,
struct zram_pp_ctl *ctl)
{
unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
- struct zram_pp_slot *pps = NULL;
unsigned long index;
for (index = 0; index < nr_pages; index++) {
- if (!pps)
- pps = kmalloc(sizeof(*pps), GFP_KERNEL);
- if (!pps)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&pps->entry);
+ bool ok = true;
zram_slot_lock(zram, index);
if (!zram_allocated(zram, index))
@@ -1867,14 +1847,17 @@ static int scan_slots_for_recompress(struct zram *zram, u32 mode,
zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
goto next;
- pps->index = index;
- place_pp_slot(zram, ctl, pps);
- pps = NULL;
+ /* Already compressed with same or higher priority */
+ if (zram_get_priority(zram, index) + 1 >= prio_max)
+ goto next;
+
+ ok = place_pp_slot(zram, ctl, index);
next:
zram_slot_unlock(zram, index);
+ if (!ok)
+ break;
}
- kfree(pps);
return 0;
}
@@ -1896,9 +1879,8 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
unsigned int comp_len_new;
unsigned int class_index_old;
unsigned int class_index_new;
- u32 num_recomps = 0;
- void *src, *dst;
- int ret;
+ void *src;
+ int ret = 0;
handle_old = zram_get_handle(zram, index);
if (!handle_old)
@@ -1923,6 +1905,16 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
zram_clear_flag(zram, index, ZRAM_IDLE);
class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old);
+
+ prio = max(prio, zram_get_priority(zram, index) + 1);
+ /*
+ * Recompression slot scan should not select slots that are
+ * already compressed with a higher priority algorithm, but
+ * check here again just in case.
+ */
+ if (prio >= prio_max)
+ return 0;
+
/*
* Iterate the secondary comp algorithms list (in order of priority)
* and try to recompress the page.
@@ -1931,14 +1923,6 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
if (!zram->comps[prio])
continue;
- /*
- * Skip if the object is already re-compressed with a higher
- * priority algorithm (or same algorithm).
- */
- if (prio <= zram_get_priority(zram, index))
- continue;
-
- num_recomps++;
zstrm = zcomp_stream_get(zram->comps[prio]);
src = kmap_local_page(page);
ret = zcomp_compress(zram->comps[prio], zstrm,
@@ -1946,8 +1930,9 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
kunmap_local(src);
if (ret) {
- zcomp_stream_put(zram->comps[prio]);
- return ret;
+ zcomp_stream_put(zstrm);
+ zstrm = NULL;
+ break;
}
class_index_new = zs_lookup_class_index(zram->mem_pool,
@@ -1956,7 +1941,8 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
/* Continue until we make progress */
if (class_index_new >= class_index_old ||
(threshold && comp_len_new >= threshold)) {
- zcomp_stream_put(zram->comps[prio]);
+ zcomp_stream_put(zstrm);
+ zstrm = NULL;
continue;
}
@@ -1965,14 +1951,6 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
}
/*
- * We did not try to recompress, e.g. when we have only one
- * secondary algorithm and the page is already recompressed
- * using that algorithm
- */
- if (!zstrm)
- return 0;
-
- /*
* Decrement the limit (if set) on pages we can recompress, even
* when current recompression was unsuccessful or did not compress
* the page below the threshold, because we still spent resources
@@ -1981,48 +1959,39 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
if (*num_recomp_pages)
*num_recomp_pages -= 1;
- if (class_index_new >= class_index_old) {
+ /* Compression error */
+ if (ret)
+ return ret;
+
+ if (!zstrm) {
/*
* Secondary algorithms failed to re-compress the page
- * in a way that would save memory, mark the object as
- * incompressible so that we will not try to compress
- * it again.
+ * in a way that would save memory.
*
- * We need to make sure that all secondary algorithms have
- * failed, so we test if the number of recompressions matches
- * the number of active secondary algorithms.
+ * Mark the object incompressible if the max-priority
+ * algorithm couldn't re-compress it.
*/
- if (num_recomps == zram->num_active_comps - 1)
- zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
+ if (prio < zram->num_active_comps)
+ return 0;
+ zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
return 0;
}
- /* Successful recompression but above threshold */
- if (threshold && comp_len_new >= threshold)
- return 0;
-
/*
- * No direct reclaim (slow path) for handle allocation and no
- * re-compression attempt (unlike in zram_write_bvec()) since
- * we already have stored that object in zsmalloc. If we cannot
- * alloc memory for recompressed object then we bail out and
- * simply keep the old (existing) object in zsmalloc.
+ * We hold the per-CPU stream mutex and the entry lock, so it is
+ * better to avoid direct reclaim. An allocation error is not
+ * fatal since we still have the old object in the mem_pool.
*/
handle_new = zs_malloc(zram->mem_pool, comp_len_new,
- __GFP_KSWAPD_RECLAIM |
- __GFP_NOWARN |
- __GFP_HIGHMEM |
- __GFP_MOVABLE);
+ GFP_NOIO | __GFP_NOWARN |
+ __GFP_HIGHMEM | __GFP_MOVABLE);
if (IS_ERR_VALUE(handle_new)) {
- zcomp_stream_put(zram->comps[prio]);
+ zcomp_stream_put(zstrm);
return PTR_ERR((void *)handle_new);
}
- dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO);
- memcpy(dst, zstrm->buffer, comp_len_new);
- zcomp_stream_put(zram->comps[prio]);
-
- zs_unmap_object(zram->mem_pool, handle_new);
+ zs_obj_write(zram->mem_pool, handle_new, zstrm->buffer, comp_len_new);
+ zcomp_stream_put(zstrm);
zram_free_page(zram, index);
zram_set_handle(zram, index, handle_new);
@@ -2039,16 +2008,19 @@ static ssize_t recompress_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- u32 prio = ZRAM_SECONDARY_COMP, prio_max = ZRAM_MAX_COMPS;
struct zram *zram = dev_to_zram(dev);
char *args, *param, *val, *algo = NULL;
u64 num_recomp_pages = ULLONG_MAX;
struct zram_pp_ctl *ctl = NULL;
struct zram_pp_slot *pps;
u32 mode = 0, threshold = 0;
- struct page *page;
+ u32 prio, prio_max;
+ struct page *page = NULL;
ssize_t ret;
+ prio = ZRAM_SECONDARY_COMP;
+ prio_max = zram->num_active_comps;
+
args = skip_spaces(buf);
while (*args) {
args = next_arg(args, &param, &val);
@@ -2101,7 +2073,7 @@ static ssize_t recompress_store(struct device *dev,
if (prio == ZRAM_PRIMARY_COMP)
prio = ZRAM_SECONDARY_COMP;
- prio_max = min(prio + 1, ZRAM_MAX_COMPS);
+ prio_max = prio + 1;
continue;
}
}
@@ -2129,7 +2101,7 @@ static ssize_t recompress_store(struct device *dev,
continue;
if (!strcmp(zram->comp_algs[prio], algo)) {
- prio_max = min(prio + 1, ZRAM_MAX_COMPS);
+ prio_max = prio + 1;
found = true;
break;
}
@@ -2141,6 +2113,12 @@ static ssize_t recompress_store(struct device *dev,
}
}
+ prio_max = min(prio_max, (u32)zram->num_active_comps);
+ if (prio >= prio_max) {
+ ret = -EINVAL;
+ goto release_init_lock;
+ }
+
page = alloc_page(GFP_KERNEL);
if (!page) {
ret = -ENOMEM;
@@ -2153,7 +2131,7 @@ static ssize_t recompress_store(struct device *dev,
goto release_init_lock;
}
- scan_slots_for_recompress(zram, mode, ctl);
+ scan_slots_for_recompress(zram, mode, prio_max, ctl);
ret = len;
while ((pps = select_pp_slot(ctl))) {
@@ -2181,9 +2159,9 @@ next:
cond_resched();
}
- __free_page(page);
-
release_init_lock:
+ if (page)
+ __free_page(page);
release_pp_ctl(zram, ctl);
atomic_set(&zram->pp_in_progress, 0);
up_read(&zram->init_lock);
@@ -2506,7 +2484,6 @@ static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_WO(idle);
-static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RW(backing_dev);
@@ -2528,7 +2505,6 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_mem_limit.attr,
&dev_attr_mem_used_max.attr,
&dev_attr_idle.attr,
- &dev_attr_max_comp_streams.attr,
&dev_attr_comp_algorithm.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
&dev_attr_backing_dev.attr,
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index db78d7c01b9a..6cee93f9c0d0 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -17,7 +17,6 @@
#include <linux/rwsem.h>
#include <linux/zsmalloc.h>
-#include <linux/crypto.h>
#include "zcomp.h"
@@ -28,7 +27,6 @@
#define ZRAM_SECTOR_PER_LOGICAL_BLOCK \
(1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT))
-
/*
* ZRAM is mainly used for memory efficiency so we want to keep memory
* footprint small and thus squeeze size and zram pageflags into a flags
@@ -46,6 +44,7 @@
/* Flags for zram pages (table[page_no].flags) */
enum zram_pageflags {
ZRAM_SAME = ZRAM_FLAG_SHIFT, /* Page consists of the same element */
+ ZRAM_ENTRY_LOCK, /* entry access lock bit */
ZRAM_WB, /* page is stored on backing_device */
ZRAM_PP_SLOT, /* Selected for post-processing */
ZRAM_HUGE, /* Incompressible page */
@@ -58,16 +57,19 @@ enum zram_pageflags {
__NR_ZRAM_PAGEFLAGS,
};
-/*-- Data structures */
-
-/* Allocated for each disk page */
+/*
+ * Allocated for each disk page. We use a bit-lock (the ZRAM_ENTRY_LOCK
+ * bit of flags) to save memory: there can be plenty of entries, and
+ * standard locking primitives (e.g. a mutex) would significantly
+ * increase the sizeof() of each entry and hence of the meta table.
+ */
struct zram_table_entry {
unsigned long handle;
- unsigned int flags;
- spinlock_t lock;
+ unsigned long flags;
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
ktime_t ac_time;
#endif
+ struct lockdep_map dep_map;
};
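A sketch of how a flags-bit entry lock of this kind is typically built; the helper names follow the hunks above, the bodies are assumptions, and the lockdep_map wiring that zram_slot_lock_init() performs is omitted:

static void zram_slot_lock(struct zram *zram, u32 index)
{
	unsigned long *lock = &zram->table[index].flags;

	/* Spin on the lock bit; acquire semantics on success */
	while (test_and_set_bit_lock(ZRAM_ENTRY_LOCK, lock))
		cpu_relax();
}

static void zram_slot_unlock(struct zram *zram, u32 index)
{
	unsigned long *lock = &zram->table[index].flags;

	/* Release semantics pair with test_and_set_bit_lock() */
	clear_bit_unlock(ZRAM_ENTRY_LOCK, lock);
}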
struct zram_stats {
@@ -80,7 +82,6 @@ struct zram_stats {
atomic64_t huge_pages_since; /* no. of huge pages since zram set up */
atomic64_t pages_stored; /* no. of pages currently stored */
atomic_long_t max_used_pages; /* no. of maximum pages stored */
- atomic64_t writestall; /* no. of write slow paths */
atomic64_t miss_free; /* no. of missed free */
#ifdef CONFIG_ZRAM_WRITEBACK
atomic64_t bd_count; /* no. of pages in backing device */
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index 4de75674f193..9bb0df43ceef 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -1181,25 +1181,6 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
}
EXPORT_SYMBOL_GPL(mhi_queue_skb);
-int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
- struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
-{
- struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
- mhi_dev->dl_chan;
- struct mhi_buf_info buf_info = { };
-
- buf_info.p_addr = mhi_buf->dma_addr;
- buf_info.cb_buf = mhi_buf;
- buf_info.pre_mapped = true;
- buf_info.len = len;
-
- if (unlikely(mhi_chan->pre_alloc))
- return -EINVAL;
-
- return mhi_queue(mhi_dev, &buf_info, dir, mflags);
-}
-EXPORT_SYMBOL_GPL(mhi_queue_dma);
-
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
struct mhi_buf_info *info, enum mhi_flags flags)
{
@@ -1207,11 +1188,16 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
struct mhi_ring_element *mhi_tre;
struct mhi_buf_info *buf_info;
int eot, eob, chain, bei;
- int ret;
+ int ret = 0;
/* Protect accesses for reading and incrementing WP */
write_lock_bh(&mhi_chan->lock);
+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
+ ret = -ENODEV;
+ goto out;
+ }
+
buf_ring = &mhi_chan->buf_ring;
tre_ring = &mhi_chan->tre_ring;
@@ -1229,10 +1215,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
if (!info->pre_mapped) {
ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
- if (ret) {
- write_unlock_bh(&mhi_chan->lock);
- return ret;
- }
+ if (ret)
+ goto out;
}
eob = !!(flags & MHI_EOB);
@@ -1250,9 +1234,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
mhi_add_ring_element(mhi_cntrl, tre_ring);
mhi_add_ring_element(mhi_cntrl, buf_ring);
+out:
write_unlock_bh(&mhi_chan->lock);
- return 0;
+ return ret;
}
int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 7ffea0f98162..474f1359c997 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -297,6 +297,19 @@ static const struct mhi_pci_dev_info mhi_qcom_qdu100_info = {
.sideband_wake = false,
};
+static const struct mhi_channel_config mhi_qcom_sa8775p_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 2048, 1),
+ MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 2048, 2),
+};
+
+static struct mhi_event_config mhi_qcom_sa8775p_events[] = {
+ /* First ring is the control+data ring */
+ MHI_EVENT_CONFIG_CTRL(0, 64),
+ /* Software channels dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(1, 64),
+ MHI_EVENT_CONFIG_SW_DATA(2, 64),
+};
+
static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
@@ -327,6 +340,15 @@ static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
MHI_EVENT_CONFIG_HW_DATA(5, 2048, 101)
};
+static const struct mhi_controller_config mhi_qcom_sa8775p_config = {
+ .max_channels = 128,
+ .timeout_ms = 8000,
+ .num_channels = ARRAY_SIZE(mhi_qcom_sa8775p_channels),
+ .ch_cfg = mhi_qcom_sa8775p_channels,
+ .num_events = ARRAY_SIZE(mhi_qcom_sa8775p_events),
+ .event_cfg = mhi_qcom_sa8775p_events,
+};
+
static const struct mhi_controller_config modem_qcom_v2_mhiv_config = {
.max_channels = 128,
.timeout_ms = 8000,
@@ -346,6 +368,16 @@ static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
.event_cfg = modem_qcom_v1_mhi_events,
};
+static const struct mhi_pci_dev_info mhi_qcom_sa8775p_info = {
+ .name = "qcom-sa8775p",
+ .edl_trigger = false,
+ .config = &mhi_qcom_sa8775p_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
static const struct mhi_pci_dev_info mhi_qcom_sdx75_info = {
.name = "qcom-sdx75m",
.fw = "qcom/sdx75m/xbl.elf",
@@ -772,6 +804,8 @@ static const struct mhi_pci_dev_info mhi_netprisma_fcun69_info = {
/* Keep the list sorted based on the PID. New VID should be added as the last entry */
static const struct pci_device_id mhi_pci_id_table[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0116),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_sa8775p_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, PCI_VENDOR_ID_QCOM, 0x010c),
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
index 11c0e751f223..2fb27e6f8f88 100644
--- a/drivers/bus/mhi/host/pm.c
+++ b/drivers/bus/mhi/host/pm.c
@@ -1296,20 +1296,6 @@ int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);
-void mhi_device_get(struct mhi_device *mhi_dev)
-{
- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
-
- mhi_dev->dev_wake++;
- read_lock_bh(&mhi_cntrl->pm_lock);
- if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
- mhi_trigger_resume(mhi_cntrl);
-
- mhi_cntrl->wake_get(mhi_cntrl, true);
- read_unlock_bh(&mhi_cntrl->pm_lock);
-}
-EXPORT_SYMBOL_GPL(mhi_device_get);
-
int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 377bebf6c925..6c1d94eda5a2 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -42,7 +42,7 @@
#include <linux/sysfs.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
#include <asm/io.h> /* inb/outb */
#include <linux/uaccess.h>
@@ -742,7 +742,7 @@ static ssize_t store_reset (struct device *d,
static DEVICE_ATTR(reset, (S_IWUSR|S_IWGRP), NULL, store_reset);
-static struct attribute *tlclk_sysfs_entries[] = {
+static struct attribute *tlclk_attrs[] = {
&dev_attr_current_ref.attr,
&dev_attr_telclock_version.attr,
&dev_attr_alarms.attr,
@@ -766,13 +766,9 @@ static struct attribute *tlclk_sysfs_entries[] = {
&dev_attr_reset.attr,
NULL
};
+ATTRIBUTE_GROUPS(tlclk);
-static const struct attribute_group tlclk_attribute_group = {
- .name = NULL, /* put in device directory */
- .attrs = tlclk_sysfs_entries,
-};
-
-static struct platform_device *tlclk_device;
+static struct faux_device *tlclk_device;
static int __init tlclk_init(void)
{
@@ -817,24 +813,13 @@ static int __init tlclk_init(void)
goto out3;
}
- tlclk_device = platform_device_register_simple("telco_clock",
- -1, NULL, 0);
- if (IS_ERR(tlclk_device)) {
- printk(KERN_ERR "tlclk: platform_device_register failed.\n");
- ret = PTR_ERR(tlclk_device);
+ tlclk_device = faux_device_create_with_groups("telco_clock", NULL, NULL, tlclk_groups);
+ if (!tlclk_device) {
+ ret = -ENODEV;
goto out4;
}
- ret = sysfs_create_group(&tlclk_device->dev.kobj,
- &tlclk_attribute_group);
- if (ret) {
- printk(KERN_ERR "tlclk: failed to create sysfs device attributes.\n");
- goto out5;
- }
-
return 0;
-out5:
- platform_device_unregister(tlclk_device);
out4:
misc_deregister(&tlclk_miscdev);
out3:
@@ -848,8 +833,7 @@ out1:
static void __exit tlclk_cleanup(void)
{
- sysfs_remove_group(&tlclk_device->dev.kobj, &tlclk_attribute_group);
- platform_device_unregister(tlclk_device);
+ faux_device_destroy(tlclk_device);
misc_deregister(&tlclk_miscdev);
unregister_chrdev(tlclk_major, "telco_clock");
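The conversion above is the entire faux-device pattern: one call creates the device and attaches the sysfs groups, one call tears it down. A minimal sketch for a hypothetical module (demo_groups stands in for an ATTRIBUTE_GROUPS() result, like tlclk_groups above):

#include <linux/device/faux.h>

static struct faux_device *demo_dev;

static int __init demo_init(void)
{
	/* NULL parent and NULL ops are allowed; groups may be NULL too */
	demo_dev = faux_device_create_with_groups("demo", NULL, NULL,
						  demo_groups);
	if (!demo_dev)
		return -ENODEV;
	return 0;
}

static void __exit demo_exit(void)
{
	faux_device_destroy(demo_dev);
}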
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 18f92dd44d45..5f04951d0dd4 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -26,6 +26,7 @@
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
+#include <linux/string_choices.h>
#include "../tty/hvc/hvc_console.h"
#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
@@ -1269,8 +1270,7 @@ static int port_debugfs_show(struct seq_file *s, void *data)
seq_printf(s, "bytes_sent: %lu\n", port->stats.bytes_sent);
seq_printf(s, "bytes_received: %lu\n", port->stats.bytes_received);
seq_printf(s, "bytes_discarded: %lu\n", port->stats.bytes_discarded);
- seq_printf(s, "is_console: %s\n",
- is_console_port(port) ? "yes" : "no");
+ seq_printf(s, "is_console: %s\n", str_yes_no(is_console_port(port)));
seq_printf(s, "console_vtermno: %u\n", port->cons.vtermno);
return 0;
@@ -1321,7 +1321,6 @@ static void send_sigio_to_port(struct port *port)
static int add_port(struct ports_device *portdev, u32 id)
{
- char debugfs_name[16];
struct port *port;
dev_t devt;
int err;
@@ -1424,9 +1423,7 @@ static int add_port(struct ports_device *portdev, u32 id)
* Finally, create the debugfs file that we can use to
* inspect a port's state at any time
*/
- snprintf(debugfs_name, sizeof(debugfs_name), "vport%up%u",
- port->portdev->vdev->index, id);
- port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
+ port->debugfs_file = debugfs_create_file(dev_name(port->dev), 0444,
pdrvdata.debugfs_dir,
port, &port_debugfs_fops);
return 0;
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 2f83fb97c6fb..e0bede6350e1 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -153,7 +153,7 @@ struct clk_lookup_alloc {
char con_id[MAX_CON_ID];
};
-static struct clk_lookup * __ref
+static __printf(3, 0) struct clk_lookup * __ref
vclkdev_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
va_list ap)
{
@@ -215,7 +215,7 @@ fail:
return &cla->cl;
}
-static struct clk_lookup *
+static __printf(3, 0) struct clk_lookup *
vclkdev_create(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
va_list ap)
{
@@ -303,9 +303,8 @@ void clkdev_drop(struct clk_lookup *cl)
}
EXPORT_SYMBOL(clkdev_drop);
-static struct clk_lookup *__clk_register_clkdev(struct clk_hw *hw,
- const char *con_id,
- const char *dev_id, ...)
+static __printf(3, 4) struct clk_lookup *
+__clk_register_clkdev(struct clk_hw *hw, const char *con_id, const char *dev_id, ...)
{
struct clk_lookup *cl;
va_list ap;
diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
index 2f096a5b973d..1de3c50b9804 100644
--- a/drivers/counter/microchip-tcb-capture.c
+++ b/drivers/counter/microchip-tcb-capture.c
@@ -6,18 +6,24 @@
*/
#include <linux/clk.h>
#include <linux/counter.h>
+#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <uapi/linux/counter/microchip-tcb-capture.h>
#include <soc/at91/atmel_tcb.h>
#define ATMEL_TC_CMR_MASK (ATMEL_TC_LDRA_RISING | ATMEL_TC_LDRB_FALLING | \
ATMEL_TC_ETRGEDG_RISING | ATMEL_TC_LDBDIS | \
ATMEL_TC_LDBSTOP)
+#define ATMEL_TC_DEF_IRQS (ATMEL_TC_ETRGS | ATMEL_TC_COVFS | \
+ ATMEL_TC_LDRAS | ATMEL_TC_LDRBS | ATMEL_TC_CPCS)
+
#define ATMEL_TC_QDEN BIT(8)
#define ATMEL_TC_POSEN BIT(9)
@@ -247,6 +253,90 @@ static int mchp_tc_count_read(struct counter_device *counter,
return 0;
}
+static int mchp_tc_count_cap_read(struct counter_device *counter,
+ struct counter_count *count, size_t idx, u64 *val)
+{
+ struct mchp_tc_data *const priv = counter_priv(counter);
+ u32 cnt;
+ int ret;
+
+ switch (idx) {
+ case COUNTER_MCHP_EXCAP_RA:
+ ret = regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], RA), &cnt);
+ break;
+ case COUNTER_MCHP_EXCAP_RB:
+ ret = regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], RB), &cnt);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ *val = cnt;
+
+ return 0;
+}
+
+static int mchp_tc_count_cap_write(struct counter_device *counter,
+ struct counter_count *count, size_t idx, u64 val)
+{
+ struct mchp_tc_data *const priv = counter_priv(counter);
+ int ret;
+
+ if (val > U32_MAX)
+ return -ERANGE;
+
+ switch (idx) {
+ case COUNTER_MCHP_EXCAP_RA:
+ ret = regmap_write(priv->regmap, ATMEL_TC_REG(priv->channel[0], RA), val);
+ break;
+ case COUNTER_MCHP_EXCAP_RB:
+ ret = regmap_write(priv->regmap, ATMEL_TC_REG(priv->channel[0], RB), val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int mchp_tc_count_compare_read(struct counter_device *counter, struct counter_count *count,
+ u64 *val)
+{
+ struct mchp_tc_data *const priv = counter_priv(counter);
+ u32 cnt;
+ int ret;
+
+ ret = regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], RC), &cnt);
+ if (ret < 0)
+ return ret;
+
+ *val = cnt;
+
+ return 0;
+}
+
+static int mchp_tc_count_compare_write(struct counter_device *counter, struct counter_count *count,
+ u64 val)
+{
+ struct mchp_tc_data *const priv = counter_priv(counter);
+
+ if (val > U32_MAX)
+ return -ERANGE;
+
+ return regmap_write(priv->regmap, ATMEL_TC_REG(priv->channel[0], RC), val);
+}
+
+static DEFINE_COUNTER_ARRAY_CAPTURE(mchp_tc_cnt_cap_array, 2);
+
+static struct counter_comp mchp_tc_count_ext[] = {
+ COUNTER_COMP_ARRAY_CAPTURE(mchp_tc_count_cap_read, mchp_tc_count_cap_write,
+ mchp_tc_cnt_cap_array),
+ COUNTER_COMP_COMPARE(mchp_tc_count_compare_read, mchp_tc_count_compare_write),
+};
+
static struct counter_count mchp_tc_counts[] = {
{
.id = 0,
@@ -255,6 +345,8 @@ static struct counter_count mchp_tc_counts[] = {
.num_functions = ARRAY_SIZE(mchp_tc_count_functions),
.synapses = mchp_tc_count_synapses,
.num_synapses = ARRAY_SIZE(mchp_tc_count_synapses),
+ .ext = mchp_tc_count_ext,
+ .num_ext = ARRAY_SIZE(mchp_tc_count_ext),
},
};
@@ -294,6 +386,65 @@ static const struct of_device_id atmel_tc_of_match[] = {
{ /* sentinel */ }
};
+static irqreturn_t mchp_tc_isr(int irq, void *dev_id)
+{
+ struct counter_device *const counter = dev_id;
+ struct mchp_tc_data *const priv = counter_priv(counter);
+ u32 sr, mask;
+
+ regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], SR), &sr);
+ regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], IMR), &mask);
+
+ sr &= mask;
+ if (!(sr & ATMEL_TC_ALL_IRQ))
+ return IRQ_NONE;
+
+ if (sr & ATMEL_TC_ETRGS)
+ counter_push_event(counter, COUNTER_EVENT_CHANGE_OF_STATE,
+ COUNTER_MCHP_EVCHN_CV);
+ if (sr & ATMEL_TC_LDRAS)
+ counter_push_event(counter, COUNTER_EVENT_CAPTURE,
+ COUNTER_MCHP_EVCHN_RA);
+ if (sr & ATMEL_TC_LDRBS)
+ counter_push_event(counter, COUNTER_EVENT_CAPTURE,
+ COUNTER_MCHP_EVCHN_RB);
+ if (sr & ATMEL_TC_CPCS)
+ counter_push_event(counter, COUNTER_EVENT_THRESHOLD,
+ COUNTER_MCHP_EVCHN_RC);
+ if (sr & ATMEL_TC_COVFS)
+ counter_push_event(counter, COUNTER_EVENT_OVERFLOW,
+ COUNTER_MCHP_EVCHN_CV);
+
+ return IRQ_HANDLED;
+}
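With the ISR above pushing capture events, userspace can consume them through the generic counter character device. A hedged sketch using the counter uAPI (the device node and channel number are assumptions; the channel follows the COUNTER_MCHP_EVCHN_* values used in this patch):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/counter.h>

int main(void)
{
	struct counter_watch watch = {
		.component.type = COUNTER_COMPONENT_NONE,
		.event = COUNTER_EVENT_CAPTURE,
		.channel = 1,	/* assumed: RA capture channel */
	};
	struct counter_event event;
	int fd = open("/dev/counter0", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, COUNTER_ADD_WATCH_IOCTL, &watch) < 0 ||
	    ioctl(fd, COUNTER_ENABLE_EVENTS_IOCTL) < 0)
		return 1;
	while (read(fd, &event, sizeof(event)) == sizeof(event))
		printf("capture event, ts=%llu ns\n",
		       (unsigned long long)event.timestamp);
	return 0;
}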
+
+static void mchp_tc_irq_remove(void *ptr)
+{
+ struct mchp_tc_data *priv = ptr;
+
+ regmap_write(priv->regmap, ATMEL_TC_REG(priv->channel[0], IDR), ATMEL_TC_DEF_IRQS);
+}
+
+static int mchp_tc_irq_enable(struct counter_device *const counter, int irq)
+{
+ struct mchp_tc_data *const priv = counter_priv(counter);
+ int ret = devm_request_irq(counter->parent, irq, mchp_tc_isr, 0,
+ dev_name(counter->parent), counter);
+
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(priv->regmap, ATMEL_TC_REG(priv->channel[0], IER), ATMEL_TC_DEF_IRQS);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(counter->parent, mchp_tc_irq_remove, priv);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static void mchp_tc_clk_remove(void *ptr)
{
clk_disable_unprepare((struct clk *)ptr);
@@ -368,6 +519,25 @@ static int mchp_tc_probe(struct platform_device *pdev)
channel);
}
+ /* Disable the quadrature decoder and position measurement */
+ ret = regmap_update_bits(regmap, ATMEL_TC_BMR, ATMEL_TC_QDEN | ATMEL_TC_POSEN, 0);
+ if (ret)
+ return ret;
+
+ /* Set up the period capture mode */
+ ret = regmap_update_bits(regmap, ATMEL_TC_REG(priv->channel[0], CMR),
+ ATMEL_TC_WAVE | ATMEL_TC_ABETRG | ATMEL_TC_CMR_MASK |
+ ATMEL_TC_TCCLKS,
+ ATMEL_TC_CMR_MASK);
+ if (ret)
+ return ret;
+
+ /* Enable clock and trigger counter */
+ ret = regmap_write(regmap, ATMEL_TC_REG(priv->channel[0], CCR),
+ ATMEL_TC_CLKEN | ATMEL_TC_SWTRG);
+ if (ret)
+ return ret;
+
priv->tc_cfg = tcb_config;
priv->regmap = regmap;
counter->name = dev_name(&pdev->dev);
@@ -378,6 +548,15 @@ static int mchp_tc_probe(struct platform_device *pdev)
counter->num_signals = ARRAY_SIZE(mchp_tc_count_signals);
counter->signals = mchp_tc_count_signals;
+ i = of_irq_get(np->parent, 0);
+ if (i == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (i > 0) {
+ ret = mchp_tc_irq_enable(counter, i);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to set up IRQ");
+ }
+
ret = devm_counter_add(&pdev->dev, counter);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "Failed to add counter\n");
diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c
index cf73f65baf60..b249c8647639 100644
--- a/drivers/counter/stm32-lptimer-cnt.c
+++ b/drivers/counter/stm32-lptimer-cnt.c
@@ -58,37 +58,43 @@ static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv,
return 0;
}
+ ret = clk_enable(priv->clk);
+ if (ret)
+ goto disable_cnt;
+
/* LP timer must be enabled before writing CMP & ARR */
ret = regmap_write(priv->regmap, STM32_LPTIM_ARR, priv->ceiling);
if (ret)
- return ret;
+ goto disable_clk;
ret = regmap_write(priv->regmap, STM32_LPTIM_CMP, 0);
if (ret)
- return ret;
+ goto disable_clk;
/* ensure CMP & ARR registers are properly written */
ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val,
(val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK,
100, 1000);
if (ret)
- return ret;
+ goto disable_clk;
ret = regmap_write(priv->regmap, STM32_LPTIM_ICR,
STM32_LPTIM_CMPOKCF_ARROKCF);
if (ret)
- return ret;
+ goto disable_clk;
- ret = clk_enable(priv->clk);
- if (ret) {
- regmap_write(priv->regmap, STM32_LPTIM_CR, 0);
- return ret;
- }
priv->enabled = true;
/* Start LP timer in continuous mode */
return regmap_update_bits(priv->regmap, STM32_LPTIM_CR,
STM32_LPTIM_CNTSTRT, STM32_LPTIM_CNTSTRT);
+
+disable_clk:
+ clk_disable(priv->clk);
+disable_cnt:
+ regmap_write(priv->regmap, STM32_LPTIM_CR, 0);
+
+ return ret;
}
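The reorder above follows the usual acquire-in-order, unwind-in-reverse shape: the LP timer clock must now be running before CMP/ARR are written, so every later failure unwinds both the clock and the counter enable. Schematically (illustrative names, not this driver's code):

static int demo_enable(struct demo_priv *priv)
{
	int ret;

	ret = regmap_write(priv->regmap, DEMO_CR, DEMO_ENABLE);
	if (ret)
		return ret;

	/* Clock must be on before touching the timer registers */
	ret = clk_enable(priv->clk);
	if (ret)
		goto disable_cnt;

	ret = regmap_write(priv->regmap, DEMO_ARR, priv->ceiling);
	if (ret)
		goto disable_clk;

	return 0;

disable_clk:
	clk_disable(priv->clk);
disable_cnt:
	regmap_write(priv->regmap, DEMO_CR, 0);
	return ret;
}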
static int stm32_lptim_setup(struct stm32_lptim_cnt *priv, int enable)
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
index bc586eff0dae..d21c157e531a 100644
--- a/drivers/counter/ti-eqep.c
+++ b/drivers/counter/ti-eqep.c
@@ -107,6 +107,15 @@
#define QCLR_PCE BIT(1)
#define QCLR_INT BIT(0)
+#define QEPSTS_UPEVNT BIT(7)
+#define QEPSTS_FDF BIT(6)
+#define QEPSTS_QDF BIT(5)
+#define QEPSTS_QDLF BIT(4)
+#define QEPSTS_COEF BIT(3)
+#define QEPSTS_CDEF BIT(2)
+#define QEPSTS_FIMF BIT(1)
+#define QEPSTS_PCEF BIT(0)
+
/* EQEP Inputs */
enum {
TI_EQEP_SIGNAL_QEPA, /* QEPA/XCLK */
@@ -286,6 +295,9 @@ static int ti_eqep_events_configure(struct counter_device *counter)
case COUNTER_EVENT_UNDERFLOW:
qeint |= QEINT_PCU;
break;
+ case COUNTER_EVENT_DIRECTION_CHANGE:
+ qeint |= QEINT_QDC;
+ break;
}
}
@@ -298,6 +310,7 @@ static int ti_eqep_watch_validate(struct counter_device *counter,
switch (watch->event) {
case COUNTER_EVENT_OVERFLOW:
case COUNTER_EVENT_UNDERFLOW:
+ case COUNTER_EVENT_DIRECTION_CHANGE:
if (watch->channel != 0)
return -EINVAL;
@@ -368,11 +381,27 @@ static int ti_eqep_position_enable_write(struct counter_device *counter,
return 0;
}
+static int ti_eqep_direction_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_count_direction *direction)
+{
+ struct ti_eqep_cnt *priv = counter_priv(counter);
+ u32 qepsts;
+
+ regmap_read(priv->regmap16, QEPSTS, &qepsts);
+
+ *direction = (qepsts & QEPSTS_QDF) ? COUNTER_COUNT_DIRECTION_FORWARD
+ : COUNTER_COUNT_DIRECTION_BACKWARD;
+
+ return 0;
+}
+
static struct counter_comp ti_eqep_position_ext[] = {
COUNTER_COMP_CEILING(ti_eqep_position_ceiling_read,
ti_eqep_position_ceiling_write),
COUNTER_COMP_ENABLE(ti_eqep_position_enable_read,
ti_eqep_position_enable_write),
+ COUNTER_COMP_DIRECTION(ti_eqep_direction_read),
};
static struct counter_signal ti_eqep_signals[] = {
@@ -439,6 +468,9 @@ static irqreturn_t ti_eqep_irq_handler(int irq, void *dev_id)
if (qflg & QFLG_PCU)
counter_push_event(counter, COUNTER_EVENT_UNDERFLOW, 0);
+ if (qflg & QFLG_QDC)
+ counter_push_event(counter, COUNTER_EVENT_DIRECTION_CHANGE, 0);
+
regmap_write(priv->regmap16, QCLR, qflg);
return IRQ_HANDLED;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 0cf5a320bb5e..3841c9da6cac 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2809,6 +2809,12 @@ EXPORT_SYMBOL(cpufreq_update_policy);
*/
void cpufreq_update_limits(unsigned int cpu)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy);
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return;
+
if (cpufreq_driver->update_limits)
cpufreq_driver->update_limits(cpu);
else
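The __free(put_cpufreq_policy) annotation above is the scope-based cleanup machinery from <linux/cleanup.h>: the callback named in __free() runs automatically when the variable leaves scope, which is what lets the early return drop the policy reference. The conventional form initializes at the point of declaration; a minimal sketch:

#include <linux/cleanup.h>

void example(unsigned int cpu)
{
	/* Reference is dropped automatically on every return path */
	struct cpufreq_policy *policy __free(put_cpufreq_policy) =
		cpufreq_cpu_get(cpu);

	if (!policy)
		return;

	/* ... use policy ... */
}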
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index 8ac1e9d70eeb..cf1ba673b8c2 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -158,4 +158,8 @@ config CXL_REGION_INVALIDATION_TEST
If unsure, or if this kernel is meant for production environments,
say N.
+config CXL_MCE
+ def_bool y
+ depends on X86_MCE && MEMORY_FAILURE
+
endif
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index b0bfbd9eac9b..086df97a0fcf 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -14,6 +14,9 @@ cxl_core-y += pci.o
cxl_core-y += hdm.o
cxl_core-y += pmu.o
cxl_core-y += cdat.o
+cxl_core-y += ras.o
+cxl_core-y += acpi.o
cxl_core-$(CONFIG_TRACING) += trace.o
cxl_core-$(CONFIG_CXL_REGION) += region.o
+cxl_core-$(CONFIG_CXL_MCE) += mce.o
cxl_core-$(CONFIG_CXL_FEATURES) += features.o
diff --git a/drivers/cxl/core/acpi.c b/drivers/cxl/core/acpi.c
new file mode 100644
index 000000000000..f13b4dae6ac5
--- /dev/null
+++ b/drivers/cxl/core/acpi.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation. All rights reserved. */
+#include <linux/acpi.h>
+#include "cxl.h"
+#include "core.h"
+
+int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res,
+ int nid, resource_size_t *size)
+{
+ return hmat_get_extended_linear_cache_size(backing_res, nid, size);
+}
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index 8153f8d83a16..edb4f41eeacc 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -258,27 +258,29 @@ static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
struct xarray *dsmas_xa)
{
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
struct device *dev = cxlds->dev;
- struct range pmem_range = {
- .start = cxlds->pmem_res.start,
- .end = cxlds->pmem_res.end,
- };
- struct range ram_range = {
- .start = cxlds->ram_res.start,
- .end = cxlds->ram_res.end,
- };
struct dsmas_entry *dent;
unsigned long index;
xa_for_each(dsmas_xa, index, dent) {
- if (resource_size(&cxlds->ram_res) &&
- range_contains(&ram_range, &dent->dpa_range))
- update_perf_entry(dev, dent, &mds->ram_perf);
- else if (resource_size(&cxlds->pmem_res) &&
- range_contains(&pmem_range, &dent->dpa_range))
- update_perf_entry(dev, dent, &mds->pmem_perf);
- else
+ bool found = false;
+
+ for (int i = 0; i < cxlds->nr_partitions; i++) {
+ struct resource *res = &cxlds->part[i].res;
+ struct range range = {
+ .start = res->start,
+ .end = res->end,
+ };
+
+ if (range_contains(&range, &dent->dpa_range)) {
+ update_perf_entry(dev, dent,
+ &cxlds->part[i].perf);
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
dev_dbg(dev, "no partition for dsmas dpa: %pra\n",
&dent->dpa_range);
}
@@ -343,36 +345,46 @@ static int match_cxlrd_hb(struct device *dev, void *data)
return 0;
}
-static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
+static void cxl_qos_class_verify(struct cxl_memdev *cxlmd)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
struct cxl_port *root_port;
- int rc;
struct cxl_root *cxl_root __free(put_cxl_root) =
find_cxl_root(cxlmd->endpoint);
+ /*
+ * No need to reset_dpa_perf() here as find_cxl_root() is guaranteed to
+ * succeed when called in the cxl_endpoint_port_probe() path.
+ */
if (!cxl_root)
- return -ENODEV;
+ return;
root_port = &cxl_root->port;
- /* Check that the QTG IDs are all sane between end device and root decoders */
- if (!cxl_qos_match(root_port, &mds->ram_perf))
- reset_dpa_perf(&mds->ram_perf);
- if (!cxl_qos_match(root_port, &mds->pmem_perf))
- reset_dpa_perf(&mds->pmem_perf);
-
- /* Check to make sure that the device's host bridge is under a root decoder */
- rc = device_for_each_child(&root_port->dev,
- cxlmd->endpoint->host_bridge, match_cxlrd_hb);
- if (!rc) {
- reset_dpa_perf(&mds->ram_perf);
- reset_dpa_perf(&mds->pmem_perf);
+ /*
+ * Save userspace from needing to check if a qos class has any matches
+ * by hiding qos class info if the memdev is not mapped by a root
+ * decoder, or if the partition class does not match any root decoder
+ * class.
+ */
+ if (!device_for_each_child(&root_port->dev,
+ cxlmd->endpoint->host_bridge,
+ match_cxlrd_hb)) {
+ for (int i = 0; i < cxlds->nr_partitions; i++) {
+ struct cxl_dpa_perf *perf = &cxlds->part[i].perf;
+
+ reset_dpa_perf(perf);
+ }
+ return;
}
- return rc;
+ for (int i = 0; i < cxlds->nr_partitions; i++) {
+ struct cxl_dpa_perf *perf = &cxlds->part[i].perf;
+
+ if (!cxl_qos_match(root_port, perf))
+ reset_dpa_perf(perf);
+ }
}
static void discard_dsmas(struct xarray *xa)
@@ -570,23 +582,18 @@ static bool dpa_perf_contains(struct cxl_dpa_perf *perf,
return range_contains(&perf->dpa_range, &dpa);
}
-static struct cxl_dpa_perf *cxled_get_dpa_perf(struct cxl_endpoint_decoder *cxled,
- enum cxl_decoder_mode mode)
+static struct cxl_dpa_perf *cxled_get_dpa_perf(struct cxl_endpoint_decoder *cxled)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_dpa_perf *perf;
- switch (mode) {
- case CXL_DECODER_RAM:
- perf = &mds->ram_perf;
- break;
- case CXL_DECODER_PMEM:
- perf = &mds->pmem_perf;
- break;
- default:
+ if (cxled->part < 0)
+ return ERR_PTR(-EINVAL);
+ perf = &cxlds->part[cxled->part].perf;
+
+ if (!perf)
return ERR_PTR(-EINVAL);
- }
if (!dpa_perf_contains(perf, cxled->dpa_res))
return ERR_PTR(-EINVAL);
@@ -647,11 +654,10 @@ static int cxl_endpoint_gather_bandwidth(struct cxl_region *cxlr,
if (cxlds->rcd)
return -ENODEV;
- perf = cxled_get_dpa_perf(cxled, cxlr->mode);
+ perf = cxled_get_dpa_perf(cxled);
if (IS_ERR(perf))
return PTR_ERR(perf);
- gp_port = to_cxl_port(parent_port->dev.parent);
*gp_is_root = is_cxl_root(gp_port);
/*
@@ -1053,7 +1059,7 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
lockdep_assert_held(&cxl_dpa_rwsem);
- perf = cxled_get_dpa_perf(cxled, cxlr->mode);
+ perf = cxled_get_dpa_perf(cxled);
if (IS_ERR(perf))
return;
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 17e99a25c29a..15699299dc11 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -74,8 +74,8 @@ void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
resource_size_t length);
struct dentry *cxl_debugfs_create_dir(const char *dir);
-int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
- enum cxl_decoder_mode mode);
+int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
+ enum cxl_partition_mode mode);
int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size);
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
@@ -117,6 +117,12 @@ bool cxl_need_node_perf_attrs_update(int nid);
int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
struct access_coordinate *c);
+int cxl_ras_init(void);
+void cxl_ras_exit(void);
+int cxl_gpf_port_setup(struct device *dport_dev, struct cxl_port *port);
+int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res,
+ int nid, resource_size_t *size);
+
#ifdef CONFIG_CXL_FEATURES
size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
enum cxl_get_feat_selection selection,
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 50e6a45b30ba..70cae4ebf8a4 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -213,16 +213,46 @@ void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
struct resource *p1, *p2;
- down_read(&cxl_dpa_rwsem);
+ guard(rwsem_read)(&cxl_dpa_rwsem);
for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
__cxl_dpa_debug(file, p1, 0);
for (p2 = p1->child; p2; p2 = p2->sibling)
__cxl_dpa_debug(file, p2, 1);
}
- up_read(&cxl_dpa_rwsem);
}
EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, "CXL");
+/* See request_skip() kernel-doc */
+static resource_size_t __adjust_skip(struct cxl_dev_state *cxlds,
+ const resource_size_t skip_base,
+ const resource_size_t skip_len,
+ const char *requester)
+{
+ const resource_size_t skip_end = skip_base + skip_len - 1;
+
+ for (int i = 0; i < cxlds->nr_partitions; i++) {
+ const struct resource *part_res = &cxlds->part[i].res;
+ resource_size_t adjust_start, adjust_end, size;
+
+ adjust_start = max(skip_base, part_res->start);
+ adjust_end = min(skip_end, part_res->end);
+
+ if (adjust_end < adjust_start)
+ continue;
+
+ size = adjust_end - adjust_start + 1;
+
+ if (!requester)
+ __release_region(&cxlds->dpa_res, adjust_start, size);
+ else if (!__request_region(&cxlds->dpa_res, adjust_start, size,
+ requester, 0))
+ return adjust_start - skip_base;
+ }
+
+ return skip_len;
+}
+#define release_skip(c, b, l) __adjust_skip((c), (b), (l), NULL)
+
/*
* Must be called in a context that synchronizes against this decoder's
* port ->remove() callback (like an endpoint decoder sysfs attribute)
@@ -241,7 +271,7 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
skip_start = res->start - cxled->skip;
__release_region(&cxlds->dpa_res, res->start, resource_size(res));
if (cxled->skip)
- __release_region(&cxlds->dpa_res, skip_start, cxled->skip);
+ release_skip(cxlds, skip_start, cxled->skip);
cxled->skip = 0;
cxled->dpa_res = NULL;
put_device(&cxled->cxld.dev);
@@ -250,9 +280,8 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
static void cxl_dpa_release(void *cxled)
{
- down_write(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_dpa_rwsem);
__cxl_dpa_release(cxled);
- up_write(&cxl_dpa_rwsem);
}
/*
@@ -268,6 +297,58 @@ static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
__cxl_dpa_release(cxled);
}
+/**
+ * request_skip() - Track DPA 'skip' in @cxlds->dpa_res resource tree
+ * @cxlds: CXL.mem device context that parents @cxled
+ * @cxled: Endpoint decoder establishing new allocation that skips lower DPA
+ * @skip_base: DPA < start of new DPA allocation (DPAnew)
+ * @skip_len: @skip_base + @skip_len == DPAnew
+ *
+ * DPA 'skip' arises from out-of-sequence DPA allocation events relative
+ * to free capacity across multiple partitions. It is a wasteful event
+ * as usable DPA gets thrown away, but if a deployment has, for example,
+ * a dual RAM+PMEM device, wants to use PMEM, and has unallocated RAM
+ * DPA, the free RAM DPA must be sacrificed to start allocating PMEM.
+ * See third "Implementation Note" in CXL 3.1 8.2.4.19.13 "Decoder
+ * Protection" for more details.
+ *
+ * A 'skip' always covers the last allocated DPA in a previous partition
+ * to the start of the current partition to allocate. Allocations never
+ * start in the middle of a partition, and allocations are always
+ * de-allocated in reverse order (see cxl_dpa_free(), or natural devm
+ * unwind order from forced in-order allocation).
+ *
+ * If @cxlds->nr_partitions were guaranteed to be <= 2 then the 'skip'
+ * would always be contained to a single partition. Given
+ * @cxlds->nr_partitions may be > 2 it results in cases where the 'skip'
+ * might span "tail capacity of partition[0], all of partition[1], ...,
+ * all of partition[N-1]" to support allocating from partition[N]. That
+ * in turn interacts with the partition 'struct resource' boundaries
+ * within @cxlds->dpa_res whereby 'skip' requests need to be divided by
+ * partition. I.e. this is a quirk of using a 'struct resource' tree to
+ * detect range conflicts while also tracking partition boundaries in
+ * @cxlds->dpa_res.
+ */
+static int request_skip(struct cxl_dev_state *cxlds,
+ struct cxl_endpoint_decoder *cxled,
+ const resource_size_t skip_base,
+ const resource_size_t skip_len)
+{
+ resource_size_t skipped = __adjust_skip(cxlds, skip_base, skip_len,
+ dev_name(&cxled->cxld.dev));
+
+ if (skipped == skip_len)
+ return 0;
+
+ dev_dbg(cxlds->dev,
+ "%s: failed to reserve skipped space (%pa %pa %pa)\n",
+ dev_name(&cxled->cxld.dev), &skip_base, &skip_len, &skipped);
+
+ release_skip(cxlds, skip_base, skipped);
+
+ return -EBUSY;
+}
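The per-partition clamping in __adjust_skip() is plain interval intersection. A standalone illustration of the arithmetic (simplified types, not kernel code):

struct dpa_range {
	unsigned long long start, end;	/* inclusive bounds */
};

/* Length of the overlap between a skip request and one partition */
static unsigned long long overlap_len(struct dpa_range skip,
				      struct dpa_range part)
{
	unsigned long long lo = skip.start > part.start ? skip.start : part.start;
	unsigned long long hi = skip.end < part.end ? skip.end : part.end;

	return hi < lo ? 0 : hi - lo + 1;
}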
+
static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
resource_size_t base, resource_size_t len,
resource_size_t skipped)
@@ -277,6 +358,7 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct device *dev = &port->dev;
struct resource *res;
+ int rc;
lockdep_assert_held_write(&cxl_dpa_rwsem);
@@ -305,14 +387,9 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
}
if (skipped) {
- res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
- dev_name(&cxled->cxld.dev), 0);
- if (!res) {
- dev_dbg(dev,
- "decoder%d.%d: failed to reserve skipped space\n",
- port->id, cxled->cxld.id);
- return -EBUSY;
- }
+ rc = request_skip(cxlds, cxled, base - skipped, skipped);
+ if (rc)
+ return rc;
}
res = __request_region(&cxlds->dpa_res, base, len,
dev_name(&cxled->cxld.dev), 0);
@@ -320,28 +397,117 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
port->id, cxled->cxld.id);
if (skipped)
- __release_region(&cxlds->dpa_res, base - skipped,
- skipped);
+ release_skip(cxlds, base - skipped, skipped);
return -EBUSY;
}
cxled->dpa_res = res;
cxled->skip = skipped;
- if (resource_contains(&cxlds->pmem_res, res))
- cxled->mode = CXL_DECODER_PMEM;
- else if (resource_contains(&cxlds->ram_res, res))
- cxled->mode = CXL_DECODER_RAM;
- else {
- dev_warn(dev, "decoder%d.%d: %pr mixed mode not supported\n",
- port->id, cxled->cxld.id, cxled->dpa_res);
- cxled->mode = CXL_DECODER_MIXED;
- }
+ /*
+ * When allocating new capacity, ->part is already set; when
+ * discovering decoder settings at initial enumeration, ->part
+ * is not yet set.
+ */
+ if (cxled->part < 0)
+ for (int i = 0; i < cxlds->nr_partitions; i++)
+ if (resource_contains(&cxlds->part[i].res, res)) {
+ cxled->part = i;
+ break;
+ }
+
+ if (cxled->part < 0)
+ dev_warn(dev, "decoder%d.%d: %pr does not map any partition\n",
+ port->id, cxled->cxld.id, res);
port->hdm_end++;
get_device(&cxled->cxld.dev);
return 0;
}
+static int add_dpa_res(struct device *dev, struct resource *parent,
+ struct resource *res, resource_size_t start,
+ resource_size_t size, const char *type)
+{
+ int rc;
+
+ *res = (struct resource) {
+ .name = type,
+ .start = start,
+ .end = start + size - 1,
+ .flags = IORESOURCE_MEM,
+ };
+ if (resource_size(res) == 0) {
+ dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
+ return 0;
+ }
+ rc = request_resource(parent, res);
+ if (rc) {
+ dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
+ res, rc);
+ return rc;
+ }
+
+ dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);
+
+ return 0;
+}
+
+static const char *cxl_mode_name(enum cxl_partition_mode mode)
+{
+ switch (mode) {
+ case CXL_PARTMODE_RAM:
+ return "ram";
+ case CXL_PARTMODE_PMEM:
+ return "pmem";
+ default:
+ return "";
+ };
+}
+
+/* if this fails the caller must destroy @cxlds, there is no recovery */
+int cxl_dpa_setup(struct cxl_dev_state *cxlds, const struct cxl_dpa_info *info)
+{
+ struct device *dev = cxlds->dev;
+
+ guard(rwsem_write)(&cxl_dpa_rwsem);
+
+ if (cxlds->nr_partitions)
+ return -EBUSY;
+
+ if (!info->size || !info->nr_partitions) {
+ cxlds->dpa_res = DEFINE_RES_MEM(0, 0);
+ cxlds->nr_partitions = 0;
+ return 0;
+ }
+
+ cxlds->dpa_res = DEFINE_RES_MEM(0, info->size);
+
+ for (int i = 0; i < info->nr_partitions; i++) {
+ const struct cxl_dpa_part_info *part = &info->part[i];
+ int rc;
+
+ cxlds->part[i].perf.qos_class = CXL_QOS_CLASS_INVALID;
+ cxlds->part[i].mode = part->mode;
+
+ /* Require ordered + contiguous partitions */
+ if (i) {
+ const struct cxl_dpa_part_info *prev = &info->part[i - 1];
+
+ if (prev->range.end + 1 != part->range.start)
+ return -EINVAL;
+ }
+ rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->part[i].res,
+ part->range.start, range_len(&part->range),
+ cxl_mode_name(part->mode));
+ if (rc)
+ return rc;
+ cxlds->nr_partitions++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_dpa_setup);
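For illustration, the kind of layout a dual RAM+PMEM device would feed into cxl_dpa_setup(); the partitions must be ordered and contiguous, the sizes here are arbitrary, and only the field names follow this patch:

/* 2 GiB device: first half volatile, second half persistent */
struct cxl_dpa_info info = {
	.size = SZ_2G,
	.nr_partitions = 2,
	.part = {
		{ .range = { .start = 0,     .end = SZ_1G - 1 },
		  .mode = CXL_PARTMODE_RAM },
		{ .range = { .start = SZ_1G, .end = SZ_2G - 1 },
		  .mode = CXL_PARTMODE_PMEM },
	},
};

/* then: rc = cxl_dpa_setup(cxlds, &info); */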
+
int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
resource_size_t base, resource_size_t len,
resource_size_t skipped)
@@ -362,14 +528,11 @@ EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, "CXL");
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
- resource_size_t size = 0;
-
- down_read(&cxl_dpa_rwsem);
+ guard(rwsem_read)(&cxl_dpa_rwsem);
if (cxled->dpa_res)
- size = resource_size(cxled->dpa_res);
- up_read(&cxl_dpa_rwsem);
+ return resource_size(cxled->dpa_res);
- return size;
+ return 0;
}
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
@@ -387,151 +550,136 @@ int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
struct cxl_port *port = cxled_to_port(cxled);
struct device *dev = &cxled->cxld.dev;
- int rc;
- down_write(&cxl_dpa_rwsem);
- if (!cxled->dpa_res) {
- rc = 0;
- goto out;
- }
+ guard(rwsem_write)(&cxl_dpa_rwsem);
+ if (!cxled->dpa_res)
+ return 0;
if (cxled->cxld.region) {
dev_dbg(dev, "decoder assigned to: %s\n",
dev_name(&cxled->cxld.region->dev));
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
dev_dbg(dev, "decoder enabled\n");
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
if (cxled->cxld.id != port->hdm_end) {
dev_dbg(dev, "expected decoder%d.%d\n", port->id,
port->hdm_end);
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
+
devm_cxl_dpa_release(cxled);
- rc = 0;
-out:
- up_write(&cxl_dpa_rwsem);
- return rc;
+ return 0;
}
-int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
- enum cxl_decoder_mode mode)
+int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
+ enum cxl_partition_mode mode)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct device *dev = &cxled->cxld.dev;
-
- switch (mode) {
- case CXL_DECODER_RAM:
- case CXL_DECODER_PMEM:
- break;
- default:
- dev_dbg(dev, "unsupported mode: %d\n", mode);
- return -EINVAL;
- }
+ int part;
guard(rwsem_write)(&cxl_dpa_rwsem);
if (cxled->cxld.flags & CXL_DECODER_F_ENABLE)
return -EBUSY;
- /*
- * Only allow modes that are supported by the current partition
- * configuration
- */
- if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
- dev_dbg(dev, "no available pmem capacity\n");
- return -ENXIO;
+ for (part = 0; part < cxlds->nr_partitions; part++)
+ if (cxlds->part[part].mode == mode)
+ break;
+
+ if (part >= cxlds->nr_partitions) {
+ dev_dbg(dev, "unsupported mode: %d\n", mode);
+ return -EINVAL;
}
- if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
- dev_dbg(dev, "no available ram capacity\n");
+
+ if (!resource_size(&cxlds->part[part].res)) {
+ dev_dbg(dev, "no available capacity for mode: %d\n", mode);
return -ENXIO;
}
- cxled->mode = mode;
+ cxled->part = part;
return 0;
}
-int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
- resource_size_t free_ram_start, free_pmem_start;
- struct cxl_port *port = cxled_to_port(cxled);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct device *dev = &cxled->cxld.dev;
- resource_size_t start, avail, skip;
+ struct resource *res, *prev = NULL;
+ resource_size_t start, avail, skip, skip_start;
struct resource *p, *last;
- int rc;
+ int part;
- down_write(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_dpa_rwsem);
if (cxled->cxld.region) {
dev_dbg(dev, "decoder attached to %s\n",
dev_name(&cxled->cxld.region->dev));
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
dev_dbg(dev, "decoder enabled\n");
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
- for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
- last = p;
- if (last)
- free_ram_start = last->end + 1;
- else
- free_ram_start = cxlds->ram_res.start;
+ part = cxled->part;
+ if (part < 0) {
+ dev_dbg(dev, "partition not set\n");
+ return -EBUSY;
+ }
- for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
+ res = &cxlds->part[part].res;
+ for (p = res->child, last = NULL; p; p = p->sibling)
last = p;
if (last)
- free_pmem_start = last->end + 1;
+ start = last->end + 1;
else
- free_pmem_start = cxlds->pmem_res.start;
-
- if (cxled->mode == CXL_DECODER_RAM) {
- start = free_ram_start;
- avail = cxlds->ram_res.end - start + 1;
- skip = 0;
- } else if (cxled->mode == CXL_DECODER_PMEM) {
- resource_size_t skip_start, skip_end;
-
- start = free_pmem_start;
- avail = cxlds->pmem_res.end - start + 1;
- skip_start = free_ram_start;
+ start = res->start;
- /*
- * If some pmem is already allocated, then that allocation
- * already handled the skip.
- */
- if (cxlds->pmem_res.child &&
- skip_start == cxlds->pmem_res.child->start)
- skip_end = skip_start - 1;
- else
- skip_end = start - 1;
- skip = skip_end - skip_start + 1;
- } else {
- dev_dbg(dev, "mode not set\n");
- rc = -EINVAL;
- goto out;
+ /*
+ * To allocate at partition N, a skip needs to be calculated for all
+ * unallocated space at lower partition indices.
+ *
+ * If a partition has any allocations, the search can end because a
+ * previous cxl_dpa_alloc() invocation is assumed to have accounted for
+ * all previous partitions.
+ */
+ skip_start = CXL_RESOURCE_NONE;
+ for (int i = part; i; i--) {
+ prev = &cxlds->part[i - 1].res;
+ for (p = prev->child, last = NULL; p; p = p->sibling)
+ last = p;
+ if (last) {
+ skip_start = last->end + 1;
+ break;
+ }
+ skip_start = prev->start;
}
+ avail = res->end - start + 1;
+ if (skip_start == CXL_RESOURCE_NONE)
+ skip = 0;
+ else
+ skip = res->start - skip_start;
+
if (size > avail) {
dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
- cxl_decoder_mode_name(cxled->mode), &avail);
- rc = -ENOSPC;
- goto out;
+ res->name, &avail);
+ return -ENOSPC;
}
- rc = __cxl_dpa_reserve(cxled, start, size, skip);
-out:
- up_write(&cxl_dpa_rwsem);
+ return __cxl_dpa_reserve(cxled, start, size, skip);
+}
+
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+{
+ struct cxl_port *port = cxled_to_port(cxled);
+ int rc;
+ rc = __cxl_dpa_alloc(cxled, size);
if (rc)
return rc;
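
A minimal userspace sketch of the skip arithmetic in __cxl_dpa_alloc() above: allocating from partition N must account for all unallocated space in lower partitions, stopping at the first lower partition that already holds an allocation. The harness and helper names (plan_alloc, next_free) are illustrative assumptions, not driver code, and partitions are modeled as front-packed ranges:

#include <stdio.h>

/* Simplified model: each partition tracks [start, end] plus how many
 * bytes have already been allocated from its front. */
struct part {
	unsigned long long start, end;	/* inclusive DPA range */
	unsigned long long allocated;	/* bytes claimed so far */
};

/* Next free DPA in a partition, mirroring the "walk to the last child
 * resource" loop in __cxl_dpa_alloc(). */
static unsigned long long next_free(const struct part *p)
{
	return p->start + p->allocated;
}

/*
 * Compute the allocation start and skip for partition @idx: the skip
 * covers all unallocated space in lower partitions, ending the search
 * at the first lower partition that already has an allocation.
 */
static void plan_alloc(const struct part *parts, int idx,
		       unsigned long long *start, unsigned long long *skip)
{
	unsigned long long skip_start = ~0ULL;

	*start = next_free(&parts[idx]);
	for (int i = idx; i; i--) {
		const struct part *prev = &parts[i - 1];

		skip_start = next_free(prev);
		if (prev->allocated)
			break;
	}
	*skip = (skip_start == ~0ULL) ? 0 : parts[idx].start - skip_start;
}

int main(void)
{
	/* Hypothetical device: 1 GiB ram at DPA 0, 1 GiB pmem after it. */
	struct part parts[] = {
		{ .start = 0,             .end = 0x3fffffffULL, .allocated = 0 },
		{ .start = 0x40000000ULL, .end = 0x7fffffffULL, .allocated = 0 },
	};
	unsigned long long start, skip;

	plan_alloc(parts, 1, &start, &skip); /* pmem alloc, ram still empty */
	printf("start=%#llx skip=%#llx\n", start, skip); /* skip spans all of ram */
	return 0;
}
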
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 78c5346e3e89..d72764056ce6 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -11,6 +11,7 @@
#include "core.h"
#include "trace.h"
+#include "mce.h"
static bool cxl_raw_allow_all;
@@ -900,7 +901,7 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
}
if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) {
- u64 dpa, hpa = ULLONG_MAX;
+ u64 dpa, hpa = ULLONG_MAX, hpa_alias = ULLONG_MAX;
struct cxl_region *cxlr;
/*
@@ -913,14 +914,20 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
dpa = le64_to_cpu(evt->media_hdr.phys_addr) & CXL_DPA_MASK;
cxlr = cxl_dpa_to_region(cxlmd, dpa);
- if (cxlr)
+ if (cxlr) {
+ u64 cache_size = cxlr->params.cache_size;
+
hpa = cxl_dpa_to_hpa(cxlr, cxlmd, dpa);
+ if (cache_size)
+ hpa_alias = hpa - cache_size;
+ }
if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
trace_cxl_general_media(cxlmd, type, cxlr, hpa,
- &evt->gen_media);
+ hpa_alias, &evt->gen_media);
else if (event_type == CXL_CPER_EVENT_DRAM)
- trace_cxl_dram(cxlmd, type, cxlr, hpa, &evt->dram);
+ trace_cxl_dram(cxlmd, type, cxlr, hpa, hpa_alias,
+ &evt->dram);
}
}
EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, "CXL");
@@ -1126,10 +1133,6 @@ static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
mds->active_persistent_bytes =
le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
- mds->next_volatile_bytes =
- le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
- mds->next_persistent_bytes =
- le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
return 0;
}
@@ -1251,74 +1254,54 @@ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
{
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_port *endpoint;
- int rc;
/* synchronize with cxl_mem_probe() and decoder write operations */
guard(device)(&cxlmd->dev);
endpoint = cxlmd->endpoint;
- down_read(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_region_rwsem);
/*
* Require an endpoint to be safe otherwise the driver can not
* be sure that the device is unmapped.
*/
if (endpoint && cxl_num_decoders_committed(endpoint) == 0)
- rc = __cxl_mem_sanitize(mds, cmd);
- else
- rc = -EBUSY;
- up_read(&cxl_region_rwsem);
+ return __cxl_mem_sanitize(mds, cmd);
- return rc;
+ return -EBUSY;
}
-static int add_dpa_res(struct device *dev, struct resource *parent,
- struct resource *res, resource_size_t start,
- resource_size_t size, const char *type)
+static void add_part(struct cxl_dpa_info *info, u64 start, u64 size, enum cxl_partition_mode mode)
{
- int rc;
-
- res->name = type;
- res->start = start;
- res->end = start + size - 1;
- res->flags = IORESOURCE_MEM;
- if (resource_size(res) == 0) {
- dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
- return 0;
- }
- rc = request_resource(parent, res);
- if (rc) {
- dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
- res, rc);
- return rc;
- }
+ int i = info->nr_partitions;
- dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);
+ if (size == 0)
+ return;
- return 0;
+ info->part[i].range = (struct range) {
+ .start = start,
+ .end = start + size - 1,
+ };
+ info->part[i].mode = mode;
+ info->nr_partitions++;
}
-int cxl_mem_create_range_info(struct cxl_memdev_state *mds)
+int cxl_mem_dpa_fetch(struct cxl_memdev_state *mds, struct cxl_dpa_info *info)
{
struct cxl_dev_state *cxlds = &mds->cxlds;
struct device *dev = cxlds->dev;
int rc;
if (!cxlds->media_ready) {
- cxlds->dpa_res = DEFINE_RES_MEM(0, 0);
- cxlds->ram_res = DEFINE_RES_MEM(0, 0);
- cxlds->pmem_res = DEFINE_RES_MEM(0, 0);
+ info->size = 0;
return 0;
}
- cxlds->dpa_res = DEFINE_RES_MEM(0, mds->total_bytes);
+ info->size = mds->total_bytes;
if (mds->partition_align_bytes == 0) {
- rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
- mds->volatile_only_bytes, "ram");
- if (rc)
- return rc;
- return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
- mds->volatile_only_bytes,
- mds->persistent_only_bytes, "pmem");
+ add_part(info, 0, mds->volatile_only_bytes, CXL_PARTMODE_RAM);
+ add_part(info, mds->volatile_only_bytes,
+ mds->persistent_only_bytes, CXL_PARTMODE_PMEM);
+ return 0;
}
rc = cxl_mem_get_partition_info(mds);
@@ -1327,15 +1310,52 @@ int cxl_mem_create_range_info(struct cxl_memdev_state *mds)
return rc;
}
- rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
- mds->active_volatile_bytes, "ram");
- if (rc)
- return rc;
- return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
- mds->active_volatile_bytes,
- mds->active_persistent_bytes, "pmem");
+ add_part(info, 0, mds->active_volatile_bytes, CXL_PARTMODE_RAM);
+ add_part(info, mds->active_volatile_bytes, mds->active_persistent_bytes,
+ CXL_PARTMODE_PMEM);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_mem_dpa_fetch, "CXL");
+
+int cxl_get_dirty_count(struct cxl_memdev_state *mds, u32 *count)
+{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+ struct cxl_mbox_get_health_info_out hi;
+ struct cxl_mbox_cmd mbox_cmd;
+ int rc;
+
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_GET_HEALTH_INFO,
+ .size_out = sizeof(hi),
+ .payload_out = &hi,
+ };
+
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+ if (!rc)
+ *count = le32_to_cpu(hi.dirty_shutdown_cnt);
+
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_get_dirty_count, "CXL");
+
+int cxl_arm_dirty_shutdown(struct cxl_memdev_state *mds)
+{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+ struct cxl_mbox_cmd mbox_cmd;
+ struct cxl_mbox_set_shutdown_state_in in = {
+ .state = 1
+ };
+
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_SET_SHUTDOWN_STATE,
+ .size_in = sizeof(in),
+ .payload_in = &in,
+ };
+
+ return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
}
-EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, "CXL");
+EXPORT_SYMBOL_NS_GPL(cxl_arm_dirty_shutdown, "CXL");
int cxl_set_timestamp(struct cxl_memdev_state *mds)
{
@@ -1467,6 +1487,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_mailbox_init, "CXL");
struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
{
struct cxl_memdev_state *mds;
+ int rc;
mds = devm_kzalloc(dev, sizeof(*mds), GFP_KERNEL);
if (!mds) {
@@ -1480,8 +1501,12 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
mds->cxlds.cxl_mbox.host = dev;
mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
- mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID;
- mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID;
+
+ rc = devm_cxl_register_mce_notifier(dev, &mds->mce_notifier);
+ if (rc == -EOPNOTSUPP)
+ dev_warn(dev, "CXL MCE unsupported\n");
+ else if (rc)
+ return ERR_PTR(rc);
return mds;
}
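
A standalone model of how cxl_mem_dpa_fetch() assembles the partition table via add_part(): zero-size partitions are simply skipped, which is why a pmem-only device ends up with pmem at index 0 and why later lookups search by mode rather than assuming fixed indices. Types and names here are simplified stand-ins for struct cxl_dpa_info:

#include <stdio.h>

enum part_mode { MODE_RAM, MODE_PMEM };

#define NR_PARTITIONS_MAX 2

struct dpa_info {
	unsigned long long size;
	struct {
		unsigned long long start, end;
		enum part_mode mode;
	} part[NR_PARTITIONS_MAX];
	int nr_partitions;
};

/* Mirror of add_part(): zero-size partitions are not recorded at all. */
static void add_part(struct dpa_info *info, unsigned long long start,
		     unsigned long long size, enum part_mode mode)
{
	int i = info->nr_partitions;

	if (size == 0)
		return;

	info->part[i].start = start;
	info->part[i].end = start + size - 1;
	info->part[i].mode = mode;
	info->nr_partitions++;
}

int main(void)
{
	struct dpa_info info = { .size = 0x80000000ULL };

	/* Hypothetical device: no volatile capacity, 2 GiB persistent. */
	add_part(&info, 0, 0, MODE_RAM);
	add_part(&info, 0, 0x80000000ULL, MODE_PMEM);

	for (int i = 0; i < info.nr_partitions; i++)
		printf("part%d: [%#llx-%#llx] mode=%d\n", i,
		       info.part[i].start, info.part[i].end,
		       info.part[i].mode);
	return 0;
}
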
diff --git a/drivers/cxl/core/mce.c b/drivers/cxl/core/mce.c
new file mode 100644
index 000000000000..ff8d078c6ca1
--- /dev/null
+++ b/drivers/cxl/core/mce.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation. All rights reserved. */
+#include <linux/mm.h>
+#include <linux/notifier.h>
+#include <linux/set_memory.h>
+#include <asm/mce.h>
+#include <cxlmem.h>
+#include "mce.h"
+
+static int cxl_handle_mce(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct cxl_memdev_state *mds = container_of(nb, struct cxl_memdev_state,
+ mce_notifier);
+ struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
+ struct cxl_port *endpoint = cxlmd->endpoint;
+ struct mce *mce = data;
+ u64 spa, spa_alias;
+ unsigned long pfn;
+
+ if (!mce || !mce_usable_address(mce))
+ return NOTIFY_DONE;
+
+ if (!endpoint)
+ return NOTIFY_DONE;
+
+ spa = mce->addr & MCI_ADDR_PHYSADDR;
+
+ pfn = spa >> PAGE_SHIFT;
+ if (!pfn_valid(pfn))
+ return NOTIFY_DONE;
+
+ spa_alias = cxl_port_get_spa_cache_alias(endpoint, spa);
+ if (spa_alias == ~0ULL)
+ return NOTIFY_DONE;
+
+ pfn = spa_alias >> PAGE_SHIFT;
+
+ /*
+ * Take down the aliased memory page. The original memory page flagged
+ * by the MCE will be taken care of by the standard MCE handler.
+ */
+ dev_emerg(mds->cxlds.dev, "Offlining aliased SPA address0: %#llx\n",
+ spa_alias);
+ if (!memory_failure(pfn, 0))
+ set_mce_nospec(pfn);
+
+ return NOTIFY_OK;
+}
+
+static void cxl_unregister_mce_notifier(void *mce_notifier)
+{
+ mce_unregister_decode_chain(mce_notifier);
+}
+
+int devm_cxl_register_mce_notifier(struct device *dev,
+ struct notifier_block *mce_notifier)
+{
+ mce_notifier->notifier_call = cxl_handle_mce;
+ mce_notifier->priority = MCE_PRIO_UC;
+ mce_register_decode_chain(mce_notifier);
+
+ return devm_add_action_or_reset(dev, cxl_unregister_mce_notifier,
+ mce_notifier);
+}
diff --git a/drivers/cxl/core/mce.h b/drivers/cxl/core/mce.h
new file mode 100644
index 000000000000..ace73424eeb6
--- /dev/null
+++ b/drivers/cxl/core/mce.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation. All rights reserved. */
+#ifndef _CXL_CORE_MCE_H_
+#define _CXL_CORE_MCE_H_
+
+#include <linux/notifier.h>
+
+#ifdef CONFIG_CXL_MCE
+int devm_cxl_register_mce_notifier(struct device *dev,
+ struct notifier_block *mce_notifier);
+#else
+static inline int
+devm_cxl_register_mce_notifier(struct device *dev,
+ struct notifier_block *mce_notifier)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index 2e2e035abdaa..a16a5886d40a 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -75,12 +75,20 @@ static ssize_t label_storage_size_show(struct device *dev,
}
static DEVICE_ATTR_RO(label_storage_size);
+static resource_size_t cxl_ram_size(struct cxl_dev_state *cxlds)
+{
+ /* Static RAM is only expected at partition 0. */
+ if (cxlds->part[0].mode != CXL_PARTMODE_RAM)
+ return 0;
+ return resource_size(&cxlds->part[0].res);
+}
+
static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- unsigned long long len = resource_size(&cxlds->ram_res);
+ unsigned long long len = cxl_ram_size(cxlds);
return sysfs_emit(buf, "%#llx\n", len);
}
@@ -93,7 +101,7 @@ static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- unsigned long long len = resource_size(&cxlds->pmem_res);
+ unsigned long long len = cxl_pmem_size(cxlds);
return sysfs_emit(buf, "%#llx\n", len);
}
@@ -198,22 +206,17 @@ static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
int rc = 0;
/* CXL 3.0 Spec 8.2.9.8.4.1 Separate pmem and ram poison requests */
- if (resource_size(&cxlds->pmem_res)) {
- offset = cxlds->pmem_res.start;
- length = resource_size(&cxlds->pmem_res);
- rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
- if (rc)
- return rc;
- }
- if (resource_size(&cxlds->ram_res)) {
- offset = cxlds->ram_res.start;
- length = resource_size(&cxlds->ram_res);
+ for (int i = 0; i < cxlds->nr_partitions; i++) {
+ const struct resource *res = &cxlds->part[i].res;
+
+ offset = res->start;
+ length = resource_size(res);
rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
/*
* Invalid Physical Address is not an error for
* volatile addresses. Device support is optional.
*/
- if (rc == -EFAULT)
+ if (rc == -EFAULT && cxlds->part[i].mode == CXL_PARTMODE_RAM)
rc = 0;
}
return rc;
@@ -404,14 +407,21 @@ static struct attribute *cxl_memdev_attributes[] = {
NULL,
};
+static struct cxl_dpa_perf *to_pmem_perf(struct cxl_dev_state *cxlds)
+{
+ for (int i = 0; i < cxlds->nr_partitions; i++)
+ if (cxlds->part[i].mode == CXL_PARTMODE_PMEM)
+ return &cxlds->part[i].perf;
+ return NULL;
+}
+
static ssize_t pmem_qos_class_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- return sysfs_emit(buf, "%d\n", mds->pmem_perf.qos_class);
+ return sysfs_emit(buf, "%d\n", to_pmem_perf(cxlds)->qos_class);
}
static struct device_attribute dev_attr_pmem_qos_class =
@@ -423,14 +433,20 @@ static struct attribute *cxl_memdev_pmem_attributes[] = {
NULL,
};
+static struct cxl_dpa_perf *to_ram_perf(struct cxl_dev_state *cxlds)
+{
+ if (cxlds->part[0].mode != CXL_PARTMODE_RAM)
+ return NULL;
+ return &cxlds->part[0].perf;
+}
+
static ssize_t ram_qos_class_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- return sysfs_emit(buf, "%d\n", mds->ram_perf.qos_class);
+ return sysfs_emit(buf, "%d\n", to_ram_perf(cxlds)->qos_class);
}
static struct device_attribute dev_attr_ram_qos_class =
@@ -466,11 +482,11 @@ static umode_t cxl_ram_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_dpa_perf *perf = to_ram_perf(cxlmd->cxlds);
- if (a == &dev_attr_ram_qos_class.attr)
- if (mds->ram_perf.qos_class == CXL_QOS_CLASS_INVALID)
- return 0;
+ if (a == &dev_attr_ram_qos_class.attr &&
+ (!perf || perf->qos_class == CXL_QOS_CLASS_INVALID))
+ return 0;
return a->mode;
}
@@ -485,11 +501,11 @@ static umode_t cxl_pmem_visible(struct kobject *kobj, struct attribute *a, int n
{
struct device *dev = kobj_to_dev(kobj);
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_dpa_perf *perf = to_pmem_perf(cxlmd->cxlds);
- if (a == &dev_attr_pmem_qos_class.attr)
- if (mds->pmem_perf.qos_class == CXL_QOS_CLASS_INVALID)
- return 0;
+ if (a == &dev_attr_pmem_qos_class.attr &&
+ (!perf || perf->qos_class == CXL_QOS_CLASS_INVALID))
+ return 0;
return a->mode;
}
@@ -566,10 +582,9 @@ void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
{
struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
- down_write(&cxl_memdev_rwsem);
+ guard(rwsem_write)(&cxl_memdev_rwsem);
bitmap_or(cxl_mbox->exclusive_cmds, cxl_mbox->exclusive_cmds,
cmds, CXL_MEM_COMMAND_ID_MAX);
- up_write(&cxl_memdev_rwsem);
}
EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, "CXL");
@@ -583,10 +598,9 @@ void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
{
struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
- down_write(&cxl_memdev_rwsem);
+ guard(rwsem_write)(&cxl_memdev_rwsem);
bitmap_andnot(cxl_mbox->exclusive_cmds, cxl_mbox->exclusive_cmds,
cmds, CXL_MEM_COMMAND_ID_MAX);
- up_write(&cxl_memdev_rwsem);
}
EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, "CXL");
@@ -594,9 +608,8 @@ static void cxl_memdev_shutdown(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- down_write(&cxl_memdev_rwsem);
+ guard(rwsem_write)(&cxl_memdev_rwsem);
cxlmd->cxlds = NULL;
- up_write(&cxl_memdev_rwsem);
}
static void cxl_memdev_unregister(void *_cxlmd)
@@ -678,15 +691,13 @@ static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
{
struct cxl_memdev *cxlmd = file->private_data;
struct cxl_dev_state *cxlds;
- int rc = -ENXIO;
- down_read(&cxl_memdev_rwsem);
+ guard(rwsem_read)(&cxl_memdev_rwsem);
cxlds = cxlmd->cxlds;
if (cxlds && cxlds->type == CXL_DEVTYPE_CLASSMEM)
- rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
- up_read(&cxl_memdev_rwsem);
+ return __cxl_memdev_ioctl(cxlmd, cmd, arg);
- return rc;
+ return -ENXIO;
}
static int cxl_memdev_open(struct inode *inode, struct file *file)
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 013b869b66cb..96fecb799cbc 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -1054,3 +1054,100 @@ int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c)
return 0;
}
+
+/*
+ * Set max timeout such that platforms will optimize GPF flow to avoid
+ * the implied worst-case scenario delays. On a sane platform, all
+ * devices should always complete GPF within the energy budget of
+ * the GPF flow. The kernel does not have enough information to pick
+ * anything better than "maximize timeouts and hope it works".
+ *
+ * A misbehaving device could block forward progress of GPF for all
+ * the other devices, exhausting the energy budget of the platform.
+ * However, the spec seems to assume that moving on from slow-to-respond
+ * devices is a virtue. It is not possible to know whether, in actuality,
+ * the slow-to-respond device is *the* most critical device in the
+ * system to wait for.
+ */
+#define GPF_TIMEOUT_BASE_MAX 2
+#define GPF_TIMEOUT_SCALE_MAX 7 /* 10 seconds */
+
+u16 cxl_gpf_get_dvsec(struct device *dev, bool is_port)
+{
+ u16 dvsec;
+
+ if (!dev_is_pci(dev))
+ return 0;
+
+ dvsec = pci_find_dvsec_capability(to_pci_dev(dev), PCI_VENDOR_ID_CXL,
+ is_port ? CXL_DVSEC_PORT_GPF : CXL_DVSEC_DEVICE_GPF);
+ if (!dvsec)
+ dev_warn(dev, "%s GPF DVSEC not present\n",
+ is_port ? "Port" : "Device");
+ return dvsec;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_gpf_get_dvsec, "CXL");
+
+static int update_gpf_port_dvsec(struct pci_dev *pdev, int dvsec, int phase)
+{
+ u64 base, scale;
+ int rc, offset;
+ u16 ctrl;
+
+ switch (phase) {
+ case 1:
+ offset = CXL_DVSEC_PORT_GPF_PHASE_1_CONTROL_OFFSET;
+ base = CXL_DVSEC_PORT_GPF_PHASE_1_TMO_BASE_MASK;
+ scale = CXL_DVSEC_PORT_GPF_PHASE_1_TMO_SCALE_MASK;
+ break;
+ case 2:
+ offset = CXL_DVSEC_PORT_GPF_PHASE_2_CONTROL_OFFSET;
+ base = CXL_DVSEC_PORT_GPF_PHASE_2_TMO_BASE_MASK;
+ scale = CXL_DVSEC_PORT_GPF_PHASE_2_TMO_SCALE_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rc = pci_read_config_word(pdev, dvsec + offset, &ctrl);
+ if (rc)
+ return rc;
+
+ if (FIELD_GET(base, ctrl) == GPF_TIMEOUT_BASE_MAX &&
+ FIELD_GET(scale, ctrl) == GPF_TIMEOUT_SCALE_MAX)
+ return 0;
+
+ ctrl = FIELD_PREP(base, GPF_TIMEOUT_BASE_MAX);
+ ctrl |= FIELD_PREP(scale, GPF_TIMEOUT_SCALE_MAX);
+
+ rc = pci_write_config_word(pdev, dvsec + offset, ctrl);
+ if (!rc)
+ pci_dbg(pdev, "Port GPF phase %d timeout: %d0 secs\n",
+ phase, GPF_TIMEOUT_BASE_MAX);
+
+ return rc;
+}
+
+int cxl_gpf_port_setup(struct device *dport_dev, struct cxl_port *port)
+{
+ struct pci_dev *pdev;
+
+ if (!port)
+ return -EINVAL;
+
+ if (!port->gpf_dvsec) {
+ int dvsec;
+
+ dvsec = cxl_gpf_get_dvsec(dport_dev, true);
+ if (!dvsec)
+ return -EINVAL;
+
+ port->gpf_dvsec = dvsec;
+ }
+
+ pdev = to_pci_dev(dport_dev);
+ update_gpf_port_dvsec(pdev, port->gpf_dvsec, 1);
+ update_gpf_port_dvsec(pdev, port->gpf_dvsec, 2);
+
+ return 0;
+}
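
A quick check of the maximum timeout programmed above, under the assumption that the DVSEC encodes each GPF phase timeout as base * 10^scale microseconds (consistent with the "10 seconds" note for GPF_TIMEOUT_SCALE_MAX); the helper below is a sketch, not driver code:

#include <stdio.h>

/* Hedged model of the GPF timeout encoding: timeout = base * 10^scale
 * microseconds, so scale 7 is a 10-second unit and base 2 yields 20 s.
 * The encoding itself is an assumption for illustration. */
static unsigned long long gpf_timeout_us(unsigned int base, unsigned int scale)
{
	unsigned long long unit = 1;

	while (scale--)
		unit *= 10;
	return base * unit;
}

int main(void)
{
	/* GPF_TIMEOUT_BASE_MAX = 2, GPF_TIMEOUT_SCALE_MAX = 7 */
	printf("max GPF phase timeout: %llu us\n", gpf_timeout_us(2, 7));
	return 0;
}
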
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 78a5c2c25982..0fd6646c1a2e 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -194,25 +194,35 @@ static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ /* without @cxl_dpa_rwsem, make sure @part is not reloaded */
+ int part = READ_ONCE(cxled->part);
+ const char *desc;
+
+ if (part < 0)
+ desc = "none";
+ else
+ desc = cxlds->part[part].res.name;
- return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxled->mode));
+ return sysfs_emit(buf, "%s\n", desc);
}
static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
- enum cxl_decoder_mode mode;
+ enum cxl_partition_mode mode;
ssize_t rc;
if (sysfs_streq(buf, "pmem"))
- mode = CXL_DECODER_PMEM;
+ mode = CXL_PARTMODE_PMEM;
else if (sysfs_streq(buf, "ram"))
- mode = CXL_DECODER_RAM;
+ mode = CXL_PARTMODE_RAM;
else
return -EINVAL;
- rc = cxl_dpa_set_mode(cxled, mode);
+ rc = cxl_dpa_set_part(cxled, mode);
if (rc)
return rc;
@@ -549,13 +559,9 @@ static ssize_t decoders_committed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_port *port = to_cxl_port(dev);
- int rc;
-
- down_read(&cxl_region_rwsem);
- rc = sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port));
- up_read(&cxl_region_rwsem);
- return rc;
+ guard(rwsem_read)(&cxl_region_rwsem);
+ return sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port));
}
static DEVICE_ATTR_RO(decoders_committed);
@@ -1672,6 +1678,8 @@ retry:
if (rc && rc != -EBUSY)
return rc;
+ cxl_gpf_port_setup(dport_dev, port);
+
/* Any more ports to add between this one and the root? */
if (!dev_is_cxl_root_child(&port->dev))
continue;
@@ -1899,6 +1907,7 @@ struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
return ERR_PTR(-ENOMEM);
cxled->pos = -1;
+ cxled->part = -1;
cxld = &cxled->cxld;
rc = cxl_decoder_init(port, cxld);
if (rc) {
@@ -2339,8 +2348,14 @@ static __init int cxl_core_init(void)
if (rc)
goto err_region;
+ rc = cxl_ras_init();
+ if (rc)
+ goto err_ras;
+
return 0;
+err_ras:
+ cxl_region_exit();
err_region:
bus_unregister(&cxl_bus_type);
err_bus:
@@ -2352,6 +2367,7 @@ err_wq:
static void cxl_core_exit(void)
{
+ cxl_ras_exit();
cxl_region_exit();
bus_unregister(&cxl_bus_type);
destroy_workqueue(cxl_bus_wq);
diff --git a/drivers/cxl/core/ras.c b/drivers/cxl/core/ras.c
new file mode 100644
index 000000000000..485a831695c7
--- /dev/null
+++ b/drivers/cxl/core/ras.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 AMD Corporation. All rights reserved. */
+
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <cxl/event.h>
+#include <cxlmem.h>
+#include "trace.h"
+
+static void cxl_cper_trace_corr_port_prot_err(struct pci_dev *pdev,
+ struct cxl_ras_capability_regs ras_cap)
+{
+ u32 status = ras_cap.cor_status & ~ras_cap.cor_mask;
+
+ trace_cxl_port_aer_correctable_error(&pdev->dev, status);
+}
+
+static void cxl_cper_trace_uncorr_port_prot_err(struct pci_dev *pdev,
+ struct cxl_ras_capability_regs ras_cap)
+{
+ u32 status = ras_cap.uncor_status & ~ras_cap.uncor_mask;
+ u32 fe;
+
+ if (hweight32(status) > 1)
+ fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
+ ras_cap.cap_control));
+ else
+ fe = status;
+
+ trace_cxl_port_aer_uncorrectable_error(&pdev->dev, status, fe,
+ ras_cap.header_log);
+}
+
+static void cxl_cper_trace_corr_prot_err(struct pci_dev *pdev,
+ struct cxl_ras_capability_regs ras_cap)
+{
+ u32 status = ras_cap.cor_status & ~ras_cap.cor_mask;
+ struct cxl_dev_state *cxlds;
+
+ cxlds = pci_get_drvdata(pdev);
+ if (!cxlds)
+ return;
+
+ trace_cxl_aer_correctable_error(cxlds->cxlmd, status);
+}
+
+static void cxl_cper_trace_uncorr_prot_err(struct pci_dev *pdev,
+ struct cxl_ras_capability_regs ras_cap)
+{
+ u32 status = ras_cap.uncor_status & ~ras_cap.uncor_mask;
+ struct cxl_dev_state *cxlds;
+ u32 fe;
+
+ cxlds = pci_get_drvdata(pdev);
+ if (!cxlds)
+ return;
+
+ if (hweight32(status) > 1)
+ fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
+ ras_cap.cap_control));
+ else
+ fe = status;
+
+ trace_cxl_aer_uncorrectable_error(cxlds->cxlmd, status, fe,
+ ras_cap.header_log);
+}
+
+static void cxl_cper_handle_prot_err(struct cxl_cper_prot_err_work_data *data)
+{
+ unsigned int devfn = PCI_DEVFN(data->prot_err.agent_addr.device,
+ data->prot_err.agent_addr.function);
+ struct pci_dev *pdev __free(pci_dev_put) =
+ pci_get_domain_bus_and_slot(data->prot_err.agent_addr.segment,
+ data->prot_err.agent_addr.bus,
+ devfn);
+ int port_type;
+
+ if (!pdev)
+ return;
+
+ guard(device)(&pdev->dev);
+
+ port_type = pci_pcie_type(pdev);
+ if (port_type == PCI_EXP_TYPE_ROOT_PORT ||
+ port_type == PCI_EXP_TYPE_DOWNSTREAM ||
+ port_type == PCI_EXP_TYPE_UPSTREAM) {
+ if (data->severity == AER_CORRECTABLE)
+ cxl_cper_trace_corr_port_prot_err(pdev, data->ras_cap);
+ else
+ cxl_cper_trace_uncorr_port_prot_err(pdev, data->ras_cap);
+
+ return;
+ }
+
+ if (data->severity == AER_CORRECTABLE)
+ cxl_cper_trace_corr_prot_err(pdev, data->ras_cap);
+ else
+ cxl_cper_trace_uncorr_prot_err(pdev, data->ras_cap);
+}
+
+static void cxl_cper_prot_err_work_fn(struct work_struct *work)
+{
+ struct cxl_cper_prot_err_work_data wd;
+
+ while (cxl_cper_prot_err_kfifo_get(&wd))
+ cxl_cper_handle_prot_err(&wd);
+}
+static DECLARE_WORK(cxl_cper_prot_err_work, cxl_cper_prot_err_work_fn);
+
+int cxl_ras_init(void)
+{
+ return cxl_cper_register_prot_err_work(&cxl_cper_prot_err_work);
+}
+
+void cxl_ras_exit(void)
+{
+ cxl_cper_unregister_prot_err_work(&cxl_cper_prot_err_work);
+ cancel_work_sync(&cxl_cper_prot_err_work);
+}
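
A sketch of the first-error selection used by cxl_cper_trace_uncorr_prot_err() and its port variant: when more than one uncorrectable status bit is set, only the bit indexed by the First Error Pointer in the RAS capability control register is reported. The field mask below is an assumed placement for illustration:

#include <stdio.h>

#define CAP_CONTROL_FE_MASK 0x3f /* assumed field position for the sketch */

static unsigned int hweight32_sketch(unsigned int v)
{
	unsigned int n = 0;

	for (; v; v &= v - 1)
		n++;
	return n;
}

/* If more than one status bit is set, report only the bit indexed by
 * the First Error Pointer; otherwise the lone status bit is it. */
static unsigned int first_error(unsigned int status, unsigned int cap_control)
{
	if (hweight32_sketch(status) > 1)
		return 1u << (cap_control & CAP_CONTROL_FE_MASK);
	return status;
}

int main(void)
{
	printf("fe=%#x\n", first_error(0x14, 2)); /* two bits set -> BIT(2) */
	printf("fe=%#x\n", first_error(0x10, 2)); /* one bit set -> 0x10 */
	return 0;
}
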
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index e8d11a988fd9..c3f4dc244df7 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -144,7 +144,7 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
rc = down_read_interruptible(&cxl_region_rwsem);
if (rc)
return rc;
- if (cxlr->mode != CXL_DECODER_PMEM)
+ if (cxlr->mode != CXL_PARTMODE_PMEM)
rc = sysfs_emit(buf, "\n");
else
rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
@@ -441,7 +441,7 @@ static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
* Support tooling that expects to find a 'uuid' attribute for all
* regions regardless of mode.
*/
- if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
+ if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_PARTMODE_PMEM)
return 0444;
return a->mode;
}
@@ -603,8 +603,16 @@ static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_region *cxlr = to_cxl_region(dev);
+ const char *desc;
- return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxlr->mode));
+ if (cxlr->mode == CXL_PARTMODE_RAM)
+ desc = "ram";
+ else if (cxlr->mode == CXL_PARTMODE_PMEM)
+ desc = "pmem";
+ else
+ desc = "";
+
+ return sysfs_emit(buf, "%s\n", desc);
}
static DEVICE_ATTR_RO(mode);
@@ -630,7 +638,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
/* ways, granularity and uuid (if PMEM) need to be set before HPA */
if (!p->interleave_ways || !p->interleave_granularity ||
- (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
+ (cxlr->mode == CXL_PARTMODE_PMEM && uuid_is_null(&p->uuid)))
return -ENXIO;
div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder);
@@ -824,6 +832,21 @@ static int match_free_decoder(struct device *dev, const void *data)
return 1;
}
+static bool region_res_match_cxl_range(const struct cxl_region_params *p,
+ struct range *range)
+{
+ if (!p->res)
+ return false;
+
+ /*
+ * If this is an extended linear cache region then the CXL range is
+ * assumed to be fronted by the DRAM range in the current known
+ * implementation.
+ * This assumption will be made until a variant implementation exists.
+ */
+ return p->res->start + p->cache_size == range->start &&
+ p->res->end == range->end;
+}
+
static int match_auto_decoder(struct device *dev, const void *data)
{
const struct cxl_region_params *p = data;
@@ -836,7 +859,7 @@ static int match_auto_decoder(struct device *dev, const void *data)
cxld = to_cxl_decoder(dev);
r = &cxld->hpa_range;
- if (p->res && p->res->start == r->start && p->res->end == r->end)
+ if (region_res_match_cxl_range(p, r))
return 1;
return 0;
@@ -1424,8 +1447,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
if (cxld->interleave_ways != iw ||
cxld->interleave_granularity != ig ||
- cxld->hpa_range.start != p->res->start ||
- cxld->hpa_range.end != p->res->end ||
+ !region_res_match_cxl_range(p, &cxld->hpa_range) ||
((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
dev_err(&cxlr->dev,
"%s:%s %s expected iw: %d ig: %d %pr\n",
@@ -1888,6 +1910,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
{
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_region_params *p = &cxlr->params;
struct cxl_port *ep_port, *root_port;
struct cxl_dport *dport;
@@ -1902,17 +1925,17 @@ static int cxl_region_attach(struct cxl_region *cxlr,
return rc;
}
- if (cxled->mode != cxlr->mode) {
- dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
- dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
- return -EINVAL;
- }
-
- if (cxled->mode == CXL_DECODER_DEAD) {
+ if (cxled->part < 0) {
dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
return -ENODEV;
}
+ if (cxlds->part[cxled->part].mode != cxlr->mode) {
+ dev_dbg(&cxlr->dev, "%s region mode: %d mismatch\n",
+ dev_name(&cxled->cxld.dev), cxlr->mode);
+ return -EINVAL;
+ }
+
/* all full of members, or interleave config not established? */
if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
dev_dbg(&cxlr->dev, "region already active\n");
@@ -1951,13 +1974,13 @@ static int cxl_region_attach(struct cxl_region *cxlr,
return -ENXIO;
}
- if (resource_size(cxled->dpa_res) * p->interleave_ways !=
+ if (resource_size(cxled->dpa_res) * p->interleave_ways + p->cache_size !=
resource_size(p->res)) {
dev_dbg(&cxlr->dev,
- "%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n",
+ "%s:%s-size-%#llx * ways-%d + cache-%#llx != region-size-%#llx\n",
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
(u64)resource_size(cxled->dpa_res), p->interleave_ways,
- (u64)resource_size(p->res));
+ (u64)p->cache_size, (u64)resource_size(p->res));
return -EINVAL;
}
@@ -2115,7 +2138,7 @@ out:
void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
{
down_write(&cxl_region_rwsem);
- cxled->mode = CXL_DECODER_DEAD;
+ cxled->part = -1;
cxl_region_detach(cxled);
up_write(&cxl_region_rwsem);
}
@@ -2471,7 +2494,7 @@ static int cxl_region_calculate_adistance(struct notifier_block *nb,
*/
static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
int id,
- enum cxl_decoder_mode mode,
+ enum cxl_partition_mode mode,
enum cxl_decoder_type type)
{
struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
@@ -2525,13 +2548,13 @@ static ssize_t create_ram_region_show(struct device *dev,
}
static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
- enum cxl_decoder_mode mode, int id)
+ enum cxl_partition_mode mode, int id)
{
int rc;
switch (mode) {
- case CXL_DECODER_RAM:
- case CXL_DECODER_PMEM:
+ case CXL_PARTMODE_RAM:
+ case CXL_PARTMODE_PMEM:
break;
default:
dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
@@ -2551,7 +2574,7 @@ static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
}
static ssize_t create_region_store(struct device *dev, const char *buf,
- size_t len, enum cxl_decoder_mode mode)
+ size_t len, enum cxl_partition_mode mode)
{
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
struct cxl_region *cxlr;
@@ -2572,7 +2595,7 @@ static ssize_t create_pmem_region_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- return create_region_store(dev, buf, len, CXL_DECODER_PMEM);
+ return create_region_store(dev, buf, len, CXL_PARTMODE_PMEM);
}
DEVICE_ATTR_RW(create_pmem_region);
@@ -2580,7 +2603,7 @@ static ssize_t create_ram_region_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- return create_region_store(dev, buf, len, CXL_DECODER_RAM);
+ return create_region_store(dev, buf, len, CXL_PARTMODE_RAM);
}
DEVICE_ATTR_RW(create_ram_region);
@@ -2678,7 +2701,7 @@ EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, "CXL");
struct cxl_poison_context {
struct cxl_port *port;
- enum cxl_decoder_mode mode;
+ int part;
u64 offset;
};
@@ -2686,47 +2709,45 @@ static int cxl_get_poison_unmapped(struct cxl_memdev *cxlmd,
struct cxl_poison_context *ctx)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ const struct resource *res;
+ struct resource *p, *last;
u64 offset, length;
int rc = 0;
+ if (ctx->part < 0)
+ return 0;
+
/*
- * Collect poison for the remaining unmapped resources
- * after poison is collected by committed endpoints.
- *
- * Knowing that PMEM must always follow RAM, get poison
- * for unmapped resources based on the last decoder's mode:
- * ram: scan remains of ram range, then any pmem range
- * pmem: scan remains of pmem range
+ * Collect poison for the remaining unmapped resources after
+ * poison is collected by committed endpoint decoders.
*/
-
- if (ctx->mode == CXL_DECODER_RAM) {
- offset = ctx->offset;
- length = resource_size(&cxlds->ram_res) - offset;
+ for (int i = ctx->part; i < cxlds->nr_partitions; i++) {
+ res = &cxlds->part[i].res;
+ for (p = res->child, last = NULL; p; p = p->sibling)
+ last = p;
+ if (last)
+ offset = last->end + 1;
+ else
+ offset = res->start;
+ length = res->end - offset + 1;
+ if (!length)
+ break;
rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
- if (rc == -EFAULT)
- rc = 0;
+ if (rc == -EFAULT && cxlds->part[i].mode == CXL_PARTMODE_RAM)
+ continue;
if (rc)
- return rc;
- }
- if (ctx->mode == CXL_DECODER_PMEM) {
- offset = ctx->offset;
- length = resource_size(&cxlds->dpa_res) - offset;
- if (!length)
- return 0;
- } else if (resource_size(&cxlds->pmem_res)) {
- offset = cxlds->pmem_res.start;
- length = resource_size(&cxlds->pmem_res);
- } else {
- return 0;
+ break;
}
- return cxl_mem_get_poison(cxlmd, offset, length, NULL);
+ return rc;
}
static int poison_by_decoder(struct device *dev, void *arg)
{
struct cxl_poison_context *ctx = arg;
struct cxl_endpoint_decoder *cxled;
+ enum cxl_partition_mode mode;
+ struct cxl_dev_state *cxlds;
struct cxl_memdev *cxlmd;
u64 offset, length;
int rc = 0;
@@ -2735,27 +2756,18 @@ static int poison_by_decoder(struct device *dev, void *arg)
return rc;
cxled = to_cxl_endpoint_decoder(dev);
- if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
- return rc;
-
- /*
- * Regions are only created with single mode decoders: pmem or ram.
- * Linux does not support mixed mode decoders. This means that
- * reading poison per endpoint decoder adheres to the requirement
- * that poison reads of pmem and ram must be separated.
- * CXL 3.0 Spec 8.2.9.8.4.1
- */
- if (cxled->mode == CXL_DECODER_MIXED) {
- dev_dbg(dev, "poison list read unsupported in mixed mode\n");
+ if (!cxled->dpa_res)
return rc;
- }
cxlmd = cxled_to_memdev(cxled);
+ cxlds = cxlmd->cxlds;
+ mode = cxlds->part[cxled->part].mode;
+
if (cxled->skip) {
offset = cxled->dpa_res->start - cxled->skip;
length = cxled->skip;
rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
- if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
+ if (rc == -EFAULT && mode == CXL_PARTMODE_RAM)
rc = 0;
if (rc)
return rc;
@@ -2764,7 +2776,7 @@ static int poison_by_decoder(struct device *dev, void *arg)
offset = cxled->dpa_res->start;
length = cxled->dpa_res->end - offset + 1;
rc = cxl_mem_get_poison(cxlmd, offset, length, cxled->cxld.region);
- if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
+ if (rc == -EFAULT && mode == CXL_PARTMODE_RAM)
rc = 0;
if (rc)
return rc;
@@ -2772,7 +2784,7 @@ static int poison_by_decoder(struct device *dev, void *arg)
/* Iterate until commit_end is reached */
if (cxled->cxld.id == ctx->port->commit_end) {
ctx->offset = cxled->dpa_res->end + 1;
- ctx->mode = cxled->mode;
+ ctx->part = cxled->part;
return 1;
}
@@ -2785,7 +2797,8 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port)
int rc = 0;
ctx = (struct cxl_poison_context) {
- .port = port
+ .port = port,
+ .part = -1,
};
rc = device_for_each_child(&port->dev, &ctx, poison_by_decoder);
@@ -2921,7 +2934,7 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0);
/* Apply the hpa_offset to the region base address */
- hpa = hpa_offset + p->res->start;
+ hpa = hpa_offset + p->res->start + p->cache_size;
/* Root decoder translation overrides typical modulo decode */
if (cxlrd->hpa_to_spa)
@@ -3038,17 +3051,13 @@ static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
struct cxl_dax_region *cxlr_dax;
struct device *dev;
- down_read(&cxl_region_rwsem);
- if (p->state != CXL_CONFIG_COMMIT) {
- cxlr_dax = ERR_PTR(-ENXIO);
- goto out;
- }
+ guard(rwsem_read)(&cxl_region_rwsem);
+ if (p->state != CXL_CONFIG_COMMIT)
+ return ERR_PTR(-ENXIO);
cxlr_dax = kzalloc(sizeof(*cxlr_dax), GFP_KERNEL);
- if (!cxlr_dax) {
- cxlr_dax = ERR_PTR(-ENOMEM);
- goto out;
- }
+ if (!cxlr_dax)
+ return ERR_PTR(-ENOMEM);
cxlr_dax->hpa_range.start = p->res->start;
cxlr_dax->hpa_range.end = p->res->end;
@@ -3061,8 +3070,6 @@ static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
dev->parent = &cxlr->dev;
dev->bus = &cxl_bus_type;
dev->type = &cxl_dax_region_type;
-out:
- up_read(&cxl_region_rwsem);
return cxlr_dax;
}
@@ -3208,7 +3215,6 @@ static int match_region_by_range(struct device *dev, const void *data)
struct cxl_region_params *p;
struct cxl_region *cxlr;
const struct range *r = data;
- int rc = 0;
if (!is_cxl_region(dev))
return 0;
@@ -3216,60 +3222,96 @@ static int match_region_by_range(struct device *dev, const void *data)
cxlr = to_cxl_region(dev);
p = &cxlr->params;
- down_read(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_region_rwsem);
if (p->res && p->res->start == r->start && p->res->end == r->end)
- rc = 1;
- up_read(&cxl_region_rwsem);
+ return 1;
- return rc;
+ return 0;
}
-/* Establish an empty region covering the given HPA range */
-static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
- struct cxl_endpoint_decoder *cxled)
+static int cxl_extended_linear_cache_resize(struct cxl_region *cxlr,
+ struct resource *res)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_region_params *p = &cxlr->params;
+ int nid = phys_to_target_node(res->start);
+ resource_size_t size = resource_size(res);
+ resource_size_t cache_size, start;
+ int rc;
+
+ rc = cxl_acpi_get_extended_linear_cache_size(res, nid, &cache_size);
+ if (rc)
+ return rc;
+
+ if (!cache_size)
+ return 0;
+
+ if (size != cache_size) {
+ dev_warn(&cxlr->dev,
+ "Extended Linear Cache size %pa != CXL size %pa. No Support!",
+ &cache_size, &size);
+ return -ENXIO;
+ }
+
+ /*
+ * Move the start of the range to where the cache range starts. The
+ * implementation assumes that the cache range is in front of the
+ * CXL range. This is not dictated by the HMAT spec but is how the
+ * current known implementation is configured.
+ *
+ * The cache range is expected to be within the CFMWS. The adjusted
+ * res->start should not be less than cxlrd->res->start.
+ */
+ start = res->start - cache_size;
+ if (start < cxlrd->res->start)
+ return -ENXIO;
+
+ res->start = start;
+ p->cache_size = cache_size;
+
+ return 0;
+}
+
+static int __construct_region(struct cxl_region *cxlr,
+ struct cxl_root_decoder *cxlrd,
+ struct cxl_endpoint_decoder *cxled)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
- struct cxl_port *port = cxlrd_to_port(cxlrd);
struct range *hpa = &cxled->cxld.hpa_range;
struct cxl_region_params *p;
- struct cxl_region *cxlr;
struct resource *res;
int rc;
- do {
- cxlr = __create_region(cxlrd, cxled->mode,
- atomic_read(&cxlrd->region_id));
- } while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY);
-
- if (IS_ERR(cxlr)) {
- dev_err(cxlmd->dev.parent,
- "%s:%s: %s failed assign region: %ld\n",
- dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
- __func__, PTR_ERR(cxlr));
- return cxlr;
- }
-
- down_write(&cxl_region_rwsem);
+ guard(rwsem_write)(&cxl_region_rwsem);
p = &cxlr->params;
if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
dev_err(cxlmd->dev.parent,
"%s:%s: %s autodiscovery interrupted\n",
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
__func__);
- rc = -EBUSY;
- goto err;
+ return -EBUSY;
}
set_bit(CXL_REGION_F_AUTO, &cxlr->flags);
res = kmalloc(sizeof(*res), GFP_KERNEL);
- if (!res) {
- rc = -ENOMEM;
- goto err;
- }
+ if (!res)
+ return -ENOMEM;
*res = DEFINE_RES_MEM_NAMED(hpa->start, range_len(hpa),
dev_name(&cxlr->dev));
+
+ rc = cxl_extended_linear_cache_resize(cxlr, res);
+ if (rc && rc != -EOPNOTSUPP) {
+ /*
+ * Failing to support extended linear cache region resize does not
+ * prevent the region from functioning. It only causes 'cxl list' to
+ * show an incorrect region size.
+ */
+ dev_warn(cxlmd->dev.parent,
+ "Extended linear cache calculation failed rc:%d\n", rc);
+ }
+
rc = insert_resource(cxlrd->res, res);
if (rc) {
/*
@@ -3289,7 +3331,7 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
if (rc)
- goto err;
+ return rc;
dev_dbg(cxlmd->dev.parent, "%s:%s: %s %s res: %pr iw: %d ig: %d\n",
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), __func__,
@@ -3298,14 +3340,40 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
/* ...to match put_device() in cxl_add_to_region() */
get_device(&cxlr->dev);
- up_write(&cxl_region_rwsem);
- return cxlr;
+ return 0;
+}
-err:
- up_write(&cxl_region_rwsem);
- devm_release_action(port->uport_dev, unregister_region, cxlr);
- return ERR_PTR(rc);
+/* Establish an empty region covering the given HPA range */
+static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
+ struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *port = cxlrd_to_port(cxlrd);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ int rc, part = READ_ONCE(cxled->part);
+ struct cxl_region *cxlr;
+
+ do {
+ cxlr = __create_region(cxlrd, cxlds->part[part].mode,
+ atomic_read(&cxlrd->region_id));
+ } while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY);
+
+ if (IS_ERR(cxlr)) {
+ dev_err(cxlmd->dev.parent,
+ "%s:%s: %s failed assign region: %ld\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ __func__, PTR_ERR(cxlr));
+ return cxlr;
+ }
+
+ rc = __construct_region(cxlr, cxlrd, cxled);
+ if (rc) {
+ devm_release_action(port->uport_dev, unregister_region, cxlr);
+ return ERR_PTR(rc);
+ }
+
+ return cxlr;
}
int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
@@ -3375,6 +3443,34 @@ out:
}
EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, "CXL");
+u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa)
+{
+ struct cxl_region_ref *iter;
+ unsigned long index;
+
+ if (!endpoint)
+ return ~0ULL;
+
+ guard(rwsem_write)(&cxl_region_rwsem);
+
+ xa_for_each(&endpoint->regions, index, iter) {
+ struct cxl_region_params *p = &iter->region->params;
+
+ if (p->res->start <= spa && spa <= p->res->end) {
+ if (!p->cache_size)
+ return ~0ULL;
+
+ if (spa >= p->res->start + p->cache_size)
+ return spa - p->cache_size;
+
+ return spa + p->cache_size;
+ }
+ }
+
+ return ~0ULL;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_port_get_spa_cache_alias, "CXL");
+
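
The alias math in cxl_port_get_spa_cache_alias() in one self-contained sketch: an extended-linear-cache region spans [res->start, res->end], with the first cache_size bytes backed by DRAM and the remainder by CXL, so every SPA in the region has exactly one alias cache_size away. Struct and function names below are illustrative:

#include <stdio.h>

struct region_params_sketch {
	unsigned long long start, end;	/* p->res range, inclusive */
	unsigned long long cache_size;	/* 0 when no extended linear cache */
};

#define NO_ALIAS (~0ULL)

/* Mirrors the alias math above: addresses in the CXL half alias down
 * into the DRAM half, and vice versa. */
static unsigned long long spa_cache_alias(const struct region_params_sketch *p,
					  unsigned long long spa)
{
	if (spa < p->start || spa > p->end || !p->cache_size)
		return NO_ALIAS;
	if (spa >= p->start + p->cache_size)
		return spa - p->cache_size;
	return spa + p->cache_size;
}

int main(void)
{
	/* Hypothetical 2 GiB window: 1 GiB DRAM cache + 1 GiB CXL. */
	struct region_params_sketch p = {
		.start = 0x100000000ULL,
		.end   = 0x17fffffffULL,
		.cache_size = 0x40000000ULL,
	};

	printf("%#llx\n", spa_cache_alias(&p, 0x150000000ULL)); /* 0x110000000 */
	printf("%#llx\n", spa_cache_alias(&p, 0x110000000ULL)); /* 0x150000000 */
	return 0;
}
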
static int is_system_ram(struct resource *res, void *arg)
{
struct cxl_region *cxlr = arg;
@@ -3440,9 +3536,9 @@ out:
return rc;
switch (cxlr->mode) {
- case CXL_DECODER_PMEM:
+ case CXL_PARTMODE_PMEM:
return devm_cxl_add_pmem_region(cxlr);
- case CXL_DECODER_RAM:
+ case CXL_PARTMODE_RAM:
/*
* The region cannot be managed by CXL if any portion of
* it is already online as 'System RAM'
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index cea706b683b5..25ebfbc1616c 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -48,6 +48,34 @@
{ CXL_RAS_UC_IDE_RX_ERR, "IDE Rx Error" } \
)
+TRACE_EVENT(cxl_port_aer_uncorrectable_error,
+ TP_PROTO(struct device *dev, u32 status, u32 fe, u32 *hl),
+ TP_ARGS(dev, status, fe, hl),
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __string(host, dev_name(dev->parent))
+ __field(u32, status)
+ __field(u32, first_error)
+ __array(u32, header_log, CXL_HEADERLOG_SIZE_U32)
+ ),
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(host);
+ __entry->status = status;
+ __entry->first_error = fe;
+ /*
+ * Embed the 512B headerlog data for user app retrieval and
+ * parsing, but no need to print this in the trace buffer.
+ */
+ memcpy(__entry->header_log, hl, CXL_HEADERLOG_SIZE);
+ ),
+ TP_printk("device=%s host=%s status: '%s' first_error: '%s'",
+ __get_str(device), __get_str(host),
+ show_uc_errs(__entry->status),
+ show_uc_errs(__entry->first_error)
+ )
+);
+
TRACE_EVENT(cxl_aer_uncorrectable_error,
TP_PROTO(const struct cxl_memdev *cxlmd, u32 status, u32 fe, u32 *hl),
TP_ARGS(cxlmd, status, fe, hl),
@@ -96,6 +124,25 @@ TRACE_EVENT(cxl_aer_uncorrectable_error,
{ CXL_RAS_CE_PHYS_LAYER_ERR, "Received Error From Physical Layer" } \
)
+TRACE_EVENT(cxl_port_aer_correctable_error,
+ TP_PROTO(struct device *dev, u32 status),
+ TP_ARGS(dev, status),
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __string(host, dev_name(dev->parent))
+ __field(u32, status)
+ ),
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(host);
+ __entry->status = status;
+ ),
+ TP_printk("device=%s host=%s status='%s'",
+ __get_str(device), __get_str(host),
+ show_ce_errs(__entry->status)
+ )
+);
+
TRACE_EVENT(cxl_aer_correctable_error,
TP_PROTO(const struct cxl_memdev *cxlmd, u32 status),
TP_ARGS(cxlmd, status),
@@ -392,9 +439,10 @@ TRACE_EVENT(cxl_generic_event,
TRACE_EVENT(cxl_general_media,
TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
- struct cxl_region *cxlr, u64 hpa, struct cxl_event_gen_media *rec),
+ struct cxl_region *cxlr, u64 hpa, u64 hpa_alias0,
+ struct cxl_event_gen_media *rec),
- TP_ARGS(cxlmd, log, cxlr, hpa, rec),
+ TP_ARGS(cxlmd, log, cxlr, hpa, hpa_alias0, rec),
TP_STRUCT__entry(
CXL_EVT_TP_entry
@@ -408,6 +456,7 @@ TRACE_EVENT(cxl_general_media,
__array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
/* Following are out of order to pack trace record */
__field(u64, hpa)
+ __field(u64, hpa_alias0)
__field_struct(uuid_t, region_uuid)
__field(u16, validity_flags)
__field(u8, rank)
@@ -438,6 +487,7 @@ TRACE_EVENT(cxl_general_media,
CXL_EVENT_GEN_MED_COMP_ID_SIZE);
__entry->validity_flags = get_unaligned_le16(&rec->media_hdr.validity_flags);
__entry->hpa = hpa;
+ __entry->hpa_alias0 = hpa_alias0;
if (cxlr) {
__assign_str(region_name);
uuid_copy(&__entry->region_uuid, &cxlr->params.uuid);
@@ -455,7 +505,7 @@ TRACE_EVENT(cxl_general_media,
"device=%x validity_flags='%s' " \
"comp_id=%s comp_id_pldm_valid_flags='%s' " \
"pldm_entity_id=%s pldm_resource_id=%s " \
- "hpa=%llx region=%s region_uuid=%pUb " \
+ "hpa=%llx hpa_alias0=%llx region=%s region_uuid=%pUb " \
"cme_threshold_ev_flags='%s' cme_count=%u",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
@@ -470,7 +520,7 @@ TRACE_EVENT(cxl_general_media,
CXL_GMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
show_pldm_resource_id(__entry->validity_flags, CXL_GMER_VALID_COMPONENT,
CXL_GMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
- __entry->hpa, __get_str(region_name), &__entry->region_uuid,
+ __entry->hpa, __entry->hpa_alias0, __get_str(region_name), &__entry->region_uuid,
show_cme_threshold_ev_flags(__entry->cme_threshold_ev_flags), __entry->cme_count
)
);
@@ -529,9 +579,10 @@ TRACE_EVENT(cxl_general_media,
TRACE_EVENT(cxl_dram,
TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
- struct cxl_region *cxlr, u64 hpa, struct cxl_event_dram *rec),
+ struct cxl_region *cxlr, u64 hpa, u64 hpa_alias0,
+ struct cxl_event_dram *rec),
- TP_ARGS(cxlmd, log, cxlr, hpa, rec),
+ TP_ARGS(cxlmd, log, cxlr, hpa, hpa_alias0, rec),
TP_STRUCT__entry(
CXL_EVT_TP_entry
@@ -547,6 +598,7 @@ TRACE_EVENT(cxl_dram,
__field(u32, row)
__array(u8, cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE)
__field(u64, hpa)
+ __field(u64, hpa_alias0)
__field_struct(uuid_t, region_uuid)
__field(u8, rank) /* Out of order to pack trace record */
__field(u8, bank_group) /* Out of order to pack trace record */
@@ -584,6 +636,7 @@ TRACE_EVENT(cxl_dram,
memcpy(__entry->cor_mask, &rec->correction_mask,
CXL_EVENT_DER_CORRECTION_MASK_SIZE);
__entry->hpa = hpa;
+ __entry->hpa_alias0 = hpa_alias0;
if (cxlr) {
__assign_str(region_name);
uuid_copy(&__entry->region_uuid, &cxlr->params.uuid);
@@ -604,7 +657,7 @@ TRACE_EVENT(cxl_dram,
"validity_flags='%s' " \
"comp_id=%s comp_id_pldm_valid_flags='%s' " \
"pldm_entity_id=%s pldm_resource_id=%s " \
- "hpa=%llx region=%s region_uuid=%pUb " \
+ "hpa=%llx hpa_alias0=%llx region=%s region_uuid=%pUb " \
"sub_channel=%u cme_threshold_ev_flags='%s' cvme_count=%u",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
@@ -622,7 +675,7 @@ TRACE_EVENT(cxl_dram,
CXL_DER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
show_pldm_resource_id(__entry->validity_flags, CXL_DER_VALID_COMPONENT,
CXL_DER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
- __entry->hpa, __get_str(region_name), &__entry->region_uuid,
+ __entry->hpa, __entry->hpa_alias0, __get_str(region_name), &__entry->region_uuid,
__entry->sub_channel, show_cme_threshold_ev_flags(__entry->cme_threshold_ev_flags),
__entry->cvme_count
)
@@ -870,6 +923,7 @@ TRACE_EVENT(cxl_poison,
__string(region, cxlr ? dev_name(&cxlr->dev) : "")
__field(u64, overflow_ts)
__field(u64, hpa)
+ __field(u64, hpa_alias0)
__field(u64, dpa)
__field(u32, dpa_length)
__array(char, uuid, 16)
@@ -892,16 +946,22 @@ TRACE_EVENT(cxl_poison,
memcpy(__entry->uuid, &cxlr->params.uuid, 16);
__entry->hpa = cxl_dpa_to_hpa(cxlr, cxlmd,
__entry->dpa);
+ if (__entry->hpa != ULLONG_MAX && cxlr->params.cache_size)
+ __entry->hpa_alias0 = __entry->hpa +
+ cxlr->params.cache_size;
+ else
+ __entry->hpa_alias0 = ULLONG_MAX;
} else {
__assign_str(region);
memset(__entry->uuid, 0, 16);
__entry->hpa = ULLONG_MAX;
+ __entry->hpa_alias0 = ULLONG_MAX;
}
),
TP_printk("memdev=%s host=%s serial=%lld trace_type=%s region=%s " \
- "region_uuid=%pU hpa=0x%llx dpa=0x%llx dpa_length=0x%x " \
- "source=%s flags=%s overflow_time=%llu",
+ "region_uuid=%pU hpa=0x%llx hpa_alias0=0x%llx dpa=0x%llx " \
+ "dpa_length=0x%x source=%s flags=%s overflow_time=%llu",
__get_str(memdev),
__get_str(host),
__entry->serial,
@@ -909,6 +969,7 @@ TRACE_EVENT(cxl_poison,
__get_str(region),
__entry->uuid,
__entry->hpa,
+ __entry->hpa_alias0,
__entry->dpa,
__entry->dpa_length,
show_poison_source(__entry->source),
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index bbbaa0d0a670..be8a7dc77719 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -373,32 +373,6 @@ struct cxl_decoder {
};
/*
- * CXL_DECODER_DEAD prevents endpoints from being reattached to regions
- * while cxld_unregister() is running
- */
-enum cxl_decoder_mode {
- CXL_DECODER_NONE,
- CXL_DECODER_RAM,
- CXL_DECODER_PMEM,
- CXL_DECODER_MIXED,
- CXL_DECODER_DEAD,
-};
-
-static inline const char *cxl_decoder_mode_name(enum cxl_decoder_mode mode)
-{
- static const char * const names[] = {
- [CXL_DECODER_NONE] = "none",
- [CXL_DECODER_RAM] = "ram",
- [CXL_DECODER_PMEM] = "pmem",
- [CXL_DECODER_MIXED] = "mixed",
- };
-
- if (mode >= CXL_DECODER_NONE && mode <= CXL_DECODER_MIXED)
- return names[mode];
- return "mixed";
-}
-
-/*
* Track whether this decoder is reserved for region autodiscovery, or
* free for userspace provisioning.
*/
@@ -412,16 +386,16 @@ enum cxl_decoder_state {
* @cxld: base cxl_decoder_object
* @dpa_res: actively claimed DPA span of this decoder
* @skip: offset into @dpa_res where @cxld.hpa_range maps
- * @mode: which memory type / access-mode-partition this decoder targets
* @state: autodiscovery state
+ * @part: partition index this decoder maps
* @pos: interleave position in @cxld.region
*/
struct cxl_endpoint_decoder {
struct cxl_decoder cxld;
struct resource *dpa_res;
resource_size_t skip;
- enum cxl_decoder_mode mode;
enum cxl_decoder_state state;
+ int part;
int pos;
};
@@ -493,6 +467,7 @@ enum cxl_config_state {
* @res: allocated iomem capacity for this region
* @targets: active ordered targets in current decoder configuration
* @nr_targets: number of targets
+ * @cache_size: extended linear cache size if it exists, otherwise zero.
*
* State transitions are protected by the cxl_region_rwsem
*/
@@ -504,6 +479,12 @@ struct cxl_region_params {
struct resource *res;
struct cxl_endpoint_decoder *targets[CXL_DECODER_MAX_INTERLEAVE];
int nr_targets;
+ resource_size_t cache_size;
+};
+
+enum cxl_partition_mode {
+ CXL_PARTMODE_RAM,
+ CXL_PARTMODE_PMEM,
};
/*
@@ -525,7 +506,7 @@ struct cxl_region_params {
* struct cxl_region - CXL region
* @dev: This region's device
* @id: This region's id. Id is globally unique across all regions
- * @mode: Endpoint decoder allocation / access mode
+ * @mode: Operational mode of the mapped capacity
* @type: Endpoint decoder target type
* @cxl_nvb: nvdimm bridge for coordinating @cxlr_pmem setup / shutdown
* @cxlr_pmem: (for pmem regions) cached copy of the nvdimm bridge
@@ -538,7 +519,7 @@ struct cxl_region_params {
struct cxl_region {
struct device dev;
int id;
- enum cxl_decoder_mode mode;
+ enum cxl_partition_mode mode;
enum cxl_decoder_type type;
struct cxl_nvdimm_bridge *cxl_nvb;
struct cxl_pmem_region *cxlr_pmem;
@@ -563,6 +544,7 @@ struct cxl_nvdimm {
struct device dev;
struct cxl_memdev *cxlmd;
u8 dev_id[CXL_DEV_ID_LEN]; /* for nvdimm, string of 'serial' */
+ u64 dirty_shutdowns;
};
struct cxl_pmem_region_mapping {
@@ -610,6 +592,7 @@ struct cxl_dax_region {
* @cdat: Cached CDAT data
* @cdat_available: Should a CDAT attribute be available in sysfs
* @pci_latency: Upstream latency in picoseconds
+ * @gpf_dvsec: Cached GPF port DVSEC
*/
struct cxl_port {
struct device dev;
@@ -633,6 +616,7 @@ struct cxl_port {
} cdat;
bool cdat_available;
long pci_latency;
+ int gpf_dvsec;
};
/**
@@ -875,6 +859,7 @@ struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev);
int cxl_add_to_region(struct cxl_port *root,
struct cxl_endpoint_decoder *cxled);
struct cxl_dax_region *to_cxl_dax_region(struct device *dev);
+u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa);
#else
static inline bool is_cxl_pmem_region(struct device *dev)
{
@@ -893,6 +878,11 @@ static inline struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
{
return NULL;
}
+static inline u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint,
+ u64 spa)
+{
+ return 0;
+}
#endif
void cxl_endpoint_parse_cdat(struct cxl_port *port);
@@ -920,4 +910,6 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
#define __mock static
#endif
+u16 cxl_gpf_get_dvsec(struct device *dev, bool is_port);
+
#endif /* __CXL_H__ */
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index dd2b7060d501..3ec6b906371b 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -97,6 +97,19 @@ int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
resource_size_t base, resource_size_t len,
resource_size_t skipped);
+#define CXL_NR_PARTITIONS_MAX 2
+
+struct cxl_dpa_info {
+ u64 size;
+ struct cxl_dpa_part_info {
+ struct range range;
+ enum cxl_partition_mode mode;
+ } part[CXL_NR_PARTITIONS_MAX];
+ int nr_partitions;
+};
+
+int cxl_dpa_setup(struct cxl_dev_state *cxlds, const struct cxl_dpa_info *info);
+
static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port,
struct cxl_memdev *cxlmd)
{
@@ -373,6 +386,18 @@ struct cxl_dpa_perf {
};
/**
+ * struct cxl_dpa_partition - DPA partition descriptor
+ * @res: shortcut to the partition in the DPA resource tree (cxlds->dpa_res)
+ * @perf: performance attributes of the partition from CDAT
+ * @mode: operation mode for the DPA capacity, e.g. ram, pmem, dynamic...
+ */
+struct cxl_dpa_partition {
+ struct resource res;
+ struct cxl_dpa_perf perf;
+ enum cxl_partition_mode mode;
+};
+
+/**
* struct cxl_dev_state - The driver device state
*
* cxl_dev_state represents the CXL driver/device state. It provides an
@@ -387,8 +412,8 @@ struct cxl_dpa_perf {
* @rcd: operating in RCD mode (CXL 3.0 9.11.8 CXL Devices Attached to an RCH)
* @media_ready: Indicate whether the device media is usable
* @dpa_res: Overall DPA resource tree for the device
- * @pmem_res: Active Persistent memory capacity configuration
- * @ram_res: Active Volatile memory capacity configuration
+ * @part: DPA partition array
+ * @nr_partitions: Number of DPA partitions
* @serial: PCIe Device Serial Number
* @type: Generic Memory Class device or Vendor Specific Memory device
* @cxl_mbox: CXL mailbox context
@@ -403,8 +428,8 @@ struct cxl_dev_state {
bool rcd;
bool media_ready;
struct resource dpa_res;
- struct resource pmem_res;
- struct resource ram_res;
+ struct cxl_dpa_partition part[CXL_NR_PARTITIONS_MAX];
+ unsigned int nr_partitions;
u64 serial;
enum cxl_devtype type;
struct cxl_mailbox cxl_mbox;
@@ -413,6 +438,18 @@ struct cxl_dev_state {
#endif
};
+static inline resource_size_t cxl_pmem_size(struct cxl_dev_state *cxlds)
+{
+ /*
+ * Static PMEM may be at partition index 0 when there is no static RAM
+ * capacity.
+ */
+ for (int i = 0; i < cxlds->nr_partitions; i++)
+ if (cxlds->part[i].mode == CXL_PARTMODE_PMEM)
+ return resource_size(&cxlds->part[i].res);
+ return 0;
+}
+
static inline struct cxl_dev_state *mbox_to_cxlds(struct cxl_mailbox *cxl_mbox)
{
return dev_get_drvdata(cxl_mbox->host);
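
With ram_res/pmem_res gone, capacity queries become partition walks like
cxl_pmem_size() above; a hedged sketch of the volatile-capacity counterpart
(helper name hypothetical, assuming a CXL_PARTMODE_RAM enumerator):

	static inline resource_size_t example_cxl_ram_size(struct cxl_dev_state *cxlds)
	{
		/* Return the size of the first volatile partition, if any */
		for (int i = 0; i < cxlds->nr_partitions; i++)
			if (cxlds->part[i].mode == CXL_PARTMODE_RAM)
				return resource_size(&cxlds->part[i].res);
		return 0;
	}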
@@ -435,14 +472,11 @@ static inline struct cxl_dev_state *mbox_to_cxlds(struct cxl_mailbox *cxl_mbox)
* @partition_align_bytes: alignment size for partition-able capacity
* @active_volatile_bytes: sum of hard + soft volatile
* @active_persistent_bytes: sum of hard + soft persistent
- * @next_volatile_bytes: volatile capacity change pending device reset
- * @next_persistent_bytes: persistent capacity change pending device reset
- * @ram_perf: performance data entry matched to RAM partition
- * @pmem_perf: performance data entry matched to PMEM partition
* @event: event log driver state
* @poison: poison driver state info
* @security: security driver state info
* @fw: firmware upload / activation state
+ * @mce_notifier: MCE notifier
*
* See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
* details on capacity parameters.
@@ -457,16 +491,12 @@ struct cxl_memdev_state {
u64 partition_align_bytes;
u64 active_volatile_bytes;
u64 active_persistent_bytes;
- u64 next_volatile_bytes;
- u64 next_persistent_bytes;
-
- struct cxl_dpa_perf ram_perf;
- struct cxl_dpa_perf pmem_perf;
struct cxl_event_state event;
struct cxl_poison_state poison;
struct cxl_security_state security;
struct cxl_fw_state fw;
+ struct notifier_block mce_notifier;
};
static inline struct cxl_memdev_state *
@@ -660,6 +690,23 @@ struct cxl_mbox_set_partition_info {
#define CXL_SET_PARTITION_IMMEDIATE_FLAG BIT(0)
+/* Get Health Info Output Payload CXL 3.2 Spec 8.2.10.9.3.1 Table 8-148 */
+struct cxl_mbox_get_health_info_out {
+ u8 health_status;
+ u8 media_status;
+ u8 additional_status;
+ u8 life_used;
+ __le16 device_temperature;
+ __le32 dirty_shutdown_cnt;
+ __le32 corrected_volatile_error_cnt;
+ __le32 corrected_persistent_error_cnt;
+} __packed;
+
+/* Set Shutdown State Input Payload CXL 3.2 Spec 8.2.10.9.3.5 Table 8-152 */
+struct cxl_mbox_set_shutdown_state_in {
+ u8 state;
+} __packed;
+
/* Set Timestamp CXL 3.0 Spec 8.2.9.4.2 */
struct cxl_mbox_set_timestamp_in {
__le64 timestamp;
@@ -785,7 +832,7 @@ int cxl_internal_send_cmd(struct cxl_mailbox *cxl_mbox,
int cxl_dev_state_identify(struct cxl_memdev_state *mds);
int cxl_await_media_ready(struct cxl_dev_state *cxlds);
int cxl_enumerate_cmds(struct cxl_memdev_state *mds);
-int cxl_mem_create_range_info(struct cxl_memdev_state *mds);
+int cxl_mem_dpa_fetch(struct cxl_memdev_state *mds, struct cxl_dpa_info *info);
struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev);
void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
unsigned long *cmds);
@@ -796,6 +843,8 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
enum cxl_event_log_type type,
enum cxl_event_type event_type,
const uuid_t *uuid, union cxl_event *evt);
+int cxl_get_dirty_count(struct cxl_memdev_state *mds, u32 *count);
+int cxl_arm_dirty_shutdown(struct cxl_memdev_state *mds);
int cxl_set_timestamp(struct cxl_memdev_state *mds);
int cxl_poison_state_init(struct cxl_memdev_state *mds);
int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index 4da07727ab9c..54e219b0049e 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -40,6 +40,12 @@
/* CXL 2.0 8.1.6: GPF DVSEC for CXL Port */
#define CXL_DVSEC_PORT_GPF 4
+#define CXL_DVSEC_PORT_GPF_PHASE_1_CONTROL_OFFSET 0x0C
+#define CXL_DVSEC_PORT_GPF_PHASE_1_TMO_BASE_MASK GENMASK(3, 0)
+#define CXL_DVSEC_PORT_GPF_PHASE_1_TMO_SCALE_MASK GENMASK(11, 8)
+#define CXL_DVSEC_PORT_GPF_PHASE_2_CONTROL_OFFSET 0xE
+#define CXL_DVSEC_PORT_GPF_PHASE_2_TMO_BASE_MASK GENMASK(3, 0)
+#define CXL_DVSEC_PORT_GPF_PHASE_2_TMO_SCALE_MASK GENMASK(11, 8)
/* CXL 2.0 8.1.7: GPF DVSEC for CXL Device */
#define CXL_DVSEC_DEVICE_GPF 5
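
A hedged sketch of how these masks pair with FIELD_GET() to decode a phase-1
timeout, where dvsec is the offset returned by pci_find_dvsec_capability()
and the helper name is hypothetical:

	#include <linux/bitfield.h>
	#include <linux/pci.h>

	static int example_gpf_phase1_tmo(struct pci_dev *pdev, int dvsec,
					  u16 *base, u16 *scale)
	{
		u16 ctrl;
		int rc;

		rc = pci_read_config_word(pdev,
				dvsec + CXL_DVSEC_PORT_GPF_PHASE_1_CONTROL_OFFSET,
				&ctrl);
		if (rc)
			return rc;

		/* 4-bit timeout base and scale fields, per CXL r3.x 8.1.6 */
		*base = FIELD_GET(CXL_DVSEC_PORT_GPF_PHASE_1_TMO_BASE_MASK, ctrl);
		*scale = FIELD_GET(CXL_DVSEC_PORT_GPF_PHASE_1_TMO_SCALE_MASK, ctrl);
		return 0;
	}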
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 2f03a4d5606e..9675243bd05b 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -152,7 +152,7 @@ static int cxl_mem_probe(struct device *dev)
return -ENXIO;
}
- if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) {
+ if (cxl_pmem_size(cxlds) && IS_ENABLED(CONFIG_CXL_PMEM)) {
rc = devm_cxl_add_nvdimm(parent_port, cxlmd);
if (rc) {
if (rc == -ENODEV)
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 993fa60fe453..7b14a154463c 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -903,6 +903,7 @@ __ATTRIBUTE_GROUPS(cxl_rcd);
static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus);
+ struct cxl_dpa_info range_info = { 0 };
struct cxl_memdev_state *mds;
struct cxl_dev_state *cxlds;
struct cxl_register_map map;
@@ -993,7 +994,11 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
- rc = cxl_mem_create_range_info(mds);
+ rc = cxl_mem_dpa_fetch(mds, &range_info);
+ if (rc)
+ return rc;
+
+ rc = cxl_dpa_setup(cxlds, &range_info);
if (rc)
return rc;
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index f9c95996e937..d061fe3d2b86 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -42,15 +42,44 @@ static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *
}
static DEVICE_ATTR_RO(id);
+static ssize_t dirty_shutdown_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+
+ return sysfs_emit(buf, "%llu\n", cxl_nvd->dirty_shutdowns);
+}
+static DEVICE_ATTR_RO(dirty_shutdown);
+
static struct attribute *cxl_dimm_attributes[] = {
&dev_attr_id.attr,
&dev_attr_provider.attr,
+ &dev_attr_dirty_shutdown.attr,
NULL
};
+#define CXL_INVALID_DIRTY_SHUTDOWN_COUNT ULLONG_MAX
+static umode_t cxl_dimm_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ if (a == &dev_attr_dirty_shutdown.attr) {
+ struct device *dev = kobj_to_dev(kobj);
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+
+ if (cxl_nvd->dirty_shutdowns ==
+ CXL_INVALID_DIRTY_SHUTDOWN_COUNT)
+ return 0;
+ }
+
+ return a->mode;
+}
+
static const struct attribute_group cxl_dimm_attribute_group = {
.name = "cxl",
.attrs = cxl_dimm_attributes,
+ .is_visible = cxl_dimm_visible
};
static const struct attribute_group *cxl_dimm_attribute_groups[] = {
@@ -58,6 +87,38 @@ static const struct attribute_group *cxl_dimm_attribute_groups[] = {
NULL
};
+static void cxl_nvdimm_arm_dirty_shutdown_tracking(struct cxl_nvdimm *cxl_nvd)
+{
+ struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+ struct device *dev = &cxl_nvd->dev;
+ u32 count;
+
+ /*
+ * Dirty tracking is enabled and exposed to the user only when:
+ * - dirty shutdown on the device can be set, and,
+ * - the device has a Device GPF DVSEC (albeit unused), and,
+ * - the Get Health Info cmd can retrieve the device's dirty count.
+ */
+ cxl_nvd->dirty_shutdowns = CXL_INVALID_DIRTY_SHUTDOWN_COUNT;
+
+ if (cxl_arm_dirty_shutdown(mds)) {
+ dev_warn(dev, "GPF: could not set dirty shutdown state\n");
+ return;
+ }
+
+ if (!cxl_gpf_get_dvsec(cxlds->dev, false))
+ return;
+
+ if (cxl_get_dirty_count(mds, &count)) {
+ dev_warn(dev, "GPF: could not retrieve dirty count\n");
+ return;
+ }
+
+ cxl_nvd->dirty_shutdowns = count;
+}
+
static int cxl_nvdimm_probe(struct device *dev)
{
struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
@@ -78,6 +139,14 @@ static int cxl_nvdimm_probe(struct device *dev)
set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
+
+ /*
+ * Set dirty shutdown now, with the expectation that the device
+ * clears it upon a successful GPF flow. The exception to this
+ * is upon Viral detection, per CXL 3.2 section 12.4.2.
+ */
+ cxl_nvdimm_arm_dirty_shutdown_tracking(cxl_nvd);
+
nvdimm = __nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd,
cxl_dimm_attribute_groups, flags,
cmd_mask, 0, NULL, cxl_nvd->dev_id,
@@ -375,6 +444,16 @@ static int cxl_pmem_region_probe(struct device *dev)
goto out_nvd;
}
+ if (cxlds->serial == 0) {
+ /* include missing alongside invalid in this error message. */
+ dev_err(dev, "%s: invalid or missing serial number\n",
+ dev_name(&cxlmd->dev));
+ rc = -ENXIO;
+ goto out_nvd;
+ }
+ info[i].serial = cxlds->serial;
+ info[i].offset = m->start;
+
m->cxl_nvd = cxl_nvd;
mappings[i] = (struct nd_mapping_desc) {
.nvdimm = nvdimm,
@@ -382,8 +461,6 @@ static int cxl_pmem_region_probe(struct device *dev)
.size = m->size,
.position = i,
};
- info[i].offset = m->start;
- info[i].serial = cxlds->serial;
}
ndr_desc.num_mappings = cxlr_pmem->nr_mappings;
ndr_desc.mapping = mappings;
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index d2bfd1ff5492..a35fc5552845 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -153,7 +153,7 @@ static int cxl_port_probe(struct device *dev)
}
static ssize_t CDAT_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -170,7 +170,7 @@ static ssize_t CDAT_read(struct file *filp, struct kobject *kobj,
port->cdat.length);
}
-static BIN_ATTR_ADMIN_RO(CDAT, 0);
+static const BIN_ATTR_ADMIN_RO(CDAT, 0);
static umode_t cxl_port_bin_attr_is_visible(struct kobject *kobj,
const struct bin_attribute *attr, int i)
@@ -184,13 +184,13 @@ static umode_t cxl_port_bin_attr_is_visible(struct kobject *kobj,
return 0;
}
-static struct bin_attribute *cxl_cdat_bin_attributes[] = {
+static const struct bin_attribute *const cxl_cdat_bin_attributes[] = {
&bin_attr_CDAT,
NULL,
};
-static struct attribute_group cxl_cdat_attribute_group = {
- .bin_attrs = cxl_cdat_bin_attributes,
+static const struct attribute_group cxl_cdat_attribute_group = {
+ .bin_attrs_new = cxl_cdat_bin_attributes,
.is_bin_visible = cxl_port_bin_attr_is_visible,
};
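
The same bin_attribute constification repeats throughout this series
(dmi-sysfs, qemu_fw_cfg, amdgpu, drm): read/write callbacks take a
const struct bin_attribute *, and attribute groups register through the
_new members during the transition. A hedged sketch of the target shape,
with hypothetical names:

	static ssize_t blob_read(struct file *filp, struct kobject *kobj,
				 const struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
	{
		return 0;	/* copy up to 'count' bytes starting at 'off' */
	}

	/* Defines a root-only, read-only bin_attr_blob bound to blob_read() */
	static const BIN_ATTR_ADMIN_RO(blob, 0);

	static const struct bin_attribute *const example_bin_attrs[] = {
		&bin_attr_blob,
		NULL,
	};

	static const struct attribute_group example_group = {
		.bin_attrs_new = example_bin_attrs,
	};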
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 6d74e62bbee0..328231cfb028 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -89,14 +89,13 @@ static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
ALIGN_DOWN(vmf->address, fault_size));
for (i = 0; i < nr_pages; i++) {
- struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
+ struct folio *folio = pfn_folio(pfn_t_to_pfn(pfn) + i);
- page = compound_head(page);
- if (page->mapping)
+ if (folio->mapping)
continue;
- page->mapping = filp->f_mapping;
- page->index = pgoff + i;
+ folio->mapping = filp->f_mapping;
+ folio->index = pgoff + i;
}
}
@@ -126,11 +125,12 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
+ pfn = phys_to_pfn_t(phys, 0);
dax_set_mapping(vmf, pfn, fault_size);
- return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
+ return vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn),
+ vmf->flags & FAULT_FLAG_WRITE);
}
static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
@@ -169,11 +169,12 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
+ pfn = phys_to_pfn_t(phys, 0);
dax_set_mapping(vmf, pfn, fault_size);
- return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
+ return vmf_insert_folio_pmd(vmf, page_folio(pfn_t_to_page(pfn)),
+ vmf->flags & FAULT_FLAG_WRITE);
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
@@ -214,11 +215,12 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
+ pfn = phys_to_pfn_t(phys, 0);
dax_set_mapping(vmf, pfn, fault_size);
- return vmf_insert_pfn_pud(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
+ return vmf_insert_folio_pud(vmf, page_folio(pfn_t_to_page(pfn)),
+ vmf->flags & FAULT_FLAG_WRITE);
}
#else
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 8afea2e23360..df2d2dc00a05 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -546,7 +546,7 @@ config PL330_DMA
config PXA_DMA
bool "PXA DMA support"
- depends on (ARCH_MMP || ARCH_PXA)
+ depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/amd/ae4dma/ae4dma-pci.c b/drivers/dma/amd/ae4dma/ae4dma-pci.c
index aad0dc4294a3..2c63907db228 100644
--- a/drivers/dma/amd/ae4dma/ae4dma-pci.c
+++ b/drivers/dma/amd/ae4dma/ae4dma-pci.c
@@ -46,8 +46,8 @@ static int ae4_get_irqs(struct ae4_device *ae4)
} else {
ae4_msix->msix_count = ret;
- for (i = 0; i < MAX_AE4_HW_QUEUES; i++)
- ae4->ae4_irq[i] = ae4_msix->msix_entry[i].vector;
+ for (i = 0; i < ae4_msix->msix_count; i++)
+ ae4->ae4_irq[i] = pci_irq_vector(pdev, i);
}
return ret;
@@ -137,8 +137,6 @@ static void ae4_pci_remove(struct pci_dev *pdev)
}
static const struct pci_device_id ae4_pci_table[] = {
- { PCI_VDEVICE(AMD, 0x14C8), },
- { PCI_VDEVICE(AMD, 0x14DC), },
{ PCI_VDEVICE(AMD, 0x149B), },
/* Last entry must be zero */
{ 0, }
diff --git a/drivers/dma/amd/ae4dma/ae4dma.h b/drivers/dma/amd/ae4dma/ae4dma.h
index 265c5d436008..57f6048726bb 100644
--- a/drivers/dma/amd/ae4dma/ae4dma.h
+++ b/drivers/dma/amd/ae4dma/ae4dma.h
@@ -37,6 +37,8 @@
#define AE4_DMA_VERSION 4
#define CMD_AE4_DESC_DW0_VAL 2
+#define AE4_TIME_OUT 5000
+
struct ae4_msix {
int msix_count;
struct msix_entry msix_entry[MAX_AE4_HW_QUEUES];
diff --git a/drivers/dma/amd/ptdma/ptdma-dmaengine.c b/drivers/dma/amd/ptdma/ptdma-dmaengine.c
index 35c84ec9608b..715ac3ae067b 100644
--- a/drivers/dma/amd/ptdma/ptdma-dmaengine.c
+++ b/drivers/dma/amd/ptdma/ptdma-dmaengine.c
@@ -198,8 +198,10 @@ static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
{
struct dma_async_tx_descriptor *tx_desc;
struct virt_dma_desc *vd;
+ struct pt_device *pt;
unsigned long flags;
+ pt = chan->pt;
/* Loop over descriptors until one is found with commands */
do {
if (desc) {
@@ -217,7 +219,7 @@ static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
spin_lock_irqsave(&chan->vc.lock, flags);
- if (desc) {
+ if (pt->ver != AE4_DMA_VERSION && desc) {
if (desc->status != DMA_COMPLETE) {
if (desc->status != DMA_ERROR)
desc->status = DMA_COMPLETE;
@@ -235,7 +237,7 @@ static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
spin_unlock_irqrestore(&chan->vc.lock, flags);
- if (tx_desc) {
+ if (pt->ver != AE4_DMA_VERSION && tx_desc) {
dmaengine_desc_get_callback_invoke(tx_desc, NULL);
dma_run_dependencies(tx_desc);
vchan_vdesc_fini(vd);
@@ -245,11 +247,25 @@ static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
return NULL;
}
+static inline bool ae4_core_queue_full(struct pt_cmd_queue *cmd_q)
+{
+ u32 front_wi = readl(cmd_q->reg_control + AE4_WR_IDX_OFF);
+ u32 rear_ri = readl(cmd_q->reg_control + AE4_RD_IDX_OFF);
+
+ if (((MAX_CMD_QLEN + front_wi - rear_ri) % MAX_CMD_QLEN) >= (MAX_CMD_QLEN - 1))
+ return true;
+
+ return false;
+}
+
static void pt_cmd_callback(void *data, int err)
{
struct pt_dma_desc *desc = data;
+ struct ae4_cmd_queue *ae4cmd_q;
struct dma_chan *dma_chan;
struct pt_dma_chan *chan;
+ struct ae4_device *ae4;
+ struct pt_device *pt;
int ret;
if (err == -EINPROGRESS)
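
The queue-full test above is the standard circular-buffer occupancy formula;
a hedged, self-contained illustration of the arithmetic (user-space C, with a
hypothetical queue length standing in for MAX_CMD_QLEN):

	#include <assert.h>

	#define QLEN 32	/* stand-in for MAX_CMD_QLEN */

	/*
	 * Slots in use for write index wi and read index ri; adding QLEN
	 * before subtracting keeps the result non-negative after wrap.
	 */
	static unsigned int ring_occupancy(unsigned int wi, unsigned int ri)
	{
		return (QLEN + wi - ri) % QLEN;
	}

	int main(void)
	{
		assert(ring_occupancy(7, 7) == 0);		/* empty */
		assert(ring_occupancy(0, 1) == QLEN - 1);	/* full: one slot reserved */
		return 0;
	}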
@@ -257,11 +273,32 @@ static void pt_cmd_callback(void *data, int err)
dma_chan = desc->vd.tx.chan;
chan = to_pt_chan(dma_chan);
+ pt = chan->pt;
if (err)
desc->status = DMA_ERROR;
while (true) {
+ if (pt->ver == AE4_DMA_VERSION) {
+ ae4 = container_of(pt, struct ae4_device, pt);
+ ae4cmd_q = &ae4->ae4cmd_q[chan->id];
+
+ if (ae4cmd_q->q_cmd_count >= (CMD_Q_LEN - 1) ||
+ ae4_core_queue_full(&ae4cmd_q->cmd_q)) {
+ wake_up(&ae4cmd_q->q_w);
+
+ if (wait_for_completion_timeout(&ae4cmd_q->cmp,
+ msecs_to_jiffies(AE4_TIME_OUT))
+ == 0) {
+ dev_err(pt->dev, "TIMEOUT %d:\n", ae4cmd_q->id);
+ break;
+ }
+
+ reinit_completion(&ae4cmd_q->cmp);
+ continue;
+ }
+ }
+
/* Check for DMA descriptor completion */
desc = pt_handle_active_desc(chan, desc);
@@ -296,6 +333,49 @@ static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
return desc;
}
+static void pt_cmd_callback_work(void *data, int err)
+{
+ struct dma_async_tx_descriptor *tx_desc;
+ struct pt_dma_desc *desc = data;
+ struct dma_chan *dma_chan;
+ struct virt_dma_desc *vd;
+ struct pt_dma_chan *chan;
+ unsigned long flags;
+
+ dma_chan = desc->vd.tx.chan;
+ chan = to_pt_chan(dma_chan);
+
+ if (err == -EINPROGRESS)
+ return;
+
+ tx_desc = &desc->vd.tx;
+ vd = &desc->vd;
+
+ if (err)
+ desc->status = DMA_ERROR;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ if (desc) {
+ if (desc->status != DMA_COMPLETE) {
+ if (desc->status != DMA_ERROR)
+ desc->status = DMA_COMPLETE;
+
+ dma_cookie_complete(tx_desc);
+ dma_descriptor_unmap(tx_desc);
+ } else {
+ tx_desc = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ if (tx_desc) {
+ dmaengine_desc_get_callback_invoke(tx_desc, NULL);
+ dma_run_dependencies(tx_desc);
+ list_del(&desc->vd.node);
+ vchan_vdesc_fini(vd);
+ }
+}
+
static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
dma_addr_t dst,
dma_addr_t src,
@@ -327,6 +407,7 @@ static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
desc->len = len;
if (pt->ver == AE4_DMA_VERSION) {
+ pt_cmd->pt_cmd_callback = pt_cmd_callback_work;
ae4 = container_of(pt, struct ae4_device, pt);
ae4cmd_q = &ae4->ae4cmd_q[chan->id];
mutex_lock(&ae4cmd_q->cmd_lock);
@@ -367,13 +448,16 @@ static void pt_issue_pending(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
struct pt_dma_desc *desc;
+ struct pt_device *pt;
unsigned long flags;
bool engine_is_idle = true;
+ pt = chan->pt;
+
spin_lock_irqsave(&chan->vc.lock, flags);
desc = pt_next_dma_desc(chan);
- if (desc)
+ if (desc && pt->ver != AE4_DMA_VERSION)
engine_is_idle = false;
vchan_issue_pending(&chan->vc);
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 20b10c15c696..0117bb2e8591 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -893,7 +893,7 @@ static int bcm2835_dma_suspend_late(struct device *dev)
}
static const struct dev_pm_ops bcm2835_dma_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(bcm2835_dma_suspend_late, NULL)
+ LATE_SYSTEM_SLEEP_PM_OPS(bcm2835_dma_suspend_late, NULL)
};
static int bcm2835_dma_probe(struct platform_device *pdev)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index c1357d7f3dc6..758fcd0546d8 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -40,6 +40,8 @@
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/property.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
@@ -812,15 +814,13 @@ static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
*/
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
struct dma_device *d, *_d;
struct dma_chan *chan = NULL;
- /* If device-tree is present get slave info from here */
- if (dev->of_node)
- chan = of_dma_request_slave_channel(dev->of_node, name);
-
- /* If device was enumerated by ACPI get slave info from here */
- if (has_acpi_companion(dev) && !chan)
+ if (is_of_node(fwnode))
+ chan = of_dma_request_slave_channel(to_of_node(fwnode), name);
+ else if (is_acpi_device_node(fwnode))
chan = acpi_dma_request_slave_chan_by_name(dev, name);
if (PTR_ERR(chan) == -EPROBE_DEFER)
@@ -854,8 +854,8 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
found:
#ifdef CONFIG_DEBUG_FS
- chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
- name);
+ chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev), name);
+ /* No functional issue if it fails; users are supposed to test before use */
#endif
chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
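
For reference, the consumer-facing pattern is unchanged by the fwnode
conversion; a minimal hedged sketch (the "rx" channel name is hypothetical):

	struct dma_chan *chan;

	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);
	/* ... dmaengine_slave_config(), prep and submit descriptors ... */
	dma_release_channel(chan);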
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 91b2fbc0b864..d891dfca358e 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -841,9 +841,9 @@ static int dmatest_func(void *data)
} else {
dma_async_issue_pending(chan);
- wait_event_freezable_timeout(thread->done_wait,
- done->done,
- msecs_to_jiffies(params->timeout));
+ wait_event_timeout(thread->done_wait,
+ done->done,
+ msecs_to_jiffies(params->timeout));
status = dma_async_is_tx_complete(chan, cookie, NULL,
NULL);
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index 68236247059d..c2b88cc99e5d 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -15,6 +15,7 @@
#include <linux/irq.h>
#include <linux/dma/edma.h>
#include <linux/dma-mapping.h>
+#include <linux/string_choices.h>
#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
@@ -746,7 +747,7 @@ static int dw_edma_channel_setup(struct dw_edma *dw, u32 wr_alloc, u32 rd_alloc)
chan->ll_max -= 1;
dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
- chan->dir == EDMA_DIR_WRITE ? "write" : "read",
+ str_write_read(chan->dir == EDMA_DIR_WRITE),
chan->id, chan->ll_max);
if (dw->nr_irqs == 1)
@@ -767,7 +768,8 @@ static int dw_edma_channel_setup(struct dw_edma *dw, u32 wr_alloc, u32 rd_alloc)
memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));
dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
- chan->dir == EDMA_DIR_WRITE ? "write" : "read", chan->id,
+ str_write_read(chan->dir == EDMA_DIR_WRITE),
+ chan->id,
chan->msi.address_hi, chan->msi.address_lo,
chan->msi.data);
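
This and the similar conversions below (imx-dma, pxa_dma, sun6i, edma,
xilinx) replace open-coded ternaries with the <linux/string_choices.h>
helpers; a short sketch of the pattern, with hypothetical values:

	#include <linux/string_choices.h>

	/* str_write_read(true) == "write", str_yes_no(false) == "no", ... */
	dev_dbg(dev, "dir=%s sg=%s irq=%s\n",
		str_write_read(dir == EDMA_DIR_WRITE),
		str_yes_no(has_sg),
		str_high_low(line));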
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index e8a0eb81726a..a3aae3d1c093 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -76,8 +76,6 @@ static void dw_pci_remove(struct pci_dev *pdev)
dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret);
}
-#ifdef CONFIG_PM_SLEEP
-
static int dw_pci_suspend_late(struct device *dev)
{
struct dw_dma_chip_pdata *data = dev_get_drvdata(dev);
@@ -94,10 +92,8 @@ static int dw_pci_resume_early(struct device *dev)
return do_dw_dma_enable(chip);
};
-#endif /* CONFIG_PM_SLEEP */
-
static const struct dev_pm_ops dw_pci_dev_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_pci_suspend_late, dw_pci_resume_early)
+ LATE_SYSTEM_SLEEP_PM_OPS(dw_pci_suspend_late, dw_pci_resume_early)
};
static const struct pci_device_id dw_pci_id_table[] = {
@@ -136,7 +132,7 @@ static struct pci_driver dw_pci_driver = {
.probe = dw_pci_probe,
.remove = dw_pci_remove,
.driver = {
- .pm = &dw_pci_dev_pm_ops,
+ .pm = pm_sleep_ptr(&dw_pci_dev_pm_ops),
},
};
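
The dw/platform.c and bcm2835 hunks below apply the same modernization:
LATE_SYSTEM_SLEEP_PM_OPS() plus pm_sleep_ptr() keep the callbacks visible to
the compiler (so they are dropped as dead code when CONFIG_PM_SLEEP is off)
instead of hiding them behind #ifdef. A hedged sketch of the idiom with a
hypothetical driver:

	static int foo_suspend_late(struct device *dev)
	{
		return 0;	/* quiesce hardware */
	}

	static int foo_resume_early(struct device *dev)
	{
		return 0;	/* re-enable hardware */
	}

	static const struct dev_pm_ops foo_pm_ops = {
		LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, foo_resume_early)
	};

	static struct platform_driver foo_driver = {
		.driver = {
			.name = "foo",
			/* evaluates to NULL when !CONFIG_PM_SLEEP */
			.pm = pm_sleep_ptr(&foo_pm_ops),
		},
	};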
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 2606cf9cd429..cee56cd31a61 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -157,8 +157,6 @@ static const struct acpi_device_id dw_dma_acpi_id_table[] = {
MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
#endif
-#ifdef CONFIG_PM_SLEEP
-
static int dw_suspend_late(struct device *dev)
{
struct dw_dma_chip_pdata *data = dev_get_drvdata(dev);
@@ -183,10 +181,8 @@ static int dw_resume_early(struct device *dev)
return do_dw_dma_enable(chip);
}
-#endif /* CONFIG_PM_SLEEP */
-
static const struct dev_pm_ops dw_dev_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_suspend_late, dw_resume_early)
+ LATE_SYSTEM_SLEEP_PM_OPS(dw_suspend_late, dw_resume_early)
};
static struct platform_driver dw_driver = {
@@ -195,7 +191,7 @@ static struct platform_driver dw_driver = {
.shutdown = dw_shutdown,
.driver = {
.name = DRV_NAME,
- .pm = &dw_dev_pm_ops,
+ .pm = pm_sleep_ptr(&dw_dev_pm_ops),
.of_match_table = of_match_ptr(dw_dma_of_id_table),
.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
},
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index f989b6c9c0a9..756d67325db5 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -164,7 +164,7 @@ static bool fsl_edma_srcid_in_use(struct fsl_edma_engine *fsl_edma, u32 srcid)
fsl_chan = &fsl_edma->chans[i];
if (fsl_chan->srcid && srcid == fsl_chan->srcid) {
- dev_err(&fsl_chan->pdev->dev, "The srcid is in use, can't use!");
+ dev_err(&fsl_chan->pdev->dev, "The srcid is in use, can't use!\n");
return true;
}
}
@@ -401,6 +401,7 @@ fsl_edma2_irq_init(struct platform_device *pdev,
/* The last IRQ is for eDMA err */
if (i == count - 1) {
+ fsl_edma->errirq = irq;
ret = devm_request_irq(&pdev->dev, irq,
fsl_edma_err_handler,
0, "eDMA2-ERR", fsl_edma);
@@ -420,10 +421,13 @@ static void fsl_edma_irq_exit(
struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
if (fsl_edma->txirq == fsl_edma->errirq) {
- devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
+ if (fsl_edma->txirq >= 0)
+ devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
} else {
- devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
- devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma);
+ if (fsl_edma->txirq >= 0)
+ devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
+ if (fsl_edma->errirq >= 0)
+ devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma);
}
}
@@ -620,6 +624,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
if (!fsl_edma)
return -ENOMEM;
+ fsl_edma->errirq = -EINVAL;
+ fsl_edma->txirq = -EINVAL;
fsl_edma->drvdata = drvdata;
fsl_edma->n_chans = chans;
mutex_init(&fsl_edma->fsl_edma_mutex);
@@ -802,9 +808,9 @@ static void fsl_edma_remove(struct platform_device *pdev)
struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);
fsl_edma_irq_exit(pdev, fsl_edma);
- fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
of_dma_controller_free(np);
dma_async_device_unregister(&fsl_edma->dma_dev);
+ fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
}
@@ -822,7 +828,7 @@ static int fsl_edma_suspend_late(struct device *dev)
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
/* Make sure chan is idle or will force disable. */
if (unlikely(fsl_chan->status == DMA_IN_PROGRESS)) {
- dev_warn(dev, "WARN: There is non-idle channel.");
+ dev_warn(dev, "WARN: There is non-idle channel.\n");
fsl_edma_disable_request(fsl_chan);
fsl_edma_chan_mux(fsl_chan, 0, false);
}
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index b946f78f85e1..fca1d2924999 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -912,8 +912,7 @@ static void idxd_device_config_restore(struct idxd_device *idxd,
idxd->rdbuf_limit = idxd_saved->saved_idxd.rdbuf_limit;
- if (saved_evl)
- idxd->evl->size = saved_evl->size;
+ idxd->evl->size = saved_evl->size;
for (i = 0; i < idxd->max_groups; i++) {
struct idxd_group *saved_group, *group;
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 4127c1bdcca7..fd55bcd060ab 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -1073,7 +1073,7 @@ static struct platform_driver mdc_dma_driver = {
.driver = {
.name = "img-mdc-dma",
.pm = &img_mdc_pm_ops,
- .of_match_table = of_match_ptr(mdc_dma_of_match),
+ .of_match_table = mdc_dma_of_match,
},
.probe = mdc_dma_probe,
.remove = mdc_dma_remove,
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index a651e0995ce8..de8d7070904e 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -17,6 +17,7 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
@@ -942,7 +943,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
" src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
imxdmac->channel, (unsigned long long)xt->src_start,
(unsigned long long) xt->dst_start,
- xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
+ str_true_false(xt->src_sgl), str_true_false(xt->dst_sgl),
xt->numf, xt->frame_size);
if (list_empty(&imxdmac->ld_free) ||
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 3449006cd14b..02a85d6f1bea 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1459,9 +1459,8 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
* dmatest, thus create 'struct imx_dma_data mem_data' for this case.
* Please note in any other slave case, you have to setup chan->private
* with 'struct imx_dma_data' in your own filter function if you want to
- * request dma channel by dma_request_channel() rather than
- * dma_request_slave_channel(). Othwise, 'MEMCPY in case?' will appear
- * to warn you to correct your filter function.
+ * request a DMA channel via dma_request_channel(); otherwise 'MEMCPY in
+ * case?' will appear to warn you to correct your filter function.
*/
if (!data) {
dev_dbg(sdmac->sdma->dev, "MEMCPY in case?\n");
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index e50cf3357e5e..249296389771 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
@@ -277,8 +278,7 @@ static int chan_state_show(struct seq_file *s, void *p)
seq_printf(s, "\tPriority : %s\n",
str_prio[(phy->idx & 0xf) / 4]);
seq_printf(s, "\tUnaligned transfer bit: %s\n",
- _phy_readl_relaxed(phy, DALGN) & BIT(phy->idx) ?
- "yes" : "no");
+ str_yes_no(_phy_readl_relaxed(phy, DALGN) & BIT(phy->idx)));
seq_printf(s, "\tDCSR = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
dcsr, PXA_DCSR_STR(RUN), PXA_DCSR_STR(NODESC),
PXA_DCSR_STR(STOPIRQEN), PXA_DCSR_STR(EORIRQEN),
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index fdd41e1c2263..6b4fce453c85 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -725,7 +725,7 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
slave_addr = ops->slave_addr(schan);
/*
- * Allocate the sg list dynamically as it would consumer too much stack
+ * Allocate the sg list dynamically as it would consume too much stack
* space.
*/
sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_KERNEL);
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 95ecb12caaa5..2215ff877bf7 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include "virt-dma.h"
@@ -553,7 +554,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
continue;
dev_dbg(sdev->slave.dev, "DMA irq status %s: 0x%x\n",
- i ? "high" : "low", status);
+ str_high_low(i), status);
writel(status, sdev->base + DMA_IRQ_STAT(i));
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 4ece125b2ae7..3ed406f08c44 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
@@ -2047,7 +2048,7 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
- dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");
+ dev_dbg(dev, "chmap_exist: %s\n", str_yes_no(ecc->chmap_exist));
/* Nothing need to be done if queue priority is provided */
if (pdata->queue_priority_mapping)
@@ -2258,8 +2259,12 @@ static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
return NULL;
out:
- /* The channel is going to be used as HW synchronized */
- echan->hw_triggered = true;
+ /*
+ * The channel is going to be HW synchronized, unless it was
+ * reserved as a memcpy channel
+ */
+ echan->hw_triggered =
+ !edma_is_memcpy_channel(i, ecc->info->memcpy_channels);
return dma_get_slave_channel(chan);
}
#else
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index 7c224c3ab7a0..f87d244cc2d6 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -84,6 +84,7 @@ struct k3_udma_glue_rx_channel {
struct k3_udma_glue_rx_flow *flows;
u32 flow_num;
u32 flows_ready;
+ bool single_fdq; /* one FDQ for all flows */
};
static void k3_udma_chan_dev_release(struct device *dev)
@@ -970,10 +971,13 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
ep_cfg = rx_chn->common.ep_config;
- if (xudma_is_pktdma(rx_chn->common.udmax))
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
- else
+ rx_chn->single_fdq = false;
+ } else {
rx_chn->udma_rchan_id = -1;
+ rx_chn->single_fdq = true;
+ }
/* request and cfg UDMAP RX channel */
rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
@@ -1103,6 +1107,9 @@ k3_udma_glue_request_remote_rx_chn_common(struct k3_udma_glue_rx_channel *rx_chn
rx_chn->common.chan_dev.dma_coherent = true;
dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
DMA_BIT_MASK(48));
+ rx_chn->single_fdq = false;
+ } else {
+ rx_chn->single_fdq = true;
}
ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
@@ -1453,7 +1460,7 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);
void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num, void *data,
- void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
+ void (*cleanup)(void *data, dma_addr_t desc_dma))
{
struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
struct device *dev = rx_chn->common.dev;
@@ -1465,7 +1472,7 @@ void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
/* Skip RX FDQ in case one FDQ is used for the set of flows */
- if (skip_fdq)
+ if (rx_chn->single_fdq && flow_num)
goto do_reset;
/*
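
Callers consequently lose the skip_fdq parameter; the glue layer now derives
it per channel, resetting a shared FDQ only for flow 0. A hedged
before/after sketch of the call site change:

	/* before: caller encoded the FDQ-sharing policy */
	k3_udma_glue_reset_rx_chn(rx_chn, flow, data, cleanup, skip_fdq);

	/* after: the glue layer decides via rx_chn->single_fdq */
	k3_udma_glue_reset_rx_chn(rx_chn, flow, data, cleanup);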
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 7ed1956b4642..b223a7aacb0c 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -4886,6 +4886,12 @@ static int bcdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i].start = rm_res->desc[i].start +
oes->bcdma_bchan_ring;
irq_res.desc[i].num = rm_res->desc[i].num;
+
+ if (rm_res->desc[i].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[i].start_sec +
+ oes->bcdma_bchan_ring;
+ irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
+ }
}
}
} else {
@@ -4909,6 +4915,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i + 1].start = rm_res->desc[j].start +
oes->bcdma_tchan_ring;
irq_res.desc[i + 1].num = rm_res->desc[j].num;
+
+ if (rm_res->desc[j].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+ oes->bcdma_tchan_data;
+ irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ irq_res.desc[i + 1].start_sec = rm_res->desc[j].start_sec +
+ oes->bcdma_tchan_ring;
+ irq_res.desc[i + 1].num_sec = rm_res->desc[j].num_sec;
+ }
}
}
}
@@ -4929,6 +4944,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i + 1].start = rm_res->desc[j].start +
oes->bcdma_rchan_ring;
irq_res.desc[i + 1].num = rm_res->desc[j].num;
+
+ if (rm_res->desc[j].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+ oes->bcdma_rchan_data;
+ irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ irq_res.desc[i + 1].start_sec = rm_res->desc[j].start_sec +
+ oes->bcdma_rchan_ring;
+ irq_res.desc[i + 1].num_sec = rm_res->desc[j].num_sec;
+ }
}
}
}
@@ -5063,6 +5087,12 @@ static int pktdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i].start = rm_res->desc[i].start +
oes->pktdma_tchan_flow;
irq_res.desc[i].num = rm_res->desc[i].num;
+
+ if (rm_res->desc[i].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[i].start_sec +
+ oes->pktdma_tchan_flow;
+ irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
+ }
}
}
rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
@@ -5074,6 +5104,12 @@ static int pktdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i].start = rm_res->desc[j].start +
oes->pktdma_rchan_flow;
irq_res.desc[i].num = rm_res->desc[j].num;
+
+ if (rm_res->desc[j].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+ oes->pktdma_rchan_flow;
+ irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ }
}
}
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 108a7287f4cd..3ad44afd0e74 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -46,6 +46,7 @@
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>
@@ -2940,7 +2941,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
XILINX_DMA_DMASR_SG_MASK)
chan->has_sg = true;
dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
- chan->has_sg ? "enabled" : "disabled");
+ str_enabled_disabled(chan->has_sg));
}
/* Initialize the tasklet */
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index b360dca2c69e..bd04980009a4 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1137,10 +1137,7 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
unsigned long payload, buffer_end, transmit_header_bytes = 0;
u32 control;
int count;
- struct {
- struct fw_iso_packet packet;
- u8 header[256];
- } u;
+ DEFINE_RAW_FLEX(struct fw_iso_packet, u, header, 64);
if (ctx == NULL || a->handle != 0)
return -EINVAL;
@@ -1172,29 +1169,29 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
while (p < end) {
if (get_user(control, &p->control))
return -EFAULT;
- u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
- u.packet.interrupt = GET_INTERRUPT(control);
- u.packet.skip = GET_SKIP(control);
- u.packet.tag = GET_TAG(control);
- u.packet.sy = GET_SY(control);
- u.packet.header_length = GET_HEADER_LENGTH(control);
+ u->payload_length = GET_PAYLOAD_LENGTH(control);
+ u->interrupt = GET_INTERRUPT(control);
+ u->skip = GET_SKIP(control);
+ u->tag = GET_TAG(control);
+ u->sy = GET_SY(control);
+ u->header_length = GET_HEADER_LENGTH(control);
switch (ctx->type) {
case FW_ISO_CONTEXT_TRANSMIT:
- if (u.packet.header_length & 3)
+ if (u->header_length & 3)
return -EINVAL;
- transmit_header_bytes = u.packet.header_length;
+ transmit_header_bytes = u->header_length;
break;
case FW_ISO_CONTEXT_RECEIVE:
- if (u.packet.header_length == 0 ||
- u.packet.header_length % ctx->header_size != 0)
+ if (u->header_length == 0 ||
+ u->header_length % ctx->header_size != 0)
return -EINVAL;
break;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
- if (u.packet.payload_length == 0 ||
- u.packet.payload_length & 3)
+ if (u->payload_length == 0 ||
+ u->payload_length & 3)
return -EINVAL;
break;
}
@@ -1204,20 +1201,19 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
if (next > end)
return -EINVAL;
if (copy_from_user
- (u.packet.header, p->header, transmit_header_bytes))
+ (u->header, p->header, transmit_header_bytes))
return -EFAULT;
- if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
- u.packet.header_length + u.packet.payload_length > 0)
+ if (u->skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
+ u->header_length + u->payload_length > 0)
return -EINVAL;
- if (payload + u.packet.payload_length > buffer_end)
+ if (payload + u->payload_length > buffer_end)
return -EINVAL;
- if (fw_iso_context_queue(ctx, &u.packet,
- &client->buffer, payload))
+ if (fw_iso_context_queue(ctx, u, &client->buffer, payload))
break;
p = next;
- payload += u.packet.payload_length;
+ payload += u->payload_length;
count++;
}
fw_iso_context_queue_flush(ctx);
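
DEFINE_RAW_FLEX() (from <linux/overflow.h>) declares an on-stack instance of
a flex-array struct with a fixed trailing capacity, replacing the old
open-coded struct-plus-header-buffer wrapper; a hedged sketch with a
hypothetical type:

	#include <linux/overflow.h>
	#include <linux/string.h>
	#include <linux/types.h>

	struct msg {
		u16 len;
		u8 data[];	/* flexible array member */
	};

	static void example(void)
	{
		/* 'm' is a struct msg * backed by stack storage for 16 data bytes */
		DEFINE_RAW_FLEX(struct msg, m, data, 16);

		m->len = 16;
		memset(m->data, 0, m->len);
	}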
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 42433c19eb30..560724ce21aa 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -1631,6 +1631,7 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
cs_dsp_debugfs_save_wmfwname(dsp, file);
+ ret = 0;
out_fw:
cs_dsp_buf_free(&buf_list);
@@ -2338,6 +2339,7 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
cs_dsp_debugfs_save_binname(dsp, file);
+ ret = 0;
out_fw:
cs_dsp_buf_free(&buf_list);
diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
index 8d91997036e4..9cc963b2edc0 100644
--- a/drivers/firmware/dmi-sysfs.c
+++ b/drivers/firmware/dmi-sysfs.c
@@ -431,9 +431,9 @@ static ssize_t dmi_sel_raw_read_helper(struct dmi_sysfs_entry *entry,
}
}
-static ssize_t dmi_sel_raw_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
+static ssize_t raw_event_log_read(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
{
struct dmi_sysfs_entry *entry = to_entry(kobj->parent);
struct dmi_read_state state = {
@@ -445,10 +445,7 @@ static ssize_t dmi_sel_raw_read(struct file *filp, struct kobject *kobj,
return find_dmi_entry(entry, dmi_sel_raw_read_helper, &state);
}
-static struct bin_attribute dmi_sel_raw_attr = {
- .attr = {.name = "raw_event_log", .mode = 0400},
- .read = dmi_sel_raw_read,
-};
+static const BIN_ATTR_ADMIN_RO(raw_event_log, 0);
static int dmi_system_event_log(struct dmi_sysfs_entry *entry)
{
@@ -464,7 +461,7 @@ static int dmi_system_event_log(struct dmi_sysfs_entry *entry)
if (ret)
goto out_free;
- ret = sysfs_create_bin_file(entry->child, &dmi_sel_raw_attr);
+ ret = sysfs_create_bin_file(entry->child, &bin_attr_raw_event_log);
if (ret)
goto out_del;
@@ -537,10 +534,10 @@ static ssize_t dmi_entry_raw_read_helper(struct dmi_sysfs_entry *entry,
&state->pos, dh, entry_length);
}
-static ssize_t dmi_entry_raw_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
+static ssize_t raw_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
{
struct dmi_sysfs_entry *entry = to_entry(kobj);
struct dmi_read_state state = {
@@ -552,10 +549,7 @@ static ssize_t dmi_entry_raw_read(struct file *filp,
return find_dmi_entry(entry, dmi_entry_raw_read_helper, &state);
}
-static const struct bin_attribute dmi_entry_raw_attr = {
- .attr = {.name = "raw", .mode = 0400},
- .read = dmi_entry_raw_read,
-};
+static const BIN_ATTR_ADMIN_RO(raw, 0);
static void dmi_sysfs_entry_release(struct kobject *kobj)
{
@@ -630,7 +624,7 @@ static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
goto out_err;
/* Create the raw binary file to access the entry */
- *ret = sysfs_create_bin_file(&entry->kobj, &dmi_entry_raw_attr);
+ *ret = sysfs_create_bin_file(&entry->kobj, &bin_attr_raw);
if (*ret)
goto out_err;
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index fde0656481cc..70d39adf50dc 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -761,8 +761,8 @@ static void __init dmi_scan_machine(void)
pr_info("DMI not present or invalid.\n");
}
-static BIN_ATTR_SIMPLE_ADMIN_RO(smbios_entry_point);
-static BIN_ATTR_SIMPLE_ADMIN_RO(DMI);
+static __ro_after_init BIN_ATTR_SIMPLE_ADMIN_RO(smbios_entry_point);
+static __ro_after_init BIN_ATTR_SIMPLE_ADMIN_RO(DMI);
static int __init dmi_init(void)
{
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index b69e68ef3f02..928409199a1a 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -24,7 +24,7 @@
#include <linux/bcd.h>
#include <acpi/ghes.h>
#include <ras/ras_event.h>
-#include "cper_cxl.h"
+#include <cxl/event.h>
/*
* CPER record ID need to be unique even after reboot, because record
@@ -624,11 +624,11 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
else
goto err_section_too_small;
} else if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR)) {
- struct cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata);
+ struct cxl_cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata);
printk("%ssection_type: CXL Protocol Error\n", newpfx);
if (gdata->error_data_length >= sizeof(*prot_err))
- cper_print_prot_err(newpfx, prot_err);
+ cxl_cper_print_prot_err(newpfx, prot_err);
else
goto err_section_too_small;
} else {
diff --git a/drivers/firmware/efi/cper_cxl.c b/drivers/firmware/efi/cper_cxl.c
index a55771b99a97..8a7667faf953 100644
--- a/drivers/firmware/efi/cper_cxl.c
+++ b/drivers/firmware/efi/cper_cxl.c
@@ -8,26 +8,7 @@
*/
#include <linux/cper.h>
-#include "cper_cxl.h"
-
-#define PROT_ERR_VALID_AGENT_TYPE BIT_ULL(0)
-#define PROT_ERR_VALID_AGENT_ADDRESS BIT_ULL(1)
-#define PROT_ERR_VALID_DEVICE_ID BIT_ULL(2)
-#define PROT_ERR_VALID_SERIAL_NUMBER BIT_ULL(3)
-#define PROT_ERR_VALID_CAPABILITY BIT_ULL(4)
-#define PROT_ERR_VALID_DVSEC BIT_ULL(5)
-#define PROT_ERR_VALID_ERROR_LOG BIT_ULL(6)
-
-/* CXL RAS Capability Structure, CXL v3.0 sec 8.2.4.16 */
-struct cxl_ras_capability_regs {
- u32 uncor_status;
- u32 uncor_mask;
- u32 uncor_severity;
- u32 cor_status;
- u32 cor_mask;
- u32 cap_control;
- u32 header_log[16];
-};
+#include <cxl/event.h>
static const char * const prot_err_agent_type_strs[] = {
"Restricted CXL Device",
@@ -40,22 +21,8 @@ static const char * const prot_err_agent_type_strs[] = {
"CXL Upstream Switch Port",
};
-/*
- * The layout of the enumeration and the values matches CXL Agent Type
- * field in the UEFI 2.10 Section N.2.13,
- */
-enum {
- RCD, /* Restricted CXL Device */
- RCH_DP, /* Restricted CXL Host Downstream Port */
- DEVICE, /* CXL Device */
- LD, /* CXL Logical Device */
- FMLD, /* CXL Fabric Manager managed Logical Device */
- RP, /* CXL Root Port */
- DSP, /* CXL Downstream Switch Port */
- USP, /* CXL Upstream Switch Port */
-};
-
-void cper_print_prot_err(const char *pfx, const struct cper_sec_prot_err *prot_err)
+void cxl_cper_print_prot_err(const char *pfx,
+ const struct cxl_cper_sec_prot_err *prot_err)
{
if (prot_err->valid_bits & PROT_ERR_VALID_AGENT_TYPE)
pr_info("%s agent_type: %d, %s\n", pfx, prot_err->agent_type,
diff --git a/drivers/firmware/efi/cper_cxl.h b/drivers/firmware/efi/cper_cxl.h
deleted file mode 100644
index 86bfcf7909ec..000000000000
--- a/drivers/firmware/efi/cper_cxl.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * UEFI Common Platform Error Record (CPER) support for CXL Section.
- *
- * Copyright (C) 2022 Advanced Micro Devices, Inc.
- *
- * Author: Smita Koralahalli <Smita.KoralahalliChannabasappa@amd.com>
- */
-
-#ifndef LINUX_CPER_CXL_H
-#define LINUX_CPER_CXL_H
-
-/* CXL Protocol Error Section */
-#define CPER_SEC_CXL_PROT_ERR \
- GUID_INIT(0x80B9EFB4, 0x52B5, 0x4DE3, 0xA7, 0x77, 0x68, 0x78, \
- 0x4B, 0x77, 0x10, 0x48)
-
-#pragma pack(1)
-
-/* Compute Express Link Protocol Error Section, UEFI v2.10 sec N.2.13 */
-struct cper_sec_prot_err {
- u64 valid_bits;
- u8 agent_type;
- u8 reserved[7];
-
- /*
- * Except for RCH Downstream Port, all the remaining CXL Agent
- * types are uniquely identified by the PCIe compatible SBDF number.
- */
- union {
- u64 rcrb_base_addr;
- struct {
- u8 function;
- u8 device;
- u8 bus;
- u16 segment;
- u8 reserved_1[3];
- };
- } agent_addr;
-
- struct {
- u16 vendor_id;
- u16 device_id;
- u16 subsystem_vendor_id;
- u16 subsystem_id;
- u8 class_code[2];
- u16 slot;
- u8 reserved_1[4];
- } device_id;
-
- struct {
- u32 lower_dw;
- u32 upper_dw;
- } dev_serial_num;
-
- u8 capability[60];
- u16 dvsec_len;
- u16 err_len;
- u8 reserved_2[4];
-};
-
-#pragma pack()
-
-void cper_print_prot_err(const char *pfx, const struct cper_sec_prot_err *prot_err);
-
-#endif //__CPER_CXL_
diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c
index 208db29613c6..0a856c3f69a3 100644
--- a/drivers/firmware/efi/mokvar-table.c
+++ b/drivers/firmware/efi/mokvar-table.c
@@ -263,7 +263,7 @@ struct efi_mokvar_table_entry *efi_mokvar_entry_find(const char *name)
* amount of data in this mokvar config table entry.
*/
static ssize_t efi_mokvar_sysfs_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct efi_mokvar_table_entry *mokvar_entry = bin_attr->private;
@@ -340,7 +340,7 @@ static int __init efi_mokvar_sysfs_init(void)
mokvar_sysfs->bin_attr.attr.name = mokvar_entry->name;
mokvar_sysfs->bin_attr.attr.mode = 0400;
mokvar_sysfs->bin_attr.size = mokvar_entry->data_size;
- mokvar_sysfs->bin_attr.read = efi_mokvar_sysfs_read;
+ mokvar_sysfs->bin_attr.read_new = efi_mokvar_sysfs_read;
err = sysfs_create_bin_file(mokvar_kobj,
&mokvar_sysfs->bin_attr);
diff --git a/drivers/firmware/efi/rci2-table.c b/drivers/firmware/efi/rci2-table.c
index 4fd45d6f69a4..c1bedd244817 100644
--- a/drivers/firmware/efi/rci2-table.c
+++ b/drivers/firmware/efi/rci2-table.c
@@ -40,7 +40,7 @@ static u8 *rci2_base;
static u32 rci2_table_len;
unsigned long rci2_table_phys __ro_after_init = EFI_INVALID_TABLE_ADDR;
-static BIN_ATTR_SIMPLE_ADMIN_RO(rci2);
+static __ro_after_init BIN_ATTR_SIMPLE_ADMIN_RO(rci2);
static u16 checksum(void)
{
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index d58da3e4500a..2615fb780e3c 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -460,7 +460,7 @@ static const struct kobj_type fw_cfg_sysfs_entry_ktype = {
/* raw-read method and attribute */
static ssize_t fw_cfg_sysfs_read_raw(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
@@ -474,9 +474,9 @@ static ssize_t fw_cfg_sysfs_read_raw(struct file *filp, struct kobject *kobj,
return fw_cfg_read_blob(entry->select, buf, pos, count);
}
-static struct bin_attribute fw_cfg_sysfs_attr_raw = {
+static const struct bin_attribute fw_cfg_sysfs_attr_raw = {
.attr = { .name = "raw", .mode = S_IRUSR },
- .read = fw_cfg_sysfs_read_raw,
+ .read_new = fw_cfg_sysfs_read_raw,
};
/*
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index 6b0914432445..5af0bd33890c 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -52,7 +52,7 @@
/* V2 Defines */
#define VSE_CVP_TX_CREDITS 0x49 /* 8bit */
-#define V2_CREDIT_TIMEOUT_US 20000
+#define V2_CREDIT_TIMEOUT_US 40000
#define V2_CHECK_CREDIT_US 10
#define V2_POLL_TIMEOUT_US 1000000
#define V2_USER_TIMEOUT_US 500000
diff --git a/drivers/fpga/versal-fpga.c b/drivers/fpga/versal-fpga.c
index 3710e8f01be2..e6189106c468 100644
--- a/drivers/fpga/versal-fpga.c
+++ b/drivers/fpga/versal-fpga.c
@@ -69,7 +69,7 @@ static struct platform_driver versal_fpga_driver = {
.probe = versal_fpga_probe,
.driver = {
.name = "versal_fpga_manager",
- .of_match_table = of_match_ptr(versal_fpga_of_match),
+ .of_match_table = versal_fpga_of_match,
},
};
module_platform_driver(versal_fpga_driver);
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index e2e1e9df6115..50e8736039fe 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -554,7 +554,7 @@ static unsigned long aligned_access_size(size_t offset, size_t count)
}
static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ struct kobject *kobj, const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct fsi_slave *slave = to_fsi_slave(kobj_to_dev(kobj));
@@ -581,7 +581,7 @@ static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
}
static ssize_t fsi_slave_sysfs_raw_write(struct file *file,
- struct kobject *kobj, struct bin_attribute *attr,
+ struct kobject *kobj, const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct fsi_slave *slave = to_fsi_slave(kobj_to_dev(kobj));
@@ -613,8 +613,8 @@ static const struct bin_attribute fsi_slave_raw_attr = {
.mode = 0600,
},
.size = 0,
- .read = fsi_slave_sysfs_raw_read,
- .write = fsi_slave_sysfs_raw_write,
+ .read_new = fsi_slave_sysfs_raw_read,
+ .write_new = fsi_slave_sysfs_raw_write,
};
static void fsi_slave_release(struct device *dev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6ebf6179064b..a30111d2c3ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -246,7 +246,7 @@ static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev)
}
static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t ppos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -282,8 +282,8 @@ static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
return bytes_read;
}
-BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
- AMDGPU_SYS_REG_STATE_END);
+static const BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
+ AMDGPU_SYS_REG_STATE_END);
int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index d54bb1377262..df5d5dbd7f0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -4025,7 +4025,7 @@ int is_psp_fw_valid(struct psp_bin_desc bin)
}
static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -4061,7 +4061,7 @@ static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
}
static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buffer,
+ const struct bin_attribute *bin_attr, char *buffer,
loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -4113,11 +4113,11 @@ rel_buf:
* Writing to this file will stage an IFWI for update. Reading from this file
* will trigger the update process.
*/
-static struct bin_attribute psp_vbflash_bin_attr = {
+static const struct bin_attribute psp_vbflash_bin_attr = {
.attr = {.name = "psp_vbflash", .mode = 0660},
.size = 0,
- .write = amdgpu_psp_vbflash_write,
- .read = amdgpu_psp_vbflash_read,
+ .write_new = amdgpu_psp_vbflash_write,
+ .read_new = amdgpu_psp_vbflash_read,
};
/**
@@ -4144,7 +4144,7 @@ static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
}
static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
-static struct bin_attribute *bin_flash_attrs[] = {
+static const struct bin_attribute *const bin_flash_attrs[] = {
&psp_vbflash_bin_attr,
NULL
};
@@ -4180,7 +4180,7 @@ static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
const struct attribute_group amdgpu_flash_attr_group = {
.attrs = flash_attrs,
- .bin_attrs = bin_flash_attrs,
+ .bin_attrs_new = bin_flash_attrs,
.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
.is_visible = amdgpu_flash_attr_is_visible,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index bed2603ae4c4..68685aca2835 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1733,7 +1733,7 @@ static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
*/
static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
- struct kobject *kobj, struct bin_attribute *attr,
+ struct kobject *kobj, const struct bin_attribute *attr,
char *buf, loff_t ppos, size_t count)
{
struct amdgpu_ras *con =
@@ -2068,8 +2068,8 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
/* debugfs end */
/* ras fs */
-static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
- amdgpu_ras_sysfs_badpages_read, NULL, 0);
+static const BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
+ amdgpu_ras_sysfs_badpages_read, NULL, 0);
static DEVICE_ATTR(features, S_IRUGO,
amdgpu_ras_sysfs_features_read, NULL);
static DEVICE_ATTR(version, 0444,
@@ -2091,7 +2091,7 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
&con->event_state_attr.attr,
NULL
};
- struct bin_attribute *bin_attrs[] = {
+ const struct bin_attribute *bin_attrs[] = {
NULL,
NULL,
};
@@ -2117,11 +2117,10 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
if (amdgpu_bad_page_threshold != 0) {
/* add bad_page_features entry */
- bin_attr_gpu_vram_bad_pages.private = NULL;
con->badpages_attr = bin_attr_gpu_vram_bad_pages;
+ sysfs_bin_attr_init(&con->badpages_attr);
bin_attrs[0] = &con->badpages_attr;
- group.bin_attrs = bin_attrs;
- sysfs_bin_attr_init(bin_attrs[0]);
+ group.bin_attrs_new = bin_attrs;
}
r = sysfs_create_group(&adev->dev->kobj, &group);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index a3e93b2891f0..5198a079b463 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -597,7 +597,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
* incorrect/corrupted and we should correct our SRM by getting it from PSP
*/
static ssize_t srm_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buffer,
+ const struct bin_attribute *bin_attr, char *buffer,
loff_t pos, size_t count)
{
struct hdcp_workqueue *work;
@@ -621,7 +621,7 @@ static ssize_t srm_data_write(struct file *filp, struct kobject *kobj,
}
static ssize_t srm_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buffer,
+ const struct bin_attribute *bin_attr, char *buffer,
loff_t pos, size_t count)
{
struct hdcp_workqueue *work;
@@ -681,8 +681,8 @@ ret:
static const struct bin_attribute data_attr = {
.attr = {.name = "hdcp_srm", .mode = 0664},
.size = PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, /* Limit SRM size */
- .write = srm_data_write,
- .read = srm_data_read,
+ .write_new = srm_data_write,
+ .read_new = srm_data_read,
};
struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index 2451c816edd5..38431e8360e7 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -1416,13 +1416,13 @@ map_pages:
goto err_unmap;
}
zdd = page->zone_device_data;
- if (pagemap != page->pgmap) {
+ if (pagemap != page_pgmap(page)) {
if (i > 0) {
err = -EOPNOTSUPP;
goto err_unmap;
}
- pagemap = page->pgmap;
+ pagemap = page_pgmap(page);
dpagemap = zdd->devmem_allocation->dpagemap;
if (drm_WARN_ON(gpusvm->drm, !dpagemap)) {
/*
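page->pgmap is no longer dereferenced directly; callers go through the page_pgmap() accessor, and drivers that own the pagemap typically recover their region with container_of(), as nouveau and xe do below. A minimal sketch with a hypothetical demo_region type:

/* Hypothetical device-memory region embedding its struct dev_pagemap. */
struct demo_region {
	struct dev_pagemap pagemap;
	/* driver-specific state ... */
};

static struct demo_region *demo_page_to_region(struct page *page)
{
	return container_of(page_pgmap(page), struct demo_region, pagemap);
}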
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index fb3bbb6adcd1..60c1f26edb6f 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -261,7 +261,7 @@ static ssize_t enabled_show(struct device *device,
}
static ssize_t edid_show(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf, loff_t off,
+ const struct bin_attribute *attr, char *buf, loff_t off,
size_t count)
{
struct device *connector_dev = kobj_to_dev(kobj);
@@ -315,21 +315,21 @@ static struct attribute *connector_dev_attrs[] = {
NULL
};
-static struct bin_attribute edid_attr = {
+static const struct bin_attribute edid_attr = {
.attr.name = "edid",
.attr.mode = 0444,
.size = 0,
- .read = edid_show,
+ .read_new = edid_show,
};
-static struct bin_attribute *connector_bin_attrs[] = {
+static const struct bin_attribute *const connector_bin_attrs[] = {
&edid_attr,
NULL
};
static const struct attribute_group connector_dev_group = {
.attrs = connector_dev_attrs,
- .bin_attrs = connector_bin_attrs,
+ .bin_attrs_new = connector_bin_attrs,
};
static const struct attribute_group *connector_dev_groups[] = {
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 819ab933bb10..a6613eed3398 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -2490,7 +2490,7 @@ void i915_gpu_error_debugfs_register(struct drm_i915_private *i915)
}
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
@@ -2526,7 +2526,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
}
static ssize_t error_state_write(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *kdev = kobj_to_dev(kobj);
@@ -2542,8 +2542,8 @@ static const struct bin_attribute error_state_attr = {
.attr.name = "error",
.attr.mode = S_IRUSR | S_IWUSR,
.size = 0,
- .read = error_state_read,
- .write = error_state_write,
+ .read_new = error_state_read,
+ .write_new = error_state_write,
};
void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/i915_iosf_mbi.h b/drivers/gpu/drm/i915/i915_iosf_mbi.h
index 8f81b7603d37..317075d0da4e 100644
--- a/drivers/gpu/drm/i915/i915_iosf_mbi.h
+++ b/drivers/gpu/drm/i915/i915_iosf_mbi.h
@@ -31,12 +31,6 @@ iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(struct notifier_block *nb)
{
return 0;
}
-
-static inline
-int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
-{
- return 0;
-}
#endif
#endif /* __I915_IOSF_MBI_H__ */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 8775beab9cb8..f936e8f1f129 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -60,7 +60,7 @@ static int l3_access_valid(struct drm_i915_private *i915, loff_t offset)
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t count)
{
struct device *kdev = kobj_to_dev(kobj);
@@ -88,7 +88,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t count)
{
struct device *kdev = kobj_to_dev(kobj);
@@ -140,8 +140,8 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
static const struct bin_attribute dpf_attrs = {
.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
.size = GEN7_L3LOG_SIZE,
- .read = i915_l3_read,
- .write = i915_l3_write,
+ .read_new = i915_l3_read,
+ .write_new = i915_l3_write,
.mmap = NULL,
.private = (void *)0
};
@@ -149,8 +149,8 @@ static const struct bin_attribute dpf_attrs = {
static const struct bin_attribute dpf_attrs_1 = {
.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
.size = GEN7_L3LOG_SIZE,
- .read = i915_l3_read,
- .write = i915_l3_write,
+ .read_new = i915_l3_read,
+ .write_new = i915_l3_write,
.mmap = NULL,
.private = (void *)1
};
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 2067c5b65c57..11ace5cebf4c 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -310,7 +310,7 @@ static bool lima_read_block(struct lima_block_reader *reader,
}
static ssize_t lima_error_state_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -336,7 +336,7 @@ static ssize_t lima_error_state_read(struct file *filp, struct kobject *kobj,
}
static ssize_t lima_error_state_write(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -362,8 +362,8 @@ static const struct bin_attribute lima_error_state_attr = {
.attr.name = "error",
.attr.mode = 0600,
.size = 0,
- .read = lima_error_state_read,
- .write = lima_error_state_write,
+ .read_new = lima_error_state_read,
+ .write_new = lima_error_state_write,
};
static int lima_pdev_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 1a072568cef6..61d0f411ef84 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -88,7 +88,8 @@ struct nouveau_dmem {
static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
- return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
+ return container_of(page_pgmap(page), struct nouveau_dmem_chunk,
+ pagemap);
}
static struct nouveau_drm *page_to_drm(struct page *page)
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 825c867eba7c..e12e2596ed84 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -610,10 +610,9 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
notifier_seq = mmu_interval_read_begin(&notifier->notifier);
mmap_read_lock(mm);
- ret = make_device_exclusive_range(mm, start, start + PAGE_SIZE,
- &page, drm->dev);
+ page = make_device_exclusive(mm, start, drm->dev, &folio);
mmap_read_unlock(mm);
- if (ret <= 0 || !page) {
+ if (IS_ERR(page)) {
ret = -EINVAL;
goto out;
}
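make_device_exclusive() replaces make_device_exclusive_range(): it takes a single address rather than a range, returns the page directly (ERR_PTR-encoded on failure) instead of a count, and hands back the owning folio through an out parameter. A hedged sketch of the new calling convention; the folio cleanup is an assumption here, not shown in this hunk:

struct folio *folio;
struct page *page;

mmap_read_lock(mm);
page = make_device_exclusive(mm, addr, owner, &folio);
mmap_read_unlock(mm);
if (IS_ERR(page))
	return PTR_ERR(page);

/* ... program the device mapping from 'page' ... */

folio_unlock(folio);	/* assumption: folio comes back locked and referenced */
folio_put(folio);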
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 1f15990d3934..1d9a42cbc88f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -289,7 +289,7 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
return -EINVAL;
}
-STACK_FRAME_NON_STANDARD(vmw_send_msg);
+STACK_FRAME_NON_STANDARD_FP(vmw_send_msg);
/**
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 516898e99b26..3e829c87d7b4 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -341,7 +341,7 @@ static void xe_svm_garbage_collector_work_func(struct work_struct *w)
static struct xe_vram_region *page_to_vr(struct page *page)
{
- return container_of(page->pgmap, struct xe_vram_region, pagemap);
+ return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
}
static struct xe_tile *vr_to_tile(struct xe_vram_region *vr)
diff --git a/drivers/gpu/nova-core/driver.rs b/drivers/gpu/nova-core/driver.rs
index 63c19f140fbd..a08fb6599267 100644
--- a/drivers/gpu/nova-core/driver.rs
+++ b/drivers/gpu/nova-core/driver.rs
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-use kernel::{bindings, c_str, pci, prelude::*};
+use kernel::{bindings, c_str, device::Core, pci, prelude::*};
use crate::gpu::Gpu;
@@ -27,7 +27,7 @@ impl pci::Driver for NovaCore {
type IdInfo = ();
const ID_TABLE: pci::IdTable<Self::IdInfo> = &PCI_TABLE;
- fn probe(pdev: &mut pci::Device, _info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
+ fn probe(pdev: &pci::Device<Core>, _info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
dev_dbg!(pdev.as_ref(), "Probe Nova Core GPU driver.\n");
pdev.enable_device_mem()?;
diff --git a/drivers/gpu/nova-core/regs.rs b/drivers/gpu/nova-core/regs.rs
index 50aefb150b0b..b1a25b86ef17 100644
--- a/drivers/gpu/nova-core/regs.rs
+++ b/drivers/gpu/nova-core/regs.rs
@@ -35,7 +35,7 @@ pub(crate) struct Boot0(u32);
impl Boot0 {
#[inline]
pub(crate) fn read(bar: &Bar0) -> Self {
- Self(bar.readl(BOOT0_OFFSET))
+ Self(bar.read32(BOOT0_OFFSET))
}
#[inline]
diff --git a/drivers/greybus/gb-beagleplay.c b/drivers/greybus/gb-beagleplay.c
index 473ac3f2d382..da31f1131afc 100644
--- a/drivers/greybus/gb-beagleplay.c
+++ b/drivers/greybus/gb-beagleplay.c
@@ -912,7 +912,9 @@ static enum fw_upload_err cc1352_prepare(struct fw_upload *fw_upload,
cc1352_bootloader_reset(bg);
WRITE_ONCE(bg->flashing_mode, false);
msleep(200);
- gb_greybus_init(bg);
+ if (gb_greybus_init(bg) < 0)
+ return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_RW_ERROR,
+ "Failed to initialize greybus");
gb_beagleplay_start_svc(bg);
return FW_UPLOAD_ERR_FW_INVALID;
}
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index fec2f18679e3..2b4080e51f97 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1192,6 +1192,7 @@ static void free_balloon_pages(struct hv_dynmem_device *dm,
__ClearPageOffline(pg);
__free_page(pg);
dm->num_pages_ballooned--;
+ mod_node_page_state(page_pgdat(pg), NR_BALLOON_PAGES, -1);
adjust_managed_page_count(pg, 1);
}
}
@@ -1221,6 +1222,7 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
return i * alloc_unit;
dm->num_pages_ballooned += alloc_unit;
+ mod_node_page_state(page_pgdat(pg), NR_BALLOON_PAGES, alloc_unit);
/*
* If we allocated 2M pages, split them so we
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 06f0a7594169..ecd7086a5b83 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -133,6 +133,18 @@ config CORESIGHT_STM
To compile this driver as a module, choose M here: the
module will be called coresight-stm.
+config CORESIGHT_CTCU
+ tristate "CoreSight TMC Control Unit driver"
+ depends on CORESIGHT_LINK_AND_SINK_TMC
+ help
+ This driver provides support for the CoreSight TMC Control Unit
+ that hosts miscellaneous configuration registers. It is
+ primarily used to control the behavior of the TMC ETR
+ device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called coresight-ctcu.
+
config CORESIGHT_CPU_DEBUG
tristate "CoreSight CPU Debug driver"
depends on ARM || ARM64
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 4ba478211b31..8e62c3150aeb 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -25,7 +25,7 @@ subdir-ccflags-y += $(condflags)
obj-$(CONFIG_CORESIGHT) += coresight.o
coresight-y := coresight-core.o coresight-etm-perf.o coresight-platform.o \
coresight-sysfs.o coresight-syscfg.o coresight-config.o \
- coresight-cfg-preload.o coresight-cfg-afdo.o \
+ coresight-cfg-preload.o coresight-cfg-afdo.o coresight-cfg-pstop.o \
coresight-syscfg-configfs.o coresight-trace-id.o
obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o
coresight-tmc-y := coresight-tmc-core.o coresight-tmc-etf.o \
@@ -51,3 +51,5 @@ coresight-cti-y := coresight-cti-core.o coresight-cti-platform.o \
coresight-cti-sysfs.o
obj-$(CONFIG_ULTRASOC_SMB) += ultrasoc-smb.o
obj-$(CONFIG_CORESIGHT_DUMMY) += coresight-dummy.o
+obj-$(CONFIG_CORESIGHT_CTCU) += coresight-ctcu.o
+coresight-ctcu-y := coresight-ctcu-core.o
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index 275cc0d9f505..fa170c966bc3 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -269,7 +269,7 @@ catu_init_sg_table(struct device *catu_dev, int node,
* Each table can address up to 1MB and we can have
* CATU_PAGES_PER_SYSPAGE tables in a system page.
*/
- nr_tpages = DIV_ROUND_UP(size, SZ_1M) / CATU_PAGES_PER_SYSPAGE;
+ nr_tpages = DIV_ROUND_UP(size, CATU_PAGES_PER_SYSPAGE * SZ_1M);
catu_table = tmc_alloc_sg_table(catu_dev, node, nr_tpages,
size >> PAGE_SHIFT, pages);
if (IS_ERR(catu_table))
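The old expression rounded up to whole megabytes and then truncated with an integer division, which could yield zero tables for small buffers; rounding up once over the combined granularity fixes that. A worked comparison, assuming CATU_PAGES_PER_SYSPAGE is 4 purely for illustration:

/* size == SZ_1M, CATU_PAGES_PER_SYSPAGE assumed == 4 */
old = DIV_ROUND_UP(SZ_1M, SZ_1M) / 4;	/* 1 / 4 == 0 tables: broken */
new = DIV_ROUND_UP(SZ_1M, 4 * SZ_1M);	/* == 1 table: correct */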
@@ -594,7 +594,7 @@ static void catu_remove(struct amba_device *adev)
__catu_remove(&adev->dev);
}
-static struct amba_id catu_ids[] = {
+static const struct amba_id catu_ids[] = {
CS_AMBA_ID(0x000bb9ee),
{},
};
diff --git a/drivers/hwtracing/coresight/coresight-cfg-preload.c b/drivers/hwtracing/coresight/coresight-cfg-preload.c
index e237a4edfa09..4980e68483c5 100644
--- a/drivers/hwtracing/coresight/coresight-cfg-preload.c
+++ b/drivers/hwtracing/coresight/coresight-cfg-preload.c
@@ -13,6 +13,7 @@
static struct cscfg_feature_desc *preload_feats[] = {
#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
&strobe_etm4x,
+ &gen_etrig_etm4x,
#endif
NULL
};
@@ -20,6 +21,7 @@ static struct cscfg_feature_desc *preload_feats[] = {
static struct cscfg_config_desc *preload_cfgs[] = {
#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
&afdo_etm4x,
+ &pstop_etm4x,
#endif
NULL
};
diff --git a/drivers/hwtracing/coresight/coresight-cfg-preload.h b/drivers/hwtracing/coresight/coresight-cfg-preload.h
index 21299e175477..291ba530a6a5 100644
--- a/drivers/hwtracing/coresight/coresight-cfg-preload.h
+++ b/drivers/hwtracing/coresight/coresight-cfg-preload.h
@@ -10,4 +10,6 @@
#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
extern struct cscfg_feature_desc strobe_etm4x;
extern struct cscfg_config_desc afdo_etm4x;
+extern struct cscfg_feature_desc gen_etrig_etm4x;
+extern struct cscfg_config_desc pstop_etm4x;
#endif
diff --git a/drivers/hwtracing/coresight/coresight-cfg-pstop.c b/drivers/hwtracing/coresight/coresight-cfg-pstop.c
new file mode 100644
index 000000000000..c2bfbd07bfaf
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-cfg-pstop.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright(C) 2023 Marvell.
+ * Based on coresight-cfg-afdo.c
+ */
+
+#include "coresight-config.h"
+
+/* ETMv4 includes and features */
+#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
+#include "coresight-etm4x-cfg.h"
+
+/* preload configurations and features */
+
+/* preload in features for ETMv4 */
+
+/* gen_etrig feature - generate an external trigger on an address match */
+static struct cscfg_parameter_desc gen_etrig_params[] = {
+ {
+ .name = "address",
+ .value = (u64)panic,
+ },
+};
+
+static struct cscfg_regval_desc gen_etrig_regs[] = {
+ /* resource selector */
+ {
+ .type = CS_CFG_REG_TYPE_RESOURCE,
+ .offset = TRCRSCTLRn(2),
+ .hw_info = ETM4_CFG_RES_SEL,
+ .val32 = 0x40001,
+ },
+ /* single address comparator */
+ {
+ .type = CS_CFG_REG_TYPE_RESOURCE | CS_CFG_REG_TYPE_VAL_64BIT |
+ CS_CFG_REG_TYPE_VAL_PARAM,
+ .offset = TRCACVRn(0),
+ .val32 = 0x0,
+ },
+ {
+ .type = CS_CFG_REG_TYPE_RESOURCE,
+ .offset = TRCACATRn(0),
+ .val64 = 0xf00,
+ },
+ /* Driver external output[0] with comparator out */
+ {
+ .type = CS_CFG_REG_TYPE_RESOURCE,
+ .offset = TRCEVENTCTL0R,
+ .val32 = 0x2,
+ },
+ /* end of regs */
+};
+
+struct cscfg_feature_desc gen_etrig_etm4x = {
+ .name = "gen_etrig",
+ .description = "Generate external trigger on address match\n"
+ "parameter \'address\': address of kernel address\n",
+ .match_flags = CS_CFG_MATCH_CLASS_SRC_ETM4,
+ .nr_params = ARRAY_SIZE(gen_etrig_params),
+ .params_desc = gen_etrig_params,
+ .nr_regs = ARRAY_SIZE(gen_etrig_regs),
+ .regs_desc = gen_etrig_regs,
+};
+
+/* create a panic stop configuration */
+
+/* the total number of parameters across the referenced features */
+#define PSTOP_NR_PARAMS ARRAY_SIZE(gen_etrig_params)
+
+static const char *pstop_ref_names[] = {
+ "gen_etrig",
+};
+
+struct cscfg_config_desc pstop_etm4x = {
+ .name = "panicstop",
+ .description = "Stop ETM on kernel panic\n",
+ .nr_feat_refs = ARRAY_SIZE(pstop_ref_names),
+ .feat_ref_names = pstop_ref_names,
+ .nr_total_params = PSTOP_NR_PARAMS,
+};
+
+/* end of ETM4x configurations */
+#endif /* IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X) */
diff --git a/drivers/hwtracing/coresight/coresight-config.c b/drivers/hwtracing/coresight/coresight-config.c
index 4723bf7402a2..4f72ae71b696 100644
--- a/drivers/hwtracing/coresight/coresight-config.c
+++ b/drivers/hwtracing/coresight/coresight-config.c
@@ -76,10 +76,10 @@ static int cscfg_set_on_enable(struct cscfg_feature_csdev *feat_csdev)
unsigned long flags;
int i;
- spin_lock_irqsave(feat_csdev->drv_spinlock, flags);
+ raw_spin_lock_irqsave(feat_csdev->drv_spinlock, flags);
for (i = 0; i < feat_csdev->nr_regs; i++)
cscfg_set_reg(&feat_csdev->regs_csdev[i]);
- spin_unlock_irqrestore(feat_csdev->drv_spinlock, flags);
+ raw_spin_unlock_irqrestore(feat_csdev->drv_spinlock, flags);
dev_dbg(&feat_csdev->csdev->dev, "Feature %s: %s",
feat_csdev->feat_desc->name, "set on enable");
return 0;
@@ -91,10 +91,10 @@ static void cscfg_save_on_disable(struct cscfg_feature_csdev *feat_csdev)
unsigned long flags;
int i;
- spin_lock_irqsave(feat_csdev->drv_spinlock, flags);
+ raw_spin_lock_irqsave(feat_csdev->drv_spinlock, flags);
for (i = 0; i < feat_csdev->nr_regs; i++)
cscfg_save_reg(&feat_csdev->regs_csdev[i]);
- spin_unlock_irqrestore(feat_csdev->drv_spinlock, flags);
+ raw_spin_unlock_irqrestore(feat_csdev->drv_spinlock, flags);
dev_dbg(&feat_csdev->csdev->dev, "Feature %s: %s",
feat_csdev->feat_desc->name, "save on disable");
}
diff --git a/drivers/hwtracing/coresight/coresight-config.h b/drivers/hwtracing/coresight/coresight-config.h
index 6ba013975741..b9ebc9fcfb7f 100644
--- a/drivers/hwtracing/coresight/coresight-config.h
+++ b/drivers/hwtracing/coresight/coresight-config.h
@@ -206,7 +206,7 @@ struct cscfg_feature_csdev {
const struct cscfg_feature_desc *feat_desc;
struct coresight_device *csdev;
struct list_head node;
- spinlock_t *drv_spinlock;
+ raw_spinlock_t *drv_spinlock;
int nr_params;
struct cscfg_parameter_csdev *params_csdev;
int nr_regs;
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index 0a9380350fb5..fb43ef6a3b1f 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -19,10 +19,12 @@
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
+#include <linux/panic_notifier.h>
#include "coresight-etm-perf.h"
#include "coresight-priv.h"
#include "coresight-syscfg.h"
+#include "coresight-trace-id.h"
/*
* Mutex used to lock all sysfs enable and disable actions and loading and
@@ -75,14 +77,14 @@ struct coresight_device *coresight_get_percpu_sink(int cpu)
}
EXPORT_SYMBOL_GPL(coresight_get_percpu_sink);
-static struct coresight_device *coresight_get_source(struct list_head *path)
+static struct coresight_device *coresight_get_source(struct coresight_path *path)
{
struct coresight_device *csdev;
if (!path)
return NULL;
- csdev = list_first_entry(path, struct coresight_node, link)->csdev;
+ csdev = list_first_entry(&path->path_list, struct coresight_node, link)->csdev;
if (!coresight_is_device_source(csdev))
return NULL;
@@ -331,12 +333,12 @@ static int coresight_enable_helper(struct coresight_device *csdev,
return helper_ops(csdev)->enable(csdev, mode, data);
}
-static void coresight_disable_helper(struct coresight_device *csdev)
+static void coresight_disable_helper(struct coresight_device *csdev, void *data)
{
- helper_ops(csdev)->disable(csdev, NULL);
+ helper_ops(csdev)->disable(csdev, data);
}
-static void coresight_disable_helpers(struct coresight_device *csdev)
+static void coresight_disable_helpers(struct coresight_device *csdev, void *data)
{
int i;
struct coresight_device *helper;
@@ -344,7 +346,7 @@ static void coresight_disable_helpers(struct coresight_device *csdev)
for (i = 0; i < csdev->pdata->nr_outconns; ++i) {
helper = csdev->pdata->out_conns[i]->dest_dev;
if (helper && coresight_is_helper(helper))
- coresight_disable_helper(helper);
+ coresight_disable_helper(helper, data);
}
}
@@ -361,7 +363,7 @@ static void coresight_disable_helpers(struct coresight_device *csdev)
void coresight_disable_source(struct coresight_device *csdev, void *data)
{
source_ops(csdev)->disable(csdev, data);
- coresight_disable_helpers(csdev);
+ coresight_disable_helpers(csdev, NULL);
}
EXPORT_SYMBOL_GPL(coresight_disable_source);
@@ -370,16 +372,16 @@ EXPORT_SYMBOL_GPL(coresight_disable_source);
* @nd in the list. If @nd is NULL, all the components except the SOURCE are
* disabled.
*/
-static void coresight_disable_path_from(struct list_head *path,
+static void coresight_disable_path_from(struct coresight_path *path,
struct coresight_node *nd)
{
u32 type;
struct coresight_device *csdev, *parent, *child;
if (!nd)
- nd = list_first_entry(path, struct coresight_node, link);
+ nd = list_first_entry(&path->path_list, struct coresight_node, link);
- list_for_each_entry_continue(nd, path, link) {
+ list_for_each_entry_continue(nd, &path->path_list, link) {
csdev = nd->csdev;
type = csdev->type;
@@ -417,11 +419,11 @@ static void coresight_disable_path_from(struct list_head *path,
}
/* Disable all helpers adjacent along the path last */
- coresight_disable_helpers(csdev);
+ coresight_disable_helpers(csdev, path);
}
}
-void coresight_disable_path(struct list_head *path)
+void coresight_disable_path(struct coresight_path *path)
{
coresight_disable_path_from(path, NULL);
}
@@ -446,7 +448,7 @@ static int coresight_enable_helpers(struct coresight_device *csdev,
return 0;
}
-int coresight_enable_path(struct list_head *path, enum cs_mode mode,
+int coresight_enable_path(struct coresight_path *path, enum cs_mode mode,
void *sink_data)
{
int ret = 0;
@@ -456,12 +458,12 @@ int coresight_enable_path(struct list_head *path, enum cs_mode mode,
struct coresight_device *source;
source = coresight_get_source(path);
- list_for_each_entry_reverse(nd, path, link) {
+ list_for_each_entry_reverse(nd, &path->path_list, link) {
csdev = nd->csdev;
type = csdev->type;
/* Enable all helpers adjacent to the path first */
- ret = coresight_enable_helpers(csdev, mode, sink_data);
+ ret = coresight_enable_helpers(csdev, mode, path);
if (ret)
goto err;
/*
@@ -509,20 +511,21 @@ err:
goto out;
}
-struct coresight_device *coresight_get_sink(struct list_head *path)
+struct coresight_device *coresight_get_sink(struct coresight_path *path)
{
struct coresight_device *csdev;
if (!path)
return NULL;
- csdev = list_last_entry(path, struct coresight_node, link)->csdev;
+ csdev = list_last_entry(&path->path_list, struct coresight_node, link)->csdev;
if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
return NULL;
return csdev;
}
+EXPORT_SYMBOL_GPL(coresight_get_sink);
u32 coresight_get_sink_id(struct coresight_device *csdev)
{
@@ -653,6 +656,50 @@ static void coresight_drop_device(struct coresight_device *csdev)
}
}
+/*
+ * A coresight device reads its existing trace ID, or allocates a new one, if
+ * its trace_id callback is set.
+ *
+ * Return 0 if the trace_id callback is not set. Otherwise return the result
+ * of the callback: the trace ID on success, or a negative error number on
+ * failure.
+ */
+static int coresight_get_trace_id(struct coresight_device *csdev,
+ enum cs_mode mode,
+ struct coresight_device *sink)
+{
+ if (coresight_ops(csdev)->trace_id)
+ return coresight_ops(csdev)->trace_id(csdev, mode, sink);
+
+ return 0;
+}
+
+/*
+ * Call this after creating the path and before enabling it. This leaves
+ * the trace ID set on the path, or it remains 0 if it couldn't be assigned.
+ */
+void coresight_path_assign_trace_id(struct coresight_path *path,
+ enum cs_mode mode)
+{
+ struct coresight_device *sink = coresight_get_sink(path);
+ struct coresight_node *nd;
+ int trace_id;
+
+ list_for_each_entry(nd, &path->path_list, link) {
+ /* Assign a trace ID to the path for the first device that wants to do it */
+ trace_id = coresight_get_trace_id(nd->csdev, mode, sink);
+
+ /*
+ * 0 here means the device didn't want to assign an ID, so keep searching.
+ * Non-zero is either success or failure.
+ */
+ if (trace_id != 0) {
+ path->trace_id = trace_id;
+ return;
+ }
+ }
+}
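A hedged sketch of the intended call sequence, pieced together from the helpers in this file ('source' and 'sink' are assumed to be in hand, and rejecting an unassigned trace ID is shown as an assumption):

struct coresight_path *path;
int ret;

path = coresight_build_path(source, sink);
if (IS_ERR(path))
	return PTR_ERR(path);

coresight_path_assign_trace_id(path, CS_MODE_SYSFS);
if (!IS_VALID_CS_TRACE_ID(path->trace_id)) {
	coresight_release_path(path);
	return -EINVAL;
}

ret = coresight_enable_path(path, CS_MODE_SYSFS, NULL);
if (ret)
	coresight_release_path(path);
return ret;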
+
/**
* _coresight_build_path - recursively build a path from a @csdev to a sink.
* @csdev: The device to start from.
@@ -668,7 +715,7 @@ static void coresight_drop_device(struct coresight_device *csdev)
static int _coresight_build_path(struct coresight_device *csdev,
struct coresight_device *source,
struct coresight_device *sink,
- struct list_head *path)
+ struct coresight_path *path)
{
int i, ret;
bool found = false;
@@ -721,25 +768,25 @@ out:
return -ENOMEM;
node->csdev = csdev;
- list_add(&node->link, path);
+ list_add(&node->link, &path->path_list);
return 0;
}
-struct list_head *coresight_build_path(struct coresight_device *source,
+struct coresight_path *coresight_build_path(struct coresight_device *source,
struct coresight_device *sink)
{
- struct list_head *path;
+ struct coresight_path *path;
int rc;
if (!sink)
return ERR_PTR(-EINVAL);
- path = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+ path = kzalloc(sizeof(struct coresight_path), GFP_KERNEL);
if (!path)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(path);
+ INIT_LIST_HEAD(&path->path_list);
rc = _coresight_build_path(source, source, sink, path);
if (rc) {
@@ -757,12 +804,12 @@ struct list_head *coresight_build_path(struct coresight_device *source,
* Go through all the elements of a path and 1) remove it from the list and
* 2) free the memory allocated for each node.
*/
-void coresight_release_path(struct list_head *path)
+void coresight_release_path(struct coresight_path *path)
{
struct coresight_device *csdev;
struct coresight_node *nd, *next;
- list_for_each_entry_safe(nd, next, path, link) {
+ list_for_each_entry_safe(nd, next, &path->path_list, link) {
csdev = nd->csdev;
coresight_drop_device(csdev);
@@ -1092,18 +1139,20 @@ static void coresight_remove_conns(struct coresight_device *csdev)
}
/**
- * coresight_timeout - loop until a bit has changed to a specific register
- * state.
+ * coresight_timeout_action - loop until a bit has changed to a specific register
+ * state, invoking a callback after every attempt.
* @csa: coresight device access for the device
* @offset: Offset of the register from the base of the device.
* @position: the position of the bit of interest.
* @value: the value the bit should have.
+ * @cb: Callback invoked after each attempt.
*
* Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
* TIMEOUT_US has elapsed, whichever happens first.
*/
-int coresight_timeout(struct csdev_access *csa, u32 offset,
- int position, int value)
+int coresight_timeout_action(struct csdev_access *csa, u32 offset,
+ int position, int value,
+ coresight_timeout_cb_t cb)
{
int i;
u32 val;
@@ -1119,7 +1168,8 @@ int coresight_timeout(struct csdev_access *csa, u32 offset,
if (!(val & BIT(position)))
return 0;
}
-
+ if (cb)
+ cb(csa, offset, position, value);
/*
* Delay is arbitrary - the specification doesn't say how long
* we are expected to wait. Extra check required to make sure
@@ -1131,6 +1181,13 @@ int coresight_timeout(struct csdev_access *csa, u32 offset,
return -EAGAIN;
}
+EXPORT_SYMBOL_GPL(coresight_timeout_action);
+
+int coresight_timeout(struct csdev_access *csa, u32 offset,
+ int position, int value)
+{
+ return coresight_timeout_action(csa, offset, position, value, NULL);
+}
EXPORT_SYMBOL_GPL(coresight_timeout);
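coresight_timeout() keeps its old behavior by passing a NULL callback; callers that need to prod the hardware between polls can use the _action variant. A hedged sketch with a hypothetical callback (the void return type is inferred from how cb() is invoked above):

/* Hypothetical: re-issue a request on each poll attempt. */
static void demo_poll_cb(struct csdev_access *csa, u32 offset,
			 int position, int value)
{
	/* e.g. write a flush-request bit before the next read */
}

	ret = coresight_timeout_action(csa, offset, position, value,
				       demo_poll_cb);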
u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset)
@@ -1239,7 +1296,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
- spin_lock_init(&csdev->perf_sink_id_map.lock);
+ raw_spin_lock_init(&csdev->perf_sink_id_map.lock);
csdev->perf_sink_id_map.cpu_map = alloc_percpu(atomic_t);
if (!csdev->perf_sink_id_map.cpu_map) {
kfree(csdev);
@@ -1453,6 +1510,36 @@ const struct bus_type coresight_bustype = {
.name = "coresight",
};
+static int coresight_panic_sync(struct device *dev, void *data)
+{
+ int mode;
+ struct coresight_device *csdev;
+
+ /* Run through panic sync handlers for all enabled devices */
+ csdev = container_of(dev, struct coresight_device, dev);
+ mode = coresight_get_mode(csdev);
+
+ if ((mode == CS_MODE_SYSFS) || (mode == CS_MODE_PERF)) {
+ if (panic_ops(csdev))
+ panic_ops(csdev)->sync(csdev);
+ }
+
+ return 0;
+}
+
+static int coresight_panic_cb(struct notifier_block *self,
+ unsigned long v, void *p)
+{
+ bus_for_each_dev(&coresight_bustype, NULL, NULL,
+ coresight_panic_sync);
+
+ return 0;
+}
+
+static struct notifier_block coresight_notifier = {
+ .notifier_call = coresight_panic_cb,
+};
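On panic, every device found enabled on the coresight bus has its panic sync() handler run, giving sinks a chance to flush and commit trace data before the system dies. A hedged sketch of what a sink might supply; the ops structure shape is inferred from the panic_ops(csdev)->sync(csdev) call above:

/* Assumed shape, inferred from panic_ops(csdev)->sync(csdev). */
static int demo_sink_panic_sync(struct coresight_device *csdev)
{
	/* stop capture and write out buffer metadata */
	return 0;
}

static const struct coresight_ops_panic demo_panic_ops = {
	.sync = demo_sink_panic_sync,
};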
+
static int __init coresight_init(void)
{
int ret;
@@ -1465,11 +1552,20 @@ static int __init coresight_init(void)
if (ret)
goto exit_bus_unregister;
+ /* Register function to be called for panic */
+ ret = atomic_notifier_chain_register(&panic_notifier_list,
+ &coresight_notifier);
+ if (ret)
+ goto exit_perf;
+
/* initialise the coresight syscfg API */
ret = cscfg_init();
if (!ret)
return 0;
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &coresight_notifier);
+exit_perf:
etm_perf_exit();
exit_bus_unregister:
bus_unregister(&coresight_bustype);
@@ -1479,6 +1575,8 @@ exit_bus_unregister:
static void __exit coresight_exit(void)
{
cscfg_exit();
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &coresight_notifier);
etm_perf_exit();
bus_unregister(&coresight_bustype);
}
@@ -1515,6 +1613,38 @@ void coresight_remove_driver(struct amba_driver *amba_drv,
}
EXPORT_SYMBOL_GPL(coresight_remove_driver);
+int coresight_etm_get_trace_id(struct coresight_device *csdev, enum cs_mode mode,
+ struct coresight_device *sink)
+{
+ int cpu, trace_id;
+
+ if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE || !source_ops(csdev)->cpu_id)
+ return -EINVAL;
+
+ cpu = source_ops(csdev)->cpu_id(csdev);
+ switch (mode) {
+ case CS_MODE_SYSFS:
+ trace_id = coresight_trace_id_get_cpu_id(cpu);
+ break;
+ case CS_MODE_PERF:
+ if (WARN_ON(!sink))
+ return -EINVAL;
+
+ trace_id = coresight_trace_id_get_cpu_id_map(cpu, &sink->perf_sink_id_map);
+ break;
+ default:
+ trace_id = -EINVAL;
+ break;
+ }
+
+ if (!IS_VALID_CS_TRACE_ID(trace_id))
+ dev_err(&csdev->dev,
+ "Failed to allocate trace ID on CPU%d\n", cpu);
+
+ return trace_id;
+}
+EXPORT_SYMBOL_GPL(coresight_etm_get_trace_id);
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
diff --git a/drivers/hwtracing/coresight/coresight-ctcu-core.c b/drivers/hwtracing/coresight/coresight-ctcu-core.c
new file mode 100644
index 000000000000..c6bafc96db96
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-ctcu-core.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/coresight.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include "coresight-ctcu.h"
+#include "coresight-priv.h"
+
+DEFINE_CORESIGHT_DEVLIST(ctcu_devs, "ctcu");
+
+#define ctcu_writel(drvdata, val, offset) __raw_writel((val), drvdata->base + offset)
+#define ctcu_readl(drvdata, offset) __raw_readl(drvdata->base + offset)
+
+/*
+ * The TMC CoreSight Control Unit uses four ATID registers to control the data
+ * filter function, based on trace ID, for each TMC ETR sink. Each ATID
+ * register is 32 bits long, so an ETR device has a 128-bit field in the
+ * CTCU. Each trace ID is represented by one bit in that field.
+ * e.g. ETR0ATID0 layout, set bit 5 for traceid 5
+ * bit5
+ * ------------------------------------------------------
+ * | |28| |24| |20| |16| |12| |8| 1|4| |0|
+ * ------------------------------------------------------
+ *
+ * e.g. ETR0:
+ * 127 0 from ATID_offset for ETR0ATID0
+ * -------------------------
+ * |ATID3|ATID2|ATID1|ATID0|
+ */
+#define CTCU_ATID_REG_OFFSET(traceid, atid_offset) \
+ ((traceid / 32) * 4 + atid_offset)
+
+#define CTCU_ATID_REG_BIT(traceid) (traceid % 32)
+#define CTCU_ATID_REG_SIZE 0x10
+#define CTCU_ETR0_ATID0 0xf8
+#define CTCU_ETR1_ATID0 0x108
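A worked example using the macros above: trace ID 37 bound for ETR0 lands in the second ATID register.

/* traceid 37 on ETR0 (atid_offset == CTCU_ETR0_ATID0 == 0xf8) */
reg_offset = CTCU_ATID_REG_OFFSET(37, 0xf8);	/* (37 / 32) * 4 + 0xf8 == 0xfc, i.e. ETR0ATID1 */
bit        = CTCU_ATID_REG_BIT(37);		/* 37 % 32 == 5 */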
+
+static const struct ctcu_etr_config sa8775p_etr_cfgs[] = {
+ {
+ .atid_offset = CTCU_ETR0_ATID0,
+ .port_num = 0,
+ },
+ {
+ .atid_offset = CTCU_ETR1_ATID0,
+ .port_num = 1,
+ },
+};
+
+static const struct ctcu_config sa8775p_cfgs = {
+ .etr_cfgs = sa8775p_etr_cfgs,
+ .num_etr_config = ARRAY_SIZE(sa8775p_etr_cfgs),
+};
+
+static void ctcu_program_atid_register(struct ctcu_drvdata *drvdata, u32 reg_offset,
+ u8 bit, bool enable)
+{
+ u32 val;
+
+ CS_UNLOCK(drvdata->base);
+ val = ctcu_readl(drvdata, reg_offset);
+ if (enable)
+ val |= BIT(bit);
+ else
+ val &= ~BIT(bit);
+
+ ctcu_writel(drvdata, val, reg_offset);
+ CS_LOCK(drvdata->base);
+}
+
+/*
+ * __ctcu_set_etr_traceid: Set the trace ID's bit in the ATID register when
+ * enable is true; clear it when enable is false.
+ *
+ * @csdev: coresight_device of CTCU.
+ * @traceid: trace ID of the source tracer.
+ * @port_num: port number connected to TMC ETR sink.
+ * @enable: True for set bit and false for reset bit.
+ *
+ * Returns 0 on success; a non-zero result means failure.
+ */
+static int __ctcu_set_etr_traceid(struct coresight_device *csdev, u8 traceid, int port_num,
+ bool enable)
+{
+ struct ctcu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ u32 atid_offset, reg_offset;
+ u8 refcnt, bit;
+
+ atid_offset = drvdata->atid_offset[port_num];
+ if (atid_offset == 0)
+ return -EINVAL;
+
+ bit = CTCU_ATID_REG_BIT(traceid);
+ reg_offset = CTCU_ATID_REG_OFFSET(traceid, atid_offset);
+ if (reg_offset - atid_offset > CTCU_ATID_REG_SIZE)
+ return -EINVAL;
+
+ guard(raw_spinlock_irqsave)(&drvdata->spin_lock);
+ refcnt = drvdata->traceid_refcnt[port_num][traceid];
+ /* Only program the ATID register on the first enable or the last disable */
+ if ((enable && !refcnt++) || (!enable && !--refcnt))
+ ctcu_program_atid_register(drvdata, reg_offset, bit, enable);
+
+ drvdata->traceid_refcnt[port_num][traceid] = refcnt;
+
+ return 0;
+}
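The per-(port, traceid) refcount means the hardware is only touched at the edges; intermediate enables and disables just adjust the count:

/* refcnt transitions for one (port_num, traceid) pair:
 *   enable:  0 -> 1   programs the ATID bit
 *   enable:  1 -> 2   count only
 *   disable: 2 -> 1   count only
 *   disable: 1 -> 0   clears the ATID bit
 */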
+
+/*
+ * Search for the sink device from the helper's view, in case multiple helper
+ * devices are connected to the sink.
+ */
+static int ctcu_get_active_port(struct coresight_device *sink, struct coresight_device *helper)
+{
+ struct coresight_platform_data *pdata = helper->pdata;
+ int i;
+
+ for (i = 0; i < pdata->nr_inconns; ++i) {
+ if (pdata->in_conns[i]->src_dev == sink)
+ return pdata->in_conns[i]->dest_port;
+ }
+
+ return -EINVAL;
+}
+
+static int ctcu_set_etr_traceid(struct coresight_device *csdev, struct coresight_path *path,
+ bool enable)
+{
+ struct coresight_device *sink = coresight_get_sink(path);
+ u8 traceid = path->trace_id;
+ int port_num;
+
+ if ((sink == NULL) || !IS_VALID_CS_TRACE_ID(traceid)) {
+ dev_err(&csdev->dev, "Invalid sink device or trace ID\n");
+ return -EINVAL;
+ }
+
+ port_num = ctcu_get_active_port(sink, csdev);
+ if (port_num < 0)
+ return -EINVAL;
+
+ dev_dbg(&csdev->dev, "traceid is %d\n", traceid);
+
+ return __ctcu_set_etr_traceid(csdev, traceid, port_num, enable);
+}
+
+static int ctcu_enable(struct coresight_device *csdev, enum cs_mode mode, void *data)
+{
+ struct coresight_path *path = (struct coresight_path *)data;
+
+ return ctcu_set_etr_traceid(csdev, path, true);
+}
+
+static int ctcu_disable(struct coresight_device *csdev, void *data)
+{
+ struct coresight_path *path = (struct coresight_path *)data;
+
+ return ctcu_set_etr_traceid(csdev, path, false);
+}
+
+static const struct coresight_ops_helper ctcu_helper_ops = {
+ .enable = ctcu_enable,
+ .disable = ctcu_disable,
+};
+
+static const struct coresight_ops ctcu_ops = {
+ .helper_ops = &ctcu_helper_ops,
+};
+
+static int ctcu_probe(struct platform_device *pdev)
+{
+ const struct ctcu_etr_config *etr_cfg;
+ struct coresight_platform_data *pdata;
+ struct coresight_desc desc = { 0 };
+ struct device *dev = &pdev->dev;
+ const struct ctcu_config *cfgs;
+ struct ctcu_drvdata *drvdata;
+ void __iomem *base;
+ int i;
+
+ desc.name = coresight_alloc_device_name(&ctcu_devs, dev);
+ if (!desc.name)
+ return -ENOMEM;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ pdata = coresight_get_platform_data(dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ dev->platform_data = pdata;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ drvdata->apb_clk = coresight_get_enable_apb_pclk(dev);
+ if (IS_ERR(drvdata->apb_clk))
+ return -ENODEV;
+
+ cfgs = of_device_get_match_data(dev);
+ if (cfgs) {
+ if (cfgs->num_etr_config <= ETR_MAX_NUM) {
+ for (i = 0; i < cfgs->num_etr_config; i++) {
+ etr_cfg = &cfgs->etr_cfgs[i];
+ drvdata->atid_offset[i] = etr_cfg->atid_offset;
+ }
+ }
+ }
+
+ drvdata->base = base;
+ drvdata->dev = dev;
+ platform_set_drvdata(pdev, drvdata);
+
+ desc.type = CORESIGHT_DEV_TYPE_HELPER;
+ desc.subtype.helper_subtype = CORESIGHT_DEV_SUBTYPE_HELPER_CTCU;
+ desc.pdata = pdata;
+ desc.dev = dev;
+ desc.ops = &ctcu_ops;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
+
+ drvdata->csdev = coresight_register(&desc);
+ if (IS_ERR(drvdata->csdev)) {
+ if (!IS_ERR_OR_NULL(drvdata->apb_clk))
+ clk_put(drvdata->apb_clk);
+
+ return PTR_ERR(drvdata->csdev);
+ }
+
+ return 0;
+}
+
+static void ctcu_remove(struct platform_device *pdev)
+{
+ struct ctcu_drvdata *drvdata = platform_get_drvdata(pdev);
+
+ coresight_unregister(drvdata->csdev);
+}
+
+static int ctcu_platform_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = ctcu_probe(pdev);
+ pm_runtime_put(&pdev->dev);
+ if (ret)
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static void ctcu_platform_remove(struct platform_device *pdev)
+{
+ struct ctcu_drvdata *drvdata = platform_get_drvdata(pdev);
+
+ if (WARN_ON(!drvdata))
+ return;
+
+ ctcu_remove(pdev);
+ pm_runtime_disable(&pdev->dev);
+ if (!IS_ERR_OR_NULL(drvdata->apb_clk))
+ clk_put(drvdata->apb_clk);
+}
+
+#ifdef CONFIG_PM
+static int ctcu_runtime_suspend(struct device *dev)
+{
+ struct ctcu_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (drvdata && !IS_ERR_OR_NULL(drvdata->apb_clk))
+ clk_disable_unprepare(drvdata->apb_clk);
+
+ return 0;
+}
+
+static int ctcu_runtime_resume(struct device *dev)
+{
+ struct ctcu_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (drvdata && !IS_ERR_OR_NULL(drvdata->apb_clk))
+ clk_prepare_enable(drvdata->apb_clk);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops ctcu_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(ctcu_runtime_suspend, ctcu_runtime_resume, NULL)
+};
+
+static const struct of_device_id ctcu_match[] = {
+ {.compatible = "qcom,sa8775p-ctcu", .data = &sa8775p_cfgs},
+ {}
+};
+
+static struct platform_driver ctcu_driver = {
+ .probe = ctcu_platform_probe,
+ .remove = ctcu_platform_remove,
+ .driver = {
+ .name = "coresight-ctcu",
+ .of_match_table = ctcu_match,
+ .pm = &ctcu_dev_pm_ops,
+ .suppress_bind_attrs = true,
+ },
+};
+module_platform_driver(ctcu_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CoreSight TMC Control Unit driver");
diff --git a/drivers/hwtracing/coresight/coresight-ctcu.h b/drivers/hwtracing/coresight/coresight-ctcu.h
new file mode 100644
index 000000000000..e9594c38dd91
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-ctcu.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _CORESIGHT_CTCU_H
+#define _CORESIGHT_CTCU_H
+#include "coresight-trace-id.h"
+
+/* Maximum number of supported ETR devices for a single CTCU. */
+#define ETR_MAX_NUM 2
+
+/**
+ * struct ctcu_etr_config
+ * @atid_offset: offset of the ATID0 register.
+ * @port_num: in-port number of the CTCU device that connects to the ETR.
+ */
+struct ctcu_etr_config {
+ const u32 atid_offset;
+ const u32 port_num;
+};
+
+struct ctcu_config {
+ const struct ctcu_etr_config *etr_cfgs;
+ int num_etr_config;
+};
+
+struct ctcu_drvdata {
+ void __iomem *base;
+ struct clk *apb_clk;
+ struct device *dev;
+ struct coresight_device *csdev;
+ raw_spinlock_t spin_lock;
+ u32 atid_offset[ETR_MAX_NUM];
+ /* refcnt for each traceid of each sink */
+ u8 traceid_refcnt[ETR_MAX_NUM][CORESIGHT_TRACE_ID_RES_TOP];
+};
+
+#endif
diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c
index d2b5a5718c29..80f6265e3740 100644
--- a/drivers/hwtracing/coresight/coresight-cti-core.c
+++ b/drivers/hwtracing/coresight/coresight-cti-core.c
@@ -93,7 +93,7 @@ static int cti_enable_hw(struct cti_drvdata *drvdata)
unsigned long flags;
int rc = 0;
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
/* no need to do anything if enabled or unpowered */
if (config->hw_enabled || !config->hw_powered)
@@ -108,7 +108,7 @@ static int cti_enable_hw(struct cti_drvdata *drvdata)
config->hw_enabled = true;
drvdata->config.enable_req_count++;
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return rc;
cti_state_unchanged:
@@ -116,7 +116,7 @@ cti_state_unchanged:
/* cannot enable due to error */
cti_err_not_enabled:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return rc;
}
@@ -125,7 +125,7 @@ static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata)
{
struct cti_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
config->hw_powered = true;
/* no need to do anything if no enable request */
@@ -138,12 +138,12 @@ static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata)
cti_write_all_hw_regs(drvdata);
config->hw_enabled = true;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return;
/* did not re-enable due to no claim / no request */
cti_hp_not_enabled:
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
}
/* disable hardware */
@@ -153,7 +153,7 @@ static int cti_disable_hw(struct cti_drvdata *drvdata)
struct coresight_device *csdev = drvdata->csdev;
int ret = 0;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
/* don't allow negative refcounts, return an error */
if (!drvdata->config.enable_req_count) {
@@ -177,12 +177,12 @@ static int cti_disable_hw(struct cti_drvdata *drvdata)
coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return ret;
/* not disabled this call */
cti_not_disabled:
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return ret;
}
@@ -198,11 +198,11 @@ void cti_write_intack(struct device *dev, u32 ackval)
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
/* write if enabled */
if (cti_active(config))
cti_write_single_reg(drvdata, CTIINTACK, ackval);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
}
/*
@@ -369,7 +369,7 @@ int cti_channel_trig_op(struct device *dev, enum cti_chan_op op,
reg_offset = (direction == CTI_TRIG_IN ? CTIINEN(trigger_idx) :
CTIOUTEN(trigger_idx));
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
/* read - modify write - the trigger / channel enable value */
reg_value = direction == CTI_TRIG_IN ? config->ctiinen[trigger_idx] :
@@ -388,7 +388,7 @@ int cti_channel_trig_op(struct device *dev, enum cti_chan_op op,
/* write through if enabled */
if (cti_active(config))
cti_write_single_reg(drvdata, reg_offset, reg_value);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return 0;
}
@@ -406,7 +406,7 @@ int cti_channel_gate_op(struct device *dev, enum cti_chan_gate_op op,
chan_bitmask = BIT(channel_idx);
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
reg_value = config->ctigate;
switch (op) {
case CTI_GATE_CHAN_ENABLE:
@@ -426,7 +426,7 @@ int cti_channel_gate_op(struct device *dev, enum cti_chan_gate_op op,
if (cti_active(config))
cti_write_single_reg(drvdata, CTIGATE, reg_value);
}
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return err;
}
@@ -445,7 +445,7 @@ int cti_channel_setop(struct device *dev, enum cti_chan_set_op op,
chan_bitmask = BIT(channel_idx);
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
reg_value = config->ctiappset;
switch (op) {
case CTI_CHAN_SET:
@@ -473,7 +473,7 @@ int cti_channel_setop(struct device *dev, enum cti_chan_set_op op,
if ((err == 0) && cti_active(config))
cti_write_single_reg(drvdata, reg_offset, reg_value);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return err;
}
@@ -676,7 +676,7 @@ static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
if (WARN_ON_ONCE(drvdata->ctidev.cpu != cpu))
return NOTIFY_BAD;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
switch (cmd) {
case CPU_PM_ENTER:
@@ -716,7 +716,7 @@ static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
}
cti_notify_exit:
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return notify_res;
}
@@ -743,11 +743,11 @@ static int cti_dying_cpu(unsigned int cpu)
if (!drvdata)
return 0;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
drvdata->config.hw_powered = false;
if (drvdata->config.hw_enabled)
coresight_disclaim_device(drvdata->csdev);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return 0;
}
@@ -888,7 +888,7 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->ctidev.ctm_id = 0;
INIT_LIST_HEAD(&drvdata->ctidev.trig_cons);
- spin_lock_init(&drvdata->spinlock);
+ raw_spin_lock_init(&drvdata->spinlock);
/* initialise CTI driver config values */
cti_set_default_config(dev, drvdata);
diff --git a/drivers/hwtracing/coresight/coresight-cti-sysfs.c b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
index d25dd2737b49..572b80ee96fb 100644
--- a/drivers/hwtracing/coresight/coresight-cti-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
@@ -84,11 +84,11 @@ static ssize_t enable_show(struct device *dev,
bool enabled, powered;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
enable_req = drvdata->config.enable_req_count;
powered = drvdata->config.hw_powered;
enabled = drvdata->config.hw_enabled;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
if (powered)
return sprintf(buf, "%d\n", enabled);
@@ -134,9 +134,9 @@ static ssize_t powered_show(struct device *dev,
bool powered;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
powered = drvdata->config.hw_powered;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%d\n", powered);
}
@@ -181,10 +181,10 @@ static ssize_t coresight_cti_reg_show(struct device *dev,
u32 val = 0;
pm_runtime_get_sync(dev->parent);
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
if (drvdata->config.hw_powered)
val = readl_relaxed(drvdata->base + cti_attr->off);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
pm_runtime_put_sync(dev->parent);
return sysfs_emit(buf, "0x%x\n", val);
}
@@ -202,10 +202,10 @@ static __maybe_unused ssize_t coresight_cti_reg_store(struct device *dev,
return -EINVAL;
pm_runtime_get_sync(dev->parent);
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
if (drvdata->config.hw_powered)
cti_write_single_reg(drvdata, cti_attr->off, val);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
pm_runtime_put_sync(dev->parent);
return size;
}
@@ -264,7 +264,7 @@ static ssize_t cti_reg32_show(struct device *dev, char *buf,
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
if ((reg_offset >= 0) && cti_active(config)) {
CS_UNLOCK(drvdata->base);
val = readl_relaxed(drvdata->base + reg_offset);
@@ -274,7 +274,7 @@ static ssize_t cti_reg32_show(struct device *dev, char *buf,
} else if (pcached_val) {
val = *pcached_val;
}
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%#x\n", val);
}
@@ -293,7 +293,7 @@ static ssize_t cti_reg32_store(struct device *dev, const char *buf,
if (kstrtoul(buf, 0, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
/* local store */
if (pcached_val)
*pcached_val = (u32)val;
@@ -301,7 +301,7 @@ static ssize_t cti_reg32_store(struct device *dev, const char *buf,
/* write through if offset and enabled */
if ((reg_offset >= 0) && cti_active(config))
cti_write_single_reg(drvdata, reg_offset, val);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
@@ -349,9 +349,9 @@ static ssize_t inout_sel_store(struct device *dev,
if (val > (CTIINOUTEN_MAX - 1))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
drvdata->config.ctiinout_sel = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(inout_sel);
@@ -364,10 +364,10 @@ static ssize_t inen_show(struct device *dev,
int index;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
index = drvdata->config.ctiinout_sel;
val = drvdata->config.ctiinen[index];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%#lx\n", val);
}
@@ -383,14 +383,14 @@ static ssize_t inen_store(struct device *dev,
if (kstrtoul(buf, 0, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
index = config->ctiinout_sel;
config->ctiinen[index] = val;
/* write through if enabled */
if (cti_active(config))
cti_write_single_reg(drvdata, CTIINEN(index), val);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(inen);
@@ -403,10 +403,10 @@ static ssize_t outen_show(struct device *dev,
int index;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
index = drvdata->config.ctiinout_sel;
val = drvdata->config.ctiouten[index];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%#lx\n", val);
}
@@ -422,14 +422,14 @@ static ssize_t outen_store(struct device *dev,
if (kstrtoul(buf, 0, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
index = config->ctiinout_sel;
config->ctiouten[index] = val;
/* write through if enabled */
if (cti_active(config))
cti_write_single_reg(drvdata, CTIOUTEN(index), val);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(outen);
@@ -463,7 +463,7 @@ static ssize_t appclear_store(struct device *dev,
if (kstrtoul(buf, 0, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
/* a 1'b1 in appclr clears down the same bit in appset */
config->ctiappset &= ~val;
@@ -471,7 +471,7 @@ static ssize_t appclear_store(struct device *dev,
/* write through if enabled */
if (cti_active(config))
cti_write_single_reg(drvdata, CTIAPPCLEAR, val);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_WO(appclear);
@@ -487,12 +487,12 @@ static ssize_t apppulse_store(struct device *dev,
if (kstrtoul(buf, 0, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
/* write through if enabled */
if (cti_active(config))
cti_write_single_reg(drvdata, CTIAPPPULSE, val);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_WO(apppulse);
@@ -681,9 +681,9 @@ static ssize_t trig_filter_enable_show(struct device *dev,
u32 val;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
val = drvdata->config.trig_filter_enable;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%d\n", val);
}
@@ -697,9 +697,9 @@ static ssize_t trig_filter_enable_store(struct device *dev,
if (kstrtoul(buf, 0, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
drvdata->config.trig_filter_enable = !!val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(trig_filter_enable);
@@ -728,7 +728,7 @@ static ssize_t chan_xtrigs_reset_store(struct device *dev,
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
/* clear the CTI trigger / channel programming registers */
for (i = 0; i < config->nr_trig_max; i++) {
@@ -747,7 +747,7 @@ static ssize_t chan_xtrigs_reset_store(struct device *dev,
if (cti_active(config))
cti_write_all_hw_regs(drvdata);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_WO(chan_xtrigs_reset);
@@ -768,9 +768,9 @@ static ssize_t chan_xtrigs_sel_store(struct device *dev,
if (val > (drvdata->config.nr_ctm_channels - 1))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
drvdata->config.xtrig_rchan_sel = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
@@ -781,9 +781,9 @@ static ssize_t chan_xtrigs_sel_show(struct device *dev,
unsigned long val;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
val = drvdata->config.xtrig_rchan_sel;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%ld\n", val);
}
@@ -838,12 +838,12 @@ static ssize_t print_chan_list(struct device *dev,
unsigned long inuse_bits = 0, chan_mask;
/* scan regs to get bitmap of channels in use. */
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
for (i = 0; i < config->nr_trig_max; i++) {
inuse_bits |= config->ctiinen[i];
inuse_bits |= config->ctiouten[i];
}
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
/* invert bits if printing free channels */
if (!inuse)
diff --git a/drivers/hwtracing/coresight/coresight-cti.h b/drivers/hwtracing/coresight/coresight-cti.h
index cb9ee616d01f..16e310e7e9d4 100644
--- a/drivers/hwtracing/coresight/coresight-cti.h
+++ b/drivers/hwtracing/coresight/coresight-cti.h
@@ -175,7 +175,7 @@ struct cti_drvdata {
void __iomem *base;
struct coresight_device *csdev;
struct cti_device ctidev;
- spinlock_t spinlock;
+ raw_spinlock_t spinlock;
struct cti_config config;
struct list_head node;
void (*csdev_release)(struct device *dev);
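Reviewer note: the struct change above is the crux of the CTI conversion. On PREEMPT_RT a spinlock_t becomes a sleeping lock, so it cannot be taken from the atomic contexts (CPU-hotplug and CPU-PM callbacks) that drive CTI programming, whereas a raw_spinlock_t always busy-waits. A minimal sketch of the resulting pattern, with illustrative names (demo_drvdata, demo_set) rather than the driver's own:

#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_drvdata {
	raw_spinlock_t	spinlock;	/* was spinlock_t before this series */
	u32		ctiappset;
};

static void demo_set(struct demo_drvdata *drvdata, u32 val)
{
	/* Never sleeps, even on PREEMPT_RT, so safe in atomic context. */
	raw_spin_lock(&drvdata->spinlock);
	drvdata->ctiappset |= val;
	raw_spin_unlock(&drvdata->spinlock);
}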
diff --git a/drivers/hwtracing/coresight/coresight-dummy.c b/drivers/hwtracing/coresight/coresight-dummy.c
index 9be53be8964b..aaa92b5081e3 100644
--- a/drivers/hwtracing/coresight/coresight-dummy.c
+++ b/drivers/hwtracing/coresight/coresight-dummy.c
@@ -24,7 +24,7 @@ DEFINE_CORESIGHT_DEVLIST(sink_devs, "dummy_sink");
static int dummy_source_enable(struct coresight_device *csdev,
struct perf_event *event, enum cs_mode mode,
- __maybe_unused struct coresight_trace_id_map *id_map)
+ __maybe_unused struct coresight_path *path)
{
if (!coresight_take_mode(csdev, mode))
return -EBUSY;
@@ -41,6 +41,16 @@ static void dummy_source_disable(struct coresight_device *csdev,
dev_dbg(csdev->dev.parent, "Dummy source disabled\n");
}
+static int dummy_source_trace_id(struct coresight_device *csdev, __maybe_unused enum cs_mode mode,
+ __maybe_unused struct coresight_device *sink)
+{
+ struct dummy_drvdata *drvdata;
+
+ drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ return drvdata->traceid;
+}
+
static int dummy_sink_enable(struct coresight_device *csdev, enum cs_mode mode,
void *data)
{
@@ -62,7 +72,8 @@ static const struct coresight_ops_source dummy_source_ops = {
};
static const struct coresight_ops dummy_source_cs_ops = {
- .source_ops = &dummy_source_ops,
+ .trace_id = dummy_source_trace_id,
+ .source_ops = &dummy_source_ops,
};
static const struct coresight_ops_sink dummy_sink_ops = {
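Reviewer note: the dummy driver shows the shape of the new coresight_ops.trace_id callback — a source reports the ID it will emit, here a fixed value held in drvdata. A hedged sketch of how a caller in the core could dispatch through the op (the helper name is illustrative, not necessarily what the series adds):

#include <linux/coresight.h>
#include <linux/errno.h>

static int demo_query_trace_id(struct coresight_device *csdev,
			       enum cs_mode mode,
			       struct coresight_device *sink)
{
	/* Sources that cannot report an ID simply leave the op NULL. */
	if (!csdev->ops || !csdev->ops->trace_id)
		return -EOPNOTSUPP;

	return csdev->ops->trace_id(csdev, mode, sink);
}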
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index aea9ac9c4bd0..7948597d483d 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -84,7 +84,7 @@ struct etb_drvdata {
struct clk *atclk;
struct coresight_device *csdev;
struct miscdevice miscdev;
- spinlock_t spinlock;
+ raw_spinlock_t spinlock;
local_t reading;
pid_t pid;
u8 *buf;
@@ -145,7 +145,7 @@ static int etb_enable_sysfs(struct coresight_device *csdev)
unsigned long flags;
struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't mess with perf sessions. */
if (coresight_get_mode(csdev) == CS_MODE_PERF) {
@@ -163,7 +163,7 @@ static int etb_enable_sysfs(struct coresight_device *csdev)
csdev->refcnt++;
out:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return ret;
}
@@ -176,7 +176,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data)
struct perf_output_handle *handle = data;
struct cs_buffers *buf = etm_perf_sink_config(handle);
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
/* No need to continue if the component is already in use by sysfs. */
if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
@@ -219,7 +219,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data)
}
out:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return ret;
}
@@ -352,11 +352,11 @@ static int etb_disable(struct coresight_device *csdev)
struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
unsigned long flags;
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
csdev->refcnt--;
if (csdev->refcnt) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
}
@@ -366,7 +366,7 @@ static int etb_disable(struct coresight_device *csdev)
/* Dissociate from monitored process. */
drvdata->pid = -1;
coresight_set_mode(csdev, CS_MODE_DISABLED);
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_dbg(&csdev->dev, "ETB disabled\n");
return 0;
@@ -443,7 +443,7 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't do anything if another tracer is using this sink */
if (csdev->refcnt != 1)
@@ -566,7 +566,7 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
__etb_enable_hw(drvdata);
CS_LOCK(drvdata->base);
out:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return to_read;
}
@@ -587,13 +587,13 @@ static void etb_dump(struct etb_drvdata *drvdata)
{
unsigned long flags;
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
__etb_disable_hw(drvdata);
etb_dump_hw(drvdata);
__etb_enable_hw(drvdata);
}
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_dbg(&drvdata->csdev->dev, "ETB dumped\n");
}
@@ -746,7 +746,7 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->base = base;
desc.access = CSDEV_ACCESS_IOMEM(base);
- spin_lock_init(&drvdata->spinlock);
+ raw_spin_lock_init(&drvdata->spinlock);
drvdata->buffer_depth = etb_get_buffer_depth(drvdata);
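Reviewer note: because the ETB is a sink whose enable/flush paths run with interrupts in play, its lock sites use the _irqsave variants, and the conversion preserves that. A self-contained sketch of the pattern with illustrative names:

#include <linux/spinlock.h>

struct demo_sink {
	raw_spinlock_t	spinlock;
	int		refcnt;
};

static int demo_sink_enable(struct demo_sink *sink)
{
	unsigned long flags;

	/* Save and restore the interrupt state around the critical section. */
	raw_spin_lock_irqsave(&sink->spinlock, flags);
	sink->refcnt++;
	raw_spin_unlock_irqrestore(&sink->spinlock, flags);
	return 0;
}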
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index ad6a8f4b70b6..f4cccd68e625 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -136,13 +136,13 @@ static const struct attribute_group *etm_pmu_attr_groups[] = {
NULL,
};
-static inline struct list_head **
+static inline struct coresight_path **
etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
{
return per_cpu_ptr(data->path, cpu);
}
-static inline struct list_head *
+static inline struct coresight_path *
etm_event_cpu_path(struct etm_event_data *data, int cpu)
{
return *etm_event_cpu_path_ptr(data, cpu);
@@ -226,7 +226,7 @@ static void free_event_data(struct work_struct *work)
cscfg_deactivate_config(event_data->cfg_hash);
for_each_cpu(cpu, mask) {
- struct list_head **ppath;
+ struct coresight_path **ppath;
ppath = etm_event_cpu_path_ptr(event_data, cpu);
if (!(IS_ERR_OR_NULL(*ppath))) {
@@ -276,7 +276,7 @@ static void *alloc_event_data(int cpu)
* unused memory when dealing with single CPU trace scenarios is small
* compared to the cost of searching through an optimized array.
*/
- event_data->path = alloc_percpu(struct list_head *);
+ event_data->path = alloc_percpu(struct coresight_path *);
if (!event_data->path) {
kfree(event_data);
@@ -317,7 +317,6 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
{
u32 id, cfg_hash;
int cpu = event->cpu;
- int trace_id;
cpumask_t *mask;
struct coresight_device *sink = NULL;
struct coresight_device *user_sink = NULL, *last_sink = NULL;
@@ -352,7 +351,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
* CPUs, we can handle it and fail the session.
*/
for_each_cpu(cpu, mask) {
- struct list_head *path;
+ struct coresight_path *path;
struct coresight_device *csdev;
csdev = per_cpu(csdev_src, cpu);
@@ -407,8 +406,8 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
}
/* ensure we can allocate a trace ID for this CPU */
- trace_id = coresight_trace_id_get_cpu_id_map(cpu, &sink->perf_sink_id_map);
- if (!IS_VALID_CS_TRACE_ID(trace_id)) {
+ coresight_path_assign_trace_id(path, CS_MODE_PERF);
+ if (!IS_VALID_CS_TRACE_ID(path->trace_id)) {
cpumask_clear_cpu(cpu, mask);
coresight_release_path(path);
continue;
@@ -458,9 +457,8 @@ static void etm_event_start(struct perf_event *event, int flags)
struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
struct perf_output_handle *handle = &ctxt->handle;
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
- struct list_head *path;
+ struct coresight_path *path;
u64 hw_id;
- u8 trace_id;
if (!csdev)
goto fail;
@@ -503,8 +501,7 @@ static void etm_event_start(struct perf_event *event, int flags)
goto fail_end_stop;
/* Finally enable the tracer */
- if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF,
- &sink->perf_sink_id_map))
+ if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF, path))
goto fail_disable_path;
/*
@@ -514,13 +511,11 @@ static void etm_event_start(struct perf_event *event, int flags)
if (!cpumask_test_cpu(cpu, &event_data->aux_hwid_done)) {
cpumask_set_cpu(cpu, &event_data->aux_hwid_done);
- trace_id = coresight_trace_id_read_cpu_id_map(cpu, &sink->perf_sink_id_map);
-
hw_id = FIELD_PREP(CS_AUX_HW_ID_MAJOR_VERSION_MASK,
CS_AUX_HW_ID_MAJOR_VERSION);
hw_id |= FIELD_PREP(CS_AUX_HW_ID_MINOR_VERSION_MASK,
CS_AUX_HW_ID_MINOR_VERSION);
- hw_id |= FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK, trace_id);
+ hw_id |= FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK, path->trace_id);
hw_id |= FIELD_PREP(CS_AUX_HW_ID_SINK_ID_MASK, coresight_get_sink_id(sink));
perf_report_aux_output_id(event, hw_id);
@@ -558,7 +553,7 @@ static void etm_event_stop(struct perf_event *event, int mode)
struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
struct perf_output_handle *handle = &ctxt->handle;
struct etm_event_data *event_data;
- struct list_head *path;
+ struct coresight_path *path;
/*
* If we still have access to the event_data via handle,
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h
index 744531158d6b..5febbcdb8696 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.h
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.h
@@ -59,7 +59,7 @@ struct etm_event_data {
cpumask_t aux_hwid_done;
void *snk_config;
u32 cfg_hash;
- struct list_head * __percpu *path;
+ struct coresight_path * __percpu *path;
};
int etm_perf_symlink(struct coresight_device *csdev, bool link);
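Reviewer note: the header retypes the per-CPU pointer from a bare list_head to the new coresight_path, which carries the session's trace ID alongside the node list. Its probable shape, inferred from how this patch uses it (path->trace_id, coresight_release_path(path)); the authoritative definition lives in include/linux/coresight.h:

#include <linux/list.h>
#include <linux/types.h>

struct coresight_path {
	struct list_head	path_list;	/* devices from source to sink */
	u8			trace_id;	/* ID stamped on the trace stream */
};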
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index e02c3ea972c9..171f1384f7c0 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -284,6 +284,5 @@ extern const struct attribute_group *coresight_etm_groups[];
void etm_set_default(struct etm_config *config);
void etm_config_trace_mode(struct etm_config *config);
struct etm_config *get_etm_config(struct etm_drvdata *drvdata);
-int etm_read_alloc_trace_id(struct etm_drvdata *drvdata);
void etm_release_trace_id(struct etm_drvdata *drvdata);
#endif
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
index c103f4c70f5d..8927bfaf3af2 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
@@ -455,26 +455,6 @@ static int etm_cpu_id(struct coresight_device *csdev)
return drvdata->cpu;
}
-int etm_read_alloc_trace_id(struct etm_drvdata *drvdata)
-{
- int trace_id;
-
- /*
- * This will allocate a trace ID to the cpu,
- * or return the one currently allocated.
- *
- * trace id function has its own lock
- */
- trace_id = coresight_trace_id_get_cpu_id(drvdata->cpu);
- if (IS_VALID_CS_TRACE_ID(trace_id))
- drvdata->traceid = (u8)trace_id;
- else
- dev_err(&drvdata->csdev->dev,
- "Failed to allocate trace ID for %s on CPU%d\n",
- dev_name(&drvdata->csdev->dev), drvdata->cpu);
- return trace_id;
-}
-
void etm_release_trace_id(struct etm_drvdata *drvdata)
{
coresight_trace_id_put_cpu_id(drvdata->cpu);
@@ -482,38 +462,22 @@ void etm_release_trace_id(struct etm_drvdata *drvdata)
static int etm_enable_perf(struct coresight_device *csdev,
struct perf_event *event,
- struct coresight_trace_id_map *id_map)
+ struct coresight_path *path)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- int trace_id;
if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
return -EINVAL;
/* Configure the tracer based on the session's specifics */
etm_parse_event_config(drvdata, event);
-
- /*
- * perf allocates cpu ids as part of _setup_aux() - device needs to use
- * the allocated ID. This reads the current version without allocation.
- *
- * This does not use the trace id lock to prevent lock_dep issues
- * with perf locks - we know the ID cannot change until perf shuts down
- * the session
- */
- trace_id = coresight_trace_id_read_cpu_id_map(drvdata->cpu, id_map);
- if (!IS_VALID_CS_TRACE_ID(trace_id)) {
- dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n",
- dev_name(&drvdata->csdev->dev), drvdata->cpu);
- return -EINVAL;
- }
- drvdata->traceid = (u8)trace_id;
+ drvdata->traceid = path->trace_id;
/* And enable it */
return etm_enable_hw(drvdata);
}
-static int etm_enable_sysfs(struct coresight_device *csdev)
+static int etm_enable_sysfs(struct coresight_device *csdev, struct coresight_path *path)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct etm_enable_arg arg = { };
@@ -521,10 +485,7 @@ static int etm_enable_sysfs(struct coresight_device *csdev)
spin_lock(&drvdata->spinlock);
- /* sysfs needs to allocate and set a trace ID */
- ret = etm_read_alloc_trace_id(drvdata);
- if (ret < 0)
- goto unlock_enable_sysfs;
+ drvdata->traceid = path->trace_id;
/*
* Configure the ETM only if the CPU is online. If it isn't online
@@ -545,7 +506,6 @@ static int etm_enable_sysfs(struct coresight_device *csdev)
if (ret)
etm_release_trace_id(drvdata);
-unlock_enable_sysfs:
spin_unlock(&drvdata->spinlock);
if (!ret)
@@ -554,7 +514,7 @@ unlock_enable_sysfs:
}
static int etm_enable(struct coresight_device *csdev, struct perf_event *event,
- enum cs_mode mode, struct coresight_trace_id_map *id_map)
+ enum cs_mode mode, struct coresight_path *path)
{
int ret;
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -566,10 +526,10 @@ static int etm_enable(struct coresight_device *csdev, struct perf_event *event,
switch (mode) {
case CS_MODE_SYSFS:
- ret = etm_enable_sysfs(csdev);
+ ret = etm_enable_sysfs(csdev, path);
break;
case CS_MODE_PERF:
- ret = etm_enable_perf(csdev, event, id_map);
+ ret = etm_enable_perf(csdev, event, path);
break;
default:
ret = -EINVAL;
@@ -704,6 +664,7 @@ static const struct coresight_ops_source etm_source_ops = {
};
static const struct coresight_ops etm_cs_ops = {
+ .trace_id = coresight_etm_get_trace_id,
.source_ops = &etm_source_ops,
};
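Reviewer note: with etm_read_alloc_trace_id() gone, allocation happens once at the path level and drivers only consume path->trace_id. A guess at the obvious implementation of coresight_path_assign_trace_id() — find the source on the path and ask it through the new ops->trace_id callback; the node struct and function body are inferred, not copied from the patch:

#include <linux/coresight.h>
#include <linux/list.h>

/* Mirrors the core's internal path node: one device plus its list link. */
struct demo_path_node {
	struct coresight_device	*csdev;
	struct list_head	link;
};

static void demo_path_assign_trace_id(struct coresight_path *path,
				      enum cs_mode mode)
{
	struct coresight_device *sink = coresight_get_sink(path);
	struct demo_path_node *nd;
	int id;

	list_for_each_entry(nd, &path->path_list, link) {
		if (nd->csdev->type != CORESIGHT_DEV_TYPE_SOURCE)
			continue;
		if (!nd->csdev->ops->trace_id)
			break;
		id = nd->csdev->ops->trace_id(nd->csdev, mode, sink);
		if (IS_VALID_CS_TRACE_ID(id))
			path->trace_id = (u8)id;
		break;
	}
}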
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index 68c644be9813..b9006451f515 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -1190,10 +1190,9 @@ static DEVICE_ATTR_RO(cpu);
static ssize_t traceid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int trace_id;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ int trace_id = coresight_etm_get_trace_id(drvdata->csdev, CS_MODE_SYSFS, NULL);
- trace_id = etm_read_alloc_trace_id(drvdata);
if (trace_id < 0)
return trace_id;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 3d98e3371fff..2b8f10463840 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -24,7 +24,6 @@
#include <linux/cpu_pm.h>
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
-#include <linux/pm_wakeup.h>
#include <linux/amba/bus.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
@@ -233,25 +232,6 @@ static int etm4_cpu_id(struct coresight_device *csdev)
return drvdata->cpu;
}
-int etm4_read_alloc_trace_id(struct etmv4_drvdata *drvdata)
-{
- int trace_id;
-
- /*
- * This will allocate a trace ID to the cpu,
- * or return the one currently allocated.
- * The trace id function has its own lock
- */
- trace_id = coresight_trace_id_get_cpu_id(drvdata->cpu);
- if (IS_VALID_CS_TRACE_ID(trace_id))
- drvdata->trcid = (u8)trace_id;
- else
- dev_err(&drvdata->csdev->dev,
- "Failed to allocate trace ID for %s on CPU%d\n",
- dev_name(&drvdata->csdev->dev), drvdata->cpu);
- return trace_id;
-}
-
void etm4_release_trace_id(struct etmv4_drvdata *drvdata)
{
coresight_trace_id_put_cpu_id(drvdata->cpu);
@@ -428,6 +408,29 @@ static void etm4_check_arch_features(struct etmv4_drvdata *drvdata,
}
#endif /* CONFIG_ETM4X_IMPDEF_FEATURE */
+static void etm4x_sys_ins_barrier(struct csdev_access *csa, u32 offset, int pos, int val)
+{
+ if (!csa->io_mem)
+ isb();
+}
+
+/*
+ * etm4x_wait_status: Poll for TRCSTATR.<pos> == <val>. When using system
+ * instructions to access the trace unit, each access must be separated by a
+ * synchronization barrier. See ARM IHI 0064H.b, section "4.3.7 Synchronization
+ * of register updates", which notes for system instructions:
+ *
+ * "In particular, whenever disabling or enabling the trace unit, a poll of
+ * TRCSTATR needs explicit synchronization between each read of TRCSTATR"
+ */
+static int etm4x_wait_status(struct csdev_access *csa, int pos, int val)
+{
+ if (!csa->io_mem)
+ return coresight_timeout_action(csa, TRCSTATR, pos, val,
+ etm4x_sys_ins_barrier);
+ return coresight_timeout(csa, TRCSTATR, pos, val);
+}
+
static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
{
int i, rc;
@@ -459,7 +462,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
isb();
/* wait for TRCSTATR.IDLE to go up */
- if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
+ if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 1))
dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");
if (drvdata->nr_pe)
@@ -552,7 +555,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
isb();
/* wait for TRCSTATR.IDLE to go back down to '0' */
- if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
+ if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 0))
dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");
@@ -788,9 +791,9 @@ out:
static int etm4_enable_perf(struct coresight_device *csdev,
struct perf_event *event,
- struct coresight_trace_id_map *id_map)
+ struct coresight_path *path)
{
- int ret = 0, trace_id;
+ int ret = 0;
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) {
@@ -803,22 +806,7 @@ static int etm4_enable_perf(struct coresight_device *csdev,
if (ret)
goto out;
- /*
- * perf allocates cpu ids as part of _setup_aux() - device needs to use
- * the allocated ID. This reads the current version without allocation.
- *
- * This does not use the trace id lock to prevent lock_dep issues
- * with perf locks - we know the ID cannot change until perf shuts down
- * the session
- */
- trace_id = coresight_trace_id_read_cpu_id_map(drvdata->cpu, id_map);
- if (!IS_VALID_CS_TRACE_ID(trace_id)) {
- dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n",
- dev_name(&drvdata->csdev->dev), drvdata->cpu);
- ret = -EINVAL;
- goto out;
- }
- drvdata->trcid = (u8)trace_id;
+ drvdata->trcid = path->trace_id;
/* And enable it */
ret = etm4_enable_hw(drvdata);
@@ -827,7 +815,7 @@ out:
return ret;
}
-static int etm4_enable_sysfs(struct coresight_device *csdev)
+static int etm4_enable_sysfs(struct coresight_device *csdev, struct coresight_path *path)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct etm4_enable_arg arg = { };
@@ -842,12 +830,9 @@ static int etm4_enable_sysfs(struct coresight_device *csdev)
return ret;
}
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
- /* sysfs needs to read and allocate a trace ID */
- ret = etm4_read_alloc_trace_id(drvdata);
- if (ret < 0)
- goto unlock_sysfs_enable;
+ drvdata->trcid = path->trace_id;
/*
* Executing etm4_enable_hw on the cpu whose ETM is being enabled
@@ -864,8 +849,7 @@ static int etm4_enable_sysfs(struct coresight_device *csdev)
if (ret)
etm4_release_trace_id(drvdata);
-unlock_sysfs_enable:
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
if (!ret)
dev_dbg(&csdev->dev, "ETM tracing enabled\n");
@@ -873,7 +857,7 @@ unlock_sysfs_enable:
}
static int etm4_enable(struct coresight_device *csdev, struct perf_event *event,
- enum cs_mode mode, struct coresight_trace_id_map *id_map)
+ enum cs_mode mode, struct coresight_path *path)
{
int ret;
@@ -884,10 +868,10 @@ static int etm4_enable(struct coresight_device *csdev, struct perf_event *event,
switch (mode) {
case CS_MODE_SYSFS:
- ret = etm4_enable_sysfs(csdev);
+ ret = etm4_enable_sysfs(csdev, path);
break;
case CS_MODE_PERF:
- ret = etm4_enable_perf(csdev, event, id_map);
+ ret = etm4_enable_perf(csdev, event, path);
break;
default:
ret = -EINVAL;
@@ -941,10 +925,25 @@ static void etm4_disable_hw(void *info)
tsb_csync();
etm4x_relaxed_write32(csa, control, TRCPRGCTLR);
+ /*
+ * As recommended by section 4.3.7 ("Synchronization when using system
+ * instructions to program the trace unit") of ARM IHI 0064H.b, the
+ * self-hosted trace analyzer must perform a Context synchronization
+ * event between writing to the TRCPRGCTLR and reading the TRCSTATR.
+ */
+ if (!csa->io_mem)
+ isb();
+
/* wait for TRCSTATR.PMSTABLE to go to '1' */
- if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1))
+ if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1))
dev_err(etm_dev,
"timeout while waiting for PM stable Trace Status\n");
+ /*
+ * As recommended by section 4.3.7 (Synchronization of register updates)
+ * of ARM IHI 0064H.b.
+ */
+ isb();
+
/* read the status of the single shot comparators */
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
config->ss_status[i] =
@@ -1012,7 +1011,7 @@ static void etm4_disable_sysfs(struct coresight_device *csdev)
* DYING hotplug callback is serviced by the ETM driver.
*/
cpus_read_lock();
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
/*
* Executing etm4_disable_hw on the cpu whose ETM is being disabled
@@ -1020,7 +1019,7 @@ static void etm4_disable_sysfs(struct coresight_device *csdev)
*/
smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
cpus_read_unlock();
/*
@@ -1067,6 +1066,7 @@ static const struct coresight_ops_source etm4_source_ops = {
};
static const struct coresight_ops etm4_cs_ops = {
+ .trace_id = coresight_etm_get_trace_id,
.source_ops = &etm4_source_ops,
};
@@ -1698,13 +1698,13 @@ static int etm4_starting_cpu(unsigned int cpu)
if (!etmdrvdata[cpu])
return 0;
- spin_lock(&etmdrvdata[cpu]->spinlock);
+ raw_spin_lock(&etmdrvdata[cpu]->spinlock);
if (!etmdrvdata[cpu]->os_unlock)
etm4_os_unlock(etmdrvdata[cpu]);
if (coresight_get_mode(etmdrvdata[cpu]->csdev))
etm4_enable_hw(etmdrvdata[cpu]);
- spin_unlock(&etmdrvdata[cpu]->spinlock);
+ raw_spin_unlock(&etmdrvdata[cpu]->spinlock);
return 0;
}
@@ -1713,10 +1713,10 @@ static int etm4_dying_cpu(unsigned int cpu)
if (!etmdrvdata[cpu])
return 0;
- spin_lock(&etmdrvdata[cpu]->spinlock);
+ raw_spin_lock(&etmdrvdata[cpu]->spinlock);
if (coresight_get_mode(etmdrvdata[cpu]->csdev))
etm4_disable_hw(etmdrvdata[cpu]);
- spin_unlock(&etmdrvdata[cpu]->spinlock);
+ raw_spin_unlock(&etmdrvdata[cpu]->spinlock);
return 0;
}
@@ -1746,7 +1746,7 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
etm4_os_lock(drvdata);
/* wait for TRCSTATR.PMSTABLE to go up */
- if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) {
+ if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1)) {
dev_err(etm_dev,
"timeout while waiting for PM Stable Status\n");
etm4_os_unlock(drvdata);
@@ -1837,7 +1837,7 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
state->trcpdcr = etm4x_read32(csa, TRCPDCR);
/* wait for TRCSTATR.IDLE to go up */
- if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) {
+ if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 1)) {
dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");
etm4_os_unlock(drvdata);
@@ -2160,7 +2160,7 @@ static int etm4_probe(struct device *dev)
return -ENOMEM;
}
- spin_lock_init(&drvdata->spinlock);
+ raw_spin_lock_init(&drvdata->spinlock);
drvdata->cpu = coresight_get_cpu(dev);
if (drvdata->cpu < 0)
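Reviewer note: the etm4x_wait_status()/etm4x_sys_ins_barrier() pair implements the IHI 0064H.b rule that, with system-instruction access, successive TRCSTATR reads must be separated by a context synchronization event. Spelled out as an explicit loop — the retry count and delay are stand-ins for what coresight_timeout_action() does internally:

#include <linux/coresight.h>
#include <linux/delay.h>
#include <asm/barrier.h>

static bool demo_poll_trcstatr(struct csdev_access *csa, u32 offset,
			       int pos, int val, int retries)
{
	while (retries--) {
		u32 statr = csdev_access_relaxed_read32(csa, offset);

		if (((statr >> pos) & 1) == val)
			return true;
		/* System-instruction access: synchronize between the reads. */
		if (!csa->io_mem)
			isb();
		udelay(1);
	}
	return false;
}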
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index c767f8ae4cf1..fdd0956fecb3 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -4,6 +4,7 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <linux/coresight.h>
#include <linux/pid_namespace.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
@@ -174,7 +175,7 @@ static ssize_t reset_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
if (val)
config->mode = 0x0;
@@ -266,7 +267,7 @@ static ssize_t reset_store(struct device *dev,
config->vmid_mask0 = 0x0;
config->vmid_mask1 = 0x0;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
/* for sysfs - only release trace id when resetting */
etm4_release_trace_id(drvdata);
@@ -300,7 +301,7 @@ static ssize_t mode_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
config->mode = val & ETMv4_MODE_ALL;
if (drvdata->instrp0 == true) {
@@ -437,7 +438,7 @@ static ssize_t mode_store(struct device *dev,
if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
etm4_config_trace_mode(config);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
@@ -466,14 +467,14 @@ static ssize_t pe_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
if (val > drvdata->nr_pe) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EINVAL;
}
config->pe_sel = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(pe);
@@ -501,7 +502,7 @@ static ssize_t event_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
switch (drvdata->nr_event) {
case 0x0:
/* EVENT0, bits[7:0] */
@@ -522,7 +523,7 @@ static ssize_t event_store(struct device *dev,
default:
break;
}
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(event);
@@ -550,7 +551,7 @@ static ssize_t event_instren_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
/* start by clearing all instruction event enable bits */
config->eventctrl1 &= ~TRCEVENTCTL1R_INSTEN_MASK;
switch (drvdata->nr_event) {
@@ -578,7 +579,7 @@ static ssize_t event_instren_store(struct device *dev,
default:
break;
}
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(event_instren);
@@ -739,11 +740,11 @@ static ssize_t event_vinst_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
val &= TRCVICTLR_EVENT_MASK >> __bf_shf(TRCVICTLR_EVENT_MASK);
config->vinst_ctrl &= ~TRCVICTLR_EVENT_MASK;
config->vinst_ctrl |= FIELD_PREP(TRCVICTLR_EVENT_MASK, val);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(event_vinst);
@@ -771,13 +772,13 @@ static ssize_t s_exlevel_vinst_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
/* clear all EXLEVEL_S bits */
config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_S_MASK;
/* enable instruction tracing for corresponding exception level */
val &= drvdata->s_ex_level;
config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_S_MASK);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(s_exlevel_vinst);
@@ -806,13 +807,13 @@ static ssize_t ns_exlevel_vinst_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
/* clear EXLEVEL_NS bits */
config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_NS_MASK;
/* enable instruction tracing for corresponding exception level */
val &= drvdata->ns_ex_level;
config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_NS_MASK);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(ns_exlevel_vinst);
@@ -846,9 +847,9 @@ static ssize_t addr_idx_store(struct device *dev,
* Use spinlock to ensure index doesn't change while it gets
* dereferenced multiple times within a spinlock block elsewhere.
*/
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
config->addr_idx = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_idx);
@@ -862,7 +863,7 @@ static ssize_t addr_instdatatype_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
val = FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]);
len = scnprintf(buf, PAGE_SIZE, "%s\n",
@@ -870,7 +871,7 @@ static ssize_t addr_instdatatype_show(struct device *dev,
(val == TRCACATRn_TYPE_DATA_LOAD_ADDR ? "data_load" :
(val == TRCACATRn_TYPE_DATA_STORE_ADDR ? "data_store" :
"data_load_store")));
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return len;
}
@@ -888,13 +889,13 @@ static ssize_t addr_instdatatype_store(struct device *dev,
if (sscanf(buf, "%s", str) != 1)
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (!strcmp(str, "instr"))
/* TYPE, bits[1:0] */
config->addr_acc[idx] &= ~TRCACATRn_TYPE_MASK;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_instdatatype);
@@ -909,14 +910,14 @@ static ssize_t addr_single_show(struct device *dev,
struct etmv4_config *config = &drvdata->config;
idx = config->addr_idx;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EPERM;
}
val = (unsigned long)config->addr_val[idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -932,17 +933,17 @@ static ssize_t addr_single_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EPERM;
}
config->addr_val[idx] = (u64)val;
config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_single);
@@ -956,23 +957,23 @@ static ssize_t addr_range_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (idx % 2 != 0) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EPERM;
}
if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
(config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EPERM;
}
val1 = (unsigned long)config->addr_val[idx];
val2 = (unsigned long)config->addr_val[idx + 1];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}
@@ -995,10 +996,10 @@ static ssize_t addr_range_store(struct device *dev,
if (val1 > val2)
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (idx % 2 != 0) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EPERM;
}
@@ -1006,7 +1007,7 @@ static ssize_t addr_range_store(struct device *dev,
config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
(config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EPERM;
}
@@ -1023,7 +1024,7 @@ static ssize_t addr_range_store(struct device *dev,
exclude = config->mode & ETM_MODE_EXCLUDE;
etm4_set_mode_exclude(drvdata, exclude ? true : false);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_range);
@@ -1037,17 +1038,17 @@ static ssize_t addr_start_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EPERM;
}
val = (unsigned long)config->addr_val[idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1063,22 +1064,22 @@ static ssize_t addr_start_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (!drvdata->nr_addr_cmp) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EINVAL;
}
if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EPERM;
}
config->addr_val[idx] = (u64)val;
config->addr_type[idx] = ETM_ADDR_TYPE_START;
config->vissctlr |= BIT(idx);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_start);
@@ -1092,17 +1093,17 @@ static ssize_t addr_stop_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EPERM;
}
val = (unsigned long)config->addr_val[idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1118,22 +1119,22 @@ static ssize_t addr_stop_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (!drvdata->nr_addr_cmp) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EINVAL;
}
if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return -EPERM;
}
config->addr_val[idx] = (u64)val;
config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
config->vissctlr |= BIT(idx + 16);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_stop);
@@ -1147,14 +1148,14 @@ static ssize_t addr_ctxtype_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
/* CONTEXTTYPE, bits[3:2] */
val = FIELD_GET(TRCACATRn_CONTEXTTYPE_MASK, config->addr_acc[idx]);
len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
(val == ETM_CTX_CTXID ? "ctxid" :
(val == ETM_CTX_VMID ? "vmid" : "all")));
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return len;
}
@@ -1172,7 +1173,7 @@ static ssize_t addr_ctxtype_store(struct device *dev,
if (sscanf(buf, "%s", str) != 1)
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
if (!strcmp(str, "none"))
/* start by clearing context type bits */
@@ -1199,7 +1200,7 @@ static ssize_t addr_ctxtype_store(struct device *dev,
if (drvdata->numvmidc)
config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID;
}
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_ctxtype);
@@ -1213,11 +1214,11 @@ static ssize_t addr_context_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
/* context ID comparator bits[6:4] */
val = FIELD_GET(TRCACATRn_CONTEXT_MASK, config->addr_acc[idx]);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1238,12 +1239,12 @@ static ssize_t addr_context_store(struct device *dev,
drvdata->numcidc : drvdata->numvmidc))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
/* clear context ID comparator bits[6:4] */
config->addr_acc[idx] &= ~TRCACATRn_CONTEXT_MASK;
config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_CONTEXT_MASK);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_context);
@@ -1257,10 +1258,10 @@ static ssize_t addr_exlevel_s_ns_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
val = FIELD_GET(TRCACATRn_EXLEVEL_MASK, config->addr_acc[idx]);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1279,12 +1280,12 @@ static ssize_t addr_exlevel_s_ns_store(struct device *dev,
if (val & ~(TRCACATRn_EXLEVEL_MASK >> __bf_shf(TRCACATRn_EXLEVEL_MASK)))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
/* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
config->addr_acc[idx] &= ~TRCACATRn_EXLEVEL_MASK;
config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_EXLEVEL_MASK);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(addr_exlevel_s_ns);
@@ -1307,7 +1308,7 @@ static ssize_t addr_cmp_view_show(struct device *dev,
int size = 0;
bool exclude = false;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->addr_idx;
addr_v = config->addr_val[idx];
addr_ctrl = config->addr_acc[idx];
@@ -1322,7 +1323,7 @@ static ssize_t addr_cmp_view_show(struct device *dev,
}
exclude = config->viiectlr & BIT(idx / 2 + 16);
}
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
if (addr_type) {
size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
addr_type_names[addr_type], addr_v);
@@ -1366,9 +1367,9 @@ static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
if (!drvdata->nr_pe_cmp)
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
config->vipcssctlr = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);
@@ -1402,9 +1403,9 @@ static ssize_t seq_idx_store(struct device *dev,
* Use spinlock to ensure index doesn't change while it gets
* dereferenced multiple times within a spinlock block elsewhere.
*/
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
config->seq_idx = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(seq_idx);
@@ -1448,10 +1449,10 @@ static ssize_t seq_event_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->seq_idx;
val = config->seq_ctrl[idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1467,11 +1468,11 @@ static ssize_t seq_event_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->seq_idx;
/* Seq control has two masks B[15:8] F[7:0] */
config->seq_ctrl[idx] = val & 0xFFFF;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(seq_event);
@@ -1535,9 +1536,9 @@ static ssize_t cntr_idx_store(struct device *dev,
* Use spinlock to ensure index doesn't change while it gets
* dereferenced multiple times within a spinlock block elsewhere.
*/
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
config->cntr_idx = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(cntr_idx);
@@ -1551,10 +1552,10 @@ static ssize_t cntrldvr_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->cntr_idx;
val = config->cntrldvr[idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1572,10 +1573,10 @@ static ssize_t cntrldvr_store(struct device *dev,
if (val > ETM_CNTR_MAX_VAL)
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->cntr_idx;
config->cntrldvr[idx] = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(cntrldvr);
@@ -1589,10 +1590,10 @@ static ssize_t cntr_val_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->cntr_idx;
val = config->cntr_val[idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1610,10 +1611,10 @@ static ssize_t cntr_val_store(struct device *dev,
if (val > ETM_CNTR_MAX_VAL)
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->cntr_idx;
config->cntr_val[idx] = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(cntr_val);
@@ -1627,10 +1628,10 @@ static ssize_t cntr_ctrl_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->cntr_idx;
val = config->cntr_ctrl[idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1646,10 +1647,10 @@ static ssize_t cntr_ctrl_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->cntr_idx;
config->cntr_ctrl[idx] = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(cntr_ctrl);
@@ -1687,9 +1688,9 @@ static ssize_t res_idx_store(struct device *dev,
* Use spinlock to ensure index doesn't change while it gets
* dereferenced multiple times within a spinlock block elsewhere.
*/
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
config->res_idx = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(res_idx);
@@ -1703,10 +1704,10 @@ static ssize_t res_ctrl_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->res_idx;
val = config->res_ctrl[idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1722,7 +1723,7 @@ static ssize_t res_ctrl_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->res_idx;
/* For odd idx the pair inversion bit is RES0 */
if (idx % 2 != 0)
@@ -1732,7 +1733,7 @@ static ssize_t res_ctrl_store(struct device *dev,
TRCRSCTLRn_INV |
TRCRSCTLRn_GROUP_MASK |
TRCRSCTLRn_SELECT_MASK);
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(res_ctrl);
@@ -1761,9 +1762,9 @@ static ssize_t sshot_idx_store(struct device *dev,
if (val >= drvdata->nr_ss_cmp)
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
config->ss_idx = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(sshot_idx);
@@ -1776,9 +1777,9 @@ static ssize_t sshot_ctrl_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
val = config->ss_ctrl[config->ss_idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1794,12 +1795,12 @@ static ssize_t sshot_ctrl_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->ss_idx;
config->ss_ctrl[idx] = FIELD_PREP(TRCSSCCRn_SAC_ARC_RST_MASK, val);
/* must clear bit 31 in related status register on programming */
config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(sshot_ctrl);
@@ -1811,9 +1812,9 @@ static ssize_t sshot_status_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
val = config->ss_status[config->ss_idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(sshot_status);
@@ -1826,9 +1827,9 @@ static ssize_t sshot_pe_ctrl_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
val = config->ss_pe_cmp[config->ss_idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1844,12 +1845,12 @@ static ssize_t sshot_pe_ctrl_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->ss_idx;
config->ss_pe_cmp[idx] = FIELD_PREP(TRCSSPCICRn_PC_MASK, val);
/* must clear bit 31 in related status register on programming */
config->ss_status[idx] &= ~TRCSSCSRn_STATUS;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(sshot_pe_ctrl);
@@ -1883,9 +1884,9 @@ static ssize_t ctxid_idx_store(struct device *dev,
* Use spinlock to ensure index doesn't change while it gets
* dereferenced multiple times within a spinlock block elsewhere.
*/
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
config->ctxid_idx = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(ctxid_idx);
@@ -1906,10 +1907,10 @@ static ssize_t ctxid_pid_show(struct device *dev,
if (task_active_pid_ns(current) != &init_pid_ns)
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->ctxid_idx;
val = (unsigned long)config->ctxid_pid[idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1944,10 +1945,10 @@ static ssize_t ctxid_pid_store(struct device *dev,
if (kstrtoul(buf, 16, &pid))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
idx = config->ctxid_idx;
config->ctxid_pid[idx] = (u64)pid;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(ctxid_pid);
@@ -1967,10 +1968,10 @@ static ssize_t ctxid_masks_show(struct device *dev,
if (task_active_pid_ns(current) != &init_pid_ns)
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
val1 = config->ctxid_mask0;
val2 = config->ctxid_mask1;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}
@@ -2003,7 +2004,7 @@ static ssize_t ctxid_masks_store(struct device *dev,
if ((drvdata->numcidc > 4) && (nr_inputs != 2))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
/*
* each byte[0..3] controls mask value applied to ctxid
* comparator[0..3]
@@ -2075,7 +2076,7 @@ static ssize_t ctxid_masks_store(struct device *dev,
mask >>= 0x8;
}
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(ctxid_masks);
@@ -2109,9 +2110,9 @@ static ssize_t vmid_idx_store(struct device *dev,
* Use spinlock to ensure index doesn't change while it gets
* dereferenced multiple times within a spinlock block elsewhere.
*/
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
config->vmid_idx = val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(vmid_idx);
@@ -2131,9 +2132,9 @@ static ssize_t vmid_val_show(struct device *dev,
if (!task_is_in_init_pid_ns(current))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
val = (unsigned long)config->vmid_val[config->vmid_idx];
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -2161,9 +2162,9 @@ static ssize_t vmid_val_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
config->vmid_val[config->vmid_idx] = (u64)val;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(vmid_val);
@@ -2182,10 +2183,10 @@ static ssize_t vmid_masks_show(struct device *dev,
if (!task_is_in_init_pid_ns(current))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
val1 = config->vmid_mask0;
val2 = config->vmid_mask1;
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}
@@ -2217,7 +2218,7 @@ static ssize_t vmid_masks_store(struct device *dev,
if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
+ raw_spin_lock(&drvdata->spinlock);
/*
* each byte[0..3] controls mask value applied to vmid
@@ -2290,7 +2291,7 @@ static ssize_t vmid_masks_store(struct device *dev,
else
mask >>= 0x8;
}
- spin_unlock(&drvdata->spinlock);
+ raw_spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(vmid_masks);
@@ -2402,10 +2403,9 @@ static ssize_t trctraceid_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int trace_id;
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ int trace_id = coresight_etm_get_trace_id(drvdata->csdev, CS_MODE_SYSFS, NULL);
- trace_id = etm4_read_alloc_trace_id(drvdata);
if (trace_id < 0)
return trace_id;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 1119762b5cec..bd7db36ba197 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -989,7 +989,7 @@ struct etmv4_drvdata {
struct clk *pclk;
void __iomem *base;
struct coresight_device *csdev;
- spinlock_t spinlock;
+ raw_spinlock_t spinlock;
int cpu;
u8 arch;
u8 nr_pe;
@@ -1066,6 +1066,5 @@ static inline bool etm4x_is_ete(struct etmv4_drvdata *drvdata)
return drvdata->arch >= ETM_ARCH_ETE;
}
-int etm4_read_alloc_trace_id(struct etmv4_drvdata *drvdata);
void etm4_release_trace_id(struct etmv4_drvdata *drvdata);
#endif
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 8faf51469bb8..0541712b2bcb 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -47,7 +47,7 @@ struct funnel_drvdata {
struct clk *pclk;
struct coresight_device *csdev;
unsigned long priority;
- spinlock_t spinlock;
+ raw_spinlock_t spinlock;
};
static int dynamic_funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
@@ -85,7 +85,7 @@ static int funnel_enable(struct coresight_device *csdev,
unsigned long flags;
bool first_enable = false;
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
if (in->dest_refcnt == 0) {
if (drvdata->base)
rc = dynamic_funnel_enable_hw(drvdata, in->dest_port);
@@ -94,7 +94,7 @@ static int funnel_enable(struct coresight_device *csdev,
}
if (!rc)
in->dest_refcnt++;
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (first_enable)
dev_dbg(&csdev->dev, "FUNNEL inport %d enabled\n",
@@ -129,13 +129,13 @@ static void funnel_disable(struct coresight_device *csdev,
unsigned long flags;
bool last_disable = false;
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
if (--in->dest_refcnt == 0) {
if (drvdata->base)
dynamic_funnel_disable_hw(drvdata, in->dest_port);
last_disable = true;
}
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (last_disable)
dev_dbg(&csdev->dev, "FUNNEL inport %d disabled\n",
@@ -266,7 +266,7 @@ static int funnel_probe(struct device *dev, struct resource *res)
}
dev->platform_data = pdata;
- spin_lock_init(&drvdata->spinlock);
+ raw_spin_lock_init(&drvdata->spinlock);
desc.type = CORESIGHT_DEV_TYPE_LINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
desc.ops = &funnel_cs_ops;
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 76403530f33e..82644aff8d2b 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -132,16 +132,16 @@ static inline void CS_UNLOCK(void __iomem *addr)
} while (0);
}
-void coresight_disable_path(struct list_head *path);
-int coresight_enable_path(struct list_head *path, enum cs_mode mode,
+void coresight_disable_path(struct coresight_path *path);
+int coresight_enable_path(struct coresight_path *path, enum cs_mode mode,
void *sink_data);
-struct coresight_device *coresight_get_sink(struct list_head *path);
+struct coresight_device *coresight_get_sink(struct coresight_path *path);
struct coresight_device *coresight_get_sink_by_id(u32 id);
struct coresight_device *
coresight_find_default_sink(struct coresight_device *csdev);
-struct list_head *coresight_build_path(struct coresight_device *csdev,
- struct coresight_device *sink);
-void coresight_release_path(struct list_head *path);
+struct coresight_path *coresight_build_path(struct coresight_device *csdev,
+ struct coresight_device *sink);
+void coresight_release_path(struct coresight_path *path);
int coresight_add_sysfs_link(struct coresight_sysfs_link *info);
void coresight_remove_sysfs_link(struct coresight_sysfs_link *info);
int coresight_create_conns_sysfs_group(struct coresight_device *csdev);
@@ -152,6 +152,8 @@ int coresight_make_links(struct coresight_device *orig,
void coresight_remove_links(struct coresight_device *orig,
struct coresight_connection *conn);
u32 coresight_get_sink_id(struct coresight_device *csdev);
+void coresight_path_assign_trace_id(struct coresight_path *path,
+ enum cs_mode mode);
#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM3X)
extern int etm_readl_cp14(u32 off, unsigned int *val);
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index a1181c9048c0..ee7ee79f6cf7 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -41,7 +41,7 @@ struct replicator_drvdata {
struct clk *atclk;
struct clk *pclk;
struct coresight_device *csdev;
- spinlock_t spinlock;
+ raw_spinlock_t spinlock;
bool check_idfilter_val;
};
@@ -125,7 +125,7 @@ static int replicator_enable(struct coresight_device *csdev,
unsigned long flags;
bool first_enable = false;
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
if (out->src_refcnt == 0) {
if (drvdata->base)
rc = dynamic_replicator_enable(drvdata, in->dest_port,
@@ -135,7 +135,7 @@ static int replicator_enable(struct coresight_device *csdev,
}
if (!rc)
out->src_refcnt++;
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (first_enable)
dev_dbg(&csdev->dev, "REPLICATOR enabled\n");
@@ -179,14 +179,14 @@ static void replicator_disable(struct coresight_device *csdev,
unsigned long flags;
bool last_disable = false;
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
if (--out->src_refcnt == 0) {
if (drvdata->base)
dynamic_replicator_disable(drvdata, in->dest_port,
out->src_port);
last_disable = true;
}
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (last_disable)
dev_dbg(&csdev->dev, "REPLICATOR disabled\n");
@@ -277,7 +277,7 @@ static int replicator_probe(struct device *dev, struct resource *res)
}
dev->platform_data = pdata;
- spin_lock_init(&drvdata->spinlock);
+ raw_spin_lock_init(&drvdata->spinlock);
desc.type = CORESIGHT_DEV_TYPE_LINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
desc.ops = &replicator_cs_ops;
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index b581a30a1cd9..26f9339f38b9 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -195,7 +195,7 @@ static void stm_enable_hw(struct stm_drvdata *drvdata)
static int stm_enable(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode,
- __maybe_unused struct coresight_trace_id_map *trace_id)
+ __maybe_unused struct coresight_path *path)
{
struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -281,12 +281,23 @@ static void stm_disable(struct coresight_device *csdev,
}
}
+static int stm_trace_id(struct coresight_device *csdev, __maybe_unused enum cs_mode mode,
+ __maybe_unused struct coresight_device *sink)
+{
+ struct stm_drvdata *drvdata;
+
+ drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ return drvdata->traceid;
+}
+
static const struct coresight_ops_source stm_source_ops = {
.enable = stm_enable,
.disable = stm_disable,
};
static const struct coresight_ops stm_cs_ops = {
+ .trace_id = stm_trace_id,
.source_ops = &stm_source_ops,
};
diff --git a/drivers/hwtracing/coresight/coresight-syscfg-configfs.c b/drivers/hwtracing/coresight/coresight-syscfg-configfs.c
index 433ede94dd63..213b4159b062 100644
--- a/drivers/hwtracing/coresight/coresight-syscfg-configfs.c
+++ b/drivers/hwtracing/coresight/coresight-syscfg-configfs.c
@@ -160,7 +160,7 @@ static struct configfs_attribute *cscfg_config_view_attrs[] = {
NULL,
};
-static struct config_item_type cscfg_config_view_type = {
+static const struct config_item_type cscfg_config_view_type = {
.ct_owner = THIS_MODULE,
.ct_attrs = cscfg_config_view_attrs,
};
@@ -170,7 +170,7 @@ static struct configfs_attribute *cscfg_config_preset_attrs[] = {
NULL,
};
-static struct config_item_type cscfg_config_preset_type = {
+static const struct config_item_type cscfg_config_preset_type = {
.ct_owner = THIS_MODULE,
.ct_attrs = cscfg_config_preset_attrs,
};
@@ -272,7 +272,7 @@ static struct configfs_attribute *cscfg_feature_view_attrs[] = {
NULL,
};
-static struct config_item_type cscfg_feature_view_type = {
+static const struct config_item_type cscfg_feature_view_type = {
.ct_owner = THIS_MODULE,
.ct_attrs = cscfg_feature_view_attrs,
};
@@ -309,7 +309,7 @@ static struct configfs_attribute *cscfg_param_view_attrs[] = {
NULL,
};
-static struct config_item_type cscfg_param_view_type = {
+static const struct config_item_type cscfg_param_view_type = {
.ct_owner = THIS_MODULE,
.ct_attrs = cscfg_param_view_attrs,
};
@@ -380,7 +380,7 @@ static struct config_group *cscfg_create_feature_group(struct cscfg_feature_desc
return &feat_view->group;
}
-static struct config_item_type cscfg_configs_type = {
+static const struct config_item_type cscfg_configs_type = {
.ct_owner = THIS_MODULE,
};
@@ -414,7 +414,7 @@ void cscfg_configfs_del_config(struct cscfg_config_desc *config_desc)
}
}
-static struct config_item_type cscfg_features_type = {
+static const struct config_item_type cscfg_features_type = {
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/hwtracing/coresight/coresight-syscfg.c b/drivers/hwtracing/coresight/coresight-syscfg.c
index 11138a9762b0..a70c1454b410 100644
--- a/drivers/hwtracing/coresight/coresight-syscfg.c
+++ b/drivers/hwtracing/coresight/coresight-syscfg.c
@@ -89,9 +89,9 @@ static int cscfg_add_csdev_cfg(struct coresight_device *csdev,
}
/* if matched features, add config to device.*/
if (config_csdev) {
- spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
+ raw_spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
list_add(&config_csdev->node, &csdev->config_csdev_list);
- spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
+ raw_spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
}
return 0;
@@ -194,9 +194,9 @@ static int cscfg_load_feat_csdev(struct coresight_device *csdev,
/* add to internal csdev feature list & initialise using reset call */
cscfg_reset_feat(feat_csdev);
- spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
+ raw_spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
list_add(&feat_csdev->node, &csdev->feature_csdev_list);
- spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
+ raw_spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
return 0;
}
@@ -765,7 +765,7 @@ static int cscfg_list_add_csdev(struct coresight_device *csdev,
INIT_LIST_HEAD(&csdev->feature_csdev_list);
INIT_LIST_HEAD(&csdev->config_csdev_list);
- spin_lock_init(&csdev->cscfg_csdev_lock);
+ raw_spin_lock_init(&csdev->cscfg_csdev_lock);
return 0;
}
@@ -855,7 +855,7 @@ void cscfg_csdev_reset_feats(struct coresight_device *csdev)
struct cscfg_feature_csdev *feat_csdev;
unsigned long flags;
- spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
+ raw_spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
if (list_empty(&csdev->feature_csdev_list))
goto unlock_exit;
@@ -863,7 +863,7 @@ void cscfg_csdev_reset_feats(struct coresight_device *csdev)
cscfg_reset_feat(feat_csdev);
unlock_exit:
- spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
+ raw_spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
}
EXPORT_SYMBOL_GPL(cscfg_csdev_reset_feats);
@@ -1059,7 +1059,7 @@ int cscfg_csdev_enable_active_config(struct coresight_device *csdev,
* Look for matching configuration - set the active configuration
* context if found.
*/
- spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
+ raw_spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
list_for_each_entry(config_csdev_item, &csdev->config_csdev_list, node) {
config_desc = config_csdev_item->config_desc;
if ((atomic_read(&config_desc->active_cnt)) &&
@@ -1069,7 +1069,7 @@ int cscfg_csdev_enable_active_config(struct coresight_device *csdev,
break;
}
}
- spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
+ raw_spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
/*
* If found, attempt to enable
@@ -1090,12 +1090,12 @@ int cscfg_csdev_enable_active_config(struct coresight_device *csdev,
*
* Set enabled if OK, err if not.
*/
- spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
+ raw_spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
if (csdev->active_cscfg_ctxt)
config_csdev_active->enabled = true;
else
err = -EBUSY;
- spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
+ raw_spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
}
}
return err;
@@ -1124,7 +1124,7 @@ void cscfg_csdev_disable_active_config(struct coresight_device *csdev)
* If it was not enabled, we have no work to do, otherwise mark as disabled.
* Clear the active config pointer.
*/
- spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
+ raw_spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
config_csdev = (struct cscfg_config_csdev *)csdev->active_cscfg_ctxt;
if (config_csdev) {
if (!config_csdev->enabled)
@@ -1133,7 +1133,7 @@ void cscfg_csdev_disable_active_config(struct coresight_device *csdev)
config_csdev->enabled = false;
}
csdev->active_cscfg_ctxt = NULL;
- spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
+ raw_spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
/* true if there was an enabled active config */
if (config_csdev)
diff --git a/drivers/hwtracing/coresight/coresight-sysfs.c b/drivers/hwtracing/coresight/coresight-sysfs.c
index a01c9e54e2ed..feadaf065b53 100644
--- a/drivers/hwtracing/coresight/coresight-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-sysfs.c
@@ -22,7 +22,7 @@ static DEFINE_IDR(path_idr);
* When operating Coresight drivers from the sysFS interface, only a single
* path can exist from a tracer (associated to a CPU) to a sink.
*/
-static DEFINE_PER_CPU(struct list_head *, tracer_path);
+static DEFINE_PER_CPU(struct coresight_path *, tracer_path);
ssize_t coresight_simple_show_pair(struct device *_dev,
struct device_attribute *attr, char *buf)
@@ -53,7 +53,8 @@ ssize_t coresight_simple_show32(struct device *_dev,
EXPORT_SYMBOL_GPL(coresight_simple_show32);
static int coresight_enable_source_sysfs(struct coresight_device *csdev,
- enum cs_mode mode, void *data)
+ enum cs_mode mode,
+ struct coresight_path *path)
{
int ret;
@@ -64,7 +65,7 @@ static int coresight_enable_source_sysfs(struct coresight_device *csdev,
*/
lockdep_assert_held(&coresight_mutex);
if (coresight_get_mode(csdev) != CS_MODE_SYSFS) {
- ret = source_ops(csdev)->enable(csdev, data, mode, NULL);
+ ret = source_ops(csdev)->enable(csdev, NULL, mode, path);
if (ret)
return ret;
}
@@ -167,7 +168,7 @@ int coresight_enable_sysfs(struct coresight_device *csdev)
{
int cpu, ret = 0;
struct coresight_device *sink;
- struct list_head *path;
+ struct coresight_path *path;
enum coresight_dev_subtype_source subtype;
u32 hash;
@@ -209,11 +210,15 @@ int coresight_enable_sysfs(struct coresight_device *csdev)
goto out;
}
+ coresight_path_assign_trace_id(path, CS_MODE_SYSFS);
+ if (!IS_VALID_CS_TRACE_ID(path->trace_id))
+ goto err_path;
+
ret = coresight_enable_path(path, CS_MODE_SYSFS, NULL);
if (ret)
goto err_path;
- ret = coresight_enable_source_sysfs(csdev, CS_MODE_SYSFS, NULL);
+ ret = coresight_enable_source_sysfs(csdev, CS_MODE_SYSFS, path);
if (ret)
goto err_source;
@@ -262,7 +267,7 @@ EXPORT_SYMBOL_GPL(coresight_enable_sysfs);
void coresight_disable_sysfs(struct coresight_device *csdev)
{
int cpu, ret;
- struct list_head *path = NULL;
+ struct coresight_path *path = NULL;
u32 hash;
mutex_lock(&coresight_mutex);
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index e9876252a789..a7814e8e657b 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -23,6 +23,7 @@
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/platform_device.h>
@@ -104,6 +105,128 @@ u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata)
return mask;
}
+static bool is_tmc_crashdata_valid(struct tmc_drvdata *drvdata)
+{
+ struct tmc_crash_metadata *mdata;
+
+ if (!tmc_has_reserved_buffer(drvdata) ||
+ !tmc_has_crash_mdata_buffer(drvdata))
+ return false;
+
+ mdata = drvdata->crash_mdata.vaddr;
+
+ /* Check version match */
+ if (mdata->version != CS_CRASHDATA_VERSION)
+ return false;
+
+ /* Check for valid metadata */
+ if (!mdata->valid) {
+ dev_dbg(&drvdata->csdev->dev,
+ "Data invalid in tmc crash metadata\n");
+ return false;
+ }
+
+ /*
+ * The buffer address recorded in the metadata for retrieving trace
+ * data from the previous boot is expected to be the same as the
+ * reserved trace buffer memory region provided through the DTS
+ */
+ if (drvdata->resrv_buf.paddr != mdata->trace_paddr) {
+ dev_dbg(&drvdata->csdev->dev,
+ "Trace buffer address of previous boot invalid\n");
+ return false;
+ }
+
+ /* Check data integrity of metadata */
+ if (mdata->crc32_mdata != find_crash_metadata_crc(mdata)) {
+ dev_err(&drvdata->csdev->dev,
+ "CRC mismatch in tmc crash metadata\n");
+ return false;
+ }
+ /* Check data integrity of tracedata */
+ if (mdata->crc32_tdata != find_crash_tracedata_crc(drvdata, mdata)) {
+ dev_err(&drvdata->csdev->dev,
+ "CRC mismatch in tmc crash tracedata\n");
+ return false;
+ }
+
+ return true;
+}
+
+static inline ssize_t tmc_get_resvbuf_trace(struct tmc_drvdata *drvdata,
+ loff_t pos, size_t len, char **bufpp)
+{
+ s64 offset;
+ ssize_t actual = len;
+ struct tmc_resrv_buf *rbuf = &drvdata->resrv_buf;
+
+ if (pos + actual > rbuf->len)
+ actual = rbuf->len - pos;
+ if (actual <= 0)
+ return 0;
+
+ /* Compute the offset from which we read the data */
+ offset = rbuf->offset + pos;
+ if (offset >= rbuf->size)
+ offset -= rbuf->size;
+
+ /* Adjust the length to limit this transaction to end of buffer */
+ actual = (actual < (rbuf->size - offset)) ?
+ actual : rbuf->size - offset;
+
+ *bufpp = (char *)rbuf->vaddr + offset;
+
+ return actual;
+}
+
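tmc_get_resvbuf_trace() above treats the reserved region as a ring whose oldest
byte lives at rbuf->offset: a read at position pos maps to (rbuf->offset + pos)
modulo the buffer size, and each call is clamped at the physical end of the
buffer, so a read crossing the wrap point completes over two calls. A
self-contained model of the same arithmetic, with simplified types (assumption:
this mirrors, rather than reuses, the driver code):

#include <stddef.h>

struct ring {
	size_t start;	/* physical offset of oldest byte (rbuf->offset) */
	size_t size;	/* total buffer size (rbuf->size) */
	size_t len;	/* valid data length (rbuf->len) */
};

/*
 * Return the number of bytes one read at 'pos' may copy and, via
 * *phys, the physical offset it copies from.
 */
static long ring_chunk(const struct ring *r, long pos, long want, long *phys)
{
	long off, n = want;

	if (pos + n > (long)r->len)
		n = (long)r->len - pos;
	if (n <= 0)
		return 0;

	off = (long)r->start + pos;
	if (off >= (long)r->size)
		off -= (long)r->size;		/* wrap around once */

	if (n > (long)r->size - off)
		n = (long)r->size - off;	/* clamp at physical end */

	*phys = off;
	return n;
}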
+static int tmc_prepare_crashdata(struct tmc_drvdata *drvdata)
+{
+ char *bufp;
+ ssize_t len;
+ u32 status, size;
+ u64 rrp, rwp, dba;
+ struct tmc_resrv_buf *rbuf;
+ struct tmc_crash_metadata *mdata;
+
+ mdata = drvdata->crash_mdata.vaddr;
+ rbuf = &drvdata->resrv_buf;
+
+ rrp = mdata->tmc_rrp;
+ rwp = mdata->tmc_rwp;
+ dba = mdata->tmc_dba;
+ status = mdata->tmc_sts;
+ size = mdata->tmc_ram_size << 2;
+
+ /* Sync the buffer pointers */
+ rbuf->offset = rrp - dba;
+ if (status & TMC_STS_FULL)
+ rbuf->len = size;
+ else
+ rbuf->len = rwp - rrp;
+
+ /* Additional sanity checks for validating metadata */
+ if ((rbuf->offset > size) ||
+ (rbuf->len > size)) {
+ dev_dbg(&drvdata->csdev->dev,
+ "Offset and length invalid in tmc crash metadata\n");
+ return -EINVAL;
+ }
+
+ if (status & TMC_STS_FULL) {
+ len = tmc_get_resvbuf_trace(drvdata, 0x0,
+ CORESIGHT_BARRIER_PKT_SIZE, &bufp);
+ if (len >= CORESIGHT_BARRIER_PKT_SIZE) {
+ coresight_insert_barrier_packet(bufp);
+ /* The trace bytes changed, so recalculate both CRCs */
+ mdata->crc32_tdata = find_crash_tracedata_crc(drvdata,
+ mdata);
+ mdata->crc32_mdata = find_crash_metadata_crc(mdata);
+ }
+ }
+
+ return 0;
+}
+
static int tmc_read_prepare(struct tmc_drvdata *drvdata)
{
int ret = 0;
@@ -222,6 +345,84 @@ static const struct file_operations tmc_fops = {
.release = tmc_release,
};
+static int tmc_crashdata_open(struct inode *inode, struct file *file)
+{
+ int err = 0;
+ unsigned long flags;
+ struct tmc_resrv_buf *rbuf;
+ struct tmc_crash_metadata *mdata;
+ struct tmc_drvdata *drvdata = container_of(file->private_data,
+ struct tmc_drvdata,
+ crashdev);
+
+ mdata = drvdata->crash_mdata.vaddr;
+ rbuf = &drvdata->resrv_buf;
+
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (mdata->valid)
+ rbuf->reading = true;
+ else
+ err = -ENOENT;
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ if (err)
+ goto exit;
+
+ nonseekable_open(inode, file);
+ dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
+exit:
+ return err;
+}
+
+static ssize_t tmc_crashdata_read(struct file *file, char __user *data,
+ size_t len, loff_t *ppos)
+{
+ char *bufp;
+ ssize_t actual;
+ struct tmc_drvdata *drvdata = container_of(file->private_data,
+ struct tmc_drvdata,
+ crashdev);
+
+ actual = tmc_get_resvbuf_trace(drvdata, *ppos, len, &bufp);
+ if (actual <= 0)
+ return 0;
+
+ if (copy_to_user(data, bufp, actual)) {
+ dev_dbg(&drvdata->csdev->dev,
+ "%s: copy_to_user failed\n", __func__);
+ return -EFAULT;
+ }
+
+ *ppos += actual;
+ dev_dbg(&drvdata->csdev->dev, "%zu bytes copied\n", actual);
+
+ return actual;
+}
+
+static int tmc_crashdata_release(struct inode *inode, struct file *file)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct tmc_resrv_buf *rbuf;
+ struct tmc_drvdata *drvdata = container_of(file->private_data,
+ struct tmc_drvdata,
+ crashdev);
+
+ rbuf = &drvdata->resrv_buf;
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
+ rbuf->reading = false;
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
+ return ret;
+}
+
+static const struct file_operations tmc_crashdata_fops = {
+ .owner = THIS_MODULE,
+ .open = tmc_crashdata_open,
+ .read = tmc_crashdata_read,
+ .release = tmc_crashdata_release,
+};
+
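Once registered, the retained trace is read back through an ordinary misc
character device; the node is named crash_<device> (for example
/dev/crash_tmc_etf0, though the exact name depends on the platform's device
naming). A minimal, hedged userspace reader:

/* Minimal reader for retained crash trace; the device node name is an
 * assumption ("crash_" + coresight device name). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/crash_tmc_etf0", O_RDONLY);

	if (fd < 0) {
		perror("open");		/* ENOENT: no valid crash data */
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}

open() failing with ENOENT indicates the metadata was never marked valid, i.e.
no crash trace was retained.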
static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
{
enum tmc_mem_intf_width memwidth;
@@ -331,9 +532,40 @@ static ssize_t buffer_size_store(struct device *dev,
static DEVICE_ATTR_RW(buffer_size);
+static ssize_t stop_on_flush_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sprintf(buf, "%#x\n", drvdata->stop_on_flush);
+}
+
+static ssize_t stop_on_flush_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ u8 val;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtou8(buf, 0, &val);
+ if (ret)
+ return ret;
+ if (val)
+ drvdata->stop_on_flush = true;
+ else
+ drvdata->stop_on_flush = false;
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(stop_on_flush);
+
static struct attribute *coresight_tmc_attrs[] = {
&dev_attr_trigger_cntr.attr,
&dev_attr_buffer_size.attr,
+ &dev_attr_stop_on_flush.attr,
NULL,
};
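stop_on_flush is a plain boolean knob: when set, the enable paths below program
TMC_FFCR_STOP_ON_FLUSH so that capture stops on a flush trigger, which is what
lets the panic handlers freeze a consistent buffer. A small userspace sketch
for setting it (the sysfs path is an assumption based on the usual coresight
layout):

/* Illustrative only: sysfs path assumed from the usual coresight layout. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int set_stop_on_flush(const char *dev)	/* e.g. "tmc_etf0" */
{
	char path[128];
	int fd, ok;

	snprintf(path, sizeof(path),
		 "/sys/bus/coresight/devices/%s/stop_on_flush", dev);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ok = (write(fd, "1", 1) == 1);
	close(fd);
	return ok ? 0 : -1;
}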
@@ -398,6 +630,67 @@ static inline bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata)
static const struct amba_id tmc_ids[];
+static int of_tmc_get_reserved_resource_by_name(struct device *dev,
+ const char *name,
+ struct resource *res)
+{
+ int index, rc = -ENODEV;
+ struct device_node *node;
+
+ if (!is_of_node(dev->fwnode))
+ return -ENODEV;
+
+ index = of_property_match_string(dev->of_node, "memory-region-names",
+ name);
+ if (index < 0)
+ return rc;
+
+ node = of_parse_phandle(dev->of_node, "memory-region", index);
+ if (!node)
+ return rc;
+
+ if (!of_address_to_resource(node, 0, res) &&
+ res->start != 0 && resource_size(res) != 0)
+ rc = 0;
+ of_node_put(node);
+
+ return rc;
+}
+
+static void tmc_get_reserved_region(struct device *parent)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(parent);
+ struct resource res;
+
+ if (of_tmc_get_reserved_resource_by_name(parent, "tracedata", &res))
+ return;
+
+ drvdata->resrv_buf.vaddr = memremap(res.start,
+ resource_size(&res),
+ MEMREMAP_WC);
+ if (IS_ERR_OR_NULL(drvdata->resrv_buf.vaddr)) {
+ dev_err(parent, "Reserved trace buffer mapping failed\n");
+ return;
+ }
+
+ drvdata->resrv_buf.paddr = res.start;
+ drvdata->resrv_buf.size = resource_size(&res);
+
+ if (of_tmc_get_reserved_resource_by_name(parent, "metadata", &res))
+ return;
+
+ drvdata->crash_mdata.vaddr = memremap(res.start,
+ resource_size(&res),
+ MEMREMAP_WC);
+ if (IS_ERR_OR_NULL(drvdata->crash_mdata.vaddr)) {
+ dev_err(parent, "Metadata memory mapping failed\n");
+ return;
+ }
+
+ drvdata->crash_mdata.paddr = res.start;
+ drvdata->crash_mdata.size = resource_size(&res);
+}
+
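tmc_get_reserved_region() resolves the two optional regions by name, so the
platform wires them up through standard reserved-memory nodes. A hypothetical
device tree fragment, kept inside a C comment since the node names, labels and
addresses here are invented for illustration (only the "memory-region",
"memory-region-names", "tracedata" and "metadata" strings come from the code
above):

/*
 * Illustrative device tree wiring consumed by the lookup above:
 *
 *	reserved-memory {
 *		etf_trace: tracedata@f0000000 {
 *			reg = <0x0 0xf0000000 0x0 0x100000>;
 *			no-map;
 *		};
 *		etf_mdata: metadata@f0100000 {
 *			reg = <0x0 0xf0100000 0x0 0x1000>;
 *			no-map;
 *		};
 *	};
 *
 *	&etf {
 *		memory-region = <&etf_trace>, <&etf_mdata>;
 *		memory-region-names = "tracedata", "metadata";
 *	};
 */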
/* Detect and initialise the capabilities of a TMC ETR */
static int tmc_etr_setup_caps(struct device *parent, u32 devid,
struct csdev_access *access)
@@ -470,6 +763,22 @@ static u32 tmc_etr_get_max_burst_size(struct device *dev)
return burst_size;
}
+static void register_crash_dev_interface(struct tmc_drvdata *drvdata,
+ const char *name)
+{
+ drvdata->crashdev.name =
+ devm_kasprintf(&drvdata->csdev->dev, GFP_KERNEL, "%s_%s", "crash", name);
+ drvdata->crashdev.minor = MISC_DYNAMIC_MINOR;
+ drvdata->crashdev.fops = &tmc_crashdata_fops;
+ if (misc_register(&drvdata->crashdev)) {
+ dev_dbg(&drvdata->csdev->dev,
+ "Failed to setup user interface for crashdata\n");
+ drvdata->crashdev.fops = NULL;
+ } else
+ dev_info(&drvdata->csdev->dev,
+ "Valid crash tracedata found\n");
+}
+
static int __tmc_probe(struct device *dev, struct resource *res)
{
int ret = 0;
@@ -492,7 +801,7 @@ static int __tmc_probe(struct device *dev, struct resource *res)
drvdata->base = base;
desc.access = CSDEV_ACCESS_IOMEM(base);
- spin_lock_init(&drvdata->spinlock);
+ raw_spin_lock_init(&drvdata->spinlock);
devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
drvdata->config_type = BMVAL(devid, 6, 7);
@@ -508,6 +817,8 @@ static int __tmc_probe(struct device *dev, struct resource *res)
drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
}
+ tmc_get_reserved_region(dev);
+
desc.dev = dev;
switch (drvdata->config_type) {
@@ -568,9 +879,15 @@ static int __tmc_probe(struct device *dev, struct resource *res)
drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
drvdata->miscdev.fops = &tmc_fops;
ret = misc_register(&drvdata->miscdev);
- if (ret)
+ if (ret) {
coresight_unregister(drvdata->csdev);
+ goto out;
+ }
+
out:
+ if (is_tmc_crashdata_valid(drvdata) &&
+ !tmc_prepare_crashdata(drvdata))
+ register_crash_dev_interface(drvdata, desc.name);
return ret;
}
@@ -596,7 +913,7 @@ static void tmc_shutdown(struct amba_device *adev)
unsigned long flags;
struct tmc_drvdata *drvdata = amba_get_drvdata(adev);
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
if (coresight_get_mode(drvdata->csdev) == CS_MODE_DISABLED)
goto out;
@@ -610,7 +927,7 @@ static void tmc_shutdown(struct amba_device *adev)
* the system is going down after this.
*/
out:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
}
static void __tmc_remove(struct device *dev)
@@ -623,6 +940,8 @@ static void __tmc_remove(struct device *dev)
* handler to this device is closed.
*/
misc_deregister(&drvdata->miscdev);
+ if (drvdata->crashdev.fops)
+ misc_deregister(&drvdata->crashdev);
coresight_unregister(drvdata->csdev);
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index d4f641cd9de6..d858740001c2 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -19,6 +19,7 @@ static int tmc_set_etf_buffer(struct coresight_device *csdev,
static int __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
int rc = 0;
+ u32 ffcr;
CS_UNLOCK(drvdata->base);
@@ -32,10 +33,12 @@ static int __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
}
writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
- writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
- TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
- TMC_FFCR_TRIGON_TRIGIN,
- drvdata->base + TMC_FFCR);
+
+ ffcr = TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | TMC_FFCR_FON_FLIN |
+ TMC_FFCR_FON_TRIG_EVT | TMC_FFCR_TRIGON_TRIGIN;
+ if (drvdata->stop_on_flush)
+ ffcr |= TMC_FFCR_STOP_ON_FLUSH;
+ writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
tmc_enable_hw(drvdata);
@@ -182,9 +185,9 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
* If we don't have a buffer release the lock and allocate memory.
* Otherwise keep the lock and move along.
*/
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
if (!drvdata->buf) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Allocating the memory here while outside of the spinlock */
buf = kzalloc(drvdata->size, GFP_KERNEL);
@@ -192,7 +195,7 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
return -ENOMEM;
/* Let's try again */
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
}
if (drvdata->reading) {
@@ -225,7 +228,6 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
used = true;
drvdata->buf = buf;
}
-
ret = tmc_etb_enable_hw(drvdata);
if (!ret) {
coresight_set_mode(csdev, CS_MODE_SYSFS);
@@ -235,7 +237,7 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
used = false;
}
out:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Free memory outside the spinlock if need be */
if (!used)
@@ -253,7 +255,7 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
struct perf_output_handle *handle = data;
struct cs_buffers *buf = etm_perf_sink_config(handle);
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
do {
ret = -EINVAL;
if (drvdata->reading)
@@ -296,7 +298,7 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
csdev->refcnt++;
}
} while (0);
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return ret;
}
@@ -331,16 +333,16 @@ static int tmc_disable_etf_sink(struct coresight_device *csdev)
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
}
csdev->refcnt--;
if (csdev->refcnt) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
}
@@ -351,7 +353,7 @@ static int tmc_disable_etf_sink(struct coresight_device *csdev)
drvdata->pid = -1;
coresight_set_mode(csdev, CS_MODE_DISABLED);
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n");
return 0;
@@ -366,9 +368,9 @@ static int tmc_enable_etf_link(struct coresight_device *csdev,
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
bool first_enable = false;
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
}
@@ -381,7 +383,7 @@ static int tmc_enable_etf_link(struct coresight_device *csdev,
}
if (!ret)
csdev->refcnt++;
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (first_enable)
dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
@@ -396,9 +398,9 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
bool last_disable = false;
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return;
}
@@ -408,7 +410,7 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
coresight_set_mode(csdev, CS_MODE_DISABLED);
last_disable = true;
}
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (last_disable)
dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
@@ -488,7 +490,7 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
if (WARN_ON_ONCE(coresight_get_mode(csdev) != CS_MODE_PERF))
return 0;
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't do anything if another tracer is using this sink */
if (csdev->refcnt != 1)
@@ -585,11 +587,86 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
*/
CS_LOCK(drvdata->base);
out:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return to_read;
}
+static int tmc_panic_sync_etf(struct coresight_device *csdev)
+{
+ u32 val;
+ struct tmc_crash_metadata *mdata;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ mdata = (struct tmc_crash_metadata *)drvdata->crash_mdata.vaddr;
+
+ /* Make sure we have valid reserved memory */
+ if (!tmc_has_reserved_buffer(drvdata) ||
+ !tmc_has_crash_mdata_buffer(drvdata))
+ return 0;
+
+ tmc_crashdata_set_invalid(drvdata);
+
+ CS_UNLOCK(drvdata->base);
+
+ /* Proceed only if the ETF is enabled and configured as a sink */
+ val = readl(drvdata->base + TMC_CTL);
+ if (!(val & TMC_CTL_CAPT_EN))
+ goto out;
+ val = readl(drvdata->base + TMC_MODE);
+ if (val != TMC_MODE_CIRCULAR_BUFFER)
+ goto out;
+
+ val = readl(drvdata->base + TMC_FFSR);
+ /* Do a manual flush and stop only if it's not auto-stopped */
+ if (!(val & TMC_FFSR_FT_STOPPED)) {
+ dev_dbg(&csdev->dev,
+ "%s: Triggering manual flush\n", __func__);
+ tmc_flush_and_stop(drvdata);
+ } else
+ tmc_wait_for_tmcready(drvdata);
+
+ /* Sync registers from hardware to metadata region */
+ mdata->tmc_sts = readl(drvdata->base + TMC_STS);
+ mdata->tmc_mode = readl(drvdata->base + TMC_MODE);
+ mdata->tmc_ffcr = readl(drvdata->base + TMC_FFCR);
+ mdata->tmc_ffsr = readl(drvdata->base + TMC_FFSR);
+
+ /* Sync Internal SRAM to reserved trace buffer region */
+ drvdata->buf = drvdata->resrv_buf.vaddr;
+ tmc_etb_dump_hw(drvdata);
+ /* Store as per RSZ register convention */
+ mdata->tmc_ram_size = drvdata->len >> 2;
+
+ /* Other fields for processing trace buffer reads */
+ mdata->tmc_rrp = 0;
+ mdata->tmc_dba = 0;
+ mdata->tmc_rwp = drvdata->len;
+ mdata->trace_paddr = drvdata->resrv_buf.paddr;
+
+ mdata->version = CS_CRASHDATA_VERSION;
+
+ /*
+ * Make sure all previous writes are ordered
+ * before we mark the metadata valid
+ */
+ dmb(sy);
+ mdata->valid = true;
+ /*
+ * The order below must be maintained: the metadata CRC covers the
+ * crc32_tdata field, so the trace data CRC must be computed first
+ */
+ mdata->crc32_tdata = find_crash_tracedata_crc(drvdata, mdata);
+ mdata->crc32_mdata = find_crash_metadata_crc(mdata);
+
+ tmc_disable_hw(drvdata);
+
+ dev_dbg(&csdev->dev, "%s: success\n", __func__);
+out:
+ CS_LOCK(drvdata->base);
+ return 0;
+}
+
static const struct coresight_ops_sink tmc_etf_sink_ops = {
.enable = tmc_enable_etf_sink,
.disable = tmc_disable_etf_sink,
@@ -603,6 +680,10 @@ static const struct coresight_ops_link tmc_etf_link_ops = {
.disable = tmc_disable_etf_link,
};
+static const struct coresight_ops_panic tmc_etf_sync_ops = {
+ .sync = tmc_panic_sync_etf,
+};
+
const struct coresight_ops tmc_etb_cs_ops = {
.sink_ops = &tmc_etf_sink_ops,
};
@@ -610,6 +691,7 @@ const struct coresight_ops tmc_etb_cs_ops = {
const struct coresight_ops tmc_etf_cs_ops = {
.sink_ops = &tmc_etf_sink_ops,
.link_ops = &tmc_etf_link_ops,
+ .panic_ops = &tmc_etf_sync_ops,
};
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
@@ -623,7 +705,7 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
drvdata->config_type != TMC_CONFIG_TYPE_ETF))
return -EINVAL;
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
ret = -EBUSY;
@@ -655,7 +737,7 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
drvdata->reading = true;
out:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return ret;
}
@@ -672,14 +754,14 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
drvdata->config_type != TMC_CONFIG_TYPE_ETF))
return -EINVAL;
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
/* Re-enable the TMC if need be */
if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
/* There is no point in reading a TMC in HW FIFO mode */
mode = readl_relaxed(drvdata->base + TMC_MODE);
if (mode != TMC_MODE_CIRCULAR_BUFFER) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EINVAL;
}
/*
@@ -693,7 +775,7 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
memset(drvdata->buf, 0, drvdata->size);
rc = __tmc_etb_enable_hw(drvdata);
if (rc) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return rc;
}
} else {
@@ -706,7 +788,7 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
}
drvdata->reading = false;
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
/*
* Free allocated memory outside of the spinlock. There is no need
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index a48bb85d0e7f..76a8cb29b68a 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -30,6 +30,7 @@ struct etr_buf_hw {
bool has_iommu;
bool has_etr_sg;
bool has_catu;
+ bool has_resrv;
};
/*
@@ -696,6 +697,75 @@ static const struct etr_buf_operations etr_flat_buf_ops = {
};
/*
+ * tmc_etr_alloc_resrv_buf: Allocate a contiguous DMA buffer from reserved region.
+ */
+static int tmc_etr_alloc_resrv_buf(struct tmc_drvdata *drvdata,
+ struct etr_buf *etr_buf, int node,
+ void **pages)
+{
+ struct etr_flat_buf *resrv_buf;
+ struct device *real_dev = drvdata->csdev->dev.parent;
+
+ /* We cannot reuse existing pages for resrv buf */
+ if (pages)
+ return -EINVAL;
+
+ resrv_buf = kzalloc(sizeof(*resrv_buf), GFP_KERNEL);
+ if (!resrv_buf)
+ return -ENOMEM;
+
+ resrv_buf->daddr = dma_map_resource(real_dev, drvdata->resrv_buf.paddr,
+ drvdata->resrv_buf.size,
+ DMA_FROM_DEVICE, 0);
+ if (dma_mapping_error(real_dev, resrv_buf->daddr)) {
+ dev_err(real_dev, "failed to map source buffer address\n");
+ kfree(resrv_buf);
+ return -ENOMEM;
+ }
+
+ resrv_buf->vaddr = drvdata->resrv_buf.vaddr;
+ resrv_buf->size = etr_buf->size = drvdata->resrv_buf.size;
+ resrv_buf->dev = &drvdata->csdev->dev;
+ etr_buf->hwaddr = resrv_buf->daddr;
+ etr_buf->mode = ETR_MODE_RESRV;
+ etr_buf->private = resrv_buf;
+ return 0;
+}
+
+static void tmc_etr_free_resrv_buf(struct etr_buf *etr_buf)
+{
+ struct etr_flat_buf *resrv_buf = etr_buf->private;
+
+ if (resrv_buf && resrv_buf->daddr) {
+ struct device *real_dev = resrv_buf->dev->parent;
+
+ dma_unmap_resource(real_dev, resrv_buf->daddr,
+ resrv_buf->size, DMA_FROM_DEVICE, 0);
+ }
+ kfree(resrv_buf);
+}
+
+static void tmc_etr_sync_resrv_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
+{
+ /*
+ * Adjust the buffer to point to the beginning of the trace data
+ * and update the available trace data.
+ */
+ etr_buf->offset = rrp - etr_buf->hwaddr;
+ if (etr_buf->full)
+ etr_buf->len = etr_buf->size;
+ else
+ etr_buf->len = rwp - rrp;
+}
+
+static const struct etr_buf_operations etr_resrv_buf_ops = {
+ .alloc = tmc_etr_alloc_resrv_buf,
+ .free = tmc_etr_free_resrv_buf,
+ .sync = tmc_etr_sync_resrv_buf,
+ .get_data = tmc_etr_get_data_flat_buf,
+};
+
+/*
* tmc_etr_alloc_sg_buf: Allocate an SG buf @etr_buf. Setup the parameters
* appropriately.
*/
@@ -801,6 +871,7 @@ static const struct etr_buf_operations *etr_buf_ops[] = {
[ETR_MODE_FLAT] = &etr_flat_buf_ops,
[ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
[ETR_MODE_CATU] = NULL,
+ [ETR_MODE_RESRV] = &etr_resrv_buf_ops
};
void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu)
@@ -826,6 +897,7 @@ static inline int tmc_etr_mode_alloc_buf(int mode,
case ETR_MODE_FLAT:
case ETR_MODE_ETR_SG:
case ETR_MODE_CATU:
+ case ETR_MODE_RESRV:
if (etr_buf_ops[mode] && etr_buf_ops[mode]->alloc)
rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
node, pages);
@@ -844,6 +916,7 @@ static void get_etr_buf_hw(struct device *dev, struct etr_buf_hw *buf_hw)
buf_hw->has_iommu = iommu_get_domain_for_dev(dev->parent);
buf_hw->has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
buf_hw->has_catu = !!tmc_etr_get_catu_device(drvdata);
+ buf_hw->has_resrv = tmc_has_reserved_buffer(drvdata);
}
static bool etr_can_use_flat_mode(struct etr_buf_hw *buf_hw, ssize_t etr_buf_size)
@@ -987,7 +1060,7 @@ static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
static int __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
- u32 axictl, sts;
+ u32 axictl, sts, ffcr;
struct etr_buf *etr_buf = drvdata->etr_buf;
int rc = 0;
@@ -1033,10 +1106,12 @@ static int __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
writel_relaxed(sts, drvdata->base + TMC_STS);
}
- writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
- TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
- TMC_FFCR_TRIGON_TRIGIN,
- drvdata->base + TMC_FFCR);
+ ffcr = TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | TMC_FFCR_FON_FLIN |
+ TMC_FFCR_FON_TRIG_EVT | TMC_FFCR_TRIGON_TRIGIN;
+ if (drvdata->stop_on_flush)
+ ffcr |= TMC_FFCR_STOP_ON_FLUSH;
+ writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
+
writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
tmc_enable_hw(drvdata);
@@ -1176,10 +1251,10 @@ static struct etr_buf *tmc_etr_get_sysfs_buffer(struct coresight_device *csdev)
* buffer, provided the size matches. Any allocation has to be done
* with the lock released.
*/
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Allocate memory with the locks released */
free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
@@ -1187,7 +1262,7 @@ static struct etr_buf *tmc_etr_get_sysfs_buffer(struct coresight_device *csdev)
return new_buf;
/* Let's try again */
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
}
if (drvdata->reading || coresight_get_mode(csdev) == CS_MODE_PERF) {
@@ -1206,7 +1281,7 @@ static struct etr_buf *tmc_etr_get_sysfs_buffer(struct coresight_device *csdev)
}
out:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Free memory outside the spinlock if need be */
if (free_buf)
@@ -1224,7 +1299,7 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
if (IS_ERR(sysfs_buf))
return PTR_ERR(sysfs_buf);
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
/*
* In sysFS mode we can have multiple writers per sink. Since this
@@ -1243,7 +1318,7 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
}
out:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (!ret)
dev_dbg(&csdev->dev, "TMC-ETR enabled\n");
@@ -1562,17 +1637,17 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
struct etr_perf_buffer *etr_perf = config;
struct etr_buf *etr_buf = etr_perf->etr_buf;
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't do anything if another tracer is using this sink */
if (csdev->refcnt != 1) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
goto out;
}
if (WARN_ON(drvdata->perf_buf != etr_buf)) {
lost = true;
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
goto out;
}
@@ -1582,7 +1657,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
tmc_sync_etr_buf(drvdata);
CS_LOCK(drvdata->base);
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
lost = etr_buf->full;
offset = etr_buf->offset;
@@ -1651,7 +1726,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
struct perf_output_handle *handle = data;
struct etr_perf_buffer *etr_perf = etm_perf_sink_config(handle);
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't use this sink if it is already claimed by sysFS */
if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
rc = -EBUSY;
@@ -1691,7 +1766,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
}
unlock_out:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return rc;
}
@@ -1713,16 +1788,16 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev)
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
}
csdev->refcnt--;
if (csdev->refcnt) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
}
@@ -1735,12 +1810,80 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev)
/* Reset perf specific data */
drvdata->perf_buf = NULL;
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_dbg(&csdev->dev, "TMC-ETR disabled\n");
return 0;
}
+static int tmc_panic_sync_etr(struct coresight_device *csdev)
+{
+ u32 val;
+ struct tmc_crash_metadata *mdata;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ mdata = (struct tmc_crash_metadata *)drvdata->crash_mdata.vaddr;
+
+ if (!drvdata->etr_buf)
+ return 0;
+
+ /* Being in RESRV mode implies valid reserved memory as well */
+ if (drvdata->etr_buf->mode != ETR_MODE_RESRV)
+ return 0;
+
+ if (!tmc_has_crash_mdata_buffer(drvdata))
+ return 0;
+
+ CS_UNLOCK(drvdata->base);
+
+ /* Proceed only if ETR is enabled */
+ val = readl(drvdata->base + TMC_CTL);
+ if (!(val & TMC_CTL_CAPT_EN))
+ goto out;
+
+ val = readl(drvdata->base + TMC_FFSR);
+ /* Do a manual flush and stop only if it's not auto-stopped */
+ if (!(val & TMC_FFSR_FT_STOPPED)) {
+ dev_dbg(&csdev->dev,
+ "%s: Triggering manual flush\n", __func__);
+ tmc_flush_and_stop(drvdata);
+ } else
+ tmc_wait_for_tmcready(drvdata);
+
+ /* Sync registers from hardware to metadata region */
+ mdata->tmc_ram_size = readl(drvdata->base + TMC_RSZ);
+ mdata->tmc_sts = readl(drvdata->base + TMC_STS);
+ mdata->tmc_mode = readl(drvdata->base + TMC_MODE);
+ mdata->tmc_ffcr = readl(drvdata->base + TMC_FFCR);
+ mdata->tmc_ffsr = readl(drvdata->base + TMC_FFSR);
+ mdata->tmc_rrp = tmc_read_rrp(drvdata);
+ mdata->tmc_rwp = tmc_read_rwp(drvdata);
+ mdata->tmc_dba = tmc_read_dba(drvdata);
+ mdata->trace_paddr = drvdata->resrv_buf.paddr;
+ mdata->version = CS_CRASHDATA_VERSION;
+
+ /*
+ * Make sure all previous writes are ordered
+ * before we mark the metadata valid
+ */
+ dmb(sy);
+ mdata->valid = true;
+ /*
+ * The order below must be maintained: the metadata CRC covers the
+ * crc32_tdata field, so the trace data CRC must be computed first
+ */
+ mdata->crc32_tdata = find_crash_tracedata_crc(drvdata, mdata);
+ mdata->crc32_mdata = find_crash_metadata_crc(mdata);
+
+ tmc_disable_hw(drvdata);
+
+ dev_dbg(&csdev->dev, "%s: success\n", __func__);
+out:
+ CS_LOCK(drvdata->base);
+
+ return 0;
+}
+
static const struct coresight_ops_sink tmc_etr_sink_ops = {
.enable = tmc_enable_etr_sink,
.disable = tmc_disable_etr_sink,
@@ -1749,8 +1892,13 @@ static const struct coresight_ops_sink tmc_etr_sink_ops = {
.free_buffer = tmc_free_etr_buffer,
};
+static const struct coresight_ops_panic tmc_etr_sync_ops = {
+ .sync = tmc_panic_sync_etr,
+};
+
const struct coresight_ops tmc_etr_cs_ops = {
.sink_ops = &tmc_etr_sink_ops,
+ .panic_ops = &tmc_etr_sync_ops,
};
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
@@ -1762,7 +1910,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
return -EINVAL;
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
ret = -EBUSY;
goto out;
@@ -1784,7 +1932,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
drvdata->reading = true;
out:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return ret;
}
@@ -1798,7 +1946,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
return -EINVAL;
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
/* RE-enable the TMC if need be */
if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
@@ -1818,7 +1966,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
}
drvdata->reading = false;
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Free allocated memory outside of the spinlock */
if (sysfs_buf)
@@ -1831,6 +1979,7 @@ static const char *const buf_modes_str[] = {
[ETR_MODE_FLAT] = "flat",
[ETR_MODE_ETR_SG] = "tmc-sg",
[ETR_MODE_CATU] = "catu",
+ [ETR_MODE_RESRV] = "resrv",
[ETR_MODE_AUTO] = "auto",
};
@@ -1849,6 +1998,9 @@ static ssize_t buf_modes_available_show(struct device *dev,
if (buf_hw.has_catu)
size += sysfs_emit_at(buf, size, "%s ", buf_modes_str[ETR_MODE_CATU]);
+ if (buf_hw.has_resrv)
+ size += sysfs_emit_at(buf, size, "%s ", buf_modes_str[ETR_MODE_RESRV]);
+
size += sysfs_emit_at(buf, size, "\n");
return size;
}
@@ -1862,6 +2014,26 @@ static ssize_t buf_mode_preferred_show(struct device *dev,
return sysfs_emit(buf, "%s\n", buf_modes_str[drvdata->etr_mode]);
}
+static int buf_mode_set_resrv(struct tmc_drvdata *drvdata)
+{
+ int err = -EBUSY;
+ unsigned long flags;
+ struct tmc_resrv_buf *rbuf;
+
+ rbuf = &drvdata->resrv_buf;
+
+ /* Ensure there are no active crashdata read sessions */
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (!rbuf->reading) {
+ tmc_crashdata_set_invalid(drvdata);
+ rbuf->len = 0;
+ drvdata->etr_mode = ETR_MODE_RESRV;
+ err = 0;
+ }
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return err;
+}
+
static ssize_t buf_mode_preferred_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
@@ -1876,6 +2048,8 @@ static ssize_t buf_mode_preferred_store(struct device *dev,
drvdata->etr_mode = ETR_MODE_ETR_SG;
else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_CATU]) && buf_hw.has_catu)
drvdata->etr_mode = ETR_MODE_CATU;
+ else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_RESRV]) && buf_hw.has_resrv)
+ return buf_mode_set_resrv(drvdata) ? : size;
else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_AUTO]))
drvdata->etr_mode = ETR_MODE_AUTO;
else
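The new mode is selected through the existing buf_mode_preferred attribute;
unlike the other modes, "resrv" is routed through buf_mode_set_resrv(), which
refuses the switch while a crashdata read session is open and invalidates any
stale metadata first. A usage sketch (device name illustrative):

/*
 * From userspace (sysfs path depends on the actual device name):
 *
 *	# echo resrv > /sys/bus/coresight/devices/tmc_etr0/buf_mode_preferred
 *
 * The write returns -EBUSY while the corresponding /dev/crash_tmc_* node
 * is held open for reading.
 */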
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 2671926be62a..6541a27a018e 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -12,6 +12,7 @@
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
+#include <linux/crc32.h>
#define TMC_RSZ 0x004
#define TMC_STS 0x00c
@@ -76,6 +77,9 @@
#define TMC_AXICTL_AXCACHE_OS (0xf << 2)
#define TMC_AXICTL_ARCACHE_OS (0xf << 16)
+/* TMC_FFSR - 0x300 */
+#define TMC_FFSR_FT_STOPPED BIT(1)
+
/* TMC_FFCR - 0x304 */
#define TMC_FFCR_FLUSHMAN_BIT 6
#define TMC_FFCR_EN_FMT BIT(0)
@@ -94,6 +98,9 @@
#define TMC_AUTH_NSID_MASK GENMASK(1, 0)
+/* Major version 1 Minor version 0 */
+#define CS_CRASHDATA_VERSION (1 << 16)
+
enum tmc_config_type {
TMC_CONFIG_TYPE_ETB,
TMC_CONFIG_TYPE_ETR,
@@ -131,10 +138,30 @@ enum tmc_mem_intf_width {
#define CORESIGHT_SOC_600_ETR_CAPS \
(TMC_ETR_SAVE_RESTORE | TMC_ETR_AXI_ARCACHE)
+/* TMC metadata region for ETR and ETF configurations */
+struct tmc_crash_metadata {
+ uint32_t crc32_mdata; /* crc of metadata */
+ uint32_t crc32_tdata; /* crc of tracedata */
+ uint32_t version; /* 31:16 Major version, 15:0 Minor version */
+ uint32_t valid; /* Indicate if this ETF/ETR was enabled */
+ uint32_t tmc_ram_size; /* Ram Size register */
+ uint32_t tmc_sts; /* Status register */
+ uint32_t tmc_mode; /* Mode register */
+ uint32_t tmc_ffcr; /* Formatter and flush control register */
+ uint32_t tmc_ffsr; /* Formatter and flush status register */
+ uint32_t reserved32;
+ uint64_t tmc_rrp; /* Ram Read pointer register */
+ uint64_t tmc_rwp; /* Ram Write pointer register */
+ uint64_t tmc_dba; /* Data buffer address register */
+ uint64_t trace_paddr; /* Phys address of trace buffer */
+ uint64_t reserved64[3];
+};
+
enum etr_mode {
ETR_MODE_FLAT, /* Uses contiguous flat buffer */
ETR_MODE_ETR_SG, /* Uses in-built TMC ETR SG mechanism */
ETR_MODE_CATU, /* Use SG mechanism in CATU */
+ ETR_MODE_RESRV, /* Use reserved region contiguous buffer */
ETR_MODE_AUTO, /* Use the default mechanism */
};
@@ -165,15 +192,35 @@ struct etr_buf {
};
/**
+ * @paddr : Start address of reserved memory region.
+ * @vaddr : Corresponding CPU virtual address.
+ * @size : Size of reserved memory region.
+ * @offset : Offset of the trace data in the buffer for consumption.
+ * @reading : Flag to indicate if reading is active
+ * @len : Available trace data @buf (may wrap around to the beginning).
+ */
+struct tmc_resrv_buf {
+ phys_addr_t paddr;
+ void *vaddr;
+ size_t size;
+ unsigned long offset;
+ bool reading;
+ s64 len;
+};
+
+/**
* struct tmc_drvdata - specifics associated to an TMC component
* @pclk: APB clock if present, otherwise NULL
* @base: memory mapped base address for this component.
* @csdev: component vitals needed by the framework.
* @miscdev: specifics to handle "/dev/xyz.tmc" entry.
+ * @crashdev: specifics to handle "/dev/crash_tmc_xyz" entry for reading
+ * crash tracedata.
* @spinlock: only one at a time pls.
* @pid: Process ID of the process that owns the session that is using
* this component. For example this would be the pid of the Perf
* process.
+ * @stop_on_flush: User configuration to stop trace capture on a flush trigger.
* @buf: Snapshot of the trace data for ETF/ETB.
* @etr_buf: details of buffer used in TMC-ETR
* @len: size of the available trace for ETF/ETB.
@@ -189,15 +236,23 @@ struct etr_buf {
* @idr_mutex: Access serialisation for idr.
* @sysfs_buf: SYSFS buffer for ETR.
* @perf_buf: PERF buffer for ETR.
+ * @resrv_buf: Used by ETR as hardware trace buffer and for trace data
+ * retention (after crash) only when ETR_MODE_RESRV buffer
+ * mode is enabled. Used by ETF for trace data retention
+ * (after crash) by default.
+ * @crash_mdata: Reserved memory for storing tmc crash metadata.
+ * Used by ETR/ETF.
*/
struct tmc_drvdata {
struct clk *pclk;
void __iomem *base;
struct coresight_device *csdev;
struct miscdevice miscdev;
- spinlock_t spinlock;
+ struct miscdevice crashdev;
+ raw_spinlock_t spinlock;
pid_t pid;
bool reading;
+ bool stop_on_flush;
union {
char *buf; /* TMC ETB */
struct etr_buf *etr_buf; /* TMC ETR */
@@ -214,6 +269,8 @@ struct tmc_drvdata {
struct mutex idr_mutex;
struct etr_buf *sysfs_buf;
struct etr_buf *perf_buf;
+ struct tmc_resrv_buf resrv_buf;
+ struct tmc_resrv_buf crash_mdata;
};
struct etr_buf_operations {
@@ -263,6 +320,7 @@ void tmc_flush_and_stop(struct tmc_drvdata *drvdata);
void tmc_enable_hw(struct tmc_drvdata *drvdata);
void tmc_disable_hw(struct tmc_drvdata *drvdata);
u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata);
+int tmc_read_prepare_crashdata(struct tmc_drvdata *drvdata);
/* ETB/ETF functions */
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata);
@@ -325,12 +383,58 @@ void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
u64 offset, u64 size);
ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
u64 offset, size_t len, char **bufpp);
+
static inline unsigned long
tmc_sg_table_buf_size(struct tmc_sg_table *sg_table)
{
return (unsigned long)sg_table->data_pages.nr_pages << PAGE_SHIFT;
}
+static inline bool tmc_has_reserved_buffer(struct tmc_drvdata *drvdata)
+{
+ if (drvdata->resrv_buf.vaddr &&
+ drvdata->resrv_buf.size)
+ return true;
+ return false;
+}
+
+static inline bool tmc_has_crash_mdata_buffer(struct tmc_drvdata *drvdata)
+{
+ if (drvdata->crash_mdata.vaddr &&
+ drvdata->crash_mdata.size)
+ return true;
+ return false;
+}
+
+static inline void tmc_crashdata_set_invalid(struct tmc_drvdata *drvdata)
+{
+ struct tmc_crash_metadata *mdata;
+
+ mdata = (struct tmc_crash_metadata *)drvdata->crash_mdata.vaddr;
+
+ if (tmc_has_crash_mdata_buffer(drvdata))
+ mdata->valid = false;
+}
+
+static inline uint32_t find_crash_metadata_crc(struct tmc_crash_metadata *md)
+{
+ unsigned long crc_size;
+
+ crc_size = sizeof(struct tmc_crash_metadata) -
+ offsetof(struct tmc_crash_metadata, crc32_tdata);
+ return crc32_le(0, (void *)&md->crc32_tdata, crc_size);
+}
+
+static inline uint32_t find_crash_tracedata_crc(struct tmc_drvdata *drvdata,
+ struct tmc_crash_metadata *md)
+{
+ unsigned long crc_size;
+
+ /* Take CRC of configured buffer size to keep it simple */
+ crc_size = md->tmc_ram_size << 2;
+ return crc32_le(0, (void *)drvdata->resrv_buf.vaddr, crc_size);
+}
+
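These two helpers pin down exactly what an offline consumer must recompute:
the metadata CRC covers the struct from crc32_tdata to the end, and the trace
CRC covers tmc_ram_size 32-bit words (hence the << 2). A hedged userspace
check over raw dumps of both regions, assuming zlib's crc32(), which
implements the same polynomial as the kernel's crc32_le:

/* Offline validation sketch for dumped crash regions; illustrative only. */
#include <stdint.h>
#include <stddef.h>
#include <zlib.h>

struct tmc_crash_metadata {	/* mirrors the layout in coresight-tmc.h */
	uint32_t crc32_mdata, crc32_tdata, version, valid;
	uint32_t tmc_ram_size, tmc_sts, tmc_mode, tmc_ffcr, tmc_ffsr;
	uint32_t reserved32;
	uint64_t tmc_rrp, tmc_rwp, tmc_dba, trace_paddr;
	uint64_t reserved64[3];
};

static int crashdata_ok(const struct tmc_crash_metadata *md,
			const unsigned char *trace)
{
	size_t mlen = sizeof(*md) -
		      offsetof(struct tmc_crash_metadata, crc32_tdata);

	if (!md->valid)
		return 0;
	if (crc32(0, (const unsigned char *)&md->crc32_tdata, mlen) !=
	    md->crc32_mdata)
		return 0;
	/* RSZ counts 32-bit words, hence the << 2 */
	return crc32(0, trace, (size_t)md->tmc_ram_size << 2) ==
	       md->crc32_tdata;
}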
struct coresight_device *tmc_etr_get_catu_device(struct tmc_drvdata *drvdata);
void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu);
diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
index 189a4abc2561..0633f04beb24 100644
--- a/drivers/hwtracing/coresight/coresight-tpda.c
+++ b/drivers/hwtracing/coresight/coresight-tpda.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/amba/bus.h>
@@ -68,11 +68,12 @@ static int tpdm_read_element_size(struct tpda_drvdata *drvdata,
int rc = -EINVAL;
struct tpdm_drvdata *tpdm_data = dev_get_drvdata(csdev->dev.parent);
- if (tpdm_has_dsb_dataset(tpdm_data)) {
+ if (tpdm_data->dsb) {
rc = fwnode_property_read_u32(dev_fwnode(csdev->dev.parent),
"qcom,dsb-element-bits", &drvdata->dsb_esize);
}
- if (tpdm_has_cmb_dataset(tpdm_data)) {
+
+ if (tpdm_data->cmb) {
rc = fwnode_property_read_u32(dev_fwnode(csdev->dev.parent),
"qcom,cmb-element-bits", &drvdata->cmb_esize);
}
@@ -241,12 +242,23 @@ static void tpda_disable(struct coresight_device *csdev,
dev_dbg(drvdata->dev, "TPDA inport %d disabled\n", in->dest_port);
}
+static int tpda_trace_id(struct coresight_device *csdev, __maybe_unused enum cs_mode mode,
+ __maybe_unused struct coresight_device *sink)
+{
+ struct tpda_drvdata *drvdata;
+
+ drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ return drvdata->atid;
+}
+
static const struct coresight_ops_link tpda_link_ops = {
.enable = tpda_enable,
.disable = tpda_disable,
};
static const struct coresight_ops tpda_cs_ops = {
+ .trace_id = tpda_trace_id,
.link_ops = &tpda_link_ops,
};
@@ -331,7 +343,7 @@ static void tpda_remove(struct amba_device *adev)
* Different TPDA has different periph id.
* The difference is 0-7 bits' value. So ignore 0-7 bits.
*/
-static struct amba_id tpda_ids[] = {
+static const struct amba_id tpda_ids[] = {
{
.id = 0x000f0f00,
.mask = 0x000fff00,
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index c38f9701665e..7214e65097ec 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/amba/bus.h>
@@ -21,6 +21,21 @@
DEFINE_CORESIGHT_DEVLIST(tpdm_devs, "tpdm");
+static bool tpdm_has_dsb_dataset(struct tpdm_drvdata *drvdata)
+{
+ return (drvdata->datasets & TPDM_PIDR0_DS_DSB);
+}
+
+static bool tpdm_has_cmb_dataset(struct tpdm_drvdata *drvdata)
+{
+ return (drvdata->datasets & TPDM_PIDR0_DS_CMB);
+}
+
+static bool tpdm_has_mcmb_dataset(struct tpdm_drvdata *drvdata)
+{
+ return (drvdata->datasets & TPDM_PIDR0_DS_MCMB);
+}
+
/* Read dataset array member with the index number */
static ssize_t tpdm_simple_dataset_show(struct device *dev,
struct device_attribute *attr,
@@ -198,7 +213,7 @@ static umode_t tpdm_cmb_is_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
- if (drvdata && tpdm_has_cmb_dataset(drvdata))
+ if (drvdata && drvdata->cmb)
return attr->mode;
return 0;
@@ -237,6 +252,18 @@ static umode_t tpdm_cmb_msr_is_visible(struct kobject *kobj,
return 0;
}
+static umode_t tpdm_mcmb_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (drvdata && tpdm_has_mcmb_dataset(drvdata))
+ return attr->mode;
+
+ return 0;
+}
+
static void tpdm_reset_datasets(struct tpdm_drvdata *drvdata)
{
if (tpdm_has_dsb_dataset(drvdata)) {
@@ -388,7 +415,7 @@ static void tpdm_enable_cmb(struct tpdm_drvdata *drvdata)
{
u32 val, i;
- if (!tpdm_has_cmb_dataset(drvdata))
+ if (!drvdata->cmb)
return;
/* Configure pattern registers */
@@ -415,6 +442,19 @@ static void tpdm_enable_cmb(struct tpdm_drvdata *drvdata)
val |= TPDM_CMB_CR_MODE;
else
val &= ~TPDM_CMB_CR_MODE;
+
+ if (tpdm_has_mcmb_dataset(drvdata)) {
+ val &= ~TPDM_CMB_CR_XTRIG_LNSEL;
+		/* Select the lane that participates in the output pattern */
+ val |= FIELD_PREP(TPDM_CMB_CR_XTRIG_LNSEL,
+ drvdata->cmb->mcmb.trig_lane);
+
+		/* Enable the selected lanes */
+ val &= ~TPDM_CMB_CR_E_LN;
+ val |= FIELD_PREP(TPDM_CMB_CR_E_LN,
+ drvdata->cmb->mcmb.lane_select);
+ }
+
/* Set the enable bit of CMB control register to 1 */
val |= TPDM_CMB_CR_ENA;
writel_relaxed(val, drvdata->base + TPDM_CMB_CR);
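
For MCMB, the enable path above read-modify-writes two bit-fields of the shared CMB control register. A self-contained userspace sketch of the GENMASK()/FIELD_PREP() arithmetic; the macro definitions below are simplified stand-ins for the kernel's (assuming GCC/Clang for __builtin_ctz), and the register value is made up:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's GENMASK()/FIELD_PREP() */
#define GENMASK(h, l)     (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v)  (((uint32_t)(v) << __builtin_ctz(m)) & (m))

#define CR_XTRIG_LNSEL GENMASK(20, 18) /* trigger lane select */
#define CR_E_LN        GENMASK(17, 10) /* per-lane enable bits */

int main(void)
{
	uint32_t val = 0xdeadbeef; /* pretend this was read from TPDM_CMB_CR */
	uint8_t trig_lane = 3, lane_select = 0x5a;

	val &= ~CR_XTRIG_LNSEL;                       /* clear, then set */
	val |= FIELD_PREP(CR_XTRIG_LNSEL, trig_lane);
	val &= ~CR_E_LN;
	val |= FIELD_PREP(CR_E_LN, lane_select);

	printf("CR = 0x%08x\n", val);
	return 0;
}
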
@@ -440,7 +480,7 @@ static void __tpdm_enable(struct tpdm_drvdata *drvdata)
static int tpdm_enable(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode,
- __maybe_unused struct coresight_trace_id_map *id_map)
+ __maybe_unused struct coresight_path *path)
{
struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -480,7 +520,7 @@ static void tpdm_disable_cmb(struct tpdm_drvdata *drvdata)
{
u32 val;
- if (!tpdm_has_cmb_dataset(drvdata))
+ if (!drvdata->cmb)
return;
val = readl_relaxed(drvdata->base + TPDM_CMB_CR);
@@ -542,12 +582,14 @@ static int tpdm_datasets_setup(struct tpdm_drvdata *drvdata)
if (!drvdata->dsb)
return -ENOMEM;
}
- if (tpdm_has_cmb_dataset(drvdata) && (!drvdata->cmb)) {
+ if ((tpdm_has_cmb_dataset(drvdata) || tpdm_has_mcmb_dataset(drvdata))
+ && (!drvdata->cmb)) {
drvdata->cmb = devm_kzalloc(drvdata->dev,
sizeof(*drvdata->cmb), GFP_KERNEL);
if (!drvdata->cmb)
return -ENOMEM;
}
+
tpdm_reset_datasets(drvdata);
return 0;
@@ -990,6 +1032,62 @@ static ssize_t cmb_trig_ts_store(struct device *dev,
}
static DEVICE_ATTR_RW(cmb_trig_ts);
+static ssize_t mcmb_trig_lane_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sysfs_emit(buf, "%u\n",
+ (unsigned int)drvdata->cmb->mcmb.trig_lane);
+}
+
+static ssize_t mcmb_trig_lane_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+
+ if ((kstrtoul(buf, 0, &val)) || (val >= TPDM_MCMB_MAX_LANES))
+ return -EINVAL;
+
+ guard(spinlock)(&drvdata->spinlock);
+ drvdata->cmb->mcmb.trig_lane = val;
+
+ return size;
+}
+static DEVICE_ATTR_RW(mcmb_trig_lane);
+
+static ssize_t mcmb_lanes_select_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sysfs_emit(buf, "%u\n",
+ (unsigned int)drvdata->cmb->mcmb.lane_select);
+}
+
+static ssize_t mcmb_lanes_select_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val) || (val & ~TPDM_MCMB_E_LN_MASK))
+ return -EINVAL;
+
+ guard(spinlock)(&drvdata->spinlock);
+ drvdata->cmb->mcmb.lane_select = val & TPDM_MCMB_E_LN_MASK;
+
+ return size;
+}
+static DEVICE_ATTR_RW(mcmb_lanes_select);
+
static struct attribute *tpdm_dsb_edge_attrs[] = {
&dev_attr_ctrl_idx.attr,
&dev_attr_ctrl_val.attr,
@@ -1152,6 +1250,12 @@ static struct attribute *tpdm_cmb_msr_attrs[] = {
NULL,
};
+static struct attribute *tpdm_mcmb_attrs[] = {
+ &dev_attr_mcmb_trig_lane.attr,
+ &dev_attr_mcmb_lanes_select.attr,
+ NULL,
+};
+
static struct attribute *tpdm_dsb_attrs[] = {
&dev_attr_dsb_mode.attr,
&dev_attr_dsb_trig_ts.attr,
@@ -1218,6 +1322,11 @@ static struct attribute_group tpdm_cmb_msr_grp = {
.name = "cmb_msr",
};
+static struct attribute_group tpdm_mcmb_attr_grp = {
+ .attrs = tpdm_mcmb_attrs,
+ .is_visible = tpdm_mcmb_is_visible,
+};
+
static const struct attribute_group *tpdm_attr_grps[] = {
&tpdm_attr_grp,
&tpdm_dsb_attr_grp,
@@ -1229,6 +1338,7 @@ static const struct attribute_group *tpdm_attr_grps[] = {
&tpdm_cmb_trig_patt_grp,
&tpdm_cmb_patt_grp,
&tpdm_cmb_msr_grp,
+ &tpdm_mcmb_attr_grp,
NULL,
};
@@ -1305,7 +1415,7 @@ static void tpdm_remove(struct amba_device *adev)
 * Different TPDM instances have different peripheral IDs, differing only
 * in bits 0-7, so ignore bits 0-7 when matching.
*/
-static struct amba_id tpdm_ids[] = {
+static const struct amba_id tpdm_ids[] = {
{
.id = 0x001f0e00,
.mask = 0x00ffff00,
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.h b/drivers/hwtracing/coresight/coresight-tpdm.h
index e08d212642e3..b11754389734 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.h
+++ b/drivers/hwtracing/coresight/coresight-tpdm.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CORESIGHT_CORESIGHT_TPDM_H
@@ -9,7 +9,7 @@
/* The max number of the datasets that TPDM supports */
#define TPDM_DATASETS 7
-/* CMB Subunit Registers */
+/* CMB/MCMB Subunit Registers */
#define TPDM_CMB_CR (0xA00)
/* CMB subunit timestamp insertion enable register */
#define TPDM_CMB_TIER (0xA04)
@@ -28,6 +28,10 @@
#define TPDM_CMB_CR_ENA BIT(0)
/* Trace collection mode for CMB subunit */
#define TPDM_CMB_CR_MODE BIT(1)
+/* MCMB trigger lane select */
+#define TPDM_CMB_CR_XTRIG_LNSEL GENMASK(20, 18)
+/* MCMB lane enablement */
+#define TPDM_CMB_CR_E_LN GENMASK(17, 10)
/* Timestamp control for pattern match */
#define TPDM_CMB_TIER_PATT_TSENAB BIT(0)
/* CMB CTI timestamp request */
@@ -41,6 +45,12 @@
/* MAX number of DSB MSR */
#define TPDM_CMB_MAX_MSR 32
+/* MAX lanes in the output pattern for MCMB configurations */
+#define TPDM_MCMB_MAX_LANES 8
+
+/* Mask bits 0-7 of the value programmed into CR_E_LN */
+#define TPDM_MCMB_E_LN_MASK GENMASK(7, 0)
+
/* DSB Subunit Registers */
#define TPDM_DSB_CR (0x780)
#define TPDM_DSB_TIER (0x784)
@@ -112,11 +122,13 @@
* PERIPHIDR0[0] : Fix to 1 if ImplDef subunit present, else 0
* PERIPHIDR0[1] : Fix to 1 if DSB subunit present, else 0
* PERIPHIDR0[2] : Fix to 1 if CMB subunit present, else 0
+ * PERIPHIDR0[6] : Fix to 1 if MCMB subunit present, else 0
*/
#define TPDM_PIDR0_DS_IMPDEF BIT(0)
#define TPDM_PIDR0_DS_DSB BIT(1)
#define TPDM_PIDR0_DS_CMB BIT(2)
+#define TPDM_PIDR0_DS_MCMB BIT(6)
#define TPDM_DSB_MAX_LINES 256
/* MAX number of EDCR registers */
@@ -256,6 +268,9 @@ struct dsb_dataset {
* @patt_ts: Indicates if pattern match for timestamp is enabled.
* @trig_ts: Indicates if CTI trigger for timestamp is enabled.
* @ts_all: Indicates if timestamp is enabled for all packets.
+ * @mcmb.trig_lane: Selected trigger lane in the output pattern
+ * @mcmb.lane_select: Bitmask of lanes enabled in the output pattern
*/
struct cmb_dataset {
u32 trace_mode;
@@ -267,6 +282,10 @@ struct cmb_dataset {
bool patt_ts;
bool trig_ts;
bool ts_all;
+ struct {
+ u8 trig_lane;
+ u8 lane_select;
+ } mcmb;
};
/**
@@ -324,14 +343,4 @@ struct tpdm_dataset_attribute {
enum dataset_mem mem;
u32 idx;
};
-
-static bool tpdm_has_dsb_dataset(struct tpdm_drvdata *drvdata)
-{
- return (drvdata->datasets & TPDM_PIDR0_DS_DSB);
-}
-
-static bool tpdm_has_cmb_dataset(struct tpdm_drvdata *drvdata)
-{
- return (drvdata->datasets & TPDM_PIDR0_DS_CMB);
-}
#endif /* _CORESIGHT_CORESIGHT_TPDM_H */
diff --git a/drivers/hwtracing/coresight/coresight-trace-id.c b/drivers/hwtracing/coresight/coresight-trace-id.c
index 378af743be45..7ed337d54d3e 100644
--- a/drivers/hwtracing/coresight/coresight-trace-id.c
+++ b/drivers/hwtracing/coresight/coresight-trace-id.c
@@ -22,7 +22,7 @@ enum trace_id_flags {
static DEFINE_PER_CPU(atomic_t, id_map_default_cpu_ids) = ATOMIC_INIT(0);
static struct coresight_trace_id_map id_map_default = {
.cpu_map = &id_map_default_cpu_ids,
- .lock = __SPIN_LOCK_UNLOCKED(id_map_default.lock)
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(id_map_default.lock)
};
/* #define TRACE_ID_DEBUG 1 */
@@ -131,11 +131,11 @@ static void coresight_trace_id_release_all(struct coresight_trace_id_map *id_map
unsigned long flags;
int cpu;
- spin_lock_irqsave(&id_map->lock, flags);
+ raw_spin_lock_irqsave(&id_map->lock, flags);
bitmap_zero(id_map->used_ids, CORESIGHT_TRACE_IDS_MAX);
for_each_possible_cpu(cpu)
atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
- spin_unlock_irqrestore(&id_map->lock, flags);
+ raw_spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID_MAP(id_map);
}
@@ -144,7 +144,7 @@ static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map
unsigned long flags;
int id;
- spin_lock_irqsave(&id_map->lock, flags);
+ raw_spin_lock_irqsave(&id_map->lock, flags);
/* check for existing allocation for this CPU */
id = _coresight_trace_id_read_cpu_id(cpu, id_map);
@@ -171,7 +171,7 @@ static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map
atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), id);
get_cpu_id_out_unlock:
- spin_unlock_irqrestore(&id_map->lock, flags);
+ raw_spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID_CPU(cpu, id);
DUMP_ID_MAP(id_map);
@@ -188,12 +188,12 @@ static void _coresight_trace_id_put_cpu_id(int cpu, struct coresight_trace_id_ma
if (!id)
return;
- spin_lock_irqsave(&id_map->lock, flags);
+ raw_spin_lock_irqsave(&id_map->lock, flags);
coresight_trace_id_free(id, id_map);
atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
- spin_unlock_irqrestore(&id_map->lock, flags);
+ raw_spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID_CPU(cpu, id);
DUMP_ID_MAP(id_map);
}
@@ -204,9 +204,9 @@ static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *i
unsigned long flags;
int id;
- spin_lock_irqsave(&id_map->lock, flags);
+ raw_spin_lock_irqsave(&id_map->lock, flags);
id = coresight_trace_id_alloc_new_id(id_map, preferred_id, traceid_flags);
- spin_unlock_irqrestore(&id_map->lock, flags);
+ raw_spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID(id);
DUMP_ID_MAP(id_map);
@@ -217,9 +217,9 @@ static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map *
{
unsigned long flags;
- spin_lock_irqsave(&id_map->lock, flags);
+ raw_spin_lock_irqsave(&id_map->lock, flags);
coresight_trace_id_free(id, id_map);
- spin_unlock_irqrestore(&id_map->lock, flags);
+ raw_spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID(id);
DUMP_ID_MAP(id_map);
diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.c b/drivers/hwtracing/coresight/ultrasoc-smb.c
index dc3c9504dd7c..26cfc939e5bd 100644
--- a/drivers/hwtracing/coresight/ultrasoc-smb.c
+++ b/drivers/hwtracing/coresight/ultrasoc-smb.c
@@ -98,7 +98,7 @@ static int smb_open(struct inode *inode, struct file *file)
struct smb_drv_data *drvdata = container_of(file->private_data,
struct smb_drv_data, miscdev);
- guard(spinlock)(&drvdata->spinlock);
+ guard(raw_spinlock)(&drvdata->spinlock);
if (drvdata->reading)
return -EBUSY;
@@ -152,7 +152,7 @@ static int smb_release(struct inode *inode, struct file *file)
struct smb_drv_data *drvdata = container_of(file->private_data,
struct smb_drv_data, miscdev);
- guard(spinlock)(&drvdata->spinlock);
+ guard(raw_spinlock)(&drvdata->spinlock);
drvdata->reading = false;
return 0;
@@ -245,7 +245,7 @@ static int smb_enable(struct coresight_device *csdev, enum cs_mode mode,
struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
int ret = 0;
- guard(spinlock)(&drvdata->spinlock);
+ guard(raw_spinlock)(&drvdata->spinlock);
	/* Do nothing, the trace data is being read by another interface now */
if (drvdata->reading)
@@ -280,7 +280,7 @@ static int smb_disable(struct coresight_device *csdev)
{
struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
- guard(spinlock)(&drvdata->spinlock);
+ guard(raw_spinlock)(&drvdata->spinlock);
if (drvdata->reading)
return -EBUSY;
@@ -378,7 +378,7 @@ static unsigned long smb_update_buffer(struct coresight_device *csdev,
if (!buf)
return 0;
- guard(spinlock)(&drvdata->spinlock);
+ guard(raw_spinlock)(&drvdata->spinlock);
/* Don't do anything if another tracer is using this sink. */
if (csdev->refcnt != 1)
@@ -563,7 +563,7 @@ static int smb_probe(struct platform_device *pdev)
smb_reset_buffer(drvdata);
platform_set_drvdata(pdev, drvdata);
- spin_lock_init(&drvdata->spinlock);
+ raw_spin_lock_init(&drvdata->spinlock);
drvdata->pid = -1;
ret = smb_register_sink(pdev, drvdata);
diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.h b/drivers/hwtracing/coresight/ultrasoc-smb.h
index a91d39cfccb8..c4c111275627 100644
--- a/drivers/hwtracing/coresight/ultrasoc-smb.h
+++ b/drivers/hwtracing/coresight/ultrasoc-smb.h
@@ -115,7 +115,7 @@ struct smb_drv_data {
struct coresight_device *csdev;
struct smb_data_buffer sdb;
struct miscdevice miscdev;
- spinlock_t spinlock;
+ raw_spinlock_t spinlock;
bool reading;
pid_t pid;
};
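
The ultrasoc-smb conversion above pairs two changes: the lock becomes a raw_spinlock_t (so it remains a true spinning lock even on PREEMPT_RT) and acquisition moves to the scope-based guard() helper from <linux/cleanup.h>, which drops the lock automatically on every return path. A minimal kernel-style sketch, assuming a hypothetical driver struct:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

struct demo_drvdata {           /* hypothetical */
	raw_spinlock_t lock;
	bool busy;
};

static int demo_claim(struct demo_drvdata *d)
{
	/* lock is released automatically when this scope ends */
	guard(raw_spinlock)(&d->lock);

	if (d->busy)
		return -EBUSY;  /* no explicit unlock needed */

	d->busy = true;
	return 0;
}
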
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index fc438f445771..83c88c79afe2 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -783,6 +783,23 @@ config I2C_JZ4780
If you don't know what to do here, say N.
+config I2C_K1
+ tristate "SpacemiT K1 I2C adapter"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ depends on OF
+ help
+	  This option enables support for the I2C interface on the SpacemiT K1
+	  platform.
+
+	  If you say Y here, the kernel will include the driver for the I2C
+	  adapter specific to the SpacemiT K1 platform. This driver manages
+	  I2C bus transactions, which are needed to communicate with I2C
+	  peripherals such as sensors, EEPROMs, and other devices.
+
+ This driver can also be built as a module. If so, the
+ module will be called `i2c-k1`.
+
config I2C_KEBA
tristate "KEBA I2C controller support"
depends on HAS_IOMEM
@@ -940,6 +957,7 @@ config I2C_OMAP
tristate "OMAP I2C adapter"
depends on ARCH_OMAP || ARCH_K3 || COMPILE_TEST
default MACH_OMAP_OSK
+ select MULTIPLEXER
help
If you say yes to this option, support will be included for the
I2C interface on the Texas Instruments OMAP1/2 family of processors.
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 1c2a4510abe4..c1252e2b779e 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -74,6 +74,7 @@ obj-$(CONFIG_I2C_IMX) += i2c-imx.o
obj-$(CONFIG_I2C_IMX_LPI2C) += i2c-imx-lpi2c.o
obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o
obj-$(CONFIG_I2C_JZ4780) += i2c-jz4780.o
+obj-$(CONFIG_I2C_K1) += i2c-k1.o
obj-$(CONFIG_I2C_KEBA) += i2c-keba.o
obj-$(CONFIG_I2C_KEMPLD) += i2c-kempld.o
obj-$(CONFIG_I2C_LPC2K) += i2c-lpc2k.o
diff --git a/drivers/i2c/busses/i2c-amd-asf-plat.c b/drivers/i2c/busses/i2c-amd-asf-plat.c
index 93ebec162c6d..ca45f0f23321 100644
--- a/drivers/i2c/busses/i2c-amd-asf-plat.c
+++ b/drivers/i2c/busses/i2c-amd-asf-plat.c
@@ -69,7 +69,7 @@ static void amd_asf_process_target(struct work_struct *work)
/* Check if no error bits are set in target status register */
if (reg & ASF_ERROR_STATUS) {
/* Set bank as full */
- cmd = 0;
+ cmd = 1;
reg |= GENMASK(3, 2);
outb_p(reg, ASFDATABNKSEL);
} else {
@@ -272,9 +272,9 @@ static u32 amd_asf_func(struct i2c_adapter *adapter)
}
static const struct i2c_algorithm amd_asf_smbus_algorithm = {
- .master_xfer = amd_asf_xfer,
- .reg_slave = amd_asf_reg_target,
- .unreg_slave = amd_asf_unreg_target,
+ .xfer = amd_asf_xfer,
+ .reg_target = amd_asf_reg_target,
+ .unreg_target = amd_asf_unreg_target,
.functionality = amd_asf_func,
};
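
The amd-asf hunk above switches to the current i2c_algorithm callback names: .xfer, .reg_target and .unreg_target are the preferred aliases for the older .master_xfer, .reg_slave and .unreg_slave fields. A minimal sketch of an algorithm table using the new names, with hypothetical callbacks:

#include <linux/i2c.h>

/* hypothetical transfer callback for illustration */
static int demo_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	return num; /* pretend all messages completed */
}

static u32 demo_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm demo_algo = {
	.xfer          = demo_xfer, /* preferred over .master_xfer */
	.functionality = demo_func,
	/* .reg_target/.unreg_target would go here for CONFIG_I2C_SLAVE */
};
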
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index 48916cf45ff7..50030256cd85 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -255,11 +255,6 @@ static int i2c_m_rd(const struct i2c_msg *msg)
return (msg->flags & I2C_M_RD) != 0;
}
-static int i2c_m_ten(const struct i2c_msg *msg)
-{
- return (msg->flags & I2C_M_TEN) != 0;
-}
-
static int i2c_m_recv_len(const struct i2c_msg *msg)
{
return (msg->flags & I2C_M_RECV_LEN) != 0;
@@ -439,20 +434,10 @@ static void axxia_i2c_set_addr(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
{
u32 addr_1, addr_2;
- if (i2c_m_ten(msg)) {
- /* 10-bit address
- * addr_1: 5'b11110 | addr[9:8] | (R/nW)
- * addr_2: addr[7:0]
- */
- addr_1 = 0xF0 | ((msg->addr >> 7) & 0x06);
- if (i2c_m_rd(msg))
- addr_1 |= 1; /* Set the R/nW bit of the address */
- addr_2 = msg->addr & 0xFF;
+ if (msg->flags & I2C_M_TEN) {
+ addr_1 = i2c_10bit_addr_hi_from_msg(msg);
+ addr_2 = i2c_10bit_addr_lo_from_msg(msg);
} else {
- /* 7-bit address
- * addr_1: addr[6:0] | (R/nW)
- * addr_2: dont care
- */
addr_1 = i2c_8bit_addr_from_msg(msg);
addr_2 = 0;
}
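
Several drivers in this series (axxia, bcm-kona, brcmstb, eg20t, ibm_iic) replace open-coded 10-bit address math with i2c_10bit_addr_hi_from_msg() and i2c_10bit_addr_lo_from_msg(): the high byte encodes 11110 | addr[9:8] | R/nW, and the low byte carries addr[7:0]. A userspace sketch of the same arithmetic, using a stand-in message type rather than the i2c.h helpers themselves:

#include <stdint.h>
#include <stdio.h>

#define I2C_M_RD 0x0001 /* matches the kernel flag value */

struct msg { uint16_t addr, flags; }; /* stand-in for struct i2c_msg */

/* 11110 | addr[9:8] | R/nW -- mirrors i2c_10bit_addr_hi_from_msg() */
static uint8_t addr_hi(const struct msg *m)
{
	return 0xf0 | ((m->addr >> 7) & 0x06) | (m->flags & I2C_M_RD);
}

/* addr[7:0] -- mirrors i2c_10bit_addr_lo_from_msg() */
static uint8_t addr_lo(const struct msg *m)
{
	return m->addr & 0xff;
}

int main(void)
{
	struct msg m = { .addr = 0x2a5, .flags = I2C_M_RD };

	printf("hi=0x%02x lo=0x%02x\n", addr_hi(&m), addr_lo(&m));
	return 0;
}

This also explains the `& ~I2C_M_RD` seen in the write paths: the helper sets the read bit from msg->flags, so drivers that must first address the target in write mode mask it back off.
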
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 15b632a146e1..332a0fcca28d 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -678,7 +678,7 @@ static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static int bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
+static void bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
{
u32 val;
@@ -706,8 +706,6 @@ static int bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
/* clear all pending interrupts */
iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, 0xffffffff);
-
- return 0;
}
static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
@@ -1081,9 +1079,7 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
bcm_iproc_algo.unreg_slave = NULL;
}
- ret = bcm_iproc_i2c_init(iproc_i2c);
- if (ret)
- return ret;
+ bcm_iproc_i2c_init(iproc_i2c);
ret = bcm_iproc_i2c_cfg_speed(iproc_i2c);
if (ret)
@@ -1162,16 +1158,13 @@ static int bcm_iproc_i2c_suspend(struct device *dev)
static int bcm_iproc_i2c_resume(struct device *dev)
{
struct bcm_iproc_i2c_dev *iproc_i2c = dev_get_drvdata(dev);
- int ret;
u32 val;
/*
* Power domain could have been shut off completely in system deep
* sleep, so re-initialize the block here
*/
- ret = bcm_iproc_i2c_init(iproc_i2c);
- if (ret)
- return ret;
+ bcm_iproc_i2c_init(iproc_i2c);
/* configure to the desired bus speed */
val = iproc_i2c_rd_reg(iproc_i2c, TIM_CFG_OFFSET);
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c
index 340fe1305dd9..9d8838bbd938 100644
--- a/drivers/i2c/busses/i2c-bcm-kona.c
+++ b/drivers/i2c/busses/i2c-bcm-kona.c
@@ -471,12 +471,12 @@ static int bcm_kona_i2c_do_addr(struct bcm_kona_i2c_dev *dev,
if (msg->flags & I2C_M_TEN) {
/* First byte is 11110XX0 where XX is upper 2 bits */
- addr = 0xF0 | ((msg->addr & 0x300) >> 7);
+ addr = i2c_10bit_addr_hi_from_msg(msg) & ~I2C_M_RD;
if (bcm_kona_i2c_write_byte(dev, addr, 0) < 0)
return -EREMOTEIO;
/* Second byte is the remaining 8 bits */
- addr = msg->addr & 0xFF;
+ addr = i2c_10bit_addr_lo_from_msg(msg);
if (bcm_kona_i2c_write_byte(dev, addr, 0) < 0)
return -EREMOTEIO;
@@ -486,7 +486,7 @@ static int bcm_kona_i2c_do_addr(struct bcm_kona_i2c_dev *dev,
return -EREMOTEIO;
/* Then re-send the first byte with the read bit set */
- addr = 0xF0 | ((msg->addr & 0x300) >> 7) | 0x01;
+ addr = i2c_10bit_addr_hi_from_msg(msg);
if (bcm_kona_i2c_write_byte(dev, addr, 0) < 0)
return -EREMOTEIO;
}
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 00f1a046e985..5fa30e8926c5 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -414,23 +414,22 @@ static int brcmstb_i2c_do_addr(struct brcmstb_i2c_dev *dev,
if (msg->flags & I2C_M_TEN) {
/* First byte is 11110XX0 where XX is upper 2 bits */
- addr = 0xF0 | ((msg->addr & 0x300) >> 7);
+ addr = i2c_10bit_addr_hi_from_msg(msg) & ~I2C_M_RD;
bsc_writel(dev, addr, chip_address);
/* Second byte is the remaining 8 bits */
- addr = msg->addr & 0xFF;
+ addr = i2c_10bit_addr_lo_from_msg(msg);
if (brcmstb_i2c_write_data_byte(dev, &addr, 0) < 0)
return -EREMOTEIO;
if (msg->flags & I2C_M_RD) {
/* For read, send restart without stop condition */
- brcmstb_set_i2c_start_stop(dev, COND_RESTART
- | COND_NOSTOP);
+ brcmstb_set_i2c_start_stop(dev, COND_RESTART | COND_NOSTOP);
+
/* Then re-send the first byte with the read bit set */
- addr = 0xF0 | ((msg->addr & 0x300) >> 7) | 0x01;
+ addr = i2c_10bit_addr_hi_from_msg(msg);
if (brcmstb_i2c_write_data_byte(dev, &addr, 0) < 0)
return -EREMOTEIO;
-
}
} else {
addr = i2c_8bit_addr_from_msg(msg);
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index b64026fbca66..8df63aaf2a80 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -1541,7 +1541,7 @@ static int cdns_i2c_probe(struct platform_device *pdev)
snprintf(id->adap.name, sizeof(id->adap.name),
"Cadence I2C at %08lx", (unsigned long)r_mem->start);
- id->clk = devm_clk_get(&pdev->dev, NULL);
+ id->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(id->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(id->clk),
"input clock not found.\n");
@@ -1551,16 +1551,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(id->reset),
"Failed to request reset.\n");
- ret = clk_prepare_enable(id->clk);
- if (ret)
- dev_err(&pdev->dev, "Unable to enable clock.\n");
-
ret = reset_control_deassert(id->reset);
- if (ret) {
- dev_err_probe(&pdev->dev, ret,
- "Failed to de-assert reset.\n");
- goto err_clk_dis;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to de-assert reset.\n");
pm_runtime_set_autosuspend_delay(id->dev, CNDS_I2C_PM_TIMEOUT);
pm_runtime_use_autosuspend(id->dev);
@@ -1615,11 +1609,9 @@ static int cdns_i2c_probe(struct platform_device *pdev)
err_clk_notifier_unregister:
clk_notifier_unregister(id->clk, &id->clk_rate_change_nb);
- reset_control_assert(id->reset);
-err_clk_dis:
- clk_disable_unprepare(id->clk);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
+ reset_control_assert(id->reset);
return ret;
}
@@ -1642,7 +1634,6 @@ static void cdns_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&id->adap);
clk_notifier_unregister(id->clk, &id->clk_rate_change_nb);
reset_control_assert(id->reset);
- clk_disable_unprepare(id->clk);
}
static struct platform_driver cdns_i2c_drv = {
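
The Cadence probe above switches to devm_clk_get_enabled(), which acquires and enables the clock in one step and registers automatic disable/unprepare on driver detach, removing clk_disable_unprepare() from both the error and remove paths. A hedged kernel-style sketch of the pattern, assuming a hypothetical device:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* get + prepare_enable + auto-disable on detach, in one call */
	clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "input clock not found\n");

	/* no clk_disable_unprepare() needed in remove or error paths */
	return 0;
}
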
diff --git a/drivers/i2c/busses/i2c-designware-amdpsp.c b/drivers/i2c/busses/i2c-designware-amdpsp.c
index 8fbd2a10c31a..404571ad61a8 100644
--- a/drivers/i2c/busses/i2c-designware-amdpsp.c
+++ b/drivers/i2c/busses/i2c-designware-amdpsp.c
@@ -151,19 +151,16 @@ static void release_bus(void)
static void psp_release_i2c_bus_deferred(struct work_struct *work)
{
- mutex_lock(&psp_i2c_access_mutex);
+ guard(mutex)(&psp_i2c_access_mutex);
/*
* If there is any pending transaction, cannot release the bus here.
* psp_release_i2c_bus() will take care of this later.
*/
if (psp_i2c_access_count)
- goto cleanup;
+ return;
release_bus();
-
-cleanup:
- mutex_unlock(&psp_i2c_access_mutex);
}
static DECLARE_DELAYED_WORK(release_queue, psp_release_i2c_bus_deferred);
@@ -171,11 +168,11 @@ static int psp_acquire_i2c_bus(void)
{
int status;
- mutex_lock(&psp_i2c_access_mutex);
+ guard(mutex)(&psp_i2c_access_mutex);
/* Return early if mailbox malfunctioned */
if (psp_i2c_mbox_fail)
- goto cleanup;
+ return 0;
psp_i2c_access_count++;
@@ -184,11 +181,11 @@ static int psp_acquire_i2c_bus(void)
* reservation period.
*/
if (psp_i2c_sem_acquired)
- goto cleanup;
+ return 0;
status = psp_send_i2c_req(PSP_I2C_REQ_ACQUIRE);
if (status)
- goto cleanup;
+ return 0;
psp_i2c_sem_acquired = jiffies;
@@ -201,18 +198,16 @@ static int psp_acquire_i2c_bus(void)
 	 * communication with PSP. In any case the i2c bus is granted to the caller,
* thus always return success.
*/
-cleanup:
- mutex_unlock(&psp_i2c_access_mutex);
return 0;
}
static void psp_release_i2c_bus(void)
{
- mutex_lock(&psp_i2c_access_mutex);
+ guard(mutex)(&psp_i2c_access_mutex);
	/* Return early if the mailbox malfunctioned */
if (psp_i2c_mbox_fail)
- goto cleanup;
+ return;
/*
* If we are last owner of PSP semaphore, need to release arbitration
@@ -220,7 +215,7 @@ static void psp_release_i2c_bus(void)
*/
psp_i2c_access_count--;
if (psp_i2c_access_count)
- goto cleanup;
+ return;
/*
* Send a release command to PSP if the semaphore reservation timeout
@@ -228,9 +223,6 @@ static void psp_release_i2c_bus(void)
*/
if (!delayed_work_pending(&release_queue))
release_bus();
-
-cleanup:
- mutex_unlock(&psp_i2c_access_mutex);
}
/*
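
The amdpsp conversion above replaces the lock/goto-cleanup/unlock pattern with guard(mutex) from <linux/cleanup.h>, so each early return implicitly drops the mutex. A minimal sketch with a hypothetical refcount:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_users;

static void demo_put(void)
{
	/* unlocked automatically on every return below */
	guard(mutex)(&demo_lock);

	if (--demo_users)
		return; /* still in use: nothing more to do */

	/* last user: release the underlying resource here */
}
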
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index 2569bf1a72e0..c5394229b77f 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -907,7 +907,7 @@ done_nolock:
}
static const struct i2c_algorithm i2c_dw_algo = {
- .master_xfer = i2c_dw_xfer,
+ .xfer = i2c_dw_xfer,
.functionality = i2c_dw_func,
};
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 4914bfbee2a9..efdaddf99f9e 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -48,8 +48,6 @@
#define BUS_IDLE_TIMEOUT 20
#define PCH_I2CCTL_I2CMEN 0x0080
-#define TEN_BIT_ADDR_DEFAULT 0xF000
-#define TEN_BIT_ADDR_MASK 0xF0
#define PCH_START 0x0020
#define PCH_RESTART 0x0004
#define PCH_ESR_START 0x0001
@@ -58,7 +56,6 @@
#define PCH_ACK 0x0008
#define PCH_GETACK 0x0001
#define CLR_REG 0x0
-#define I2C_RD 0x1
#define I2CMCF_BIT 0x0080
#define I2CMIF_BIT 0x0002
#define I2CMAL_BIT 0x0010
@@ -76,8 +73,6 @@
#define I2CMBB_BIT 0x0020
#define BUFFER_MODE_MASK (I2CBMFI_BIT | I2CBMAL_BIT | I2CBMNA_BIT | \
I2CBMTO_BIT | I2CBMIS_BIT)
-#define I2C_ADDR_MSK 0xFF
-#define I2C_MSB_2B_MSK 0x300
#define FAST_MODE_CLK 400
#define FAST_MODE_EN 0x0001
#define SUB_ADDR_LEN_MAX 4
@@ -371,16 +366,12 @@ static s32 pch_i2c_writebytes(struct i2c_adapter *i2c_adap,
struct i2c_algo_pch_data *adap = i2c_adap->algo_data;
u8 *buf;
u32 length;
- u32 addr;
- u32 addr_2_msb;
- u32 addr_8_lsb;
s32 wrcount;
s32 rtn;
void __iomem *p = adap->pch_base_address;
length = msgs->len;
buf = msgs->buf;
- addr = msgs->addr;
/* enable master tx */
pch_setbit(adap->pch_base_address, PCH_I2CCTL, I2C_TX_MODE);
@@ -394,8 +385,7 @@ static s32 pch_i2c_writebytes(struct i2c_adapter *i2c_adap,
}
if (msgs->flags & I2C_M_TEN) {
- addr_2_msb = ((addr & I2C_MSB_2B_MSK) >> 7) & 0x06;
- iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR);
+ iowrite32(i2c_10bit_addr_hi_from_msg(msgs), p + PCH_I2CDR);
if (first)
pch_i2c_start(adap);
@@ -403,8 +393,7 @@ static s32 pch_i2c_writebytes(struct i2c_adapter *i2c_adap,
if (rtn)
return rtn;
- addr_8_lsb = (addr & I2C_ADDR_MSK);
- iowrite32(addr_8_lsb, p + PCH_I2CDR);
+ iowrite32(i2c_10bit_addr_lo_from_msg(msgs), p + PCH_I2CDR);
} else {
/* set 7 bit slave address and R/W bit as 0 */
iowrite32(i2c_8bit_addr_from_msg(msgs), p + PCH_I2CDR);
@@ -490,15 +479,11 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
u8 *buf;
u32 count;
u32 length;
- u32 addr;
- u32 addr_2_msb;
- u32 addr_8_lsb;
void __iomem *p = adap->pch_base_address;
s32 rtn;
length = msgs->len;
buf = msgs->buf;
- addr = msgs->addr;
/* enable master reception */
pch_clrbit(adap->pch_base_address, PCH_I2CCTL, I2C_TX_MODE);
@@ -509,8 +494,7 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
}
if (msgs->flags & I2C_M_TEN) {
- addr_2_msb = ((addr & I2C_MSB_2B_MSK) >> 7);
- iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR);
+ iowrite32(i2c_10bit_addr_hi_from_msg(msgs) & ~I2C_M_RD, p + PCH_I2CDR);
if (first)
pch_i2c_start(adap);
@@ -518,8 +502,7 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
if (rtn)
return rtn;
- addr_8_lsb = (addr & I2C_ADDR_MSK);
- iowrite32(addr_8_lsb, p + PCH_I2CDR);
+ iowrite32(i2c_10bit_addr_lo_from_msg(msgs), p + PCH_I2CDR);
pch_i2c_restart(adap);
@@ -527,8 +510,7 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
if (rtn)
return rtn;
- addr_2_msb |= I2C_RD;
- iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR);
+ iowrite32(i2c_10bit_addr_hi_from_msg(msgs), p + PCH_I2CDR);
} else {
/* 7 address bits + R/W bit */
iowrite32(i2c_8bit_addr_from_msg(msgs), p + PCH_I2CDR);
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index 6cdd957ea7e4..02f24479aa07 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -814,7 +814,7 @@ static int exynos5_i2c_xfer_msg(struct exynos5_i2c *i2c,
ret = i2c->state;
/*
- * If this is the last message to be transfered (stop == 1)
+ * If this is the last message to be transferred (stop == 1)
* Then check if the bus can be brought back to idle.
*/
if (ret == 0 && stop)
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 171d29d2770e..48e1af544b75 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -144,6 +144,7 @@
#define SMBNTFDADD(p) (20 + (p)->smba) /* ICH3 and later */
/* PCI Address Constants */
+#define SMBBAR_MMIO 0
#define SMBBAR 4
#define SMBHSTCFG 0x040
#define TCOBASE 0x050
@@ -276,7 +277,7 @@ struct i801_mux_config {
struct i801_priv {
struct i2c_adapter adapter;
- unsigned long smba;
+ void __iomem *smba;
unsigned char original_hstcfg;
unsigned char original_hstcnt;
unsigned char original_slvcmd;
@@ -337,9 +338,43 @@ MODULE_PARM_DESC(disable_features, "Disable selected driver features:\n"
"\t\t 0x10 don't use interrupts\n"
"\t\t 0x20 disable SMBus Host Notify ");
+/* Wait for BUSY being cleared and either INTR or an error flag being set */
+static int i801_wait_intr(struct i801_priv *priv)
+{
+ unsigned long timeout = jiffies + priv->adapter.timeout;
+ int status, busy;
+
+ do {
+ usleep_range(250, 500);
+ status = ioread8(SMBHSTSTS(priv));
+ busy = status & SMBHSTSTS_HOST_BUSY;
+ status &= STATUS_ERROR_FLAGS | SMBHSTSTS_INTR;
+ if (!busy && status)
+ return status & STATUS_ERROR_FLAGS;
+ } while (time_is_after_eq_jiffies(timeout));
+
+ return -ETIMEDOUT;
+}
+
+/* Wait for either BYTE_DONE or an error flag being set */
+static int i801_wait_byte_done(struct i801_priv *priv)
+{
+ unsigned long timeout = jiffies + priv->adapter.timeout;
+ int status;
+
+ do {
+ usleep_range(250, 500);
+ status = ioread8(SMBHSTSTS(priv));
+ if (status & (STATUS_ERROR_FLAGS | SMBHSTSTS_BYTE_DONE))
+ return status & STATUS_ERROR_FLAGS;
+ } while (time_is_after_eq_jiffies(timeout));
+
+ return -ETIMEDOUT;
+}
+
static int i801_get_block_len(struct i801_priv *priv)
{
- u8 len = inb_p(SMBHSTDAT0(priv));
+ u8 len = ioread8(SMBHSTDAT0(priv));
if (len < 1 || len > I2C_SMBUS_BLOCK_MAX) {
pci_err(priv->pci_dev, "Illegal SMBus block read size %u\n", len);
@@ -356,9 +391,9 @@ static int i801_check_and_clear_pec_error(struct i801_priv *priv)
if (!(priv->features & FEATURE_SMBUS_PEC))
return 0;
- status = inb_p(SMBAUXSTS(priv)) & SMBAUXSTS_CRCE;
+ status = ioread8(SMBAUXSTS(priv)) & SMBAUXSTS_CRCE;
if (status) {
- outb_p(status, SMBAUXSTS(priv));
+ iowrite8(status, SMBAUXSTS(priv));
return -EBADMSG;
}
@@ -371,7 +406,7 @@ static int i801_check_pre(struct i801_priv *priv)
{
int status, result;
- status = inb_p(SMBHSTSTS(priv));
+ status = ioread8(SMBHSTSTS(priv));
if (status & SMBHSTSTS_HOST_BUSY) {
pci_err(priv->pci_dev, "SMBus is busy, can't use it!\n");
return -EBUSY;
@@ -380,7 +415,7 @@ static int i801_check_pre(struct i801_priv *priv)
status &= STATUS_FLAGS;
if (status) {
pci_dbg(priv->pci_dev, "Clearing status flags (%02x)\n", status);
- outb_p(status, SMBHSTSTS(priv));
+ iowrite8(status, SMBHSTSTS(priv));
}
/*
@@ -406,22 +441,19 @@ static int i801_check_post(struct i801_priv *priv, int status)
*/
if (unlikely(status < 0)) {
/* try to stop the current command */
- outb_p(SMBHSTCNT_KILL, SMBHSTCNT(priv));
- usleep_range(1000, 2000);
- outb_p(0, SMBHSTCNT(priv));
+ iowrite8(SMBHSTCNT_KILL, SMBHSTCNT(priv));
+ status = i801_wait_intr(priv);
+ iowrite8(0, SMBHSTCNT(priv));
/* Check if it worked */
- status = inb_p(SMBHSTSTS(priv));
- if ((status & SMBHSTSTS_HOST_BUSY) ||
- !(status & SMBHSTSTS_FAILED))
- dev_dbg(&priv->pci_dev->dev,
- "Failed terminating the transaction\n");
+ if (status < 0 || !(status & SMBHSTSTS_FAILED))
+ pci_dbg(priv->pci_dev, "Failed terminating the transaction\n");
return -ETIMEDOUT;
}
if (status & SMBHSTSTS_FAILED) {
result = -EIO;
- dev_err(&priv->pci_dev->dev, "Transaction failed\n");
+ pci_err(priv->pci_dev, "Transaction failed\n");
}
if (status & SMBHSTSTS_DEV_ERR) {
/*
@@ -449,46 +481,12 @@ static int i801_check_post(struct i801_priv *priv, int status)
}
if (status & SMBHSTSTS_BUS_ERR) {
result = -EAGAIN;
- dev_dbg(&priv->pci_dev->dev, "Lost arbitration\n");
+ pci_dbg(priv->pci_dev, "Lost arbitration\n");
}
return result;
}
-/* Wait for BUSY being cleared and either INTR or an error flag being set */
-static int i801_wait_intr(struct i801_priv *priv)
-{
- unsigned long timeout = jiffies + priv->adapter.timeout;
- int status, busy;
-
- do {
- usleep_range(250, 500);
- status = inb_p(SMBHSTSTS(priv));
- busy = status & SMBHSTSTS_HOST_BUSY;
- status &= STATUS_ERROR_FLAGS | SMBHSTSTS_INTR;
- if (!busy && status)
- return status & STATUS_ERROR_FLAGS;
- } while (time_is_after_eq_jiffies(timeout));
-
- return -ETIMEDOUT;
-}
-
-/* Wait for either BYTE_DONE or an error flag being set */
-static int i801_wait_byte_done(struct i801_priv *priv)
-{
- unsigned long timeout = jiffies + priv->adapter.timeout;
- int status;
-
- do {
- usleep_range(250, 500);
- status = inb_p(SMBHSTSTS(priv));
- if (status & (STATUS_ERROR_FLAGS | SMBHSTSTS_BYTE_DONE))
- return status & STATUS_ERROR_FLAGS;
- } while (time_is_after_eq_jiffies(timeout));
-
- return -ETIMEDOUT;
-}
-
static int i801_transaction(struct i801_priv *priv, int xact)
{
unsigned long result;
@@ -496,13 +494,13 @@ static int i801_transaction(struct i801_priv *priv, int xact)
if (priv->features & FEATURE_IRQ) {
reinit_completion(&priv->done);
- outb_p(xact | SMBHSTCNT_INTREN | SMBHSTCNT_START,
+ iowrite8(xact | SMBHSTCNT_INTREN | SMBHSTCNT_START,
SMBHSTCNT(priv));
result = wait_for_completion_timeout(&priv->done, adap->timeout);
return result ? priv->status : -ETIMEDOUT;
}
- outb_p(xact | SMBHSTCNT_START, SMBHSTCNT(priv));
+ iowrite8(xact | SMBHSTCNT_START, SMBHSTCNT(priv));
return i801_wait_intr(priv);
}
@@ -511,7 +509,7 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
union i2c_smbus_data *data,
char read_write, int command)
{
- int i, len, status, xact;
+ int len, status, xact;
switch (command) {
case I2C_SMBUS_BLOCK_PROC_CALL:
@@ -525,14 +523,13 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
}
/* Set block buffer mode */
- outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv));
+ iowrite8(ioread8(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv));
if (read_write == I2C_SMBUS_WRITE) {
len = data->block[0];
- outb_p(len, SMBHSTDAT0(priv));
- inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
- for (i = 0; i < len; i++)
- outb_p(data->block[i+1], SMBBLKDAT(priv));
+ iowrite8(len, SMBHSTDAT0(priv));
+ ioread8(SMBHSTCNT(priv)); /* reset the data buffer index */
+ iowrite8_rep(SMBBLKDAT(priv), data->block + 1, len);
}
status = i801_transaction(priv, xact);
@@ -548,12 +545,11 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
}
data->block[0] = len;
- inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
- for (i = 0; i < len; i++)
- data->block[i + 1] = inb_p(SMBBLKDAT(priv));
+ ioread8(SMBHSTCNT(priv)); /* reset the data buffer index */
+ ioread8_rep(SMBBLKDAT(priv), data->block + 1, len);
}
out:
- outb_p(inb_p(SMBAUXCTL(priv)) & ~SMBAUXCTL_E32B, SMBAUXCTL(priv));
+ iowrite8(ioread8(SMBAUXCTL(priv)) & ~SMBAUXCTL_E32B, SMBAUXCTL(priv));
return status;
}
@@ -576,18 +572,17 @@ static void i801_isr_byte_done(struct i801_priv *priv)
/* Read next byte */
if (priv->count < priv->len)
- priv->data[priv->count++] = inb(SMBBLKDAT(priv));
+ priv->data[priv->count++] = ioread8(SMBBLKDAT(priv));
else
- dev_dbg(&priv->pci_dev->dev,
- "Discarding extra byte on block read\n");
+ pci_dbg(priv->pci_dev, "Discarding extra byte on block read\n");
/* Set LAST_BYTE for last byte of read transaction */
if (priv->count == priv->len - 1)
- outb_p(priv->cmd | SMBHSTCNT_LAST_BYTE,
+ iowrite8(priv->cmd | SMBHSTCNT_LAST_BYTE,
SMBHSTCNT(priv));
} else if (priv->count < priv->len - 1) {
/* Write next byte, except for IRQ after last byte */
- outb_p(priv->data[++priv->count], SMBBLKDAT(priv));
+ iowrite8(priv->data[++priv->count], SMBBLKDAT(priv));
}
}
@@ -595,7 +590,7 @@ static irqreturn_t i801_host_notify_isr(struct i801_priv *priv)
{
unsigned short addr;
- addr = inb_p(SMBNTFDADD(priv)) >> 1;
+ addr = ioread8(SMBNTFDADD(priv)) >> 1;
/*
* With the tested platforms, reading SMBNTFDDAT (22 + (p)->smba)
@@ -605,7 +600,7 @@ static irqreturn_t i801_host_notify_isr(struct i801_priv *priv)
i2c_handle_smbus_host_notify(&priv->adapter, addr);
/* clear Host Notify bit and return */
- outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv));
+ iowrite8(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv));
return IRQ_HANDLED;
}
@@ -636,12 +631,12 @@ static irqreturn_t i801_isr(int irq, void *dev_id)
return IRQ_NONE;
if (priv->features & FEATURE_HOST_NOTIFY) {
- status = inb_p(SMBSLVSTS(priv));
+ status = ioread8(SMBSLVSTS(priv));
if (status & SMBSLVSTS_HST_NTFY_STS)
return i801_host_notify_isr(priv);
}
- status = inb_p(SMBHSTSTS(priv));
+ status = ioread8(SMBHSTSTS(priv));
if ((status & (SMBHSTSTS_BYTE_DONE | STATUS_ERROR_FLAGS)) == SMBHSTSTS_BYTE_DONE)
i801_isr_byte_done(priv);
@@ -651,7 +646,7 @@ static irqreturn_t i801_isr(int irq, void *dev_id)
* so clear it always when the status is set.
*/
status &= STATUS_FLAGS | SMBHSTSTS_SMBALERT_STS;
- outb_p(status, SMBHSTSTS(priv));
+ iowrite8(status, SMBHSTSTS(priv));
status &= STATUS_ERROR_FLAGS | SMBHSTSTS_INTR;
if (status) {
@@ -683,8 +678,8 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
len = data->block[0];
if (read_write == I2C_SMBUS_WRITE) {
- outb_p(len, SMBHSTDAT0(priv));
- outb_p(data->block[1], SMBBLKDAT(priv));
+ iowrite8(len, SMBHSTDAT0(priv));
+ iowrite8(data->block[1], SMBBLKDAT(priv));
}
if (command == I2C_SMBUS_I2C_BLOCK_DATA &&
@@ -703,14 +698,14 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
priv->data = &data->block[1];
reinit_completion(&priv->done);
- outb_p(priv->cmd | SMBHSTCNT_START, SMBHSTCNT(priv));
+ iowrite8(priv->cmd | SMBHSTCNT_START, SMBHSTCNT(priv));
result = wait_for_completion_timeout(&priv->done, adap->timeout);
return result ? priv->status : -ETIMEDOUT;
}
if (len == 1 && read_write == I2C_SMBUS_READ)
smbcmd |= SMBHSTCNT_LAST_BYTE;
- outb_p(smbcmd | SMBHSTCNT_START, SMBHSTCNT(priv));
+ iowrite8(smbcmd | SMBHSTCNT_START, SMBHSTCNT(priv));
for (i = 1; i <= len; i++) {
status = i801_wait_byte_done(priv);
@@ -726,27 +721,27 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
len = i801_get_block_len(priv);
if (len < 0) {
/* Recover */
- while (inb_p(SMBHSTSTS(priv)) &
+ while (ioread8(SMBHSTSTS(priv)) &
SMBHSTSTS_HOST_BUSY)
- outb_p(SMBHSTSTS_BYTE_DONE,
+ iowrite8(SMBHSTSTS_BYTE_DONE,
SMBHSTSTS(priv));
- outb_p(SMBHSTSTS_INTR, SMBHSTSTS(priv));
+ iowrite8(SMBHSTSTS_INTR, SMBHSTSTS(priv));
return -EPROTO;
}
data->block[0] = len;
}
if (read_write == I2C_SMBUS_READ) {
- data->block[i] = inb_p(SMBBLKDAT(priv));
+ data->block[i] = ioread8(SMBBLKDAT(priv));
if (i == len - 1)
- outb_p(smbcmd | SMBHSTCNT_LAST_BYTE, SMBHSTCNT(priv));
+ iowrite8(smbcmd | SMBHSTCNT_LAST_BYTE, SMBHSTCNT(priv));
}
if (read_write == I2C_SMBUS_WRITE && i+1 <= len)
- outb_p(data->block[i+1], SMBBLKDAT(priv));
+ iowrite8(data->block[i+1], SMBBLKDAT(priv));
/* signals SMBBLKDAT ready */
- outb_p(SMBHSTSTS_BYTE_DONE, SMBHSTSTS(priv));
+ iowrite8(SMBHSTSTS_BYTE_DONE, SMBHSTSTS(priv));
}
return i801_wait_intr(priv);
@@ -754,7 +749,7 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
static void i801_set_hstadd(struct i801_priv *priv, u8 addr, char read_write)
{
- outb_p((addr << 1) | (read_write & 0x01), SMBHSTADD(priv));
+ iowrite8((addr << 1) | (read_write & 0x01), SMBHSTADD(priv));
}
/* Single value transaction function */
@@ -771,30 +766,30 @@ static int i801_simple_transaction(struct i801_priv *priv, union i2c_smbus_data
case I2C_SMBUS_BYTE:
i801_set_hstadd(priv, addr, read_write);
if (read_write == I2C_SMBUS_WRITE)
- outb_p(hstcmd, SMBHSTCMD(priv));
+ iowrite8(hstcmd, SMBHSTCMD(priv));
xact = I801_BYTE;
break;
case I2C_SMBUS_BYTE_DATA:
i801_set_hstadd(priv, addr, read_write);
if (read_write == I2C_SMBUS_WRITE)
- outb_p(data->byte, SMBHSTDAT0(priv));
- outb_p(hstcmd, SMBHSTCMD(priv));
+ iowrite8(data->byte, SMBHSTDAT0(priv));
+ iowrite8(hstcmd, SMBHSTCMD(priv));
xact = I801_BYTE_DATA;
break;
case I2C_SMBUS_WORD_DATA:
i801_set_hstadd(priv, addr, read_write);
if (read_write == I2C_SMBUS_WRITE) {
- outb_p(data->word & 0xff, SMBHSTDAT0(priv));
- outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1(priv));
+ iowrite8(data->word & 0xff, SMBHSTDAT0(priv));
+ iowrite8((data->word & 0xff00) >> 8, SMBHSTDAT1(priv));
}
- outb_p(hstcmd, SMBHSTCMD(priv));
+ iowrite8(hstcmd, SMBHSTCMD(priv));
xact = I801_WORD_DATA;
break;
case I2C_SMBUS_PROC_CALL:
i801_set_hstadd(priv, addr, I2C_SMBUS_WRITE);
- outb_p(data->word & 0xff, SMBHSTDAT0(priv));
- outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1(priv));
- outb_p(hstcmd, SMBHSTCMD(priv));
+ iowrite8(data->word & 0xff, SMBHSTDAT0(priv));
+ iowrite8((data->word & 0xff00) >> 8, SMBHSTDAT1(priv));
+ iowrite8(hstcmd, SMBHSTCMD(priv));
read_write = I2C_SMBUS_READ;
xact = I801_PROC_CALL;
break;
@@ -810,12 +805,12 @@ static int i801_simple_transaction(struct i801_priv *priv, union i2c_smbus_data
switch (command) {
case I2C_SMBUS_BYTE:
case I2C_SMBUS_BYTE_DATA:
- data->byte = inb_p(SMBHSTDAT0(priv));
+ data->byte = ioread8(SMBHSTDAT0(priv));
break;
case I2C_SMBUS_WORD_DATA:
case I2C_SMBUS_PROC_CALL:
- data->word = inb_p(SMBHSTDAT0(priv)) +
- (inb_p(SMBHSTDAT1(priv)) << 8);
+ data->word = ioread8(SMBHSTDAT0(priv)) +
+ (ioread8(SMBHSTDAT1(priv)) << 8);
break;
}
@@ -836,7 +831,7 @@ static int i801_smbus_block_transaction(struct i801_priv *priv, union i2c_smbus_
i801_set_hstadd(priv, addr, I2C_SMBUS_WRITE);
else
i801_set_hstadd(priv, addr, read_write);
- outb_p(hstcmd, SMBHSTCMD(priv));
+ iowrite8(hstcmd, SMBHSTCMD(priv));
if (priv->features & FEATURE_BLOCK_BUFFER)
return i801_block_transaction_by_block(priv, data, read_write, command);
@@ -862,9 +857,9 @@ static int i801_i2c_block_transaction(struct i801_priv *priv, union i2c_smbus_da
/* NB: page 240 of ICH5 datasheet shows that DATA1 is the cmd field when reading */
if (read_write == I2C_SMBUS_READ)
- outb_p(hstcmd, SMBHSTDAT1(priv));
+ iowrite8(hstcmd, SMBHSTDAT1(priv));
else
- outb_p(hstcmd, SMBHSTCMD(priv));
+ iowrite8(hstcmd, SMBHSTCMD(priv));
if (read_write == I2C_SMBUS_WRITE) {
/* set I2C_EN bit in configuration register */
@@ -907,9 +902,9 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
&& size != I2C_SMBUS_I2C_BLOCK_DATA;
if (hwpec) /* enable/disable hardware PEC */
- outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_CRC, SMBAUXCTL(priv));
+ iowrite8(ioread8(SMBAUXCTL(priv)) | SMBAUXCTL_CRC, SMBAUXCTL(priv));
else
- outb_p(inb_p(SMBAUXCTL(priv)) & (~SMBAUXCTL_CRC),
+ iowrite8(ioread8(SMBAUXCTL(priv)) & (~SMBAUXCTL_CRC),
SMBAUXCTL(priv));
if (size == I2C_SMBUS_BLOCK_DATA || size == I2C_SMBUS_BLOCK_PROC_CALL)
@@ -925,13 +920,13 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
* time, so we forcibly disable it after every transaction.
*/
if (hwpec)
- outb_p(inb_p(SMBAUXCTL(priv)) & ~SMBAUXCTL_CRC, SMBAUXCTL(priv));
+ iowrite8(ioread8(SMBAUXCTL(priv)) & ~SMBAUXCTL_CRC, SMBAUXCTL(priv));
out:
/*
* Unlock the SMBus device for use by BIOS/ACPI,
* and clear status flags if not done already.
*/
- outb_p(SMBHSTSTS_INUSE_STS | STATUS_FLAGS, SMBHSTSTS(priv));
+ iowrite8(SMBHSTSTS_INUSE_STS | STATUS_FLAGS, SMBHSTSTS(priv));
pm_runtime_mark_last_busy(&priv->pci_dev->dev);
pm_runtime_put_autosuspend(&priv->pci_dev->dev);
@@ -968,11 +963,11 @@ static void i801_enable_host_notify(struct i2c_adapter *adapter)
* from the SMB_ALERT signal because the driver does not support
* SMBus Alert.
*/
- outb_p(SMBSLVCMD_HST_NTFY_INTREN | SMBSLVCMD_SMBALERT_DISABLE |
+ iowrite8(SMBSLVCMD_HST_NTFY_INTREN | SMBSLVCMD_SMBALERT_DISABLE |
priv->original_slvcmd, SMBSLVCMD(priv));
/* clear Host Notify bit to allow a new notification */
- outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv));
+ iowrite8(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv));
}
static void i801_disable_host_notify(struct i801_priv *priv)
@@ -980,7 +975,7 @@ static void i801_disable_host_notify(struct i801_priv *priv)
if (!(priv->features & FEATURE_HOST_NOTIFY))
return;
- outb_p(priv->original_slvcmd, SMBSLVCMD(priv));
+ iowrite8(priv->original_slvcmd, SMBSLVCMD(priv));
}
static const struct i2c_algorithm smbus_algorithm = {
@@ -1438,14 +1433,14 @@ static void i801_add_tco(struct i801_priv *priv)
priv->tco_pdev = i801_add_tco_spt(pci_dev, tco_res);
if (IS_ERR(priv->tco_pdev))
- dev_warn(&pci_dev->dev, "failed to create iTCO device\n");
+ pci_warn(pci_dev, "failed to create iTCO device\n");
}
#ifdef CONFIG_ACPI
static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv,
acpi_physical_address address)
{
- return address >= priv->smba &&
+ return address >= pci_resource_start(priv->pci_dev, SMBBAR) &&
address <= pci_resource_end(priv->pci_dev, SMBBAR);
}
@@ -1467,8 +1462,8 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
priv->acpi_reserved = true;
- dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
- dev_warn(&pdev->dev, "Driver SMBus register access inhibited\n");
+ pci_warn(pdev, "BIOS is accessing SMBus registers\n");
+ pci_warn(pdev, "Driver SMBus register access inhibited\n");
/*
* BIOS is accessing the host controller so prevent it from
@@ -1522,13 +1517,13 @@ static void i801_setup_hstcfg(struct i801_priv *priv)
static void i801_restore_regs(struct i801_priv *priv)
{
- outb_p(priv->original_hstcnt, SMBHSTCNT(priv));
+ iowrite8(priv->original_hstcnt, SMBHSTCNT(priv));
pci_write_config_byte(priv->pci_dev, SMBHSTCFG, priv->original_hstcfg);
}
static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
- int err, i;
+ int err, i, bar = SMBBAR;
struct i801_priv *priv;
priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
@@ -1549,8 +1544,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* Disable features on user request */
for (i = 0; i < ARRAY_SIZE(i801_feature_names); i++) {
if (priv->features & disable_features & (1 << i))
- dev_notice(&dev->dev, "%s disabled by user\n",
- i801_feature_names[i]);
+ pci_notice(dev, "%s disabled by user\n", i801_feature_names[i]);
}
priv->features &= ~disable_features;
@@ -1564,48 +1558,46 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
*/
err = pci_enable_device(dev);
if (err) {
- dev_err(&dev->dev, "Failed to enable SMBus PCI device (%d)\n",
- err);
+ pci_err(dev, "Failed to enable SMBus PCI device (%d)\n", err);
return err;
}
/* Determine the address of the SMBus area */
- priv->smba = pci_resource_start(dev, SMBBAR);
- if (!priv->smba) {
- dev_err(&dev->dev,
- "SMBus base address uninitialized, upgrade BIOS\n");
+ if (!pci_resource_start(dev, SMBBAR)) {
+ pci_err(dev, "SMBus base address uninitialized, upgrade BIOS\n");
return -ENODEV;
}
if (i801_acpi_probe(priv))
return -ENODEV;
- err = pcim_iomap_regions(dev, 1 << SMBBAR, DRV_NAME);
- if (err) {
- dev_err(&dev->dev,
- "Failed to request SMBus region 0x%lx-0x%Lx\n",
- priv->smba,
- (unsigned long long)pci_resource_end(dev, SMBBAR));
+ if (pci_resource_flags(dev, SMBBAR_MMIO) & IORESOURCE_MEM)
+ bar = SMBBAR_MMIO;
+
+ priv->smba = pcim_iomap_region(dev, bar, DRV_NAME);
+ if (IS_ERR(priv->smba)) {
+ pci_err(dev, "Failed to request SMBus region %pr\n",
+ pci_resource_n(dev, bar));
i801_acpi_remove(priv);
- return err;
+ return PTR_ERR(priv->smba);
}
- pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &priv->original_hstcfg);
+ pci_read_config_byte(dev, SMBHSTCFG, &priv->original_hstcfg);
i801_setup_hstcfg(priv);
if (!(priv->original_hstcfg & SMBHSTCFG_HST_EN))
- dev_info(&dev->dev, "Enabling SMBus device\n");
+ pci_info(dev, "Enabling SMBus device\n");
if (priv->original_hstcfg & SMBHSTCFG_SMB_SMI_EN) {
- dev_dbg(&dev->dev, "SMBus using interrupt SMI#\n");
+ pci_dbg(dev, "SMBus using interrupt SMI#\n");
/* Disable SMBus interrupt feature if SMBus using SMI# */
priv->features &= ~FEATURE_IRQ;
}
if (priv->original_hstcfg & SMBHSTCFG_SPD_WD)
- dev_info(&dev->dev, "SPD Write Disable is set\n");
+ pci_info(dev, "SPD Write Disable is set\n");
/* Clear special mode bits */
if (priv->features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER))
- outb_p(inb_p(SMBAUXCTL(priv)) &
+ iowrite8(ioread8(SMBAUXCTL(priv)) &
~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
/* Default timeout in interrupt mode: 200 ms */
@@ -1620,7 +1612,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* Complain if an interrupt is already pending */
pci_read_config_word(priv->pci_dev, PCI_STATUS, &pcists);
if (pcists & PCI_STATUS_INTERRUPT)
- dev_warn(&dev->dev, "An interrupt is pending!\n");
+ pci_warn(dev, "An interrupt is pending!\n");
}
if (priv->features & FEATURE_IRQ) {
@@ -1629,12 +1621,11 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
err = devm_request_irq(&dev->dev, dev->irq, i801_isr,
IRQF_SHARED, DRV_NAME, priv);
if (err) {
- dev_err(&dev->dev, "Failed to allocate irq %d: %d\n",
- dev->irq, err);
+ pci_err(dev, "Failed to allocate irq %d: %d\n", dev->irq, err);
priv->features &= ~FEATURE_IRQ;
}
}
- dev_info(&dev->dev, "SMBus using %s\n",
+ pci_info(dev, "SMBus using %s\n",
priv->features & FEATURE_IRQ ? "PCI interrupt" : "polling");
/* Host notification uses an interrupt */
@@ -1642,9 +1633,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
priv->features &= ~FEATURE_HOST_NOTIFY;
/* Remember original Interrupt and Host Notify settings */
- priv->original_hstcnt = inb_p(SMBHSTCNT(priv)) & ~SMBHSTCNT_KILL;
+ priv->original_hstcnt = ioread8(SMBHSTCNT(priv)) & ~SMBHSTCNT_KILL;
if (priv->features & FEATURE_HOST_NOTIFY)
- priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
+ priv->original_slvcmd = ioread8(SMBSLVCMD(priv));
i801_add_tco(priv);
@@ -1653,9 +1644,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
	 * to instantiate i2c_clients, do not change.
*/
snprintf(priv->adapter.name, sizeof(priv->adapter.name),
- "SMBus %s adapter at %04lx",
+ "SMBus %s adapter at %s",
(priv->features & FEATURE_IDF) ? "I801 IDF" : "I801",
- priv->smba);
+ pci_name(dev));
err = i2c_add_adapter(&priv->adapter);
if (err) {
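
The i801 conversion above replaces port I/O accessors (inb_p/outb_p on an unsigned long base) with ioread8/iowrite8 on a void __iomem * obtained from pcim_iomap_region(), as used in the probe hunk, so the same accessors drive either the legacy I/O BAR 4 or the new MMIO BAR 0. A kernel-style sketch of the probe-side pattern, with a hypothetical register offset:

#include <linux/pci.h>

#define DEMO_BAR_MMIO 0
#define DEMO_BAR_IO   4
#define DEMO_STS      0 /* hypothetical status register offset */

static int demo_map(struct pci_dev *pdev)
{
	void __iomem *base;
	int bar = DEMO_BAR_IO;

	/* prefer the memory-mapped BAR when the device exposes one */
	if (pci_resource_flags(pdev, DEMO_BAR_MMIO) & IORESOURCE_MEM)
		bar = DEMO_BAR_MMIO;

	base = pcim_iomap_region(pdev, bar, "demo");
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ioread8/iowrite8 work for both I/O-port and MMIO mappings */
	iowrite8(ioread8(base + DEMO_STS), base + DEMO_STS);
	return 0;
}
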
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index c76c4116ddc7..6bf45d752ff9 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -512,19 +512,17 @@ static int iic_xfer_bytes(struct ibm_iic_private* dev, struct i2c_msg* pm,
static inline void iic_address(struct ibm_iic_private* dev, struct i2c_msg* msg)
{
volatile struct iic_regs __iomem *iic = dev->vaddr;
- u16 addr = msg->addr;
DBG2("%d: iic_address, 0x%03x (%d-bit)\n", dev->idx,
- addr, msg->flags & I2C_M_TEN ? 10 : 7);
+ msg->addr, msg->flags & I2C_M_TEN ? 10 : 7);
- if (msg->flags & I2C_M_TEN){
+ if (msg->flags & I2C_M_TEN) {
out_8(&iic->cntl, CNTL_AMD);
- out_8(&iic->lmadr, addr);
- out_8(&iic->hmadr, 0xf0 | ((addr >> 7) & 0x06));
- }
- else {
+ out_8(&iic->lmadr, i2c_10bit_addr_lo_from_msg(msg));
+ out_8(&iic->hmadr, i2c_10bit_addr_hi_from_msg(msg) & ~I2C_M_RD);
+ } else {
out_8(&iic->cntl, 0);
- out_8(&iic->lmadr, addr << 1);
+ out_8(&iic->lmadr, i2c_8bit_addr_from_msg(msg) & ~I2C_M_RD);
}
}
diff --git a/drivers/i2c/busses/i2c-k1.c b/drivers/i2c/busses/i2c-k1.c
new file mode 100644
index 000000000000..5965b4cf6220
--- /dev/null
+++ b/drivers/i2c/busses/i2c-k1.c
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024-2025 Troy Mitchell <troymitchell988@gmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/i2c.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+/* spacemit i2c registers */
+#define SPACEMIT_ICR 0x0 /* Control register */
+#define SPACEMIT_ISR 0x4 /* Status register */
+#define SPACEMIT_IDBR 0xc /* Data buffer register */
+#define SPACEMIT_IBMR 0x1c /* Bus monitor register */
+
+/* SPACEMIT_ICR register fields */
+#define SPACEMIT_CR_START BIT(0) /* start bit */
+#define SPACEMIT_CR_STOP BIT(1) /* stop bit */
+#define SPACEMIT_CR_ACKNAK BIT(2) /* send ACK(0) or NAK(1) */
+#define SPACEMIT_CR_TB BIT(3) /* transfer byte bit */
+/* Bits 4-7 are reserved */
+#define SPACEMIT_CR_MODE_FAST BIT(8) /* bus mode (master operation) */
+/* Bit 9 is reserved */
+#define SPACEMIT_CR_UR BIT(10) /* unit reset */
+/* Bits 11-12 are reserved */
+#define SPACEMIT_CR_SCLE BIT(13) /* master clock enable */
+#define SPACEMIT_CR_IUE BIT(14) /* unit enable */
+/* Bits 15-17 are reserved */
+#define SPACEMIT_CR_ALDIE BIT(18) /* enable arbitration interrupt */
+#define SPACEMIT_CR_DTEIE BIT(19) /* enable TX interrupts */
+#define SPACEMIT_CR_DRFIE BIT(20) /* enable RX interrupts */
+#define SPACEMIT_CR_GCD BIT(21) /* general call disable */
+#define SPACEMIT_CR_BEIE BIT(22) /* enable bus error ints */
+/* Bits 23-24 are reserved */
+#define SPACEMIT_CR_MSDIE BIT(25) /* master STOP detected int enable */
+#define SPACEMIT_CR_MSDE BIT(26) /* master STOP detected enable */
+#define SPACEMIT_CR_TXDONEIE BIT(27) /* transaction done int enable */
+#define SPACEMIT_CR_TXEIE BIT(28) /* transmit FIFO empty int enable */
+#define SPACEMIT_CR_RXHFIE BIT(29) /* receive FIFO half-full int enable */
+#define SPACEMIT_CR_RXFIE BIT(30) /* receive FIFO full int enable */
+#define SPACEMIT_CR_RXOVIE BIT(31) /* receive FIFO overrun int enable */
+
+#define SPACEMIT_I2C_INT_CTRL_MASK (SPACEMIT_CR_ALDIE | SPACEMIT_CR_DTEIE | \
+ SPACEMIT_CR_DRFIE | SPACEMIT_CR_BEIE | \
+ SPACEMIT_CR_TXDONEIE | SPACEMIT_CR_TXEIE | \
+ SPACEMIT_CR_RXHFIE | SPACEMIT_CR_RXFIE | \
+ SPACEMIT_CR_RXOVIE | SPACEMIT_CR_MSDIE)
+
+/* SPACEMIT_ISR register fields */
+/* Bits 0-13 are reserved */
+#define SPACEMIT_SR_ACKNAK BIT(14) /* ACK/NACK status */
+#define SPACEMIT_SR_UB BIT(15) /* unit busy */
+#define SPACEMIT_SR_IBB BIT(16) /* i2c bus busy */
+#define SPACEMIT_SR_EBB BIT(17) /* early bus busy */
+#define SPACEMIT_SR_ALD BIT(18) /* arbitration loss detected */
+#define SPACEMIT_SR_ITE BIT(19) /* TX buffer empty */
+#define SPACEMIT_SR_IRF BIT(20) /* RX buffer full */
+#define SPACEMIT_SR_GCAD BIT(21) /* general call address detected */
+#define SPACEMIT_SR_BED BIT(22) /* bus error no ACK/NAK */
+#define SPACEMIT_SR_SAD BIT(23) /* slave address detected */
+#define SPACEMIT_SR_SSD BIT(24) /* slave stop detected */
+/* Bit 25 is reserved */
+#define SPACEMIT_SR_MSD BIT(26) /* master stop detected */
+#define SPACEMIT_SR_TXDONE BIT(27) /* transaction done */
+#define SPACEMIT_SR_TXE BIT(28) /* TX FIFO empty */
+#define SPACEMIT_SR_RXHF BIT(29) /* RX FIFO half-full */
+#define SPACEMIT_SR_RXF BIT(30) /* RX FIFO full */
+#define SPACEMIT_SR_RXOV BIT(31) /* RX FIFO overrun */
+
+#define SPACEMIT_I2C_INT_STATUS_MASK (SPACEMIT_SR_RXOV | SPACEMIT_SR_RXF | SPACEMIT_SR_RXHF | \
+ SPACEMIT_SR_TXE | SPACEMIT_SR_TXDONE | SPACEMIT_SR_MSD | \
+ SPACEMIT_SR_SSD | SPACEMIT_SR_SAD | SPACEMIT_SR_BED | \
+ SPACEMIT_SR_GCAD | SPACEMIT_SR_IRF | SPACEMIT_SR_ITE | \
+ SPACEMIT_SR_ALD)
+
+/* SPACEMIT_IBMR register fields */
+#define SPACEMIT_BMR_SDA BIT(0) /* SDA line level */
+#define SPACEMIT_BMR_SCL BIT(1) /* SCL line level */
+
+/* i2c bus recovery timeout, in microseconds */
+#define SPACEMIT_I2C_BUS_BUSY_TIMEOUT 100000
+
+#define SPACEMIT_I2C_MAX_STANDARD_MODE_FREQ 100000 /* Hz */
+#define SPACEMIT_I2C_MAX_FAST_MODE_FREQ 400000 /* Hz */
+
+#define SPACEMIT_SR_ERR (SPACEMIT_SR_BED | SPACEMIT_SR_RXOV | SPACEMIT_SR_ALD)
+
+enum spacemit_i2c_state {
+ SPACEMIT_STATE_IDLE,
+ SPACEMIT_STATE_START,
+ SPACEMIT_STATE_READ,
+ SPACEMIT_STATE_WRITE,
+};
+
+/* i2c-spacemit driver's main struct */
+struct spacemit_i2c_dev {
+ struct device *dev;
+ struct i2c_adapter adapt;
+
+ /* hardware resources */
+ void __iomem *base;
+ int irq;
+ u32 clock_freq;
+
+ struct i2c_msg *msgs;
+ u32 msg_num;
+
+ /* index of the current message being processed */
+ u32 msg_idx;
+ u8 *msg_buf;
+ /* the number of unprocessed bytes remaining in the current message */
+ u32 unprocessed;
+
+ enum spacemit_i2c_state state;
+ bool read;
+ struct completion complete;
+ u32 status;
+};
+
+static void spacemit_i2c_enable(struct spacemit_i2c_dev *i2c)
+{
+ u32 val;
+
+ val = readl(i2c->base + SPACEMIT_ICR);
+ val |= SPACEMIT_CR_IUE;
+ writel(val, i2c->base + SPACEMIT_ICR);
+}
+
+static void spacemit_i2c_disable(struct spacemit_i2c_dev *i2c)
+{
+ u32 val;
+
+ val = readl(i2c->base + SPACEMIT_ICR);
+ val &= ~SPACEMIT_CR_IUE;
+ writel(val, i2c->base + SPACEMIT_ICR);
+}
+
+static void spacemit_i2c_reset(struct spacemit_i2c_dev *i2c)
+{
+ writel(SPACEMIT_CR_UR, i2c->base + SPACEMIT_ICR);
+ udelay(5);
+ writel(0, i2c->base + SPACEMIT_ICR);
+}
+
+static int spacemit_i2c_handle_err(struct spacemit_i2c_dev *i2c)
+{
+ dev_dbg(i2c->dev, "i2c error status: 0x%08x\n", i2c->status);
+
+ if (i2c->status & (SPACEMIT_SR_BED | SPACEMIT_SR_ALD)) {
+ spacemit_i2c_reset(i2c);
+ return -EAGAIN;
+ }
+
+ return i2c->status & SPACEMIT_SR_ACKNAK ? -ENXIO : -EIO;
+}
+
+static void spacemit_i2c_conditionally_reset_bus(struct spacemit_i2c_dev *i2c)
+{
+ u32 status;
+
+ /* if the bus is locked, reset the unit; a line level of 0 means locked */
+ status = readl(i2c->base + SPACEMIT_IBMR);
+ if ((status & SPACEMIT_BMR_SDA) && (status & SPACEMIT_BMR_SCL))
+ return;
+
+ spacemit_i2c_reset(i2c);
+ usleep_range(10, 20);
+
+ /* check scl status again */
+ status = readl(i2c->base + SPACEMIT_IBMR);
+ if (!(status & SPACEMIT_BMR_SCL))
+ dev_warn_ratelimited(i2c->dev, "unit reset failed\n");
+}
+
+static int spacemit_i2c_wait_bus_idle(struct spacemit_i2c_dev *i2c)
+{
+ int ret;
+ u32 val;
+
+ val = readl(i2c->base + SPACEMIT_ISR);
+ if (!(val & (SPACEMIT_SR_UB | SPACEMIT_SR_IBB)))
+ return 0;
+
+ ret = readl_poll_timeout(i2c->base + SPACEMIT_ISR,
+ val, !(val & (SPACEMIT_SR_UB | SPACEMIT_SR_IBB)),
+ 1500, SPACEMIT_I2C_BUS_BUSY_TIMEOUT);
+ if (ret)
+ spacemit_i2c_reset(i2c);
+
+ return ret;
+}
+
+static void spacemit_i2c_check_bus_release(struct spacemit_i2c_dev *i2c)
+{
+ /* in case bus is not released after transfer completes */
+ if (readl(i2c->base + SPACEMIT_ISR) & SPACEMIT_SR_EBB) {
+ spacemit_i2c_conditionally_reset_bus(i2c);
+ usleep_range(90, 150);
+ }
+}
+
+static void spacemit_i2c_init(struct spacemit_i2c_dev *i2c)
+{
+ u32 val;
+
+ /*
+ * Unmask interrupt bits for all xfer modes:
+ * bus error, arbitration loss detected.
+ * For the transaction-complete signal we use the master stop
+ * interrupt, so SPACEMIT_CR_TXDONEIE does not need to be unmasked.
+ */
+ val = SPACEMIT_CR_BEIE | SPACEMIT_CR_ALDIE;
+
+ /*
+ * Unmask interrupt bits for interrupt xfer mode:
+ * When IDBR receives a byte, an interrupt is triggered.
+ *
+ * The TX-empty interrupt is enabled only in spacemit_i2c_start();
+ * enabling it earlier would raise a spurious empty interrupt
+ * before the transfer starts.
+ */
+ val |= SPACEMIT_CR_DRFIE;
+
+ if (i2c->clock_freq == SPACEMIT_I2C_MAX_FAST_MODE_FREQ)
+ val |= SPACEMIT_CR_MODE_FAST;
+
+ /* disable response to general call */
+ val |= SPACEMIT_CR_GCD;
+
+ /* enable SCL clock output */
+ val |= SPACEMIT_CR_SCLE;
+
+ /* enable master stop detected */
+ val |= SPACEMIT_CR_MSDE | SPACEMIT_CR_MSDIE;
+
+ writel(val, i2c->base + SPACEMIT_ICR);
+}
+
+static inline void
+spacemit_i2c_clear_int_status(struct spacemit_i2c_dev *i2c, u32 mask)
+{
+ writel(mask & SPACEMIT_I2C_INT_STATUS_MASK, i2c->base + SPACEMIT_ISR);
+}
+
+static void spacemit_i2c_start(struct spacemit_i2c_dev *i2c)
+{
+ u32 target_addr_rw, val;
+ struct i2c_msg *cur_msg = i2c->msgs + i2c->msg_idx;
+
+ i2c->read = !!(cur_msg->flags & I2C_M_RD);
+
+ i2c->state = SPACEMIT_STATE_START;
+
+ target_addr_rw = (cur_msg->addr & 0x7f) << 1;
+ if (cur_msg->flags & I2C_M_RD)
+ target_addr_rw |= 1;
+
+ writel(target_addr_rw, i2c->base + SPACEMIT_IDBR);
+
+ /* send start pulse */
+ val = readl(i2c->base + SPACEMIT_ICR);
+ val &= ~SPACEMIT_CR_STOP;
+ val |= SPACEMIT_CR_START | SPACEMIT_CR_TB | SPACEMIT_CR_DTEIE;
+ writel(val, i2c->base + SPACEMIT_ICR);
+}
+
+static void spacemit_i2c_stop(struct spacemit_i2c_dev *i2c)
+{
+ u32 val;
+
+ val = readl(i2c->base + SPACEMIT_ICR);
+ val |= SPACEMIT_CR_STOP | SPACEMIT_CR_ALDIE | SPACEMIT_CR_TB;
+
+ if (i2c->read)
+ val |= SPACEMIT_CR_ACKNAK;
+
+ writel(val, i2c->base + SPACEMIT_ICR);
+}
+
+static int spacemit_i2c_xfer_msg(struct spacemit_i2c_dev *i2c)
+{
+ unsigned long time_left;
+ struct i2c_msg *msg;
+
+ for (i2c->msg_idx = 0; i2c->msg_idx < i2c->msg_num; i2c->msg_idx++) {
+ msg = &i2c->msgs[i2c->msg_idx];
+ i2c->msg_buf = msg->buf;
+ i2c->unprocessed = msg->len;
+ i2c->status = 0;
+
+ reinit_completion(&i2c->complete);
+
+ spacemit_i2c_start(i2c);
+
+ time_left = wait_for_completion_timeout(&i2c->complete,
+ i2c->adapt.timeout);
+ if (!time_left) {
+ dev_err(i2c->dev, "msg completion timeout\n");
+ spacemit_i2c_conditionally_reset_bus(i2c);
+ spacemit_i2c_reset(i2c);
+ return -ETIMEDOUT;
+ }
+
+ if (i2c->status & SPACEMIT_SR_ERR)
+ return spacemit_i2c_handle_err(i2c);
+ }
+
+ return 0;
+}
+
+static bool spacemit_i2c_is_last_msg(struct spacemit_i2c_dev *i2c)
+{
+ if (i2c->msg_idx != i2c->msg_num - 1)
+ return false;
+
+ if (i2c->read)
+ return i2c->unprocessed == 1;
+
+ return !i2c->unprocessed;
+}
+
+static void spacemit_i2c_handle_write(struct spacemit_i2c_dev *i2c)
+{
+ /* if transfer completes, SPACEMIT_ISR will handle it */
+ if (i2c->status & SPACEMIT_SR_MSD)
+ return;
+
+ if (i2c->unprocessed) {
+ writel(*i2c->msg_buf++, i2c->base + SPACEMIT_IDBR);
+ i2c->unprocessed--;
+ return;
+ }
+
+ /* SPACEMIT_STATE_IDLE prevents triggering the next byte */
+ i2c->state = SPACEMIT_STATE_IDLE;
+ complete(&i2c->complete);
+}
+
+static void spacemit_i2c_handle_read(struct spacemit_i2c_dev *i2c)
+{
+ if (i2c->unprocessed) {
+ *i2c->msg_buf++ = readl(i2c->base + SPACEMIT_IDBR);
+ i2c->unprocessed--;
+ }
+
+ /* if transfer completes, SPACEMIT_ISR will handle it */
+ if (i2c->status & (SPACEMIT_SR_MSD | SPACEMIT_SR_ACKNAK))
+ return;
+
+ /* the last byte must be read with the STOP bit set in ICR */
+ if (i2c->unprocessed)
+ return;
+
+ /* SPACEMIT_STATE_IDLE prevents triggering the next byte */
+ i2c->state = SPACEMIT_STATE_IDLE;
+ complete(&i2c->complete);
+}
+
+static void spacemit_i2c_handle_start(struct spacemit_i2c_dev *i2c)
+{
+ i2c->state = i2c->read ? SPACEMIT_STATE_READ : SPACEMIT_STATE_WRITE;
+ if (i2c->state == SPACEMIT_STATE_WRITE)
+ spacemit_i2c_handle_write(i2c);
+}
+
+static void spacemit_i2c_err_check(struct spacemit_i2c_dev *i2c)
+{
+ u32 val;
+
+ /*
+ * Signal transaction completion: either an error occurred or
+ * a master STOP was detected.
+ */
+ if (!(i2c->status & (SPACEMIT_SR_ERR | SPACEMIT_SR_MSD)))
+ return;
+
+ /*
+ * The transaction is already done, so no further interrupt signals
+ * are needed. In case an interrupt fires before spacemit_i2c_xfer()
+ * disables the IRQ and the I2C unit, mask all interrupt signals
+ * and clear the interrupt status.
+ */
+ val = readl(i2c->base + SPACEMIT_ICR);
+ val &= ~SPACEMIT_I2C_INT_CTRL_MASK;
+ writel(val, i2c->base + SPACEMIT_ICR);
+
+ spacemit_i2c_clear_int_status(i2c, SPACEMIT_I2C_INT_STATUS_MASK);
+
+ i2c->state = SPACEMIT_STATE_IDLE;
+ complete(&i2c->complete);
+}
+
+static irqreturn_t spacemit_i2c_irq_handler(int irq, void *devid)
+{
+ struct spacemit_i2c_dev *i2c = devid;
+ u32 status, val;
+
+ status = readl(i2c->base + SPACEMIT_ISR);
+ if (!status)
+ return IRQ_HANDLED;
+
+ i2c->status = status;
+
+ spacemit_i2c_clear_int_status(i2c, status);
+
+ if (i2c->status & SPACEMIT_SR_ERR)
+ goto err_out;
+
+ val = readl(i2c->base + SPACEMIT_ICR);
+ val &= ~(SPACEMIT_CR_TB | SPACEMIT_CR_ACKNAK | SPACEMIT_CR_STOP | SPACEMIT_CR_START);
+ writel(val, i2c->base + SPACEMIT_ICR);
+
+ switch (i2c->state) {
+ case SPACEMIT_STATE_START:
+ spacemit_i2c_handle_start(i2c);
+ break;
+ case SPACEMIT_STATE_READ:
+ spacemit_i2c_handle_read(i2c);
+ break;
+ case SPACEMIT_STATE_WRITE:
+ spacemit_i2c_handle_write(i2c);
+ break;
+ default:
+ break;
+ }
+
+ if (i2c->state != SPACEMIT_STATE_IDLE) {
+ if (spacemit_i2c_is_last_msg(i2c)) {
+ /* trigger next byte with stop */
+ spacemit_i2c_stop(i2c);
+ } else {
+ /* trigger next byte */
+ val |= SPACEMIT_CR_ALDIE | SPACEMIT_CR_TB;
+ writel(val, i2c->base + SPACEMIT_ICR);
+ }
+ }
+
+err_out:
+ spacemit_i2c_err_check(i2c);
+ return IRQ_HANDLED;
+}
+
+static void spacemit_i2c_calc_timeout(struct spacemit_i2c_dev *i2c)
+{
+ unsigned long timeout;
+ int idx = 0, cnt = 0;
+
+ for (; idx < i2c->msg_num; idx++)
+ cnt += (i2c->msgs + idx)->len + 1;
+
+ /*
+ * Multiply by 9 because each byte in I2C transmission requires
+ * 9 clock cycles: 8 bits of data plus 1 ACK/NACK bit.
+ */
+ timeout = cnt * 9 * USEC_PER_SEC / i2c->clock_freq;
+
+ i2c->adapt.timeout = usecs_to_jiffies(timeout + USEC_PER_SEC / 10) / i2c->msg_num;
+}
+
+static int spacemit_i2c_xfer(struct i2c_adapter *adapt, struct i2c_msg *msgs, int num)
+{
+ struct spacemit_i2c_dev *i2c = i2c_get_adapdata(adapt);
+ int ret;
+
+ i2c->msgs = msgs;
+ i2c->msg_num = num;
+
+ spacemit_i2c_calc_timeout(i2c);
+
+ spacemit_i2c_init(i2c);
+
+ spacemit_i2c_enable(i2c);
+
+ ret = spacemit_i2c_wait_bus_idle(i2c);
+ if (!ret)
+ ret = spacemit_i2c_xfer_msg(i2c);
+ else if (ret < 0)
+ dev_dbg(i2c->dev, "i2c transfer error: %d\n", ret);
+ else
+ spacemit_i2c_check_bus_release(i2c);
+
+ spacemit_i2c_disable(i2c);
+
+ if (ret == -ETIMEDOUT || ret == -EAGAIN)
+ dev_err(i2c->dev, "i2c transfer failed, ret %d err 0x%lx\n",
+ ret, i2c->status & SPACEMIT_SR_ERR);
+
+ return ret < 0 ? ret : num;
+}
+
+static u32 spacemit_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+}
+
+static const struct i2c_algorithm spacemit_i2c_algo = {
+ .xfer = spacemit_i2c_xfer,
+ .functionality = spacemit_i2c_func,
+};
+
+static int spacemit_i2c_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+ struct device *dev = &pdev->dev;
+ struct device_node *of_node = pdev->dev.of_node;
+ struct spacemit_i2c_dev *i2c;
+ int ret;
+
+ i2c = devm_kzalloc(dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(of_node, "clock-frequency", &i2c->clock_freq);
+ if (ret && ret != -EINVAL)
+ dev_warn(dev, "failed to read clock-frequency property: %d\n", ret);
+
+ /* For now, this driver doesn't support high-speed. */
+ if (!i2c->clock_freq || i2c->clock_freq > SPACEMIT_I2C_MAX_FAST_MODE_FREQ) {
+ dev_warn(dev, "unsupported clock frequency %u; using %u\n",
+ i2c->clock_freq, SPACEMIT_I2C_MAX_FAST_MODE_FREQ);
+ i2c->clock_freq = SPACEMIT_I2C_MAX_FAST_MODE_FREQ;
+ } else if (i2c->clock_freq < SPACEMIT_I2C_MAX_STANDARD_MODE_FREQ) {
+ dev_warn(dev, "unsupported clock frequency %u; using %u\n",
+ i2c->clock_freq, SPACEMIT_I2C_MAX_STANDARD_MODE_FREQ);
+ i2c->clock_freq = SPACEMIT_I2C_MAX_STANDARD_MODE_FREQ;
+ }
+
+ i2c->dev = &pdev->dev;
+
+ i2c->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(i2c->base))
+ return dev_err_probe(dev, PTR_ERR(i2c->base), "failed to do ioremap");
+
+ i2c->irq = platform_get_irq(pdev, 0);
+ if (i2c->irq < 0)
+ return dev_err_probe(dev, i2c->irq, "failed to get irq resource");
+
+ ret = devm_request_irq(i2c->dev, i2c->irq, spacemit_i2c_irq_handler,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT, dev_name(i2c->dev), i2c);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request irq");
+
+ clk = devm_clk_get_enabled(dev, "func");
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "failed to enable func clock");
+
+ clk = devm_clk_get_enabled(dev, "bus");
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "failed to enable bus clock");
+
+ spacemit_i2c_reset(i2c);
+
+ i2c_set_adapdata(&i2c->adapt, i2c);
+ i2c->adapt.owner = THIS_MODULE;
+ i2c->adapt.algo = &spacemit_i2c_algo;
+ i2c->adapt.dev.parent = i2c->dev;
+ i2c->adapt.nr = pdev->id;
+
+ i2c->adapt.dev.of_node = of_node;
+
+ strscpy(i2c->adapt.name, "spacemit-i2c-adapter", sizeof(i2c->adapt.name));
+
+ init_completion(&i2c->complete);
+
+ platform_set_drvdata(pdev, i2c);
+
+ ret = i2c_add_numbered_adapter(&i2c->adapt);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to add i2c adapter");
+
+ return 0;
+}
+
+static void spacemit_i2c_remove(struct platform_device *pdev)
+{
+ struct spacemit_i2c_dev *i2c = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&i2c->adapt);
+}
+
+static const struct of_device_id spacemit_i2c_of_match[] = {
+ { .compatible = "spacemit,k1-i2c", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, spacemit_i2c_of_match);
+
+static struct platform_driver spacemit_i2c_driver = {
+ .probe = spacemit_i2c_probe,
+ .remove = spacemit_i2c_remove,
+ .driver = {
+ .name = "i2c-k1",
+ .of_match_table = spacemit_i2c_of_match,
+ },
+};
+module_platform_driver(spacemit_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("I2C bus driver for SpacemiT K1 SoC");
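The 9-cycles-per-byte budget used by spacemit_i2c_calc_timeout() above is easy to check with concrete numbers. A minimal user-space sketch of the same arithmetic (message counts and clock rate are illustrative, not taken from the driver):

#include <stdio.h>

#define USEC_PER_SEC	1000000UL

int main(void)
{
	/* e.g. two messages of 16 payload bytes, plus 1 address byte each */
	unsigned long cnt = 2 * (16 + 1);
	unsigned long clock_freq = 100000;	/* 100 kHz standard mode */

	/* 8 data bits plus 1 ACK/NACK bit per byte on the wire */
	unsigned long xfer_us = cnt * 9 * USEC_PER_SEC / clock_freq;

	/* the driver adds a 100 ms guard band, then divides per message */
	unsigned long per_msg_us = (xfer_us + USEC_PER_SEC / 10) / 2;

	printf("%lu us total, %lu us per message\n", xfer_us, per_msg_us);
	return 0;
}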
diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c
index 212196af68ba..9b4c7cba62b6 100644
--- a/drivers/i2c/busses/i2c-kempld.c
+++ b/drivers/i2c/busses/i2c-kempld.c
@@ -115,9 +115,7 @@ static int kempld_i2c_process(struct kempld_i2c_data *i2c)
if (i2c->state == STATE_ADDR) {
/* 10 bit address? */
if (i2c->msg->flags & I2C_M_TEN) {
- addr = 0xf0 | ((i2c->msg->addr >> 7) & 0x6);
- /* Set read bit if necessary */
- addr |= (i2c->msg->flags & I2C_M_RD) ? 1 : 0;
+ addr = i2c_10bit_addr_hi_from_msg(i2c->msg);
i2c->state = STATE_ADDR10;
} else {
addr = i2c_8bit_addr_from_msg(i2c->msg);
@@ -132,10 +130,12 @@ static int kempld_i2c_process(struct kempld_i2c_data *i2c)
/* Second part of 10 bit addressing */
if (i2c->state == STATE_ADDR10) {
- kempld_write8(pld, KEMPLD_I2C_DATA, i2c->msg->addr & 0xff);
+ addr = i2c_10bit_addr_lo_from_msg(i2c->msg);
+ i2c->state = STATE_START;
+
+ kempld_write8(pld, KEMPLD_I2C_DATA, addr);
kempld_write8(pld, KEMPLD_I2C_CMD, I2C_CMD_WRITE);
- i2c->state = STATE_START;
return 0;
}
diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
index 21f67f3b65b6..280dde53d7f3 100644
--- a/drivers/i2c/busses/i2c-mlxbf.c
+++ b/drivers/i2c/busses/i2c-mlxbf.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -495,65 +496,6 @@ static u8 mlxbf_i2c_bus_count;
static struct mutex mlxbf_i2c_bus_lock;
-/*
- * Function to poll a set of bits at a specific address; it checks whether
- * the bits are equal to zero when eq_zero is set to 'true', and not equal
- * to zero when eq_zero is set to 'false'.
- * Note that the timeout is given in microseconds.
- */
-static u32 mlxbf_i2c_poll(void __iomem *io, u32 addr, u32 mask,
- bool eq_zero, u32 timeout)
-{
- u32 bits;
-
- timeout = (timeout / MLXBF_I2C_POLL_FREQ_IN_USEC) + 1;
-
- do {
- bits = readl(io + addr) & mask;
- if (eq_zero ? bits == 0 : bits != 0)
- return eq_zero ? 1 : bits;
- udelay(MLXBF_I2C_POLL_FREQ_IN_USEC);
- } while (timeout-- != 0);
-
- return 0;
-}
-
-/*
- * SW must make sure that the SMBus Master GW is idle before starting
- * a transaction. Accordingly, this function polls the Master FSM stop
- * bit; it returns false when the bit is asserted, true if not.
- */
-static bool mlxbf_i2c_smbus_master_wait_for_idle(struct mlxbf_i2c_priv *priv)
-{
- u32 mask = MLXBF_I2C_SMBUS_MASTER_FSM_STOP_MASK;
- u32 addr = priv->chip->smbus_master_fsm_off;
- u32 timeout = MLXBF_I2C_SMBUS_TIMEOUT;
-
- if (mlxbf_i2c_poll(priv->mst->io, addr, mask, true, timeout))
- return true;
-
- return false;
-}
-
-/*
- * wait for the lock to be released before acquiring it.
- */
-static bool mlxbf_i2c_smbus_master_lock(struct mlxbf_i2c_priv *priv)
-{
- if (mlxbf_i2c_poll(priv->mst->io, MLXBF_I2C_SMBUS_MASTER_GW,
- MLXBF_I2C_MASTER_LOCK_BIT, true,
- MLXBF_I2C_SMBUS_LOCK_POLL_TIMEOUT))
- return true;
-
- return false;
-}
-
-static void mlxbf_i2c_smbus_master_unlock(struct mlxbf_i2c_priv *priv)
-{
- /* Clear the gw to clear the lock */
- writel(0, priv->mst->io + MLXBF_I2C_SMBUS_MASTER_GW);
-}
-
static bool mlxbf_i2c_smbus_transaction_success(u32 master_status,
u32 cause_status)
{
@@ -583,6 +525,7 @@ static int mlxbf_i2c_smbus_check_status(struct mlxbf_i2c_priv *priv)
{
u32 master_status_bits;
u32 cause_status_bits;
+ u32 bits;
/*
* GW busy bit is raised by the driver and cleared by the HW
@@ -591,9 +534,9 @@ static int mlxbf_i2c_smbus_check_status(struct mlxbf_i2c_priv *priv)
* then read the cause and master status bits to determine if
* errors occurred during the transaction.
*/
- mlxbf_i2c_poll(priv->mst->io, MLXBF_I2C_SMBUS_MASTER_GW,
- MLXBF_I2C_MASTER_BUSY_BIT, true,
- MLXBF_I2C_SMBUS_TIMEOUT);
+ readl_poll_timeout_atomic(priv->mst->io + MLXBF_I2C_SMBUS_MASTER_GW,
+ bits, !(bits & MLXBF_I2C_MASTER_BUSY_BIT),
+ MLXBF_I2C_POLL_FREQ_IN_USEC, MLXBF_I2C_SMBUS_TIMEOUT);
/* Read cause status bits. */
cause_status_bits = readl(priv->mst_cause->io +
@@ -740,7 +683,8 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
u8 read_en, write_en, block_en, pec_en;
u8 slave, flags, addr;
u8 *read_buf;
- int ret = 0;
+ u32 bits;
+ int ret;
if (request->operation_cnt > MLXBF_I2C_SMBUS_MAX_OP_CNT)
return -EINVAL;
@@ -760,11 +704,22 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
* Try to acquire the smbus gw lock before any reads of the GW register since
* a read sets the lock.
*/
- if (WARN_ON(!mlxbf_i2c_smbus_master_lock(priv)))
+ ret = readl_poll_timeout_atomic(priv->mst->io + MLXBF_I2C_SMBUS_MASTER_GW,
+ bits, !(bits & MLXBF_I2C_MASTER_LOCK_BIT),
+ MLXBF_I2C_POLL_FREQ_IN_USEC,
+ MLXBF_I2C_SMBUS_LOCK_POLL_TIMEOUT);
+ if (WARN_ON(ret))
return -EBUSY;
- /* Check whether the HW is idle */
- if (WARN_ON(!mlxbf_i2c_smbus_master_wait_for_idle(priv))) {
+ /*
+ * SW must make sure that the SMBus Master GW is idle before starting
+ * a transaction. Accordingly, this call polls the Master FSM stop bit;
+ * it returns -ETIMEDOUT when the bit is asserted, 0 if not.
+ */
+ ret = readl_poll_timeout_atomic(priv->mst->io + priv->chip->smbus_master_fsm_off,
+ bits, !(bits & MLXBF_I2C_SMBUS_MASTER_FSM_STOP_MASK),
+ MLXBF_I2C_POLL_FREQ_IN_USEC, MLXBF_I2C_SMBUS_TIMEOUT);
+ if (WARN_ON(ret)) {
ret = -EBUSY;
goto out_unlock;
}
@@ -855,7 +810,8 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
}
out_unlock:
- mlxbf_i2c_smbus_master_unlock(priv);
+ /* Clear the gw to clear the lock */
+ writel(0, priv->mst->io + MLXBF_I2C_SMBUS_MASTER_GW);
return ret;
}
@@ -1829,18 +1785,6 @@ static bool mlxbf_i2c_has_coalesce(struct mlxbf_i2c_priv *priv, bool *read,
return true;
}
-static bool mlxbf_i2c_slave_wait_for_idle(struct mlxbf_i2c_priv *priv,
- u32 timeout)
-{
- u32 mask = MLXBF_I2C_CAUSE_S_GW_BUSY_FALL;
- u32 addr = MLXBF_I2C_CAUSE_ARBITER;
-
- if (mlxbf_i2c_poll(priv->slv_cause->io, addr, mask, false, timeout))
- return true;
-
- return false;
-}
-
static struct i2c_client *mlxbf_i2c_get_slave_from_addr(
struct mlxbf_i2c_priv *priv, u8 addr)
{
@@ -1943,7 +1887,9 @@ static int mlxbf_i2c_irq_send(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
* Wait until the transfer is completed; the driver will wait
* until the GW is idle, a cause will rise on fall of GW busy.
*/
- mlxbf_i2c_slave_wait_for_idle(priv, MLXBF_I2C_SMBUS_TIMEOUT);
+ readl_poll_timeout_atomic(priv->slv_cause->io + MLXBF_I2C_CAUSE_ARBITER,
+ data32, data32 & MLXBF_I2C_CAUSE_S_GW_BUSY_FALL,
+ MLXBF_I2C_POLL_FREQ_IN_USEC, MLXBF_I2C_SMBUS_TIMEOUT);
clear_csr:
/* Release the Slave GW. */
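The mlxbf hunks above all replace the open-coded poller with the same iopoll idiom; a minimal sketch of the pattern, with a made-up register and bit (not the BlueField layout):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define DEMO_REG_STATUS		0x10	/* hypothetical register offset */
#define DEMO_STATUS_BUSY	BIT(0)	/* hypothetical busy flag */

static int demo_wait_idle(void __iomem *base)
{
	u32 val;

	/*
	 * Poll every 200 us for up to 500000 us; returns 0 once the
	 * condition evaluates true, -ETIMEDOUT otherwise. The _atomic
	 * variant busy-waits with udelay() and is safe in atomic context.
	 */
	return readl_poll_timeout_atomic(base + DEMO_REG_STATUS, val,
					 !(val & DEMO_STATUS_BUSY),
					 200, 500000);
}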
diff --git a/drivers/i2c/busses/i2c-mt7621.c b/drivers/i2c/busses/i2c-mt7621.c
index 2103f21f9ddd..0a288c998419 100644
--- a/drivers/i2c/busses/i2c-mt7621.c
+++ b/drivers/i2c/busses/i2c-mt7621.c
@@ -164,22 +164,18 @@ static int mtk_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
/* write address */
if (pmsg->flags & I2C_M_TEN) {
/* 10 bits address */
- addr = 0xf0 | ((pmsg->addr >> 7) & 0x06);
- addr |= (pmsg->addr & 0xff) << 8;
- if (pmsg->flags & I2C_M_RD)
- addr |= 1;
- iowrite32(addr, i2c->base + REG_SM0D0_REG);
- ret = mtk_i2c_cmd(i2c, SM0CTL1_WRITE, 2);
- if (ret)
- goto err_timeout;
+ addr = i2c_10bit_addr_hi_from_msg(pmsg);
+ addr |= i2c_10bit_addr_lo_from_msg(pmsg) << 8;
+ len = 2;
} else {
/* 7 bits address */
addr = i2c_8bit_addr_from_msg(pmsg);
- iowrite32(addr, i2c->base + REG_SM0D0_REG);
- ret = mtk_i2c_cmd(i2c, SM0CTL1_WRITE, 1);
- if (ret)
- goto err_timeout;
+ len = 1;
}
+ iowrite32(addr, i2c->base + REG_SM0D0_REG);
+ ret = mtk_i2c_cmd(i2c, SM0CTL1_WRITE, len);
+ if (ret)
+ goto err_timeout;
/* check address ACK */
if (!(pmsg->flags & I2C_M_IGNORE_NAK)) {
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 874309580c33..8fc26a511320 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -27,7 +27,6 @@
#include <linux/err.h>
#include <linux/delay.h>
-#define MV64XXX_I2C_ADDR_ADDR(val) ((val & 0x7f) << 1)
#define MV64XXX_I2C_BAUD_DIV_N(val) (val & 0x7)
#define MV64XXX_I2C_BAUD_DIV_M(val) ((val & 0xf) << 3)
@@ -176,22 +175,17 @@ static void
mv64xxx_i2c_prepare_for_io(struct mv64xxx_i2c_data *drv_data,
struct i2c_msg *msg)
{
- u32 dir = 0;
-
drv_data->cntl_bits = MV64XXX_I2C_REG_CONTROL_ACK |
MV64XXX_I2C_REG_CONTROL_TWSIEN;
if (!drv_data->atomic)
drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_INTEN;
- if (msg->flags & I2C_M_RD)
- dir = 1;
-
if (msg->flags & I2C_M_TEN) {
- drv_data->addr1 = 0xf0 | (((u32)msg->addr & 0x300) >> 7) | dir;
- drv_data->addr2 = (u32)msg->addr & 0xff;
+ drv_data->addr1 = i2c_10bit_addr_hi_from_msg(msg);
+ drv_data->addr2 = i2c_10bit_addr_lo_from_msg(msg);
} else {
- drv_data->addr1 = MV64XXX_I2C_ADDR_ADDR((u32)msg->addr) | dir;
+ drv_data->addr1 = i2c_8bit_addr_from_msg(msg);
drv_data->addr2 = 0;
}
}
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
index 16cc34a0526e..baf6b27f3752 100644
--- a/drivers/i2c/busses/i2c-octeon-core.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -45,7 +45,7 @@ static bool octeon_i2c_test_iflg(struct octeon_i2c *i2c)
* octeon_i2c_wait - wait for the IFLG to be set
* @i2c: The struct octeon_i2c
*
- * Returns 0 on success, otherwise a negative errno.
+ * Returns: 0 on success, otherwise a negative errno.
*/
static int octeon_i2c_wait(struct octeon_i2c *i2c)
{
@@ -139,7 +139,7 @@ static void octeon_i2c_hlc_disable(struct octeon_i2c *i2c)
* octeon_i2c_hlc_wait - wait for an HLC operation to complete
* @i2c: The struct octeon_i2c
*
- * Returns 0 on success, otherwise -ETIMEDOUT.
+ * Returns: 0 on success, otherwise -ETIMEDOUT.
*/
static int octeon_i2c_hlc_wait(struct octeon_i2c *i2c)
{
@@ -273,7 +273,7 @@ static int octeon_i2c_recovery(struct octeon_i2c *i2c)
* octeon_i2c_start - send START to the bus
* @i2c: The struct octeon_i2c
*
- * Returns 0 on success, otherwise a negative errno.
+ * Returns: 0 on success, otherwise a negative errno.
*/
static int octeon_i2c_start(struct octeon_i2c *i2c)
{
@@ -314,7 +314,7 @@ static void octeon_i2c_stop(struct octeon_i2c *i2c)
*
* The address is sent over the bus, then the data is read.
*
- * Returns 0 on success, otherwise a negative errno.
+ * Returns: 0 on success, otherwise a negative errno.
*/
static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
u8 *data, u16 *rlength, bool recv_len)
@@ -382,7 +382,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
*
* The address is sent over the bus, then the data.
*
- * Returns 0 on success, otherwise a negative errno.
+ * Returns: 0 on success, otherwise a negative errno.
*/
static int octeon_i2c_write(struct octeon_i2c *i2c, int target,
const u8 *data, int length)
@@ -421,17 +421,12 @@ static int octeon_i2c_hlc_read(struct octeon_i2c *i2c, struct i2c_msg *msgs)
octeon_i2c_hlc_enable(i2c);
octeon_i2c_hlc_int_clear(i2c);
- cmd = SW_TWSI_V | SW_TWSI_R | SW_TWSI_SOVR;
+ cmd = SW_TWSI_V | SW_TWSI_R | SW_TWSI_SOVR | SW_TWSI_OP_7;
/* SIZE */
cmd |= (u64)(msgs[0].len - 1) << SW_TWSI_SIZE_SHIFT;
/* A */
cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT;
- if (msgs[0].flags & I2C_M_TEN)
- cmd |= SW_TWSI_OP_10;
- else
- cmd |= SW_TWSI_OP_7;
-
octeon_i2c_writeq_flush(cmd, i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
ret = octeon_i2c_hlc_wait(i2c);
if (ret)
@@ -463,17 +458,12 @@ static int octeon_i2c_hlc_write(struct octeon_i2c *i2c, struct i2c_msg *msgs)
octeon_i2c_hlc_enable(i2c);
octeon_i2c_hlc_int_clear(i2c);
- cmd = SW_TWSI_V | SW_TWSI_SOVR;
+ cmd = SW_TWSI_V | SW_TWSI_SOVR | SW_TWSI_OP_7;
/* SIZE */
cmd |= (u64)(msgs[0].len - 1) << SW_TWSI_SIZE_SHIFT;
/* A */
cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT;
- if (msgs[0].flags & I2C_M_TEN)
- cmd |= SW_TWSI_OP_10;
- else
- cmd |= SW_TWSI_OP_7;
-
for (i = 0, j = msgs[0].len - 1; i < msgs[0].len && i < 4; i++, j--)
cmd |= (u64)msgs[0].buf[j] << (8 * i);
@@ -498,6 +488,45 @@ err:
return ret;
}
+/* Process hlc transaction */
+static int octeon_i2c_hlc_cmd_send(struct octeon_i2c *i2c, u64 cmd)
+{
+ octeon_i2c_hlc_int_clear(i2c);
+ octeon_i2c_writeq_flush(cmd, i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
+
+ return octeon_i2c_hlc_wait(i2c);
+}
+
+/* Handle extended internal addresses in i2c hlc read/write ops */
+static bool octeon_i2c_hlc_ext(struct octeon_i2c *i2c, struct i2c_msg msg, u64 *cmd_in, u64 *ext)
+{
+ bool set_ext = false;
+ u64 cmd = 0;
+
+ if (msg.len == 2) {
+ cmd |= SW_TWSI_EIA;
+ *ext = (u64)msg.buf[0] << SW_TWSI_IA_SHIFT;
+ cmd |= (u64)msg.buf[1] << SW_TWSI_IA_SHIFT;
+ set_ext = true;
+ } else {
+ cmd |= (u64)msg.buf[0] << SW_TWSI_IA_SHIFT;
+ }
+
+ *cmd_in |= cmd;
+ return set_ext;
+}
+
+/* Construct and send i2c transaction core cmd for read ops */
+static int octeon_i2c_hlc_read_cmd(struct octeon_i2c *i2c, struct i2c_msg msg, u64 cmd)
+{
+ u64 ext = 0;
+
+ if (octeon_i2c_hlc_ext(i2c, msg, &cmd, &ext))
+ octeon_i2c_writeq_flush(ext, i2c->twsi_base + OCTEON_REG_SW_TWSI_EXT(i2c));
+
+ return octeon_i2c_hlc_cmd_send(i2c, cmd);
+}
+
/* high-level-controller composite write+read, msg0=addr, msg1=data */
static int octeon_i2c_hlc_comp_read(struct octeon_i2c *i2c, struct i2c_msg *msgs)
{
@@ -506,32 +535,14 @@ static int octeon_i2c_hlc_comp_read(struct octeon_i2c *i2c, struct i2c_msg *msgs
octeon_i2c_hlc_enable(i2c);
- cmd = SW_TWSI_V | SW_TWSI_R | SW_TWSI_SOVR;
+ cmd = SW_TWSI_V | SW_TWSI_R | SW_TWSI_SOVR | SW_TWSI_OP_7_IA;
/* SIZE */
cmd |= (u64)(msgs[1].len - 1) << SW_TWSI_SIZE_SHIFT;
/* A */
cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT;
- if (msgs[0].flags & I2C_M_TEN)
- cmd |= SW_TWSI_OP_10_IA;
- else
- cmd |= SW_TWSI_OP_7_IA;
-
- if (msgs[0].len == 2) {
- u64 ext = 0;
-
- cmd |= SW_TWSI_EIA;
- ext = (u64)msgs[0].buf[0] << SW_TWSI_IA_SHIFT;
- cmd |= (u64)msgs[0].buf[1] << SW_TWSI_IA_SHIFT;
- octeon_i2c_writeq_flush(ext, i2c->twsi_base + OCTEON_REG_SW_TWSI_EXT(i2c));
- } else {
- cmd |= (u64)msgs[0].buf[0] << SW_TWSI_IA_SHIFT;
- }
-
- octeon_i2c_hlc_int_clear(i2c);
- octeon_i2c_writeq_flush(cmd, i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
-
- ret = octeon_i2c_hlc_wait(i2c);
+ /* Send core command */
+ ret = octeon_i2c_hlc_read_cmd(i2c, msgs[0], cmd);
if (ret)
goto err;
@@ -561,25 +572,14 @@ static int octeon_i2c_hlc_comp_write(struct octeon_i2c *i2c, struct i2c_msg *msg
octeon_i2c_hlc_enable(i2c);
- cmd = SW_TWSI_V | SW_TWSI_SOVR;
+ cmd = SW_TWSI_V | SW_TWSI_SOVR | SW_TWSI_OP_7_IA;
/* SIZE */
cmd |= (u64)(msgs[1].len - 1) << SW_TWSI_SIZE_SHIFT;
/* A */
cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT;
- if (msgs[0].flags & I2C_M_TEN)
- cmd |= SW_TWSI_OP_10_IA;
- else
- cmd |= SW_TWSI_OP_7_IA;
-
- if (msgs[0].len == 2) {
- cmd |= SW_TWSI_EIA;
- ext |= (u64)msgs[0].buf[0] << SW_TWSI_IA_SHIFT;
- set_ext = true;
- cmd |= (u64)msgs[0].buf[1] << SW_TWSI_IA_SHIFT;
- } else {
- cmd |= (u64)msgs[0].buf[0] << SW_TWSI_IA_SHIFT;
- }
+ /* Set parameters for extended message (if required) */
+ set_ext = octeon_i2c_hlc_ext(i2c, msgs[0], &cmd, &ext);
for (i = 0, j = msgs[1].len - 1; i < msgs[1].len && i < 4; i++, j--)
cmd |= (u64)msgs[1].buf[j] << (8 * i);
@@ -592,10 +592,7 @@ static int octeon_i2c_hlc_comp_write(struct octeon_i2c *i2c, struct i2c_msg *msg
if (set_ext)
octeon_i2c_writeq_flush(ext, i2c->twsi_base + OCTEON_REG_SW_TWSI_EXT(i2c));
- octeon_i2c_hlc_int_clear(i2c);
- octeon_i2c_writeq_flush(cmd, i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
-
- ret = octeon_i2c_hlc_wait(i2c);
+ ret = octeon_i2c_hlc_cmd_send(i2c, cmd);
if (ret)
goto err;
@@ -613,7 +610,7 @@ err:
* @msgs: Pointer to the messages to be processed
* @num: Length of the MSGS array
*
- * Returns the number of messages processed, or a negative errno on failure.
+ * Returns: the number of messages processed, or a negative errno on failure.
*/
int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
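For context, the composite write+read path refactored above (msg0=addr, msg1=data) is driven by an ordinary two-message transfer; a hedged sketch from the client side (device and register addresses are made up):

#include <linux/i2c.h>

/* Read "len" bytes from a hypothetical 16-bit register 0x1234 at 0x50 */
static int demo_read_reg(struct i2c_adapter *adap, u8 *buf, u16 len)
{
	u8 ia[2] = { 0x12, 0x34 };	/* internal address, MSB first */
	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0, .len = 2, .buf = ia },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = len, .buf = buf },
	};

	/*
	 * msgs[0].len == 2 is what makes octeon_i2c_hlc_ext() set
	 * SW_TWSI_EIA and stash the first IA byte in the EXT register.
	 */
	return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
}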
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index f18c3e74b076..16afb9ca19bb 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/mux/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/platform_data/i2c-omap.h>
@@ -211,6 +212,7 @@ struct omap_i2c_dev {
u16 syscstate;
u16 westate;
u16 errata;
+ struct mux_state *mux_state;
};
static const u8 reg_map_ip_v1[] = {
@@ -1452,6 +1454,23 @@ omap_i2c_probe(struct platform_device *pdev)
(1000 * omap->speed / 8);
}
+ if (of_property_read_bool(node, "mux-states")) {
+ struct mux_state *mux_state;
+
+ mux_state = devm_mux_state_get(&pdev->dev, NULL);
+ if (IS_ERR(mux_state)) {
+ r = PTR_ERR(mux_state);
+ dev_dbg(&pdev->dev, "failed to get I2C mux: %d\n", r);
+ goto err_disable_pm;
+ }
+ omap->mux_state = mux_state;
+ r = mux_state_select(omap->mux_state);
+ if (r) {
+ dev_err(&pdev->dev, "failed to select I2C mux: %d\n", r);
+ goto err_disable_pm;
+ }
+ }
+
/* reset ASAP, clearing any IRQs */
omap_i2c_init(omap);
@@ -1511,6 +1530,9 @@ static void omap_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&omap->adapter);
+ if (omap->mux_state)
+ mux_state_deselect(omap->mux_state);
+
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
dev_err(omap->dev, "Failed to resume hardware, skip disable\n");
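The mux handling added to omap above follows the generic mux consumer contract; a minimal sketch of that select/deselect pairing, independent of the omap specifics (names are illustrative):

#include <linux/err.h>
#include <linux/mux/consumer.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct mux_state *state;
	int ret;

	state = devm_mux_state_get(&pdev->dev, NULL);
	if (IS_ERR(state))
		return PTR_ERR(state);

	/*
	 * Claims the mux and drives it to the state named in the
	 * firmware; must be balanced by mux_state_deselect() on remove.
	 */
	ret = mux_state_select(state);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, state);
	return 0;
}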
diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c
index dac694a9d781..bd128ab2e2eb 100644
--- a/drivers/i2c/busses/i2c-pasemi-core.c
+++ b/drivers/i2c/busses/i2c-pasemi-core.c
@@ -5,6 +5,7 @@
* SMBus host driver for PA Semi PWRficient
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
@@ -26,21 +27,30 @@
#define REG_REV 0x28
/* Register defs */
-#define MTXFIFO_READ 0x00000400
-#define MTXFIFO_STOP 0x00000200
-#define MTXFIFO_START 0x00000100
-#define MTXFIFO_DATA_M 0x000000ff
-
-#define MRXFIFO_EMPTY 0x00000100
-#define MRXFIFO_DATA_M 0x000000ff
-
-#define SMSTA_XEN 0x08000000
-#define SMSTA_MTN 0x00200000
-
-#define CTL_MRR 0x00000400
-#define CTL_MTR 0x00000200
-#define CTL_EN 0x00000800
-#define CTL_CLK_M 0x000000ff
+#define MTXFIFO_READ BIT(10)
+#define MTXFIFO_STOP BIT(9)
+#define MTXFIFO_START BIT(8)
+#define MTXFIFO_DATA_M GENMASK(7, 0)
+
+#define MRXFIFO_EMPTY BIT(8)
+#define MRXFIFO_DATA_M GENMASK(7, 0)
+
+#define SMSTA_XIP BIT(28)
+#define SMSTA_XEN BIT(27)
+#define SMSTA_JMD BIT(25)
+#define SMSTA_JAM BIT(24)
+#define SMSTA_MTO BIT(23)
+#define SMSTA_MTA BIT(22)
+#define SMSTA_MTN BIT(21)
+#define SMSTA_MRNE BIT(19)
+#define SMSTA_MTE BIT(16)
+#define SMSTA_TOM BIT(6)
+
+#define CTL_EN BIT(11)
+#define CTL_MRR BIT(10)
+#define CTL_MTR BIT(9)
+#define CTL_UJM BIT(8)
+#define CTL_CLK_M GENMASK(7, 0)
static inline void reg_write(struct pasemi_smbus *smbus, int reg, int val)
{
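The pasemi register defines above are a mechanical magic-number-to-macro conversion; a short sketch of the equivalences and of how FIELD_PREP() pairs with a GENMASK() field (standalone, not driver code):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/build_bug.h>
#include <linux/types.h>

#define DEMO_CLK_M	GENMASK(7, 0)	/* mirrors CTL_CLK_M above */

static_assert(BIT(10) == 0x00000400);		/* old MTXFIFO_READ value */
static_assert(GENMASK(7, 0) == 0x000000ff);	/* old MTXFIFO_DATA_M value */

static inline u32 demo_set_clk(u32 ctl, u8 div)
{
	/* FIELD_PREP() masks and shifts the value into the field */
	return (ctl & ~DEMO_CLK_M) | FIELD_PREP(DEMO_CLK_M, div);
}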
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index cb6988482673..4415a29f749b 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1503,7 +1503,10 @@ static int i2c_pxa_probe(struct platform_device *dev)
i2c->adap.name);
}
- clk_prepare_enable(i2c->clk);
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ return dev_err_probe(&dev->dev, ret,
+ "failed to enable clock\n");
if (i2c->use_pio) {
i2c->adap.algo = &i2c_pxa_pio_algorithm;
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 7bbd478171e0..515a784c951c 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -148,9 +148,9 @@ struct geni_i2c_clk_fld {
* source_clock = 19.2 MHz
*/
static const struct geni_i2c_clk_fld geni_i2c_clk_map_19p2mhz[] = {
- {KHZ(100), 7, 10, 11, 26},
- {KHZ(400), 2, 5, 12, 24},
- {KHZ(1000), 1, 3, 9, 18},
+ {KHZ(100), 7, 10, 12, 26},
+ {KHZ(400), 2, 5, 11, 22},
+ {KHZ(1000), 1, 2, 8, 18},
{},
};
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index da20b4487c9a..3a36d682ed57 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -14,6 +14,7 @@
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/i2c.h>
+#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -150,6 +151,8 @@
/* TAG length for DATA READ in RX FIFO */
#define READ_RX_TAGS_LEN 2
+#define QUP_BUS_WIDTH 8
+
static unsigned int scl_freq;
module_param_named(scl_freq, scl_freq, uint, 0444);
MODULE_PARM_DESC(scl_freq, "SCL frequency override");
@@ -227,6 +230,7 @@ struct qup_i2c_dev {
int irq;
struct clk *clk;
struct clk *pclk;
+ struct icc_path *icc_path;
struct i2c_adapter adap;
int clk_ctl;
@@ -255,6 +259,10 @@ struct qup_i2c_dev {
/* To configure when bus is in run state */
u32 config_run;
+ /* bandwidth votes */
+ u32 src_clk_freq;
+ u32 cur_bw_clk_freq;
+
/* dma parameters */
bool is_dma;
/* To check if the current transfer is using DMA */
@@ -453,6 +461,23 @@ static int qup_i2c_bus_active(struct qup_i2c_dev *qup, int len)
return ret;
}
+static int qup_i2c_vote_bw(struct qup_i2c_dev *qup, u32 clk_freq)
+{
+ u32 needed_peak_bw;
+ int ret;
+
+ if (qup->cur_bw_clk_freq == clk_freq)
+ return 0;
+
+ needed_peak_bw = Bps_to_icc(clk_freq * QUP_BUS_WIDTH);
+ ret = icc_set_bw(qup->icc_path, 0, needed_peak_bw);
+ if (ret)
+ return ret;
+
+ qup->cur_bw_clk_freq = clk_freq;
+ return 0;
+}
+
static void qup_i2c_write_tx_fifo_v1(struct qup_i2c_dev *qup)
{
struct qup_i2c_block *blk = &qup->blk;
@@ -838,6 +863,10 @@ static int qup_i2c_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
int ret = 0;
int idx = 0;
+ ret = qup_i2c_vote_bw(qup, qup->src_clk_freq);
+ if (ret)
+ return ret;
+
enable_irq(qup->irq);
ret = qup_i2c_req_dma(qup);
@@ -1643,6 +1672,7 @@ static void qup_i2c_disable_clocks(struct qup_i2c_dev *qup)
config = readl(qup->base + QUP_CONFIG);
config |= QUP_CLOCK_AUTO_GATE;
writel(config, qup->base + QUP_CONFIG);
+ qup_i2c_vote_bw(qup, 0);
clk_disable_unprepare(qup->pclk);
}
@@ -1743,6 +1773,11 @@ static int qup_i2c_probe(struct platform_device *pdev)
goto fail_dma;
}
qup->is_dma = true;
+
+ qup->icc_path = devm_of_icc_get(&pdev->dev, NULL);
+ if (IS_ERR(qup->icc_path))
+ return dev_err_probe(&pdev->dev, PTR_ERR(qup->icc_path),
+ "failed to get interconnect path\n");
}
nodma:
@@ -1791,6 +1826,7 @@ nodma:
qup_i2c_enable_clocks(qup);
src_clk_freq = clk_get_rate(qup->clk);
}
+ qup->src_clk_freq = src_clk_freq;
/*
* Bootloaders might leave a pending interrupt on certain QUP's,
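The interconnect vote added to qup above is a peak-only bandwidth request proportional to the serial clock; a sketch of the same call under that assumption (the 8x factor mirrors QUP_BUS_WIDTH):

#include <linux/interconnect.h>

#define DEMO_BUS_WIDTH	8

/*
 * Vote peak bandwidth only; e.g. a 19.2 MHz clock requests
 * 19.2e6 * 8 = 153.6 MB/s peak. A zero frequency drops the vote,
 * which is how qup_i2c_disable_clocks() releases it above.
 */
static int demo_vote_bw(struct icc_path *path, u32 clk_freq)
{
	return icc_set_bw(path, 0, Bps_to_icc(clk_freq * DEMO_BUS_WIDTH));
}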
diff --git a/drivers/i2c/busses/i2c-rzv2m.c b/drivers/i2c/busses/i2c-rzv2m.c
index 02b76e24a476..53762cc56d28 100644
--- a/drivers/i2c/busses/i2c-rzv2m.c
+++ b/drivers/i2c/busses/i2c-rzv2m.c
@@ -287,20 +287,15 @@ static int rzv2m_i2c_send_address(struct rzv2m_i2c_priv *priv,
int ret;
if (msg->flags & I2C_M_TEN) {
- /*
- * 10-bit address
- * addr_1: 5'b11110 | addr[9:8] | (R/nW)
- * addr_2: addr[7:0]
- */
- addr = 0xf0 | ((msg->addr & GENMASK(9, 8)) >> 7);
- addr |= !!(msg->flags & I2C_M_RD);
- /* Send 1st address(extend code) */
+ /* 10-bit address: Send 1st address (extend code) */
+ addr = i2c_10bit_addr_hi_from_msg(msg);
ret = rzv2m_i2c_write_with_ack(priv, addr);
if (ret)
return ret;
- /* Send 2nd address */
- ret = rzv2m_i2c_write_with_ack(priv, msg->addr & 0xff);
+ /* 10-bit address: Send 2nd address */
+ addr = i2c_10bit_addr_lo_from_msg(msg);
+ ret = rzv2m_i2c_write_with_ack(priv, addr);
} else {
/* 7-bit address */
addr = i2c_8bit_addr_from_msg(msg);
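The rzv2m hunk deletes the comment spelling out the 10-bit framing (first byte 5'b11110 | addr[9:8] | R/nW, second byte addr[7:0]); the new helpers encode exactly that. A sketch of what they compute, assuming this framing (not the actual <linux/i2c.h> definitions):

#include <linux/i2c.h>

static inline u8 demo_10bit_addr_hi(const struct i2c_msg *msg)
{
	/* 0b11110 marker, addr[9:8] in bits 2:1, R/nW in bit 0 */
	return 0xf0 | ((msg->addr >> 7) & 0x06) |
	       (msg->flags & I2C_M_RD ? 0x01 : 0x00);
}

static inline u8 demo_10bit_addr_lo(const struct i2c_msg *msg)
{
	return msg->addr & 0xff;	/* addr[7:0], no R/nW bit */
}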
diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
index a6c407d36800..02feee6c9ba9 100644
--- a/drivers/i2c/i2c-core-of.c
+++ b/drivers/i2c/i2c-core-of.c
@@ -157,7 +157,6 @@ const struct of_device_id
return i2c_of_match_device_sysfs(matches, client);
}
-EXPORT_SYMBOL_GPL(i2c_of_match_device);
#if IS_ENABLED(CONFIG_OF_DYNAMIC)
static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
index 36587f38dff3..4797ba88331c 100644
--- a/drivers/i2c/i2c-core.h
+++ b/drivers/i2c/i2c-core.h
@@ -84,8 +84,17 @@ static inline void i2c_acpi_remove_space_handler(struct i2c_adapter *adapter) {
#ifdef CONFIG_OF
void of_i2c_register_devices(struct i2c_adapter *adap);
+const struct of_device_id *i2c_of_match_device(const struct of_device_id *matches,
+ struct i2c_client *client);
+
#else
static inline void of_i2c_register_devices(struct i2c_adapter *adap) { }
+static inline
+const struct of_device_id *i2c_of_match_device(const struct of_device_id *matches,
+ struct i2c_client *client)
+{
+ return NULL;
+}
#endif
extern struct notifier_block i2c_of_notifier;
diff --git a/drivers/i2c/muxes/i2c-mux-ltc4306.c b/drivers/i2c/muxes/i2c-mux-ltc4306.c
index 19a7c370946d..8a87f19bf5d5 100644
--- a/drivers/i2c/muxes/i2c-mux-ltc4306.c
+++ b/drivers/i2c/muxes/i2c-mux-ltc4306.c
@@ -303,7 +303,7 @@ static void ltc4306_remove(struct i2c_client *client)
static struct i2c_driver ltc4306_driver = {
.driver = {
.name = "ltc4306",
- .of_match_table = of_match_ptr(ltc4306_of_match),
+ .of_match_table = ltc4306_of_match,
},
.probe = ltc4306_probe,
.remove = ltc4306_remove,
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 6f84018258c4..db95113a5b49 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -414,7 +414,7 @@ static irqreturn_t pca954x_irq_handler(int irq, void *dev_id)
pending = (ret >> PCA954X_IRQ_OFFSET) & (BIT(data->chip->nchans) - 1);
for_each_set_bit(i, &pending, data->chip->nchans)
- handle_nested_irq(irq_linear_revmap(data->irq, i));
+ handle_nested_irq(irq_find_mapping(data->irq, i));
return IRQ_RETVAL(pending);
}
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
index dfa472d514cc..1e566ea92bc9 100644
--- a/drivers/i2c/muxes/i2c-mux-reg.c
+++ b/drivers/i2c/muxes/i2c-mux-reg.c
@@ -250,7 +250,7 @@ static struct platform_driver i2c_mux_reg_driver = {
.remove = i2c_mux_reg_remove,
.driver = {
.name = "i2c-mux-reg",
- .of_match_table = of_match_ptr(i2c_mux_reg_of_match),
+ .of_match_table = i2c_mux_reg_of_match,
},
};
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index d5dc4180afbc..fd81871609d9 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -2276,7 +2276,7 @@ static int of_i3c_master_add_dev(struct i3c_master_controller *master,
u32 reg[3];
int ret;
- if (!master || !node)
+ if (!master)
return -EINVAL;
ret = of_property_read_u32_array(node, "reg", reg, ARRAY_SIZE(reg));
@@ -2369,14 +2369,10 @@ static u8 i3c_master_i2c_get_lvr(struct i2c_client *client)
{
/* Fall back to no spike filters and FM bus mode. */
u8 lvr = I3C_LVR_I2C_INDEX(2) | I3C_LVR_I2C_FM_MODE;
+ u32 reg[3];
- if (client->dev.of_node) {
- u32 reg[3];
-
- if (!of_property_read_u32_array(client->dev.of_node, "reg",
- reg, ARRAY_SIZE(reg)))
- lvr = reg[2];
- }
+ if (!of_property_read_u32_array(client->dev.of_node, "reg", reg, ARRAY_SIZE(reg)))
+ lvr = reg[2];
return lvr;
}
@@ -2486,7 +2482,7 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
struct i2c_adapter *adap = i3c_master_to_i2c_adapter(master);
struct i2c_dev_desc *i2cdev;
struct i2c_dev_boardinfo *i2cboardinfo;
- int ret, id = -ENODEV;
+ int ret, id;
adap->dev.parent = master->dev.parent;
adap->owner = master->dev.parent->driver->owner;
@@ -2497,9 +2493,7 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
adap->timeout = 1000;
adap->retries = 3;
- if (master->dev.of_node)
- id = of_alias_get_id(master->dev.of_node, "i2c");
-
+ id = of_alias_get_id(master->dev.of_node, "i2c");
if (id >= 0) {
adap->nr = id;
ret = i2c_add_numbered_adapter(adap);
@@ -2561,6 +2555,9 @@ static void i3c_master_unregister_i3c_devs(struct i3c_master_controller *master)
*/
void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot)
{
+ if (!dev->ibi || !slot)
+ return;
+
atomic_inc(&dev->ibi->pending_ibis);
queue_work(dev->ibi->wq, &slot->work);
}
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index 2fbf8b2addd0..611c22b72c15 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -1079,7 +1079,7 @@ static void dw_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
}
static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
- const struct i2c_msg *i2c_xfers,
+ struct i2c_msg *i2c_xfers,
int i2c_nxfers)
{
struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index fedbe6624a1c..fd3752cea654 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -813,7 +813,7 @@ static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
}
static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
- const struct i2c_msg *xfers, int nxfers)
+ struct i2c_msg *xfers, int nxfers)
{
struct i3c_master_controller *m = i2c_dev_get_master(dev);
struct cdns_i3c_master *master = to_cdns_i3c_master(m);
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index 648c501407ce..a71226d7ca59 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -367,7 +367,7 @@ out:
}
static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
- const struct i2c_msg *i2c_xfers, int nxfers)
+ struct i2c_msg *i2c_xfers, int nxfers)
{
struct i3c_master_controller *m = i2c_dev_get_master(dev);
struct i3c_hci *hci = to_i3c_hci(m);
@@ -382,14 +382,11 @@ static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
return -ENOMEM;
for (i = 0; i < nxfers; i++) {
- xfer[i].data = i2c_xfers[i].buf;
+ xfer[i].data = i2c_get_dma_safe_msg_buf(&i2c_xfers[i], 1);
xfer[i].data_len = i2c_xfers[i].len;
xfer[i].rnw = i2c_xfers[i].flags & I2C_M_RD;
hci->cmd->prep_i2c_xfer(hci, dev, &xfer[i]);
xfer[i].cmd_desc[0] |= CMD_0_ROC;
- ret = i3c_hci_alloc_safe_xfer_buf(hci, &xfer[i]);
- if (ret)
- goto out;
}
last = i - 1;
xfer[last].cmd_desc[0] |= CMD_0_TOC;
@@ -412,7 +409,8 @@ static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
out:
for (i = 0; i < nxfers; i++)
- i3c_hci_free_safe_xfer_buf(hci, &xfer[i]);
+ i2c_put_dma_safe_msg_buf(xfer[i].data, &i2c_xfers[i], !ret);
hci_free_xfer(xfer, nxfers);
return ret;
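The mipi-i3c-hci hunk above replaces driver-local bounce buffers with the I2C core's DMA-safe helpers; a minimal sketch of the borrow/return contract (a threshold of 1 forces a bounce copy for every message, as the driver requests here):

#include <linux/i2c.h>

static int demo_dma_xfer_one(struct i2c_msg *msg)
{
	int ret = 0;
	u8 *buf;

	/* returns msg->buf when it is already DMA-safe, else a bounce buffer */
	buf = i2c_get_dma_safe_msg_buf(msg, 1);
	if (!buf)
		return -ENOMEM;

	/* ... hand "buf" to the DMA engine and run the transfer ... */

	/*
	 * The last argument says whether data was transferred, so a
	 * read's bounce buffer is copied back to msg->buf only on success.
	 */
	i2c_put_dma_safe_msg_buf(buf, msg, ret == 0);
	return ret;
}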
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index d6057d8c7dec..85e16de208d3 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -32,6 +32,7 @@
#define SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
#define SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
#define SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
+#define SVC_I3C_MCONFIG_SKEW_MASK GENMASK(27, 25)
#define SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))
#define SVC_I3C_MCTRL 0x084
@@ -58,6 +59,7 @@
#define SVC_I3C_MSTATUS 0x088
#define SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
#define SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
+#define SVC_I3C_MSTATUS_STATE_SLVREQ(x) (SVC_I3C_MSTATUS_STATE(x) == 1)
#define SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
#define SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
#define SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
@@ -113,6 +115,7 @@
#define SVC_I3C_MWDATAHE 0x0BC
#define SVC_I3C_MRDATAB 0x0C0
#define SVC_I3C_MRDATAH 0x0C8
+#define SVC_I3C_MWDATAB1 0x0CC
#define SVC_I3C_MWMSG_SDR 0x0D0
#define SVC_I3C_MRMSG_SDR 0x0D4
#define SVC_I3C_MWMSG_DDR 0x0D8
@@ -133,6 +136,32 @@
#define SVC_I3C_EVENT_IBI GENMASK(7, 0)
#define SVC_I3C_EVENT_HOTJOIN BIT(31)
+/*
+ * SVC_I3C_QUIRK_FIFO_EMPTY:
+ * The I3C HW stalls the write transfer if the transmit FIFO becomes
+ * empty; when new data is written to the FIFO, the HW resumes the
+ * transfer, but the first transmitted data bit may have the wrong value.
+ * Workaround:
+ * Fill the FIFO in advance to prevent FIFO from becoming empty.
+ */
+#define SVC_I3C_QUIRK_FIFO_EMPTY BIT(0)
+/*
+ * SVC_I3C_QUIRK_FALSE_SLVSTART:
+ * I3C HW may generate an invalid SlvStart event when emitting a STOP.
+ * If it is a true SlvStart, the MSTATUS state is SLVREQ.
+ */
+#define SVC_I3C_QUIRK_FALSE_SLVSTART BIT(1)
+/*
+ * SVC_I3C_QUIRK_DAA_CORRUPT:
+ * When MCONFIG.SKEW=0 and MCONFIG.ODHPP=0, the ENTDAA transaction gets
+ * corrupted and results in a missing repeated-start condition at the end of
+ * address assignment.
+ * Workaround:
+ * Set MCONFIG.SKEW to 1 before initiating the DAA process. After the DAA
+ * process is completed, return MCONFIG.SKEW to its previous value.
+ */
+#define SVC_I3C_QUIRK_DAA_CORRUPT BIT(2)
+
struct svc_i3c_cmd {
u8 addr;
bool rnw;
@@ -158,6 +187,10 @@ struct svc_i3c_regs_save {
u32 mdynaddr;
};
+struct svc_i3c_drvdata {
+ u32 quirks;
+};
+
/**
* struct svc_i3c_master - Silvaco I3C Master structure
* @base: I3C master controller
@@ -183,6 +216,7 @@ struct svc_i3c_regs_save {
* @ibi.tbq_slot: To be queued IBI slot
* @ibi.lock: IBI lock
* @lock: Transfer lock, protect between IBI work thread and callbacks from master
+ * @drvdata: Driver data
* @enabled_events: Bit masks for enable events (IBI, HotJoin).
* @mctrl_config: Configuration value in SVC_I3C_MCONFIG for setting speed back.
*/
@@ -214,6 +248,7 @@ struct svc_i3c_master {
spinlock_t lock;
} ibi;
struct mutex lock;
+ const struct svc_i3c_drvdata *drvdata;
u32 enabled_events;
u32 mctrl_config;
};
@@ -230,6 +265,18 @@ struct svc_i3c_i2c_dev_data {
struct i3c_generic_ibi_pool *ibi_pool;
};
+static inline bool svc_has_quirk(struct svc_i3c_master *master, u32 quirk)
+{
+ return (master->drvdata->quirks & quirk);
+}
+
+static inline bool svc_has_daa_corrupt(struct svc_i3c_master *master)
+{
+ return ((master->drvdata->quirks & SVC_I3C_QUIRK_DAA_CORRUPT) &&
+ !(master->mctrl_config &
+ (SVC_I3C_MCONFIG_SKEW_MASK | SVC_I3C_MCONFIG_ODHPP(1))));
+}
+
static inline bool is_events_enabled(struct svc_i3c_master *master, u32 mask)
{
return !!(master->enabled_events & mask);
@@ -378,7 +425,7 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
slot->len < SVC_I3C_FIFO_SIZE) {
mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
- readsl(master->regs + SVC_I3C_MRDATAB, buf, count);
+ readsb(master->regs + SVC_I3C_MRDATAB, buf, count);
slot->len += count;
buf += count;
}
@@ -545,6 +592,8 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
queue_work(master->base.wq, &master->hj_work);
break;
case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
+ svc_i3c_master_emit_stop(master);
+ break;
default:
break;
}
@@ -564,6 +613,11 @@ static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
/* Clear the interrupt status */
writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);
+ /* Ignore the false event */
+ if (svc_has_quirk(master, SVC_I3C_QUIRK_FALSE_SLVSTART) &&
+ !SVC_I3C_MSTATUS_STATE_SLVREQ(active))
+ return IRQ_HANDLED;
+
svc_i3c_master_disable_interrupts(master);
/* Handle the interrupt in a non atomic context */
@@ -888,10 +942,12 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
u8 *addrs, unsigned int *count)
{
u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
- unsigned int dev_nb = 0, last_addr = 0;
+ unsigned int dev_nb = 0, last_addr = 0, dyn_addr = 0;
u32 reg;
int ret, i;
+ svc_i3c_master_flush_fifo(master);
+
while (true) {
/* clean SVC_I3C_MINT_IBIWON w1c bits */
writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
@@ -932,6 +988,26 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
u8 data[6];
/*
+ * While a slave is sending its ID to request address assignment,
+ * prefilling the dynamic address reduces SCL clock stalls and also
+ * works around the SVC_I3C_QUIRK_FIFO_EMPTY quirk.
+ *
+ * Ideally, prefilling before the processDAA command is better.
+ * However, it requires an additional check to write the dyn_addr
+ * at the right time because the driver needs to write the processDAA
+ * command twice for one assignment.
+ * Prefilling here is safe and efficient because the FIFO starts
+ * filling within a few hundred nanoseconds, which is significantly
+ * faster compared to the 64 SCL clock cycles.
+ */
+ ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
+ if (ret < 0)
+ break;
+
+ dyn_addr = ret;
+ writel(dyn_addr, master->regs + SVC_I3C_MWDATAB);
+
+ /*
* We only care about the 48-bit provisioned ID yet to
* be sure a device does not nack an address twice.
* Otherwise, we would just need to flush the RX FIFO.
@@ -1009,21 +1085,16 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
if (ret)
break;
- /* Give the slave device a suitable dynamic address */
- ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
- if (ret < 0)
- break;
-
- addrs[dev_nb] = ret;
+ addrs[dev_nb] = dyn_addr;
dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
dev_nb, addrs[dev_nb]);
-
- writel(addrs[dev_nb], master->regs + SVC_I3C_MWDATAB);
last_addr = addrs[dev_nb++];
}
/* Need manual issue STOP except for Complete condition */
svc_i3c_master_emit_stop(master);
+ svc_i3c_master_flush_fifo(master);
+
return ret;
}
@@ -1037,7 +1108,7 @@ static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
/* Create the IBIRULES register for both cases */
i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
- if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER)
+ if (!(dev->info.bcr & I3C_BCR_IBI_REQ_CAP))
continue;
if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
@@ -1096,7 +1167,16 @@ static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
}
spin_lock_irqsave(&master->xferqueue.lock, flags);
+
+ if (svc_has_daa_corrupt(master))
+ writel(master->mctrl_config | SVC_I3C_MCONFIG_SKEW(1),
+ master->regs + SVC_I3C_MCONFIG);
+
ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
+
+ if (svc_has_daa_corrupt(master))
+ writel(master->mctrl_config, master->regs + SVC_I3C_MCONFIG);
+
spin_unlock_irqrestore(&master->xferqueue.lock, flags);
svc_i3c_master_clear_merrwarn(master);
@@ -1220,6 +1300,24 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
SVC_I3C_MCTRL_RDTERM(*actual_len),
master->regs + SVC_I3C_MCTRL);
+ /*
+ * The entire transaction can consist of multiple write transfers.
+ * Prefilling before EmitStartAddr causes the data to be emitted
+ * immediately, becoming part of the previous transfer.
+ * The only way to work around this hardware issue is to let the
+ * FIFO start filling as soon as possible after EmitStartAddr.
+ */
+ if (svc_has_quirk(master, SVC_I3C_QUIRK_FIFO_EMPTY) && !rnw && xfer_len) {
+ u32 end = xfer_len > SVC_I3C_FIFO_SIZE ? 0 : SVC_I3C_MWDATAB_END;
+ u32 len = min_t(u32, xfer_len, SVC_I3C_FIFO_SIZE);
+
+ writesb(master->regs + SVC_I3C_MWDATAB1, out, len - 1);
+ /* Mark END bit if this is the last byte */
+ writel(out[len - 1] | end, master->regs + SVC_I3C_MWDATAB);
+ xfer_len -= len;
+ out += len;
+ }
+
ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
if (ret)
@@ -1308,6 +1406,7 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
emit_stop:
svc_i3c_master_emit_stop(master);
svc_i3c_master_clear_merrwarn(master);
+ svc_i3c_master_flush_fifo(master);
return ret;
}
@@ -1584,7 +1683,7 @@ static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
}
static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
- const struct i2c_msg *xfers,
+ struct i2c_msg *xfers,
int nxfers)
{
struct i3c_master_controller *m = i2c_dev_get_master(dev);
@@ -1817,6 +1916,10 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
+ master->drvdata = of_device_get_match_data(dev);
+ if (!master->drvdata)
+ return -EINVAL;
+
master->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
@@ -1958,8 +2061,17 @@ static const struct dev_pm_ops svc_i3c_pm_ops = {
svc_i3c_runtime_resume, NULL)
};
+static const struct svc_i3c_drvdata npcm845_drvdata = {
+ .quirks = SVC_I3C_QUIRK_FIFO_EMPTY |
+ SVC_I3C_QUIRK_FALSE_SLVSTART |
+ SVC_I3C_QUIRK_DAA_CORRUPT,
+};
+
+static const struct svc_i3c_drvdata svc_default_drvdata = {};
+
static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
- { .compatible = "silvaco,i3c-master-v1"},
+ { .compatible = "nuvoton,npcm845-i3c", .data = &npcm845_drvdata },
+ { .compatible = "silvaco,i3c-master-v1", .data = &svc_default_drvdata },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
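The quirk plumbing above is the usual OF match-data pattern; a minimal sketch of how per-compatible quirks reach a probe routine (all names hypothetical):

#include <linux/bits.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct demo_drvdata {
	u32 quirks;
};

static const struct demo_drvdata demo_quirky = { .quirks = BIT(0) };
static const struct demo_drvdata demo_plain;

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,quirky-ip", .data = &demo_quirky },
	{ .compatible = "vendor,plain-ip", .data = &demo_plain },
	{ /* sentinel */ }
};

static int demo_probe(struct platform_device *pdev)
{
	const struct demo_drvdata *dd = device_get_match_data(&pdev->dev);

	/* every entry above carries data, so NULL means a bad match */
	if (!dd)
		return -EINVAL;

	dev_info(&pdev->dev, "quirks: %#x\n", dd->quirks);
	return 0;
}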
diff --git a/drivers/iio/accel/adxl345.h b/drivers/iio/accel/adxl345.h
index 517e494ba555..bc6d634bd85c 100644
--- a/drivers/iio/accel/adxl345.h
+++ b/drivers/iio/accel/adxl345.h
@@ -43,7 +43,6 @@
#define ADXL345_REG_INT_ENABLE 0x2E
#define ADXL345_REG_INT_MAP 0x2F
#define ADXL345_REG_INT_SOURCE 0x30
-#define ADXL345_REG_INT_SOURCE_MSK 0xFF
#define ADXL345_REG_DATA_FORMAT 0x31
#define ADXL345_REG_XYZ_BASE 0x32
#define ADXL345_REG_DATA_AXIS(index) \
diff --git a/drivers/iio/accel/adxl345_core.c b/drivers/iio/accel/adxl345_core.c
index d1b2d3985a40..375c27d16827 100644
--- a/drivers/iio/accel/adxl345_core.c
+++ b/drivers/iio/accel/adxl345_core.c
@@ -76,6 +76,26 @@ static const unsigned long adxl345_scan_masks[] = {
0
};
+/**
+ * adxl345_set_measure_en() - Enable and disable measuring.
+ *
+ * @st: The device data.
+ * @en: Enable measurements, else standby mode.
+ *
+ * For lowest power operation, standby mode can be used. In standby mode,
+ * current consumption is supposed to be reduced to 0.1uA (typical). In this
+ * mode no measurements are made. Placing the device into standby mode
+ * preserves the contents of FIFO.
+ *
+ * Return: Returns 0 if successful, or a negative error value.
+ */
+static int adxl345_set_measure_en(struct adxl345_state *st, bool en)
+{
+ unsigned int val = en ? ADXL345_POWER_CTL_MEASURE : ADXL345_POWER_CTL_STANDBY;
+
+ return regmap_write(st->regmap, ADXL345_REG_POWER_CTL, val);
+}
+
static int adxl345_set_interrupts(struct adxl345_state *st)
{
int ret;
@@ -87,8 +107,7 @@ static int adxl345_set_interrupts(struct adxl345_state *st)
* interrupts to the INT1 pin, whereas bits set to 1 send their respective
* interrupts to the INT2 pin. The intio shall convert this accordingly.
*/
- int_map = FIELD_GET(ADXL345_REG_INT_SOURCE_MSK,
- st->intio ? st->int_map : ~st->int_map);
+ int_map = st->intio ? st->int_map : ~st->int_map;
ret = regmap_write(st->regmap, ADXL345_REG_INT_MAP, int_map);
if (ret)
@@ -182,6 +201,16 @@ static int adxl345_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+static int adxl345_reg_access(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int writeval, unsigned int *readval)
+{
+ struct adxl345_state *st = iio_priv(indio_dev);
+
+ if (readval)
+ return regmap_read(st->regmap, reg, readval);
+ return regmap_write(st->regmap, reg, writeval);
+}
+
static int adxl345_set_watermark(struct iio_dev *indio_dev, unsigned int value)
{
struct adxl345_state *st = iio_priv(indio_dev);
@@ -214,26 +243,6 @@ static int adxl345_write_raw_get_fmt(struct iio_dev *indio_dev,
}
}
-/**
- * adxl345_set_measure_en() - Enable and disable measuring.
- *
- * @st: The device data.
- * @en: Enable measurements, else standby mode.
- *
- * For lowest power operation, standby mode can be used. In standby mode,
- * current consumption is supposed to be reduced to 0.1uA (typical). In this
- * mode no measurements are made. Placing the device into standby mode
- * preserves the contents of FIFO.
- *
- * Return: Returns 0 if successful, or a negative error value.
- */
-static int adxl345_set_measure_en(struct adxl345_state *st, bool en)
-{
- unsigned int val = en ? ADXL345_POWER_CTL_MEASURE : ADXL345_POWER_CTL_STANDBY;
-
- return regmap_write(st->regmap, ADXL345_REG_POWER_CTL, val);
-}
-
static void adxl345_powerdown(void *ptr)
{
struct adxl345_state *st = ptr;
@@ -394,18 +403,6 @@ static const struct iio_buffer_setup_ops adxl345_buffer_ops = {
.predisable = adxl345_buffer_predisable,
};
-static int adxl345_get_status(struct adxl345_state *st)
-{
- int ret;
- unsigned int regval;
-
- ret = regmap_read(st->regmap, ADXL345_REG_INT_SOURCE, &regval);
- if (ret < 0)
- return ret;
-
- return FIELD_GET(ADXL345_REG_INT_SOURCE_MSK, regval);
-}
-
static int adxl345_fifo_push(struct iio_dev *indio_dev,
int samples)
{
@@ -439,14 +436,10 @@ static irqreturn_t adxl345_irq_handler(int irq, void *p)
int int_stat;
int samples;
- int_stat = adxl345_get_status(st);
- if (int_stat <= 0)
+ if (regmap_read(st->regmap, ADXL345_REG_INT_SOURCE, &int_stat))
return IRQ_NONE;
- if (int_stat & ADXL345_INT_OVERRUN)
- goto err;
-
- if (int_stat & ADXL345_INT_WATERMARK) {
+ if (FIELD_GET(ADXL345_INT_WATERMARK, int_stat)) {
samples = adxl345_get_samples(st);
if (samples < 0)
goto err;
@@ -454,6 +447,10 @@ static irqreturn_t adxl345_irq_handler(int irq, void *p)
if (adxl345_fifo_push(indio_dev, samples) < 0)
goto err;
}
+
+ if (FIELD_GET(ADXL345_INT_OVERRUN, int_stat))
+ goto err;
+
return IRQ_HANDLED;
err:
@@ -467,6 +464,7 @@ static const struct iio_info adxl345_info = {
.read_raw = adxl345_read_raw,
.write_raw = adxl345_write_raw,
.write_raw_get_fmt = adxl345_write_raw_get_fmt,
+ .debugfs_reg_access = &adxl345_reg_access,
.hwfifo_set_watermark = adxl345_set_watermark,
};
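adxl345_reg_access() added above is the stock regmap pass-through behind .debugfs_reg_access: a non-NULL readval selects a register read, otherwise the value is written. Any regmap-backed IIO driver can reuse the same shape (generic foo names below, not from this diff):

	static int foo_reg_access(struct iio_dev *indio_dev, unsigned int reg,
				  unsigned int writeval, unsigned int *readval)
	{
		struct foo_state *st = iio_priv(indio_dev);

		/* readval != NULL means read, otherwise write */
		if (readval)
			return regmap_read(st->regmap, reg, readval);
		return regmap_write(st->regmap, reg, writeval);
	}

The IIO core then exposes the callback through the per-device debugfs file direct_reg_access.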
diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c
index a48ac0d7bd96..add4053e7a02 100644
--- a/drivers/iio/accel/adxl367.c
+++ b/drivers/iio/accel/adxl367.c
@@ -477,45 +477,42 @@ static int adxl367_set_fifo_watermark(struct adxl367_state *st,
static int adxl367_set_range(struct iio_dev *indio_dev,
enum adxl367_range range)
{
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- struct adxl367_state *st = iio_priv(indio_dev);
- int ret;
+ struct adxl367_state *st = iio_priv(indio_dev);
+ int ret;
- guard(mutex)(&st->lock);
+ guard(mutex)(&st->lock);
- ret = adxl367_set_measure_en(st, false);
- if (ret)
- return ret;
+ ret = adxl367_set_measure_en(st, false);
+ if (ret)
+ return ret;
- ret = regmap_update_bits(st->regmap, ADXL367_REG_FILTER_CTL,
- ADXL367_FILTER_CTL_RANGE_MASK,
- FIELD_PREP(ADXL367_FILTER_CTL_RANGE_MASK,
- range));
- if (ret)
- return ret;
+ ret = regmap_update_bits(st->regmap, ADXL367_REG_FILTER_CTL,
+ ADXL367_FILTER_CTL_RANGE_MASK,
+ FIELD_PREP(ADXL367_FILTER_CTL_RANGE_MASK,
+ range));
+ if (ret)
+ return ret;
- adxl367_scale_act_thresholds(st, st->range, range);
+ adxl367_scale_act_thresholds(st, st->range, range);
- /* Activity thresholds depend on range */
- ret = _adxl367_set_act_threshold(st, ADXL367_ACTIVITY,
- st->act_threshold);
- if (ret)
- return ret;
+ /* Activity thresholds depend on range */
+ ret = _adxl367_set_act_threshold(st, ADXL367_ACTIVITY,
+ st->act_threshold);
+ if (ret)
+ return ret;
- ret = _adxl367_set_act_threshold(st, ADXL367_INACTIVITY,
- st->inact_threshold);
- if (ret)
- return ret;
+ ret = _adxl367_set_act_threshold(st, ADXL367_INACTIVITY,
+ st->inact_threshold);
+ if (ret)
+ return ret;
- ret = adxl367_set_measure_en(st, true);
- if (ret)
- return ret;
+ ret = adxl367_set_measure_en(st, true);
+ if (ret)
+ return ret;
- st->range = range;
+ st->range = range;
- return 0;
- }
- unreachable();
+ return 0;
}
static int adxl367_time_ms_to_samples(struct adxl367_state *st, unsigned int ms)
@@ -620,23 +617,20 @@ static int _adxl367_set_odr(struct adxl367_state *st, enum adxl367_odr odr)
static int adxl367_set_odr(struct iio_dev *indio_dev, enum adxl367_odr odr)
{
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- struct adxl367_state *st = iio_priv(indio_dev);
- int ret;
+ struct adxl367_state *st = iio_priv(indio_dev);
+ int ret;
- guard(mutex)(&st->lock);
+ guard(mutex)(&st->lock);
- ret = adxl367_set_measure_en(st, false);
- if (ret)
- return ret;
+ ret = adxl367_set_measure_en(st, false);
+ if (ret)
+ return ret;
- ret = _adxl367_set_odr(st, odr);
- if (ret)
- return ret;
+ ret = _adxl367_set_odr(st, odr);
+ if (ret)
+ return ret;
- return adxl367_set_measure_en(st, true);
- }
- unreachable();
+ return adxl367_set_measure_en(st, true);
}
static int adxl367_set_temp_adc_en(struct adxl367_state *st, unsigned int reg,
@@ -725,32 +719,29 @@ static int adxl367_read_sample(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val)
{
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- struct adxl367_state *st = iio_priv(indio_dev);
- u16 sample;
- int ret;
+ struct adxl367_state *st = iio_priv(indio_dev);
+ u16 sample;
+ int ret;
- guard(mutex)(&st->lock);
+ guard(mutex)(&st->lock);
- ret = adxl367_set_temp_adc_reg_en(st, chan->address, true);
- if (ret)
- return ret;
+ ret = adxl367_set_temp_adc_reg_en(st, chan->address, true);
+ if (ret)
+ return ret;
- ret = regmap_bulk_read(st->regmap, chan->address, &st->sample_buf,
- sizeof(st->sample_buf));
- if (ret)
- return ret;
+ ret = regmap_bulk_read(st->regmap, chan->address, &st->sample_buf,
+ sizeof(st->sample_buf));
+ if (ret)
+ return ret;
- sample = FIELD_GET(ADXL367_DATA_MASK, be16_to_cpu(st->sample_buf));
- *val = sign_extend32(sample, chan->scan_type.realbits - 1);
+ sample = FIELD_GET(ADXL367_DATA_MASK, be16_to_cpu(st->sample_buf));
+ *val = sign_extend32(sample, chan->scan_type.realbits - 1);
- ret = adxl367_set_temp_adc_reg_en(st, chan->address, false);
- if (ret)
- return ret;
+ ret = adxl367_set_temp_adc_reg_en(st, chan->address, false);
+ if (ret)
+ return ret;
- return IIO_VAL_INT;
- }
- unreachable();
+ return IIO_VAL_INT;
}
static int adxl367_get_status(struct adxl367_state *st, u8 *status,
@@ -852,10 +843,15 @@ static int adxl367_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long info)
{
struct adxl367_state *st = iio_priv(indio_dev);
+ int ret;
switch (info) {
case IIO_CHAN_INFO_RAW:
- return adxl367_read_sample(indio_dev, chan, val);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = adxl367_read_sample(indio_dev, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_ACCEL: {
@@ -912,7 +908,12 @@ static int adxl367_write_raw(struct iio_dev *indio_dev,
if (ret)
return ret;
- return adxl367_set_odr(indio_dev, odr);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = adxl367_set_odr(indio_dev, odr);
+ iio_device_release_direct(indio_dev);
+ return ret;
}
case IIO_CHAN_INFO_SCALE: {
enum adxl367_range range;
@@ -921,7 +922,12 @@ static int adxl367_write_raw(struct iio_dev *indio_dev,
if (ret)
return ret;
- return adxl367_set_range(indio_dev, range);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = adxl367_set_range(indio_dev, range);
+ iio_device_release_direct(indio_dev);
+ return ret;
}
default:
return -EINVAL;
@@ -1069,13 +1075,15 @@ static int adxl367_read_event_config(struct iio_dev *indio_dev,
}
}
-static int adxl367_write_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- bool state)
+static int __adxl367_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ bool state)
{
+ struct adxl367_state *st = iio_priv(indio_dev);
enum adxl367_activity_type act;
+ int ret;
switch (dir) {
case IIO_EV_DIR_RISING:
@@ -1088,28 +1096,38 @@ static int adxl367_write_event_config(struct iio_dev *indio_dev,
return -EINVAL;
}
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- struct adxl367_state *st = iio_priv(indio_dev);
- int ret;
+ guard(mutex)(&st->lock);
+
+ ret = adxl367_set_measure_en(st, false);
+ if (ret)
+ return ret;
- guard(mutex)(&st->lock);
+ ret = adxl367_set_act_interrupt_en(st, act, state);
+ if (ret)
+ return ret;
- ret = adxl367_set_measure_en(st, false);
- if (ret)
- return ret;
+ ret = adxl367_set_act_en(st, act, state ? ADCL367_ACT_REF_ENABLED
+ : ADXL367_ACT_DISABLED);
+ if (ret)
+ return ret;
- ret = adxl367_set_act_interrupt_en(st, act, state);
- if (ret)
- return ret;
+ return adxl367_set_measure_en(st, true);
+}
- ret = adxl367_set_act_en(st, act, state ? ADCL367_ACT_REF_ENABLED
- : ADXL367_ACT_DISABLED);
- if (ret)
- return ret;
+static int adxl367_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ bool state)
+{
+ int ret;
- return adxl367_set_measure_en(st, true);
- }
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = __adxl367_write_event_config(indio_dev, chan, type, dir, state);
+ iio_device_release_direct(indio_dev);
+ return ret;
}
static ssize_t adxl367_get_fifo_enabled(struct device *dev,
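A recurring shape in the adxl367 hunks above: configuration registers are only written between adxl367_set_measure_en(st, false) and adxl367_set_measure_en(st, true), under the state mutex, presumably because the part wants these settings changed while measurement is stopped. Reduced to its skeleton (illustrative only, modeled on adxl367_set_odr() above):

	guard(mutex)(&st->lock);

	ret = adxl367_set_measure_en(st, false);	/* enter standby */
	if (ret)
		return ret;

	/* ... update ODR/range/threshold registers here ... */

	return adxl367_set_measure_en(st, true);	/* resume measuring */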
diff --git a/drivers/iio/accel/adxl372.c b/drivers/iio/accel/adxl372.c
index 8ba5fbe6e1f5..961145b50293 100644
--- a/drivers/iio/accel/adxl372.c
+++ b/drivers/iio/accel/adxl372.c
@@ -763,12 +763,11 @@ static int adxl372_read_raw(struct iio_dev *indio_dev,
switch (info) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = adxl372_read_axis(st, chan->address);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
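This adxl372 hunk is the minimal instance of the API conversion repeated throughout these drivers: iio_device_claim_direct_mode() returned an errno, while the new iio_device_claim_direct() returns a bool and leaves the errno to the caller. Side by side (foo_do_work() is a placeholder):

	/* old: errno-returning claim */
	ret = iio_device_claim_direct_mode(indio_dev);
	if (ret)
		return ret;
	ret = foo_do_work(indio_dev);
	iio_device_release_direct_mode(indio_dev);

	/* new: bool-returning claim, the caller picks -EBUSY */
	if (!iio_device_claim_direct(indio_dev))
		return -EBUSY;
	ret = foo_do_work(indio_dev);
	iio_device_release_direct(indio_dev);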
diff --git a/drivers/iio/accel/adxl380.c b/drivers/iio/accel/adxl380.c
index 90340f134722..0cf3c6815829 100644
--- a/drivers/iio/accel/adxl380.c
+++ b/drivers/iio/accel/adxl380.c
@@ -1175,12 +1175,11 @@ static int adxl380_read_raw(struct iio_dev *indio_dev,
switch (info) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = adxl380_read_chn(st, chan->address);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 128db14ba726..aa664a923f91 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -540,14 +540,13 @@ static int bma180_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&data->mutex);
ret = bma180_get_data_reg(data, chan->scan_index);
mutex_unlock(&data->mutex);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
if (chan->scan_type.sign == 's') {
diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c
index ae806ed60271..23f5e1ce9cc4 100644
--- a/drivers/iio/accel/bma400_core.c
+++ b/drivers/iio/accel/bma400_core.c
@@ -190,7 +190,7 @@ const struct regmap_config bma400_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = BMA400_CMD_REG,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.writeable_reg = bma400_is_writable_reg,
.volatile_reg = bma400_is_volatile_reg,
};
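Several regmap configs in this diff switch from the rbtree register cache to the maple tree cache; only .cache_type changes, the rest of the config is untouched. A minimal config using it (FOO_* names are placeholders):

	static const struct regmap_config foo_regmap_config = {
		.reg_bits = 8,
		.val_bits = 8,
		.max_register = FOO_MAX_REGISTER,
		.cache_type = REGCACHE_MAPLE,	/* was REGCACHE_RBTREE */
	};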
diff --git a/drivers/iio/accel/bmi088-accel-core.c b/drivers/iio/accel/bmi088-accel-core.c
index 9206fbdbf520..dea126f993c1 100644
--- a/drivers/iio/accel/bmi088-accel-core.c
+++ b/drivers/iio/accel/bmi088-accel-core.c
@@ -145,7 +145,7 @@ const struct regmap_config bmi088_regmap_conf = {
.val_bits = 8,
.max_register = 0x7E,
.volatile_table = &bmi088_volatile_table,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
EXPORT_SYMBOL_NS_GPL(bmi088_regmap_conf, "IIO_BMI088");
@@ -313,12 +313,13 @@ static int bmi088_accel_read_raw(struct iio_dev *indio_dev,
if (ret)
return ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
+ if (!iio_device_claim_direct(indio_dev)) {
+ ret = -EBUSY;
goto out_read_raw_pm_put;
+ }
ret = bmi088_accel_get_axis(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (!ret)
ret = IIO_VAL_INT;
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index 987212a7c038..48e4282964a0 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -460,22 +460,20 @@ static int fxls8962af_write_raw(struct iio_dev *indio_dev,
if (val != 0)
return -EINVAL;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = fxls8962af_set_full_scale(data, val2);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = fxls8962af_set_samp_freq(data, val, val2);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
default:
return -EINVAL;
@@ -683,14 +681,13 @@ fxls8962af_write_event_config(struct iio_dev *indio_dev,
fxls8962af_active(data);
ret = fxls8962af_power_on(data);
} else {
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
/* Not in buffered mode so disable power */
ret = fxls8962af_power_off(data);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
}
return ret;
diff --git a/drivers/iio/accel/kionix-kx022a.c b/drivers/iio/accel/kionix-kx022a.c
index 5aeb3b951ac5..07dcf5f0599f 100644
--- a/drivers/iio/accel/kionix-kx022a.c
+++ b/drivers/iio/accel/kionix-kx022a.c
@@ -149,7 +149,7 @@ static const struct regmap_config kx022a_regmap_config = {
.rd_noinc_table = &kx022a_nir_regs,
.precious_table = &kx022a_precious_regs,
.max_register = KX022A_MAX_REGISTER,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
/* Regmap configs kx132 */
@@ -260,7 +260,7 @@ static const struct regmap_config kx132_regmap_config = {
.rd_noinc_table = &kx132_nir_regs,
.precious_table = &kx132_precious_regs,
.max_register = KX132_MAX_REGISTER,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
struct kx022a_data {
@@ -510,26 +510,13 @@ static int kx022a_write_raw_get_fmt(struct iio_dev *idev,
}
}
-static int kx022a_write_raw(struct iio_dev *idev,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask)
+static int __kx022a_write_raw(struct iio_dev *idev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
{
struct kx022a_data *data = iio_priv(idev);
int ret, n;
- /*
- * We should not allow changing scale or frequency when FIFO is running
- * as it will mess the timestamp/scale for samples existing in the
- * buffer. If this turns out to be an issue we can later change logic
- * to internally flush the fifo before reconfiguring so the samples in
- * fifo keep matching the freq/scale settings. (Such setup could cause
- * issues if users trust the watermark to be reached within known
- * time-limit).
- */
- ret = iio_device_claim_direct_mode(idev);
- if (ret)
- return ret;
-
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
n = ARRAY_SIZE(kx022a_accel_samp_freq_table);
@@ -538,20 +525,19 @@ static int kx022a_write_raw(struct iio_dev *idev,
if (val == kx022a_accel_samp_freq_table[n][0] &&
val2 == kx022a_accel_samp_freq_table[n][1])
break;
- if (n < 0) {
- ret = -EINVAL;
- goto unlock_out;
- }
+ if (n < 0)
+ return -EINVAL;
+
ret = kx022a_turn_off_lock(data);
if (ret)
- break;
+ return ret;
ret = regmap_update_bits(data->regmap,
data->chip_info->odcntl,
KX022A_MASK_ODR, n);
data->odr_ns = kx022a_odrs[n];
kx022a_turn_on_unlock(data);
- break;
+ return ret;
case IIO_CHAN_INFO_SCALE:
n = data->chip_info->scale_table_size / 2;
@@ -559,27 +545,44 @@ static int kx022a_write_raw(struct iio_dev *idev,
if (val == data->chip_info->scale_table[n][0] &&
val2 == data->chip_info->scale_table[n][1])
break;
- if (n < 0) {
- ret = -EINVAL;
- goto unlock_out;
- }
+ if (n < 0)
+ return -EINVAL;
ret = kx022a_turn_off_lock(data);
if (ret)
- break;
+ return ret;
ret = regmap_update_bits(data->regmap, data->chip_info->cntl,
KX022A_MASK_GSEL,
n << KX022A_GSEL_SHIFT);
kx022a_turn_on_unlock(data);
- break;
+ return ret;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
+}
-unlock_out:
- iio_device_release_direct_mode(idev);
+static int kx022a_write_raw(struct iio_dev *idev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret;
+
+ /*
+	 * We should not allow changing scale or frequency when the FIFO is
+	 * running, as that would garble the timestamp/scale of the samples
+	 * already in the buffer. If this turns out to be an issue, the logic
+	 * can later be changed to internally flush the FIFO before
+	 * reconfiguring, so the buffered samples keep matching the freq/scale
+	 * settings. (Such a setup could cause issues if users trust the
+	 * watermark to be reached within a known time limit.)
+ */
+ if (!iio_device_claim_direct(idev))
+ return -EBUSY;
+
+ ret = __kx022a_write_raw(idev, chan, val, val2, mask);
+
+ iio_device_release_direct(idev);
return ret;
}
@@ -620,15 +623,14 @@ static int kx022a_read_raw(struct iio_dev *idev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(idev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(idev))
+ return -EBUSY;
mutex_lock(&data->mutex);
ret = kx022a_get_axis(data, chan, val);
mutex_unlock(&data->mutex);
- iio_device_release_direct_mode(idev);
+ iio_device_release_direct(idev);
return ret;
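The kx022a split above is the pattern used when the claimed section is too large for early returns under a goto label: the device work moves to a locked __ variant and the claim/release pair lives in a thin wrapper, so every error path can return directly. Generic skeleton (foo names are placeholders):

	static int __foo_write_raw(struct iio_dev *idev,
				   struct iio_chan_spec const *chan,
				   int val, int val2, long mask)
	{
		/* device work only; every error path may return directly */
		return 0;
	}

	static int foo_write_raw(struct iio_dev *idev,
				 struct iio_chan_spec const *chan,
				 int val, int val2, long mask)
	{
		int ret;

		if (!iio_device_claim_direct(idev))
			return -EBUSY;

		ret = __foo_write_raw(idev, chan, val, val2, mask);

		iio_device_release_direct(idev);
		return ret;
	}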
diff --git a/drivers/iio/accel/mc3230.c b/drivers/iio/accel/mc3230.c
index caa40a14a631..e2853090fa6e 100644
--- a/drivers/iio/accel/mc3230.c
+++ b/drivers/iio/accel/mc3230.c
@@ -22,20 +22,37 @@
#define MC3230_MODE_OPCON_STANDBY 0x03
#define MC3230_REG_CHIP_ID 0x18
-#define MC3230_CHIP_ID 0x01
-
#define MC3230_REG_PRODUCT_CODE 0x3b
-#define MC3230_PRODUCT_CODE 0x19
/*
* The accelerometer has one measurement range:
*
* -1.5g - +1.5g (8-bit, signed)
*
- * scale = (1.5 + 1.5) * 9.81 / (2^8 - 1) = 0.115411765
*/
-static const int mc3230_nscale = 115411765;
+struct mc3230_chip_info {
+ const char *name;
+ const u8 chip_id;
+ const u8 product_code;
+ const int scale;
+};
+
+static const struct mc3230_chip_info mc3230_chip_info = {
+ .name = "mc3230",
+ .chip_id = 0x01,
+ .product_code = 0x19,
+ /* (1.5 + 1.5) * 9.81 / (2^8 - 1) = 0.115411765 */
+ .scale = 115411765,
+};
+
+static const struct mc3230_chip_info mc3510c_chip_info = {
+ .name = "mc3510c",
+ .chip_id = 0x23,
+ .product_code = 0x10,
+ /* Was obtained empirically */
+ .scale = 625000000,
+};
#define MC3230_CHANNEL(reg, axis) { \
.type = IIO_ACCEL, \
@@ -44,18 +61,35 @@ static const int mc3230_nscale = 115411765;
.channel2 = IIO_MOD_##axis, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = mc3230_ext_info, \
+}
+
+struct mc3230_data {
+ const struct mc3230_chip_info *chip_info;
+ struct i2c_client *client;
+ struct iio_mount_matrix orientation;
+};
+
+static const struct iio_mount_matrix *
+mc3230_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct mc3230_data *data = iio_priv(indio_dev);
+
+ return &data->orientation;
}
+static const struct iio_chan_spec_ext_info mc3230_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, mc3230_get_mount_matrix),
+ { }
+};
+
static const struct iio_chan_spec mc3230_channels[] = {
MC3230_CHANNEL(MC3230_REG_XOUT, X),
MC3230_CHANNEL(MC3230_REG_YOUT, Y),
MC3230_CHANNEL(MC3230_REG_ZOUT, Z),
};
-struct mc3230_data {
- struct i2c_client *client;
-};
-
static int mc3230_set_opcon(struct mc3230_data *data, int opcon)
{
int ret;
@@ -95,7 +129,7 @@ static int mc3230_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 0;
- *val2 = mc3230_nscale;
+ *val2 = data->chip_info->scale;
return IIO_VAL_INT_PLUS_NANO;
default:
return -EINVAL;
@@ -111,15 +145,28 @@ static int mc3230_probe(struct i2c_client *client)
int ret;
struct iio_dev *indio_dev;
struct mc3230_data *data;
+ const struct mc3230_chip_info *chip_info;
+
+ chip_info = i2c_get_match_data(client);
+ if (chip_info == NULL) {
+		dev_err(&client->dev, "failed to get match data\n");
+ return -ENODATA;
+ }
/* First check chip-id and product-id */
ret = i2c_smbus_read_byte_data(client, MC3230_REG_CHIP_ID);
- if (ret != MC3230_CHIP_ID)
- return (ret < 0) ? ret : -ENODEV;
+ if (ret != chip_info->chip_id) {
+ dev_info(&client->dev,
+			 "chip id check failed: 0x%x != 0x%x\n",
+ ret, chip_info->chip_id);
+ }
ret = i2c_smbus_read_byte_data(client, MC3230_REG_PRODUCT_CODE);
- if (ret != MC3230_PRODUCT_CODE)
- return (ret < 0) ? ret : -ENODEV;
+ if (ret != chip_info->product_code) {
+ dev_info(&client->dev,
+			 "product code check failed: 0x%x != 0x%x\n",
+ ret, chip_info->product_code);
+ }
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev) {
@@ -128,11 +175,12 @@ static int mc3230_probe(struct i2c_client *client)
}
data = iio_priv(indio_dev);
+ data->chip_info = chip_info;
data->client = client;
i2c_set_clientdata(client, indio_dev);
indio_dev->info = &mc3230_info;
- indio_dev->name = "mc3230";
+ indio_dev->name = chip_info->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = mc3230_channels;
indio_dev->num_channels = ARRAY_SIZE(mc3230_channels);
@@ -141,6 +189,10 @@ static int mc3230_probe(struct i2c_client *client)
if (ret < 0)
return ret;
+ ret = iio_read_mount_matrix(&client->dev, &data->orientation);
+ if (ret)
+ return ret;
+
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(&client->dev, "device_register failed\n");
@@ -180,14 +232,23 @@ static int mc3230_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(mc3230_pm_ops, mc3230_suspend, mc3230_resume);
static const struct i2c_device_id mc3230_i2c_id[] = {
- { "mc3230" },
- {}
+ { "mc3230", (kernel_ulong_t)&mc3230_chip_info },
+ { "mc3510c", (kernel_ulong_t)&mc3510c_chip_info },
+ { }
};
MODULE_DEVICE_TABLE(i2c, mc3230_i2c_id);
+static const struct of_device_id mc3230_of_match[] = {
+	{ .compatible = "mcube,mc3230", .data = &mc3230_chip_info },
+	{ .compatible = "mcube,mc3510c", .data = &mc3510c_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mc3230_of_match);
+
static struct i2c_driver mc3230_driver = {
.driver = {
.name = "mc3230",
+ .of_match_table = mc3230_of_match,
.pm = pm_sleep_ptr(&mc3230_pm_ops),
},
.probe = mc3230_probe,
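The mc3230 conversion follows the usual multi-variant recipe: one chip_info struct per part, referenced from both the I2C and OF ID tables, and resolved in probe with i2c_get_match_data() so either match path yields the same pointer. The three pieces together (foo names are placeholders):

	static const struct foo_chip_info foo_a_info = { .name = "foo-a" };

	static const struct i2c_device_id foo_i2c_id[] = {
		{ "foo-a", (kernel_ulong_t)&foo_a_info },
		{ }
	};

	static const struct of_device_id foo_of_match[] = {
		{ .compatible = "vendor,foo-a", .data = &foo_a_info },
		{ }
	};

	static int foo_probe(struct i2c_client *client)
	{
		const struct foo_chip_info *info = i2c_get_match_data(client);

		if (!info)
			return -ENODATA;
		/* ... use info->name, info->scale, ... */
		return 0;
	}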
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 962d289065ab..05f5482f366e 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -497,14 +497,13 @@ static int mma8452_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&data->lock);
ret = mma8452_read(data, buffer);
mutex_unlock(&data->lock);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
@@ -707,55 +706,45 @@ static int mma8452_set_hp_filter_frequency(struct mma8452_data *data,
return mma8452_change_config(data, MMA8452_HP_FILTER_CUTOFF, reg);
}
-static int mma8452_write_raw(struct iio_dev *indio_dev,
+static int __mma8452_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
struct mma8452_data *data = iio_priv(indio_dev);
- int i, ret;
-
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ int i, j, ret;
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
i = mma8452_get_samp_freq_index(data, val, val2);
- if (i < 0) {
- ret = i;
- break;
- }
+ if (i < 0)
+ return i;
+
data->ctrl_reg1 &= ~MMA8452_CTRL_DR_MASK;
data->ctrl_reg1 |= i << MMA8452_CTRL_DR_SHIFT;
data->sleep_val = mma8452_calculate_sleep(data);
- ret = mma8452_change_config(data, MMA8452_CTRL_REG1,
- data->ctrl_reg1);
- break;
+ return mma8452_change_config(data, MMA8452_CTRL_REG1,
+ data->ctrl_reg1);
+
case IIO_CHAN_INFO_SCALE:
i = mma8452_get_scale_index(data, val, val2);
- if (i < 0) {
- ret = i;
- break;
- }
+ if (i < 0)
+ return i;
data->data_cfg &= ~MMA8452_DATA_CFG_FS_MASK;
data->data_cfg |= i;
- ret = mma8452_change_config(data, MMA8452_DATA_CFG,
- data->data_cfg);
- break;
+ return mma8452_change_config(data, MMA8452_DATA_CFG,
+ data->data_cfg);
+
case IIO_CHAN_INFO_CALIBBIAS:
- if (val < -128 || val > 127) {
- ret = -EINVAL;
- break;
- }
+ if (val < -128 || val > 127)
+ return -EINVAL;
- ret = mma8452_change_config(data,
- MMA8452_OFF_X + chan->scan_index,
- val);
- break;
+ return mma8452_change_config(data,
+ MMA8452_OFF_X + chan->scan_index,
+ val);
case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
if (val == 0 && val2 == 0) {
@@ -764,29 +753,38 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
data->data_cfg |= MMA8452_DATA_CFG_HPF_MASK;
ret = mma8452_set_hp_filter_frequency(data, val, val2);
if (ret < 0)
- break;
+ return ret;
}
- ret = mma8452_change_config(data, MMA8452_DATA_CFG,
+ return mma8452_change_config(data, MMA8452_DATA_CFG,
data->data_cfg);
- break;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- ret = mma8452_get_odr_index(data);
+ j = mma8452_get_odr_index(data);
for (i = 0; i < ARRAY_SIZE(mma8452_os_ratio); i++) {
- if (mma8452_os_ratio[i][ret] == val) {
- ret = mma8452_set_power_mode(data, i);
- break;
- }
+ if (mma8452_os_ratio[i][j] == val)
+ return mma8452_set_power_mode(data, i);
}
- break;
+
+ return -EINVAL;
+
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
+}
+
+static int mma8452_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
- iio_device_release_direct_mode(indio_dev);
+ ret = __mma8452_write_raw(indio_dev, chan, val, val2, mask);
+ iio_device_release_direct(indio_dev);
return ret;
}
diff --git a/drivers/iio/accel/msa311.c b/drivers/iio/accel/msa311.c
index e7fb860f3233..d31c11fbbe68 100644
--- a/drivers/iio/accel/msa311.c
+++ b/drivers/iio/accel/msa311.c
@@ -332,7 +332,7 @@ static const struct regmap_config msa311_regmap_config = {
.wr_table = &msa311_writeable_table,
.rd_table = &msa311_readable_table,
.volatile_table = &msa311_volatile_table,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
#define MSA311_GENMASK(field) ({ \
@@ -594,23 +594,24 @@ static int msa311_read_raw_data(struct iio_dev *indio_dev,
__le16 axis;
int err;
- err = pm_runtime_resume_and_get(dev);
- if (err)
- return err;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
- err = iio_device_claim_direct_mode(indio_dev);
- if (err)
+ err = pm_runtime_resume_and_get(dev);
+ if (err) {
+ iio_device_release_direct(indio_dev);
return err;
+ }
mutex_lock(&msa311->lock);
err = msa311_get_axis(msa311, chan, &axis);
mutex_unlock(&msa311->lock);
- iio_device_release_direct_mode(indio_dev);
-
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
+ iio_device_release_direct(indio_dev);
+
if (err) {
dev_err(dev, "can't get axis %s (%pe)\n",
chan->datasheet_name, ERR_PTR(err));
@@ -756,18 +757,19 @@ static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2)
unsigned int odr;
int err;
- err = pm_runtime_resume_and_get(dev);
- if (err)
- return err;
-
/*
	 * Changing the sampling frequency is prohibited while buffer mode is
	 * enabled, because the MSA311 chip sometimes returns outliers when the
	 * frequency is being increased at the moment of a read operation.
*/
- err = iio_device_claim_direct_mode(indio_dev);
- if (err)
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err) {
+ iio_device_release_direct(indio_dev);
return err;
+ }
err = -EINVAL;
for (odr = 0; odr < ARRAY_SIZE(msa311_odr_table); odr++)
@@ -779,11 +781,11 @@ static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2)
break;
}
- iio_device_release_direct_mode(indio_dev);
-
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
+ iio_device_release_direct(indio_dev);
+
if (err)
dev_err(dev, "can't update frequency (%pe)\n", ERR_PTR(err));
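Note the ordering these msa311 hunks establish: direct mode is claimed first and released last, with the runtime-PM get/put nested inside, so a failed resume cannot leave the device claimed. As a skeleton:

	if (!iio_device_claim_direct(indio_dev))
		return -EBUSY;

	err = pm_runtime_resume_and_get(dev);
	if (err) {
		iio_device_release_direct(indio_dev);
		return err;
	}

	/* ... talk to the hardware ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	iio_device_release_direct(indio_dev);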
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 849c90203071..6529df1a498c 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -33,6 +33,20 @@ config AD4000
To compile this driver as a module, choose M here: the module will be
called ad4000.
+config AD4030
+ tristate "Analog Devices AD4030 ADC Driver"
+ depends on SPI
+ depends on GPIOLIB
+ select REGMAP
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Analog Devices AD4030 and AD4630 high speed
+ SPI analog to digital converters (ADC).
+
+ To compile this driver as a module, choose M here: the module will be
+ called ad4030.
+
config AD4130
tristate "Analog Device AD4130 ADC Driver"
depends on SPI
@@ -51,9 +65,11 @@ config AD4130
config AD4695
tristate "Analog Device AD4695 ADC Driver"
depends on SPI
- select REGMAP_SPI
select IIO_BUFFER
+ select IIO_BUFFER_DMAENGINE
select IIO_TRIGGERED_BUFFER
+ select REGMAP
+ select SPI_OFFLOAD
help
Say yes here to build support for Analog Devices AD4695 and similar
analog to digital converters (ADC).
@@ -61,6 +77,20 @@ config AD4695
To compile this driver as a module, choose M here: the module will be
called ad4695.
+config AD4851
+	tristate "Analog Devices AD4851 DAS Driver"
+ depends on SPI
+ depends on PWM
+ select REGMAP_SPI
+ select IIO_BACKEND
+ help
+ Say yes here to build support for Analog Devices AD4851, AD4852,
+ AD4853, AD4854, AD4855, AD4856, AD4857, AD4858, AD4858I high speed
+ data acquisition system (DAS).
+
+ To compile this driver as a module, choose M here: the module will be
+ called ad4851.
+
config AD7091R
tristate
@@ -112,6 +142,16 @@ config AD7173
To compile this driver as a module, choose M here: the module will be
called ad7173.
+config AD7191
+ tristate "Analog Devices AD7191 ADC driver"
+ depends on SPI
+ select AD_SIGMA_DELTA
+ help
+ Say yes here to build support for Analog Devices AD7191.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7191.
+
config AD7192
tristate "Analog Devices AD7192 and similar ADC driver"
depends on SPI
@@ -188,7 +228,9 @@ config AD7298
config AD7380
tristate "Analog Devices AD7380 ADC driver"
depends on SPI_MASTER
+ select SPI_OFFLOAD
select IIO_BUFFER
+ select IIO_BUFFER_DMAENGINE
select IIO_TRIGGER
select IIO_TRIGGERED_BUFFER
help
@@ -360,7 +402,9 @@ config AD7923
config AD7944
tristate "Analog Devices AD7944 and similar ADCs driver"
depends on SPI
+ select SPI_OFFLOAD
select IIO_BUFFER
+ select IIO_BUFFER_DMAENGINE
select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Analog Devices
@@ -1467,6 +1511,16 @@ config TI_ADS1119
This driver can also be built as a module. If so, the module will be
called ti-ads1119.
+config TI_ADS7138
+ tristate "Texas Instruments ADS7128 and ADS7138 ADC driver"
+ depends on I2C
+ help
+ If you say yes here you get support for Texas Instruments ADS7128 and
+ ADS7138 8-channel A/D converters with 12-bit resolution.
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-ads7138.
+
config TI_ADS7924
tristate "Texas Instruments ADS7924 ADC"
depends on I2C
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index ee19afba62b7..3e918c3eec69 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -7,13 +7,16 @@
obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o
obj-$(CONFIG_AD4000) += ad4000.o
+obj-$(CONFIG_AD4030) += ad4030.o
obj-$(CONFIG_AD4130) += ad4130.o
obj-$(CONFIG_AD4695) += ad4695.o
+obj-$(CONFIG_AD4851) += ad4851.o
obj-$(CONFIG_AD7091R) += ad7091r-base.o
obj-$(CONFIG_AD7091R5) += ad7091r5.o
obj-$(CONFIG_AD7091R8) += ad7091r8.o
obj-$(CONFIG_AD7124) += ad7124.o
obj-$(CONFIG_AD7173) += ad7173.o
+obj-$(CONFIG_AD7191) += ad7191.o
obj-$(CONFIG_AD7192) += ad7192.o
obj-$(CONFIG_AD7266) += ad7266.o
obj-$(CONFIG_AD7280) += ad7280a.o
@@ -133,6 +136,7 @@ obj-$(CONFIG_TI_ADS1119) += ti-ads1119.o
obj-$(CONFIG_TI_ADS124S08) += ti-ads124s08.o
obj-$(CONFIG_TI_ADS1298) += ti-ads1298.o
obj-$(CONFIG_TI_ADS131E08) += ti-ads131e08.o
+obj-$(CONFIG_TI_ADS7138) += ti-ads7138.o
obj-$(CONFIG_TI_ADS7924) += ti-ads7924.o
obj-$(CONFIG_TI_ADS7950) += ti-ads7950.o
obj-$(CONFIG_TI_ADS8344) += ti-ads8344.o
diff --git a/drivers/iio/adc/ad4000.c b/drivers/iio/adc/ad4000.c
index 1d556a842a68..4fe8dee48da9 100644
--- a/drivers/iio/adc/ad4000.c
+++ b/drivers/iio/adc/ad4000.c
@@ -535,12 +535,16 @@ static int ad4000_read_raw(struct iio_dev *indio_dev,
int *val2, long info)
{
struct ad4000_state *st = iio_priv(indio_dev);
+ int ret;
switch (info) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return ad4000_single_conversion(indio_dev, chan, val);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = ad4000_single_conversion(indio_dev, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SCALE:
*val = st->scale_tbl[st->span_comp][0];
*val2 = st->scale_tbl[st->span_comp][1];
@@ -585,36 +589,46 @@ static int ad4000_write_raw_get_fmt(struct iio_dev *indio_dev,
}
}
-static int ad4000_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan, int val, int val2,
- long mask)
+static int __ad4000_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val2)
{
struct ad4000_state *st = iio_priv(indio_dev);
unsigned int reg_val;
bool span_comp_en;
int ret;
- switch (mask) {
- case IIO_CHAN_INFO_SCALE:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- guard(mutex)(&st->lock);
+ guard(mutex)(&st->lock);
+
+ ret = ad4000_read_reg(st, &reg_val);
+ if (ret < 0)
+ return ret;
+
+ span_comp_en = val2 == st->scale_tbl[1][1];
+ reg_val &= ~AD4000_CFG_SPAN_COMP;
+ reg_val |= FIELD_PREP(AD4000_CFG_SPAN_COMP, span_comp_en);
- ret = ad4000_read_reg(st, &reg_val);
- if (ret < 0)
- return ret;
+ ret = ad4000_write_reg(st, reg_val);
+ if (ret < 0)
+ return ret;
- span_comp_en = val2 == st->scale_tbl[1][1];
- reg_val &= ~AD4000_CFG_SPAN_COMP;
- reg_val |= FIELD_PREP(AD4000_CFG_SPAN_COMP, span_comp_en);
+ st->span_comp = span_comp_en;
+ return 0;
+}
- ret = ad4000_write_reg(st, reg_val);
- if (ret < 0)
- return ret;
+static int ad4000_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret;
- st->span_comp = span_comp_en;
- return 0;
- }
- unreachable();
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = __ad4000_write_raw(indio_dev, chan, val2);
+ iio_device_release_direct(indio_dev);
+ return ret;
default:
return -EINVAL;
}
diff --git a/drivers/iio/adc/ad4030.c b/drivers/iio/adc/ad4030.c
new file mode 100644
index 000000000000..9a020680885d
--- /dev/null
+++ b/drivers/iio/adc/ad4030.c
@@ -0,0 +1,1230 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Analog Devices AD4030 and AD4630 ADC family driver.
+ *
+ * Copyright 2024 Analog Devices, Inc.
+ * Copyright 2024 BayLibre, SAS
+ *
+ * based on code from:
+ * Analog Devices, Inc.
+ * Sergiu Cuciurean <sergiu.cuciurean@analog.com>
+ * Nuno Sa <nuno.sa@analog.com>
+ * Marcelo Schmitt <marcelo.schmitt@analog.com>
+ * Liviu Adace <liviu.adace@analog.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/unaligned.h>
+#include <linux/units.h>
+
+#define AD4030_REG_INTERFACE_CONFIG_A 0x00
+#define AD4030_REG_INTERFACE_CONFIG_A_SW_RESET (BIT(0) | BIT(7))
+#define AD4030_REG_INTERFACE_CONFIG_B 0x01
+#define AD4030_REG_DEVICE_CONFIG 0x02
+#define AD4030_REG_CHIP_TYPE 0x03
+#define AD4030_REG_PRODUCT_ID_L 0x04
+#define AD4030_REG_PRODUCT_ID_H 0x05
+#define AD4030_REG_CHIP_GRADE 0x06
+#define AD4030_REG_CHIP_GRADE_AD4030_24_GRADE 0x10
+#define AD4030_REG_CHIP_GRADE_AD4630_16_GRADE 0x03
+#define AD4030_REG_CHIP_GRADE_AD4630_24_GRADE 0x00
+#define AD4030_REG_CHIP_GRADE_AD4632_16_GRADE 0x05
+#define AD4030_REG_CHIP_GRADE_AD4632_24_GRADE 0x02
+#define AD4030_REG_CHIP_GRADE_MASK_CHIP_GRADE GENMASK(7, 3)
+#define AD4030_REG_SCRATCH_PAD 0x0A
+#define AD4030_REG_SPI_REVISION 0x0B
+#define AD4030_REG_VENDOR_L 0x0C
+#define AD4030_REG_VENDOR_H 0x0D
+#define AD4030_REG_STREAM_MODE 0x0E
+#define AD4030_REG_INTERFACE_CONFIG_C 0x10
+#define AD4030_REG_INTERFACE_STATUS_A 0x11
+#define AD4030_REG_EXIT_CFG_MODE 0x14
+#define AD4030_REG_EXIT_CFG_MODE_EXIT_MSK BIT(0)
+#define AD4030_REG_AVG 0x15
+#define AD4030_REG_AVG_MASK_AVG_SYNC BIT(7)
+#define AD4030_REG_AVG_MASK_AVG_VAL GENMASK(4, 0)
+#define AD4030_REG_OFFSET_X0_0 0x16
+#define AD4030_REG_OFFSET_X0_1 0x17
+#define AD4030_REG_OFFSET_X0_2 0x18
+#define AD4030_REG_OFFSET_X1_0 0x19
+#define AD4030_REG_OFFSET_X1_1 0x1A
+#define AD4030_REG_OFFSET_X1_2 0x1B
+#define AD4030_REG_OFFSET_BYTES_NB 3
+#define AD4030_REG_OFFSET_CHAN(ch) \
+ (AD4030_REG_OFFSET_X0_2 + (AD4030_REG_OFFSET_BYTES_NB * (ch)))
+#define AD4030_REG_GAIN_X0_LSB 0x1C
+#define AD4030_REG_GAIN_X0_MSB 0x1D
+#define AD4030_REG_GAIN_X1_LSB 0x1E
+#define AD4030_REG_GAIN_X1_MSB 0x1F
+#define AD4030_REG_GAIN_MAX_GAIN 1999970
+#define AD4030_REG_GAIN_BYTES_NB 2
+#define AD4030_REG_GAIN_CHAN(ch) \
+ (AD4030_REG_GAIN_X0_MSB + (AD4030_REG_GAIN_BYTES_NB * (ch)))
+#define AD4030_REG_MODES 0x20
+#define AD4030_REG_MODES_MASK_OUT_DATA_MODE GENMASK(2, 0)
+#define AD4030_REG_MODES_MASK_LANE_MODE GENMASK(7, 6)
+#define AD4030_REG_OSCILATOR 0x21
+#define AD4030_REG_IO 0x22
+#define AD4030_REG_IO_MASK_IO2X BIT(1)
+#define AD4030_REG_PAT0 0x23
+#define AD4030_REG_PAT1 0x24
+#define AD4030_REG_PAT2 0x25
+#define AD4030_REG_PAT3 0x26
+#define AD4030_REG_DIG_DIAG 0x34
+#define AD4030_REG_DIG_ERR 0x35
+
+/* Sequence starting with "1 0 1" to enable reg access */
+#define AD4030_REG_ACCESS 0xA0
+
+#define AD4030_MAX_IIO_SAMPLE_SIZE_BUFFERED BITS_TO_BYTES(64)
+#define AD4030_MAX_HARDWARE_CHANNEL_NB 2
+#define AD4030_MAX_IIO_CHANNEL_NB 5
+#define AD4030_SINGLE_COMMON_BYTE_CHANNELS_MASK 0b10
+#define AD4030_DUAL_COMMON_BYTE_CHANNELS_MASK 0b1100
+#define AD4030_GAIN_MIDLE_POINT 0x8000
+/*
+ * This accounts for 1 sample per channel plus one s64 for the timestamp,
+ * aligned on a s64 boundary
+ */
+#define AD4030_MAXIMUM_RX_BUFFER_SIZE \
+ (ALIGN(AD4030_MAX_IIO_SAMPLE_SIZE_BUFFERED * \
+ AD4030_MAX_HARDWARE_CHANNEL_NB, \
+ sizeof(s64)) + sizeof(s64))
+
+#define AD4030_VREF_MIN_UV (4096 * MILLI)
+#define AD4030_VREF_MAX_UV (5000 * MILLI)
+#define AD4030_VIO_THRESHOLD_UV (1400 * MILLI)
+#define AD4030_SPI_MAX_XFER_LEN 8
+#define AD4030_SPI_MAX_REG_XFER_SPEED (80 * MEGA)
+#define AD4030_TCNVH_NS 10
+#define AD4030_TCNVL_NS 20
+#define AD4030_TCYC_NS 500
+#define AD4030_TCYC_ADJUSTED_NS (AD4030_TCYC_NS - AD4030_TCNVL_NS)
+#define AD4030_TRESET_PW_NS 50
+#define AD4632_TCYC_NS 2000
+#define AD4632_TCYC_ADJUSTED_NS (AD4632_TCYC_NS - AD4030_TCNVL_NS)
+#define AD4030_TRESET_COM_DELAY_MS 750
+
+enum ad4030_out_mode {
+ AD4030_OUT_DATA_MD_DIFF,
+ AD4030_OUT_DATA_MD_16_DIFF_8_COM,
+ AD4030_OUT_DATA_MD_24_DIFF_8_COM,
+ AD4030_OUT_DATA_MD_30_AVERAGED_DIFF,
+ AD4030_OUT_DATA_MD_32_PATTERN,
+};
+
+enum {
+ AD4030_LANE_MD_1_PER_CH,
+ AD4030_LANE_MD_2_PER_CH,
+ AD4030_LANE_MD_4_PER_CH,
+ AD4030_LANE_MD_INTERLEAVED,
+};
+
+enum {
+ AD4030_SCAN_TYPE_NORMAL,
+ AD4030_SCAN_TYPE_AVG,
+};
+
+struct ad4030_chip_info {
+ const char *name;
+ const unsigned long *available_masks;
+ const struct iio_chan_spec channels[AD4030_MAX_IIO_CHANNEL_NB];
+ u8 grade;
+ u8 precision_bits;
+ /* Number of hardware channels */
+ int num_voltage_inputs;
+ unsigned int tcyc_ns;
+};
+
+struct ad4030_state {
+ struct spi_device *spi;
+ struct regmap *regmap;
+ const struct ad4030_chip_info *chip;
+ const struct iio_scan_type *current_scan_type;
+ struct gpio_desc *cnv_gpio;
+ int vref_uv;
+ int vio_uv;
+ int offset_avail[3];
+ unsigned int avg_log2;
+ enum ad4030_out_mode mode;
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the transfer buffers
+ * to live in their own cache lines.
+ */
+ u8 tx_data[AD4030_SPI_MAX_XFER_LEN] __aligned(IIO_DMA_MINALIGN);
+ union {
+ u8 raw[AD4030_MAXIMUM_RX_BUFFER_SIZE];
+ struct {
+ s32 diff;
+ u8 common;
+ } single;
+ struct {
+ s32 diff[2];
+ u8 common[2];
+ } dual;
+ } rx_data;
+};
+
+/*
+ * For a chip with 2 hardware channels this will be used to create 2
+ * common-mode channels:
+ * - voltage4
+ * - voltage5
+ * As the common-mode channels are after the differential ones, we compute the
+ * channel number like this:
+ * - _idx is the scan_index (the order in the output buffer)
+ * - _ch is the hardware channel number this common-mode channel is related to
+ * - _idx - _ch gives the number of differential channels in the chip
+ * - (_idx - _ch) * 2 is the starting number of the common-mode channels, since
+ *   each differential channel takes up two channel numbers
+ * - (_idx - _ch) * 2 + _ch gives the channel number for this specific
+ *   common-mode channel
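+ *
+ * For example, with 2 hardware channels the differential pairs occupy scan
+ * indexes 0 and 1 (voltage0-voltage1 and voltage2-voltage3), so the
+ * common-mode channel of hardware channel 0 has _idx = 2 and becomes
+ * (2 - 0) * 2 + 0 = voltage4, while the one of hardware channel 1 has
+ * _idx = 3 and becomes (3 - 1) * 2 + 1 = voltage5.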
+ */
+#define AD4030_CHAN_CMO(_idx, _ch) { \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .address = (_ch), \
+ .channel = ((_idx) - (_ch)) * 2 + (_ch), \
+ .scan_index = (_idx), \
+ .scan_type = { \
+ .sign = 'u', \
+ .storagebits = 8, \
+ .realbits = 8, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+/*
+ * For a chip with 2 hardware channels this will be used to create 2 differential
+ * channels:
+ * - voltage0-voltage1
+ * - voltage2-voltage3
+ */
+#define AD4030_CHAN_DIFF(_idx, _scan_type) { \
+ .info_mask_shared_by_all = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBSCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS) | \
+ BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_CALIBBIAS) | \
+ BIT(IIO_CHAN_INFO_CALIBSCALE), \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .address = (_idx), \
+ .channel = (_idx) * 2, \
+ .channel2 = (_idx) * 2 + 1, \
+ .scan_index = (_idx), \
+ .differential = true, \
+ .has_ext_scan_type = 1, \
+ .ext_scan_type = _scan_type, \
+ .num_ext_scan_type = ARRAY_SIZE(_scan_type), \
+}
+
+static const int ad4030_average_modes[] = {
+ 1, 2, 4, 8, 16, 32, 64, 128,
+ 256, 512, 1024, 2048, 4096, 8192, 16384, 32768,
+ 65536,
+};
+
+static int ad4030_enter_config_mode(struct ad4030_state *st)
+{
+ st->tx_data[0] = AD4030_REG_ACCESS;
+
+ struct spi_transfer xfer = {
+ .tx_buf = st->tx_data,
+ .bits_per_word = 8,
+ .len = 1,
+ .speed_hz = AD4030_SPI_MAX_REG_XFER_SPEED,
+ };
+
+ return spi_sync_transfer(st->spi, &xfer, 1);
+}
+
+static int ad4030_exit_config_mode(struct ad4030_state *st)
+{
+ st->tx_data[0] = 0;
+ st->tx_data[1] = AD4030_REG_EXIT_CFG_MODE;
+ st->tx_data[2] = AD4030_REG_EXIT_CFG_MODE_EXIT_MSK;
+
+ struct spi_transfer xfer = {
+ .tx_buf = st->tx_data,
+ .bits_per_word = 8,
+ .len = 3,
+ .speed_hz = AD4030_SPI_MAX_REG_XFER_SPEED,
+ };
+
+ return spi_sync_transfer(st->spi, &xfer, 1);
+}
+
+static int ad4030_spi_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ int ret;
+ struct ad4030_state *st = context;
+ struct spi_transfer xfer = {
+ .tx_buf = st->tx_data,
+ .rx_buf = st->rx_data.raw,
+ .bits_per_word = 8,
+ .len = reg_size + val_size,
+ .speed_hz = AD4030_SPI_MAX_REG_XFER_SPEED,
+ };
+
+ if (xfer.len > sizeof(st->tx_data) ||
+ xfer.len > sizeof(st->rx_data.raw))
+ return -EINVAL;
+
+ ret = ad4030_enter_config_mode(st);
+ if (ret)
+ return ret;
+
+ memset(st->tx_data, 0, sizeof(st->tx_data));
+ memcpy(st->tx_data, reg, reg_size);
+
+ ret = spi_sync_transfer(st->spi, &xfer, 1);
+ if (ret)
+ return ret;
+
+ memcpy(val, &st->rx_data.raw[reg_size], val_size);
+
+ return ad4030_exit_config_mode(st);
+}
+
+static int ad4030_spi_write(void *context, const void *data, size_t count)
+{
+ int ret;
+ struct ad4030_state *st = context;
+ bool is_reset = count >= 3 &&
+ ((u8 *)data)[0] == 0 &&
+ ((u8 *)data)[1] == 0 &&
+ ((u8 *)data)[2] == 0x81;
+ struct spi_transfer xfer = {
+ .tx_buf = st->tx_data,
+ .bits_per_word = 8,
+ .len = count,
+ .speed_hz = AD4030_SPI_MAX_REG_XFER_SPEED,
+ };
+
+ if (count > sizeof(st->tx_data))
+ return -EINVAL;
+
+ ret = ad4030_enter_config_mode(st);
+ if (ret)
+ return ret;
+
+ memcpy(st->tx_data, data, count);
+
+ ret = spi_sync_transfer(st->spi, &xfer, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * From datasheet: "After a [...] reset, no SPI commands or conversions
+ * can be started for 750us"
+ * After a reset we are in conversion mode, no need to exit config mode
+ */
+ if (is_reset) {
+ fsleep(750);
+ return 0;
+ }
+
+ return ad4030_exit_config_mode(st);
+}
+
+static const struct regmap_bus ad4030_regmap_bus = {
+ .read = ad4030_spi_read,
+ .write = ad4030_spi_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+};
+
+static const struct regmap_range ad4030_regmap_rd_range[] = {
+ regmap_reg_range(AD4030_REG_INTERFACE_CONFIG_A, AD4030_REG_CHIP_GRADE),
+ regmap_reg_range(AD4030_REG_SCRATCH_PAD, AD4030_REG_STREAM_MODE),
+ regmap_reg_range(AD4030_REG_INTERFACE_CONFIG_C,
+ AD4030_REG_INTERFACE_STATUS_A),
+ regmap_reg_range(AD4030_REG_EXIT_CFG_MODE, AD4030_REG_PAT3),
+ regmap_reg_range(AD4030_REG_DIG_DIAG, AD4030_REG_DIG_ERR),
+};
+
+static const struct regmap_range ad4030_regmap_wr_range[] = {
+ regmap_reg_range(AD4030_REG_CHIP_TYPE, AD4030_REG_CHIP_GRADE),
+ regmap_reg_range(AD4030_REG_SPI_REVISION, AD4030_REG_VENDOR_H),
+};
+
+static const struct regmap_access_table ad4030_regmap_rd_table = {
+ .yes_ranges = ad4030_regmap_rd_range,
+ .n_yes_ranges = ARRAY_SIZE(ad4030_regmap_rd_range),
+};
+
+static const struct regmap_access_table ad4030_regmap_wr_table = {
+ .no_ranges = ad4030_regmap_wr_range,
+ .n_no_ranges = ARRAY_SIZE(ad4030_regmap_wr_range),
+};
+
+static const struct regmap_config ad4030_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .read_flag_mask = 0x80,
+ .rd_table = &ad4030_regmap_rd_table,
+ .wr_table = &ad4030_regmap_wr_table,
+ .max_register = AD4030_REG_DIG_ERR,
+};
+
+static int ad4030_get_chan_scale(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+ const struct iio_scan_type *scan_type;
+
+ if (chan->differential) {
+ scan_type = iio_get_current_scan_type(indio_dev,
+ st->chip->channels);
+ *val = (st->vref_uv * 2) / MILLI;
+ *val2 = scan_type->realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+
+ *val = st->vref_uv / MILLI;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+}
+
+static int ad4030_get_chan_calibscale(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+ u16 gain;
+ int ret;
+
+ ret = regmap_bulk_read(st->regmap, AD4030_REG_GAIN_CHAN(chan->address),
+ st->rx_data.raw, AD4030_REG_GAIN_BYTES_NB);
+ if (ret)
+ return ret;
+
+ gain = get_unaligned_be16(st->rx_data.raw);
+
+ /* From datasheet: multiplied output = input × gain word/0x8000 */
+ *val = gain / AD4030_GAIN_MIDLE_POINT;
+ *val2 = mul_u64_u32_div(gain % AD4030_GAIN_MIDLE_POINT, NANO,
+ AD4030_GAIN_MIDLE_POINT);
+
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+/* Returns the offset where 1 LSB = (VREF/2^precision_bits - 1)/gain */
+static int ad4030_get_chan_calibbias(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = regmap_bulk_read(st->regmap,
+ AD4030_REG_OFFSET_CHAN(chan->address),
+ st->rx_data.raw, AD4030_REG_OFFSET_BYTES_NB);
+ if (ret)
+ return ret;
+
+ switch (st->chip->precision_bits) {
+ case 16:
+ *val = sign_extend32(get_unaligned_be16(st->rx_data.raw), 15);
+ return IIO_VAL_INT;
+
+ case 24:
+ *val = sign_extend32(get_unaligned_be24(st->rx_data.raw), 23);
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad4030_set_chan_calibscale(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int gain_int,
+ int gain_frac)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+ u64 gain;
+
+ if (gain_int < 0 || gain_frac < 0)
+ return -EINVAL;
+
+ gain = mul_u32_u32(gain_int, MICRO) + gain_frac;
+
+ if (gain > AD4030_REG_GAIN_MAX_GAIN)
+ return -EINVAL;
+
+ put_unaligned_be16(DIV_ROUND_CLOSEST_ULL(gain * AD4030_GAIN_MIDLE_POINT,
+ MICRO),
+ st->tx_data);
+
+ return regmap_bulk_write(st->regmap,
+ AD4030_REG_GAIN_CHAN(chan->address),
+ st->tx_data, AD4030_REG_GAIN_BYTES_NB);
+}
+
+static int ad4030_set_chan_calibbias(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int offset)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+
+ if (offset < st->offset_avail[0] || offset > st->offset_avail[2])
+ return -EINVAL;
+
+ st->tx_data[2] = 0;
+
+ switch (st->chip->precision_bits) {
+ case 16:
+ put_unaligned_be16(offset, st->tx_data);
+ break;
+
+ case 24:
+ put_unaligned_be24(offset, st->tx_data);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_bulk_write(st->regmap,
+ AD4030_REG_OFFSET_CHAN(chan->address),
+ st->tx_data, AD4030_REG_OFFSET_BYTES_NB);
+}
+
+static int ad4030_set_avg_frame_len(struct iio_dev *dev, int avg_val)
+{
+ struct ad4030_state *st = iio_priv(dev);
+ unsigned int avg_log2 = ilog2(avg_val);
+ unsigned int last_avg_idx = ARRAY_SIZE(ad4030_average_modes) - 1;
+ int ret;
+
+ if (avg_val < 0 || avg_val > ad4030_average_modes[last_avg_idx])
+ return -EINVAL;
+
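+	/*
+	 * The register takes the log2 of the requested ratio (e.g. an
+	 * oversampling ratio of 16 is written as 4), with the AVG_SYNC bit
+	 * set in the same write.
+	 */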
+ ret = regmap_write(st->regmap, AD4030_REG_AVG,
+ AD4030_REG_AVG_MASK_AVG_SYNC |
+ FIELD_PREP(AD4030_REG_AVG_MASK_AVG_VAL, avg_log2));
+ if (ret)
+ return ret;
+
+ st->avg_log2 = avg_log2;
+
+ return 0;
+}
+
+static bool ad4030_is_common_byte_asked(struct ad4030_state *st,
+ unsigned int mask)
+{
+ return mask & (st->chip->num_voltage_inputs == 1 ?
+ AD4030_SINGLE_COMMON_BYTE_CHANNELS_MASK :
+ AD4030_DUAL_COMMON_BYTE_CHANNELS_MASK);
+}
+
+static int ad4030_set_mode(struct iio_dev *indio_dev, unsigned long mask)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+
+ if (st->avg_log2 > 0) {
+ st->mode = AD4030_OUT_DATA_MD_30_AVERAGED_DIFF;
+ } else if (ad4030_is_common_byte_asked(st, mask)) {
+ switch (st->chip->precision_bits) {
+ case 16:
+ st->mode = AD4030_OUT_DATA_MD_16_DIFF_8_COM;
+ break;
+
+ case 24:
+ st->mode = AD4030_OUT_DATA_MD_24_DIFF_8_COM;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ } else {
+ st->mode = AD4030_OUT_DATA_MD_DIFF;
+ }
+
+ st->current_scan_type = iio_get_current_scan_type(indio_dev,
+ st->chip->channels);
+ if (IS_ERR(st->current_scan_type))
+ return PTR_ERR(st->current_scan_type);
+
+ return regmap_update_bits(st->regmap, AD4030_REG_MODES,
+ AD4030_REG_MODES_MASK_OUT_DATA_MODE,
+ st->mode);
+}
+
+/*
+ * Descramble two 32-bit numbers out of a 64-bit word. The bits are
+ * interleaved: one bit for the first number, one bit for the second, and
+ * so on...
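+ * For example, the byte pair [A7 B7 A6 B6 A5 B5 A4 B4] and
+ * [A3 B3 A2 B2 A1 B1 A0 B0] unpacks to A7..A0 for the first number and
+ * B7..B0 for the second.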
+ */
+static void ad4030_extract_interleaved(u8 *src, u32 *ch0, u32 *ch1)
+{
+ u8 h0, h1, l0, l1;
+ u32 out0, out1;
+ u8 *out0_raw = (u8 *)&out0;
+ u8 *out1_raw = (u8 *)&out1;
+
+ for (int i = 0; i < 4; i++) {
+ h0 = src[i * 2];
+ l1 = src[i * 2 + 1];
+ h1 = h0 << 1;
+ l0 = l1 >> 1;
+
+ h0 &= 0xAA;
+ l0 &= 0x55;
+ h1 &= 0xAA;
+ l1 &= 0x55;
+
+		h0 = (h0 | h0 << 1) & 0xCC;
+		h1 = (h1 | h1 << 1) & 0xCC;
+		l0 = (l0 | l0 >> 1) & 0x33;
+		l1 = (l1 | l1 >> 1) & 0x33;
+		h0 = (h0 | h0 << 2) & 0xF0;
+		h1 = (h1 | h1 << 2) & 0xF0;
+		l0 = (l0 | l0 >> 2) & 0x0F;
+		l1 = (l1 | l1 >> 2) & 0x0F;
+
+ out0_raw[i] = h0 | l0;
+ out1_raw[i] = h1 | l1;
+ }
+
+ *ch0 = out0;
+ *ch1 = out1;
+}
+
+static int ad4030_conversion(struct iio_dev *indio_dev)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+ unsigned char diff_realbytes =
+ BITS_TO_BYTES(st->current_scan_type->realbits);
+ unsigned char diff_storagebytes =
+ BITS_TO_BYTES(st->current_scan_type->storagebits);
+ unsigned int bytes_to_read;
+ unsigned long cnv_nb = BIT(st->avg_log2);
+ unsigned int i;
+ int ret;
+
+ /* Number of bytes for one differential channel */
+ bytes_to_read = diff_realbytes;
+ /* Add one byte if we are using a differential + common byte mode */
+ bytes_to_read += (st->mode == AD4030_OUT_DATA_MD_24_DIFF_8_COM ||
+ st->mode == AD4030_OUT_DATA_MD_16_DIFF_8_COM) ? 1 : 0;
+	/* Multiply by the number of hardware channels */
+ bytes_to_read *= st->chip->num_voltage_inputs;
+
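+	/*
+	 * One CNV pulse triggers one conversion, so with averaging enabled
+	 * 2^avg_log2 conversions are triggered before the averaged result
+	 * is read back.
+	 */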
+ for (i = 0; i < cnv_nb; i++) {
+ gpiod_set_value_cansleep(st->cnv_gpio, 1);
+ ndelay(AD4030_TCNVH_NS);
+ gpiod_set_value_cansleep(st->cnv_gpio, 0);
+ ndelay(st->chip->tcyc_ns);
+ }
+
+ ret = spi_read(st->spi, st->rx_data.raw, bytes_to_read);
+ if (ret)
+ return ret;
+
+ if (st->chip->num_voltage_inputs == 2)
+ ad4030_extract_interleaved(st->rx_data.raw,
+ &st->rx_data.dual.diff[0],
+ &st->rx_data.dual.diff[1]);
+
+ if (st->mode != AD4030_OUT_DATA_MD_16_DIFF_8_COM &&
+ st->mode != AD4030_OUT_DATA_MD_24_DIFF_8_COM)
+ return 0;
+
+ if (st->chip->num_voltage_inputs == 1) {
+ st->rx_data.single.common = st->rx_data.raw[diff_realbytes];
+ return 0;
+ }
+
+ for (i = 0; i < st->chip->num_voltage_inputs; i++)
+ st->rx_data.dual.common[i] =
+ st->rx_data.raw[diff_storagebytes * i + diff_realbytes];
+
+ return 0;
+}
+
+static int ad4030_single_conversion(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = ad4030_set_mode(indio_dev, BIT(chan->scan_index));
+ if (ret)
+ return ret;
+
+ st->current_scan_type = iio_get_current_scan_type(indio_dev,
+ st->chip->channels);
+ if (IS_ERR(st->current_scan_type))
+ return PTR_ERR(st->current_scan_type);
+
+ ret = ad4030_conversion(indio_dev);
+ if (ret)
+ return ret;
+
+	if (chan->differential) {
+		if (st->chip->num_voltage_inputs == 1)
+			*val = st->rx_data.single.diff;
+		else
+			*val = st->rx_data.dual.diff[chan->address];
+	} else {
+		if (st->chip->num_voltage_inputs == 1)
+			*val = st->rx_data.single.common;
+		else
+			*val = st->rx_data.dual.common[chan->address];
+	}
+
+ return IIO_VAL_INT;
+}
+
+static irqreturn_t ad4030_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ad4030_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = ad4030_conversion(indio_dev);
+ if (ret)
+ goto out;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, st->rx_data.raw,
+ pf->timestamp);
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static const int ad4030_gain_avail[3][2] = {
+ { 0, 0 },
+ { 0, 30518 },
+ { 1, 999969482 },
+};
+
+static int ad4030_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *channel,
+ const int **vals, int *type,
+ int *length, long mask)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ *vals = st->offset_avail;
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_RANGE;
+
+ case IIO_CHAN_INFO_CALIBSCALE:
+ *vals = (void *)ad4030_gain_avail;
+ *type = IIO_VAL_INT_PLUS_NANO;
+ return IIO_AVAIL_RANGE;
+
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *vals = ad4030_average_modes;
+ *type = IIO_VAL_INT;
+ *length = ARRAY_SIZE(ad4030_average_modes);
+ return IIO_AVAIL_LIST;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad4030_read_raw_dispatch(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long info)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ return ad4030_single_conversion(indio_dev, chan, val);
+
+ case IIO_CHAN_INFO_CALIBSCALE:
+ return ad4030_get_chan_calibscale(indio_dev, chan, val, val2);
+
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return ad4030_get_chan_calibbias(indio_dev, chan, val);
+
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *val = BIT(st->avg_log2);
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad4030_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long info)
+{
+ int ret;
+
+ if (info == IIO_CHAN_INFO_SCALE)
+ return ad4030_get_chan_scale(indio_dev, chan, val, val2);
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = ad4030_read_raw_dispatch(indio_dev, chan, val, val2, info);
+
+ iio_device_release_direct(indio_dev);
+
+ return ret;
+}
+
+static int ad4030_write_raw_dispatch(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long info)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_CALIBSCALE:
+ return ad4030_set_chan_calibscale(indio_dev, chan, val, val2);
+
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (val2 != 0)
+ return -EINVAL;
+ return ad4030_set_chan_calibbias(indio_dev, chan, val);
+
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return ad4030_set_avg_frame_len(indio_dev, val);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad4030_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long info)
+{
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = ad4030_write_raw_dispatch(indio_dev, chan, val, val2, info);
+
+ iio_device_release_direct(indio_dev);
+
+ return ret;
+}
+
+static int ad4030_reg_access(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int writeval, unsigned int *readval)
+{
+ const struct ad4030_state *st = iio_priv(indio_dev);
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ if (readval)
+ ret = regmap_read(st->regmap, reg, readval);
+ else
+ ret = regmap_write(st->regmap, reg, writeval);
+
+ iio_device_release_direct(indio_dev);
+
+ return ret;
+}
+
+static int ad4030_read_label(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ char *label)
+{
+ if (chan->differential)
+ return sprintf(label, "differential%lu\n", chan->address);
+ return sprintf(label, "common-mode%lu\n", chan->address);
+}
+
+static int ad4030_get_current_scan_type(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+
+ return st->avg_log2 ? AD4030_SCAN_TYPE_AVG : AD4030_SCAN_TYPE_NORMAL;
+}
+
+static const struct iio_info ad4030_iio_info = {
+ .read_avail = ad4030_read_avail,
+ .read_raw = ad4030_read_raw,
+ .write_raw = ad4030_write_raw,
+ .debugfs_reg_access = ad4030_reg_access,
+ .read_label = ad4030_read_label,
+ .get_current_scan_type = ad4030_get_current_scan_type,
+};
+
+static int ad4030_buffer_preenable(struct iio_dev *indio_dev)
+{
+ return ad4030_set_mode(indio_dev, *indio_dev->active_scan_mask);
+}
+
+static bool ad4030_validate_scan_mask(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+
+	/* Reject scan masks asking for both common-mode bytes and averaging */
+ if (st->avg_log2 && ad4030_is_common_byte_asked(st, *scan_mask))
+ return false;
+
+ return true;
+}
+
+static const struct iio_buffer_setup_ops ad4030_buffer_setup_ops = {
+ .preenable = ad4030_buffer_preenable,
+ .validate_scan_mask = ad4030_validate_scan_mask,
+};
+
+static int ad4030_regulators_get(struct ad4030_state *st)
+{
+ struct device *dev = &st->spi->dev;
+ static const char * const ids[] = { "vdd-5v", "vdd-1v8" };
+ int ret;
+
+ ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(ids), ids);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable regulators\n");
+
+ st->vio_uv = devm_regulator_get_enable_read_voltage(dev, "vio");
+ if (st->vio_uv < 0)
+ return dev_err_probe(dev, st->vio_uv,
+ "Failed to enable and read vio voltage\n");
+
+ st->vref_uv = devm_regulator_get_enable_read_voltage(dev, "ref");
+ if (st->vref_uv < 0) {
+ if (st->vref_uv != -ENODEV)
+ return dev_err_probe(dev, st->vref_uv,
+ "Failed to read ref voltage\n");
+
+		/* If the optional REF supply is absent, REFIN must be used */
+ st->vref_uv = devm_regulator_get_enable_read_voltage(dev,
+ "refin");
+ if (st->vref_uv < 0)
+ return dev_err_probe(dev, st->vref_uv,
+ "Failed to read refin voltage\n");
+ }
+
+ return 0;
+}
+
+static int ad4030_reset(struct ad4030_state *st)
+{
+ struct device *dev = &st->spi->dev;
+ struct gpio_desc *reset;
+
+ reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(reset))
+ return dev_err_probe(dev, PTR_ERR(reset),
+ "Failed to get reset GPIO\n");
+
+ if (reset) {
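+		/* Satisfy the minimum reset pulse width before deasserting */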
+ ndelay(50);
+ gpiod_set_value_cansleep(reset, 0);
+ return 0;
+ }
+
+ return regmap_write(st->regmap, AD4030_REG_INTERFACE_CONFIG_A,
+ AD4030_REG_INTERFACE_CONFIG_A_SW_RESET);
+}
+
+static int ad4030_detect_chip_info(const struct ad4030_state *st)
+{
+ unsigned int grade;
+ int ret;
+
+ ret = regmap_read(st->regmap, AD4030_REG_CHIP_GRADE, &grade);
+ if (ret)
+ return ret;
+
+ grade = FIELD_GET(AD4030_REG_CHIP_GRADE_MASK_CHIP_GRADE, grade);
+ if (grade != st->chip->grade)
+		dev_warn(&st->spi->dev, "Unknown grade (0x%x) for %s\n", grade,
+ st->chip->name);
+
+ return 0;
+}
+
+static int ad4030_config(struct ad4030_state *st)
+{
+ int ret;
+ u8 reg_modes;
+
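+	/* CALIBBIAS spans the full signed precision of the part, step 1 */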
+ st->offset_avail[0] = (int)BIT(st->chip->precision_bits - 1) * -1;
+ st->offset_avail[1] = 1;
+ st->offset_avail[2] = BIT(st->chip->precision_bits - 1) - 1;
+
+ if (st->chip->num_voltage_inputs > 1)
+ reg_modes = FIELD_PREP(AD4030_REG_MODES_MASK_LANE_MODE,
+ AD4030_LANE_MD_INTERLEAVED);
+ else
+ reg_modes = FIELD_PREP(AD4030_REG_MODES_MASK_LANE_MODE,
+ AD4030_LANE_MD_1_PER_CH);
+
+ ret = regmap_write(st->regmap, AD4030_REG_MODES, reg_modes);
+ if (ret)
+ return ret;
+
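+	/* Low VIO supplies need the doubled (IO2X) output drive strength */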
+ if (st->vio_uv < AD4030_VIO_THRESHOLD_UV)
+ return regmap_write(st->regmap, AD4030_REG_IO,
+ AD4030_REG_IO_MASK_IO2X);
+
+ return 0;
+}
+
+static int ad4030_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct iio_dev *indio_dev;
+ struct ad4030_state *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ st->spi = spi;
+
+ st->regmap = devm_regmap_init(dev, &ad4030_regmap_bus, st,
+ &ad4030_regmap_config);
+ if (IS_ERR(st->regmap))
+ return dev_err_probe(dev, PTR_ERR(st->regmap),
+ "Failed to initialize regmap\n");
+
+ st->chip = spi_get_device_match_data(spi);
+ if (!st->chip)
+ return -EINVAL;
+
+ ret = ad4030_regulators_get(st);
+ if (ret)
+ return ret;
+
+ /*
+ * From datasheet: "Perform a reset no sooner than 3ms after the power
+ * supplies are valid and stable"
+ */
+ fsleep(3000);
+
+ ret = ad4030_reset(st);
+ if (ret)
+ return ret;
+
+ ret = ad4030_detect_chip_info(st);
+ if (ret)
+ return ret;
+
+ ret = ad4030_config(st);
+ if (ret)
+ return ret;
+
+ st->cnv_gpio = devm_gpiod_get(dev, "cnv", GPIOD_OUT_LOW);
+ if (IS_ERR(st->cnv_gpio))
+ return dev_err_probe(dev, PTR_ERR(st->cnv_gpio),
+ "Failed to get cnv gpio\n");
+
+ /*
+	 * One hardware channel is split into two software channels when using
+ * common byte mode. Add one more channel for the timestamp.
+ */
+ indio_dev->num_channels = 2 * st->chip->num_voltage_inputs + 1;
+ indio_dev->name = st->chip->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &ad4030_iio_info;
+ indio_dev->channels = st->chip->channels;
+ indio_dev->available_scan_masks = st->chip->available_masks;
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ iio_pollfunc_store_time,
+ ad4030_trigger_handler,
+ &ad4030_buffer_setup_ops);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to setup triggered buffer\n");
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const unsigned long ad4030_channel_masks[] = {
+ /* Differential only */
+ BIT(0),
+ /* Differential and common-mode voltage */
+ GENMASK(1, 0),
+ 0,
+};
+
+static const unsigned long ad4630_channel_masks[] = {
+ /* Differential only */
+ BIT(1) | BIT(0),
+ /* Differential with common byte */
+ GENMASK(3, 0),
+ 0,
+};
+
+static const struct iio_scan_type ad4030_24_scan_types[] = {
+ [AD4030_SCAN_TYPE_NORMAL] = {
+ .sign = 's',
+ .storagebits = 32,
+ .realbits = 24,
+ .shift = 8,
+ .endianness = IIO_BE,
+ },
+ [AD4030_SCAN_TYPE_AVG] = {
+ .sign = 's',
+ .storagebits = 32,
+ .realbits = 30,
+ .shift = 2,
+ .endianness = IIO_BE,
+ },
+};
+
+static const struct iio_scan_type ad4030_16_scan_types[] = {
+ [AD4030_SCAN_TYPE_NORMAL] = {
+ .sign = 's',
+ .storagebits = 32,
+ .realbits = 16,
+ .shift = 16,
+ .endianness = IIO_BE,
+ },
+ [AD4030_SCAN_TYPE_AVG] = {
+ .sign = 's',
+ .storagebits = 32,
+ .realbits = 30,
+ .shift = 2,
+ .endianness = IIO_BE,
+ }
+};
+
+static const struct ad4030_chip_info ad4030_24_chip_info = {
+ .name = "ad4030-24",
+ .available_masks = ad4030_channel_masks,
+ .channels = {
+ AD4030_CHAN_DIFF(0, ad4030_24_scan_types),
+ AD4030_CHAN_CMO(1, 0),
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+ },
+ .grade = AD4030_REG_CHIP_GRADE_AD4030_24_GRADE,
+ .precision_bits = 24,
+ .num_voltage_inputs = 1,
+ .tcyc_ns = AD4030_TCYC_ADJUSTED_NS,
+};
+
+static const struct ad4030_chip_info ad4630_16_chip_info = {
+ .name = "ad4630-16",
+ .available_masks = ad4630_channel_masks,
+ .channels = {
+ AD4030_CHAN_DIFF(0, ad4030_16_scan_types),
+ AD4030_CHAN_DIFF(1, ad4030_16_scan_types),
+ AD4030_CHAN_CMO(2, 0),
+ AD4030_CHAN_CMO(3, 1),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+ },
+ .grade = AD4030_REG_CHIP_GRADE_AD4630_16_GRADE,
+ .precision_bits = 16,
+ .num_voltage_inputs = 2,
+ .tcyc_ns = AD4030_TCYC_ADJUSTED_NS,
+};
+
+static const struct ad4030_chip_info ad4630_24_chip_info = {
+ .name = "ad4630-24",
+ .available_masks = ad4630_channel_masks,
+ .channels = {
+ AD4030_CHAN_DIFF(0, ad4030_24_scan_types),
+ AD4030_CHAN_DIFF(1, ad4030_24_scan_types),
+ AD4030_CHAN_CMO(2, 0),
+ AD4030_CHAN_CMO(3, 1),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+ },
+ .grade = AD4030_REG_CHIP_GRADE_AD4630_24_GRADE,
+ .precision_bits = 24,
+ .num_voltage_inputs = 2,
+ .tcyc_ns = AD4030_TCYC_ADJUSTED_NS,
+};
+
+static const struct ad4030_chip_info ad4632_16_chip_info = {
+ .name = "ad4632-16",
+ .available_masks = ad4630_channel_masks,
+ .channels = {
+ AD4030_CHAN_DIFF(0, ad4030_16_scan_types),
+ AD4030_CHAN_DIFF(1, ad4030_16_scan_types),
+ AD4030_CHAN_CMO(2, 0),
+ AD4030_CHAN_CMO(3, 1),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+ },
+ .grade = AD4030_REG_CHIP_GRADE_AD4632_16_GRADE,
+ .precision_bits = 16,
+ .num_voltage_inputs = 2,
+ .tcyc_ns = AD4632_TCYC_ADJUSTED_NS,
+};
+
+static const struct ad4030_chip_info ad4632_24_chip_info = {
+ .name = "ad4632-24",
+ .available_masks = ad4630_channel_masks,
+ .channels = {
+ AD4030_CHAN_DIFF(0, ad4030_24_scan_types),
+ AD4030_CHAN_DIFF(1, ad4030_24_scan_types),
+ AD4030_CHAN_CMO(2, 0),
+ AD4030_CHAN_CMO(3, 1),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+ },
+ .grade = AD4030_REG_CHIP_GRADE_AD4632_24_GRADE,
+ .precision_bits = 24,
+ .num_voltage_inputs = 2,
+ .tcyc_ns = AD4632_TCYC_ADJUSTED_NS,
+};
+
+static const struct spi_device_id ad4030_id_table[] = {
+ { "ad4030-24", (kernel_ulong_t)&ad4030_24_chip_info },
+ { "ad4630-16", (kernel_ulong_t)&ad4630_16_chip_info },
+ { "ad4630-24", (kernel_ulong_t)&ad4630_24_chip_info },
+ { "ad4632-16", (kernel_ulong_t)&ad4632_16_chip_info },
+ { "ad4632-24", (kernel_ulong_t)&ad4632_24_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad4030_id_table);
+
+static const struct of_device_id ad4030_of_match[] = {
+ { .compatible = "adi,ad4030-24", .data = &ad4030_24_chip_info },
+ { .compatible = "adi,ad4630-16", .data = &ad4630_16_chip_info },
+ { .compatible = "adi,ad4630-24", .data = &ad4630_24_chip_info },
+ { .compatible = "adi,ad4632-16", .data = &ad4632_16_chip_info },
+ { .compatible = "adi,ad4632-24", .data = &ad4632_24_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad4030_of_match);
+
+static struct spi_driver ad4030_driver = {
+ .driver = {
+ .name = "ad4030",
+ .of_match_table = ad4030_of_match,
+ },
+ .probe = ad4030_probe,
+ .id_table = ad4030_id_table,
+};
+module_spi_driver(ad4030_driver);
+
+MODULE_AUTHOR("Esteban Blanc <eblanc@baylibre.com>");
+MODULE_DESCRIPTION("Analog Devices AD4630 ADC family driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c
index de32cc9d18c5..0f4c9cd6c102 100644
--- a/drivers/iio/adc/ad4130.c
+++ b/drivers/iio/adc/ad4130.c
@@ -6,6 +6,7 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
@@ -203,7 +204,7 @@ enum ad4130_mode {
AD4130_MODE_IDLE = 0b0100,
};
-enum ad4130_filter_mode {
+enum ad4130_filter_type {
AD4130_FILTER_SINC4,
AD4130_FILTER_SINC4_SINC1,
AD4130_FILTER_SINC3,
@@ -223,6 +224,10 @@ enum ad4130_pin_function {
AD4130_PIN_FN_VBIAS = BIT(3),
};
+/*
+ * If you make adaptations in this struct, you most likely also have to
+ * adapt ad4130_setup_info_eq().
+ */
struct ad4130_setup_info {
unsigned int iout0_val;
unsigned int iout1_val;
@@ -230,7 +235,7 @@ struct ad4130_setup_info {
unsigned int pga;
unsigned int fs;
u32 ref_sel;
- enum ad4130_filter_mode filter_mode;
+ enum ad4130_filter_type filter_type;
bool ref_bufp;
bool ref_bufm;
};
@@ -251,7 +256,7 @@ struct ad4130_chan_info {
};
struct ad4130_filter_config {
- enum ad4130_filter_mode filter_mode;
+ enum ad4130_filter_type filter_type;
unsigned int odr_div;
unsigned int fs_max;
enum iio_available_type samp_freq_avail_type;
@@ -337,9 +342,9 @@ static const unsigned int ad4130_burnout_current_na_tbl[AD4130_BURNOUT_MAX] = {
[AD4130_BURNOUT_4000NA] = 4000,
};
-#define AD4130_VARIABLE_ODR_CONFIG(_filter_mode, _odr_div, _fs_max) \
+#define AD4130_VARIABLE_ODR_CONFIG(_filter_type, _odr_div, _fs_max) \
{ \
- .filter_mode = (_filter_mode), \
+ .filter_type = (_filter_type), \
.odr_div = (_odr_div), \
.fs_max = (_fs_max), \
.samp_freq_avail_type = IIO_AVAIL_RANGE, \
@@ -350,9 +355,9 @@ static const unsigned int ad4130_burnout_current_na_tbl[AD4130_BURNOUT_MAX] = {
}, \
}
-#define AD4130_FIXED_ODR_CONFIG(_filter_mode, _odr_div) \
+#define AD4130_FIXED_ODR_CONFIG(_filter_type, _odr_div) \
{ \
- .filter_mode = (_filter_mode), \
+ .filter_type = (_filter_type), \
.odr_div = (_odr_div), \
.fs_max = AD4130_FILTER_SELECT_MIN, \
.samp_freq_avail_type = IIO_AVAIL_LIST, \
@@ -374,7 +379,7 @@ static const struct ad4130_filter_config ad4130_filter_configs[] = {
AD4130_FIXED_ODR_CONFIG(AD4130_FILTER_SINC3_PF4, 148),
};
-static const char * const ad4130_filter_modes_str[] = {
+static const char * const ad4130_filter_types_str[] = {
[AD4130_FILTER_SINC4] = "sinc4",
[AD4130_FILTER_SINC4_SINC1] = "sinc4+sinc1",
[AD4130_FILTER_SINC3] = "sinc3",
@@ -591,6 +596,40 @@ static irqreturn_t ad4130_irq_handler(int irq, void *private)
return IRQ_HANDLED;
}
+static bool ad4130_setup_info_eq(struct ad4130_setup_info *a,
+ struct ad4130_setup_info *b)
+{
+ /*
+	 * This is just to make sure the comparison gets updated whenever
+	 * struct ad4130_setup_info is changed.
+ */
+ static_assert(sizeof(*a) ==
+ sizeof(struct {
+ unsigned int iout0_val;
+ unsigned int iout1_val;
+ unsigned int burnout;
+ unsigned int pga;
+ unsigned int fs;
+ u32 ref_sel;
+ enum ad4130_filter_type filter_type;
+ bool ref_bufp;
+ bool ref_bufm;
+ }));
+
+ if (a->iout0_val != b->iout0_val ||
+ a->iout1_val != b->iout1_val ||
+ a->burnout != b->burnout ||
+ a->pga != b->pga ||
+ a->fs != b->fs ||
+ a->ref_sel != b->ref_sel ||
+ a->filter_type != b->filter_type ||
+ a->ref_bufp != b->ref_bufp ||
+ a->ref_bufm != b->ref_bufm)
+ return false;
+
+ return true;
+}
+
static int ad4130_find_slot(struct ad4130_state *st,
struct ad4130_setup_info *target_setup_info,
unsigned int *slot, bool *overwrite)
@@ -604,8 +643,7 @@ static int ad4130_find_slot(struct ad4130_state *st,
struct ad4130_slot_info *slot_info = &st->slots_info[i];
/* Immediately accept a matching setup info. */
- if (!memcmp(target_setup_info, &slot_info->setup,
- sizeof(*target_setup_info))) {
+ if (ad4130_setup_info_eq(target_setup_info, &slot_info->setup)) {
*slot = i;
return 0;
}
@@ -691,7 +729,7 @@ static int ad4130_write_slot_setup(struct ad4130_state *st,
if (ret)
return ret;
- val = FIELD_PREP(AD4130_FILTER_MODE_MASK, setup_info->filter_mode) |
+ val = FIELD_PREP(AD4130_FILTER_MODE_MASK, setup_info->filter_type) |
FIELD_PREP(AD4130_FILTER_SELECT_MASK, setup_info->fs);
ret = regmap_write(st->regmap, AD4130_FILTER_X_REG(slot), val);
@@ -835,11 +873,11 @@ static int ad4130_set_channel_enable(struct ad4130_state *st,
* (used in ad4130_fs_to_freq)
*/
-static void ad4130_freq_to_fs(enum ad4130_filter_mode filter_mode,
+static void ad4130_freq_to_fs(enum ad4130_filter_type filter_type,
int val, int val2, unsigned int *fs)
{
const struct ad4130_filter_config *filter_config =
- &ad4130_filter_configs[filter_mode];
+ &ad4130_filter_configs[filter_type];
u64 dividend, divisor;
int temp;
@@ -858,11 +896,11 @@ static void ad4130_freq_to_fs(enum ad4130_filter_mode filter_mode,
*fs = temp;
}
-static void ad4130_fs_to_freq(enum ad4130_filter_mode filter_mode,
+static void ad4130_fs_to_freq(enum ad4130_filter_type filter_type,
unsigned int fs, int *val, int *val2)
{
const struct ad4130_filter_config *filter_config =
- &ad4130_filter_configs[filter_mode];
+ &ad4130_filter_configs[filter_type];
unsigned int dividend, divisor;
u64 temp;
@@ -874,7 +912,7 @@ static void ad4130_fs_to_freq(enum ad4130_filter_mode filter_mode,
*val = div_u64_rem(temp, NANO, val2);
}
-static int ad4130_set_filter_mode(struct iio_dev *indio_dev,
+static int ad4130_set_filter_type(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
unsigned int val)
{
@@ -882,17 +920,17 @@ static int ad4130_set_filter_mode(struct iio_dev *indio_dev,
unsigned int channel = chan->scan_index;
struct ad4130_chan_info *chan_info = &st->chans_info[channel];
struct ad4130_setup_info *setup_info = &chan_info->setup;
- enum ad4130_filter_mode old_filter_mode;
+ enum ad4130_filter_type old_filter_type;
int freq_val, freq_val2;
unsigned int old_fs;
int ret = 0;
guard(mutex)(&st->lock);
- if (setup_info->filter_mode == val)
+ if (setup_info->filter_type == val)
return 0;
old_fs = setup_info->fs;
- old_filter_mode = setup_info->filter_mode;
+ old_filter_type = setup_info->filter_type;
/*
* When switching between filter modes, try to match the ODR as
@@ -900,48 +938,55 @@ static int ad4130_set_filter_mode(struct iio_dev *indio_dev,
* using the old filter mode, then convert it back into FS using
* the new filter mode.
*/
- ad4130_fs_to_freq(setup_info->filter_mode, setup_info->fs,
+ ad4130_fs_to_freq(setup_info->filter_type, setup_info->fs,
&freq_val, &freq_val2);
ad4130_freq_to_fs(val, freq_val, freq_val2, &setup_info->fs);
- setup_info->filter_mode = val;
+ setup_info->filter_type = val;
ret = ad4130_write_channel_setup(st, channel, false);
if (ret) {
setup_info->fs = old_fs;
- setup_info->filter_mode = old_filter_mode;
+ setup_info->filter_type = old_filter_type;
return ret;
}
return 0;
}
-static int ad4130_get_filter_mode(struct iio_dev *indio_dev,
+static int ad4130_get_filter_type(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan)
{
struct ad4130_state *st = iio_priv(indio_dev);
unsigned int channel = chan->scan_index;
struct ad4130_setup_info *setup_info = &st->chans_info[channel].setup;
- enum ad4130_filter_mode filter_mode;
+ enum ad4130_filter_type filter_type;
guard(mutex)(&st->lock);
- filter_mode = setup_info->filter_mode;
+ filter_type = setup_info->filter_type;
- return filter_mode;
+ return filter_type;
}
-static const struct iio_enum ad4130_filter_mode_enum = {
- .items = ad4130_filter_modes_str,
- .num_items = ARRAY_SIZE(ad4130_filter_modes_str),
- .set = ad4130_set_filter_mode,
- .get = ad4130_get_filter_mode,
+static const struct iio_enum ad4130_filter_type_enum = {
+ .items = ad4130_filter_types_str,
+ .num_items = ARRAY_SIZE(ad4130_filter_types_str),
+ .set = ad4130_set_filter_type,
+ .get = ad4130_get_filter_type,
};
-static const struct iio_chan_spec_ext_info ad4130_filter_mode_ext_info[] = {
- IIO_ENUM("filter_mode", IIO_SEPARATE, &ad4130_filter_mode_enum),
+static const struct iio_chan_spec_ext_info ad4130_ext_info[] = {
+ /*
+ * `filter_type` is the standardized IIO ABI for digital filtering.
+ * `filter_mode` is just kept for backwards compatibility.
+ */
+ IIO_ENUM("filter_mode", IIO_SEPARATE, &ad4130_filter_type_enum),
IIO_ENUM_AVAILABLE("filter_mode", IIO_SHARED_BY_TYPE,
- &ad4130_filter_mode_enum),
+ &ad4130_filter_type_enum),
+ IIO_ENUM("filter_type", IIO_SEPARATE, &ad4130_filter_type_enum),
+ IIO_ENUM_AVAILABLE("filter_type", IIO_SHARED_BY_TYPE,
+ &ad4130_filter_type_enum),
{ }
};
@@ -955,7 +1000,7 @@ static const struct iio_chan_spec ad4130_channel_template = {
BIT(IIO_CHAN_INFO_SAMP_FREQ),
.info_mask_separate_available = BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .ext_info = ad4130_filter_mode_ext_info,
+ .ext_info = ad4130_ext_info,
.scan_type = {
.sign = 'u',
.endianness = IIO_BE,
@@ -1005,7 +1050,7 @@ static int ad4130_set_channel_freq(struct ad4130_state *st,
guard(mutex)(&st->lock);
old_fs = setup_info->fs;
- ad4130_freq_to_fs(setup_info->filter_mode, val, val2, &fs);
+ ad4130_freq_to_fs(setup_info->filter_type, val, val2, &fs);
if (fs == setup_info->fs)
return 0;
@@ -1060,13 +1105,11 @@ static int _ad4130_read_sample(struct iio_dev *indio_dev, unsigned int channel,
static int ad4130_read_sample(struct iio_dev *indio_dev, unsigned int channel,
int *val)
{
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- struct ad4130_state *st = iio_priv(indio_dev);
+ struct ad4130_state *st = iio_priv(indio_dev);
- guard(mutex)(&st->lock);
- return _ad4130_read_sample(indio_dev, channel, val);
- }
- unreachable();
+ guard(mutex)(&st->lock);
+
+ return _ad4130_read_sample(indio_dev, channel, val);
}
static int ad4130_read_raw(struct iio_dev *indio_dev,
@@ -1076,10 +1119,16 @@ static int ad4130_read_raw(struct iio_dev *indio_dev,
struct ad4130_state *st = iio_priv(indio_dev);
unsigned int channel = chan->scan_index;
struct ad4130_setup_info *setup_info = &st->chans_info[channel].setup;
+ int ret;
switch (info) {
case IIO_CHAN_INFO_RAW:
- return ad4130_read_sample(indio_dev, channel, val);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = ad4130_read_sample(indio_dev, channel, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SCALE: {
guard(mutex)(&st->lock);
*val = st->scale_tbls[setup_info->ref_sel][setup_info->pga][0];
@@ -1093,7 +1142,7 @@ static int ad4130_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SAMP_FREQ: {
guard(mutex)(&st->lock);
- ad4130_fs_to_freq(setup_info->filter_mode, setup_info->fs,
+ ad4130_fs_to_freq(setup_info->filter_type, setup_info->fs,
val, val2);
return IIO_VAL_INT_PLUS_NANO;
@@ -1123,7 +1172,7 @@ static int ad4130_read_avail(struct iio_dev *indio_dev,
return IIO_AVAIL_LIST;
case IIO_CHAN_INFO_SAMP_FREQ:
scoped_guard(mutex, &st->lock) {
- filter_config = &ad4130_filter_configs[setup_info->filter_mode];
+ filter_config = &ad4130_filter_configs[setup_info->filter_type];
}
*vals = (int *)filter_config->samp_freq_avail;
diff --git a/drivers/iio/adc/ad4695.c b/drivers/iio/adc/ad4695.c
index b79d135a5471..8222c8ab2940 100644
--- a/drivers/iio/adc/ad4695.c
+++ b/drivers/iio/adc/ad4695.c
@@ -19,14 +19,19 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
+#include <linux/iio/buffer-dmaengine.h>
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/minmax.h>
+#include <linux/mutex.h>
#include <linux/property.h>
+#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/spi/offload/consumer.h>
+#include <linux/spi/offload/provider.h>
#include <linux/spi/spi.h>
#include <linux/units.h>
@@ -66,12 +71,15 @@
#define AD4695_REG_STD_SEQ_CONFIG 0x0024
#define AD4695_REG_GPIO_CTRL 0x0026
#define AD4695_REG_GP_MODE 0x0027
+#define AD4695_REG_GP_MODE_BUSY_GP_SEL BIT(5)
+#define AD4695_REG_GP_MODE_BUSY_GP_EN BIT(1)
#define AD4695_REG_TEMP_CTRL 0x0029
#define AD4695_REG_TEMP_CTRL_TEMP_EN BIT(0)
#define AD4695_REG_CONFIG_IN(n) (0x0030 | (n))
#define AD4695_REG_CONFIG_IN_MODE BIT(6)
#define AD4695_REG_CONFIG_IN_PAIR GENMASK(5, 4)
#define AD4695_REG_CONFIG_IN_AINHIGHZ_EN BIT(3)
+#define AD4695_REG_CONFIG_IN_OSR_SET GENMASK(1, 0)
#define AD4695_REG_UPPER_IN(n) (0x0040 | (2 * (n)))
#define AD4695_REG_LOWER_IN(n) (0x0060 | (2 * (n)))
#define AD4695_REG_HYST_IN(n) (0x0080 | (2 * (n)))
@@ -92,6 +100,8 @@
#define AD4695_T_REFBUF_MS 100
#define AD4695_T_REGCONFIG_NS 20
#define AD4695_T_SCK_CNV_DELAY_NS 80
+#define AD4695_T_CNVL_NS 80
+#define AD4695_T_CNVH_NS 10
#define AD4695_REG_ACCESS_SCLK_HZ (10 * MEGA)
/* Max number of voltage input channels. */
@@ -118,17 +128,27 @@ struct ad4695_channel_config {
bool bipolar;
enum ad4695_in_pair pin_pairing;
unsigned int common_mode_mv;
+ unsigned int oversampling_ratio;
};
struct ad4695_state {
struct spi_device *spi;
+ struct spi_offload *offload;
+ struct spi_offload_trigger *offload_trigger;
struct regmap *regmap;
struct regmap *regmap16;
struct gpio_desc *reset_gpio;
+	/* currently, PWM CNV is only supported when using SPI offload */
+ struct pwm_device *cnv_pwm;
+ /* protects against concurrent use of cnv_pwm */
+ struct mutex cnv_pwm_lock;
+	/* offload also requires a separate GPIO to manually control CNV */
+ struct gpio_desc *cnv_gpio;
/* voltages channels plus temperature and timestamp */
struct iio_chan_spec iio_chan[AD4695_MAX_CHANNELS + 2];
struct ad4695_channel_config channels_cfg[AD4695_MAX_CHANNELS];
const struct ad4695_chip_info *chip_info;
+ int sample_freq_range[3];
/* Reference voltage. */
unsigned int vref_mv;
/* Common mode input pin voltage. */
@@ -148,6 +168,8 @@ struct ad4695_state {
/* Commands to send for single conversion. */
u16 cnv_cmd;
u8 cnv_cmd2;
+ /* Buffer for storing data from regmap bus reads/writes */
+ u8 regmap_bus_data[4];
};
static const struct regmap_range ad4695_regmap_rd_ranges[] = {
@@ -192,7 +214,6 @@ static const struct regmap_config ad4695_regmap_config = {
.max_register = AD4695_REG_AS_SLOT(127),
.rd_table = &ad4695_regmap_rd_table,
.wr_table = &ad4695_regmap_wr_table,
- .can_multi_write = true,
};
static const struct regmap_range ad4695_regmap16_rd_ranges[] = {
@@ -224,7 +245,126 @@ static const struct regmap_config ad4695_regmap16_config = {
.max_register = AD4695_REG_GAIN_IN(15),
.rd_table = &ad4695_regmap16_rd_table,
.wr_table = &ad4695_regmap16_wr_table,
- .can_multi_write = true,
+};
+
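+/*
+ * Register access must not exceed AD4695_REG_ACCESS_SCLK_HZ, so a custom
+ * regmap bus issues every access as explicit SPI transfers at that speed,
+ * bounced through a driver-owned buffer.
+ */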
+static int ad4695_regmap_bus_reg_write(void *context, const void *data,
+ size_t count)
+{
+ struct ad4695_state *st = context;
+ struct spi_transfer xfer = {
+ .speed_hz = AD4695_REG_ACCESS_SCLK_HZ,
+ .len = count,
+ .tx_buf = st->regmap_bus_data,
+ };
+
+ if (count > ARRAY_SIZE(st->regmap_bus_data))
+ return -EINVAL;
+
+ memcpy(st->regmap_bus_data, data, count);
+
+ return spi_sync_transfer(st->spi, &xfer, 1);
+}
+
+static int ad4695_regmap_bus_reg_read(void *context, const void *reg,
+ size_t reg_size, void *val,
+ size_t val_size)
+{
+ struct ad4695_state *st = context;
+ struct spi_transfer xfers[] = {
+ {
+ .speed_hz = AD4695_REG_ACCESS_SCLK_HZ,
+ .len = reg_size,
+ .tx_buf = &st->regmap_bus_data[0],
+ }, {
+ .speed_hz = AD4695_REG_ACCESS_SCLK_HZ,
+ .len = val_size,
+ .rx_buf = &st->regmap_bus_data[2],
+ },
+ };
+ int ret;
+
+ if (reg_size > 2)
+ return -EINVAL;
+
+ if (val_size > 2)
+ return -EINVAL;
+
+ memcpy(&st->regmap_bus_data[0], reg, reg_size);
+
+ ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
+ if (ret)
+ return ret;
+
+ memcpy(val, &st->regmap_bus_data[2], val_size);
+
+ return 0;
+}
+
+static const struct regmap_bus ad4695_regmap_bus = {
+ .write = ad4695_regmap_bus_reg_write,
+ .read = ad4695_regmap_bus_reg_read,
+ .read_flag_mask = 0x80,
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
+};
+
+enum {
+ AD4695_SCAN_TYPE_OSR_1,
+ AD4695_SCAN_TYPE_OSR_4,
+ AD4695_SCAN_TYPE_OSR_16,
+ AD4695_SCAN_TYPE_OSR_64,
+};
+
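+/*
+ * Every OSR setting transfers 19 bits per word; each 4x increase in
+ * oversampling ratio adds one significant bit (16 bits at OSR 1 up to
+ * 19 bits at OSR 64), with the shift masking off the rest.
+ */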
+static const struct iio_scan_type ad4695_scan_type_offload_u[] = {
+ [AD4695_SCAN_TYPE_OSR_1] = {
+ .sign = 'u',
+ .realbits = 16,
+ .shift = 3,
+ .storagebits = 32,
+ },
+ [AD4695_SCAN_TYPE_OSR_4] = {
+ .sign = 'u',
+ .realbits = 17,
+ .shift = 2,
+ .storagebits = 32,
+ },
+ [AD4695_SCAN_TYPE_OSR_16] = {
+ .sign = 'u',
+ .realbits = 18,
+ .shift = 1,
+ .storagebits = 32,
+ },
+ [AD4695_SCAN_TYPE_OSR_64] = {
+ .sign = 'u',
+ .realbits = 19,
+ .storagebits = 32,
+ },
+};
+
+static const struct iio_scan_type ad4695_scan_type_offload_s[] = {
+ [AD4695_SCAN_TYPE_OSR_1] = {
+ .sign = 's',
+ .realbits = 16,
+ .shift = 3,
+ .storagebits = 32,
+ },
+ [AD4695_SCAN_TYPE_OSR_4] = {
+ .sign = 's',
+ .realbits = 17,
+ .shift = 2,
+ .storagebits = 32,
+ },
+ [AD4695_SCAN_TYPE_OSR_16] = {
+ .sign = 's',
+ .realbits = 18,
+ .shift = 1,
+ .storagebits = 32,
+ },
+ [AD4695_SCAN_TYPE_OSR_64] = {
+ .sign = 's',
+ .realbits = 19,
+ .storagebits = 32,
+ },
};
static const struct iio_chan_spec ad4695_channel_template = {
@@ -264,6 +404,10 @@ static const char * const ad4695_power_supplies[] = {
"avdd", "vio"
};
+static const int ad4695_oversampling_ratios[] = {
+ 1, 4, 16, 64,
+};
+
static const struct ad4695_chip_info ad4695_chip_info = {
.name = "ad4695",
.max_sample_rate = 500 * KILO,
@@ -292,6 +436,13 @@ static const struct ad4695_chip_info ad4698_chip_info = {
.num_voltage_inputs = 8,
};
+static void ad4695_cnv_manual_trigger(struct ad4695_state *st)
+{
+ gpiod_set_value_cansleep(st->cnv_gpio, 1);
+ ndelay(10);
+ gpiod_set_value_cansleep(st->cnv_gpio, 0);
+}
+
/**
* ad4695_set_single_cycle_mode - Set the device in single cycle mode
* @st: The AD4695 state
@@ -364,11 +515,31 @@ static int ad4695_enter_advanced_sequencer_mode(struct ad4695_state *st, u32 n)
*/
static int ad4695_exit_conversion_mode(struct ad4695_state *st)
{
- struct spi_transfer xfer = {
- .tx_buf = &st->cnv_cmd2,
- .len = 1,
- .delay.value = AD4695_T_REGCONFIG_NS,
- .delay.unit = SPI_DELAY_UNIT_NSECS,
+ /*
+ * An extra transfer is needed to trigger a conversion here so
+ * that we can be 100% sure the command will be processed by the
+ * ADC, rather than relying on it to be in the correct state
+ * when this function is called (this chip has a quirk where the
+ * command only works when reading a conversion, and if the
+ * previous conversion was already read then it won't work). The
+	 * exit command itself is then sent at the slower
+ * AD4695_REG_ACCESS_SCLK_HZ speed to guarantee this works.
+ */
+ struct spi_transfer xfers[] = {
+ {
+ .delay.value = AD4695_T_CNVL_NS,
+ .delay.unit = SPI_DELAY_UNIT_NSECS,
+ .cs_change = 1,
+ .cs_change_delay.value = AD4695_T_CNVH_NS,
+ .cs_change_delay.unit = SPI_DELAY_UNIT_NSECS,
+ },
+ {
+ .speed_hz = AD4695_REG_ACCESS_SCLK_HZ,
+ .tx_buf = &st->cnv_cmd2,
+ .len = 1,
+ .delay.value = AD4695_T_REGCONFIG_NS,
+ .delay.unit = SPI_DELAY_UNIT_NSECS,
+ },
};
/*
@@ -377,7 +548,18 @@ static int ad4695_exit_conversion_mode(struct ad4695_state *st)
*/
st->cnv_cmd2 = AD4695_CMD_EXIT_CNV_MODE << 3;
- return spi_sync_transfer(st->spi, &xfer, 1);
+ if (st->cnv_gpio) {
+ ad4695_cnv_manual_trigger(st);
+
+ /*
+ * In this case, CNV is not connected to CS, so we don't need
+ * the extra CS toggle to trigger the conversion and toggling
+ * CS would have no effect.
+ */
+ return spi_sync_transfer(st->spi, &xfers[1], 1);
+ }
+
+ return spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
}
static int ad4695_set_ref_voltage(struct ad4695_state *st, int vref_mv)
@@ -402,6 +584,29 @@ static int ad4695_set_ref_voltage(struct ad4695_state *st, int vref_mv)
FIELD_PREP(AD4695_REG_REF_CTRL_VREF_SET, val));
}
+/**
+ * ad4695_osr_to_regval - convert ratio to OSR register value
+ * @ratio: ratio to check
+ *
+ * Check if ratio is present in the list of available ratios and return
+ * the corresponding value that needs to be written to the register to
+ * select that ratio.
+ *
+ * Returns: register value (0 to 3) or -EINVAL if there is no exact
+ * match
+ */
+static int ad4695_osr_to_regval(int ratio)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ad4695_oversampling_ratios); i++) {
+ if (ratio == ad4695_oversampling_ratios[i])
+ return i;
+ }
+
+ return -EINVAL;
+}
+
static int ad4695_write_chn_cfg(struct ad4695_state *st,
struct ad4695_channel_config *cfg)
{
@@ -604,6 +809,161 @@ out:
return IRQ_HANDLED;
}
+static int ad4695_offload_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct ad4695_state *st = iio_priv(indio_dev);
+ struct spi_offload_trigger_config config = {
+ .type = SPI_OFFLOAD_TRIGGER_DATA_READY,
+ };
+ struct spi_transfer *xfer = &st->buf_read_xfer[0];
+ struct pwm_state state;
+ u8 temp_chan_bit = st->chip_info->num_voltage_inputs;
+ u8 num_slots = 0;
+ u8 temp_en = 0;
+ unsigned int bit;
+ int ret;
+
+ iio_for_each_active_channel(indio_dev, bit) {
+ if (bit == temp_chan_bit) {
+ temp_en = 1;
+ continue;
+ }
+
+ ret = regmap_write(st->regmap, AD4695_REG_AS_SLOT(num_slots),
+ FIELD_PREP(AD4695_REG_AS_SLOT_INX, bit));
+ if (ret)
+ return ret;
+
+ num_slots++;
+ }
+
+ /*
+ * For non-offload, we could discard data to work around this
+ * restriction, but with offload, that is not possible.
+ */
+ if (num_slots < 2) {
+ dev_err(&st->spi->dev,
+ "At least two voltage channels must be enabled.\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(st->regmap, AD4695_REG_TEMP_CTRL,
+ AD4695_REG_TEMP_CTRL_TEMP_EN,
+ FIELD_PREP(AD4695_REG_TEMP_CTRL_TEMP_EN,
+ temp_en));
+ if (ret)
+ return ret;
+
+ /* Each BUSY event means just one sample for one channel is ready. */
+ memset(xfer, 0, sizeof(*xfer));
+ xfer->offload_flags = SPI_OFFLOAD_XFER_RX_STREAM;
+ /* Using 19 bits per word to allow for possible oversampling */
+ xfer->bits_per_word = 19;
+ xfer->len = 4;
+
+ spi_message_init_with_transfers(&st->buf_read_msg, xfer, 1);
+ st->buf_read_msg.offload = st->offload;
+
+ ret = spi_optimize_message(st->spi, &st->buf_read_msg);
+ if (ret)
+ return ret;
+
+ /*
+	 * NB: technically, this is part of the SPI offload trigger enable, but
+	 * it can't be done from the offload trigger enable callback because it
+	 * requires accessing the SPI bus, which could cause a deadlock.
+ */
+ ret = regmap_set_bits(st->regmap, AD4695_REG_GP_MODE,
+ AD4695_REG_GP_MODE_BUSY_GP_EN);
+ if (ret)
+ goto err_unoptimize_message;
+
+ ret = spi_offload_trigger_enable(st->offload, st->offload_trigger,
+ &config);
+ if (ret)
+ goto err_disable_busy_output;
+
+ ret = ad4695_enter_advanced_sequencer_mode(st, num_slots);
+ if (ret)
+ goto err_offload_trigger_disable;
+
+ mutex_lock(&st->cnv_pwm_lock);
+ pwm_get_state(st->cnv_pwm, &state);
+ /*
+ * PWM subsystem generally rounds down, so requesting 2x minimum high
+ * time ensures that we meet the minimum high time in any case.
+ */
+ state.duty_cycle = AD4695_T_CNVH_NS * 2;
+ ret = pwm_apply_might_sleep(st->cnv_pwm, &state);
+ mutex_unlock(&st->cnv_pwm_lock);
+ if (ret)
+ goto err_offload_exit_conversion_mode;
+
+ return 0;
+
+err_offload_exit_conversion_mode:
+ /*
+ * We have to unwind in a different order to avoid triggering offload.
+ * ad4695_exit_conversion_mode() triggers a conversion, so it has to be
+ * done after spi_offload_trigger_disable().
+ */
+ spi_offload_trigger_disable(st->offload, st->offload_trigger);
+ ad4695_exit_conversion_mode(st);
+ goto err_disable_busy_output;
+
+err_offload_trigger_disable:
+ spi_offload_trigger_disable(st->offload, st->offload_trigger);
+
+err_disable_busy_output:
+ regmap_clear_bits(st->regmap, AD4695_REG_GP_MODE,
+ AD4695_REG_GP_MODE_BUSY_GP_EN);
+
+err_unoptimize_message:
+ spi_unoptimize_message(&st->buf_read_msg);
+
+ return ret;
+}
+
+static int ad4695_offload_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct ad4695_state *st = iio_priv(indio_dev);
+ struct pwm_state state;
+ int ret;
+
+ scoped_guard(mutex, &st->cnv_pwm_lock) {
+ pwm_get_state(st->cnv_pwm, &state);
+ state.duty_cycle = 0;
+ ret = pwm_apply_might_sleep(st->cnv_pwm, &state);
+ if (ret)
+ return ret;
+ }
+
+ spi_offload_trigger_disable(st->offload, st->offload_trigger);
+
+ /*
+ * ad4695_exit_conversion_mode() triggers a conversion, so it has to be
+ * done after spi_offload_trigger_disable().
+ */
+ ret = ad4695_exit_conversion_mode(st);
+ if (ret)
+ return ret;
+
+ ret = regmap_clear_bits(st->regmap, AD4695_REG_GP_MODE,
+ AD4695_REG_GP_MODE_BUSY_GP_EN);
+ if (ret)
+ return ret;
+
+ spi_unoptimize_message(&st->buf_read_msg);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops ad4695_offload_buffer_setup_ops = {
+ .postenable = ad4695_offload_buffer_postenable,
+ .predisable = ad4695_offload_buffer_predisable,
+};
+
/**
* ad4695_read_one_sample - Read a single sample using single-cycle mode
* @st: The AD4695 state
@@ -636,6 +996,13 @@ static int ad4695_read_one_sample(struct ad4695_state *st, unsigned int address)
return ret;
/*
+ * If CNV is connected to CS, the previous function will have triggered
+	 * the conversion; otherwise, we do it manually.
+ */
+ if (st->cnv_gpio)
+ ad4695_cnv_manual_trigger(st);
+
+ /*
* Setting the first channel to the temperature channel isn't supported
* in single-cycle mode, so we have to do an extra conversion to read
* the temperature.
@@ -646,6 +1013,13 @@ static int ad4695_read_one_sample(struct ad4695_state *st, unsigned int address)
ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
if (ret)
return ret;
+
+ /*
+ * If CNV is connected to CS, the previous function will have
+		 * triggered the conversion; otherwise, we do it manually.
+ */
+ if (st->cnv_gpio)
+ ad4695_cnv_manual_trigger(st);
}
/* Then read the result and exit conversion mode. */
@@ -655,36 +1029,58 @@ static int ad4695_read_one_sample(struct ad4695_state *st, unsigned int address)
return spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
}
+static int __ad4695_read_info_raw(struct ad4695_state *st,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ u8 realbits = chan->scan_type.realbits;
+ int ret;
+
+ ret = ad4695_read_one_sample(st, chan->address);
+ if (ret)
+ return ret;
+
+ if (chan->scan_type.sign == 's')
+ *val = sign_extend32(st->raw_data, realbits - 1);
+ else
+ *val = st->raw_data;
+
+ return IIO_VAL_INT;
+}
+
static int ad4695_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct ad4695_state *st = iio_priv(indio_dev);
- struct ad4695_channel_config *cfg = &st->channels_cfg[chan->scan_index];
- u8 realbits = chan->scan_type.realbits;
+ const struct iio_scan_type *scan_type;
+ struct ad4695_channel_config *cfg;
unsigned int reg_val;
int ret, tmp;
+ u8 realbits;
+
+ if (chan->type == IIO_VOLTAGE)
+ cfg = &st->channels_cfg[chan->scan_index];
+
+ scan_type = iio_get_current_scan_type(indio_dev, chan);
+ if (IS_ERR(scan_type))
+ return PTR_ERR(scan_type);
+
+ realbits = scan_type->realbits;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- ret = ad4695_read_one_sample(st, chan->address);
- if (ret)
- return ret;
-
- if (chan->scan_type.sign == 's')
- *val = sign_extend32(st->raw_data, realbits - 1);
- else
- *val = st->raw_data;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
- return IIO_VAL_INT;
- }
- unreachable();
+ ret = __ad4695_read_info_raw(st, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
*val = st->vref_mv;
- *val2 = chan->scan_type.realbits;
+ *val2 = realbits;
return IIO_VAL_FRACTIONAL_LOG2;
case IIO_TEMP:
/* T_scale (°C) = raw * V_REF (mV) / (-1.8 mV/°C * 2^16) */
@@ -717,111 +1113,245 @@ static int ad4695_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_CALIBSCALE:
switch (chan->type) {
case IIO_VOLTAGE:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- ret = regmap_read(st->regmap16,
- AD4695_REG_GAIN_IN(chan->scan_index),
- &reg_val);
- if (ret)
- return ret;
-
- *val = reg_val;
- *val2 = 15;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = regmap_read(st->regmap16,
+ AD4695_REG_GAIN_IN(chan->scan_index),
+ &reg_val);
+ iio_device_release_direct(indio_dev);
+ if (ret)
+ return ret;
+ *val = reg_val;
+ *val2 = 15;
- return IIO_VAL_FRACTIONAL_LOG2;
- }
- unreachable();
+ return IIO_VAL_FRACTIONAL_LOG2;
default:
return -EINVAL;
}
case IIO_CHAN_INFO_CALIBBIAS:
- switch (chan->type) {
- case IIO_VOLTAGE:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- ret = regmap_read(st->regmap16,
- AD4695_REG_OFFSET_IN(chan->scan_index),
- &reg_val);
- if (ret)
- return ret;
+	switch (chan->type) {
+	case IIO_VOLTAGE:
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = regmap_read(st->regmap16,
+ AD4695_REG_OFFSET_IN(chan->scan_index),
+ &reg_val);
+ iio_device_release_direct(indio_dev);
+ if (ret)
+ return ret;
- tmp = sign_extend32(reg_val, 15);
+ tmp = sign_extend32(reg_val, 15);
+ switch (cfg->oversampling_ratio) {
+ case 1:
*val = tmp / 4;
*val2 = abs(tmp) % 4 * MICRO / 4;
+ break;
+ case 4:
+ *val = tmp / 2;
+ *val2 = abs(tmp) % 2 * MICRO / 2;
+ break;
+ case 16:
+ *val = tmp;
+ *val2 = 0;
+ break;
+ case 64:
+ *val = tmp * 2;
+ *val2 = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
- if (tmp < 0 && *val2) {
- *val *= -1;
- *val2 *= -1;
- }
-
- return IIO_VAL_INT_PLUS_MICRO;
+ if (tmp < 0 && *val2) {
+ *val *= -1;
+ *val2 *= -1;
}
- unreachable();
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *val = cfg->oversampling_ratio;
+ return IIO_VAL_INT;
default:
return -EINVAL;
}
+ case IIO_CHAN_INFO_SAMP_FREQ: {
+ struct pwm_state state;
+ unsigned int osr = 1;
+
+ if (chan->type == IIO_VOLTAGE)
+ osr = cfg->oversampling_ratio;
+
+ ret = pwm_get_state_hw(st->cnv_pwm, &state);
+ if (ret)
+ return ret;
+
+ /*
+ * The effective sampling frequency for a channel is the input
+ * frequency divided by the channel's OSR value.
+ */
+ *val = DIV_ROUND_UP_ULL(NSEC_PER_SEC, state.period * osr);
+
+ return IIO_VAL_INT;
+ }
default:
return -EINVAL;
}
}
-static int ad4695_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask)
+static int ad4695_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return IIO_VAL_INT;
+ default:
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+}
+
+static int ad4695_set_osr_val(struct ad4695_state *st,
+ struct iio_chan_spec const *chan,
+ int val)
+{
+ int osr = ad4695_osr_to_regval(val);
+
+ if (osr < 0)
+ return osr;
+
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ st->channels_cfg[chan->scan_index].oversampling_ratio = val;
+ return regmap_update_bits(st->regmap,
+ AD4695_REG_CONFIG_IN(chan->scan_index),
+ AD4695_REG_CONFIG_IN_OSR_SET,
+ FIELD_PREP(AD4695_REG_CONFIG_IN_OSR_SET, osr));
+ default:
+ return -EINVAL;
+ }
+}
+
+static unsigned int ad4695_get_calibbias(int val, int val2, int osr)
+{
+ int val_calc, scale;
+
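+	/*
+	 * The offset register LSB weight depends on the OSR: 1/4 of a raw LSB
+	 * at OSR 1 up to 2 raw LSBs at OSR 64. Scale the user value to
+	 * register units accordingly.
+	 */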
+ switch (osr) {
+ case 4:
+ scale = 4;
+ break;
+ case 16:
+ scale = 2;
+ break;
+ case 64:
+ scale = 1;
+ break;
+ default:
+ scale = 8;
+ break;
+ }
+
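+	/* Clamp first so that the val * scale products below cannot overflow */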
+ val = clamp_t(int, val, S32_MIN / 8, S32_MAX / 8);
+
+ /* val2 range is (-MICRO, MICRO) if val == 0, otherwise [0, MICRO) */
+ if (val < 0)
+ val_calc = val * scale - val2 * scale / MICRO;
+ else if (val2 < 0)
+ /* if val2 < 0 then val == 0 */
+ val_calc = val2 * scale / (int)MICRO;
+ else
+ val_calc = val * scale + val2 * scale / MICRO;
+
+ val_calc /= 2;
+
+ return clamp_t(int, val_calc, S16_MIN, S16_MAX);
+}
+
+static int __ad4695_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
{
struct ad4695_state *st = iio_priv(indio_dev);
unsigned int reg_val;
+ unsigned int osr = 1;
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- switch (mask) {
- case IIO_CHAN_INFO_CALIBSCALE:
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (val < 0 || val2 < 0)
- reg_val = 0;
- else if (val > 1)
- reg_val = U16_MAX;
- else
- reg_val = (val * (1 << 16) +
- mul_u64_u32_div(val2, 1 << 16,
- MICRO)) / 2;
-
- return regmap_write(st->regmap16,
- AD4695_REG_GAIN_IN(chan->scan_index),
- reg_val);
- default:
- return -EINVAL;
- }
- case IIO_CHAN_INFO_CALIBBIAS:
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (val2 >= 0 && val > S16_MAX / 4)
- reg_val = S16_MAX;
- else if ((val2 < 0 ? -val : val) < S16_MIN / 4)
- reg_val = S16_MIN;
- else if (val2 < 0)
- reg_val = clamp_t(int,
- -(val * 4 + -val2 * 4 / MICRO),
- S16_MIN, S16_MAX);
- else if (val < 0)
- reg_val = clamp_t(int,
- val * 4 - val2 * 4 / MICRO,
- S16_MIN, S16_MAX);
- else
- reg_val = clamp_t(int,
- val * 4 + val2 * 4 / MICRO,
- S16_MIN, S16_MAX);
-
- return regmap_write(st->regmap16,
- AD4695_REG_OFFSET_IN(chan->scan_index),
- reg_val);
- default:
- return -EINVAL;
- }
+ if (chan->type == IIO_VOLTAGE)
+ osr = st->channels_cfg[chan->scan_index].oversampling_ratio;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBSCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (val < 0 || val2 < 0)
+ reg_val = 0;
+ else if (val > 1)
+ reg_val = U16_MAX;
+ else
+ reg_val = (val * (1 << 16) +
+ mul_u64_u32_div(val2, 1 << 16,
+ MICRO)) / 2;
+
+ return regmap_write(st->regmap16,
+ AD4695_REG_GAIN_IN(chan->scan_index),
+ reg_val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ reg_val = ad4695_get_calibbias(val, val2, osr);
+ return regmap_write(st->regmap16,
+ AD4695_REG_OFFSET_IN(chan->scan_index),
+ reg_val);
default:
return -EINVAL;
}
+ case IIO_CHAN_INFO_SAMP_FREQ: {
+ struct pwm_state state;
+ /*
+ * Limit the maximum acceptable sample rate according to
+ * the channel's oversampling ratio.
+ */
+ u64 max_osr_rate = DIV_ROUND_UP_ULL(st->chip_info->max_sample_rate,
+ osr);
+
+ if (val <= 0 || val > max_osr_rate)
+ return -EINVAL;
+
+ guard(mutex)(&st->cnv_pwm_lock);
+ pwm_get_state(st->cnv_pwm, &state);
+ /*
+		 * The CNV (input) frequency required for a given effective
+		 * sample rate is that rate multiplied by the OSR.
+ */
+ state.period = DIV_ROUND_UP_ULL(NSEC_PER_SEC, val * osr);
+ return pwm_apply_might_sleep(st->cnv_pwm, &state);
+ }
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return ad4695_set_osr_val(st, chan, val);
+ default:
+ return -EINVAL;
}
- unreachable();
+}
+
+static int ad4695_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = __ad4695_write_raw(indio_dev, chan, val, val2, mask);
+ iio_device_release_direct(indio_dev);
+
+ return ret;
}
static int ad4695_read_avail(struct iio_dev *indio_dev,
@@ -829,17 +1359,43 @@ static int ad4695_read_avail(struct iio_dev *indio_dev,
const int **vals, int *type, int *length,
long mask)
{
+ int ret;
static const int ad4695_calibscale_available[6] = {
/* Range of 0 (inclusive) to 2 (exclusive) */
0, 15, 1, 15, U16_MAX, 15
};
- static const int ad4695_calibbias_available[6] = {
+ static const int ad4695_calibbias_available[4][6] = {
/*
* Datasheet says FSR/8 which translates to signed/4. The step
- * depends on oversampling ratio which is always 1 for now.
+ * depends on oversampling ratio, so we need four different
+ * ranges to select from.
*/
- S16_MIN / 4, 0, 0, MICRO / 4, S16_MAX / 4, S16_MAX % 4 * MICRO / 4
+ {
+ S16_MIN / 4, 0,
+ 0, MICRO / 4,
+ S16_MAX / 4, S16_MAX % 4 * MICRO / 4
+ },
+ {
+ S16_MIN / 2, 0,
+ 0, MICRO / 2,
+ S16_MAX / 2, S16_MAX % 2 * MICRO / 2,
+ },
+ {
+ S16_MIN, 0,
+ 1, 0,
+ S16_MAX, 0,
+ },
+ {
+ S16_MIN * 2, 0,
+ 2, 0,
+ S16_MAX * 2, 0,
+ },
};
+ struct ad4695_state *st = iio_priv(indio_dev);
+ unsigned int osr = 1;
+
+ if (chan->type == IIO_VOLTAGE)
+ osr = st->channels_cfg[chan->scan_index].oversampling_ratio;
switch (mask) {
case IIO_CHAN_INFO_CALIBSCALE:
@@ -854,12 +1410,36 @@ static int ad4695_read_avail(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_VOLTAGE:
- *vals = ad4695_calibbias_available;
+ ret = ad4695_osr_to_regval(osr);
+ if (ret < 0)
+ return ret;
+ /*
+ * Select the appropriate calibbias array based on the
+ * OSR value in the register.
+ */
+ *vals = ad4695_calibbias_available[ret];
*type = IIO_VAL_INT_PLUS_MICRO;
return IIO_AVAIL_RANGE;
default:
return -EINVAL;
}
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ /* Max sample rate for the channel depends on OSR */
+ st->sample_freq_range[2] =
+ DIV_ROUND_UP_ULL(st->chip_info->max_sample_rate, osr);
+ *vals = st->sample_freq_range;
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_RANGE;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *vals = ad4695_oversampling_ratios;
+ *length = ARRAY_SIZE(ad4695_oversampling_ratios);
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
default:
return -EINVAL;
}
@@ -871,31 +1451,64 @@ static int ad4695_debugfs_reg_access(struct iio_dev *indio_dev,
unsigned int *readval)
{
struct ad4695_state *st = iio_priv(indio_dev);
-
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- if (readval) {
- if (regmap_check_range_table(st->regmap, reg,
- &ad4695_regmap_rd_table))
- return regmap_read(st->regmap, reg, readval);
- if (regmap_check_range_table(st->regmap16, reg,
- &ad4695_regmap16_rd_table))
- return regmap_read(st->regmap16, reg, readval);
- } else {
- if (regmap_check_range_table(st->regmap, reg,
- &ad4695_regmap_wr_table))
- return regmap_write(st->regmap, reg, writeval);
- if (regmap_check_range_table(st->regmap16, reg,
- &ad4695_regmap16_wr_table))
- return regmap_write(st->regmap16, reg, writeval);
- }
+ int ret = -EINVAL;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ if (readval) {
+ if (regmap_check_range_table(st->regmap, reg,
+ &ad4695_regmap_rd_table))
+ ret = regmap_read(st->regmap, reg, readval);
+ if (regmap_check_range_table(st->regmap16, reg,
+ &ad4695_regmap16_rd_table))
+ ret = regmap_read(st->regmap16, reg, readval);
+ } else {
+ if (regmap_check_range_table(st->regmap, reg,
+ &ad4695_regmap_wr_table))
+ ret = regmap_write(st->regmap, reg, writeval);
+ if (regmap_check_range_table(st->regmap16, reg,
+ &ad4695_regmap16_wr_table))
+ ret = regmap_write(st->regmap16, reg, writeval);
}
+ iio_device_release_direct(indio_dev);
- return -EINVAL;
+ return ret;
+}
+
+static int ad4695_get_current_scan_type(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad4695_state *st = iio_priv(indio_dev);
+ unsigned int osr = st->channels_cfg[chan->scan_index].oversampling_ratio;
+
+ switch (osr) {
+ case 1:
+ return AD4695_SCAN_TYPE_OSR_1;
+ case 4:
+ return AD4695_SCAN_TYPE_OSR_4;
+ case 16:
+ return AD4695_SCAN_TYPE_OSR_16;
+ case 64:
+ return AD4695_SCAN_TYPE_OSR_64;
+ default:
+ return -EINVAL;
+ }
}
static const struct iio_info ad4695_info = {
.read_raw = &ad4695_read_raw,
+ .write_raw_get_fmt = &ad4695_write_raw_get_fmt,
+ .write_raw = &ad4695_write_raw,
+ .read_avail = &ad4695_read_avail,
+ .debugfs_reg_access = &ad4695_debugfs_reg_access,
+};
+
+static const struct iio_info ad4695_offload_info = {
+ .read_raw = &ad4695_read_raw,
+ .write_raw_get_fmt = &ad4695_write_raw_get_fmt,
.write_raw = &ad4695_write_raw,
+ .get_current_scan_type = &ad4695_get_current_scan_type,
.read_avail = &ad4695_read_avail,
.debugfs_reg_access = &ad4695_debugfs_reg_access,
};
@@ -915,6 +1528,9 @@ static int ad4695_parse_channel_cfg(struct ad4695_state *st)
chan_cfg->highz_en = true;
chan_cfg->channel = i;
+ /* This is the default OSR after reset */
+ chan_cfg->oversampling_ratio = 1;
+
*iio_chan = ad4695_channel_template;
iio_chan->channel = i;
iio_chan->scan_index = i;
@@ -1008,26 +1624,188 @@ static int ad4695_parse_channel_cfg(struct ad4695_state *st)
return 0;
}
+static bool ad4695_offload_trigger_match(struct spi_offload_trigger *trigger,
+ enum spi_offload_trigger_type type,
+ u64 *args, u32 nargs)
+{
+ if (type != SPI_OFFLOAD_TRIGGER_DATA_READY)
+ return false;
+
+ /*
+ * Requires 2 args:
+ * args[0] is the trigger event.
+ * args[1] is the GPIO pin number.
+ */
+ if (nargs != 2 || args[0] != AD4695_TRIGGER_EVENT_BUSY)
+ return false;
+
+ return true;
+}
+
+static int ad4695_offload_trigger_request(struct spi_offload_trigger *trigger,
+ enum spi_offload_trigger_type type,
+ u64 *args, u32 nargs)
+{
+ struct ad4695_state *st = spi_offload_trigger_get_priv(trigger);
+
+ /* Should already be validated by match, but just in case. */
+ if (nargs != 2)
+ return -EINVAL;
+
+ /* DT tells us if BUSY event uses GP0 or GP3. */
+ if (args[1] == AD4695_TRIGGER_PIN_GP3)
+ return regmap_set_bits(st->regmap, AD4695_REG_GP_MODE,
+ AD4695_REG_GP_MODE_BUSY_GP_SEL);
+
+ return regmap_clear_bits(st->regmap, AD4695_REG_GP_MODE,
+ AD4695_REG_GP_MODE_BUSY_GP_SEL);
+}
+
+static int
+ad4695_offload_trigger_validate(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ if (config->type != SPI_OFFLOAD_TRIGGER_DATA_READY)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * NB: There are no enable/disable callbacks here because enabling or
+ * disabling the BUSY output on the ADC requires a SPI message.
+ */
+static const struct spi_offload_trigger_ops ad4695_offload_trigger_ops = {
+ .match = ad4695_offload_trigger_match,
+ .request = ad4695_offload_trigger_request,
+ .validate = ad4695_offload_trigger_validate,
+};
+
+static void ad4695_pwm_disable(void *pwm)
+{
+ pwm_disable(pwm);
+}
+
+static int ad4695_probe_spi_offload(struct iio_dev *indio_dev,
+ struct ad4695_state *st)
+{
+ struct device *dev = &st->spi->dev;
+ struct spi_offload_trigger_info trigger_info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &ad4695_offload_trigger_ops,
+ .priv = st,
+ };
+ struct pwm_state pwm_state;
+ struct dma_chan *rx_dma;
+ int ret, i;
+
+ indio_dev->info = &ad4695_offload_info;
+ indio_dev->num_channels = st->chip_info->num_voltage_inputs + 1;
+ indio_dev->setup_ops = &ad4695_offload_buffer_setup_ops;
+
+ if (!st->cnv_gpio)
+ return dev_err_probe(dev, -ENODEV,
+ "CNV GPIO is required for SPI offload\n");
+
+ ret = devm_spi_offload_trigger_register(dev, &trigger_info);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to register offload trigger\n");
+
+ st->offload_trigger = devm_spi_offload_trigger_get(dev, st->offload,
+ SPI_OFFLOAD_TRIGGER_DATA_READY);
+ if (IS_ERR(st->offload_trigger))
+ return dev_err_probe(dev, PTR_ERR(st->offload_trigger),
+ "failed to get offload trigger\n");
+
+ ret = devm_mutex_init(dev, &st->cnv_pwm_lock);
+ if (ret)
+ return ret;
+
+ st->cnv_pwm = devm_pwm_get(dev, NULL);
+ if (IS_ERR(st->cnv_pwm))
+ return dev_err_probe(dev, PTR_ERR(st->cnv_pwm),
+ "failed to get CNV PWM\n");
+
+ pwm_init_state(st->cnv_pwm, &pwm_state);
+
+ /* If firmware didn't provide default rate, use 10kHz (arbitrary). */
+ if (pwm_state.period == 0)
+ pwm_state.period = 100 * MILLI;
+
+ pwm_state.enabled = true;
+
+ ret = pwm_apply_might_sleep(st->cnv_pwm, &pwm_state);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to apply CNV PWM\n");
+
+ ret = devm_add_action_or_reset(dev, ad4695_pwm_disable, st->cnv_pwm);
+ if (ret)
+ return ret;
+
+ rx_dma = devm_spi_offload_rx_stream_request_dma_chan(dev, st->offload);
+ if (IS_ERR(rx_dma))
+ return dev_err_probe(dev, PTR_ERR(rx_dma),
+ "failed to get offload RX DMA\n");
+
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ struct iio_chan_spec *chan = &st->iio_chan[i];
+ struct ad4695_channel_config *cfg;
+
+ /*
+ * NB: When using offload support, all channels need to have the
+ * same bits_per_word because they all use the same SPI message
+ * for reading one sample. In order to prevent breaking
+ * userspace in the future when oversampling support is added,
+	 * all channels are set to read 19 bits with a shift of 3 to mask
+ * out the extra bits even though we currently only support 16
+ * bit samples (oversampling ratio == 1).
+ */
+ chan->scan_type.shift = 3;
+ chan->scan_type.storagebits = 32;
+ /* add sample frequency for PWM CNV trigger */
+ chan->info_mask_separate |= BIT(IIO_CHAN_INFO_SAMP_FREQ);
+ chan->info_mask_separate_available |= BIT(IIO_CHAN_INFO_SAMP_FREQ);
+
+ /* Add the oversampling properties only for voltage channels */
+ if (chan->type != IIO_VOLTAGE)
+ continue;
+
+ cfg = &st->channels_cfg[i];
+
+ chan->info_mask_separate |= BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO);
+ chan->info_mask_separate_available |=
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO);
+ chan->has_ext_scan_type = 1;
+ if (cfg->bipolar) {
+ chan->ext_scan_type = ad4695_scan_type_offload_s;
+ chan->num_ext_scan_type =
+ ARRAY_SIZE(ad4695_scan_type_offload_s);
+ } else {
+ chan->ext_scan_type = ad4695_scan_type_offload_u;
+ chan->num_ext_scan_type =
+ ARRAY_SIZE(ad4695_scan_type_offload_u);
+ }
+ }
+
+ return devm_iio_dmaengine_buffer_setup_with_handle(dev, indio_dev,
+ rx_dma, IIO_BUFFER_DIRECTION_IN);
+}
+
+static const struct spi_offload_config ad4695_spi_offload_config = {
+ .capability_flags = SPI_OFFLOAD_CAP_TRIGGER |
+ SPI_OFFLOAD_CAP_RX_STREAM_DMA,
+};
+
static int ad4695_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct ad4695_state *st;
struct iio_dev *indio_dev;
- struct gpio_desc *cnv_gpio;
bool use_internal_ldo_supply;
bool use_internal_ref_buffer;
int ret;
- cnv_gpio = devm_gpiod_get_optional(dev, "cnv", GPIOD_OUT_LOW);
- if (IS_ERR(cnv_gpio))
- return dev_err_probe(dev, PTR_ERR(cnv_gpio),
- "Failed to get CNV GPIO\n");
-
- /* Driver currently requires CNV pin to be connected to SPI CS */
- if (cnv_gpio)
- return dev_err_probe(dev, -ENODEV,
- "CNV GPIO is not supported\n");
-
indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
@@ -1039,19 +1817,27 @@ static int ad4695_probe(struct spi_device *spi)
if (!st->chip_info)
return -EINVAL;
- /* Registers cannot be read at the max allowable speed */
- spi->max_speed_hz = AD4695_REG_ACCESS_SCLK_HZ;
+ st->sample_freq_range[0] = 1; /* min */
+ st->sample_freq_range[1] = 1; /* step */
+ st->sample_freq_range[2] = st->chip_info->max_sample_rate; /* max */
- st->regmap = devm_regmap_init_spi(spi, &ad4695_regmap_config);
+ st->regmap = devm_regmap_init(dev, &ad4695_regmap_bus, st,
+ &ad4695_regmap_config);
if (IS_ERR(st->regmap))
return dev_err_probe(dev, PTR_ERR(st->regmap),
"Failed to initialize regmap\n");
- st->regmap16 = devm_regmap_init_spi(spi, &ad4695_regmap16_config);
+ st->regmap16 = devm_regmap_init(dev, &ad4695_regmap_bus, st,
+ &ad4695_regmap16_config);
if (IS_ERR(st->regmap16))
return dev_err_probe(dev, PTR_ERR(st->regmap16),
"Failed to initialize regmap16\n");
+ st->cnv_gpio = devm_gpiod_get_optional(dev, "cnv", GPIOD_OUT_LOW);
+ if (IS_ERR(st->cnv_gpio))
+ return dev_err_probe(dev, PTR_ERR(st->cnv_gpio),
+ "Failed to get CNV GPIO\n");
+
ret = devm_regulator_bulk_get_enable(dev,
ARRAY_SIZE(ad4695_power_supplies),
ad4695_power_supplies);
@@ -1179,12 +1965,31 @@ static int ad4695_probe(struct spi_device *spi)
indio_dev->channels = st->iio_chan;
indio_dev->num_channels = st->chip_info->num_voltage_inputs + 2;
- ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
- iio_pollfunc_store_time,
- ad4695_trigger_handler,
- &ad4695_buffer_setup_ops);
- if (ret)
- return ret;
+ st->offload = devm_spi_offload_get(dev, spi, &ad4695_spi_offload_config);
+ ret = PTR_ERR_OR_ZERO(st->offload);
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to get SPI offload\n");
+
+ /* If no SPI offload, fall back to low speed usage. */
+ if (ret == -ENODEV) {
+ /* Driver currently requires CNV pin to be connected to SPI CS */
+ if (st->cnv_gpio)
+ return dev_err_probe(dev, -EINVAL,
+ "CNV GPIO is not supported\n");
+
+ indio_dev->num_channels = st->chip_info->num_voltage_inputs + 2;
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ iio_pollfunc_store_time,
+ ad4695_trigger_handler,
+ &ad4695_buffer_setup_ops);
+ if (ret)
+ return ret;
+ } else {
+ ret = ad4695_probe_spi_offload(indio_dev, st);
+ if (ret)
+ return ret;
+ }
return devm_iio_device_register(dev, indio_dev);
}
@@ -1221,3 +2026,4 @@ MODULE_AUTHOR("Ramona Gradinariu <ramona.gradinariu@analog.com>");
MODULE_AUTHOR("David Lechner <dlechner@baylibre.com>");
MODULE_DESCRIPTION("Analog Devices AD4695 ADC driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_DMAENGINE_BUFFER");
diff --git a/drivers/iio/adc/ad4851.c b/drivers/iio/adc/ad4851.c
new file mode 100644
index 000000000000..98ebc853db79
--- /dev/null
+++ b/drivers/iio/adc/ad4851.c
@@ -0,0 +1,1315 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Analog Devices AD4851 DAS driver
+ *
+ * Copyright 2024 Analog Devices Inc.
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/minmax.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
+#include <linux/units.h>
+
+#include <linux/iio/backend.h>
+#include <linux/iio/iio.h>
+
+#define AD4851_REG_INTERFACE_CONFIG_A 0x00
+#define AD4851_REG_INTERFACE_CONFIG_B 0x01
+#define AD4851_REG_PRODUCT_ID_L 0x04
+#define AD4851_REG_PRODUCT_ID_H 0x05
+#define AD4851_REG_DEVICE_CTRL 0x25
+#define AD4851_REG_PACKET 0x26
+#define AD4851_REG_OVERSAMPLE 0x27
+
+#define AD4851_REG_CH_CONFIG_BASE 0x2A
+#define AD4851_REG_CHX_SOFTSPAN(ch) ((0x12 * (ch)) + AD4851_REG_CH_CONFIG_BASE)
+#define AD4851_REG_CHX_OFFSET(ch) (AD4851_REG_CHX_SOFTSPAN(ch) + 0x01)
+#define AD4851_REG_CHX_OFFSET_LSB(ch) AD4851_REG_CHX_OFFSET(ch)
+#define AD4851_REG_CHX_OFFSET_MID(ch) (AD4851_REG_CHX_OFFSET_LSB(ch) + 0x01)
+#define AD4851_REG_CHX_OFFSET_MSB(ch) (AD4851_REG_CHX_OFFSET_MID(ch) + 0x01)
+#define AD4851_REG_CHX_GAIN(ch) (AD4851_REG_CHX_OFFSET(ch) + 0x03)
+#define AD4851_REG_CHX_GAIN_LSB(ch) AD4851_REG_CHX_GAIN(ch)
+#define AD4851_REG_CHX_GAIN_MSB(ch) (AD4851_REG_CHX_GAIN(ch) + 0x01)
+#define AD4851_REG_CHX_PHASE(ch) (AD4851_REG_CHX_GAIN(ch) + 0x02)
+#define AD4851_REG_CHX_PHASE_LSB(ch) AD4851_REG_CHX_PHASE(ch)
+#define AD4851_REG_CHX_PHASE_MSB(ch) (AD4851_REG_CHX_PHASE_LSB(ch) + 0x01)
+
+#define AD4851_REG_TESTPAT_0(c) (0x38 + (c) * 0x12)
+#define AD4851_REG_TESTPAT_1(c) (0x39 + (c) * 0x12)
+#define AD4851_REG_TESTPAT_2(c) (0x3A + (c) * 0x12)
+#define AD4851_REG_TESTPAT_3(c) (0x3B + (c) * 0x12)
+
+#define AD4851_SW_RESET (BIT(7) | BIT(0))
+#define AD4851_SDO_ENABLE BIT(4)
+#define AD4851_SINGLE_INSTRUCTION BIT(7)
+#define AD4851_REFBUF BIT(2)
+#define AD4851_REFSEL BIT(1)
+#define AD4851_ECHO_CLOCK_MODE BIT(0)
+
+#define AD4851_PACKET_FORMAT_0 0
+#define AD4851_PACKET_FORMAT_1 1
+#define AD4851_PACKET_FORMAT_MASK GENMASK(1, 0)
+
+#define AD4851_OS_EN_MSK BIT(7)
+#define AD4851_OS_RATIO_MSK GENMASK(3, 0)
+
+#define AD4851_TEST_PAT BIT(2)
+
+#define AD4858_PACKET_SIZE_20 0
+#define AD4858_PACKET_SIZE_24 1
+#define AD4858_PACKET_SIZE_32 2
+
+#define AD4857_PACKET_SIZE_16 0
+#define AD4857_PACKET_SIZE_24 1
+
+#define AD4851_TESTPAT_0_DEFAULT 0x2A
+#define AD4851_TESTPAT_1_DEFAULT 0x3C
+#define AD4851_TESTPAT_2_DEFAULT 0xCE
+#define AD4851_TESTPAT_3_DEFAULT(c) (0x0A + (0x10 * (c)))
+
+#define AD4851_SOFTSPAN_0V_2V5 0
+#define AD4851_SOFTSPAN_N2V5_2V5 1
+#define AD4851_SOFTSPAN_0V_5V 2
+#define AD4851_SOFTSPAN_N5V_5V 3
+#define AD4851_SOFTSPAN_0V_6V25 4
+#define AD4851_SOFTSPAN_N6V25_6V25 5
+#define AD4851_SOFTSPAN_0V_10V 6
+#define AD4851_SOFTSPAN_N10V_10V 7
+#define AD4851_SOFTSPAN_0V_12V5 8
+#define AD4851_SOFTSPAN_N12V5_12V5 9
+#define AD4851_SOFTSPAN_0V_20V 10
+#define AD4851_SOFTSPAN_N20V_20V 11
+#define AD4851_SOFTSPAN_0V_25V 12
+#define AD4851_SOFTSPAN_N25V_25V 13
+#define AD4851_SOFTSPAN_0V_40V 14
+#define AD4851_SOFTSPAN_N40V_40V 15
+
+#define AD4851_MAX_LANES 8
+#define AD4851_MAX_IODELAY 32
+
+#define AD4851_T_CNVH_NS 40
+#define AD4851_T_CNVH_NS_MARGIN 10
+
+#define AD4851_MAX_SCALE_AVAIL 8
+
+#define AD4851_MAX_CH_NR 8
+#define AD4851_CH_START 0
+
+struct ad4851_scale {
+ unsigned int scale_val;
+ u8 reg_val;
+};
+
+static const struct ad4851_scale ad4851_scale_table_unipolar[] = {
+ { 2500, 0x0 },
+ { 5000, 0x2 },
+ { 6250, 0x4 },
+ { 10000, 0x6 },
+ { 12500, 0x8 },
+ { 20000, 0xA },
+ { 25000, 0xC },
+ { 40000, 0xE },
+};
+
+static const struct ad4851_scale ad4851_scale_table_bipolar[] = {
+ { 5000, 0x1 },
+ { 10000, 0x3 },
+ { 12500, 0x5 },
+ { 20000, 0x7 },
+ { 25000, 0x9 },
+ { 40000, 0xB },
+ { 50000, 0xD },
+ { 80000, 0xF },
+};
+
+static const unsigned int ad4851_scale_avail_unipolar[] = {
+ 2500,
+ 5000,
+ 6250,
+ 10000,
+ 12500,
+ 20000,
+ 25000,
+ 40000,
+};
+
+static const unsigned int ad4851_scale_avail_bipolar[] = {
+ 5000,
+ 10000,
+ 12500,
+ 20000,
+ 25000,
+ 40000,
+ 50000,
+ 80000,
+};
+
+struct ad4851_chip_info {
+ const char *name;
+ unsigned int product_id;
+ int num_scales;
+ unsigned long max_sample_rate_hz;
+ unsigned int resolution;
+ unsigned int max_channels;
+ int (*parse_channels)(struct iio_dev *indio_dev);
+};
+
+enum {
+ AD4851_SCAN_TYPE_NORMAL,
+ AD4851_SCAN_TYPE_RESOLUTION_BOOST,
+};
+
+struct ad4851_state {
+ struct spi_device *spi;
+ struct pwm_device *cnv;
+ struct iio_backend *back;
+ /*
+ * Synchronize access to members of the driver state, and ensure
+ * atomicity of consecutive regmap operations.
+ */
+ struct mutex lock;
+ struct regmap *regmap;
+ const struct ad4851_chip_info *info;
+ struct gpio_desc *pd_gpio;
+ bool resolution_boost_enabled;
+ unsigned long cnv_trigger_rate_hz;
+ unsigned int osr;
+ bool vrefbuf_en;
+ bool vrefio_en;
+ bool bipolar_ch[AD4851_MAX_CH_NR];
+ unsigned int scales_unipolar[AD4851_MAX_SCALE_AVAIL][2];
+ unsigned int scales_bipolar[AD4851_MAX_SCALE_AVAIL][2];
+};
+
+static int ad4851_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int writeval,
+ unsigned int *readval)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+
+ if (readval)
+ return regmap_read(st->regmap, reg, readval);
+
+ return regmap_write(st->regmap, reg, writeval);
+}
+
+static int ad4851_set_sampling_freq(struct ad4851_state *st, unsigned int freq)
+{
+ struct pwm_state cnv_state = {
+ .duty_cycle = AD4851_T_CNVH_NS + AD4851_T_CNVH_NS_MARGIN,
+ .enabled = true,
+ };
+ int ret;
+
+ freq = clamp(freq, 1, st->info->max_sample_rate_hz);
+
+ cnv_state.period = DIV_ROUND_UP_ULL(NSEC_PER_SEC, freq);
+
+ ret = pwm_apply_might_sleep(st->cnv, &cnv_state);
+ if (ret)
+ return ret;
+
+ st->cnv_trigger_rate_hz = freq;
+
+ return 0;
+}
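+
+/*
+ * Illustrative numbers, derived from the helper above rather than the
+ * datasheet: requesting 250 kSPS yields a CNV period of
+ * DIV_ROUND_UP_ULL(NSEC_PER_SEC, 250000) = 4000 ns with a fixed 50 ns
+ * high pulse (AD4851_T_CNVH_NS + AD4851_T_CNVH_NS_MARGIN).
+ */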
+
+static const int ad4851_oversampling_ratios[] = {
+ 1, 2, 4, 8, 16, 32, 64, 128,
+ 256, 512, 1024, 2048, 4096, 8192, 16384, 32768,
+ 65536,
+};
+
+static int ad4851_osr_to_regval(unsigned int ratio)
+{
+ int i;
+
+ for (i = 1; i < ARRAY_SIZE(ad4851_oversampling_ratios); i++)
+ if (ratio == ad4851_oversampling_ratios[i])
+ return i - 1;
+
+ return -EINVAL;
+}
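+
+/*
+ * Regval mapping of the helper above: ratio 2 -> 0, 4 -> 1, ...,
+ * 65536 -> 15, which fits AD4851_OS_RATIO_MSK. A ratio of 1
+ * (oversampling disabled) never reaches this helper; the callers handle
+ * it by clearing AD4851_OS_EN_MSK instead.
+ */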
+
+static int __ad4851_get_scale(struct iio_dev *indio_dev, int scale_tbl,
+ unsigned int *val, unsigned int *val2)
+{
+ const struct iio_scan_type *scan_type;
+ unsigned int tmp;
+
+ scan_type = iio_get_current_scan_type(indio_dev, &indio_dev->channels[0]);
+ if (IS_ERR(scan_type))
+ return PTR_ERR(scan_type);
+
+ tmp = ((u64)scale_tbl * MICRO) >> scan_type->realbits;
+ *val = tmp / MICRO;
+ *val2 = tmp % MICRO;
+
+ return 0;
+}
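+
+/*
+ * Worked example, assuming the 40 V softspan entry (scale_val = 40000)
+ * and a 20-bit scan type: tmp = (40000 * MICRO) >> 20 = 38146, so
+ * *val = 0 and *val2 = 38146, i.e. roughly 0.038 mV per LSB.
+ */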
+
+static int ad4851_scale_fill(struct iio_dev *indio_dev)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+ unsigned int i, val1, val2;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(ad4851_scale_avail_unipolar); i++) {
+ ret = __ad4851_get_scale(indio_dev,
+ ad4851_scale_avail_unipolar[i],
+ &val1, &val2);
+ if (ret)
+ return ret;
+
+ st->scales_unipolar[i][0] = val1;
+ st->scales_unipolar[i][1] = val2;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ad4851_scale_avail_bipolar); i++) {
+ ret = __ad4851_get_scale(indio_dev,
+ ad4851_scale_avail_bipolar[i],
+ &val1, &val2);
+ if (ret)
+ return ret;
+
+ st->scales_bipolar[i][0] = val1;
+ st->scales_bipolar[i][1] = val2;
+ }
+
+ return 0;
+}
+
+static int ad4851_set_oversampling_ratio(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int osr)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+ int val, ret;
+
+ guard(mutex)(&st->lock);
+
+ if (osr == 1) {
+ ret = regmap_clear_bits(st->regmap, AD4851_REG_OVERSAMPLE,
+ AD4851_OS_EN_MSK);
+ if (ret)
+ return ret;
+ } else {
+ val = ad4851_osr_to_regval(osr);
+ if (val < 0)
+ return -EINVAL;
+
+ ret = regmap_update_bits(st->regmap, AD4851_REG_OVERSAMPLE,
+ AD4851_OS_EN_MSK |
+ AD4851_OS_RATIO_MSK,
+ FIELD_PREP(AD4851_OS_EN_MSK, 1) |
+ FIELD_PREP(AD4851_OS_RATIO_MSK, val));
+ if (ret)
+ return ret;
+ }
+
+ ret = iio_backend_oversampling_ratio_set(st->back, osr);
+ if (ret)
+ return ret;
+
+ switch (st->info->resolution) {
+ case 20:
+ switch (osr) {
+ case 0:
+ return -EINVAL;
+ case 1:
+ val = 20;
+ break;
+ default:
+ val = 24;
+ break;
+ }
+ break;
+ case 16:
+ val = 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = iio_backend_data_size_set(st->back, val);
+ if (ret)
+ return ret;
+
+ if (osr == 1 || st->info->resolution == 16) {
+ ret = regmap_clear_bits(st->regmap, AD4851_REG_PACKET,
+ AD4851_PACKET_FORMAT_MASK);
+ if (ret)
+ return ret;
+
+ st->resolution_boost_enabled = false;
+ } else {
+ ret = regmap_update_bits(st->regmap, AD4851_REG_PACKET,
+ AD4851_PACKET_FORMAT_MASK,
+ FIELD_PREP(AD4851_PACKET_FORMAT_MASK, 1));
+ if (ret)
+ return ret;
+
+ st->resolution_boost_enabled = true;
+ }
+
+ if (st->osr != osr) {
+ ret = ad4851_scale_fill(indio_dev);
+ if (ret)
+ return ret;
+
+ st->osr = osr;
+ }
+
+ return 0;
+}
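+
+/*
+ * To summarize the helper above: on the 20-bit parts any OSR > 1 switches
+ * the device to packet format 1 and the backend to 24-bit samples
+ * ("resolution boost"), e.g. OSR 2 on an ad4858 reports 24 valid bits per
+ * sample; with OSR == 1, and on the 16-bit parts, packet format 0 is kept.
+ */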
+
+static int ad4851_get_oversampling_ratio(struct ad4851_state *st, unsigned int *val)
+{
+ unsigned int osr;
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_read(st->regmap, AD4851_REG_OVERSAMPLE, &osr);
+ if (ret)
+ return ret;
+
+ if (!FIELD_GET(AD4851_OS_EN_MSK, osr))
+ *val = 1;
+ else
+ *val = ad4851_oversampling_ratios[FIELD_GET(AD4851_OS_RATIO_MSK, osr) + 1];
+
+ st->osr = *val;
+
+ return IIO_VAL_INT;
+}
+
+static void ad4851_pwm_disable(void *data)
+{
+ pwm_disable(data);
+}
+
+static int ad4851_setup(struct ad4851_state *st)
+{
+ unsigned int product_id;
+ int ret;
+
+ if (st->pd_gpio) {
+ /* To initiate a global reset, bring the PD pin high twice */
+ gpiod_set_value(st->pd_gpio, 1);
+ fsleep(1);
+ gpiod_set_value(st->pd_gpio, 0);
+ fsleep(1);
+ gpiod_set_value(st->pd_gpio, 1);
+ fsleep(1);
+ gpiod_set_value(st->pd_gpio, 0);
+ fsleep(1000);
+ } else {
+ ret = regmap_set_bits(st->regmap, AD4851_REG_INTERFACE_CONFIG_A,
+ AD4851_SW_RESET);
+ if (ret)
+ return ret;
+ }
+
+ if (st->vrefbuf_en) {
+ ret = regmap_set_bits(st->regmap, AD4851_REG_DEVICE_CTRL,
+ AD4851_REFBUF);
+ if (ret)
+ return ret;
+ }
+
+ if (st->vrefio_en) {
+ ret = regmap_set_bits(st->regmap, AD4851_REG_DEVICE_CTRL,
+ AD4851_REFSEL);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_write(st->regmap, AD4851_REG_INTERFACE_CONFIG_B,
+ AD4851_SINGLE_INSTRUCTION);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, AD4851_REG_INTERFACE_CONFIG_A,
+ AD4851_SDO_ENABLE);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(st->regmap, AD4851_REG_PRODUCT_ID_L, &product_id);
+ if (ret)
+ return ret;
+
+ if (product_id != st->info->product_id)
+ dev_info(&st->spi->dev, "Unknown product ID: 0x%02X\n",
+ product_id);
+
+ ret = regmap_set_bits(st->regmap, AD4851_REG_DEVICE_CTRL,
+ AD4851_ECHO_CLOCK_MODE);
+ if (ret)
+ return ret;
+
+ return regmap_write(st->regmap, AD4851_REG_PACKET, 0);
+}
+
+/*
+ * Find the longest consecutive run of clear bits in a window of @field,
+ * store its offset relative to @start in *val and return its length.
+ */
+static int ad4851_find_opt(const unsigned long *field, unsigned int start,
+ unsigned int nbits, unsigned int *val)
+{
+ unsigned int bit = start, end, start_cnt, cnt = 0;
+
+ for_each_clear_bitrange_from(bit, end, field, start + nbits) {
+ if (end - bit > cnt) {
+ cnt = end - bit;
+ start_cnt = bit - start;
+ }
+ }
+
+ if (!cnt)
+ return -ENOENT;
+
+ *val = start_cnt;
+
+ return cnt;
+}
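+
+/*
+ * Worked example: for an 8-delay window whose error bits are
+ * 1,0,0,0,1,1,0,0 (starting at @start), the longest clear run has length
+ * 3 beginning at offset 1, so *val is set to 1 and 3 is returned; the
+ * caller then centres on delay 1 + 3/2 = 2.
+ */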
+
+static int ad4851_calibrate(struct iio_dev *indio_dev)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+ unsigned int opt_delay, num_lanes, delay, i, s;
+ enum iio_backend_interface_type interface_type;
+ DECLARE_BITMAP(pn_status, AD4851_MAX_LANES * AD4851_MAX_IODELAY);
+ bool status;
+ int c, ret;
+
+ ret = iio_backend_interface_type_get(st->back, &interface_type);
+ if (ret)
+ return ret;
+
+ switch (interface_type) {
+ case IIO_BACKEND_INTERFACE_SERIAL_CMOS:
+ num_lanes = indio_dev->num_channels;
+ break;
+ case IIO_BACKEND_INTERFACE_SERIAL_LVDS:
+ num_lanes = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (st->info->resolution == 16) {
+ ret = iio_backend_data_size_set(st->back, 24);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, AD4851_REG_PACKET,
+ AD4851_TEST_PAT | AD4857_PACKET_SIZE_24);
+ if (ret)
+ return ret;
+ } else {
+ ret = iio_backend_data_size_set(st->back, 32);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, AD4851_REG_PACKET,
+ AD4851_TEST_PAT | AD4858_PACKET_SIZE_32);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ ret = regmap_write(st->regmap, AD4851_REG_TESTPAT_0(i),
+ AD4851_TESTPAT_0_DEFAULT);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, AD4851_REG_TESTPAT_1(i),
+ AD4851_TESTPAT_1_DEFAULT);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, AD4851_REG_TESTPAT_2(i),
+ AD4851_TESTPAT_2_DEFAULT);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, AD4851_REG_TESTPAT_3(i),
+ AD4851_TESTPAT_3_DEFAULT(i));
+ if (ret)
+ return ret;
+
+ ret = iio_backend_chan_enable(st->back,
+ indio_dev->channels[i].channel);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < num_lanes; i++) {
+ for (delay = 0; delay < AD4851_MAX_IODELAY; delay++) {
+ ret = iio_backend_iodelay_set(st->back, i, delay);
+ if (ret)
+ return ret;
+
+ ret = iio_backend_chan_status(st->back, i, &status);
+ if (ret)
+ return ret;
+
+ __assign_bit(i * AD4851_MAX_IODELAY + delay, pn_status,
+ status);
+ }
+ }
+
+ for (i = 0; i < num_lanes; i++) {
+ c = ad4851_find_opt(pn_status, i * AD4851_MAX_IODELAY,
+ AD4851_MAX_IODELAY, &s);
+ if (c < 0)
+ return c;
+
+ opt_delay = s + c / 2;
+ ret = iio_backend_iodelay_set(st->back, i, opt_delay);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ ret = iio_backend_chan_disable(st->back, i);
+ if (ret)
+ return ret;
+ }
+
+ ret = iio_backend_data_size_set(st->back, 20);
+ if (ret)
+ return ret;
+
+ return regmap_write(st->regmap, AD4851_REG_PACKET, 0);
+}
+
+static int ad4851_get_calibscale(struct ad4851_state *st, int ch, int *val, int *val2)
+{
+ unsigned int reg_val;
+ int gain;
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_read(st->regmap, AD4851_REG_CHX_GAIN_MSB(ch), &reg_val);
+ if (ret)
+ return ret;
+
+ gain = reg_val << 8;
+
+ ret = regmap_read(st->regmap, AD4851_REG_CHX_GAIN_LSB(ch), &reg_val);
+ if (ret)
+ return ret;
+
+ gain |= reg_val;
+
+ *val = gain;
+ *val2 = 15;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+}
+
+static int ad4851_set_calibscale(struct ad4851_state *st, int ch, int val,
+ int val2)
+{
+ u64 gain;
+ u8 buf[2];
+ int ret;
+
+ if (val < 0 || val2 < 0)
+ return -EINVAL;
+
+ gain = val * MICRO + val2;
+ gain = DIV_U64_ROUND_CLOSEST(gain * 32768, MICRO);
+
+ put_unaligned_be16(gain, buf);
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_write(st->regmap, AD4851_REG_CHX_GAIN_MSB(ch), buf[0]);
+ if (ret)
+ return ret;
+
+ return regmap_write(st->regmap, AD4851_REG_CHX_GAIN_LSB(ch), buf[1]);
+}
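+
+/*
+ * Round-trip example for the two helpers above: writing val = 1,
+ * val2 = 500000 (i.e. 1.5) stores DIV_U64_ROUND_CLOSEST(1500000 * 32768,
+ * MICRO) = 49152 = 0xC000 in the gain registers; reading back yields
+ * val = 49152, val2 = 15, which IIO_VAL_FRACTIONAL_LOG2 presents as
+ * 49152 / 2^15 = 1.5.
+ */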
+
+static int ad4851_get_calibbias(struct ad4851_state *st, int ch, int *val)
+{
+ unsigned int lsb, mid, msb;
+ int ret;
+
+ guard(mutex)(&st->lock);
+ /*
+ * Testing showed that bulk write operations don't work as expected
+ * here, since CS needs to be raised after each byte transaction.
+ */
+ ret = regmap_read(st->regmap, AD4851_REG_CHX_OFFSET_MSB(ch), &msb);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(st->regmap, AD4851_REG_CHX_OFFSET_MID(ch), &mid);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(st->regmap, AD4851_REG_CHX_OFFSET_LSB(ch), &lsb);
+ if (ret)
+ return ret;
+
+ if (st->info->resolution == 16) {
+ *val = msb << 8;
+ *val |= mid;
+ *val = sign_extend32(*val, 15);
+ } else {
+ *val = msb << 12;
+ *val |= mid << 4;
+ *val |= lsb >> 4;
+ *val = sign_extend32(*val, 19);
+ }
+
+ return IIO_VAL_INT;
+}
+
+static int ad4851_set_calibbias(struct ad4851_state *st, int ch, int val)
+{
+ u8 buf[3];
+ int ret;
+
+ if (val < 0)
+ return -EINVAL;
+
+ if (st->info->resolution == 16)
+ put_unaligned_be16(val, buf);
+ else
+ put_unaligned_be24(val << 4, buf);
+
+ guard(mutex)(&st->lock);
+ /*
+ * Testing showed that bulk write operations don't work as expected
+ * here, since CS needs to be raised after each byte transaction.
+ */
+ ret = regmap_write(st->regmap, AD4851_REG_CHX_OFFSET_LSB(ch), buf[2]);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, AD4851_REG_CHX_OFFSET_MID(ch), buf[1]);
+ if (ret)
+ return ret;
+
+ return regmap_write(st->regmap, AD4851_REG_CHX_OFFSET_MSB(ch), buf[0]);
+}
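+
+/*
+ * Byte-layout example for the 20-bit parts: a calibbias of 0x12345 is
+ * stored as 0x12345 << 4 = 0x123450, i.e. buf = {0x12, 0x34, 0x50};
+ * the read path reconstructs it as (msb << 12) | (mid << 4) | (lsb >> 4).
+ */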
+
+static int ad4851_set_scale(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int val, int val2)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+ unsigned int scale_val[2];
+ unsigned int i;
+ const struct ad4851_scale *scale_table;
+ size_t table_size;
+ int ret;
+
+ if (st->bipolar_ch[chan->channel]) {
+ scale_table = ad4851_scale_table_bipolar;
+ table_size = ARRAY_SIZE(ad4851_scale_table_bipolar);
+ } else {
+ scale_table = ad4851_scale_table_unipolar;
+ table_size = ARRAY_SIZE(ad4851_scale_table_unipolar);
+ }
+
+ for (i = 0; i < table_size; i++) {
+ ret = __ad4851_get_scale(indio_dev, scale_table[i].scale_val,
+ &scale_val[0], &scale_val[1]);
+ if (ret)
+ return ret;
+
+ if (scale_val[0] != val || scale_val[1] != val2)
+ continue;
+
+ return regmap_write(st->regmap,
+ AD4851_REG_CHX_SOFTSPAN(chan->channel),
+ scale_table[i].reg_val);
+ }
+
+ return -EINVAL;
+}
+
+static int ad4851_get_scale(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val,
+ int *val2)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+ const struct ad4851_scale *scale_table;
+ size_t table_size;
+ u32 softspan_val;
+ int i, ret;
+
+ if (st->bipolar_ch[chan->channel]) {
+ scale_table = ad4851_scale_table_bipolar;
+ table_size = ARRAY_SIZE(ad4851_scale_table_bipolar);
+ } else {
+ scale_table = ad4851_scale_table_unipolar;
+ table_size = ARRAY_SIZE(ad4851_scale_table_unipolar);
+ }
+
+ ret = regmap_read(st->regmap, AD4851_REG_CHX_SOFTSPAN(chan->channel),
+ &softspan_val);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < table_size; i++) {
+ if (softspan_val == scale_table[i].reg_val)
+ break;
+ }
+
+ if (i == table_size)
+ return -EIO;
+
+ ret = __ad4851_get_scale(indio_dev, scale_table[i].scale_val, val,
+ val2);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int ad4851_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long info)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = st->cnv_trigger_rate_hz;
+ *val2 = st->osr;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ return ad4851_get_calibscale(st, chan->channel, val, val2);
+ case IIO_CHAN_INFO_SCALE:
+ return ad4851_get_scale(indio_dev, chan, val, val2);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return ad4851_get_calibbias(st, chan->channel, val);
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return ad4851_get_oversampling_ratio(st, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad4851_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long info)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val < 0 || val2 < 0)
+ return -EINVAL;
+ return ad4851_set_sampling_freq(st, val * st->osr + val2 * st->osr / MICRO);
+ case IIO_CHAN_INFO_SCALE:
+ return ad4851_set_scale(indio_dev, chan, val, val2);
+ case IIO_CHAN_INFO_CALIBSCALE:
+ return ad4851_set_calibscale(st, chan->channel, val, val2);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return ad4851_set_calibbias(st, chan->channel, val);
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return ad4851_set_oversampling_ratio(indio_dev, chan, val);
+ default:
+ return -EINVAL;
+ }
+}
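+
+/*
+ * Note on the sampling frequency handling above: the PWM triggers CNV at
+ * cnv_trigger_rate_hz while the device averages osr conversions per
+ * output sample, so the rate reported to (and accepted from) userspace
+ * is cnv_trigger_rate_hz / osr. E.g. a 1 MHz CNV with OSR 4 reads back
+ * as 250 kHz.
+ */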
+
+static int ad4851_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+ unsigned int c;
+ int ret;
+
+ for (c = 0; c < indio_dev->num_channels; c++) {
+ if (test_bit(c, scan_mask))
+ ret = iio_backend_chan_enable(st->back, c);
+ else
+ ret = iio_backend_chan_disable(st->back, c);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ad4851_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ if (st->bipolar_ch[chan->channel]) {
+ *vals = (const int *)st->scales_bipolar;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ /* Values are stored in a 2D matrix */
+ *length = ARRAY_SIZE(ad4851_scale_avail_bipolar) * 2;
+ } else {
+ *vals = (const int *)st->scales_unipolar;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ /* Values are stored in a 2D matrix */
+ *length = ARRAY_SIZE(ad4851_scale_avail_unipolar) * 2;
+ }
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *vals = ad4851_oversampling_ratios;
+ *length = ARRAY_SIZE(ad4851_oversampling_ratios);
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_scan_type ad4851_scan_type_20_u[] = {
+ [AD4851_SCAN_TYPE_NORMAL] = {
+ .sign = 'u',
+ .realbits = 20,
+ .storagebits = 32,
+ },
+ [AD4851_SCAN_TYPE_RESOLUTION_BOOST] = {
+ .sign = 'u',
+ .realbits = 24,
+ .storagebits = 32,
+ },
+};
+
+static const struct iio_scan_type ad4851_scan_type_20_b[] = {
+ [AD4851_SCAN_TYPE_NORMAL] = {
+ .sign = 's',
+ .realbits = 20,
+ .storagebits = 32,
+ },
+ [AD4851_SCAN_TYPE_RESOLUTION_BOOST] = {
+ .sign = 's',
+ .realbits = 24,
+ .storagebits = 32,
+ },
+};
+
+static int ad4851_get_current_scan_type(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+
+ return st->resolution_boost_enabled ? AD4851_SCAN_TYPE_RESOLUTION_BOOST
+ : AD4851_SCAN_TYPE_NORMAL;
+}
+
+#define AD4851_IIO_CHANNEL \
+ .type = IIO_VOLTAGE, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_CALIBSCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .indexed = 1
+
+/*
+ * For AD4858_IIO_CHANNEL the scan_type is assigned dynamically in the
+ * parse_channels() callback.
+ */
+#define AD4858_IIO_CHANNEL \
+{ \
+ AD4851_IIO_CHANNEL \
+}
+
+#define AD4857_IIO_CHANNEL \
+{ \
+ AD4851_IIO_CHANNEL, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ }, \
+}
+
+static int ad4851_parse_channels_common(struct iio_dev *indio_dev,
+ struct iio_chan_spec **chans,
+ const struct iio_chan_spec ad4851_chan)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+ struct device *dev = &st->spi->dev;
+ struct iio_chan_spec *channels, *chan_start;
+ unsigned int num_channels, reg;
+ unsigned int index = 0;
+ int ret;
+
+ num_channels = device_get_child_node_count(dev);
+ if (num_channels > AD4851_MAX_CH_NR)
+ return dev_err_probe(dev, -EINVAL, "Too many channels: %u\n",
+ num_channels);
+
+ channels = devm_kcalloc(dev, num_channels, sizeof(*channels), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ chan_start = channels;
+
+ device_for_each_child_node_scoped(dev, child) {
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Missing channel number\n");
+ if (reg >= AD4851_MAX_CH_NR)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid channel number\n");
+ *channels = ad4851_chan;
+ channels->scan_index = index++;
+ channels->channel = reg;
+
+ if (fwnode_property_present(child, "diff-channels")) {
+ channels->channel2 = reg + st->info->max_channels;
+ channels->differential = 1;
+ }
+
+ st->bipolar_ch[reg] = fwnode_property_read_bool(child, "bipolar");
+
+ if (st->bipolar_ch[reg]) {
+ channels->scan_type.sign = 's';
+ } else {
+ ret = regmap_write(st->regmap, AD4851_REG_CHX_SOFTSPAN(reg),
+ AD4851_SOFTSPAN_0V_40V);
+ if (ret)
+ return ret;
+ }
+
+ channels++;
+ }
+
+ *chans = chan_start;
+
+ return num_channels;
+}
+
+static int ad4857_parse_channels(struct iio_dev *indio_dev)
+{
+ struct iio_chan_spec *ad4851_channels;
+ const struct iio_chan_spec ad4851_chan = AD4857_IIO_CHANNEL;
+ int ret;
+
+ ret = ad4851_parse_channels_common(indio_dev, &ad4851_channels,
+ ad4851_chan);
+ if (ret < 0)
+ return ret;
+
+ indio_dev->channels = ad4851_channels;
+ indio_dev->num_channels = ret;
+
+ return 0;
+}
+
+static int ad4858_parse_channels(struct iio_dev *indio_dev)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+ struct device *dev = &st->spi->dev;
+ struct iio_chan_spec *ad4851_channels, *chan;
+ const struct iio_chan_spec ad4851_chan = AD4858_IIO_CHANNEL;
+ int ret;
+
+ ret = ad4851_parse_channels_common(indio_dev, &ad4851_channels,
+ ad4851_chan);
+ if (ret < 0)
+ return ret;
+
+ /* Iterate with a cursor so ad4851_channels keeps pointing at the base. */
+ chan = ad4851_channels;
+ device_for_each_child_node_scoped(dev, child) {
+ chan->has_ext_scan_type = 1;
+ if (fwnode_property_read_bool(child, "bipolar")) {
+ chan->ext_scan_type = ad4851_scan_type_20_b;
+ chan->num_ext_scan_type = ARRAY_SIZE(ad4851_scan_type_20_b);
+ } else {
+ chan->ext_scan_type = ad4851_scan_type_20_u;
+ chan->num_ext_scan_type = ARRAY_SIZE(ad4851_scan_type_20_u);
+ }
+ chan++;
+ }
+
+ indio_dev->channels = ad4851_channels;
+ indio_dev->num_channels = ret;
+
+ return 0;
+}
+
+/*
+ * The parse_channels() callback handles the rest of the channel-related
+ * attributes that are usually stored in the chip info structure.
+ */
+static const struct ad4851_chip_info ad4851_info = {
+ .name = "ad4851",
+ .product_id = 0x67,
+ .max_sample_rate_hz = 250 * KILO,
+ .resolution = 16,
+ .max_channels = AD4851_MAX_CH_NR,
+ .parse_channels = ad4857_parse_channels,
+};
+
+static const struct ad4851_chip_info ad4852_info = {
+ .name = "ad4852",
+ .product_id = 0x66,
+ .max_sample_rate_hz = 250 * KILO,
+ .resolution = 20,
+ .max_channels = AD4851_MAX_CH_NR,
+ .parse_channels = ad4858_parse_channels,
+};
+
+static const struct ad4851_chip_info ad4853_info = {
+ .name = "ad4853",
+ .product_id = 0x65,
+ .max_sample_rate_hz = 1 * MEGA,
+ .resolution = 16,
+ .max_channels = AD4851_MAX_CH_NR,
+ .parse_channels = ad4857_parse_channels,
+};
+
+static const struct ad4851_chip_info ad4854_info = {
+ .name = "ad4854",
+ .product_id = 0x64,
+ .max_sample_rate_hz = 1 * MEGA,
+ .resolution = 20,
+ .max_channels = AD4851_MAX_CH_NR,
+ .parse_channels = ad4858_parse_channels,
+};
+
+static const struct ad4851_chip_info ad4855_info = {
+ .name = "ad4855",
+ .product_id = 0x63,
+ .max_sample_rate_hz = 250 * KILO,
+ .resolution = 16,
+ .max_channels = AD4851_MAX_CH_NR,
+ .parse_channels = ad4857_parse_channels,
+};
+
+static const struct ad4851_chip_info ad4856_info = {
+ .name = "ad4856",
+ .product_id = 0x62,
+ .max_sample_rate_hz = 250 * KILO,
+ .resolution = 20,
+ .max_channels = AD4851_MAX_CH_NR,
+ .parse_channels = ad4858_parse_channels,
+};
+
+static const struct ad4851_chip_info ad4857_info = {
+ .name = "ad4857",
+ .product_id = 0x61,
+ .max_sample_rate_hz = 1 * MEGA,
+ .resolution = 16,
+ .max_channels = AD4851_MAX_CH_NR,
+ .parse_channels = ad4857_parse_channels,
+};
+
+static const struct ad4851_chip_info ad4858_info = {
+ .name = "ad4858",
+ .product_id = 0x60,
+ .max_sample_rate_hz = 1 * MEGA,
+ .resolution = 20,
+ .max_channels = AD4851_MAX_CH_NR,
+ .parse_channels = ad4858_parse_channels,
+};
+
+static const struct ad4851_chip_info ad4858i_info = {
+ .name = "ad4858i",
+ .product_id = 0x6F,
+ .max_sample_rate_hz = 1 * MEGA,
+ .resolution = 20,
+ .max_channels = AD4851_MAX_CH_NR,
+ .parse_channels = ad4858_parse_channels,
+};
+
+static const struct iio_info ad4851_iio_info = {
+ .debugfs_reg_access = ad4851_reg_access,
+ .read_raw = ad4851_read_raw,
+ .write_raw = ad4851_write_raw,
+ .update_scan_mode = ad4851_update_scan_mode,
+ .get_current_scan_type = ad4851_get_current_scan_type,
+ .read_avail = ad4851_read_avail,
+};
+
+static const struct regmap_config regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .read_flag_mask = BIT(7),
+};
+
+static const char * const ad4851_power_supplies[] = {
+ "vcc", "vdd", "vee", "vio",
+};
+
+static int ad4851_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct device *dev = &spi->dev;
+ struct ad4851_state *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ st->spi = spi;
+
+ ret = devm_mutex_init(dev, &st->lock);
+ if (ret)
+ return ret;
+
+ ret = devm_regulator_bulk_get_enable(dev,
+ ARRAY_SIZE(ad4851_power_supplies),
+ ad4851_power_supplies);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get and enable supplies\n");
+
+ ret = devm_regulator_get_enable_optional(dev, "vddh");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to enable vddh voltage\n");
+
+ ret = devm_regulator_get_enable_optional(dev, "vddl");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to enable vddl voltage\n");
+
+ ret = devm_regulator_get_enable_optional(dev, "vrefbuf");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to enable vrefbuf voltage\n");
+
+ st->vrefbuf_en = ret != -ENODEV;
+
+ ret = devm_regulator_get_enable_optional(dev, "vrefio");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to enable vrefio voltage\n");
+
+ st->vrefio_en = ret != -ENODEV;
+
+ st->pd_gpio = devm_gpiod_get_optional(dev, "pd", GPIOD_OUT_LOW);
+ if (IS_ERR(st->pd_gpio))
+ return dev_err_probe(dev, PTR_ERR(st->pd_gpio),
+ "Error on requesting pd GPIO\n");
+
+ st->cnv = devm_pwm_get(dev, NULL);
+ if (IS_ERR(st->cnv))
+ return dev_err_probe(dev, PTR_ERR(st->cnv),
+ "Error on requesting pwm\n");
+
+ st->info = spi_get_device_match_data(spi);
+ if (!st->info)
+ return -ENODEV;
+
+ st->regmap = devm_regmap_init_spi(spi, &regmap_config);
+ if (IS_ERR(st->regmap))
+ return PTR_ERR(st->regmap);
+
+ ret = ad4851_set_sampling_freq(st, HZ_PER_MHZ);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&st->spi->dev, ad4851_pwm_disable,
+ st->cnv);
+ if (ret)
+ return ret;
+
+ ret = ad4851_setup(st);
+ if (ret)
+ return ret;
+
+ indio_dev->name = st->info->name;
+ indio_dev->info = &ad4851_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = st->info->parse_channels(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = ad4851_scale_fill(indio_dev);
+ if (ret)
+ return ret;
+
+ st->back = devm_iio_backend_get(dev, NULL);
+ if (IS_ERR(st->back))
+ return PTR_ERR(st->back);
+
+ ret = devm_iio_backend_request_buffer(dev, st->back, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_backend_enable(dev, st->back);
+ if (ret)
+ return ret;
+
+ ret = ad4851_calibrate(indio_dev);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id ad4851_of_match[] = {
+ { .compatible = "adi,ad4851", .data = &ad4851_info, },
+ { .compatible = "adi,ad4852", .data = &ad4852_info, },
+ { .compatible = "adi,ad4853", .data = &ad4853_info, },
+ { .compatible = "adi,ad4854", .data = &ad4854_info, },
+ { .compatible = "adi,ad4855", .data = &ad4855_info, },
+ { .compatible = "adi,ad4856", .data = &ad4856_info, },
+ { .compatible = "adi,ad4857", .data = &ad4857_info, },
+ { .compatible = "adi,ad4858", .data = &ad4858_info, },
+ { .compatible = "adi,ad4858i", .data = &ad4858i_info, },
+ { }
+};
+
+static const struct spi_device_id ad4851_spi_id[] = {
+ { "ad4851", (kernel_ulong_t)&ad4851_info },
+ { "ad4852", (kernel_ulong_t)&ad4852_info },
+ { "ad4853", (kernel_ulong_t)&ad4853_info },
+ { "ad4854", (kernel_ulong_t)&ad4854_info },
+ { "ad4855", (kernel_ulong_t)&ad4855_info },
+ { "ad4856", (kernel_ulong_t)&ad4856_info },
+ { "ad4857", (kernel_ulong_t)&ad4857_info },
+ { "ad4858", (kernel_ulong_t)&ad4858_info },
+ { "ad4858i", (kernel_ulong_t)&ad4858i_info },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad4851_spi_id);
+
+static struct spi_driver ad4851_driver = {
+ .probe = ad4851_probe,
+ .driver = {
+ .name = "ad4851",
+ .of_match_table = ad4851_of_match,
+ },
+ .id_table = ad4851_spi_id,
+};
+module_spi_driver(ad4851_driver);
+
+MODULE_AUTHOR("Sergiu Cuciurean <sergiu.cuciurean@analog.com>");
+MODULE_AUTHOR("Dragos Bogdan <dragos.bogdan@analog.com>");
+MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD4851 DAS driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_BACKEND");
diff --git a/drivers/iio/adc/ad7091r-base.c b/drivers/iio/adc/ad7091r-base.c
index 606486c4dfe8..931ff71b2888 100644
--- a/drivers/iio/adc/ad7091r-base.c
+++ b/drivers/iio/adc/ad7091r-base.c
@@ -7,6 +7,7 @@
#include <linux/bitops.h>
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/interrupt.h>
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 6ae27cdd3250..3ea81a98e455 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -53,6 +53,11 @@
#define AD7124_ADC_CTRL_MODE_MSK GENMASK(5, 2)
#define AD7124_ADC_CTRL_MODE(x) FIELD_PREP(AD7124_ADC_CTRL_MODE_MSK, x)
+#define AD7124_MODE_CAL_INT_ZERO 0x5 /* Internal Zero-Scale Calibration */
+#define AD7124_MODE_CAL_INT_FULL 0x6 /* Internal Full-Scale Calibration */
+#define AD7124_MODE_CAL_SYS_ZERO 0x7 /* System Zero-Scale Calibration */
+#define AD7124_MODE_CAL_SYS_FULL 0x8 /* System Full-Scale Calibration */
+
/* AD7124 ID */
#define AD7124_DEVICE_ID_MSK GENMASK(7, 4)
#define AD7124_DEVICE_ID_GET(x) FIELD_GET(AD7124_DEVICE_ID_MSK, x)
@@ -151,7 +156,11 @@ struct ad7124_chip_info {
struct ad7124_channel_config {
bool live;
unsigned int cfg_slot;
- /* Following fields are used to compare equality. */
+ /*
+ * The following fields are used to compare configs for equality. If
+ * you adapt them, you most likely also have to adapt
+ * ad7124_find_similar_live_cfg(), too.
+ */
struct_group(config_props,
enum ad7124_ref_sel refsel;
bool bipolar;
@@ -162,6 +171,8 @@ struct ad7124_channel_config {
unsigned int odr;
unsigned int odr_sel_bits;
unsigned int filter_type;
+ unsigned int calibration_offset;
+ unsigned int calibration_gain;
);
};
@@ -170,6 +181,7 @@ struct ad7124_channel {
struct ad7124_channel_config cfg;
unsigned int ain;
unsigned int slot;
+ u8 syscalib_mode;
};
struct ad7124_state {
@@ -182,24 +194,13 @@ struct ad7124_state {
unsigned int num_channels;
struct mutex cfgs_lock; /* lock for configs access */
unsigned long cfg_slots_status; /* bitmap with slot status (1 means it is used) */
- DECLARE_KFIFO(live_cfgs_fifo, struct ad7124_channel_config *, AD7124_MAX_CONFIGS);
-};
-static const struct iio_chan_spec ad7124_channel_template = {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .differential = 1,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_OFFSET) |
- BIT(IIO_CHAN_INFO_SAMP_FREQ) |
- BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
- .scan_type = {
- .sign = 'u',
- .realbits = 24,
- .storagebits = 32,
- .endianness = IIO_BE,
- },
+ /*
+ * Stores the power-on reset value of the GAIN(x) registers, which is
+ * needed for measurements at gain 1 (i.e. CONFIG(x).PGA == 0)
+ */
+ unsigned int gain_default;
+ DECLARE_KFIFO(live_cfgs_fifo, struct ad7124_channel_config *, AD7124_MAX_CONFIGS);
};
static struct ad7124_chip_info ad7124_chip_info_tbl[] = {
@@ -338,15 +339,42 @@ static struct ad7124_channel_config *ad7124_find_similar_live_cfg(struct ad7124_
struct ad7124_channel_config *cfg)
{
struct ad7124_channel_config *cfg_aux;
- ptrdiff_t cmp_size;
int i;
- cmp_size = sizeof_field(struct ad7124_channel_config, config_props);
+ /*
+ * This is just to make sure that the comparison is adapted whenever
+ * struct ad7124_channel_config is changed.
+ */
+ static_assert(sizeof_field(struct ad7124_channel_config, config_props) ==
+ sizeof(struct {
+ enum ad7124_ref_sel refsel;
+ bool bipolar;
+ bool buf_positive;
+ bool buf_negative;
+ unsigned int vref_mv;
+ unsigned int pga_bits;
+ unsigned int odr;
+ unsigned int odr_sel_bits;
+ unsigned int filter_type;
+ unsigned int calibration_offset;
+ unsigned int calibration_gain;
+ }));
+
for (i = 0; i < st->num_channels; i++) {
cfg_aux = &st->channels[i].cfg;
if (cfg_aux->live &&
- !memcmp(&cfg->config_props, &cfg_aux->config_props, cmp_size))
+ cfg->refsel == cfg_aux->refsel &&
+ cfg->bipolar == cfg_aux->bipolar &&
+ cfg->buf_positive == cfg_aux->buf_positive &&
+ cfg->buf_negative == cfg_aux->buf_negative &&
+ cfg->vref_mv == cfg_aux->vref_mv &&
+ cfg->pga_bits == cfg_aux->pga_bits &&
+ cfg->odr == cfg_aux->odr &&
+ cfg->odr_sel_bits == cfg_aux->odr_sel_bits &&
+ cfg->filter_type == cfg_aux->filter_type &&
+ cfg->calibration_offset == cfg_aux->calibration_offset &&
+ cfg->calibration_gain == cfg_aux->calibration_gain)
return cfg_aux;
}
@@ -402,6 +430,14 @@ static int ad7124_write_config(struct ad7124_state *st, struct ad7124_channel_co
cfg->cfg_slot = cfg_slot;
+ ret = ad_sd_write_reg(&st->sd, AD7124_OFFSET(cfg->cfg_slot), 3, cfg->calibration_offset);
+ if (ret)
+ return ret;
+
+ ret = ad_sd_write_reg(&st->sd, AD7124_GAIN(cfg->cfg_slot), 3, cfg->calibration_gain);
+ if (ret)
+ return ret;
+
tmp = (cfg->buf_positive << 1) + cfg->buf_negative;
val = AD7124_CONFIG_BIPOLAR(cfg->bipolar) | AD7124_CONFIG_REF_SEL(cfg->refsel) |
AD7124_CONFIG_IN_BUFF(tmp) | AD7124_CONFIG_PGA(cfg->pga_bits);
@@ -540,14 +576,21 @@ static int ad7124_append_status(struct ad_sigma_delta *sd, bool append)
return 0;
}
-static int ad7124_disable_all(struct ad_sigma_delta *sd)
+static int ad7124_disable_one(struct ad_sigma_delta *sd, unsigned int chan)
{
struct ad7124_state *st = container_of(sd, struct ad7124_state, sd);
+
+ /* The relevant thing here is that AD7124_CHANNEL_EN_MSK is cleared. */
+ return ad_sd_write_reg(&st->sd, AD7124_CHANNEL(chan), 2, 0);
+}
+
+static int ad7124_disable_all(struct ad_sigma_delta *sd)
+{
int ret;
int i;
- for (i = 0; i < st->num_channels; i++) {
- ret = ad7124_spi_write_mask(st, AD7124_CHANNEL(i), AD7124_CHANNEL_EN_MSK, 0, 2);
+ for (i = 0; i < 16; i++) {
+ ret = ad7124_disable_one(sd, i);
if (ret < 0)
return ret;
}
@@ -555,13 +598,6 @@ static int ad7124_disable_all(struct ad_sigma_delta *sd)
return 0;
}
-static int ad7124_disable_one(struct ad_sigma_delta *sd, unsigned int chan)
-{
- struct ad7124_state *st = container_of(sd, struct ad7124_state, sd);
-
- return ad7124_spi_write_mask(st, AD7124_CHANNEL(chan), AD7124_CHANNEL_EN_MSK, 0, 2);
-}
-
static const struct ad_sigma_delta_info ad7124_sigma_delta_info = {
.set_channel = ad7124_set_channel,
.append_status = ad7124_append_status,
@@ -808,13 +844,22 @@ static int ad7124_soft_reset(struct ad7124_state *st)
return dev_err_probe(dev, ret, "Error reading status register\n");
if (!(readval & AD7124_STATUS_POR_FLAG_MSK))
- return 0;
+ break;
/* The AD7124 typically requires 2 ms to power up and settle */
usleep_range(100, 2000);
} while (--timeout);
- return dev_err_probe(dev, -EIO, "Soft reset failed\n");
+ if (readval & AD7124_STATUS_POR_FLAG_MSK)
+ return dev_err_probe(dev, -EIO, "Soft reset failed\n");
+
+ ret = ad_sd_read_reg(&st->sd, AD7124_GAIN(0), 3, &st->gain_default);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Error reading gain register\n");
+
+ dev_dbg(dev, "Reset value of GAIN register is 0x%x\n", st->gain_default);
+
+ return 0;
}
static int ad7124_check_chip_id(struct ad7124_state *st)
@@ -842,6 +887,140 @@ static int ad7124_check_chip_id(struct ad7124_state *st)
return 0;
}
+enum {
+ AD7124_SYSCALIB_ZERO_SCALE,
+ AD7124_SYSCALIB_FULL_SCALE,
+};
+
+static int ad7124_syscalib_locked(struct ad7124_state *st, const struct iio_chan_spec *chan)
+{
+ struct device *dev = &st->sd.spi->dev;
+ struct ad7124_channel *ch = &st->channels[chan->channel];
+ int ret;
+
+ if (ch->syscalib_mode == AD7124_SYSCALIB_ZERO_SCALE) {
+ ch->cfg.calibration_offset = 0x800000;
+
+ ret = ad_sd_calibrate(&st->sd, AD7124_MODE_CAL_SYS_ZERO,
+ chan->address);
+ if (ret < 0)
+ return ret;
+
+ ret = ad_sd_read_reg(&st->sd, AD7124_OFFSET(ch->cfg.cfg_slot), 3,
+ &ch->cfg.calibration_offset);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "offset for channel %d after zero-scale calibration: 0x%x\n",
+ chan->channel, ch->cfg.calibration_offset);
+ } else {
+ ch->cfg.calibration_gain = st->gain_default;
+
+ ret = ad_sd_calibrate(&st->sd, AD7124_MODE_CAL_SYS_FULL,
+ chan->address);
+ if (ret < 0)
+ return ret;
+
+ ret = ad_sd_read_reg(&st->sd, AD7124_GAIN(ch->cfg.cfg_slot), 3,
+ &ch->cfg.calibration_gain);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "gain for channel %d after full-scale calibration: 0x%x\n",
+ chan->channel, ch->cfg.calibration_gain);
+ }
+
+ return 0;
+}
+
+static ssize_t ad7124_write_syscalib(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct ad7124_state *st = iio_priv(indio_dev);
+ bool sys_calib;
+ int ret;
+
+ ret = kstrtobool(buf, &sys_calib);
+ if (ret)
+ return ret;
+
+ if (!sys_calib)
+ return len;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = ad7124_syscalib_locked(st, chan);
+
+ iio_device_release_direct(indio_dev);
+
+ return ret ?: len;
+}
+
+static const char * const ad7124_syscalib_modes[] = {
+ [AD7124_SYSCALIB_ZERO_SCALE] = "zero_scale",
+ [AD7124_SYSCALIB_FULL_SCALE] = "full_scale",
+};
+
+static int ad7124_set_syscalib_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
+{
+ struct ad7124_state *st = iio_priv(indio_dev);
+
+ st->channels[chan->channel].syscalib_mode = mode;
+
+ return 0;
+}
+
+static int ad7124_get_syscalib_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad7124_state *st = iio_priv(indio_dev);
+
+ return st->channels[chan->channel].syscalib_mode;
+}
+
+static const struct iio_enum ad7124_syscalib_mode_enum = {
+ .items = ad7124_syscalib_modes,
+ .num_items = ARRAY_SIZE(ad7124_syscalib_modes),
+ .set = ad7124_set_syscalib_mode,
+ .get = ad7124_get_syscalib_mode
+};
+
+static const struct iio_chan_spec_ext_info ad7124_calibsys_ext_info[] = {
+ {
+ .name = "sys_calibration",
+ .write = ad7124_write_syscalib,
+ .shared = IIO_SEPARATE,
+ },
+ IIO_ENUM("sys_calibration_mode", IIO_SEPARATE,
+ &ad7124_syscalib_mode_enum),
+ IIO_ENUM_AVAILABLE("sys_calibration_mode", IIO_SHARED_BY_TYPE,
+ &ad7124_syscalib_mode_enum),
+ { }
+};
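+
+/*
+ * Illustrative sysfs usage of the attributes above (the channel name is
+ * hypothetical, for a differential voltage0-voltage1 channel):
+ *   echo full_scale > in_voltage0-voltage1_sys_calibration_mode
+ *   echo 1 > in_voltage0-voltage1_sys_calibration
+ */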
+
+static const struct iio_chan_spec ad7124_channel_template = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .differential = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 24,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ .ext_info = ad7124_calibsys_ext_info,
+};
+
/*
* Input specifiers 8 - 15 are explicitly reserved for ad7124-4
* while they are fine for ad7124-8. Values above 31 don't fit
@@ -881,12 +1060,12 @@ static int ad7124_parse_channel_config(struct iio_dev *indio_dev,
/* Add one for temperature */
st->num_channels = min(num_channels + 1, AD7124_MAX_CHANNELS);
- chan = devm_kcalloc(indio_dev->dev.parent, st->num_channels,
+ chan = devm_kcalloc(dev, st->num_channels,
sizeof(*chan), GFP_KERNEL);
if (!chan)
return -ENOMEM;
- channels = devm_kcalloc(indio_dev->dev.parent, st->num_channels, sizeof(*channels),
+ channels = devm_kcalloc(dev, st->num_channels, sizeof(*channels),
GFP_KERNEL);
if (!channels)
return -ENOMEM;
@@ -1016,11 +1195,10 @@ static int ad7124_setup(struct ad7124_state *st)
* set all channels to this default value.
*/
ad7124_set_channel_odr(st, i, 10);
-
- /* Disable all channels to prevent unintended conversions. */
- ad_sd_write_reg(&st->sd, AD7124_CHANNEL(i), 2, 0);
}
+ ad7124_disable_all(&st->sd);
+
ret = ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, 2, st->adc_control);
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to setup CONTROL register\n");
@@ -1028,6 +1206,91 @@ static int ad7124_setup(struct ad7124_state *st)
return ret;
}
+static int __ad7124_calibrate_all(struct ad7124_state *st, struct iio_dev *indio_dev)
+{
+ struct device *dev = &st->sd.spi->dev;
+ int ret, i;
+
+ for (i = 0; i < st->num_channels; i++) {
+
+ if (indio_dev->channels[i].type != IIO_VOLTAGE)
+ continue;
+
+ /*
+ * For calibration the OFFSET register should hold its reset default
+ * value. For the GAIN register there is no such requirement but
+ * for gain 1 it should hold the reset default value, too. So to
+ * simplify matters use the reset default value for both.
+ */
+ st->channels[i].cfg.calibration_offset = 0x800000;
+ st->channels[i].cfg.calibration_gain = st->gain_default;
+
+ /*
+ * Full-scale calibration isn't supported at gain 1, so skip in
+ * that case. Note that untypically full-scale calibration has
+ * to happen before zero-scale calibration. This only applies to
+ * the internal calibration. For system calibration it's as
+ * usual: first zero-scale then full-scale calibration.
+ */
+ if (st->channels[i].cfg.pga_bits > 0) {
+ ret = ad_sd_calibrate(&st->sd, AD7124_MODE_CAL_INT_FULL, i);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Read out the resulting value of GAIN
+ * after full-scale calibration because the next
+ * ad_sd_calibrate() call overwrites this via
+ * ad_sigma_delta_set_channel() -> ad7124_set_channel()
+ * ... -> ad7124_enable_channel().
+ */
+ ret = ad_sd_read_reg(&st->sd, AD7124_GAIN(st->channels[i].cfg.cfg_slot), 3,
+ &st->channels[i].cfg.calibration_gain);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = ad_sd_calibrate(&st->sd, AD7124_MODE_CAL_INT_ZERO, i);
+ if (ret < 0)
+ return ret;
+
+ ret = ad_sd_read_reg(&st->sd, AD7124_OFFSET(st->channels[i].cfg.cfg_slot), 3,
+ &st->channels[i].cfg.calibration_offset);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "offset and gain for channel %d = 0x%x + 0x%x\n", i,
+ st->channels[i].cfg.calibration_offset,
+ st->channels[i].cfg.calibration_gain);
+ }
+
+ return 0;
+}
+
+static int ad7124_calibrate_all(struct ad7124_state *st, struct iio_dev *indio_dev)
+{
+ int ret;
+ unsigned int adc_control = st->adc_control;
+
+ /*
+ * Calibration isn't supported at full power, so speed down a bit.
+ * Setting .adc_control is enough here because the control register is
+ * written as part of ad_sd_calibrate() -> ad_sigma_delta_set_mode().
+ * The resulting calibration is then also valid for high-speed, so just
+ * restore adc_control afterwards.
+ */
+ if (FIELD_GET(AD7124_ADC_CTRL_PWR_MSK, adc_control) >= AD7124_FULL_POWER) {
+ st->adc_control &= ~AD7124_ADC_CTRL_PWR_MSK;
+ st->adc_control |= AD7124_ADC_CTRL_PWR(AD7124_MID_POWER);
+ }
+
+ ret = __ad7124_calibrate_all(st, indio_dev);
+
+ st->adc_control = adc_control;
+
+ return ret;
+}
+
static void ad7124_reg_disable(void *r)
{
regulator_disable(r);
@@ -1106,6 +1369,10 @@ static int ad7124_probe(struct spi_device *spi)
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to setup triggers\n");
+ ret = ad7124_calibrate_all(st, indio_dev);
+ if (ret)
+ return ret;
+
ret = devm_iio_device_register(&spi->dev, indio_dev);
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to register iio device\n");
diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c
index 6c4ed10ae580..69de5886474c 100644
--- a/drivers/iio/adc/ad7173.c
+++ b/drivers/iio/adc/ad7173.c
@@ -35,6 +35,7 @@
#include <linux/units.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
@@ -102,6 +103,7 @@
#define AD7173_GPIO_PDSW BIT(14)
#define AD7173_GPIO_OP_EN2_3 BIT(13)
+#define AD4111_GPIO_GP_OW_EN BIT(12)
#define AD7173_GPIO_MUX_IO BIT(12)
#define AD7173_GPIO_SYNC_EN BIT(11)
#define AD7173_GPIO_ERR_EN BIT(10)
@@ -149,6 +151,7 @@
#define AD7173_FILTER_ODR0_MASK GENMASK(5, 0)
#define AD7173_MAX_CONFIGS 8
+#define AD4111_OW_DET_THRSH_MV 300
#define AD7173_MODE_CAL_INT_ZERO 0x4 /* Internal Zero-Scale Calibration */
#define AD7173_MODE_CAL_INT_FULL 0x5 /* Internal Full-Scale Calibration */
@@ -171,6 +174,7 @@ struct ad7173_device_info {
unsigned int clock;
unsigned int id;
char *name;
+ const struct ad_sigma_delta_info *sd_info;
bool has_current_inputs;
bool has_vincom_input;
bool has_temp;
@@ -181,15 +185,23 @@ struct ad7173_device_info {
bool has_int_ref;
bool has_ref2;
bool has_internal_fs_calibration;
+ bool has_openwire_det;
bool higher_gpio_bits;
u8 num_gpios;
};
struct ad7173_channel_config {
+ /* Openwire detection threshold */
+ unsigned int openwire_thrsh_raw;
+ int openwire_comp_chan;
u8 cfg_slot;
bool live;
- /* Following fields are used to compare equality. */
+ /*
+ * Following fields are used to compare equality. If you
+ * make adaptations in it, you most likely also have to adapt
+ * ad7173_find_live_config(), too.
+ */
struct_group(config_props,
bool bipolar;
bool input_buf;
@@ -202,11 +214,11 @@ struct ad7173_channel {
unsigned int ain;
struct ad7173_channel_config cfg;
u8 syscalib_mode;
+ bool openwire_det_en;
};
struct ad7173_state {
struct ad_sigma_delta sd;
- struct ad_sigma_delta_info sigma_delta_info;
const struct ad7173_device_info *info;
struct ad7173_channel *channels;
struct regulator_bulk_data regulators[3];
@@ -265,228 +277,6 @@ static unsigned int ad4111_current_channel_config[] = {
0x18B, /* 12:IIN3+ 11:IIN3− */
};
-static const struct ad7173_device_info ad4111_device_info = {
- .name = "ad4111",
- .id = AD4111_ID,
- .num_voltage_in_div = 8,
- .num_channels = 16,
- .num_configs = 8,
- .num_voltage_in = 8,
- .num_gpios = 2,
- .higher_gpio_bits = true,
- .has_temp = true,
- .has_vincom_input = true,
- .has_input_buf = true,
- .has_current_inputs = true,
- .has_int_ref = true,
- .has_internal_fs_calibration = true,
- .clock = 2 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7173_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad4112_device_info = {
- .name = "ad4112",
- .id = AD4112_ID,
- .num_voltage_in_div = 8,
- .num_channels = 16,
- .num_configs = 8,
- .num_voltage_in = 8,
- .num_gpios = 2,
- .higher_gpio_bits = true,
- .has_vincom_input = true,
- .has_temp = true,
- .has_input_buf = true,
- .has_current_inputs = true,
- .has_int_ref = true,
- .has_internal_fs_calibration = true,
- .clock = 2 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7173_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad4113_device_info = {
- .name = "ad4113",
- .id = AD4113_ID,
- .num_voltage_in_div = 8,
- .num_channels = 16,
- .num_configs = 8,
- .num_voltage_in = 8,
- .num_gpios = 2,
- .data_reg_only_16bit = true,
- .higher_gpio_bits = true,
- .has_vincom_input = true,
- .has_input_buf = true,
- .has_int_ref = true,
- .clock = 2 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7173_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad4114_device_info = {
- .name = "ad4114",
- .id = AD4114_ID,
- .num_voltage_in_div = 16,
- .num_channels = 16,
- .num_configs = 8,
- .num_voltage_in = 16,
- .num_gpios = 4,
- .has_vincom_input = true,
- .has_temp = true,
- .has_input_buf = true,
- .has_int_ref = true,
- .has_internal_fs_calibration = true,
- .clock = 2 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7173_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad4115_device_info = {
- .name = "ad4115",
- .id = AD4115_ID,
- .num_voltage_in_div = 16,
- .num_channels = 16,
- .num_configs = 8,
- .num_voltage_in = 16,
- .num_gpios = 4,
- .has_vincom_input = true,
- .has_temp = true,
- .has_input_buf = true,
- .has_int_ref = true,
- .has_internal_fs_calibration = true,
- .clock = 8 * HZ_PER_MHZ,
- .sinc5_data_rates = ad4115_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad4115_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad4116_device_info = {
- .name = "ad4116",
- .id = AD4116_ID,
- .num_voltage_in_div = 11,
- .num_channels = 16,
- .num_configs = 8,
- .num_voltage_in = 16,
- .num_gpios = 4,
- .has_vincom_input = true,
- .has_temp = true,
- .has_input_buf = true,
- .has_int_ref = true,
- .has_internal_fs_calibration = true,
- .clock = 4 * HZ_PER_MHZ,
- .sinc5_data_rates = ad4116_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad4116_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad7172_2_device_info = {
- .name = "ad7172-2",
- .id = AD7172_2_ID,
- .num_voltage_in = 5,
- .num_channels = 4,
- .num_configs = 4,
- .num_gpios = 2,
- .has_temp = true,
- .has_input_buf = true,
- .has_int_ref = true,
- .has_pow_supply_monitoring = true,
- .clock = 2 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7173_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad7172_4_device_info = {
- .name = "ad7172-4",
- .id = AD7172_4_ID,
- .num_voltage_in = 9,
- .num_channels = 8,
- .num_configs = 8,
- .num_gpios = 4,
- .has_input_buf = true,
- .has_ref2 = true,
- .has_pow_supply_monitoring = true,
- .clock = 2 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7173_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad7173_8_device_info = {
- .name = "ad7173-8",
- .id = AD7173_ID,
- .num_voltage_in = 17,
- .num_channels = 16,
- .num_configs = 8,
- .num_gpios = 4,
- .has_temp = true,
- .has_input_buf = true,
- .has_int_ref = true,
- .has_ref2 = true,
- .clock = 2 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7173_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad7175_2_device_info = {
- .name = "ad7175-2",
- .id = AD7175_2_ID,
- .num_voltage_in = 5,
- .num_channels = 4,
- .num_configs = 4,
- .num_gpios = 2,
- .has_temp = true,
- .has_input_buf = true,
- .has_int_ref = true,
- .has_pow_supply_monitoring = true,
- .clock = 16 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7175_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad7175_8_device_info = {
- .name = "ad7175-8",
- .id = AD7175_8_ID,
- .num_voltage_in = 17,
- .num_channels = 16,
- .num_configs = 8,
- .num_gpios = 4,
- .has_temp = true,
- .has_input_buf = true,
- .has_int_ref = true,
- .has_ref2 = true,
- .has_pow_supply_monitoring = true,
- .clock = 16 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7175_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad7176_2_device_info = {
- .name = "ad7176-2",
- .id = AD7176_ID,
- .num_voltage_in = 5,
- .num_channels = 4,
- .num_configs = 4,
- .num_gpios = 2,
- .has_int_ref = true,
- .clock = 16 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7175_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad7177_2_device_info = {
- .name = "ad7177-2",
- .id = AD7177_ID,
- .num_voltage_in = 5,
- .num_channels = 4,
- .num_configs = 4,
- .num_gpios = 2,
- .has_temp = true,
- .has_input_buf = true,
- .has_int_ref = true,
- .has_pow_supply_monitoring = true,
- .clock = 16 * HZ_PER_MHZ,
- .odr_start_value = AD7177_ODR_START_VALUE,
- .sinc5_data_rates = ad7175_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
-};
-
static const char *const ad7173_ref_sel_str[] = {
[AD7173_SETUP_REF_SEL_EXT_REF] = "vref",
[AD7173_SETUP_REF_SEL_EXT_REF2] = "vref2",
@@ -559,6 +349,9 @@ static ssize_t ad7173_write_syscalib(struct iio_dev *indio_dev,
if (ret)
return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
mode = st->channels[chan->channel].syscalib_mode;
if (sys_calib) {
if (mode == AD7173_SYSCALIB_ZERO_SCALE)
@@ -569,6 +362,8 @@ static ssize_t ad7173_write_syscalib(struct iio_dev *indio_dev,
chan->address);
}
+ iio_device_release_direct(indio_dev);
+
return ret ? : len;
}
@@ -616,6 +411,76 @@ static int ad7173_calibrate_all(struct ad7173_state *st, struct iio_dev *indio_d
return 0;
}
+/*
+ * Associative array of channel pairs for open wire detection.
+ * The array is indexed by AIN and gives the associated channel pair
+ * to perform the open wire detection with: pair [0] is for
+ * non-differential inputs and pair [1] is for differential inputs.
+ */
+static int openwire_ain_to_channel_pair[][2][2] = {
+/* AIN Single Differential */
+ [0] = { { 0, 15 }, { 1, 2 } },
+ [1] = { { 1, 2 }, { 2, 1 } },
+ [2] = { { 3, 4 }, { 5, 6 } },
+ [3] = { { 5, 6 }, { 6, 5 } },
+ [4] = { { 7, 8 }, { 9, 10 } },
+ [5] = { { 9, 10 }, { 10, 9 } },
+ [6] = { { 11, 12 }, { 13, 14 } },
+ [7] = { { 13, 14 }, { 14, 13 } },
+};
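
As a concrete reading of the table (a sketch; the indices are taken straight from the array above): for AIN1, the single-ended detection pair is channels { 1, 2 } and the differential pair is { 2, 1 }:

    /* illustration only, indices from the table above */
    int *single = openwire_ain_to_channel_pair[1][0];  /* { 1, 2 } */
    int *diff   = openwire_ain_to_channel_pair[1][1];  /* { 2, 1 } */
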
+
+/*
+ * Open wire detection on the ad4111 works by running the same input
+ * measurement on two different channels and checking whether the
+ * difference between the two measurements exceeds a certain value
+ * (typically 300 mV).
+ */
+static int ad4111_openwire_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad7173_state *st = iio_priv(indio_dev);
+ struct ad7173_channel *adchan = &st->channels[chan->address];
+ struct ad7173_channel_config *cfg = &adchan->cfg;
+ int ret, val1, val2;
+
+ ret = regmap_set_bits(st->reg_gpiocon_regmap, AD7173_REG_GPIO,
+ AD4111_GPIO_GP_OW_EN);
+ if (ret)
+ return ret;
+
+ adchan->cfg.openwire_comp_chan =
+ openwire_ain_to_channel_pair[chan->channel][chan->differential][0];
+
+ ret = ad_sigma_delta_single_conversion(indio_dev, chan, &val1);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "Error running ad_sigma_delta single conversion: %d", ret);
+ goto out;
+ }
+
+ adchan->cfg.openwire_comp_chan =
+ openwire_ain_to_channel_pair[chan->channel][chan->differential][1];
+
+ ret = ad_sigma_delta_single_conversion(indio_dev, chan, &val2);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "Error running ad_sigma_delta single conversion: %d", ret);
+ goto out;
+ }
+
+ if (abs(val1 - val2) > cfg->openwire_thrsh_raw)
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, chan->address,
+ IIO_EV_TYPE_FAULT, IIO_EV_DIR_FAULT_OPENWIRE),
+ iio_get_time_ns(indio_dev));
+
+out:
+ adchan->cfg.openwire_comp_chan = -1;
+ regmap_clear_bits(st->reg_gpiocon_regmap, AD7173_REG_GPIO,
+ AD4111_GPIO_GP_OW_EN);
+ return ret;
+}
+
static int ad7173_mask_xlate(struct gpio_regmap *gpio, unsigned int base,
unsigned int offset, unsigned int *reg,
unsigned int *mask)
@@ -712,15 +577,28 @@ static struct ad7173_channel_config *
ad7173_find_live_config(struct ad7173_state *st, struct ad7173_channel_config *cfg)
{
struct ad7173_channel_config *cfg_aux;
- ptrdiff_t cmp_size;
int i;
- cmp_size = sizeof_field(struct ad7173_channel_config, config_props);
+ /*
+ * This static_assert makes sure the comparison below is updated
+ * whenever struct ad7173_channel_config is changed.
+ */
+ static_assert(sizeof_field(struct ad7173_channel_config, config_props) ==
+ sizeof(struct {
+ bool bipolar;
+ bool input_buf;
+ u8 odr;
+ u8 ref_sel;
+ }));
+
for (i = 0; i < st->num_channels; i++) {
cfg_aux = &st->channels[i].cfg;
if (cfg_aux->live &&
- !memcmp(&cfg->config_props, &cfg_aux->config_props, cmp_size))
+ cfg->bipolar == cfg_aux->bipolar &&
+ cfg->input_buf == cfg_aux->input_buf &&
+ cfg->odr == cfg_aux->odr &&
+ cfg->ref_sel == cfg_aux->ref_sel)
return cfg_aux;
}
return NULL;
@@ -813,6 +691,9 @@ static int ad7173_set_channel(struct ad_sigma_delta *sd, unsigned int channel)
FIELD_PREP(AD7173_CH_SETUP_SEL_MASK, st->channels[channel].cfg.cfg_slot) |
st->channels[channel].ain;
+ if (st->channels[channel].cfg.openwire_comp_chan >= 0)
+ channel = st->channels[channel].cfg.openwire_comp_chan;
+
return ad_sd_write_reg(&st->sd, AD7173_REG_CH(channel), 2, val);
}
@@ -861,21 +742,280 @@ static int ad7173_disable_all(struct ad_sigma_delta *sd)
static int ad7173_disable_one(struct ad_sigma_delta *sd, unsigned int chan)
{
+ struct ad7173_state *st = ad_sigma_delta_to_ad7173(sd);
+
+ if (st->channels[chan].cfg.openwire_comp_chan >= 0)
+ chan = st->channels[chan].cfg.openwire_comp_chan;
+
return ad_sd_write_reg(sd, AD7173_REG_CH(chan), 2, 0);
}
-static const struct ad_sigma_delta_info ad7173_sigma_delta_info = {
+static const struct ad_sigma_delta_info ad7173_sigma_delta_info_4_slots = {
+ .set_channel = ad7173_set_channel,
+ .append_status = ad7173_append_status,
+ .disable_all = ad7173_disable_all,
+ .disable_one = ad7173_disable_one,
+ .set_mode = ad7173_set_mode,
+ .has_registers = true,
+ .has_named_irqs = true,
+ .addr_shift = 0,
+ .read_mask = BIT(6),
+ .status_ch_mask = GENMASK(3, 0),
+ .data_reg = AD7173_REG_DATA,
+ .num_resetclks = 64,
+ .num_slots = 4,
+};
+
+static const struct ad_sigma_delta_info ad7173_sigma_delta_info_8_slots = {
.set_channel = ad7173_set_channel,
.append_status = ad7173_append_status,
.disable_all = ad7173_disable_all,
.disable_one = ad7173_disable_one,
.set_mode = ad7173_set_mode,
.has_registers = true,
+ .has_named_irqs = true,
.addr_shift = 0,
.read_mask = BIT(6),
.status_ch_mask = GENMASK(3, 0),
.data_reg = AD7173_REG_DATA,
.num_resetclks = 64,
+ .num_slots = 8,
+};
+
+static const struct ad7173_device_info ad4111_device_info = {
+ .name = "ad4111",
+ .id = AD4111_ID,
+ .sd_info = &ad7173_sigma_delta_info_8_slots,
+ .num_voltage_in_div = 8,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_voltage_in = 8,
+ .num_gpios = 2,
+ .higher_gpio_bits = true,
+ .has_temp = true,
+ .has_vincom_input = true,
+ .has_input_buf = true,
+ .has_current_inputs = true,
+ .has_int_ref = true,
+ .has_internal_fs_calibration = true,
+ .has_openwire_det = true,
+ .clock = 2 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7173_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad4112_device_info = {
+ .name = "ad4112",
+ .id = AD4112_ID,
+ .sd_info = &ad7173_sigma_delta_info_8_slots,
+ .num_voltage_in_div = 8,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_voltage_in = 8,
+ .num_gpios = 2,
+ .higher_gpio_bits = true,
+ .has_vincom_input = true,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_current_inputs = true,
+ .has_int_ref = true,
+ .has_internal_fs_calibration = true,
+ .clock = 2 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7173_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad4113_device_info = {
+ .name = "ad4113",
+ .id = AD4113_ID,
+ .sd_info = &ad7173_sigma_delta_info_8_slots,
+ .num_voltage_in_div = 8,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_voltage_in = 8,
+ .num_gpios = 2,
+ .data_reg_only_16bit = true,
+ .higher_gpio_bits = true,
+ .has_vincom_input = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .clock = 2 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7173_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad4114_device_info = {
+ .name = "ad4114",
+ .id = AD4114_ID,
+ .sd_info = &ad7173_sigma_delta_info_8_slots,
+ .num_voltage_in_div = 16,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_voltage_in = 16,
+ .num_gpios = 4,
+ .has_vincom_input = true,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .has_internal_fs_calibration = true,
+ .clock = 2 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7173_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad4115_device_info = {
+ .name = "ad4115",
+ .id = AD4115_ID,
+ .sd_info = &ad7173_sigma_delta_info_8_slots,
+ .num_voltage_in_div = 16,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_voltage_in = 16,
+ .num_gpios = 4,
+ .has_vincom_input = true,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .has_internal_fs_calibration = true,
+ .clock = 8 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad4115_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad4115_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad4116_device_info = {
+ .name = "ad4116",
+ .id = AD4116_ID,
+ .sd_info = &ad7173_sigma_delta_info_8_slots,
+ .num_voltage_in_div = 11,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_voltage_in = 16,
+ .num_gpios = 4,
+ .has_vincom_input = true,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .has_internal_fs_calibration = true,
+ .clock = 4 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad4116_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad4116_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad7172_2_device_info = {
+ .name = "ad7172-2",
+ .id = AD7172_2_ID,
+ .sd_info = &ad7173_sigma_delta_info_8_slots,
+ .num_voltage_in = 5,
+ .num_channels = 4,
+ .num_configs = 4,
+ .num_gpios = 2,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .has_pow_supply_monitoring = true,
+ .clock = 2 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7173_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad7172_4_device_info = {
+ .name = "ad7172-4",
+ .id = AD7172_4_ID,
+ .sd_info = &ad7173_sigma_delta_info_8_slots,
+ .num_voltage_in = 9,
+ .num_channels = 8,
+ .num_configs = 8,
+ .num_gpios = 4,
+ .has_input_buf = true,
+ .has_ref2 = true,
+ .has_pow_supply_monitoring = true,
+ .clock = 2 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7173_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad7173_8_device_info = {
+ .name = "ad7173-8",
+ .id = AD7173_ID,
+ .sd_info = &ad7173_sigma_delta_info_8_slots,
+ .num_voltage_in = 17,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_gpios = 4,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .has_ref2 = true,
+ .clock = 2 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7173_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad7175_2_device_info = {
+ .name = "ad7175-2",
+ .id = AD7175_2_ID,
+ .sd_info = &ad7173_sigma_delta_info_8_slots,
+ .num_voltage_in = 5,
+ .num_channels = 4,
+ .num_configs = 4,
+ .num_gpios = 2,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .has_pow_supply_monitoring = true,
+ .clock = 16 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7175_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad7175_8_device_info = {
+ .name = "ad7175-8",
+ .id = AD7175_8_ID,
+ .sd_info = &ad7173_sigma_delta_info_8_slots,
+ .num_voltage_in = 17,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_gpios = 4,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .has_ref2 = true,
+ .has_pow_supply_monitoring = true,
+ .clock = 16 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7175_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad7176_2_device_info = {
+ .name = "ad7176-2",
+ .id = AD7176_ID,
+ .sd_info = &ad7173_sigma_delta_info_4_slots,
+ .num_voltage_in = 5,
+ .num_channels = 4,
+ .num_configs = 4,
+ .num_gpios = 2,
+ .has_int_ref = true,
+ .clock = 16 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7175_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad7177_2_device_info = {
+ .name = "ad7177-2",
+ .id = AD7177_ID,
+ .sd_info = &ad7173_sigma_delta_info_4_slots,
+ .num_voltage_in = 5,
+ .num_channels = 4,
+ .num_configs = 4,
+ .num_gpios = 2,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .has_pow_supply_monitoring = true,
+ .clock = 16 * HZ_PER_MHZ,
+ .odr_start_value = AD7177_ODR_START_VALUE,
+ .sinc5_data_rates = ad7175_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
};
static int ad7173_setup(struct iio_dev *indio_dev)
@@ -969,6 +1109,12 @@ static int ad7173_read_raw(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
+ if (ch->openwire_det_en) {
+ ret = ad4111_openwire_event(indio_dev, chan);
+ if (ret < 0)
+ return ret;
+ }
+
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
@@ -1033,11 +1179,10 @@ static int ad7173_write_raw(struct iio_dev *indio_dev,
struct ad7173_state *st = iio_priv(indio_dev);
struct ad7173_channel_config *cfg;
unsigned int freq, i;
- int ret;
+ int ret = 0;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
switch (info) {
/*
@@ -1071,7 +1216,7 @@ static int ad7173_write_raw(struct iio_dev *indio_dev,
break;
}
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
@@ -1113,12 +1258,57 @@ static int ad7173_debug_reg_access(struct iio_dev *indio_dev, unsigned int reg,
return ad_sd_write_reg(&st->sd, reg, reg_size, writeval);
}
+static int ad7173_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ bool state)
+{
+ struct ad7173_state *st = iio_priv(indio_dev);
+ struct ad7173_channel *adchan = &st->channels[chan->address];
+
+ switch (type) {
+ case IIO_EV_TYPE_FAULT:
+ adchan->openwire_det_en = state;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad7173_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct ad7173_state *st = iio_priv(indio_dev);
+ struct ad7173_channel *adchan = &st->channels[chan->address];
+
+ switch (type) {
+ case IIO_EV_TYPE_FAULT:
+ return adchan->openwire_det_en;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_event_spec ad4111_events[] = {
+ {
+ .type = IIO_EV_TYPE_FAULT,
+ .dir = IIO_EV_DIR_FAULT_OPENWIRE,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ .mask_shared_by_all = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
static const struct iio_info ad7173_info = {
.read_raw = &ad7173_read_raw,
.write_raw = &ad7173_write_raw,
.debugfs_reg_access = &ad7173_debug_reg_access,
.validate_trigger = ad_sd_validate_trigger,
.update_scan_mode = ad7173_update_scan_mode,
+ .write_event_config = ad7173_write_event_config,
+ .read_event_config = ad7173_read_event_config,
};
static const struct iio_scan_type ad4113_scan_type = {
@@ -1322,6 +1512,37 @@ static int ad7173_validate_reference(struct ad7173_state *st, int ref_sel)
return 0;
}
+static int ad7173_validate_openwire_ain_inputs(struct ad7173_state *st,
+ bool differential,
+ unsigned int ain0,
+ unsigned int ain1)
+{
+ /*
+ * If the channel is configured as differential,
+ * the ad4111 requires specific AINs to be used together.
+ */
+ if (differential)
+ return (ain0 % 2) ? (ain0 - 1) == ain1 : (ain0 + 1) == ain1;
+
+ return ain1 == AD4111_VINCOM_INPUT;
+}
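
A minimal illustration of the pairing rule (hypothetical calls): an even-numbered AIN must pair with the next AIN and an odd-numbered one with the previous, while a single-ended input must be paired with VINCOM:

    ad7173_validate_openwire_ain_inputs(st, true, 2, 3);   /* -> 1 */
    ad7173_validate_openwire_ain_inputs(st, true, 3, 2);   /* -> 1 */
    ad7173_validate_openwire_ain_inputs(st, true, 2, 4);   /* -> 0 */
    ad7173_validate_openwire_ain_inputs(st, false, 2, AD4111_VINCOM_INPUT); /* -> 1 */
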
+
+static unsigned int ad7173_calc_openwire_thrsh_raw(struct ad7173_state *st,
+ struct iio_chan_spec *chan,
+ struct ad7173_channel *chan_st_priv,
+ unsigned int thrsh_mv)
+{
+ unsigned int thrsh_raw;
+
+ thrsh_raw =
+ BIT(chan->scan_type.realbits - !!(chan_st_priv->cfg.bipolar))
+ * thrsh_mv
+ / ad7173_get_ref_voltage_milli(st, chan_st_priv->cfg.ref_sel);
+ if (chan->channel < st->info->num_voltage_in_div)
+ thrsh_raw /= AD4111_DIVIDER_RATIO;
+
+ return thrsh_raw;
+}
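
A worked instance of that formula (illustrative numbers: 24-bit unipolar channel, 2500 mV reference, the 300 mV threshold from AD4111_OW_DET_THRSH_MV, and assuming a divider ratio of 10):

    thrsh_raw = BIT(24) * 300 / 2500 = 16777216 * 300 / 2500 = 2013265
    thrsh_raw /= 10;   /* input behind the voltage divider -> 201326 */
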
+
static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
{
struct ad7173_channel *chans_st_arr, *chan_st_priv;
@@ -1369,6 +1590,7 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
chan_st_priv->cfg.bipolar = false;
chan_st_priv->cfg.input_buf = st->info->has_input_buf;
chan_st_priv->cfg.ref_sel = AD7173_SETUP_REF_SEL_INT_REF;
+ chan_st_priv->cfg.openwire_comp_chan = -1;
st->adc_mode |= AD7173_ADC_MODE_REF_EN;
if (st->info->data_reg_only_16bit)
chan_arr[chan_index].scan_type = ad4113_scan_type;
@@ -1435,6 +1657,7 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
chan->channel = ain[0];
chan_st_priv->cfg.input_buf = st->info->has_input_buf;
chan_st_priv->cfg.odr = 0;
+ chan_st_priv->cfg.openwire_comp_chan = -1;
chan_st_priv->cfg.bipolar = fwnode_property_read_bool(child, "bipolar");
if (chan_st_priv->cfg.bipolar)
@@ -1449,6 +1672,14 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
chan_st_priv->cfg.input_buf = st->info->has_input_buf;
chan->channel2 = ain[1];
chan_st_priv->ain = AD7173_CH_ADDRESS(ain[0], ain[1]);
+ if (st->info->has_openwire_det &&
+ ad7173_validate_openwire_ain_inputs(st, chan->differential, ain[0], ain[1])) {
+ chan->event_spec = ad4111_events;
+ chan->num_event_specs = ARRAY_SIZE(ad4111_events);
+ chan_st_priv->cfg.openwire_thrsh_raw =
+ ad7173_calc_openwire_thrsh_raw(st, chan, chan_st_priv,
+ AD4111_OW_DET_THRSH_MV);
+ }
}
if (st->info->data_reg_only_16bit)
@@ -1515,12 +1746,6 @@ static int ad7173_fw_parse_device_config(struct iio_dev *indio_dev)
return ret;
}
- ret = fwnode_irq_get_byname(dev_fwnode(dev), "rdy");
- if (ret < 0)
- return dev_err_probe(dev, ret, "Interrupt 'rdy' is required\n");
-
- st->sigma_delta_info.irq_line = ret;
-
return ad7173_fw_parse_channel_config(indio_dev);
}
@@ -1552,9 +1777,7 @@ static int ad7173_probe(struct spi_device *spi)
spi->mode = SPI_MODE_3;
spi_setup(spi);
- st->sigma_delta_info = ad7173_sigma_delta_info;
- st->sigma_delta_info.num_slots = st->info->num_configs;
- ret = ad_sd_init(&st->sd, indio_dev, spi, &st->sigma_delta_info);
+ ret = ad_sd_init(&st->sd, indio_dev, spi, st->info->sd_info);
if (ret)
return ret;
diff --git a/drivers/iio/adc/ad7191.c b/drivers/iio/adc/ad7191.c
new file mode 100644
index 000000000000..d9cd903ffdd2
--- /dev/null
+++ b/drivers/iio/adc/ad7191.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AD7191 ADC driver
+ *
+ * Copyright 2025 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <linux/iio/adc/ad_sigma_delta.h>
+#include <linux/iio/iio.h>
+
+#define ad_sigma_delta_to_ad7191(sigmad) \
+ container_of((sigmad), struct ad7191_state, sd)
+
+#define AD7191_TEMP_CODES_PER_DEGREE 2815
+
+#define AD7191_CHAN_MASK BIT(0)
+#define AD7191_TEMP_MASK BIT(1)
+
+enum ad7191_channel {
+ AD7191_CH_AIN1_AIN2,
+ AD7191_CH_AIN3_AIN4,
+ AD7191_CH_TEMP,
+};
+
+/*
+ * NOTE:
+ * The AD7191 features a dual-purpose data out/ready (DOUT/RDY) output.
+ * To avoid contention on the SPI bus, it is therefore necessary to use
+ * SPI bus locking.
+ *
+ * The DOUT/RDY output must also be wired to an interrupt-capable GPIO.
+ *
+ * The SPI controller's chip select must be connected to the PDOWN pin
+ * of the ADC. When CS (PDOWN) is high, it powers down the device and
+ * resets the internal circuitry.
+ */
+
+struct ad7191_state {
+ struct ad_sigma_delta sd;
+ struct mutex lock; /* Protect device state */
+
+ struct gpio_descs *odr_gpios;
+ struct gpio_descs *pga_gpios;
+ struct gpio_desc *temp_gpio;
+ struct gpio_desc *chan_gpio;
+
+ u16 int_vref_mv;
+ const u32 (*scale_avail)[2];
+ size_t scale_avail_size;
+ u32 scale_index;
+ const u32 *samp_freq_avail;
+ size_t samp_freq_avail_size;
+ u32 samp_freq_index;
+
+ struct clk *mclk;
+};
+
+static int ad7191_set_channel(struct ad_sigma_delta *sd, unsigned int address)
+{
+ struct ad7191_state *st = ad_sigma_delta_to_ad7191(sd);
+ u8 temp_gpio_val, chan_gpio_val;
+
+ if (!FIELD_FIT(AD7191_CHAN_MASK | AD7191_TEMP_MASK, address))
+ return -EINVAL;
+
+ chan_gpio_val = FIELD_GET(AD7191_CHAN_MASK, address);
+ temp_gpio_val = FIELD_GET(AD7191_TEMP_MASK, address);
+
+ gpiod_set_value(st->chan_gpio, chan_gpio_val);
+ gpiod_set_value(st->temp_gpio, temp_gpio_val);
+
+ return 0;
+}
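
Tracing the bitfield mapping above with the addresses from enum ad7191_channel (a sketch; the GPIO levels follow directly from FIELD_GET on masks BIT(0) and BIT(1)):

    ad7191_set_channel(&st->sd, AD7191_CH_AIN1_AIN2); /* chan = 0, temp = 0 */
    ad7191_set_channel(&st->sd, AD7191_CH_AIN3_AIN4); /* chan = 1, temp = 0 */
    ad7191_set_channel(&st->sd, AD7191_CH_TEMP);      /* chan = 0, temp = 1 */
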
+
+static int ad7191_set_cs(struct ad_sigma_delta *sigma_delta, int assert)
+{
+ struct spi_transfer t = {
+ .len = 0,
+ .cs_change = assert,
+ };
+ struct spi_message m;
+
+ spi_message_init_with_transfers(&m, &t, 1);
+
+ return spi_sync_locked(sigma_delta->spi, &m);
+}
+
+static int ad7191_set_mode(struct ad_sigma_delta *sd,
+ enum ad_sigma_delta_mode mode)
+{
+ struct ad7191_state *st = ad_sigma_delta_to_ad7191(sd);
+
+ switch (mode) {
+ case AD_SD_MODE_CONTINUOUS:
+ case AD_SD_MODE_SINGLE:
+ return ad7191_set_cs(&st->sd, 1);
+ case AD_SD_MODE_IDLE:
+ return ad7191_set_cs(&st->sd, 0);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct ad_sigma_delta_info ad7191_sigma_delta_info = {
+ .set_channel = ad7191_set_channel,
+ .set_mode = ad7191_set_mode,
+ .has_registers = false,
+};
+
+static int ad7191_init_regulators(struct iio_dev *indio_dev)
+{
+ struct ad7191_state *st = iio_priv(indio_dev);
+ struct device *dev = &st->sd.spi->dev;
+ int ret;
+
+ ret = devm_regulator_get_enable(dev, "avdd");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable specified AVdd supply\n");
+
+ ret = devm_regulator_get_enable(dev, "dvdd");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable specified DVdd supply\n");
+
+ ret = devm_regulator_get_enable_read_voltage(dev, "vref");
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get Vref voltage\n");
+
+ st->int_vref_mv = ret / 1000;
+
+ return 0;
+}
+
+static int ad7191_config_setup(struct iio_dev *indio_dev)
+{
+ struct ad7191_state *st = iio_priv(indio_dev);
+ struct device *dev = &st->sd.spi->dev;
+ /* Sampling frequencies in Hz, see Table 5 */
+ static const u32 samp_freq[4] = { 120, 60, 50, 10 };
+ /* Gain options, see Table 7 */
+ const u32 gain[4] = { 1, 8, 64, 128 };
+ static u32 scale_buffer[4][2];
+ int odr_value, odr_index = 0, pga_value, pga_index = 0, i, ret;
+ u64 scale_uv;
+
+ st->samp_freq_index = 0;
+ st->scale_index = 0;
+
+ ret = device_property_read_u32(dev, "adi,odr-value", &odr_value);
+ if (ret && ret != -EINVAL)
+ return dev_err_probe(dev, ret, "Failed to get odr value.\n");
+
+ if (ret == -EINVAL) {
+ st->odr_gpios = devm_gpiod_get_array(dev, "odr", GPIOD_OUT_LOW);
+ if (IS_ERR(st->odr_gpios))
+ return dev_err_probe(dev, PTR_ERR(st->odr_gpios),
+ "Failed to get odr gpios.\n");
+
+ if (st->odr_gpios->ndescs != 2)
+ return dev_err_probe(dev, -EINVAL, "Expected 2 odr gpio pins.\n");
+
+ st->samp_freq_avail = samp_freq;
+ st->samp_freq_avail_size = ARRAY_SIZE(samp_freq);
+ } else {
+ for (i = 0; i < ARRAY_SIZE(samp_freq); i++) {
+ if (odr_value != samp_freq[i])
+ continue;
+ odr_index = i;
+ break;
+ }
+
+ st->samp_freq_avail = &samp_freq[odr_index];
+ st->samp_freq_avail_size = 1;
+
+ st->odr_gpios = NULL;
+ }
+
+ mutex_lock(&st->lock);
+
+ for (i = 0; i < ARRAY_SIZE(scale_buffer); i++) {
+ scale_uv = ((u64)st->int_vref_mv * NANO) >>
+ (indio_dev->channels[0].scan_type.realbits - 1);
+ do_div(scale_uv, gain[i]);
+ scale_buffer[i][1] = do_div(scale_uv, NANO);
+ scale_buffer[i][0] = scale_uv;
+ }
+
+ mutex_unlock(&st->lock);
+
+ ret = device_property_read_u32(dev, "adi,pga-value", &pga_value);
+ if (ret && ret != -EINVAL)
+ return dev_err_probe(dev, ret, "Failed to get pga value.\n");
+
+ if (ret == -EINVAL) {
+ st->pga_gpios = devm_gpiod_get_array(dev, "pga", GPIOD_OUT_LOW);
+ if (IS_ERR(st->pga_gpios))
+ return dev_err_probe(dev, PTR_ERR(st->pga_gpios),
+ "Failed to get pga gpios.\n");
+
+ if (st->pga_gpios->ndescs != 2)
+ return dev_err_probe(dev, -EINVAL, "Expected 2 pga gpio pins.\n");
+
+ st->scale_avail = scale_buffer;
+ st->scale_avail_size = ARRAY_SIZE(scale_buffer);
+ } else {
+ for (i = 0; i < ARRAY_SIZE(gain); i++) {
+ if (pga_value != gain[i])
+ continue;
+ pga_index = i;
+ break;
+ }
+
+ st->scale_avail = &scale_buffer[pga_index];
+ st->scale_avail_size = 1;
+
+ st->pga_gpios = NULL;
+ }
+
+ st->temp_gpio = devm_gpiod_get(dev, "temp", GPIOD_OUT_LOW);
+ if (IS_ERR(st->temp_gpio))
+ return dev_err_probe(dev, PTR_ERR(st->temp_gpio),
+ "Failed to get temp gpio.\n");
+
+ st->chan_gpio = devm_gpiod_get(dev, "chan", GPIOD_OUT_LOW);
+ if (IS_ERR(st->chan_gpio))
+ return dev_err_probe(dev, PTR_ERR(st->chan_gpio),
+ "Failed to get chan gpio.\n");
+
+ return 0;
+}
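
For reference, one pass of the scale loop in ad7191_config_setup above, with illustrative numbers (2500 mV reference, 24-bit channels, gain 1):

    scale_uv = (2500ULL * NANO) >> 23;  /* = 298023 */
    do_div(scale_uv, 1);                /* gain 1, unchanged */
    /* scale_buffer[0] = { 0, 298023 }, reported as 0.000298023 */
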
+
+static int ad7191_clock_setup(struct ad7191_state *st)
+{
+ struct device *dev = &st->sd.spi->dev;
+
+ st->mclk = devm_clk_get_optional_enabled(dev, "mclk");
+ if (IS_ERR(st->mclk))
+ return dev_err_probe(dev, PTR_ERR(st->mclk),
+ "Failed to get mclk.\n");
+
+ return 0;
+}
+
+static int ad7191_setup(struct iio_dev *indio_dev)
+{
+ struct ad7191_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = ad7191_init_regulators(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = ad7191_config_setup(indio_dev);
+ if (ret)
+ return ret;
+
+ return ad7191_clock_setup(st);
+}
+
+static int ad7191_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long m)
+{
+ struct ad7191_state *st = iio_priv(indio_dev);
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ return ad_sigma_delta_single_conversion(indio_dev, chan, val);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE: {
+ guard(mutex)(&st->lock);
+ *val = st->scale_avail[st->scale_index][0];
+ *val2 = st->scale_avail[st->scale_index][1];
+ return IIO_VAL_INT_PLUS_NANO;
+ }
+ case IIO_TEMP:
+ *val = 0;
+ *val2 = NANO / AD7191_TEMP_CODES_PER_DEGREE;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ *val = -(1 << (chan->scan_type.realbits - 1));
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ return IIO_VAL_INT;
+ case IIO_TEMP:
+ *val -= 273 * AD7191_TEMP_CODES_PER_DEGREE;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = st->samp_freq_avail[st->samp_freq_index];
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad7191_set_gain(struct ad7191_state *st, int gain_index)
+{
+ DECLARE_BITMAP(bitmap, 2) = { };
+
+ st->scale_index = gain_index;
+
+ bitmap_write(bitmap, gain_index, 0, 2);
+
+ return gpiod_multi_set_value_cansleep(st->pga_gpios, bitmap);
+}
+
+static int ad7191_set_samp_freq(struct ad7191_state *st, int samp_freq_index)
+{
+ DECLARE_BITMAP(bitmap, 2) = {};
+
+ st->samp_freq_index = samp_freq_index;
+
+ bitmap_write(bitmap, samp_freq_index, 0, 2);
+
+ return gpiod_multi_set_value_cansleep(st->odr_gpios, bitmap);
+}
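
A quick sketch of how the two-bit index lands on the GPIO pair (hypothetical index): bitmap_write() stores the index in bits [1:0] and gpiod_multi_set_value_cansleep() drives one descriptor per bit:

    /* samp_freq_index = 2 -> bitmap = 0b10 -> odr gpio 0 = 0, gpio 1 = 1 */
    ad7191_set_samp_freq(st, 2);  /* selects 50 Hz per the samp_freq table */
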
+
+static int __ad7191_write_raw(struct ad7191_state *st,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE: {
+ if (!st->pga_gpios)
+ return -EPERM;
+ guard(mutex)(&st->lock);
+ for (i = 0; i < st->scale_avail_size; i++) {
+ if (val2 == st->scale_avail[i][1])
+ return ad7191_set_gain(st, i);
+ }
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SAMP_FREQ: {
+ if (!st->odr_gpios)
+ return -EPERM;
+ guard(mutex)(&st->lock);
+ for (i = 0; i < st->samp_freq_avail_size; i++) {
+ if (val == st->samp_freq_avail[i])
+ return ad7191_set_samp_freq(st, i);
+ }
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad7191_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2,
+ long mask)
+{
+ struct ad7191_state *st = iio_priv(indio_dev);
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = __ad7191_write_raw(st, chan, val, val2, mask);
+
+ iio_device_release_direct(indio_dev);
+
+ return ret;
+}
+
+static int ad7191_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad7191_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, const int **vals,
+ int *type, int *length, long mask)
+{
+ struct ad7191_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *vals = (int *)st->scale_avail;
+ *type = IIO_VAL_INT_PLUS_NANO;
+ *length = st->scale_avail_size * 2;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = (int *)st->samp_freq_avail;
+ *type = IIO_VAL_INT;
+ *length = st->samp_freq_avail_size;
+ return IIO_AVAIL_LIST;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info ad7191_info = {
+ .read_raw = ad7191_read_raw,
+ .write_raw = ad7191_write_raw,
+ .write_raw_get_fmt = ad7191_write_raw_get_fmt,
+ .read_avail = ad7191_read_avail,
+ .validate_trigger = ad_sd_validate_trigger,
+};
+
+static const struct iio_chan_spec ad7191_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .address = AD7191_CH_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 24,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ },
+ {
+ .type = IIO_VOLTAGE,
+ .differential = 1,
+ .indexed = 1,
+ .channel = 1,
+ .channel2 = 2,
+ .address = AD7191_CH_AIN1_AIN2,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 24,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ },
+ {
+ .type = IIO_VOLTAGE,
+ .differential = 1,
+ .indexed = 1,
+ .channel = 3,
+ .channel2 = 4,
+ .address = AD7191_CH_AIN3_AIN4,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 2,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 24,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+static int ad7191_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct ad7191_state *st;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ ret = devm_mutex_init(dev, &st->lock);
+ if (ret)
+ return ret;
+
+ indio_dev->name = "ad7191";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ad7191_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad7191_channels);
+ indio_dev->info = &ad7191_info;
+
+ ret = ad_sd_init(&st->sd, indio_dev, spi, &ad7191_sigma_delta_info);
+ if (ret)
+ return ret;
+
+ ret = devm_ad_sd_setup_buffer_and_trigger(dev, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = ad7191_setup(indio_dev);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id ad7191_of_match[] = {
+ { .compatible = "adi,ad7191", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad7191_of_match);
+
+static const struct spi_device_id ad7191_id_table[] = {
+ { "ad7191" },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad7191_id_table);
+
+static struct spi_driver ad7191_driver = {
+ .driver = {
+ .name = "ad7191",
+ .of_match_table = ad7191_of_match,
+ },
+ .probe = ad7191_probe,
+ .id_table = ad7191_id_table,
+};
+module_spi_driver(ad7191_driver);
+
+MODULE_AUTHOR("Alisa-Dariana Roman <alisa.roman@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7191 ADC");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_AD_SIGMA_DELTA");
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index cfaf8f7e0a07..530e1d307860 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -7,6 +7,7 @@
#include <linux/interrupt.h>
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
@@ -256,6 +257,9 @@ static ssize_t ad7192_write_syscalib(struct iio_dev *indio_dev,
if (ret)
return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
temp = st->syscalib_mode[chan->channel];
if (sys_calib) {
if (temp == AD7192_SYSCALIB_ZERO_SCALE)
@@ -266,6 +270,8 @@ static ssize_t ad7192_write_syscalib(struct iio_dev *indio_dev,
chan->address);
}
+ iio_device_release_direct(indio_dev);
+
return ret ? ret : len;
}
@@ -693,9 +699,8 @@ static ssize_t ad7192_set(struct device *dev,
if (ret < 0)
return ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
switch ((u32)this_attr->address) {
case AD7192_REG_GPOCON:
@@ -718,7 +723,7 @@ static ssize_t ad7192_set(struct device *dev,
ret = -EINVAL;
}
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret ? ret : len;
}
@@ -945,82 +950,83 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
-static int ad7192_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
+static int __ad7192_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
{
struct ad7192_state *st = iio_priv(indio_dev);
- int ret, i, div;
+ int i, div;
unsigned int tmp;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
switch (mask) {
case IIO_CHAN_INFO_SCALE:
- ret = -EINVAL;
- for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
- if (val2 == st->scale_avail[i][1]) {
- ret = 0;
- tmp = st->conf;
- st->conf &= ~AD7192_CONF_GAIN_MASK;
- st->conf |= FIELD_PREP(AD7192_CONF_GAIN_MASK, i);
- if (tmp == st->conf)
- break;
- ad_sd_write_reg(&st->sd, AD7192_REG_CONF,
- 3, st->conf);
- ad7192_calibrate_all(st);
- break;
- }
- break;
- case IIO_CHAN_INFO_SAMP_FREQ:
- if (!val) {
- ret = -EINVAL;
- break;
+ for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++) {
+ if (val2 != st->scale_avail[i][1])
+ continue;
+
+ tmp = st->conf;
+ st->conf &= ~AD7192_CONF_GAIN_MASK;
+ st->conf |= FIELD_PREP(AD7192_CONF_GAIN_MASK, i);
+ if (tmp == st->conf)
+ return 0;
+ ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, st->conf);
+ ad7192_calibrate_all(st);
+ return 0;
}
+ return -EINVAL;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (!val)
+ return -EINVAL;
div = st->fclk / (val * ad7192_get_f_order(st) * 1024);
- if (div < 1 || div > 1023) {
- ret = -EINVAL;
- break;
- }
+ if (div < 1 || div > 1023)
+ return -EINVAL;
st->mode &= ~AD7192_MODE_RATE_MASK;
st->mode |= FIELD_PREP(AD7192_MODE_RATE_MASK, div);
ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
ad7192_update_filter_freq_avail(st);
- break;
+ return 0;
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
- ret = ad7192_set_3db_filter_freq(st, val, val2 / 1000);
- break;
+ return ad7192_set_3db_filter_freq(st, val, val2 / 1000);
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- ret = -EINVAL;
- for (i = 0; i < ARRAY_SIZE(st->oversampling_ratio_avail); i++)
- if (val == st->oversampling_ratio_avail[i]) {
- ret = 0;
- tmp = st->mode;
- st->mode &= ~AD7192_MODE_AVG_MASK;
- st->mode |= FIELD_PREP(AD7192_MODE_AVG_MASK, i);
- if (tmp == st->mode)
- break;
- ad_sd_write_reg(&st->sd, AD7192_REG_MODE,
- 3, st->mode);
- break;
- }
- ad7192_update_filter_freq_avail(st);
- break;
+ for (i = 0; i < ARRAY_SIZE(st->oversampling_ratio_avail); i++) {
+ if (val != st->oversampling_ratio_avail[i])
+ continue;
+
+ tmp = st->mode;
+ st->mode &= ~AD7192_MODE_AVG_MASK;
+ st->mode |= FIELD_PREP(AD7192_MODE_AVG_MASK, i);
+ if (tmp == st->mode)
+ return 0;
+ ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
+ ad7192_update_filter_freq_avail(st);
+ return 0;
+ }
+ return -EINVAL;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
+}
+
+static int ad7192_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
- mutex_unlock(&st->lock);
+ ret = __ad7192_write_raw(indio_dev, chan, val, val2, mask);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index 858c8be2ff1a..18559757f908 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -153,11 +153,10 @@ static int ad7266_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = ad7266_read_single(st, val, chan->address);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/ad7298.c b/drivers/iio/adc/ad7298.c
index b35bd4d9ef81..28b88092b4aa 100644
--- a/drivers/iio/adc/ad7298.c
+++ b/drivers/iio/adc/ad7298.c
@@ -232,16 +232,15 @@ static int ad7298_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
if (chan->address == AD7298_CH_TEMP)
ret = ad7298_scan_temp(st, val);
else
ret = ad7298_scan_direct(st, chan->address);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/ad7380.c b/drivers/iio/adc/ad7380.c
index 4f32cb22f140..4fcb49fdf566 100644
--- a/drivers/iio/adc/ad7380.c
+++ b/drivers/iio/adc/ad7380.c
@@ -15,6 +15,10 @@
* ad7386/7/8-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7386-4-7387-4-7388-4.pdf
* adaq4370-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/adaq4370-4.pdf
* adaq4380-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/adaq4380-4.pdf
+ * adaq4381-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/adaq4381-4.pdf
+ *
+ * HDL ad738x_fmc: https://analogdevicesinc.github.io/hdl/projects/ad738x_fmc/index.html
+ *
*/
#include <linux/align.h>
@@ -29,11 +33,14 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/spi/offload/consumer.h>
#include <linux/spi/spi.h>
#include <linux/units.h>
#include <linux/util_macros.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/buffer-dmaengine.h>
+#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
@@ -91,6 +98,12 @@
#define AD7380_NUM_SDO_LINES 1
#define AD7380_DEFAULT_GAIN_MILLI 1000
+/*
+ * When using SPI offload, storagebits is always 32, so it can't be used
+ * to compute struct spi_transfer.len; use realbits instead.
+ */
+#define AD7380_SPI_BYTES(scan_type) ((scan_type)->realbits > 16 ? 4 : 2)
+
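
As a sanity check of the macro (illustrative scan types; the offload arrays are defined further down): a 16-bit scan type still transfers 2 bytes, while its 18-bit resolution-boost variant needs 4:

    AD7380_SPI_BYTES(&ad7380_scan_type_16_s_offload[AD7380_SCAN_TYPE_NORMAL])           /* 2 */
    AD7380_SPI_BYTES(&ad7380_scan_type_16_s_offload[AD7380_SCAN_TYPE_RESOLUTION_BOOST]) /* 4 */
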
struct ad7380_timing_specs {
const unsigned int t_csh_ns; /* CS minimum high time */
};
@@ -98,6 +111,7 @@ struct ad7380_timing_specs {
struct ad7380_chip_info {
const char *name;
const struct iio_chan_spec *channels;
+ const struct iio_chan_spec *offload_channels;
unsigned int num_channels;
unsigned int num_simult_channels;
bool has_hardware_gain;
@@ -110,6 +124,25 @@ struct ad7380_chip_info {
unsigned int num_vcm_supplies;
const unsigned long *available_scan_masks;
const struct ad7380_timing_specs *timing_specs;
+ u32 max_conversion_rate_hz;
+};
+
+static const struct iio_event_spec ad7380_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_shared_by_dir = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_shared_by_dir = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_shared_by_all = BIT(IIO_EV_INFO_ENABLE),
+ },
};
enum {
@@ -197,6 +230,91 @@ static const struct iio_scan_type ad7380_scan_type_16_u[] = {
},
};
+/*
+ * Scan types for offload mode are defined separately here, since the
+ * currently available HDL only supports a storagebits value of 32.
+ */
+
+/* Extended scan types for 12-bit unsigned chips, offload support. */
+static const struct iio_scan_type ad7380_scan_type_12_u_offload[] = {
+ [AD7380_SCAN_TYPE_NORMAL] = {
+ .sign = 'u',
+ .realbits = 12,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+ [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = {
+ .sign = 'u',
+ .realbits = 14,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+};
+
+/* Extended scan types for 14-bit signed chips, offload support. */
+static const struct iio_scan_type ad7380_scan_type_14_s_offload[] = {
+ [AD7380_SCAN_TYPE_NORMAL] = {
+ .sign = 's',
+ .realbits = 14,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+ [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+};
+
+/* Extended scan types for 14-bit unsigned chips, offload support. */
+static const struct iio_scan_type ad7380_scan_type_14_u_offload[] = {
+ [AD7380_SCAN_TYPE_NORMAL] = {
+ .sign = 'u',
+ .realbits = 14,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+ [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+};
+
+/* Extended scan types for 16-bit signed chips, offload support. */
+static const struct iio_scan_type ad7380_scan_type_16_s_offload[] = {
+ [AD7380_SCAN_TYPE_NORMAL] = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+ [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = {
+ .sign = 's',
+ .realbits = 18,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+};
+
+/* Extended scan types for 16-bit unsigned chips, offload support. */
+static const struct iio_scan_type ad7380_scan_type_16_u_offload[] = {
+ [AD7380_SCAN_TYPE_NORMAL] = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+ [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = {
+ .sign = 'u',
+ .realbits = 18,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+};
+
#define _AD7380_CHANNEL(index, bits, diff, sign, gain) { \
.type = IIO_VOLTAGE, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
@@ -214,50 +332,127 @@ static const struct iio_scan_type ad7380_scan_type_16_u[] = {
.has_ext_scan_type = 1, \
.ext_scan_type = ad7380_scan_type_##bits##_##sign, \
.num_ext_scan_type = ARRAY_SIZE(ad7380_scan_type_##bits##_##sign), \
+ .event_spec = ad7380_events, \
+ .num_event_specs = ARRAY_SIZE(ad7380_events), \
+}
+
+#define _AD7380_OFFLOAD_CHANNEL(index, bits, diff, sign, gain) { \
+ .type = IIO_VOLTAGE, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ ((gain) ? BIT(IIO_CHAN_INFO_SCALE) : 0) | \
+ ((diff) ? 0 : BIT(IIO_CHAN_INFO_OFFSET)), \
+ .info_mask_shared_by_type = ((gain) ? 0 : BIT(IIO_CHAN_INFO_SCALE)) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .indexed = 1, \
+ .differential = (diff), \
+ .channel = (diff) ? (2 * (index)) : (index), \
+ .channel2 = (diff) ? (2 * (index) + 1) : 0, \
+ .scan_index = (index), \
+ .has_ext_scan_type = 1, \
+ .ext_scan_type = ad7380_scan_type_##bits##_##sign##_offload, \
+ .num_ext_scan_type = \
+ ARRAY_SIZE(ad7380_scan_type_##bits##_##sign##_offload), \
+ .event_spec = ad7380_events, \
+ .num_event_specs = ARRAY_SIZE(ad7380_events), \
}
+/*
+ * Notes on the offload channels:
+ * - There is no soft timestamp since everything is done in hardware.
+ * - There is a sampling frequency attribute added. This controls the SPI
+ * offload trigger.
+ * - The storagebits value depends on the SPI offload provider. Currently there
+ * is only one supported provider, namely the ADI PULSAR ADC HDL project,
+ * which always uses 32-bit words for data values, even for <= 16-bit ADCs.
+ * So the value is just hardcoded to 32 for now.
+ */
+
#define AD7380_CHANNEL(index, bits, diff, sign) \
_AD7380_CHANNEL(index, bits, diff, sign, false)
#define ADAQ4380_CHANNEL(index, bits, diff, sign) \
_AD7380_CHANNEL(index, bits, diff, sign, true)
-#define DEFINE_AD7380_2_CHANNEL(name, bits, diff, sign) \
+#define DEFINE_AD7380_2_CHANNEL(name, bits, diff, sign) \
+static const struct iio_chan_spec name[] = { \
+ AD7380_CHANNEL(0, bits, diff, sign), \
+ AD7380_CHANNEL(1, bits, diff, sign), \
+ IIO_CHAN_SOFT_TIMESTAMP(2), \
+}
+
+#define DEFINE_AD7380_4_CHANNEL(name, bits, diff, sign) \
+static const struct iio_chan_spec name[] = { \
+ AD7380_CHANNEL(0, bits, diff, sign), \
+ AD7380_CHANNEL(1, bits, diff, sign), \
+ AD7380_CHANNEL(2, bits, diff, sign), \
+ AD7380_CHANNEL(3, bits, diff, sign), \
+ IIO_CHAN_SOFT_TIMESTAMP(4), \
+}
+
+#define DEFINE_ADAQ4380_4_CHANNEL(name, bits, diff, sign) \
+static const struct iio_chan_spec name[] = { \
+ ADAQ4380_CHANNEL(0, bits, diff, sign), \
+ ADAQ4380_CHANNEL(1, bits, diff, sign), \
+ ADAQ4380_CHANNEL(2, bits, diff, sign), \
+ ADAQ4380_CHANNEL(3, bits, diff, sign), \
+ IIO_CHAN_SOFT_TIMESTAMP(4), \
+}
+
+#define DEFINE_AD7380_8_CHANNEL(name, bits, diff, sign) \
+static const struct iio_chan_spec name[] = { \
+ AD7380_CHANNEL(0, bits, diff, sign), \
+ AD7380_CHANNEL(1, bits, diff, sign), \
+ AD7380_CHANNEL(2, bits, diff, sign), \
+ AD7380_CHANNEL(3, bits, diff, sign), \
+ AD7380_CHANNEL(4, bits, diff, sign), \
+ AD7380_CHANNEL(5, bits, diff, sign), \
+ AD7380_CHANNEL(6, bits, diff, sign), \
+ AD7380_CHANNEL(7, bits, diff, sign), \
+ IIO_CHAN_SOFT_TIMESTAMP(8), \
+}
+
+#define AD7380_OFFLOAD_CHANNEL(index, bits, diff, sign) \
+_AD7380_OFFLOAD_CHANNEL(index, bits, diff, sign, false)
+
+#define ADAQ4380_OFFLOAD_CHANNEL(index, bits, diff, sign) \
+_AD7380_OFFLOAD_CHANNEL(index, bits, diff, sign, true)
+
+#define DEFINE_AD7380_2_OFFLOAD_CHANNEL(name, bits, diff, sign) \
static const struct iio_chan_spec name[] = { \
- AD7380_CHANNEL(0, bits, diff, sign), \
- AD7380_CHANNEL(1, bits, diff, sign), \
- IIO_CHAN_SOFT_TIMESTAMP(2), \
+ AD7380_OFFLOAD_CHANNEL(0, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(1, bits, diff, sign), \
}
-#define DEFINE_AD7380_4_CHANNEL(name, bits, diff, sign) \
+#define DEFINE_AD7380_4_OFFLOAD_CHANNEL(name, bits, diff, sign) \
static const struct iio_chan_spec name[] = { \
- AD7380_CHANNEL(0, bits, diff, sign), \
- AD7380_CHANNEL(1, bits, diff, sign), \
- AD7380_CHANNEL(2, bits, diff, sign), \
- AD7380_CHANNEL(3, bits, diff, sign), \
- IIO_CHAN_SOFT_TIMESTAMP(4), \
+ AD7380_OFFLOAD_CHANNEL(0, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(1, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(2, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(3, bits, diff, sign), \
}
-#define DEFINE_ADAQ4380_4_CHANNEL(name, bits, diff, sign) \
-static const struct iio_chan_spec name[] = { \
- ADAQ4380_CHANNEL(0, bits, diff, sign), \
- ADAQ4380_CHANNEL(1, bits, diff, sign), \
- ADAQ4380_CHANNEL(2, bits, diff, sign), \
- ADAQ4380_CHANNEL(3, bits, diff, sign), \
- IIO_CHAN_SOFT_TIMESTAMP(4), \
+#define DEFINE_ADAQ4380_4_OFFLOAD_CHANNEL(name, bits, diff, sign) \
+static const struct iio_chan_spec name[] = { \
+ AD7380_OFFLOAD_CHANNEL(0, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(1, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(2, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(3, bits, diff, sign), \
}
-#define DEFINE_AD7380_8_CHANNEL(name, bits, diff, sign) \
+#define DEFINE_AD7380_8_OFFLOAD_CHANNEL(name, bits, diff, sign) \
static const struct iio_chan_spec name[] = { \
- AD7380_CHANNEL(0, bits, diff, sign), \
- AD7380_CHANNEL(1, bits, diff, sign), \
- AD7380_CHANNEL(2, bits, diff, sign), \
- AD7380_CHANNEL(3, bits, diff, sign), \
- AD7380_CHANNEL(4, bits, diff, sign), \
- AD7380_CHANNEL(5, bits, diff, sign), \
- AD7380_CHANNEL(6, bits, diff, sign), \
- AD7380_CHANNEL(7, bits, diff, sign), \
- IIO_CHAN_SOFT_TIMESTAMP(8), \
+ AD7380_OFFLOAD_CHANNEL(0, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(1, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(2, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(3, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(4, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(5, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(6, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(7, bits, diff, sign), \
}
/* fully differential */
@@ -266,6 +461,7 @@ DEFINE_AD7380_2_CHANNEL(ad7381_channels, 14, 1, s);
DEFINE_AD7380_4_CHANNEL(ad7380_4_channels, 16, 1, s);
DEFINE_AD7380_4_CHANNEL(ad7381_4_channels, 14, 1, s);
DEFINE_ADAQ4380_4_CHANNEL(adaq4380_4_channels, 16, 1, s);
+DEFINE_ADAQ4380_4_CHANNEL(adaq4381_4_channels, 14, 1, s);
/* pseudo differential */
DEFINE_AD7380_2_CHANNEL(ad7383_channels, 16, 0, s);
DEFINE_AD7380_2_CHANNEL(ad7384_channels, 14, 0, s);
@@ -280,6 +476,28 @@ DEFINE_AD7380_8_CHANNEL(ad7386_4_channels, 16, 0, u);
DEFINE_AD7380_8_CHANNEL(ad7387_4_channels, 14, 0, u);
DEFINE_AD7380_8_CHANNEL(ad7388_4_channels, 12, 0, u);
+/* offload channels */
+DEFINE_AD7380_2_OFFLOAD_CHANNEL(ad7380_offload_channels, 16, 1, s);
+DEFINE_AD7380_2_OFFLOAD_CHANNEL(ad7381_offload_channels, 14, 1, s);
+DEFINE_AD7380_4_OFFLOAD_CHANNEL(ad7380_4_offload_channels, 16, 1, s);
+DEFINE_AD7380_4_OFFLOAD_CHANNEL(ad7381_4_offload_channels, 14, 1, s);
+DEFINE_ADAQ4380_4_OFFLOAD_CHANNEL(adaq4380_4_offload_channels, 16, 1, s);
+DEFINE_ADAQ4380_4_OFFLOAD_CHANNEL(adaq4381_4_offload_channels, 14, 1, s);
+
+/* pseudo differential */
+DEFINE_AD7380_2_OFFLOAD_CHANNEL(ad7383_offload_channels, 16, 0, s);
+DEFINE_AD7380_2_OFFLOAD_CHANNEL(ad7384_offload_channels, 14, 0, s);
+DEFINE_AD7380_4_OFFLOAD_CHANNEL(ad7383_4_offload_channels, 16, 0, s);
+DEFINE_AD7380_4_OFFLOAD_CHANNEL(ad7384_4_offload_channels, 14, 0, s);
+
+/* Single ended */
+DEFINE_AD7380_4_OFFLOAD_CHANNEL(ad7386_offload_channels, 16, 0, u);
+DEFINE_AD7380_4_OFFLOAD_CHANNEL(ad7387_offload_channels, 14, 0, u);
+DEFINE_AD7380_4_OFFLOAD_CHANNEL(ad7388_offload_channels, 12, 0, u);
+DEFINE_AD7380_8_OFFLOAD_CHANNEL(ad7386_4_offload_channels, 16, 0, u);
+DEFINE_AD7380_8_OFFLOAD_CHANNEL(ad7387_4_offload_channels, 14, 0, u);
+DEFINE_AD7380_8_OFFLOAD_CHANNEL(ad7388_4_offload_channels, 12, 0, u);
+
static const char * const ad7380_supplies[] = {
"vcc", "vlogic",
};
@@ -386,28 +604,33 @@ static const int ad7380_gains[] = {
static const struct ad7380_chip_info ad7380_chip_info = {
.name = "ad7380",
.channels = ad7380_channels,
+ .offload_channels = ad7380_offload_channels,
.num_channels = ARRAY_SIZE(ad7380_channels),
.num_simult_channels = 2,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
.available_scan_masks = ad7380_2_channel_scan_masks,
.timing_specs = &ad7380_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7381_chip_info = {
.name = "ad7381",
.channels = ad7381_channels,
+ .offload_channels = ad7381_offload_channels,
.num_channels = ARRAY_SIZE(ad7381_channels),
.num_simult_channels = 2,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
.available_scan_masks = ad7380_2_channel_scan_masks,
.timing_specs = &ad7380_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7383_chip_info = {
.name = "ad7383",
.channels = ad7383_channels,
+ .offload_channels = ad7383_offload_channels,
.num_channels = ARRAY_SIZE(ad7383_channels),
.num_simult_channels = 2,
.supplies = ad7380_supplies,
@@ -416,11 +639,13 @@ static const struct ad7380_chip_info ad7383_chip_info = {
.num_vcm_supplies = ARRAY_SIZE(ad7380_2_channel_vcm_supplies),
.available_scan_masks = ad7380_2_channel_scan_masks,
.timing_specs = &ad7380_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7384_chip_info = {
.name = "ad7384",
.channels = ad7384_channels,
+ .offload_channels = ad7384_offload_channels,
.num_channels = ARRAY_SIZE(ad7384_channels),
.num_simult_channels = 2,
.supplies = ad7380_supplies,
@@ -429,11 +654,13 @@ static const struct ad7380_chip_info ad7384_chip_info = {
.num_vcm_supplies = ARRAY_SIZE(ad7380_2_channel_vcm_supplies),
.available_scan_masks = ad7380_2_channel_scan_masks,
.timing_specs = &ad7380_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7386_chip_info = {
.name = "ad7386",
.channels = ad7386_channels,
+ .offload_channels = ad7386_offload_channels,
.num_channels = ARRAY_SIZE(ad7386_channels),
.num_simult_channels = 2,
.supplies = ad7380_supplies,
@@ -441,11 +668,13 @@ static const struct ad7380_chip_info ad7386_chip_info = {
.has_mux = true,
.available_scan_masks = ad7380_2x2_channel_scan_masks,
.timing_specs = &ad7380_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7387_chip_info = {
.name = "ad7387",
.channels = ad7387_channels,
+ .offload_channels = ad7387_offload_channels,
.num_channels = ARRAY_SIZE(ad7387_channels),
.num_simult_channels = 2,
.supplies = ad7380_supplies,
@@ -453,11 +682,13 @@ static const struct ad7380_chip_info ad7387_chip_info = {
.has_mux = true,
.available_scan_masks = ad7380_2x2_channel_scan_masks,
.timing_specs = &ad7380_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7388_chip_info = {
.name = "ad7388",
.channels = ad7388_channels,
+ .offload_channels = ad7388_offload_channels,
.num_channels = ARRAY_SIZE(ad7388_channels),
.num_simult_channels = 2,
.supplies = ad7380_supplies,
@@ -465,11 +696,13 @@ static const struct ad7380_chip_info ad7388_chip_info = {
.has_mux = true,
.available_scan_masks = ad7380_2x2_channel_scan_masks,
.timing_specs = &ad7380_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7380_4_chip_info = {
.name = "ad7380-4",
.channels = ad7380_4_channels,
+ .offload_channels = ad7380_4_offload_channels,
.num_channels = ARRAY_SIZE(ad7380_4_channels),
.num_simult_channels = 4,
.supplies = ad7380_supplies,
@@ -477,22 +710,26 @@ static const struct ad7380_chip_info ad7380_4_chip_info = {
.external_ref_only = true,
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7381_4_chip_info = {
.name = "ad7381-4",
.channels = ad7381_4_channels,
+ .offload_channels = ad7381_4_offload_channels,
.num_channels = ARRAY_SIZE(ad7381_4_channels),
.num_simult_channels = 4,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7383_4_chip_info = {
.name = "ad7383-4",
.channels = ad7383_4_channels,
+ .offload_channels = ad7383_4_offload_channels,
.num_channels = ARRAY_SIZE(ad7383_4_channels),
.num_simult_channels = 4,
.supplies = ad7380_supplies,
@@ -501,11 +738,13 @@ static const struct ad7380_chip_info ad7383_4_chip_info = {
.num_vcm_supplies = ARRAY_SIZE(ad7380_4_channel_vcm_supplies),
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7384_4_chip_info = {
.name = "ad7384-4",
.channels = ad7384_4_channels,
+ .offload_channels = ad7384_4_offload_channels,
.num_channels = ARRAY_SIZE(ad7384_4_channels),
.num_simult_channels = 4,
.supplies = ad7380_supplies,
@@ -514,11 +753,13 @@ static const struct ad7380_chip_info ad7384_4_chip_info = {
.num_vcm_supplies = ARRAY_SIZE(ad7380_4_channel_vcm_supplies),
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7386_4_chip_info = {
.name = "ad7386-4",
.channels = ad7386_4_channels,
+ .offload_channels = ad7386_4_offload_channels,
.num_channels = ARRAY_SIZE(ad7386_4_channels),
.num_simult_channels = 4,
.supplies = ad7380_supplies,
@@ -526,11 +767,13 @@ static const struct ad7380_chip_info ad7386_4_chip_info = {
.has_mux = true,
.available_scan_masks = ad7380_2x4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7387_4_chip_info = {
.name = "ad7387-4",
.channels = ad7387_4_channels,
+ .offload_channels = ad7387_4_offload_channels,
.num_channels = ARRAY_SIZE(ad7387_4_channels),
.num_simult_channels = 4,
.supplies = ad7380_supplies,
@@ -538,11 +781,13 @@ static const struct ad7380_chip_info ad7387_4_chip_info = {
.has_mux = true,
.available_scan_masks = ad7380_2x4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7388_4_chip_info = {
.name = "ad7388-4",
.channels = ad7388_4_channels,
+ .offload_channels = ad7388_4_offload_channels,
.num_channels = ARRAY_SIZE(ad7388_4_channels),
.num_simult_channels = 4,
.supplies = ad7380_supplies,
@@ -550,11 +795,13 @@ static const struct ad7380_chip_info ad7388_4_chip_info = {
.has_mux = true,
.available_scan_masks = ad7380_2x4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info adaq4370_4_chip_info = {
.name = "adaq4370-4",
.channels = adaq4380_4_channels,
+ .offload_channels = adaq4380_4_offload_channels,
.num_channels = ARRAY_SIZE(adaq4380_4_channels),
.num_simult_channels = 4,
.supplies = adaq4380_supplies,
@@ -563,11 +810,13 @@ static const struct ad7380_chip_info adaq4370_4_chip_info = {
.has_hardware_gain = true,
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
+ .max_conversion_rate_hz = 2 * MEGA,
};
static const struct ad7380_chip_info adaq4380_4_chip_info = {
.name = "adaq4380-4",
.channels = adaq4380_4_channels,
+ .offload_channels = adaq4380_4_offload_channels,
.num_channels = ARRAY_SIZE(adaq4380_4_channels),
.num_simult_channels = 4,
.supplies = adaq4380_supplies,
@@ -576,13 +825,32 @@ static const struct ad7380_chip_info adaq4380_4_chip_info = {
.has_hardware_gain = true,
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
+};
+
+static const struct ad7380_chip_info adaq4381_4_chip_info = {
+ .name = "adaq4381-4",
+ .channels = adaq4381_4_channels,
+ .offload_channels = adaq4381_4_offload_channels,
+ .num_channels = ARRAY_SIZE(adaq4381_4_channels),
+ .num_simult_channels = 4,
+ .supplies = adaq4380_supplies,
+ .num_supplies = ARRAY_SIZE(adaq4380_supplies),
+ .adaq_internal_ref_only = true,
+ .has_hardware_gain = true,
+ .available_scan_masks = ad7380_4_channel_scan_masks,
+ .timing_specs = &ad7380_4_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
+};
+
+static const struct spi_offload_config ad7380_offload_config = {
+ .capability_flags = SPI_OFFLOAD_CAP_TRIGGER |
+ SPI_OFFLOAD_CAP_RX_STREAM_DMA,
};
struct ad7380_state {
const struct ad7380_chip_info *chip_info;
struct spi_device *spi;
struct regmap *regmap;
- unsigned int oversampling_ratio;
bool resolution_boost_enabled;
unsigned int ch;
bool seq;
@@ -594,6 +862,13 @@ struct ad7380_state {
struct spi_message normal_msg;
struct spi_transfer seq_xfer[4];
struct spi_message seq_msg;
+ struct spi_transfer offload_xfer;
+ struct spi_message offload_msg;
+ struct spi_offload *offload;
+ struct spi_offload_trigger *offload_trigger;
+ unsigned long offload_trigger_hz;
+
+ int sample_freq_range[3];
/*
* DMA (thus cache coherency maintenance) requires the transfer buffers
* to live in their own cache lines.
@@ -663,6 +938,20 @@ static int ad7380_regmap_reg_read(void *context, unsigned int reg,
return 0;
}
+static const struct reg_default ad7380_reg_defaults[] = {
+ { AD7380_REG_ADDR_ALERT_LOW_TH, 0x800 },
+ { AD7380_REG_ADDR_ALERT_HIGH_TH, 0x7FF },
+};
+
+static const struct regmap_range ad7380_volatile_reg_ranges[] = {
+ regmap_reg_range(AD7380_REG_ADDR_CONFIG2, AD7380_REG_ADDR_ALERT),
+};
+
+static const struct regmap_access_table ad7380_volatile_regs = {
+ .yes_ranges = ad7380_volatile_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ad7380_volatile_reg_ranges),
+};
+
static const struct regmap_config ad7380_regmap_config = {
.reg_bits = 3,
.val_bits = 12,
@@ -670,20 +959,59 @@ static const struct regmap_config ad7380_regmap_config = {
.reg_write = ad7380_regmap_reg_write,
.max_register = AD7380_REG_ADDR_ALERT_HIGH_TH,
.can_sleep = true,
+ .reg_defaults = ad7380_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(ad7380_reg_defaults),
+ .volatile_table = &ad7380_volatile_regs,
+ .cache_type = REGCACHE_MAPLE,
};
static int ad7380_debugfs_reg_access(struct iio_dev *indio_dev, u32 reg,
u32 writeval, u32 *readval)
{
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- struct ad7380_state *st = iio_priv(indio_dev);
+ struct ad7380_state *st = iio_priv(indio_dev);
+ int ret;
- if (readval)
- return regmap_read(st->regmap, reg, readval);
- else
- return regmap_write(st->regmap, reg, writeval);
- }
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ if (readval)
+ ret = regmap_read(st->regmap, reg, readval);
+ else
+ ret = regmap_write(st->regmap, reg, writeval);
+
+ iio_device_release_direct(indio_dev);
+
+ return ret;
+}
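
This series converts drivers from iio_device_claim_direct_scoped() to the
explicit claim/release pair. A minimal sketch of the pattern, assuming only
that iio_device_claim_direct() returns true on success; do_hardware_access()
is a hypothetical helper:

static int example_direct_access(struct iio_dev *indio_dev)
{
	int ret;

	if (!iio_device_claim_direct(indio_dev))
		return -EBUSY;	/* buffered capture is running */

	ret = do_hardware_access(indio_dev);	/* hypothetical helper */

	iio_device_release_direct(indio_dev);

	return ret;
}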
+
+/**
+ * ad7380_regval_to_osr - convert OSR register value to ratio
+ * @regval: register value to check
+ *
+ * Returns: the ratio corresponding to the OSR register value. If regval is
+ * out of bounds, returns 1 (oversampling disabled).
+ */
+static int ad7380_regval_to_osr(unsigned int regval)
+{
+ if (regval >= ARRAY_SIZE(ad7380_oversampling_ratios))
+ return 1;
+
+ return ad7380_oversampling_ratios[regval];
+}
+
+static int ad7380_get_osr(struct ad7380_state *st, int *val)
+{
+ u32 tmp;
+ int ret;
+
+ ret = regmap_read(st->regmap, AD7380_REG_ADDR_CONFIG1, &tmp);
+ if (ret)
+ return ret;
+
+ *val = ad7380_regval_to_osr(FIELD_GET(AD7380_CONFIG1_OSR, tmp));
+
+ return 0;
}
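
A standalone sketch of the OSR decode above, assuming the
ad7380_oversampling_ratios table (defined earlier in the driver, not shown
in this hunk) holds { 1, 2, 4, 8, 16, 32 }:

static const int osr_ratios[] = { 1, 2, 4, 8, 16, 32 };	/* assumed contents */

static int regval_to_osr(unsigned int regval)
{
	if (regval >= ARRAY_SIZE(osr_ratios))
		return 1;	/* out of bounds: oversampling disabled */

	return osr_ratios[regval];
}

/* regval_to_osr(3) == 8, regval_to_osr(9) == 1 */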
/*
@@ -701,11 +1029,15 @@ static int ad7380_set_ch(struct ad7380_state *st, unsigned int ch)
.unit = SPI_DELAY_UNIT_NSECS,
}
};
- int ret;
+ int oversampling_ratio, ret;
if (st->ch == ch)
return 0;
+ ret = ad7380_get_osr(st, &oversampling_ratio);
+ if (ret)
+ return ret;
+
ret = regmap_update_bits(st->regmap,
AD7380_REG_ADDR_CONFIG1,
AD7380_CONFIG1_CH,
@@ -716,9 +1048,9 @@ static int ad7380_set_ch(struct ad7380_state *st, unsigned int ch)
st->ch = ch;
- if (st->oversampling_ratio > 1)
+ if (oversampling_ratio > 1)
xfer.delay.value = T_CONVERT_0_NS +
- T_CONVERT_X_NS * (st->oversampling_ratio - 1) *
+ T_CONVERT_X_NS * (oversampling_ratio - 1) *
st->chip_info->num_simult_channels / AD7380_NUM_SDO_LINES;
return spi_sync_transfer(st->spi, &xfer, 1);
@@ -729,20 +1061,25 @@ static int ad7380_set_ch(struct ad7380_state *st, unsigned int ch)
* @st: device instance specific state
* @scan_type: current scan type
*/
-static void ad7380_update_xfers(struct ad7380_state *st,
+static int ad7380_update_xfers(struct ad7380_state *st,
const struct iio_scan_type *scan_type)
{
struct spi_transfer *xfer = st->seq ? st->seq_xfer : st->normal_xfer;
unsigned int t_convert = T_CONVERT_NS;
+ int oversampling_ratio, ret;
/*
* In the case of oversampling, conversion time is higher than in normal
* mode. Technically T_CONVERT_X_NS is lower for some chips, but we use
* the maximum value for simplicity for now.
*/
- if (st->oversampling_ratio > 1)
+ ret = ad7380_get_osr(st, &oversampling_ratio);
+ if (ret)
+ return ret;
+
+ if (oversampling_ratio > 1)
t_convert = T_CONVERT_0_NS + T_CONVERT_X_NS *
- (st->oversampling_ratio - 1) *
+ (oversampling_ratio - 1) *
st->chip_info->num_simult_channels / AD7380_NUM_SDO_LINES;
if (st->seq) {
@@ -751,11 +1088,11 @@ static void ad7380_update_xfers(struct ad7380_state *st,
xfer[2].bits_per_word = xfer[3].bits_per_word =
scan_type->realbits;
xfer[2].len = xfer[3].len =
- BITS_TO_BYTES(scan_type->storagebits) *
+ AD7380_SPI_BYTES(scan_type) *
st->chip_info->num_simult_channels;
xfer[3].rx_buf = xfer[2].rx_buf + xfer[2].len;
/* Additional delay required here when oversampling is enabled */
- if (st->oversampling_ratio > 1)
+ if (oversampling_ratio > 1)
xfer[2].delay.value = t_convert;
else
xfer[2].delay.value = 0;
@@ -764,16 +1101,145 @@ static void ad7380_update_xfers(struct ad7380_state *st,
xfer[0].delay.value = t_convert;
xfer[0].delay.unit = SPI_DELAY_UNIT_NSECS;
xfer[1].bits_per_word = scan_type->realbits;
- xfer[1].len = BITS_TO_BYTES(scan_type->storagebits) *
+ xfer[1].len = AD7380_SPI_BYTES(scan_type) *
st->chip_info->num_simult_channels;
}
+
+ return 0;
}
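
A worked example of the oversampling conversion-time formula above, with
assumed constants T_CONVERT_0_NS = 10 and T_CONVERT_X_NS = 500 (the real
values are defined earlier in the driver):

/*
 * oversampling_ratio = 4, num_simult_channels = 2, AD7380_NUM_SDO_LINES = 1:
 *
 *   t_convert = 10 + 500 * (4 - 1) * 2 / 1 = 3010 ns
 */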
+static int ad7380_set_sample_freq(struct ad7380_state *st, int val)
+{
+ struct spi_offload_trigger_config config = {
+ .type = SPI_OFFLOAD_TRIGGER_PERIODIC,
+ .periodic = {
+ .frequency_hz = val,
+ },
+ };
+ int ret;
+
+ ret = spi_offload_trigger_validate(st->offload_trigger, &config);
+ if (ret)
+ return ret;
+
+ st->offload_trigger_hz = config.periodic.frequency_hz;
+
+ return 0;
+}
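
Note that spi_offload_trigger_validate() may round the requested frequency
to what the trigger hardware can generate, and the rounded value is what
gets stored. A brief usage sketch:

/* Request 1 MHz; the trigger may adjust config.periodic.frequency_hz. */
ret = ad7380_set_sample_freq(st, 1 * MEGA);
if (ret)
	return ret;
/* st->offload_trigger_hz now holds the validated (possibly rounded) rate. */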
+
+static int ad7380_init_offload_msg(struct ad7380_state *st,
+ struct iio_dev *indio_dev)
+{
+ struct spi_transfer *xfer = &st->offload_xfer;
+ struct device *dev = &st->spi->dev;
+ const struct iio_scan_type *scan_type;
+ int oversampling_ratio;
+ int ret;
+
+ scan_type = iio_get_current_scan_type(indio_dev,
+ &indio_dev->channels[0]);
+ if (IS_ERR(scan_type))
+ return PTR_ERR(scan_type);
+
+ if (st->chip_info->has_mux) {
+ int index;
+
+ ret = iio_active_scan_mask_index(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ index = ret;
+ if (index == AD7380_SCAN_MASK_SEQ) {
+ ret = regmap_set_bits(st->regmap, AD7380_REG_ADDR_CONFIG1,
+ AD7380_CONFIG1_SEQ);
+ if (ret)
+ return ret;
+
+ st->seq = true;
+ } else {
+ ret = ad7380_set_ch(st, index);
+ if (ret)
+ return ret;
+ }
+ }
+
+ ret = ad7380_get_osr(st, &oversampling_ratio);
+ if (ret)
+ return ret;
+
+ xfer->bits_per_word = scan_type->realbits;
+ xfer->offload_flags = SPI_OFFLOAD_XFER_RX_STREAM;
+ xfer->len = AD7380_SPI_BYTES(scan_type) * st->chip_info->num_simult_channels;
+
+ spi_message_init_with_transfers(&st->offload_msg, xfer, 1);
+ st->offload_msg.offload = st->offload;
+
+ ret = spi_optimize_message(st->spi, &st->offload_msg);
+ if (ret) {
+ dev_err(dev, "failed to prepare offload msg, err: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ad7380_offload_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct ad7380_state *st = iio_priv(indio_dev);
+ struct spi_offload_trigger_config config = {
+ .type = SPI_OFFLOAD_TRIGGER_PERIODIC,
+ .periodic = {
+ .frequency_hz = st->offload_trigger_hz,
+ },
+ };
+ int ret;
+
+ ret = ad7380_init_offload_msg(st, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = spi_offload_trigger_enable(st->offload, st->offload_trigger, &config);
+ if (ret)
+ spi_unoptimize_message(&st->offload_msg);
+
+ return ret;
+}
+
+static int ad7380_offload_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct ad7380_state *st = iio_priv(indio_dev);
+ int ret;
+
+ if (st->seq) {
+ ret = regmap_update_bits(st->regmap,
+ AD7380_REG_ADDR_CONFIG1,
+ AD7380_CONFIG1_SEQ,
+ FIELD_PREP(AD7380_CONFIG1_SEQ, 0));
+ if (ret)
+ return ret;
+
+ st->seq = false;
+ }
+
+ spi_offload_trigger_disable(st->offload, st->offload_trigger);
+
+ spi_unoptimize_message(&st->offload_msg);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops ad7380_offload_buffer_setup_ops = {
+ .postenable = ad7380_offload_buffer_postenable,
+ .predisable = ad7380_offload_buffer_predisable,
+};
+
static int ad7380_triggered_buffer_preenable(struct iio_dev *indio_dev)
{
struct ad7380_state *st = iio_priv(indio_dev);
const struct iio_scan_type *scan_type;
struct spi_message *msg = &st->normal_msg;
+ int ret;
/*
* Currently, we always read all channels at the same time. The scan_type
@@ -785,7 +1251,6 @@ static int ad7380_triggered_buffer_preenable(struct iio_dev *indio_dev)
if (st->chip_info->has_mux) {
unsigned int index;
- int ret;
/*
* Depending on the requested scan_mask and current state,
@@ -816,7 +1281,9 @@ static int ad7380_triggered_buffer_preenable(struct iio_dev *indio_dev)
}
- ad7380_update_xfers(st, scan_type);
+ ret = ad7380_update_xfers(st, scan_type);
+ if (ret)
+ return ret;
return spi_optimize_message(st->spi, msg);
}
@@ -889,13 +1356,15 @@ static int ad7380_read_direct(struct ad7380_state *st, unsigned int scan_index,
return ret;
}
- ad7380_update_xfers(st, scan_type);
+ ret = ad7380_update_xfers(st, scan_type);
+ if (ret)
+ return ret;
ret = spi_sync(st->spi, &st->normal_msg);
if (ret < 0)
return ret;
- if (scan_type->storagebits > 16) {
+ if (scan_type->realbits > 16) {
if (scan_type->sign == 's')
*val = sign_extend32(*(u32 *)(st->scan_data + 4 * index),
scan_type->realbits - 1);
@@ -920,6 +1389,7 @@ static int ad7380_read_raw(struct iio_dev *indio_dev,
{
struct ad7380_state *st = iio_priv(indio_dev);
const struct iio_scan_type *scan_type;
+ int ret;
scan_type = iio_get_current_scan_type(indio_dev, chan);
@@ -928,11 +1398,15 @@ static int ad7380_read_raw(struct iio_dev *indio_dev,
switch (info) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- return ad7380_read_direct(st, chan->scan_index,
- scan_type, val);
- }
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = ad7380_read_direct(st, chan->scan_index,
+ scan_type, val);
+
+ iio_device_release_direct(indio_dev);
+
+ return ret;
case IIO_CHAN_INFO_SCALE:
/*
* According to the datasheet, the LSB size is:
@@ -961,8 +1435,19 @@ static int ad7380_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- *val = st->oversampling_ratio;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = ad7380_get_osr(st, val);
+
+ iio_device_release_direct(indio_dev);
+
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = st->offload_trigger_hz;
return IIO_VAL_INT;
default:
return -EINVAL;
@@ -974,6 +1459,8 @@ static int ad7380_read_avail(struct iio_dev *indio_dev,
const int **vals, int *type, int *length,
long mask)
{
+ struct ad7380_state *st = iio_priv(indio_dev);
+
switch (mask) {
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
*vals = ad7380_oversampling_ratios;
@@ -981,6 +1468,10 @@ static int ad7380_read_avail(struct iio_dev *indio_dev,
*type = IIO_VAL_INT;
return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = st->sample_freq_range;
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_RANGE;
default:
return -EINVAL;
}
@@ -1008,47 +1499,61 @@ static int ad7380_osr_to_regval(int ratio)
return -EINVAL;
}
+static int ad7380_set_oversampling_ratio(struct ad7380_state *st, int val)
+{
+ int ret, osr, boost;
+
+ osr = ad7380_osr_to_regval(val);
+ if (osr < 0)
+ return osr;
+
+ /* always enable resolution boost when oversampling is enabled */
+ boost = osr > 0 ? 1 : 0;
+
+ ret = regmap_update_bits(st->regmap,
+ AD7380_REG_ADDR_CONFIG1,
+ AD7380_CONFIG1_OSR | AD7380_CONFIG1_RES,
+ FIELD_PREP(AD7380_CONFIG1_OSR, osr) |
+ FIELD_PREP(AD7380_CONFIG1_RES, boost));
+
+ if (ret)
+ return ret;
+
+ st->resolution_boost_enabled = boost;
+
+ /*
+ * Perform a soft reset. This will flush the oversampling
+ * block and FIFO but will maintain the content of the
+ * configurable registers.
+ */
+ ret = regmap_update_bits(st->regmap,
+ AD7380_REG_ADDR_CONFIG2,
+ AD7380_CONFIG2_RESET,
+ FIELD_PREP(AD7380_CONFIG2_RESET,
+ AD7380_CONFIG2_RESET_SOFT));
+ return ret;
+}
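
The OSR and RES fields are packed with FIELD_PREP() against their masks. A
minimal model of that packing, assuming purely for illustration that
AD7380_CONFIG1_OSR is GENMASK(5, 3) and AD7380_CONFIG1_RES is BIT(2):

/*
 * FIELD_PREP(GENMASK(5, 3), 0x2) == 0x2 << 3 == 0x10
 * FIELD_PREP(BIT(2), 1)          == 1 << 2  == 0x04
 * The two prepared fields are OR'd together and written in a single
 * regmap_update_bits() call, so OSR and RES change together.
 */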
static int ad7380_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val,
int val2, long mask)
{
struct ad7380_state *st = iio_priv(indio_dev);
- int ret, osr, boost;
+ int ret;
switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val < 1)
+ return -EINVAL;
+ return ad7380_set_sample_freq(st, val);
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- osr = ad7380_osr_to_regval(val);
- if (osr < 0)
- return osr;
-
- /* always enable resolution boost when oversampling is enabled */
- boost = osr > 0 ? 1 : 0;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- ret = regmap_update_bits(st->regmap,
- AD7380_REG_ADDR_CONFIG1,
- AD7380_CONFIG1_OSR | AD7380_CONFIG1_RES,
- FIELD_PREP(AD7380_CONFIG1_OSR, osr) |
- FIELD_PREP(AD7380_CONFIG1_RES, boost));
+ ret = ad7380_set_oversampling_ratio(st, val);
- if (ret)
- return ret;
+ iio_device_release_direct(indio_dev);
- st->oversampling_ratio = val;
- st->resolution_boost_enabled = boost;
-
- /*
- * Perform a soft reset. This will flush the oversampling
- * block and FIFO but will maintain the content of the
- * configurable registers.
- */
- return regmap_update_bits(st->regmap,
- AD7380_REG_ADDR_CONFIG2,
- AD7380_CONFIG2_RESET,
- FIELD_PREP(AD7380_CONFIG2_RESET,
- AD7380_CONFIG2_RESET_SOFT));
- }
- unreachable();
+ return ret;
default:
return -EINVAL;
}
@@ -1063,12 +1568,179 @@ static int ad7380_get_current_scan_type(const struct iio_dev *indio_dev,
: AD7380_SCAN_TYPE_NORMAL;
}
+static int ad7380_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct ad7380_state *st = iio_priv(indio_dev);
+ int tmp, ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = regmap_read(st->regmap, AD7380_REG_ADDR_CONFIG1, &tmp);
+
+ iio_device_release_direct(indio_dev);
+
+ if (ret)
+ return ret;
+
+ return FIELD_GET(AD7380_CONFIG1_ALERTEN, tmp);
+}
+
+static int ad7380_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ bool state)
+{
+ struct ad7380_state *st = iio_priv(indio_dev);
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = regmap_update_bits(st->regmap,
+ AD7380_REG_ADDR_CONFIG1,
+ AD7380_CONFIG1_ALERTEN,
+ FIELD_PREP(AD7380_CONFIG1_ALERTEN, state));
+
+ iio_device_release_direct(indio_dev);
+
+ return ret;
+}
+
+static int ad7380_get_alert_th(struct ad7380_state *st,
+ enum iio_event_direction dir,
+ int *val)
+{
+ int ret, tmp;
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = regmap_read(st->regmap,
+ AD7380_REG_ADDR_ALERT_HIGH_TH,
+ &tmp);
+ if (ret)
+ return ret;
+
+ *val = FIELD_GET(AD7380_ALERT_HIGH_TH, tmp);
+ return IIO_VAL_INT;
+ case IIO_EV_DIR_FALLING:
+ ret = regmap_read(st->regmap,
+ AD7380_REG_ADDR_ALERT_LOW_TH,
+ &tmp);
+ if (ret)
+ return ret;
+
+ *val = FIELD_GET(AD7380_ALERT_LOW_TH, tmp);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad7380_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct ad7380_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = ad7380_get_alert_th(st, dir, val);
+
+ iio_device_release_direct(indio_dev);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad7380_set_alert_th(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_direction dir,
+ int val)
+{
+ struct ad7380_state *st = iio_priv(indio_dev);
+ const struct iio_scan_type *scan_type;
+ u16 th;
+
+ /*
+ * According to the datasheet,
+ * AD7380_REG_ADDR_ALERT_HIGH_TH[11:0] are the 12 MSBs of the 16-bit
+ * internal alert high register; the LSBs are set to 0xf.
+ * AD7380_REG_ADDR_ALERT_LOW_TH[11:0] are the 12 MSBs of the 16-bit
+ * internal alert low register; the LSBs are set to 0x0.
+ *
+ * When the alert is enabled, the ADC conversion result is compared
+ * immediately to the alert high/low thresholds, before any
+ * oversampling. This means that the thresholds are the same for
+ * normal mode and oversampling mode.
+ */
+
+ /* Extract the 12 MSBs of val */
+ scan_type = iio_get_current_scan_type(indio_dev, chan);
+ if (IS_ERR(scan_type))
+ return PTR_ERR(scan_type);
+
+ th = val >> (scan_type->realbits - 12);
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return regmap_write(st->regmap,
+ AD7380_REG_ADDR_ALERT_HIGH_TH,
+ th);
+ case IIO_EV_DIR_FALLING:
+ return regmap_write(st->regmap,
+ AD7380_REG_ADDR_ALERT_LOW_TH,
+ th);
+ default:
+ return -EINVAL;
+ }
+}
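
A worked example of the MSB extraction above: the alert registers keep only
the top 12 bits of the full-scale code, so the shift depends on the
channel's current resolution.

/*
 * realbits = 16: th = val >> (16 - 12) = val >> 4
 *                e.g. val = 0x7FF0 -> th = 0x7FF
 * realbits = 14: th = val >> (14 - 12) = val >> 2
 */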
+
+static int ad7380_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ int ret;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = ad7380_set_alert_th(indio_dev, chan, dir, val);
+
+ iio_device_release_direct(indio_dev);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct iio_info ad7380_info = {
.read_raw = &ad7380_read_raw,
.read_avail = &ad7380_read_avail,
.write_raw = &ad7380_write_raw,
.get_current_scan_type = &ad7380_get_current_scan_type,
.debugfs_reg_access = &ad7380_debugfs_reg_access,
+ .read_event_config = &ad7380_read_event_config,
+ .write_event_config = &ad7380_write_event_config,
+ .read_event_value = &ad7380_read_event_value,
+ .write_event_value = &ad7380_write_event_value,
};
static int ad7380_init(struct ad7380_state *st, bool external_ref_en)
@@ -1092,7 +1764,6 @@ static int ad7380_init(struct ad7380_state *st, bool external_ref_en)
}
/* This is the default value after reset. */
- st->oversampling_ratio = 1;
st->ch = 0;
st->seq = false;
@@ -1103,6 +1774,53 @@ static int ad7380_init(struct ad7380_state *st, bool external_ref_en)
AD7380_NUM_SDO_LINES));
}
+static int ad7380_probe_spi_offload(struct iio_dev *indio_dev,
+ struct ad7380_state *st)
+{
+ struct spi_device *spi = st->spi;
+ struct device *dev = &spi->dev;
+ struct dma_chan *rx_dma;
+ int sample_rate, ret;
+
+ indio_dev->setup_ops = &ad7380_offload_buffer_setup_ops;
+ indio_dev->channels = st->chip_info->offload_channels;
+ /* Just removing the timestamp channel. */
+ indio_dev->num_channels--;
+
+ st->offload_trigger = devm_spi_offload_trigger_get(dev, st->offload,
+ SPI_OFFLOAD_TRIGGER_PERIODIC);
+ if (IS_ERR(st->offload_trigger))
+ return dev_err_probe(dev, PTR_ERR(st->offload_trigger),
+ "failed to get offload trigger\n");
+
+ sample_rate = st->chip_info->max_conversion_rate_hz *
+ AD7380_NUM_SDO_LINES / st->chip_info->num_simult_channels;
+
+ st->sample_freq_range[0] = 1; /* min */
+ st->sample_freq_range[1] = 1; /* step */
+ st->sample_freq_range[2] = sample_rate; /* max */
+
+ /*
+ * Start with a fairly low frequency to allow 32x oversampling; the user
+ * is then responsible for adjusting the frequency for the specific use
+ * case.
+ */
+ ret = ad7380_set_sample_freq(st, sample_rate / 32);
+ if (ret)
+ return ret;
+
+ rx_dma = devm_spi_offload_rx_stream_request_dma_chan(dev, st->offload);
+ if (IS_ERR(rx_dma))
+ return dev_err_probe(dev, PTR_ERR(rx_dma),
+ "failed to get offload RX DMA\n");
+
+ ret = devm_iio_dmaengine_buffer_setup_with_handle(dev, indio_dev,
+ rx_dma, IIO_BUFFER_DIRECTION_IN);
+ if (ret)
+ return dev_err_probe(dev, ret, "cannot setup dma buffer\n");
+
+ return 0;
+}
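
A worked example of the rate computation above, assuming
AD7380_NUM_SDO_LINES = 1 (the driver's current configuration) for a
4-channel, 4 MSPS part:

/*
 * sample_rate = 4 MHz * 1 / 4 = 1 MHz  (maximum trigger rate)
 * initial frequency = 1 MHz / 32 = 31250 Hz, low enough that 32x
 * oversampling still fits within the conversion time.
 */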
+
static int ad7380_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
@@ -1274,12 +1992,24 @@ static int ad7380_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->available_scan_masks = st->chip_info->available_scan_masks;
- ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
- iio_pollfunc_store_time,
- ad7380_trigger_handler,
- &ad7380_buffer_setup_ops);
- if (ret)
- return ret;
+ st->offload = devm_spi_offload_get(dev, spi, &ad7380_offload_config);
+ ret = PTR_ERR_OR_ZERO(st->offload);
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to get offload\n");
+
+ /* If no SPI offload, fall back to low speed usage. */
+ if (ret == -ENODEV) {
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ iio_pollfunc_store_time,
+ ad7380_trigger_handler,
+ &ad7380_buffer_setup_ops);
+ if (ret)
+ return ret;
+ } else {
+ ret = ad7380_probe_spi_offload(indio_dev, st);
+ if (ret)
+ return ret;
+ }
ret = ad7380_init(st, external_ref_en);
if (ret)
@@ -1305,6 +2035,7 @@ static const struct of_device_id ad7380_of_match_table[] = {
{ .compatible = "adi,ad7388-4", .data = &ad7388_4_chip_info },
{ .compatible = "adi,adaq4370-4", .data = &adaq4370_4_chip_info },
{ .compatible = "adi,adaq4380-4", .data = &adaq4380_4_chip_info },
+ { .compatible = "adi,adaq4381-4", .data = &adaq4381_4_chip_info },
{ }
};
@@ -1325,6 +2056,7 @@ static const struct spi_device_id ad7380_id_table[] = {
{ "ad7388-4", (kernel_ulong_t)&ad7388_4_chip_info },
{ "adaq4370-4", (kernel_ulong_t)&adaq4370_4_chip_info },
{ "adaq4380-4", (kernel_ulong_t)&adaq4380_4_chip_info },
+ { "adaq4381-4", (kernel_ulong_t)&adaq4381_4_chip_info },
{ }
};
MODULE_DEVICE_TABLE(spi, ad7380_id_table);
@@ -1342,3 +2074,4 @@ module_spi_driver(ad7380_driver);
MODULE_AUTHOR("Stefan Popa <stefan.popa@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD738x ADC driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_DMAENGINE_BUFFER");
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index aeb8e383fe71..37b0515cf4fc 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -138,11 +138,10 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = ad7476_scan_direct(st);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index d39354afd539..1a314fddd7eb 100644
--- a/drivers/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -5,6 +5,7 @@
* Copyright 2011 Analog Devices Inc.
*/
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -85,6 +86,10 @@ static const unsigned int ad7606_oversampling_avail[7] = {
1, 2, 4, 8, 16, 32, 64,
};
+static const unsigned int ad7606b_oversampling_avail[9] = {
+ 1, 2, 4, 8, 16, 32, 64, 128, 256,
+};
+
static const unsigned int ad7616_oversampling_avail[8] = {
1, 2, 4, 8, 16, 32, 64, 128,
};
@@ -187,6 +192,8 @@ static int ad7608_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
static int ad7609_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
+static int ad7616_sw_mode_setup(struct iio_dev *indio_dev);
+static int ad7606b_sw_mode_setup(struct iio_dev *indio_dev);
const struct ad7606_chip_info ad7605_4_info = {
.channels = ad7605_channels,
@@ -239,6 +246,7 @@ const struct ad7606_chip_info ad7606b_info = {
.oversampling_avail = ad7606_oversampling_avail,
.oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
.scale_setup_cb = ad7606_16bit_chan_scale_setup,
+ .sw_setup_cb = ad7606b_sw_mode_setup,
};
EXPORT_SYMBOL_NS_GPL(ad7606b_info, "IIO_AD7606");
@@ -250,6 +258,7 @@ const struct ad7606_chip_info ad7606c_16_info = {
.oversampling_avail = ad7606_oversampling_avail,
.oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
.scale_setup_cb = ad7606c_16bit_chan_scale_setup,
+ .sw_setup_cb = ad7606b_sw_mode_setup,
};
EXPORT_SYMBOL_NS_GPL(ad7606c_16_info, "IIO_AD7606");
@@ -294,6 +303,7 @@ const struct ad7606_chip_info ad7606c_18_info = {
.oversampling_avail = ad7606_oversampling_avail,
.oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
.scale_setup_cb = ad7606c_18bit_chan_scale_setup,
+ .sw_setup_cb = ad7606b_sw_mode_setup,
};
EXPORT_SYMBOL_NS_GPL(ad7606c_18_info, "IIO_AD7606");
@@ -307,6 +317,7 @@ const struct ad7606_chip_info ad7616_info = {
.oversampling_num = ARRAY_SIZE(ad7616_oversampling_avail),
.os_req_reset = true,
.scale_setup_cb = ad7606_16bit_chan_scale_setup,
+ .sw_setup_cb = ad7616_sw_mode_setup,
};
EXPORT_SYMBOL_NS_GPL(ad7616_info, "IIO_AD7606");
@@ -752,13 +763,13 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- ret = ad7606_scan_direct(indio_dev, chan->address, val);
- if (ret < 0)
- return ret;
- return IIO_VAL_INT;
- }
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = ad7606_scan_direct(indio_dev, chan->address, val);
+ iio_device_release_direct(indio_dev);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
if (st->sw_mode_en)
ch = chan->address;
@@ -818,8 +829,7 @@ static int ad7606_write_os_hw(struct iio_dev *indio_dev, int val)
values[0] = val & GENMASK(2, 0);
- gpiod_set_array_value(st->gpio_os->ndescs, st->gpio_os->desc,
- st->gpio_os->info, values);
+ gpiod_multi_set_value_cansleep(st->gpio_os, values);
/* AD7616 requires a reset to update value */
if (st->chip_info->os_req_reset)
@@ -852,7 +862,11 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
}
val = (val * MICRO) + val2;
i = find_closest(val, scale_avail_uv, cs->num_scales);
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = st->write_scale(indio_dev, ch, i + cs->reg_offset);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
cs->range = i;
@@ -863,7 +877,11 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
i = find_closest(val, st->oversampling_avail,
st->num_os_ratios);
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = st->write_os(indio_dev, i);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
st->oversampling = st->oversampling_avail[i];
@@ -1138,16 +1156,117 @@ static const struct iio_trigger_ops ad7606_trigger_ops = {
.validate_device = iio_trigger_validate_own_device,
};
-static int ad7606_sw_mode_setup(struct iio_dev *indio_dev)
+static int ad7606_write_mask(struct ad7606_state *st, unsigned int addr,
+ unsigned long mask, unsigned int val)
+{
+ int readval;
+
+ readval = st->bops->reg_read(st, addr);
+ if (readval < 0)
+ return readval;
+
+ readval &= ~mask;
+ readval |= val;
+
+ return st->bops->reg_write(st, addr, readval);
+}
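
ad7606_write_mask() is a plain read-modify-write over the bus ops. A worked
example with assumed values:

/*
 * readval = 0b1101_0110, mask = 0b0001_1100, val = 0b0000_1000
 * readval &= ~mask  ->  0b1100_0010
 * readval |= val    ->  0b1100_1010
 * Only the masked field changes; all other bits are preserved.
 */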
+
+static int ad7616_write_scale_sw(struct iio_dev *indio_dev, int ch, int val)
{
struct ad7606_state *st = iio_priv(indio_dev);
+ unsigned int ch_addr, mode, ch_index;
- st->sw_mode_en = st->bops->sw_mode_config &&
- device_property_present(st->dev, "adi,sw-mode");
- if (!st->sw_mode_en)
- return 0;
+ /*
+ * The AD7616 has 16 channels divided into groups A and B.
+ * The ranges of group A channels are stored in registers at address
+ * offset 4, while those of group B channels are stored at offset 6.
+ * The last bit of the channel number determines whether it belongs to
+ * group A or B, because the order of channels in IIO is 0A, 0B, 1A, 1B...
+ */
+ ch_index = ch >> 1;
+
+ ch_addr = AD7616_RANGE_CH_ADDR(ch_index);
+
+ if ((ch & 0x1) == 0) /* channel A */
+ ch_addr += AD7616_RANGE_CH_A_ADDR_OFF;
+ else /* channel B */
+ ch_addr += AD7616_RANGE_CH_B_ADDR_OFF;
+
+ /* 0b01 for 2.5 V, 0b10 for 5 V and 0b11 for 10 V */
+ mode = AD7616_RANGE_CH_MODE(ch_index, ((val + 1) & 0b11));
+
+ return ad7606_write_mask(st, ch_addr, AD7616_RANGE_CH_MSK(ch_index),
+ mode);
+}
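
A worked example for IIO channel 5 (channel 2B), using the AD7616 macros as
defined in ad7606.h later in this series:

/*
 * ch = 5:  ch_index = 5 >> 1 = 2
 * AD7616_RANGE_CH_ADDR(2) = 2 >> 2 = 0
 * ch is odd -> group B -> ch_addr = 0 + AD7616_RANGE_CH_B_ADDR_OFF = 0x06
 * AD7616_RANGE_CH_MSK(2) = 0b11 << ((2 & 0b11) * 2) = 0b110000
 */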
+
+static int ad7616_write_os_sw(struct iio_dev *indio_dev, int val)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+ return ad7606_write_mask(st, AD7616_CONFIGURATION_REGISTER,
+ AD7616_OS_MASK, val << 2);
+}
+
+static int ad7606_write_scale_sw(struct iio_dev *indio_dev, int ch, int val)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+ return ad7606_write_mask(st, AD7606_RANGE_CH_ADDR(ch),
+ AD7606_RANGE_CH_MSK(ch),
+ AD7606_RANGE_CH_MODE(ch, val));
+}
+
+static int ad7606_write_os_sw(struct iio_dev *indio_dev, int val)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+ return st->bops->reg_write(st, AD7606_OS_MODE, val);
+}
+
+static int ad7616_sw_mode_setup(struct iio_dev *indio_dev)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+ int ret;
+
+ /*
+ * Scale can be configured individually for each channel
+ * in software mode.
+ */
+
+ st->write_scale = ad7616_write_scale_sw;
+ st->write_os = &ad7616_write_os_sw;
+
+ ret = st->bops->sw_mode_config(indio_dev);
+ if (ret)
+ return ret;
- indio_dev->info = &ad7606_info_sw_mode;
+ /* Activate Burst mode and SEQEN MODE */
+ return ad7606_write_mask(st, AD7616_CONFIGURATION_REGISTER,
+ AD7616_BURST_MODE | AD7616_SEQEN_MODE,
+ AD7616_BURST_MODE | AD7616_SEQEN_MODE);
+}
+
+static int ad7606b_sw_mode_setup(struct iio_dev *indio_dev)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+ DECLARE_BITMAP(os, 3);
+
+ bitmap_fill(os, 3);
+ /*
+ * Software mode is enabled when all three oversampling
+ * pins are set high. If the oversampling GPIOs are defined
+ * in the device tree, they need to be driven high here;
+ * otherwise, they must be hardwired to VDD.
+ */
+ if (st->gpio_os)
+ gpiod_multi_set_value_cansleep(st->gpio_os, os);
+
+ /* OS of 128 and 256 are available only in software mode */
+ st->oversampling_avail = ad7606b_oversampling_avail;
+ st->num_os_ratios = ARRAY_SIZE(ad7606b_oversampling_avail);
+
+ st->write_scale = ad7606_write_scale_sw;
+ st->write_os = &ad7606_write_os_sw;
return st->bops->sw_mode_config(indio_dev);
}
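
A hedged sketch of driving an arbitrary 3-bit oversampling code through the
same multi-GPIO call, e.g. OS = 0b101:

DECLARE_BITMAP(os, 3);

bitmap_from_u64(os, 0b101);	/* pins 0 and 2 high, pin 1 low */
gpiod_multi_set_value_cansleep(st->gpio_os, os);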
@@ -1246,17 +1365,6 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
return -ERESTARTSYS;
}
- st->write_scale = ad7606_write_scale_hw;
- st->write_os = ad7606_write_os_hw;
-
- ret = ad7606_sw_mode_setup(indio_dev);
- if (ret)
- return ret;
-
- ret = ad7606_chan_scales_setup(indio_dev);
- if (ret)
- return ret;
-
/* If convst pin is not defined, setup PWM. */
if (!st->gpio_convst) {
st->cnvst_pwm = devm_pwm_get(dev, NULL);
@@ -1334,6 +1442,20 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
return ret;
}
+ st->write_scale = ad7606_write_scale_hw;
+ st->write_os = ad7606_write_os_hw;
+
+ st->sw_mode_en = st->chip_info->sw_setup_cb &&
+ device_property_present(st->dev, "adi,sw-mode");
+ if (st->sw_mode_en) {
+ indio_dev->info = &ad7606_info_sw_mode;
+ st->chip_info->sw_setup_cb(indio_dev);
+ }
+
+ ret = ad7606_chan_scales_setup(indio_dev);
+ if (ret)
+ return ret;
+
return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL_NS_GPL(ad7606_probe, "IIO_AD7606");
diff --git a/drivers/iio/adc/ad7606.h b/drivers/iio/adc/ad7606.h
index 8778ffe515b3..71a30525eaab 100644
--- a/drivers/iio/adc/ad7606.h
+++ b/drivers/iio/adc/ad7606.h
@@ -10,37 +10,49 @@
#define AD760X_MAX_CHANNELS 16
-#define AD760X_CHANNEL(num, mask_sep, mask_type, mask_all, bits) { \
+#define AD7616_CONFIGURATION_REGISTER 0x02
+#define AD7616_OS_MASK GENMASK(4, 2)
+#define AD7616_BURST_MODE BIT(6)
+#define AD7616_SEQEN_MODE BIT(5)
+#define AD7616_RANGE_CH_A_ADDR_OFF 0x04
+#define AD7616_RANGE_CH_B_ADDR_OFF 0x06
+/*
+ * The ranges of a group's channels are stored in 2 registers:
+ * channels 0, 1, 2, 3 in one register, followed by 4, 5, 6, 7 in a
+ * second register. For channels of the second group (8-15) the order is
+ * the same, only with an offset of 2 in the register address.
+ */
+#define AD7616_RANGE_CH_ADDR(ch) ((ch) >> 2)
+/* The range of the channel is stored in 2 bits */
+#define AD7616_RANGE_CH_MSK(ch) (0b11 << (((ch) & 0b11) * 2))
+#define AD7616_RANGE_CH_MODE(ch, mode) ((mode) << ((((ch) & 0b11)) * 2))
+
+#define AD7606_CONFIGURATION_REGISTER 0x02
+#define AD7606_SINGLE_DOUT 0x00
+
+/*
+ * Ranges for AD7606B channels are stored in registers starting at address 0x3.
+ * Each register stores the range for 2 channels (4 bits per channel).
+ */
+#define AD7606_RANGE_CH_MSK(ch) (GENMASK(3, 0) << (4 * ((ch) & 0x1)))
+#define AD7606_RANGE_CH_MODE(ch, mode) \
+ ((GENMASK(3, 0) & (mode)) << (4 * ((ch) & 0x1)))
+#define AD7606_RANGE_CH_ADDR(ch) (0x03 + ((ch) >> 1))
+#define AD7606_OS_MODE 0x08
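
A worked example of the AD7606B packing for channel 5:

/*
 * AD7606_RANGE_CH_ADDR(5) = 0x03 + (5 >> 1) = 0x05
 * AD7606_RANGE_CH_MSK(5)  = 0xF << (4 * (5 & 0x1)) = 0xF0
 * AD7606_RANGE_CH_MODE(5, m) thus places m in bits [7:4] of register 0x05.
 */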
+
+#define AD760X_CHANNEL(num, mask_sep, mask_type, mask_all, \
+ mask_sep_avail, mask_all_avail, bits) { \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.channel = num, \
.address = num, \
.info_mask_separate = mask_sep, \
+ .info_mask_separate_available = \
+ mask_sep_avail, \
.info_mask_shared_by_type = mask_type, \
.info_mask_shared_by_all = mask_all, \
- .scan_index = num, \
- .scan_type = { \
- .sign = 's', \
- .realbits = (bits), \
- .storagebits = (bits) > 16 ? 32 : 16, \
- .endianness = IIO_CPU, \
- }, \
-}
-
-#define AD7606_SW_CHANNEL(num, bits) { \
- .type = IIO_VOLTAGE, \
- .indexed = 1, \
- .channel = num, \
- .address = num, \
- .info_mask_separate = \
- BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_SCALE), \
- .info_mask_separate_available = \
- BIT(IIO_CHAN_INFO_SCALE), \
- .info_mask_shared_by_all = \
- BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.info_mask_shared_by_all_available = \
- BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ mask_all_avail, \
.scan_index = num, \
.scan_type = { \
.sign = 's', \
@@ -50,14 +62,30 @@
}, \
}
+#define AD7606_SW_CHANNEL(num, bits) \
+ AD760X_CHANNEL(num, \
+ /* mask separate */ \
+ BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ /* mask type */ \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ /* mask all */ \
+ 0, \
+ /* mask separate available */ \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ /* mask all available */ \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ bits)
+
#define AD7605_CHANNEL(num) \
AD760X_CHANNEL(num, BIT(IIO_CHAN_INFO_RAW), \
- BIT(IIO_CHAN_INFO_SCALE), 0, 16)
+ BIT(IIO_CHAN_INFO_SCALE), 0, 0, 0, 16)
#define AD7606_CHANNEL(num, bits) \
AD760X_CHANNEL(num, BIT(IIO_CHAN_INFO_RAW), \
BIT(IIO_CHAN_INFO_SCALE), \
- BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), bits)
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ 0, 0, bits)
#define AD7616_CHANNEL(num) AD7606_SW_CHANNEL(num, 16)
@@ -65,12 +93,29 @@
AD760X_CHANNEL(num, 0, \
BIT(IIO_CHAN_INFO_SCALE), \
BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
- BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), 16)
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ 0, 0, 16)
+
+#define AD7606_BI_SW_CHANNEL(num) \
+ AD760X_CHANNEL(num, \
+ /* mask separate */ \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ /* mask type */ \
+ 0, \
+ /* mask all */ \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ /* mask separate available */ \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ /* mask all available */ \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ 16)
struct ad7606_state;
typedef int (*ad7606_scale_setup_cb_t)(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
+typedef int (*ad7606_sw_setup_cb_t)(struct iio_dev *indio_dev);
/**
* struct ad7606_chip_info - chip specific information
@@ -80,6 +125,7 @@ typedef int (*ad7606_scale_setup_cb_t)(struct iio_dev *indio_dev,
* @num_channels: number of channels
* @num_adc_channels: the number of channels the ADC actually inputs.
* @scale_setup_cb: callback to setup the scales for each channel
+ * @sw_setup_cb: callback to setup the software mode if available.
* @oversampling_avail: pointer to the array which stores the available
* oversampling ratios.
* @oversampling_num: number of elements stored in oversampling_avail array
@@ -94,6 +140,7 @@ struct ad7606_chip_info {
unsigned int num_adc_channels;
unsigned int num_channels;
ad7606_scale_setup_cb_t scale_setup_cb;
+ ad7606_sw_setup_cb_t sw_setup_cb;
const unsigned int *oversampling_avail;
unsigned int oversampling_num;
bool os_req_reset;
@@ -206,10 +253,6 @@ struct ad7606_bus_ops {
int (*reg_write)(struct ad7606_state *st,
unsigned int addr,
unsigned int val);
- int (*write_mask)(struct ad7606_state *st,
- unsigned int addr,
- unsigned long mask,
- unsigned int val);
int (*update_scan_mode)(struct iio_dev *indio_dev, const unsigned long *scan_mask);
u16 (*rd_wr_cmd)(int addr, char isWriteOp);
};
diff --git a/drivers/iio/adc/ad7606_bus_iface.h b/drivers/iio/adc/ad7606_bus_iface.h
new file mode 100644
index 000000000000..f2c979a9b7f3
--- /dev/null
+++ b/drivers/iio/adc/ad7606_bus_iface.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2010-2024 Analog Devices Inc.
+ * Copyright (c) 2025 Baylibre, SAS
+ */
+#ifndef __LINUX_PLATFORM_DATA_AD7606_H__
+#define __LINUX_PLATFORM_DATA_AD7606_H__
+
+struct iio_backend;
+
+struct ad7606_platform_data {
+ int (*bus_reg_read)(struct iio_backend *back, u32 reg, u32 *val);
+ int (*bus_reg_write)(struct iio_backend *back, u32 reg, u32 val);
+};
+
+#endif /* __LINUX_PLATFORM_DATA_AD7606_H__ */
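
A hedged sketch of the provider side of this interface; the
my_backend_reg_read/my_backend_reg_write names are hypothetical:

/* Hypothetical backend: fill the ops and attach them as platform data. */
static const struct ad7606_platform_data my_pdata = {
	.bus_reg_read = my_backend_reg_read,	/* hypothetical */
	.bus_reg_write = my_backend_reg_write,	/* hypothetical */
};

The consumer side retrieves this via st->dev->platform_data, as
ad7606_par_bus_reg_read()/ad7606_par_bus_reg_write() do in ad7606_par.c
below.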
diff --git a/drivers/iio/adc/ad7606_par.c b/drivers/iio/adc/ad7606_par.c
index 64733b607aa8..335fb481bfde 100644
--- a/drivers/iio/adc/ad7606_par.c
+++ b/drivers/iio/adc/ad7606_par.c
@@ -19,6 +19,7 @@
#include <linux/iio/iio.h>
#include "ad7606.h"
+#include "ad7606_bus_iface.h"
static const struct iio_chan_spec ad7606b_bi_channels[] = {
AD7606_BI_CHANNEL(0),
@@ -31,7 +32,19 @@ static const struct iio_chan_spec ad7606b_bi_channels[] = {
AD7606_BI_CHANNEL(7),
};
-static int ad7606_bi_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *scan_mask)
+static const struct iio_chan_spec ad7606b_bi_sw_channels[] = {
+ AD7606_BI_SW_CHANNEL(0),
+ AD7606_BI_SW_CHANNEL(1),
+ AD7606_BI_SW_CHANNEL(2),
+ AD7606_BI_SW_CHANNEL(3),
+ AD7606_BI_SW_CHANNEL(4),
+ AD7606_BI_SW_CHANNEL(5),
+ AD7606_BI_SW_CHANNEL(6),
+ AD7606_BI_SW_CHANNEL(7),
+};
+
+static int ad7606_par_bus_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
{
struct ad7606_state *st = iio_priv(indio_dev);
unsigned int c, ret;
@@ -48,7 +61,8 @@ static int ad7606_bi_update_scan_mode(struct iio_dev *indio_dev, const unsigned
return 0;
}
-static int ad7606_bi_setup_iio_backend(struct device *dev, struct iio_dev *indio_dev)
+static int ad7606_par_bus_setup_iio_backend(struct device *dev,
+ struct iio_dev *indio_dev)
{
struct ad7606_state *st = iio_priv(indio_dev);
unsigned int ret, c;
@@ -86,9 +100,39 @@ static int ad7606_bi_setup_iio_backend(struct device *dev, struct iio_dev *indio
return 0;
}
+static int ad7606_par_bus_reg_read(struct ad7606_state *st, unsigned int addr)
+{
+ struct ad7606_platform_data *pdata = st->dev->platform_data;
+ int val, ret;
+
+ ret = pdata->bus_reg_read(st->back, addr, &val);
+ if (ret)
+ return ret;
+
+ return val;
+}
+
+static int ad7606_par_bus_reg_write(struct ad7606_state *st, unsigned int addr,
+ unsigned int val)
+{
+ struct ad7606_platform_data *pdata = st->dev->platform_data;
+
+ return pdata->bus_reg_write(st->back, addr, val);
+}
+
+static int ad7606_par_bus_sw_mode_config(struct iio_dev *indio_dev)
+{
+ indio_dev->channels = ad7606b_bi_sw_channels;
+
+ return 0;
+}
+
static const struct ad7606_bus_ops ad7606_bi_bops = {
- .iio_backend_config = ad7606_bi_setup_iio_backend,
- .update_scan_mode = ad7606_bi_update_scan_mode,
+ .iio_backend_config = ad7606_par_bus_setup_iio_backend,
+ .update_scan_mode = ad7606_par_bus_update_scan_mode,
+ .reg_read = ad7606_par_bus_reg_read,
+ .reg_write = ad7606_par_bus_reg_write,
+ .sw_mode_config = ad7606_par_bus_sw_mode_config,
};
static int ad7606_par16_read_block(struct device *dev,
diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c
index e2c147525706..885bf0b68e77 100644
--- a/drivers/iio/adc/ad7606_spi.c
+++ b/drivers/iio/adc/ad7606_spi.c
@@ -15,36 +15,6 @@
#define MAX_SPI_FREQ_HZ 23500000 /* VDRIVE above 4.75 V */
-#define AD7616_CONFIGURATION_REGISTER 0x02
-#define AD7616_OS_MASK GENMASK(4, 2)
-#define AD7616_BURST_MODE BIT(6)
-#define AD7616_SEQEN_MODE BIT(5)
-#define AD7616_RANGE_CH_A_ADDR_OFF 0x04
-#define AD7616_RANGE_CH_B_ADDR_OFF 0x06
-/*
- * Range of channels from a group are stored in 2 registers.
- * 0, 1, 2, 3 in a register followed by 4, 5, 6, 7 in second register.
- * For channels from second group(8-15) the order is the same, only with
- * an offset of 2 for register address.
- */
-#define AD7616_RANGE_CH_ADDR(ch) ((ch) >> 2)
-/* The range of the channel is stored in 2 bits */
-#define AD7616_RANGE_CH_MSK(ch) (0b11 << (((ch) & 0b11) * 2))
-#define AD7616_RANGE_CH_MODE(ch, mode) ((mode) << ((((ch) & 0b11)) * 2))
-
-#define AD7606_CONFIGURATION_REGISTER 0x02
-#define AD7606_SINGLE_DOUT 0x00
-
-/*
- * Range for AD7606B channels are stored in registers starting with address 0x3.
- * Each register stores range for 2 channels(4 bits per channel).
- */
-#define AD7606_RANGE_CH_MSK(ch) (GENMASK(3, 0) << (4 * ((ch) & 0x1)))
-#define AD7606_RANGE_CH_MODE(ch, mode) \
- ((GENMASK(3, 0) & mode) << (4 * ((ch) & 0x1)))
-#define AD7606_RANGE_CH_ADDR(ch) (0x03 + ((ch) >> 1))
-#define AD7606_OS_MODE 0x08
-
static const struct iio_chan_spec ad7616_sw_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(16),
AD7616_CHANNEL(0),
@@ -89,10 +59,6 @@ static const struct iio_chan_spec ad7606c_18_sw_channels[] = {
AD7606_SW_CHANNEL(7, 18),
};
-static const unsigned int ad7606B_oversampling_avail[9] = {
- 1, 2, 4, 8, 16, 32, 64, 128, 256
-};
-
static u16 ad7616_spi_rd_wr_cmd(int addr, char isWriteOp)
{
/*
@@ -194,118 +160,20 @@ static int ad7606_spi_reg_write(struct ad7606_state *st,
return spi_write(spi, &st->d16[0], sizeof(st->d16[0]));
}
-static int ad7606_spi_write_mask(struct ad7606_state *st,
- unsigned int addr,
- unsigned long mask,
- unsigned int val)
-{
- int readval;
-
- readval = st->bops->reg_read(st, addr);
- if (readval < 0)
- return readval;
-
- readval &= ~mask;
- readval |= val;
-
- return st->bops->reg_write(st, addr, readval);
-}
-
-static int ad7616_write_scale_sw(struct iio_dev *indio_dev, int ch, int val)
-{
- struct ad7606_state *st = iio_priv(indio_dev);
- unsigned int ch_addr, mode, ch_index;
-
-
- /*
- * Ad7616 has 16 channels divided in group A and group B.
- * The range of channels from A are stored in registers with address 4
- * while channels from B are stored in register with address 6.
- * The last bit from channels determines if it is from group A or B
- * because the order of channels in iio is 0A, 0B, 1A, 1B...
- */
- ch_index = ch >> 1;
-
- ch_addr = AD7616_RANGE_CH_ADDR(ch_index);
-
- if ((ch & 0x1) == 0) /* channel A */
- ch_addr += AD7616_RANGE_CH_A_ADDR_OFF;
- else /* channel B */
- ch_addr += AD7616_RANGE_CH_B_ADDR_OFF;
-
- /* 0b01 for 2.5v, 0b10 for 5v and 0b11 for 10v */
- mode = AD7616_RANGE_CH_MODE(ch_index, ((val + 1) & 0b11));
- return st->bops->write_mask(st, ch_addr, AD7616_RANGE_CH_MSK(ch_index),
- mode);
-}
-
-static int ad7616_write_os_sw(struct iio_dev *indio_dev, int val)
-{
- struct ad7606_state *st = iio_priv(indio_dev);
-
- return st->bops->write_mask(st, AD7616_CONFIGURATION_REGISTER,
- AD7616_OS_MASK, val << 2);
-}
-
-static int ad7606_write_scale_sw(struct iio_dev *indio_dev, int ch, int val)
-{
- struct ad7606_state *st = iio_priv(indio_dev);
-
- return ad7606_spi_write_mask(st,
- AD7606_RANGE_CH_ADDR(ch),
- AD7606_RANGE_CH_MSK(ch),
- AD7606_RANGE_CH_MODE(ch, val));
-}
-
-static int ad7606_write_os_sw(struct iio_dev *indio_dev, int val)
-{
- struct ad7606_state *st = iio_priv(indio_dev);
-
- return ad7606_spi_reg_write(st, AD7606_OS_MODE, val);
-}
-
static int ad7616_sw_mode_config(struct iio_dev *indio_dev)
{
- struct ad7606_state *st = iio_priv(indio_dev);
-
/*
* Scale can be configured individually for each channel
* in software mode.
*/
indio_dev->channels = ad7616_sw_channels;
- st->write_scale = ad7616_write_scale_sw;
- st->write_os = &ad7616_write_os_sw;
-
- /* Activate Burst mode and SEQEN MODE */
- return st->bops->write_mask(st,
- AD7616_CONFIGURATION_REGISTER,
- AD7616_BURST_MODE | AD7616_SEQEN_MODE,
- AD7616_BURST_MODE | AD7616_SEQEN_MODE);
+ return 0;
}
static int ad7606B_sw_mode_config(struct iio_dev *indio_dev)
{
struct ad7606_state *st = iio_priv(indio_dev);
- DECLARE_BITMAP(os, 3);
-
- bitmap_fill(os, 3);
- /*
- * Software mode is enabled when all three oversampling
- * pins are set to high. If oversampling gpios are defined
- * in the device tree, then they need to be set to high,
- * otherwise, they must be hardwired to VDD
- */
- if (st->gpio_os) {
- gpiod_set_array_value(st->gpio_os->ndescs,
- st->gpio_os->desc, st->gpio_os->info, os);
- }
- /* OS of 128 and 256 are available only in software mode */
- st->oversampling_avail = ad7606B_oversampling_avail;
- st->num_os_ratios = ARRAY_SIZE(ad7606B_oversampling_avail);
-
- st->write_scale = ad7606_write_scale_sw;
- st->write_os = &ad7606_write_os_sw;
/* Configure device spi to output on a single channel */
st->bops->reg_write(st,
@@ -350,7 +218,6 @@ static const struct ad7606_bus_ops ad7616_spi_bops = {
.read_block = ad7606_spi_read_block,
.reg_read = ad7606_spi_reg_read,
.reg_write = ad7606_spi_reg_write,
- .write_mask = ad7606_spi_write_mask,
.rd_wr_cmd = ad7616_spi_rd_wr_cmd,
.sw_mode_config = ad7616_sw_mode_config,
};
@@ -359,7 +226,6 @@ static const struct ad7606_bus_ops ad7606b_spi_bops = {
.read_block = ad7606_spi_read_block,
.reg_read = ad7606_spi_reg_read,
.reg_write = ad7606_spi_reg_write,
- .write_mask = ad7606_spi_write_mask,
.rd_wr_cmd = ad7606B_spi_rd_wr_cmd,
.sw_mode_config = ad7606B_sw_mode_config,
};
@@ -368,7 +234,6 @@ static const struct ad7606_bus_ops ad7606c_18_spi_bops = {
.read_block = ad7606_spi_read_block18to32,
.reg_read = ad7606_spi_reg_read,
.reg_write = ad7606_spi_reg_write,
- .write_mask = ad7606_spi_write_mask,
.rd_wr_cmd = ad7606B_spi_rd_wr_cmd,
.sw_mode_config = ad7606c_18_sw_mode_config,
};
diff --git a/drivers/iio/adc/ad7625.c b/drivers/iio/adc/ad7625.c
index afa9bf4ddf3c..0466c0c7eae4 100644
--- a/drivers/iio/adc/ad7625.c
+++ b/drivers/iio/adc/ad7625.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Analog Devices Inc. AD7625 ADC driver
*
@@ -248,12 +248,15 @@ static int ad7625_write_raw(struct iio_dev *indio_dev,
int val, int val2, long info)
{
struct ad7625_state *st = iio_priv(indio_dev);
+ int ret;
switch (info) {
case IIO_CHAN_INFO_SAMP_FREQ:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return ad7625_set_sampling_freq(st, val);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = ad7625_set_sampling_freq(st, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
default:
return -EINVAL;
}
@@ -680,5 +683,5 @@ module_platform_driver(ad7625_driver);
MODULE_AUTHOR("Trevor Gamblin <tgamblin@baylibre.com>");
MODULE_DESCRIPTION("Analog Devices AD7625 ADC");
-MODULE_LICENSE("Dual BSD/GPL");
+MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("IIO_BACKEND");
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index 113703fb7245..5a863005aca6 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -142,7 +142,7 @@ static const struct iio_chan_spec ad7768_channels[] = {
.channel = 0,
.scan_index = 0,
.scan_type = {
- .sign = 'u',
+ .sign = 's',
.realbits = 24,
.storagebits = 32,
.shift = 8,
@@ -154,7 +154,6 @@ static const struct iio_chan_spec ad7768_channels[] = {
struct ad7768_state {
struct spi_device *spi;
struct regulator *vref;
- struct mutex lock;
struct clk *mclk;
unsigned int mclk_freq;
unsigned int samp_freq;
@@ -256,18 +255,20 @@ static int ad7768_reg_access(struct iio_dev *indio_dev,
struct ad7768_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&st->lock);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
if (readval) {
ret = ad7768_spi_reg_read(st, reg, 1);
if (ret < 0)
- goto err_unlock;
+ goto err_release;
*readval = ret;
ret = 0;
} else {
ret = ad7768_spi_reg_write(st, reg, writeval);
}
-err_unlock:
- mutex_unlock(&st->lock);
+err_release:
+ iio_device_release_direct(indio_dev);
return ret;
}
@@ -365,17 +366,15 @@ static int ad7768_read_raw(struct iio_dev *indio_dev,
switch (info) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = ad7768_scan_direct(indio_dev);
- if (ret >= 0)
- *val = ret;
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
+ *val = sign_extend32(ret, chan->scan_type.realbits - 1);
return IIO_VAL_INT;
@@ -471,18 +470,15 @@ static irqreturn_t ad7768_trigger_handler(int irq, void *p)
struct ad7768_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&st->lock);
-
ret = spi_read(st->spi, &st->data.scan.chan, 3);
if (ret < 0)
- goto err_unlock;
+ goto out;
iio_push_to_buffers_with_timestamp(indio_dev, &st->data.scan,
iio_get_time_ns(indio_dev));
-err_unlock:
+out:
iio_trigger_notify_done(indio_dev->trig);
- mutex_unlock(&st->lock);
return IRQ_HANDLED;
}
@@ -574,6 +570,21 @@ static int ad7768_probe(struct spi_device *spi)
return -ENOMEM;
st = iio_priv(indio_dev);
+ /*
+ * The datasheet recommends keeping the SDI line high when data is not
+ * being clocked out of the controller and the SPI clock is free
+ * running, to prevent an accidental reset.
+ * Since many controllers do not yet support the SPI_MOSI_IDLE_HIGH
+ * flag, only request the MOSI idle-high state if the controller
+ * supports it.
+ */
+ if (spi->controller->mode_bits & SPI_MOSI_IDLE_HIGH) {
+ spi->mode |= SPI_MOSI_IDLE_HIGH;
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return ret;
+ }
+
st->spi = spi;
st->vref = devm_regulator_get(&spi->dev, "vref");
@@ -596,8 +607,6 @@ static int ad7768_probe(struct spi_device *spi)
st->mclk_freq = clk_get_rate(st->mclk);
- mutex_init(&st->lock);
-
indio_dev->channels = ad7768_channels;
indio_dev->num_channels = ARRAY_SIZE(ad7768_channels);
indio_dev->name = spi_get_device_id(spi)->name;
diff --git a/drivers/iio/adc/ad7779.c b/drivers/iio/adc/ad7779.c
index 2537dab69a35..a5d87faa5e12 100644
--- a/drivers/iio/adc/ad7779.c
+++ b/drivers/iio/adc/ad7779.c
@@ -467,59 +467,82 @@ static int ad7779_set_calibbias(struct ad7779_state *st, int channel, int val)
calibbias[2]);
}
+static int __ad7779_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct ad7779_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBSCALE:
+ ret = ad7779_get_calibscale(st, chan->channel);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ *val2 = GAIN_REL;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ ret = ad7779_get_calibbias(st, chan->channel);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = st->sampling_freq;
+ if (*val < 0)
+ return -EINVAL;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
static int ad7779_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val,
int *val2, long mask)
{
- struct ad7779_state *st = iio_priv(indio_dev);
int ret;
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- switch (mask) {
- case IIO_CHAN_INFO_CALIBSCALE:
- ret = ad7779_get_calibscale(st, chan->channel);
- if (ret < 0)
- return ret;
- *val = ret;
- *val2 = GAIN_REL;
- return IIO_VAL_FRACTIONAL;
- case IIO_CHAN_INFO_CALIBBIAS:
- ret = ad7779_get_calibbias(st, chan->channel);
- if (ret < 0)
- return ret;
- *val = ret;
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_SAMP_FREQ:
- *val = st->sampling_freq;
- if (*val < 0)
- return -EINVAL;
- return IIO_VAL_INT;
- default:
- return -EINVAL;
- }
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = __ad7779_read_raw(indio_dev, chan, val, val2, mask);
+ iio_device_release_direct(indio_dev);
+ return ret;
+}
+
+static int __ad7779_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2,
+ long mask)
+{
+ struct ad7779_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBSCALE:
+ return ad7779_set_calibscale(st, chan->channel, val2);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return ad7779_set_calibbias(st, chan->channel, val);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return ad7779_set_sampling_frequency(st, val);
+ default:
+ return -EINVAL;
}
- unreachable();
}
static int ad7779_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val, int val2,
long mask)
{
- struct ad7779_state *st = iio_priv(indio_dev);
+ int ret;
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- switch (mask) {
- case IIO_CHAN_INFO_CALIBSCALE:
- return ad7779_set_calibscale(st, chan->channel, val2);
- case IIO_CHAN_INFO_CALIBBIAS:
- return ad7779_set_calibbias(st, chan->channel, val);
- case IIO_CHAN_INFO_SAMP_FREQ:
- return ad7779_set_sampling_frequency(st, val);
- default:
- return -EINVAL;
- }
- }
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = __ad7779_write_raw(indio_dev, chan, val, val2, mask);
+ iio_device_release_direct(indio_dev);
+ return ret;
}
static int ad7779_buffer_preenable(struct iio_dev *indio_dev)
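For reference, the pattern behind these conversions: iio_device_claim_direct() returns a bool rather than the errno that iio_device_claim_direct_mode() returned, so callers now map failure to -EBUSY themselves and move the real work into a helper. A minimal sketch, using illustrative example_* names that belong to no driver in this diff:

static int example_read_raw(struct iio_dev *indio_dev,
			    struct iio_chan_spec const *chan,
			    int *val, int *val2, long mask)
{
	int ret;

	/* Fails (returns false) while buffered capture owns the device. */
	if (!iio_device_claim_direct(indio_dev))
		return -EBUSY;

	ret = __example_read_raw(indio_dev, chan, val, val2, mask);
	iio_device_release_direct(indio_dev);

	return ret;
}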
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index 76118fe22db8..597c2686ffa4 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -310,15 +310,11 @@ static int ad7791_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
-static int ad7791_write_raw(struct iio_dev *indio_dev,
+static int __ad7791_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val, int val2, long mask)
{
struct ad7791_state *st = iio_priv(indio_dev);
- int ret, i;
-
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ int i;
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
@@ -328,22 +324,31 @@ static int ad7791_write_raw(struct iio_dev *indio_dev,
break;
}
- if (i == ARRAY_SIZE(ad7791_sample_freq_avail)) {
- ret = -EINVAL;
- break;
- }
+ if (i == ARRAY_SIZE(ad7791_sample_freq_avail))
+ return -EINVAL;
st->filter &= ~AD7791_FILTER_RATE_MASK;
st->filter |= i;
ad_sd_write_reg(&st->sd, AD7791_REG_FILTER,
sizeof(st->filter),
st->filter);
- break;
+ return 0;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
+}
+
+static int ad7791_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = __ad7791_write_raw(indio_dev, chan, val, val2, mask);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index 1b50d9643a63..ccf18ce48e34 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -462,64 +462,68 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
-static int ad7793_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
+static int __ad7793_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
{
struct ad7793_state *st = iio_priv(indio_dev);
- int ret, i;
+ int i;
unsigned int tmp;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
switch (mask) {
case IIO_CHAN_INFO_SCALE:
- ret = -EINVAL;
- for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
- if (val2 == st->scale_avail[i][1]) {
- ret = 0;
- tmp = st->conf;
- st->conf &= ~AD7793_CONF_GAIN(-1);
- st->conf |= AD7793_CONF_GAIN(i);
-
- if (tmp == st->conf)
- break;
-
- ad_sd_write_reg(&st->sd, AD7793_REG_CONF,
- sizeof(st->conf), st->conf);
- ad7793_calibrate_all(st);
- break;
- }
- break;
- case IIO_CHAN_INFO_SAMP_FREQ:
- if (!val) {
- ret = -EINVAL;
- break;
+ for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++) {
+ if (val2 != st->scale_avail[i][1])
+ continue;
+
+ tmp = st->conf;
+ st->conf &= ~AD7793_CONF_GAIN(-1);
+ st->conf |= AD7793_CONF_GAIN(i);
+
+ if (tmp == st->conf)
+ return 0;
+
+ ad_sd_write_reg(&st->sd, AD7793_REG_CONF,
+ sizeof(st->conf), st->conf);
+ ad7793_calibrate_all(st);
+
+ return 0;
}
+ return -EINVAL;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (!val)
+ return -EINVAL;
for (i = 0; i < 16; i++)
if (val == st->chip_info->sample_freq_avail[i])
break;
- if (i == 16) {
- ret = -EINVAL;
- break;
- }
+ if (i == 16)
+ return -EINVAL;
st->mode &= ~AD7793_MODE_RATE(-1);
st->mode |= AD7793_MODE_RATE(i);
ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode),
st->mode);
- break;
+ return 0;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
+}
+
+static int ad7793_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = __ad7793_write_raw(indio_dev, chan, val, val2, mask);
+
+ iio_device_release_direct(indio_dev);
- iio_device_release_direct_mode(indio_dev);
return ret;
}
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index 69add1dc4b53..87ff95643794 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -152,11 +152,10 @@ static int ad7887_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = ad7887_scan_direct(st, chan->address);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
index acc44cb34f82..87945efb940b 100644
--- a/drivers/iio/adc/ad7923.c
+++ b/drivers/iio/adc/ad7923.c
@@ -260,11 +260,10 @@ static int ad7923_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = ad7923_scan_direct(st, chan->address);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/ad7944.c b/drivers/iio/adc/ad7944.c
index 0ec9cda10f5f..2f949fe55873 100644
--- a/drivers/iio/adc/ad7944.c
+++ b/drivers/iio/adc/ad7944.c
@@ -16,11 +16,14 @@
#include <linux/module.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
+#include <linux/spi/offload/consumer.h>
#include <linux/spi/spi.h>
#include <linux/string_helpers.h>
+#include <linux/units.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer-dmaengine.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
@@ -54,6 +57,12 @@ struct ad7944_adc {
enum ad7944_spi_mode spi_mode;
struct spi_transfer xfers[3];
struct spi_message msg;
+ struct spi_transfer offload_xfers[2];
+ struct spi_message offload_msg;
+ struct spi_offload *offload;
+ struct spi_offload_trigger *offload_trigger;
+ unsigned long offload_trigger_hz;
+ int sample_freq_range[3];
void *chain_mode_buf;
/* Chip-specific timing specifications. */
const struct ad7944_timing_spec *timing_spec;
@@ -81,6 +90,8 @@ struct ad7944_adc {
/* quiet time before CNV rising edge */
#define AD7944_T_QUIET_NS 20
+/* minimum CNV high time to trigger conversion */
+#define AD7944_T_CNVH_NS 10
static const struct ad7944_timing_spec ad7944_timing_spec = {
.conv_ns = 420,
@@ -95,20 +106,27 @@ static const struct ad7944_timing_spec ad7986_timing_spec = {
struct ad7944_chip_info {
const char *name;
const struct ad7944_timing_spec *timing_spec;
+ u32 max_sample_rate_hz;
const struct iio_chan_spec channels[2];
+ const struct iio_chan_spec offload_channels[1];
};
+/* get number of bytes for SPI xfer */
+#define AD7944_SPI_BYTES(scan_type) ((scan_type).realbits > 16 ? 4 : 2)
+
/*
* AD7944_DEFINE_CHIP_INFO - Define a chip info structure for a specific chip
* @_name: The name of the chip
* @_ts: The timing specification for the chip
+ * @_max: The maximum sample rate in Hz
* @_bits: The number of bits in the conversion result
* @_diff: Whether the chip is true differential or not
*/
-#define AD7944_DEFINE_CHIP_INFO(_name, _ts, _bits, _diff) \
+#define AD7944_DEFINE_CHIP_INFO(_name, _ts, _max, _bits, _diff) \
static const struct ad7944_chip_info _name##_chip_info = { \
.name = #_name, \
.timing_spec = &_ts##_timing_spec, \
+ .max_sample_rate_hz = _max, \
.channels = { \
{ \
.type = IIO_VOLTAGE, \
@@ -126,13 +144,43 @@ static const struct ad7944_chip_info _name##_chip_info = { \
}, \
IIO_CHAN_SOFT_TIMESTAMP(1), \
}, \
+ .offload_channels = { \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .differential = _diff, \
+ .channel = 0, \
+ .channel2 = _diff ? 1 : 0, \
+ .scan_index = 0, \
+ .scan_type.sign = _diff ? 's' : 'u', \
+ .scan_type.realbits = _bits, \
+ .scan_type.storagebits = 32, \
+ .scan_type.endianness = IIO_CPU, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) \
+ | BIT(IIO_CHAN_INFO_SCALE) \
+ | BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_separate_available = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ }, \
+ }, \
}
+/*
+ * Notes on the offload channels:
+ * - There is no soft timestamp since everything is done in hardware.
+ * - A sampling frequency attribute is added; it controls the SPI
+ *   offload trigger.
+ * - The storagebits value depends on the SPI offload provider. Currently there
+ * is only one supported provider, namely the ADI PULSAR ADC HDL project,
+ * which always uses 32-bit words for data values, even for <= 16-bit ADCs.
+ * So the value is just hardcoded to 32 for now.
+ */
+
/* pseudo-differential with ground sense */
-AD7944_DEFINE_CHIP_INFO(ad7944, ad7944, 14, 0);
-AD7944_DEFINE_CHIP_INFO(ad7985, ad7944, 16, 0);
+AD7944_DEFINE_CHIP_INFO(ad7944, ad7944, 2.5 * MEGA, 14, 0);
+AD7944_DEFINE_CHIP_INFO(ad7985, ad7944, 2.5 * MEGA, 16, 0);
/* fully differential */
-AD7944_DEFINE_CHIP_INFO(ad7986, ad7986, 18, 1);
+AD7944_DEFINE_CHIP_INFO(ad7986, ad7986, 2 * MEGA, 18, 1);
static int ad7944_3wire_cs_mode_init_msg(struct device *dev, struct ad7944_adc *adc,
const struct iio_chan_spec *chan)
@@ -164,7 +212,7 @@ static int ad7944_3wire_cs_mode_init_msg(struct device *dev, struct ad7944_adc *
/* Then we can read the data during the acquisition phase */
xfers[2].rx_buf = &adc->sample.raw;
- xfers[2].len = BITS_TO_BYTES(chan->scan_type.storagebits);
+ xfers[2].len = AD7944_SPI_BYTES(chan->scan_type);
xfers[2].bits_per_word = chan->scan_type.realbits;
spi_message_init_with_transfers(&adc->msg, xfers, 3);
@@ -193,7 +241,7 @@ static int ad7944_4wire_mode_init_msg(struct device *dev, struct ad7944_adc *adc
xfers[0].delay.unit = SPI_DELAY_UNIT_NSECS;
xfers[1].rx_buf = &adc->sample.raw;
- xfers[1].len = BITS_TO_BYTES(chan->scan_type.storagebits);
+ xfers[1].len = AD7944_SPI_BYTES(chan->scan_type);
xfers[1].bits_per_word = chan->scan_type.realbits;
spi_message_init_with_transfers(&adc->msg, xfers, 2);
@@ -228,7 +276,7 @@ static int ad7944_chain_mode_init_msg(struct device *dev, struct ad7944_adc *adc
xfers[0].delay.unit = SPI_DELAY_UNIT_NSECS;
xfers[1].rx_buf = adc->chain_mode_buf;
- xfers[1].len = BITS_TO_BYTES(chan->scan_type.storagebits) * n_chain_dev;
+ xfers[1].len = AD7944_SPI_BYTES(chan->scan_type) * n_chain_dev;
xfers[1].bits_per_word = chan->scan_type.realbits;
spi_message_init_with_transfers(&adc->msg, xfers, 2);
@@ -236,6 +284,48 @@ static int ad7944_chain_mode_init_msg(struct device *dev, struct ad7944_adc *adc
return devm_spi_optimize_message(dev, adc->spi, &adc->msg);
}
+/*
+ * Unlike ad7944_3wire_cs_mode_init_msg(), this creates a message that reads
+ * during the conversion phase instead of the acquisition phase when reading
+ * a sample from the ADC. This is needed to be able to read at the maximum
+ * sample rate. It requires the SPI controller to have offload support and a
+ * high enough SCLK rate to read the sample during the conversion phase.
+ */
+static int ad7944_3wire_cs_mode_init_offload_msg(struct device *dev,
+ struct ad7944_adc *adc,
+ const struct iio_chan_spec *chan)
+{
+ struct spi_transfer *xfers = adc->offload_xfers;
+ int ret;
+
+ /*
+ * CS is tied to CNV and we need a low to high transition to start the
+ * conversion, so place CNV low for t_QUIET to prepare for this.
+ */
+ xfers[0].delay.value = AD7944_T_QUIET_NS;
+ xfers[0].delay.unit = SPI_DELAY_UNIT_NSECS;
+ /* CNV has to be high for a minimum time to trigger conversion. */
+ xfers[0].cs_change = 1;
+ xfers[0].cs_change_delay.value = AD7944_T_CNVH_NS;
+ xfers[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ /* Then we can read the previous sample during the conversion phase */
+ xfers[1].offload_flags = SPI_OFFLOAD_XFER_RX_STREAM;
+ xfers[1].len = AD7944_SPI_BYTES(chan->scan_type);
+ xfers[1].bits_per_word = chan->scan_type.realbits;
+
+ spi_message_init_with_transfers(&adc->offload_msg, xfers,
+ ARRAY_SIZE(adc->offload_xfers));
+
+ adc->offload_msg.offload = adc->offload;
+
+ ret = devm_spi_optimize_message(dev, adc->spi, &adc->offload_msg);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to prepare offload msg\n");
+
+ return 0;
+}
+
/**
* ad7944_convert_and_acquire - Perform a single conversion and acquisition
* @adc: The ADC device structure
@@ -274,12 +364,12 @@ static int ad7944_single_conversion(struct ad7944_adc *adc,
return ret;
if (adc->spi_mode == AD7944_SPI_MODE_CHAIN) {
- if (chan->scan_type.storagebits > 16)
+ if (chan->scan_type.realbits > 16)
*val = ((u32 *)adc->chain_mode_buf)[chan->scan_index];
else
*val = ((u16 *)adc->chain_mode_buf)[chan->scan_index];
} else {
- if (chan->scan_type.storagebits > 16)
+ if (chan->scan_type.realbits > 16)
*val = adc->sample.raw.u32;
else
*val = adc->sample.raw.u16;
@@ -291,6 +381,23 @@ static int ad7944_single_conversion(struct ad7944_adc *adc,
return IIO_VAL_INT;
}
+static int ad7944_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct ad7944_adc *adc = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = adc->sample_freq_range;
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_RANGE;
+ default:
+ return -EINVAL;
+ }
+}
+
static int ad7944_read_raw(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
int *val, int *val2, long info)
@@ -300,12 +407,11 @@ static int ad7944_read_raw(struct iio_dev *indio_dev,
switch (info) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = ad7944_single_conversion(adc, chan, val);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_SCALE:
@@ -323,13 +429,104 @@ static int ad7944_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = adc->offload_trigger_hz;
+ return IIO_VAL_INT;
+
default:
return -EINVAL;
}
}
+static int ad7944_set_sample_freq(struct ad7944_adc *adc, int val)
+{
+ struct spi_offload_trigger_config config = {
+ .type = SPI_OFFLOAD_TRIGGER_PERIODIC,
+ .periodic = {
+ .frequency_hz = val,
+ },
+ };
+ int ret;
+
+ ret = spi_offload_trigger_validate(adc->offload_trigger, &config);
+ if (ret)
+ return ret;
+
+ adc->offload_trigger_hz = config.periodic.frequency_hz;
+
+ return 0;
+}
+
+static int ad7944_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int val, int val2, long info)
+{
+ struct ad7944_adc *adc = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val < 1 || val > adc->sample_freq_range[2])
+ return -EINVAL;
+
+ return ad7944_set_sample_freq(adc, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad7944_write_raw_get_fmt(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return IIO_VAL_INT;
+ default:
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+}
+
static const struct iio_info ad7944_iio_info = {
+ .read_avail = &ad7944_read_avail,
.read_raw = &ad7944_read_raw,
+ .write_raw = &ad7944_write_raw,
+ .write_raw_get_fmt = &ad7944_write_raw_get_fmt,
+};
+
+static int ad7944_offload_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct ad7944_adc *adc = iio_priv(indio_dev);
+ struct spi_offload_trigger_config config = {
+ .type = SPI_OFFLOAD_TRIGGER_PERIODIC,
+ .periodic = {
+ .frequency_hz = adc->offload_trigger_hz,
+ },
+ };
+ int ret;
+
+ gpiod_set_value_cansleep(adc->turbo, 1);
+
+ ret = spi_offload_trigger_enable(adc->offload, adc->offload_trigger,
+ &config);
+ if (ret)
+ gpiod_set_value_cansleep(adc->turbo, 0);
+
+ return ret;
+}
+
+static int ad7944_offload_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct ad7944_adc *adc = iio_priv(indio_dev);
+
+ spi_offload_trigger_disable(adc->offload, adc->offload_trigger);
+ gpiod_set_value_cansleep(adc->turbo, 0);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops ad7944_offload_buffer_setup_ops = {
+ .postenable = &ad7944_offload_buffer_postenable,
+ .predisable = &ad7944_offload_buffer_predisable,
};
static irqreturn_t ad7944_trigger_handler(int irq, void *p)
@@ -409,8 +606,7 @@ static int ad7944_chain_mode_alloc(struct device *dev,
/* 1 word for each voltage channel + aligned u64 for timestamp */
chain_mode_buf_size = ALIGN(n_chain_dev *
- BITS_TO_BYTES(chan[0].scan_type.storagebits), sizeof(u64))
- + sizeof(u64);
+ AD7944_SPI_BYTES(chan[0].scan_type), sizeof(u64)) + sizeof(u64);
buf = devm_kzalloc(dev, chain_mode_buf_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -444,6 +640,11 @@ static const char * const ad7944_power_supplies[] = {
"avdd", "dvdd", "bvdd", "vio"
};
+static const struct spi_offload_config ad7944_offload_config = {
+ .capability_flags = SPI_OFFLOAD_CAP_TRIGGER |
+ SPI_OFFLOAD_CAP_RX_STREAM_DMA,
+};
+
static int ad7944_probe(struct spi_device *spi)
{
const struct ad7944_chip_info *chip_info;
@@ -469,6 +670,10 @@ static int ad7944_probe(struct spi_device *spi)
adc->timing_spec = chip_info->timing_spec;
+ adc->sample_freq_range[0] = 1; /* min */
+ adc->sample_freq_range[1] = 1; /* step */
+ adc->sample_freq_range[2] = chip_info->max_sample_rate_hz; /* max */
+
ret = device_property_match_property_string(dev, "adi,spi-mode",
ad7944_spi_modes,
ARRAY_SIZE(ad7944_spi_modes));
@@ -588,20 +793,74 @@ static int ad7944_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &ad7944_iio_info;
- if (adc->spi_mode == AD7944_SPI_MODE_CHAIN) {
- indio_dev->available_scan_masks = chain_scan_masks;
- indio_dev->channels = chain_chan;
- indio_dev->num_channels = n_chain_dev + 1;
+ adc->offload = devm_spi_offload_get(dev, spi, &ad7944_offload_config);
+ ret = PTR_ERR_OR_ZERO(adc->offload);
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to get offload\n");
+
+ /* Fall back to low speed usage when no SPI offload available. */
+ if (ret == -ENODEV) {
+ if (adc->spi_mode == AD7944_SPI_MODE_CHAIN) {
+ indio_dev->available_scan_masks = chain_scan_masks;
+ indio_dev->channels = chain_chan;
+ indio_dev->num_channels = n_chain_dev + 1;
+ } else {
+ indio_dev->channels = chip_info->channels;
+ indio_dev->num_channels = ARRAY_SIZE(chip_info->channels);
+ }
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ iio_pollfunc_store_time,
+ ad7944_trigger_handler,
+ NULL);
+ if (ret)
+ return ret;
} else {
- indio_dev->channels = chip_info->channels;
- indio_dev->num_channels = ARRAY_SIZE(chip_info->channels);
- }
+ struct dma_chan *rx_dma;
- ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
- iio_pollfunc_store_time,
- ad7944_trigger_handler, NULL);
- if (ret)
- return ret;
+ if (adc->spi_mode != AD7944_SPI_MODE_SINGLE)
+ return dev_err_probe(dev, -EINVAL,
+ "offload only supported in single mode\n");
+
+ indio_dev->setup_ops = &ad7944_offload_buffer_setup_ops;
+ indio_dev->channels = chip_info->offload_channels;
+ indio_dev->num_channels = ARRAY_SIZE(chip_info->offload_channels);
+
+ adc->offload_trigger = devm_spi_offload_trigger_get(dev,
+ adc->offload, SPI_OFFLOAD_TRIGGER_PERIODIC);
+ if (IS_ERR(adc->offload_trigger))
+ return dev_err_probe(dev, PTR_ERR(adc->offload_trigger),
+ "failed to get offload trigger\n");
+
+ ret = ad7944_set_sample_freq(adc, 2 * MEGA);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to init sample rate\n");
+
+ rx_dma = devm_spi_offload_rx_stream_request_dma_chan(dev,
+ adc->offload);
+ if (IS_ERR(rx_dma))
+ return dev_err_probe(dev, PTR_ERR(rx_dma),
+ "failed to get offload RX DMA\n");
+
+ /*
+ * REVISIT: ideally, we would confirm that the offload RX DMA
+ * buffer layout is the same as what is hard-coded in
+ * offload_channels. Right now, the only supported offload
+ * is the pulsar_adc project which always uses 32-bit word
+ * size for data values, regardless of the SPI bits per word.
+ */
+
+ ret = devm_iio_dmaengine_buffer_setup_with_handle(dev,
+ indio_dev, rx_dma, IIO_BUFFER_DIRECTION_IN);
+ if (ret)
+ return ret;
+
+ ret = ad7944_3wire_cs_mode_init_offload_msg(dev, adc,
+ &chip_info->offload_channels[0]);
+ if (ret)
+ return ret;
+ }
return devm_iio_device_register(dev, indio_dev);
}
@@ -636,3 +895,4 @@ module_spi_driver(ad7944_driver);
MODULE_AUTHOR("David Lechner <dlechner@baylibre.com>");
MODULE_DESCRIPTION("Analog Devices AD7944 PulSAR ADC family driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_DMAENGINE_BUFFER");
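A condensed sketch of the offload trigger lifecycle used by the ad7944 changes above, assuming the offload and trigger handles were already obtained at probe time; the helper name is illustrative:

static int example_start_offload(struct ad7944_adc *adc)
{
	struct spi_offload_trigger_config config = {
		.type = SPI_OFFLOAD_TRIGGER_PERIODIC,
		.periodic = {
			.frequency_hz = adc->offload_trigger_hz,
		},
	};
	int ret;

	/* validate may adjust frequency_hz to the closest achievable rate */
	ret = spi_offload_trigger_validate(adc->offload_trigger, &config);
	if (ret)
		return ret;

	adc->offload_trigger_hz = config.periodic.frequency_hz;

	/* hardware then samples autonomously until the trigger is disabled */
	return spi_offload_trigger_enable(adc->offload, adc->offload_trigger,
					  &config);
}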
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index aa44b4e2542b..993f4651b73a 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -291,13 +291,12 @@ static int ad799x_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&st->lock);
ret = ad799x_scan_direct(st, chan->scan_index);
mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
@@ -411,9 +410,8 @@ static int ad799x_write_event_config(struct iio_dev *indio_dev,
struct ad799x_state *st = iio_priv(indio_dev);
int ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&st->lock);
@@ -429,7 +427,7 @@ static int ad799x_write_event_config(struct iio_dev *indio_dev,
ret = ad799x_write_config(st, st->config);
mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
index f30119b42ba0..f7a9f46ea0dc 100644
--- a/drivers/iio/adc/ad9467.c
+++ b/drivers/iio/adc/ad9467.c
@@ -813,6 +813,18 @@ static int ad9467_read_raw(struct iio_dev *indio_dev,
}
}
+static int __ad9467_update_clock(struct ad9467_state *st, long r_clk)
+{
+ int ret;
+
+ ret = clk_set_rate(st->clk, r_clk);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&st->lock);
+ return ad9467_calibrate(st);
+}
+
static int ad9467_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
@@ -842,14 +854,11 @@ static int ad9467_write_raw(struct iio_dev *indio_dev,
if (sample_rate == r_clk)
return 0;
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- ret = clk_set_rate(st->clk, r_clk);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
- guard(mutex)(&st->lock);
- ret = ad9467_calibrate(st);
- }
+ ret = __ad9467_update_clock(st, r_clk);
+ iio_device_release_direct(indio_dev);
return ret;
default:
return -EINVAL;
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index d5d81581ab34..6c37f8e21120 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -339,6 +339,7 @@ int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
out:
sigma_delta->keep_cs_asserted = false;
ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
+ ad_sigma_delta_disable_one(sigma_delta, channel);
sigma_delta->bus_locked = false;
spi_bus_unlock(sigma_delta->spi->controller);
@@ -386,11 +387,12 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
unsigned int data_reg;
int ret = 0;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
- ad_sigma_delta_set_channel(sigma_delta, chan->address);
+ ret = ad_sigma_delta_set_channel(sigma_delta, chan->address);
+ if (ret)
+ goto out_release;
spi_bus_lock(sigma_delta->spi->controller);
sigma_delta->bus_locked = true;
@@ -431,7 +433,8 @@ out_unlock:
sigma_delta->keep_cs_asserted = false;
sigma_delta->bus_locked = false;
spi_bus_unlock(sigma_delta->spi->controller);
- iio_device_release_direct_mode(indio_dev);
+out_release:
+ iio_device_release_direct(indio_dev);
if (ret)
return ret;
@@ -801,10 +804,15 @@ int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev,
spin_lock_init(&sigma_delta->irq_lock);
- if (info->irq_line)
- sigma_delta->irq_line = info->irq_line;
- else
+ if (info->has_named_irqs) {
+ sigma_delta->irq_line = fwnode_irq_get_byname(dev_fwnode(&spi->dev),
+ "rdy");
+ if (sigma_delta->irq_line < 0)
+ return dev_err_probe(&spi->dev, sigma_delta->irq_line,
+ "Interrupt 'rdy' is required\n");
+ } else {
sigma_delta->irq_line = spi->irq;
+ }
sigma_delta->rdy_gpiod = devm_gpiod_get_optional(&spi->dev, "rdy", GPIOD_IN);
if (IS_ERR(sigma_delta->rdy_gpiod))
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index c7357601f0f8..cf942c043457 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -12,9 +12,9 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
@@ -27,6 +27,7 @@
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
+#include "ad7606_bus_iface.h"
/*
* Register definitions:
* https://wiki.analog.com/resources/fpga/docs/axi_adc_ip#register_map
@@ -39,9 +40,19 @@
#define ADI_AXI_REG_RSTN_MMCM_RSTN BIT(1)
#define ADI_AXI_REG_RSTN_RSTN BIT(0)
+#define ADI_AXI_ADC_REG_CONFIG 0x000c
+#define ADI_AXI_ADC_REG_CONFIG_CMOS_OR_LVDS_N BIT(7)
+
#define ADI_AXI_ADC_REG_CTRL 0x0044
#define ADI_AXI_ADC_CTRL_DDR_EDGESEL_MASK BIT(1)
+#define ADI_AXI_ADC_REG_CNTRL_3 0x004c
+#define AXI_AD485X_CNTRL_3_OS_EN_MSK BIT(2)
+#define AXI_AD485X_CNTRL_3_PACKET_FORMAT_MSK GENMASK(1, 0)
+#define AXI_AD485X_PACKET_FORMAT_20BIT 0x0
+#define AXI_AD485X_PACKET_FORMAT_24BIT 0x1
+#define AXI_AD485X_PACKET_FORMAT_32BIT 0x2
+
#define ADI_AXI_ADC_REG_DRP_STATUS 0x0074
#define ADI_AXI_ADC_DRP_LOCKED BIT(17)
@@ -73,6 +84,12 @@
#define ADI_AXI_ADC_REG_DELAY(l) (0x0800 + (l) * 0x4)
#define AXI_ADC_DELAY_CTRL_MASK GENMASK(4, 0)
+#define ADI_AXI_REG_CONFIG_WR 0x0080
+#define ADI_AXI_REG_CONFIG_RD 0x0084
+#define ADI_AXI_REG_CONFIG_CTRL 0x008c
+#define ADI_AXI_REG_CONFIG_CTRL_READ 0x03
+#define ADI_AXI_REG_CONFIG_CTRL_WRITE 0x01
+
#define ADI_AXI_ADC_MAX_IO_NUM_LANES 15
#define ADI_AXI_REG_CHAN_CTRL_DEFAULTS \
@@ -80,7 +97,20 @@
ADI_AXI_REG_CHAN_CTRL_FMT_EN | \
ADI_AXI_REG_CHAN_CTRL_ENABLE)
+#define ADI_AXI_REG_READ_BIT 0x8000
+#define ADI_AXI_REG_ADDRESS_MASK 0xff00
+#define ADI_AXI_REG_VALUE_MASK 0x00ff
+
+struct axi_adc_info {
+ unsigned int version;
+ const struct iio_backend_info *backend_info;
+ bool has_child_nodes;
+ const void *pdata;
+ unsigned int pdata_sz;
+};
+
struct adi_axi_adc_state {
+ const struct axi_adc_info *info;
struct regmap *regmap;
struct device *dev;
/* lock to protect multiple accesses to the device registers */
@@ -290,6 +320,88 @@ static int axi_adc_chan_disable(struct iio_backend *back, unsigned int chan)
ADI_AXI_REG_CHAN_CTRL_ENABLE);
}
+static int axi_adc_interface_type_get(struct iio_backend *back,
+ enum iio_backend_interface_type *type)
+{
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(st->regmap, ADI_AXI_ADC_REG_CONFIG, &val);
+ if (ret)
+ return ret;
+
+ if (val & ADI_AXI_ADC_REG_CONFIG_CMOS_OR_LVDS_N)
+ *type = IIO_BACKEND_INTERFACE_SERIAL_CMOS;
+ else
+ *type = IIO_BACKEND_INTERFACE_SERIAL_LVDS;
+
+ return 0;
+}
+
+static int axi_adc_ad485x_data_size_set(struct iio_backend *back,
+ unsigned int size)
+{
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+ unsigned int val;
+
+ switch (size) {
+ /*
+ * There are two variants of the AXI_AD485X IP block, a 16-bit and a
+ * 20-bit one.
+ * The 0x0 value (AXI_AD485X_PACKET_FORMAT_20BIT) also corresponds to
+ * the 16-bit variant of the IP block.
+ */
+ case 16:
+ case 20:
+ val = AXI_AD485X_PACKET_FORMAT_20BIT;
+ break;
+ case 24:
+ val = AXI_AD485X_PACKET_FORMAT_24BIT;
+ break;
+ /*
+ * The 0x2 value (AXI_AD485X_PACKET_FORMAT_32BIT) applies only to the
+ * 20-bit variant of the IP block. Setting this value correctly is
+ * the responsibility of the upper driver layers that call the
+ * axi-adc functions.
+ * For the 16-bit IP block, the 0x2 (AXI_AD485X_PACKET_FORMAT_32BIT)
+ * value is handled as the maximum available size, which is 24 bits
+ * for this configuration.
+ */
+ case 32:
+ val = AXI_AD485X_PACKET_FORMAT_32BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(st->regmap, ADI_AXI_ADC_REG_CNTRL_3,
+ AXI_AD485X_CNTRL_3_PACKET_FORMAT_MSK,
+ FIELD_PREP(AXI_AD485X_CNTRL_3_PACKET_FORMAT_MSK, val));
+}
+
+static int axi_adc_ad485x_oversampling_ratio_set(struct iio_backend *back,
+ unsigned int ratio)
+{
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+
+ /*
+ * This function currently only enables or disables oversampling in the
+ * REG_CNTRL_3 register: a ratio of 1 disables oversampling, while any
+ * value greater than 1 enables it.
+ */
+ switch (ratio) {
+ case 0:
+ return -EINVAL;
+ case 1:
+ return regmap_clear_bits(st->regmap, ADI_AXI_ADC_REG_CNTRL_3,
+ AXI_AD485X_CNTRL_3_OS_EN_MSK);
+ default:
+ return regmap_set_bits(st->regmap, ADI_AXI_ADC_REG_CNTRL_3,
+ AXI_AD485X_CNTRL_3_OS_EN_MSK);
+ }
+}
+
static struct iio_buffer *axi_adc_request_buffer(struct iio_backend *back,
struct iio_dev *indio_dev)
{
@@ -302,10 +414,79 @@ static struct iio_buffer *axi_adc_request_buffer(struct iio_backend *back,
return iio_dmaengine_buffer_setup(st->dev, indio_dev, dma_name);
}
+static int axi_adc_raw_write(struct iio_backend *back, u32 val)
+{
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+
+ regmap_write(st->regmap, ADI_AXI_REG_CONFIG_WR, val);
+ regmap_write(st->regmap, ADI_AXI_REG_CONFIG_CTRL,
+ ADI_AXI_REG_CONFIG_CTRL_WRITE);
+ fsleep(100);
+ regmap_write(st->regmap, ADI_AXI_REG_CONFIG_CTRL, 0x00);
+ fsleep(100);
+
+ return 0;
+}
+
+static int axi_adc_raw_read(struct iio_backend *back, u32 *val)
+{
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+
+ regmap_write(st->regmap, ADI_AXI_REG_CONFIG_CTRL,
+ ADI_AXI_REG_CONFIG_CTRL_READ);
+ fsleep(100);
+ regmap_read(st->regmap, ADI_AXI_REG_CONFIG_RD, val);
+ regmap_write(st->regmap, ADI_AXI_REG_CONFIG_CTRL, 0x00);
+ fsleep(100);
+
+ return 0;
+}
+
+static int ad7606_bus_reg_read(struct iio_backend *back, u32 reg, u32 *val)
+{
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+ int addr;
+
+ guard(mutex)(&st->lock);
+
+ /*
+ * The address is written in the high byte of the 16-bit word, and a
+ * set MSB indicates a read operation.
+ */
+ addr = FIELD_PREP(ADI_AXI_REG_ADDRESS_MASK, reg) | ADI_AXI_REG_READ_BIT;
+ axi_adc_raw_write(back, addr);
+ axi_adc_raw_read(back, val);
+
+ /* Write 0x0 on the bus to get back to ADC mode */
+ axi_adc_raw_write(back, 0);
+
+ return 0;
+}
+
+static int ad7606_bus_reg_write(struct iio_backend *back, u32 reg, u32 val)
+{
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+ u32 buf;
+
+ guard(mutex)(&st->lock);
+
+ /* Write any register to switch to register mode */
+ axi_adc_raw_write(back, 0xaf00);
+
+ buf = FIELD_PREP(ADI_AXI_REG_ADDRESS_MASK, reg) |
+ FIELD_PREP(ADI_AXI_REG_VALUE_MASK, val);
+ axi_adc_raw_write(back, buf);
+
+ /* Write 0x0 on the bus to get back to ADC mode */
+ axi_adc_raw_write(back, 0);
+
+ return 0;
+}
+
static void axi_adc_free_buffer(struct iio_backend *back,
struct iio_buffer *buffer)
{
- iio_dmaengine_buffer_free(buffer);
+ iio_dmaengine_buffer_teardown(buffer);
}
static int axi_adc_reg_access(struct iio_backend *back, unsigned int reg,
@@ -325,6 +506,36 @@ static const struct regmap_config axi_adc_regmap_config = {
.reg_stride = 4,
};
+static void axi_adc_child_remove(void *data)
+{
+ platform_device_unregister(data);
+}
+
+static int axi_adc_create_platform_device(struct adi_axi_adc_state *st,
+ struct fwnode_handle *child)
+{
+ struct platform_device_info pi = {
+ .parent = st->dev,
+ .name = fwnode_get_name(child),
+ .id = PLATFORM_DEVID_AUTO,
+ .fwnode = child,
+ .data = st->info->pdata,
+ .size_data = st->info->pdata_sz,
+ };
+ struct platform_device *pdev;
+ int ret;
+
+ pdev = platform_device_register_full(&pi);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ ret = devm_add_action_or_reset(st->dev, axi_adc_child_remove, pdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static const struct iio_backend_ops adi_axi_adc_ops = {
.enable = axi_adc_enable,
.disable = axi_adc_disable,
@@ -337,6 +548,7 @@ static const struct iio_backend_ops adi_axi_adc_ops = {
.iodelay_set = axi_adc_iodelays_set,
.test_pattern_set = axi_adc_test_pattern_set,
.chan_status = axi_adc_chan_status,
+ .interface_type_get = axi_adc_interface_type_get,
.debugfs_reg_access = iio_backend_debugfs_ptr(axi_adc_reg_access),
.debugfs_print_chan_status = iio_backend_debugfs_ptr(axi_adc_debugfs_print_chan_status),
};
@@ -346,9 +558,32 @@ static const struct iio_backend_info adi_axi_adc_generic = {
.ops = &adi_axi_adc_ops,
};
+static const struct iio_backend_ops adi_ad485x_ops = {
+ .enable = axi_adc_enable,
+ .disable = axi_adc_disable,
+ .data_format_set = axi_adc_data_format_set,
+ .chan_enable = axi_adc_chan_enable,
+ .chan_disable = axi_adc_chan_disable,
+ .request_buffer = axi_adc_request_buffer,
+ .free_buffer = axi_adc_free_buffer,
+ .data_sample_trigger = axi_adc_data_sample_trigger,
+ .iodelay_set = axi_adc_iodelays_set,
+ .chan_status = axi_adc_chan_status,
+ .interface_type_get = axi_adc_interface_type_get,
+ .data_size_set = axi_adc_ad485x_data_size_set,
+ .oversampling_ratio_set = axi_adc_ad485x_oversampling_ratio_set,
+ .debugfs_reg_access = iio_backend_debugfs_ptr(axi_adc_reg_access),
+ .debugfs_print_chan_status =
+ iio_backend_debugfs_ptr(axi_adc_debugfs_print_chan_status),
+};
+
+static const struct iio_backend_info axi_ad485x = {
+ .name = "axi-ad485x",
+ .ops = &adi_ad485x_ops,
+};
+
static int adi_axi_adc_probe(struct platform_device *pdev)
{
- const unsigned int *expected_ver;
struct adi_axi_adc_state *st;
void __iomem *base;
unsigned int ver;
@@ -370,8 +605,8 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(st->regmap),
"failed to init register map\n");
- expected_ver = device_get_match_data(&pdev->dev);
- if (!expected_ver)
+ st->info = device_get_match_data(&pdev->dev);
+ if (!st->info)
return -ENODEV;
clk = devm_clk_get_enabled(&pdev->dev, NULL);
@@ -391,23 +626,46 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (ADI_AXI_PCORE_VER_MAJOR(ver) != ADI_AXI_PCORE_VER_MAJOR(*expected_ver)) {
+ if (ADI_AXI_PCORE_VER_MAJOR(ver) !=
+ ADI_AXI_PCORE_VER_MAJOR(st->info->version)) {
dev_err(&pdev->dev,
"Major version mismatch. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n",
- ADI_AXI_PCORE_VER_MAJOR(*expected_ver),
- ADI_AXI_PCORE_VER_MINOR(*expected_ver),
- ADI_AXI_PCORE_VER_PATCH(*expected_ver),
+ ADI_AXI_PCORE_VER_MAJOR(st->info->version),
+ ADI_AXI_PCORE_VER_MINOR(st->info->version),
+ ADI_AXI_PCORE_VER_PATCH(st->info->version),
ADI_AXI_PCORE_VER_MAJOR(ver),
ADI_AXI_PCORE_VER_MINOR(ver),
ADI_AXI_PCORE_VER_PATCH(ver));
return -ENODEV;
}
- ret = devm_iio_backend_register(&pdev->dev, &adi_axi_adc_generic, st);
+ ret = devm_iio_backend_register(&pdev->dev, st->info->backend_info, st);
if (ret)
return dev_err_probe(&pdev->dev, ret,
"failed to register iio backend\n");
+ device_for_each_child_node_scoped(&pdev->dev, child) {
+ int val;
+
+ if (!st->info->has_child_nodes)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "invalid fdt axi-dac compatible.");
+
+ /* Only the node with reg == 0 is processed */
+ ret = fwnode_property_read_u32(child, "reg", &val);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "invalid reg property.");
+ if (val != 0)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "invalid node address.");
+
+ ret = axi_adc_create_platform_device(st, child);
+ if (ret)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "cannot create device.");
+ }
+
dev_info(&pdev->dev, "AXI ADC IP core (%d.%.2d.%c) probed\n",
ADI_AXI_PCORE_VER_MAJOR(ver),
ADI_AXI_PCORE_VER_MINOR(ver),
@@ -416,11 +674,34 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
return 0;
}
-static unsigned int adi_axi_adc_10_0_a_info = ADI_AXI_PCORE_VER(10, 0, 'a');
+static const struct axi_adc_info adc_generic = {
+ .version = ADI_AXI_PCORE_VER(10, 0, 'a'),
+ .backend_info = &adi_axi_adc_generic,
+};
+
+static const struct axi_adc_info adi_axi_ad485x = {
+ .version = ADI_AXI_PCORE_VER(10, 0, 'a'),
+ .backend_info = &axi_ad485x,
+};
+
+static const struct ad7606_platform_data ad7606_pdata = {
+ .bus_reg_read = ad7606_bus_reg_read,
+ .bus_reg_write = ad7606_bus_reg_write,
+};
+
+static const struct axi_adc_info adc_ad7606 = {
+ .version = ADI_AXI_PCORE_VER(10, 0, 'a'),
+ .backend_info = &adi_axi_adc_generic,
+ .pdata = &ad7606_pdata,
+ .pdata_sz = sizeof(ad7606_pdata),
+ .has_child_nodes = true,
+};
/* Match table for of_platform binding */
static const struct of_device_id adi_axi_adc_of_match[] = {
- { .compatible = "adi,axi-adc-10.0.a", .data = &adi_axi_adc_10_0_a_info },
+ { .compatible = "adi,axi-adc-10.0.a", .data = &adc_generic },
+ { .compatible = "adi,axi-ad485x", .data = &adi_axi_ad485x },
+ { .compatible = "adi,axi-ad7606x", .data = &adc_ad7606 },
{ /* end of list */ }
};
MODULE_DEVICE_TABLE(of, adi_axi_adc_of_match);
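To make the ad7606 register-mode word layout above concrete, here is a worked example (not code from the patch) of the 16-bit words produced for register 0x02:

	/* write 0x3f to register 0x02: address in the high byte, value in the low */
	u32 wr = FIELD_PREP(ADI_AXI_REG_ADDRESS_MASK, 0x02) |
		 FIELD_PREP(ADI_AXI_REG_VALUE_MASK, 0x3f);	/* 0x023f */

	/* read register 0x02: the MSB flags the access as a read */
	u32 rd = FIELD_PREP(ADI_AXI_REG_ADDRESS_MASK, 0x02) |
		 ADI_AXI_REG_READ_BIT;				/* 0x8200 */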
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index c3a1dea2aa82..414610afcb2c 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -9,6 +9,7 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
@@ -1826,19 +1827,10 @@ static int at91_adc_read_info_locked(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val)
{
struct at91_adc_state *st = iio_priv(indio_dev);
- int ret;
-
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- mutex_lock(&st->lock);
- ret = at91_adc_read_info_raw(indio_dev, chan, val);
- mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
+ guard(mutex)(&st->lock);
- return ret;
+ return at91_adc_read_info_raw(indio_dev, chan, val);
}
static void at91_adc_temp_sensor_configure(struct at91_adc_state *st,
@@ -1883,14 +1875,11 @@ static int at91_adc_read_temp(struct iio_dev *indio_dev,
u32 tmp;
int ret, vbg, vtemp;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = pm_runtime_resume_and_get(st->dev);
if (ret < 0)
- goto unlock;
+ return ret;
at91_adc_temp_sensor_configure(st, true);
@@ -1912,9 +1901,6 @@ restore_config:
at91_adc_temp_sensor_configure(st, false);
pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
-unlock:
- mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
@@ -1936,10 +1922,16 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct at91_adc_state *st = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- return at91_adc_read_info_locked(indio_dev, chan, val);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = at91_adc_read_info_locked(indio_dev, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SCALE:
*val = st->vref_uv / 1000;
@@ -1951,7 +1943,13 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_PROCESSED:
if (chan->type != IIO_TEMP)
return -EINVAL;
- return at91_adc_read_temp(indio_dev, chan, val);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = at91_adc_read_temp(indio_dev, chan, val);
+ iio_device_release_direct(indio_dev);
+
+ return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
*val = at91_adc_get_sample_freq(st);
@@ -1979,28 +1977,26 @@ static int at91_adc_write_raw(struct iio_dev *indio_dev,
if (val == st->oversampling_ratio)
return 0;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&st->lock);
/* update ratio */
ret = at91_adc_config_emr(st, val, 0);
mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
if (val < st->soc_info.min_sample_rate ||
val > st->soc_info.max_sample_rate)
return -EINVAL;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&st->lock);
at91_adc_setup_samp_freq(indio_dev, val,
st->soc_info.startup_time, 0);
mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return 0;
default:
return -EINVAL;
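Several conversions in this series (at91 above, max1027 and max11410 below) also replace manual mutex_lock()/mutex_unlock() pairs with the scope-based guard() helper from <linux/cleanup.h>. A minimal sketch with illustrative names:

static int example_locked_read(struct example_state *st, int *val)
{
	/* the lock is taken here and dropped automatically at every return */
	guard(mutex)(&st->lock);

	if (!st->ready)
		return -EBUSY;	/* no explicit unlock needed on error paths */

	*val = st->cached_value;

	return 0;
}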
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
index 221a5fdc1eaa..a1e48a756a7b 100644
--- a/drivers/iio/adc/dln2-adc.c
+++ b/drivers/iio/adc/dln2-adc.c
@@ -314,15 +314,14 @@ static int dln2_adc_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret < 0)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&dln2->mutex);
ret = dln2_adc_read(dln2, chan->channel);
mutex_unlock(&dln2->mutex);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index f5ba4a1b5a7d..7e736e77d8bb 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -336,10 +336,6 @@ static int max1027_read_single_value(struct iio_dev *indio_dev,
int ret;
struct max1027_state *st = iio_priv(indio_dev);
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
/* Configure conversion register with the requested chan */
st->reg = MAX1027_CONV_REG | MAX1027_CHAN(chan->channel) |
MAX1027_NOSCAN;
@@ -349,7 +345,7 @@ static int max1027_read_single_value(struct iio_dev *indio_dev,
if (ret < 0) {
dev_err(&indio_dev->dev,
"Failed to configure conversion register\n");
- goto release;
+ return ret;
}
/*
@@ -359,14 +355,10 @@ static int max1027_read_single_value(struct iio_dev *indio_dev,
*/
ret = max1027_wait_eoc(indio_dev);
if (ret)
- goto release;
+ return ret;
/* Read result */
ret = spi_read(st->spi, st->buffer, (chan->type == IIO_TEMP) ? 4 : 2);
-
-release:
- iio_device_release_direct_mode(indio_dev);
-
if (ret < 0)
return ret;
@@ -382,37 +374,32 @@ static int max1027_read_raw(struct iio_dev *indio_dev,
int ret = 0;
struct max1027_state *st = iio_priv(indio_dev);
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
ret = max1027_read_single_value(indio_dev, chan, val);
- break;
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_TEMP:
*val = 1;
*val2 = 8;
- ret = IIO_VAL_FRACTIONAL;
- break;
+ return IIO_VAL_FRACTIONAL;
case IIO_VOLTAGE:
*val = 2500;
*val2 = chan->scan_type.realbits;
- ret = IIO_VAL_FRACTIONAL_LOG2;
- break;
+ return IIO_VAL_FRACTIONAL_LOG2;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- break;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
-
- mutex_unlock(&st->lock);
-
- return ret;
}
static int max1027_debugfs_reg_access(struct iio_dev *indio_dev,
diff --git a/drivers/iio/adc/max11410.c b/drivers/iio/adc/max11410.c
index 76abafd47404..437d9f24b5a1 100644
--- a/drivers/iio/adc/max11410.c
+++ b/drivers/iio/adc/max11410.c
@@ -471,9 +471,8 @@ static int max11410_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&state->lock);
@@ -481,7 +480,7 @@ static int max11410_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&state->lock);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret)
return ret;
@@ -507,12 +506,37 @@ static int max11410_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+static int __max11410_write_samp_freq(struct max11410_state *st,
+ int val, int val2)
+{
+ int ret, i, reg_val, filter;
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_read(st->regmap, MAX11410_REG_FILTER, &reg_val);
+ if (ret)
+ return ret;
+
+ filter = FIELD_GET(MAX11410_FILTER_LINEF_MASK, reg_val);
+
+ for (i = 0; i < max11410_sampling_len[filter]; ++i) {
+ if (val == max11410_sampling_rates[filter][i][0] &&
+ val2 == max11410_sampling_rates[filter][i][1])
+ break;
+ }
+ if (i == max11410_sampling_len[filter])
+ return -EINVAL;
+
+ return regmap_write_bits(st->regmap, MAX11410_REG_FILTER,
+ MAX11410_FILTER_RATE_MASK, i);
+}
+
static int max11410_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
struct max11410_state *st = iio_priv(indio_dev);
- int i, ret, reg_val, filter, gain;
+ int ret, gain;
u32 *scale_avail;
switch (mask) {
@@ -525,9 +549,8 @@ static int max11410_write_raw(struct iio_dev *indio_dev,
if (val != 0 || val2 == 0)
return -EINVAL;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
/* Convert from INT_PLUS_MICRO to FRACTIONAL_LOG2 */
val2 = val2 * DIV_ROUND_CLOSEST(BIT(24), 1000000);
@@ -536,38 +559,15 @@ static int max11410_write_raw(struct iio_dev *indio_dev,
st->channels[chan->address].gain = clamp_val(gain, 0, 7);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return 0;
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- mutex_lock(&st->lock);
-
- ret = regmap_read(st->regmap, MAX11410_REG_FILTER, &reg_val);
- if (ret)
- goto out;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
- filter = FIELD_GET(MAX11410_FILTER_LINEF_MASK, reg_val);
-
- for (i = 0; i < max11410_sampling_len[filter]; ++i) {
- if (val == max11410_sampling_rates[filter][i][0] &&
- val2 == max11410_sampling_rates[filter][i][1])
- break;
- }
- if (i == max11410_sampling_len[filter]) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = regmap_write_bits(st->regmap, MAX11410_REG_FILTER,
- MAX11410_FILTER_RATE_MASK, i);
-
-out:
- mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
+ ret = __max11410_write_samp_freq(st, val, val2);
+ iio_device_release_direct(indio_dev);
return ret;
default:
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index e8d731bc34e0..35717ec082ce 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -364,55 +364,52 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
int *val,
long m)
{
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- s32 data;
- u8 rxbuf[2];
- struct max1363_state *st = iio_priv(indio_dev);
- struct i2c_client *client = st->client;
-
- guard(mutex)(&st->lock);
-
- /*
- * If monitor mode is enabled, the method for reading a single
- * channel will have to be rather different and has not yet
- * been implemented.
- *
- * Also, cannot read directly if buffered capture enabled.
- */
- if (st->monitor_on)
- return -EBUSY;
+ s32 data;
+ u8 rxbuf[2];
+ struct max1363_state *st = iio_priv(indio_dev);
+ struct i2c_client *client = st->client;
- /* Check to see if current scan mode is correct */
- if (st->current_mode != &max1363_mode_table[chan->address]) {
- int ret;
+ guard(mutex)(&st->lock);
- /* Update scan mode if needed */
- st->current_mode = &max1363_mode_table[chan->address];
- ret = max1363_set_scan_mode(st);
- if (ret < 0)
- return ret;
- }
- if (st->chip_info->bits != 8) {
- /* Get reading */
- data = st->recv(client, rxbuf, 2);
- if (data < 0)
- return data;
-
- data = get_unaligned_be16(rxbuf) &
- ((1 << st->chip_info->bits) - 1);
- } else {
- /* Get reading */
- data = st->recv(client, rxbuf, 1);
- if (data < 0)
- return data;
-
- data = rxbuf[0];
- }
- *val = data;
+ /*
+ * If monitor mode is enabled, the method for reading a single
+ * channel will have to be rather different and has not yet
+ * been implemented.
+ *
+ * Also, cannot read directly if buffered capture enabled.
+ */
+ if (st->monitor_on)
+ return -EBUSY;
+
+ /* Check to see if current scan mode is correct */
+ if (st->current_mode != &max1363_mode_table[chan->address]) {
+ int ret;
+
+ /* Update scan mode if needed */
+ st->current_mode = &max1363_mode_table[chan->address];
+ ret = max1363_set_scan_mode(st);
+ if (ret < 0)
+ return ret;
+ }
+ if (st->chip_info->bits != 8) {
+ /* Get reading */
+ data = st->recv(client, rxbuf, 2);
+ if (data < 0)
+ return data;
+
+ data = get_unaligned_be16(rxbuf) &
+ ((1 << st->chip_info->bits) - 1);
+ } else {
+ /* Get reading */
+ data = st->recv(client, rxbuf, 1);
+ if (data < 0)
+ return data;
- return 0;
+ data = rxbuf[0];
}
- unreachable();
+ *val = data;
+
+ return 0;
}
static int max1363_read_raw(struct iio_dev *indio_dev,
@@ -426,7 +423,11 @@ static int max1363_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
ret = max1363_read_single_chan(indio_dev, chan, val, m);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
return IIO_VAL_INT;
@@ -947,46 +948,58 @@ error_ret:
return ret;
}
-static int max1363_write_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan, enum iio_event_type type,
+static int __max1363_write_event_config(struct max1363_state *st,
+ const struct iio_chan_spec *chan,
enum iio_event_direction dir, bool state)
{
- struct max1363_state *st = iio_priv(indio_dev);
-
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- int number = chan->channel;
- u16 unifiedmask;
- int ret;
+ int number = chan->channel;
+ u16 unifiedmask;
+ int ret;
- guard(mutex)(&st->lock);
+ guard(mutex)(&st->lock);
- unifiedmask = st->mask_low | st->mask_high;
- if (dir == IIO_EV_DIR_FALLING) {
+ unifiedmask = st->mask_low | st->mask_high;
+ if (dir == IIO_EV_DIR_FALLING) {
- if (state == 0)
- st->mask_low &= ~(1 << number);
- else {
- ret = __max1363_check_event_mask((1 << number),
- unifiedmask);
- if (ret)
- return ret;
- st->mask_low |= (1 << number);
- }
- } else {
- if (state == 0)
- st->mask_high &= ~(1 << number);
- else {
- ret = __max1363_check_event_mask((1 << number),
- unifiedmask);
- if (ret)
- return ret;
- st->mask_high |= (1 << number);
- }
+ if (state == 0)
+ st->mask_low &= ~(1 << number);
+ else {
+ ret = __max1363_check_event_mask((1 << number),
+ unifiedmask);
+ if (ret)
+ return ret;
+ st->mask_low |= (1 << number);
+ }
+ } else {
+ if (state == 0)
+ st->mask_high &= ~(1 << number);
+ else {
+ ret = __max1363_check_event_mask((1 << number),
+ unifiedmask);
+ if (ret)
+ return ret;
+ st->mask_high |= (1 << number);
}
}
- max1363_monitor_mode_update(st, !!(st->mask_high | st->mask_low));
return 0;
+}
+
+static int max1363_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, bool state)
+{
+ struct max1363_state *st = iio_priv(indio_dev);
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = __max1363_write_event_config(st, chan, dir, state);
+ iio_device_release_direct(indio_dev);
+ max1363_monitor_mode_update(st, !!(st->mask_high | st->mask_low));
+
+ return ret;
}
/*
diff --git a/drivers/iio/adc/max34408.c b/drivers/iio/adc/max34408.c
index 971e6e5dee9b..4f45fd22a90c 100644
--- a/drivers/iio/adc/max34408.c
+++ b/drivers/iio/adc/max34408.c
@@ -8,6 +8,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/module.h>
diff --git a/drivers/iio/adc/pac1921.c b/drivers/iio/adc/pac1921.c
index 63f518215156..beb5511c4504 100644
--- a/drivers/iio/adc/pac1921.c
+++ b/drivers/iio/adc/pac1921.c
@@ -7,6 +7,7 @@
#include <linux/unaligned.h>
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/i2c.h>
#include <linux/iio/events.h>
#include <linux/iio/iio.h>
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index a29e54754c8f..9a099df79518 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Rockchip Successive Approximation Register (SAR) A/D Converter
- * Copyright (C) 2014 ROCKCHIP, Inc.
+ * Copyright (C) 2014 Rockchip Electronics Co., Ltd.
*/
#include <linux/bitfield.h>
@@ -275,6 +275,40 @@ static const struct rockchip_saradc_data rk3399_saradc_data = {
.power_down = rockchip_saradc_power_down_v1,
};
+static const struct iio_chan_spec rockchip_rk3528_saradc_iio_channels[] = {
+ SARADC_CHANNEL(0, "adc0", 10),
+ SARADC_CHANNEL(1, "adc1", 10),
+ SARADC_CHANNEL(2, "adc2", 10),
+ SARADC_CHANNEL(3, "adc3", 10),
+};
+
+static const struct rockchip_saradc_data rk3528_saradc_data = {
+ .channels = rockchip_rk3528_saradc_iio_channels,
+ .num_channels = ARRAY_SIZE(rockchip_rk3528_saradc_iio_channels),
+ .clk_rate = 1000000,
+ .start = rockchip_saradc_start_v2,
+ .read = rockchip_saradc_read_v2,
+};
+
+static const struct iio_chan_spec rockchip_rk3562_saradc_iio_channels[] = {
+ SARADC_CHANNEL(0, "adc0", 10),
+ SARADC_CHANNEL(1, "adc1", 10),
+ SARADC_CHANNEL(2, "adc2", 10),
+ SARADC_CHANNEL(3, "adc3", 10),
+ SARADC_CHANNEL(4, "adc4", 10),
+ SARADC_CHANNEL(5, "adc5", 10),
+ SARADC_CHANNEL(6, "adc6", 10),
+ SARADC_CHANNEL(7, "adc7", 10),
+};
+
+static const struct rockchip_saradc_data rk3562_saradc_data = {
+ .channels = rockchip_rk3562_saradc_iio_channels,
+ .num_channels = ARRAY_SIZE(rockchip_rk3562_saradc_iio_channels),
+ .clk_rate = 1000000,
+ .start = rockchip_saradc_start_v2,
+ .read = rockchip_saradc_read_v2,
+};
+
static const struct iio_chan_spec rockchip_rk3568_saradc_iio_channels[] = {
SARADC_CHANNEL(0, "adc0", 10),
SARADC_CHANNEL(1, "adc1", 10),
@@ -325,6 +359,12 @@ static const struct of_device_id rockchip_saradc_match[] = {
.compatible = "rockchip,rk3399-saradc",
.data = &rk3399_saradc_data,
}, {
+ .compatible = "rockchip,rk3528-saradc",
+ .data = &rk3528_saradc_data,
+ }, {
+ .compatible = "rockchip,rk3562-saradc",
+ .data = &rk3562_saradc_data,
+ }, {
.compatible = "rockchip,rk3568-saradc",
.data = &rk3568_saradc_data,
}, {
diff --git a/drivers/iio/adc/rtq6056.c b/drivers/iio/adc/rtq6056.c
index 337bc8b31b2c..54239df61d86 100644
--- a/drivers/iio/adc/rtq6056.c
+++ b/drivers/iio/adc/rtq6056.c
@@ -514,26 +514,37 @@ static int rtq6056_adc_read_avail(struct iio_dev *indio_dev,
}
}
-static int rtq6056_adc_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan, int val,
- int val2, long mask)
+static int __rtq6056_adc_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ long mask)
{
struct rtq6056_priv *priv = iio_priv(indio_dev);
const struct richtek_dev_data *devdata = priv->devdata;
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- switch (mask) {
- case IIO_CHAN_INFO_SAMP_FREQ:
- if (devdata->fixed_samp_freq)
- return -EINVAL;
- return rtq6056_adc_set_samp_freq(priv, chan, val);
- case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- return devdata->set_average(priv, val);
- default:
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (devdata->fixed_samp_freq)
return -EINVAL;
- }
+ return rtq6056_adc_set_samp_freq(priv, chan, val);
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return devdata->set_average(priv, val);
+ default:
+ return -EINVAL;
}
- unreachable();
+}
+
+static int rtq6056_adc_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = __rtq6056_adc_write_raw(indio_dev, chan, val, mask);
+ iio_device_release_direct(indio_dev);
+ return ret;
}
static const char *rtq6056_channel_labels[RTQ6056_MAX_CHANNEL] = {
@@ -590,9 +601,8 @@ static ssize_t shunt_resistor_store(struct device *dev,
struct rtq6056_priv *priv = iio_priv(indio_dev);
int val, val_fract, ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = iio_str_to_fixpoint(buf, 100000, &val, &val_fract);
if (ret)
@@ -601,7 +611,7 @@ static ssize_t shunt_resistor_store(struct device *dev,
ret = rtq6056_set_shunt_resistor(priv, val * 1000000 + val_fract);
out_store:
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret ?: len;
}
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 2201ee9987ae..0914148d1a22 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -615,8 +615,7 @@ static int stm32_adc_core_switches_probe(struct device *dev,
}
/* Booster can be used to supply analog switches (optional) */
- if (priv->cfg->has_syscfg & HAS_VBOOSTER &&
- of_property_read_bool(np, "booster-supply")) {
+ if (priv->cfg->has_syscfg & HAS_VBOOSTER) {
priv->booster = devm_regulator_get_optional(dev, "booster");
if (IS_ERR(priv->booster)) {
ret = PTR_ERR(priv->booster);
@@ -628,8 +627,7 @@ static int stm32_adc_core_switches_probe(struct device *dev,
}
/* Vdd can be used to supply analog switches (optional) */
- if (priv->cfg->has_syscfg & HAS_ANASWVDD &&
- of_property_read_bool(np, "vdd-supply")) {
+ if (priv->cfg->has_syscfg & HAS_ANASWVDD) {
priv->vdd = devm_regulator_get_optional(dev, "vdd");
if (IS_ERR(priv->vdd)) {
ret = PTR_ERR(priv->vdd);
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 9d3b23efcc06..5dbf5f136768 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -1471,9 +1471,8 @@ static int stm32_adc_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
case IIO_CHAN_INFO_PROCESSED:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
if (chan->type == IIO_VOLTAGE)
ret = stm32_adc_single_conv(indio_dev, chan, val);
else
@@ -1482,7 +1481,7 @@ static int stm32_adc_read_raw(struct iio_dev *indio_dev,
if (mask == IIO_CHAN_INFO_PROCESSED)
*val = STM32_ADC_VREFINT_VOLTAGE * adc->vrefint.vrefint_cal / *val;
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_SCALE:
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index fe11b0d8eab3..726ddafc9f6d 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -1275,9 +1275,8 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = stm32_dfsdm_compute_all_osrs(indio_dev, val);
if (!ret) {
@@ -1287,25 +1286,56 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
adc->oversamp = val;
adc->sample_freq = spi_freq / val;
}
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
if (!val)
return -EINVAL;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = dfsdm_adc_set_samp_freq(indio_dev, val, spi_freq);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
return -EINVAL;
}
+static int __stm32_dfsdm_read_info_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int ret = 0;
+
+ if (adc->hwc)
+ ret = iio_hw_consumer_enable(adc->hwc);
+ if (adc->backend)
+ ret = iio_backend_enable(adc->backend[chan->scan_index]);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "%s: IIO enable failed (channel %d)\n",
+ __func__, chan->channel);
+ return ret;
+ }
+ ret = stm32_dfsdm_single_conv(indio_dev, chan, val);
+ if (adc->hwc)
+ iio_hw_consumer_disable(adc->hwc);
+ if (adc->backend)
+ iio_backend_disable(adc->backend[chan->scan_index]);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "%s: Conversion failed (channel %d)\n",
+ __func__, chan->channel);
+ return ret;
+ }
+
+ return 0;
+}
+
static int stm32_dfsdm_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val,
int *val2, long mask)
@@ -1323,33 +1353,13 @@ static int stm32_dfsdm_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = __stm32_dfsdm_read_info_raw(indio_dev, chan, val);
+ iio_device_release_direct(indio_dev);
if (ret)
return ret;
- if (adc->hwc)
- ret = iio_hw_consumer_enable(adc->hwc);
- if (adc->backend)
- ret = iio_backend_enable(adc->backend[idx]);
- if (ret < 0) {
- dev_err(&indio_dev->dev,
- "%s: IIO enable failed (channel %d)\n",
- __func__, chan->channel);
- iio_device_release_direct_mode(indio_dev);
- return ret;
- }
- ret = stm32_dfsdm_single_conv(indio_dev, chan, val);
- if (adc->hwc)
- iio_hw_consumer_disable(adc->hwc);
- if (adc->backend)
- iio_backend_disable(adc->backend[idx]);
- if (ret < 0) {
- dev_err(&indio_dev->dev,
- "%s: Conversion failed (channel %d)\n",
- __func__, chan->channel);
- iio_device_release_direct_mode(indio_dev);
- return ret;
- }
- iio_device_release_direct_mode(indio_dev);
return IIO_VAL_INT;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c
index da16876c32ae..9c845ee01697 100644
--- a/drivers/iio/adc/ti-adc084s021.c
+++ b/drivers/iio/adc/ti-adc084s021.c
@@ -96,19 +96,18 @@ static int adc084s021_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret < 0)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = regulator_enable(adc->reg);
if (ret) {
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
adc->tx_buf[0] = channel->channel << 3;
ret = adc084s021_adc_conversion(adc, &be_val);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
regulator_disable(adc->reg);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/ti-adc108s102.c b/drivers/iio/adc/ti-adc108s102.c
index 9758ac801310..7d615e2bbf39 100644
--- a/drivers/iio/adc/ti-adc108s102.c
+++ b/drivers/iio/adc/ti-adc108s102.c
@@ -181,13 +181,12 @@ static int adc108s102_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = adc108s102_scan_direct(st, chan->address);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/ti-adc161s626.c b/drivers/iio/adc/ti-adc161s626.c
index 474e733fb8e0..28aa6b80160c 100644
--- a/drivers/iio/adc/ti-adc161s626.c
+++ b/drivers/iio/adc/ti-adc161s626.c
@@ -137,13 +137,13 @@ static int ti_adc_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- ret = ti_adc_read_measurement(data, chan, val);
- if (ret)
- return ret;
- return IIO_VAL_INT;
- }
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = ti_adc_read_measurement(data, chan, val);
+ iio_device_release_direct(indio_dev);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
ret = regulator_get_voltage(data->ref);
if (ret < 0)
diff --git a/drivers/iio/adc/ti-ads1119.c b/drivers/iio/adc/ti-ads1119.c
index de019b3faa48..f120e7e21cff 100644
--- a/drivers/iio/adc/ti-ads1119.c
+++ b/drivers/iio/adc/ti-ads1119.c
@@ -336,19 +336,24 @@ static int ads1119_read_raw(struct iio_dev *indio_dev,
{
struct ads1119_state *st = iio_priv(indio_dev);
unsigned int index = chan->address;
+ int ret;
if (index >= st->num_channels_cfg)
return -EINVAL;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return ads1119_single_conversion(st, chan, val, false);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = ads1119_single_conversion(st, chan, val, false);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_OFFSET:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return ads1119_single_conversion(st, chan, val, true);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = ads1119_single_conversion(st, chan, val, true);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SCALE:
*val = st->vref_uV / 1000;
*val /= st->channels_cfg[index].gain;
diff --git a/drivers/iio/adc/ti-ads124s08.c b/drivers/iio/adc/ti-ads124s08.c
index f452f57f11c9..77c299bb4ebc 100644
--- a/drivers/iio/adc/ti-ads124s08.c
+++ b/drivers/iio/adc/ti-ads124s08.c
@@ -184,7 +184,7 @@ static int ads124s_reset(struct iio_dev *indio_dev)
if (priv->reset_gpio) {
gpiod_set_value_cansleep(priv->reset_gpio, 0);
- udelay(200);
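+ /* fsleep() picks a suitable sleep primitive for the 200 us delay */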
+ fsleep(200);
gpiod_set_value_cansleep(priv->reset_gpio, 1);
} else {
return ads124s_write_cmd(indio_dev, ADS124S08_CMD_RESET);
diff --git a/drivers/iio/adc/ti-ads1298.c b/drivers/iio/adc/ti-ads1298.c
index 03f762415fa5..ae30b47e4514 100644
--- a/drivers/iio/adc/ti-ads1298.c
+++ b/drivers/iio/adc/ti-ads1298.c
@@ -319,13 +319,12 @@ static int ads1298_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = ads1298_read_one(priv, chan->scan_index);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret)
return ret;
diff --git a/drivers/iio/adc/ti-ads131e08.c b/drivers/iio/adc/ti-ads131e08.c
index 91a79ebc4bde..c6096b64664e 100644
--- a/drivers/iio/adc/ti-ads131e08.c
+++ b/drivers/iio/adc/ti-ads131e08.c
@@ -505,12 +505,11 @@ static int ads131e08_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = ads131e08_read_direct(indio_dev, channel, value);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret)
return ret;
@@ -551,12 +550,11 @@ static int ads131e08_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = ads131e08_set_data_rate(st, value);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
default:
diff --git a/drivers/iio/adc/ti-ads7138.c b/drivers/iio/adc/ti-ads7138.c
new file mode 100644
index 000000000000..ee5c1b8e3a8e
--- /dev/null
+++ b/drivers/iio/adc/ti-ads7138.c
@@ -0,0 +1,749 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ADS7138 - Texas Instruments Analog-to-Digital Converter
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+
+/*
+ * Always assume 16-bit resolution, as the HW registers are aligned that way
+ * and, with oversampling/averaging enabled, the result actually corresponds
+ * to 16 bits.
+ */
+#define ADS7138_RES_BITS 16
+
+/* ADS7138 operation codes */
+#define ADS7138_OPCODE_SINGLE_WRITE 0x08
+#define ADS7138_OPCODE_SET_BIT 0x18
+#define ADS7138_OPCODE_CLEAR_BIT 0x20
+#define ADS7138_OPCODE_BLOCK_WRITE 0x28
+#define ADS7138_OPCODE_BLOCK_READ 0x30
+
+/* ADS7138 registers */
+#define ADS7138_REG_GENERAL_CFG 0x01
+#define ADS7138_REG_OSR_CFG 0x03
+#define ADS7138_REG_OPMODE_CFG 0x04
+#define ADS7138_REG_SEQUENCE_CFG 0x10
+#define ADS7138_REG_AUTO_SEQ_CH_SEL 0x12
+#define ADS7138_REG_ALERT_CH_SEL 0x14
+#define ADS7138_REG_EVENT_FLAG 0x18
+#define ADS7138_REG_EVENT_HIGH_FLAG 0x1A
+#define ADS7138_REG_EVENT_LOW_FLAG 0x1C
+#define ADS7138_REG_HIGH_TH_HYS_CH(x) ((x) * 4 + 0x20)
+#define ADS7138_REG_LOW_TH_CNT_CH(x) ((x) * 4 + 0x22)
+#define ADS7138_REG_MAX_LSB_CH(x) ((x) * 2 + 0x60)
+#define ADS7138_REG_MIN_LSB_CH(x) ((x) * 2 + 0x80)
+#define ADS7138_REG_RECENT_LSB_CH(x) ((x) * 2 + 0xA0)
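+/*
+ * Example of the per-channel register layout above: for channel 3 these
+ * macros resolve to 3 * 4 + 0x20 = 0x2C (high threshold/hysteresis),
+ * 3 * 4 + 0x22 = 0x2E (low threshold/count) and 3 * 2 + 0xA0 = 0xA6
+ * (most recent conversion LSB).
+ */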
+
+#define ADS7138_GENERAL_CFG_RST BIT(0)
+#define ADS7138_GENERAL_CFG_DWC_EN BIT(4)
+#define ADS7138_GENERAL_CFG_STATS_EN BIT(5)
+#define ADS7138_OSR_CFG_MASK GENMASK(2, 0)
+#define ADS7138_OPMODE_CFG_CONV_MODE BIT(5)
+#define ADS7138_OPMODE_CFG_FREQ_MASK GENMASK(4, 0)
+#define ADS7138_SEQUENCE_CFG_SEQ_MODE BIT(0)
+#define ADS7138_SEQUENCE_CFG_SEQ_START BIT(4)
+#define ADS7138_THRESHOLD_LSB_MASK GENMASK(7, 4)
+
+enum ads7138_modes {
+ ADS7138_MODE_MANUAL,
+ ADS7138_MODE_AUTO,
+};
+
+struct ads7138_chip_data {
+ const char *name;
+ const int channel_num;
+};
+
+struct ads7138_data {
+ /* Protects RMW access to the I2C interface */
+ struct mutex lock;
+ struct i2c_client *client;
+ struct regulator *vref_regu;
+ const struct ads7138_chip_data *chip_data;
+};
+
+/*
+ * 2D array of available sampling frequencies and the corresponding register
+ * values. Structured like this so it is directly usable in the read_avail
+ * callback.
+ */
+static const int ads7138_samp_freqs_bits[2][26] = {
+ {
+ 163, 244, 326, 488, 651, 977, 1302, 1953,
+ 2604, 3906, 5208, 7813, 10417, 15625, 20833, 31250,
+ 41667, 62500, 83333, 125000, 166667, 250000, 333333, 500000,
+ 666667, 1000000
+ }, {
+ 0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18,
+ 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
+ /* Hole here: the skipped register values map to duplicate frequencies */
+ 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02,
+ 0x01, 0x00
+ }
+};
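+
+/*
+ * The two rows are parallel: ads7138_samp_freqs_bits[0][i] is a frequency in
+ * Hz and ads7138_samp_freqs_bits[1][i] the register value selecting it, e.g.
+ * index 0 maps 163 Hz to 0x1f and the last index maps 1000000 Hz to 0x00.
+ */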
+
+static const int ads7138_oversampling_ratios[] = {
+ 1, 2, 4, 8, 16, 32, 64, 128
+};
+
+static int ads7138_i2c_write_block(const struct i2c_client *client, u8 reg,
+ u8 *values, u8 length)
+{
+ int ret;
+ int len = length + 2; /* "+ 2" for OPCODE and reg */
+
+ u8 *buf __free(kfree) = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = ADS7138_OPCODE_BLOCK_WRITE;
+ buf[1] = reg;
+ memcpy(&buf[2], values, length);
+
+ ret = i2c_master_send(client, buf, len);
+ if (ret < 0)
+ return ret;
+ if (ret != len)
+ return -EIO;
+
+ return 0;
+}
+
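+/*
+ * All accesses are framed as opcode + register address (+ payload); e.g. the
+ * block write above sends { ADS7138_OPCODE_BLOCK_WRITE, reg, data[0], ...,
+ * data[length - 1] } as a single I2C transfer.
+ */
+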
+static int ads7138_i2c_write_with_opcode(const struct i2c_client *client,
+ u8 reg, u8 regval, u8 opcode)
+{
+ u8 buf[3] = { opcode, reg, regval };
+ int ret;
+
+ ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+ if (ret < 0)
+ return ret;
+ if (ret != ARRAY_SIZE(buf))
+ return -EIO;
+
+ return 0;
+}
+
+static int ads7138_i2c_write(const struct i2c_client *client, u8 reg, u8 value)
+{
+ return ads7138_i2c_write_with_opcode(client, reg, value,
+ ADS7138_OPCODE_SINGLE_WRITE);
+}
+
+static int ads7138_i2c_set_bit(const struct i2c_client *client, u8 reg, u8 bits)
+{
+ return ads7138_i2c_write_with_opcode(client, reg, bits,
+ ADS7138_OPCODE_SET_BIT);
+}
+
+static int ads7138_i2c_clear_bit(const struct i2c_client *client, u8 reg, u8 bits)
+{
+ return ads7138_i2c_write_with_opcode(client, reg, bits,
+ ADS7138_OPCODE_CLEAR_BIT);
+}
+
+static int ads7138_i2c_read_block(const struct i2c_client *client, u8 reg,
+ u8 *out_values, u8 length)
+{
+ u8 buf[2] = { ADS7138_OPCODE_BLOCK_READ, reg };
+ int ret;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .len = ARRAY_SIZE(buf),
+ .buf = buf,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = length,
+ .buf = out_values,
+ },
+ };
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ return ret;
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
+ return 0;
+}
+
+static int ads7138_i2c_read(const struct i2c_client *client, u8 reg)
+{
+ u8 value;
+ int ret;
+
+ ret = ads7138_i2c_read_block(client, reg, &value, sizeof(value));
+ if (ret)
+ return ret;
+ return value;
+}
+
+static int ads7138_freq_to_bits(int freq)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ads7138_samp_freqs_bits[0]); i++)
+ if (freq == ads7138_samp_freqs_bits[0][i])
+ return ads7138_samp_freqs_bits[1][i];
+
+ return -EINVAL;
+}
+
+static int ads7138_bits_to_freq(int bits)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ads7138_samp_freqs_bits[1]); i++)
+ if (bits == ads7138_samp_freqs_bits[1][i])
+ return ads7138_samp_freqs_bits[0][i];
+
+ return -EINVAL;
+}
+
+static int ads7138_osr_to_bits(int osr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ads7138_oversampling_ratios); i++)
+ if (osr == ads7138_oversampling_ratios[i])
+ return i;
+
+ return -EINVAL;
+}
+
+static int ads7138_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct ads7138_data *data = iio_priv(indio_dev);
+ int ret, vref, bits;
+ u8 values[2];
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = ads7138_i2c_read_block(data->client,
+ ADS7138_REG_RECENT_LSB_CH(chan->channel),
+ values, ARRAY_SIZE(values));
+ if (ret)
+ return ret;
+
+ *val = get_unaligned_le16(values);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_PEAK:
+ ret = ads7138_i2c_read_block(data->client,
+ ADS7138_REG_MAX_LSB_CH(chan->channel),
+ values, ARRAY_SIZE(values));
+ if (ret)
+ return ret;
+
+ *val = get_unaligned_le16(values);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_TROUGH:
+ ret = ads7138_i2c_read_block(data->client,
+ ADS7138_REG_MIN_LSB_CH(chan->channel),
+ values, ARRAY_SIZE(values));
+ if (ret)
+ return ret;
+
+ *val = get_unaligned_le16(values);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = ads7138_i2c_read(data->client, ADS7138_REG_OPMODE_CFG);
+ if (ret < 0)
+ return ret;
+
+ bits = FIELD_GET(ADS7138_OPMODE_CFG_FREQ_MASK, ret);
+ *val = ads7138_bits_to_freq(bits);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ vref = regulator_get_voltage(data->vref_regu);
+ if (vref < 0)
+ return vref;
+ *val = vref / 1000;
+ *val2 = ADS7138_RES_BITS;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ ret = ads7138_i2c_read(data->client, ADS7138_REG_OSR_CFG);
+ if (ret < 0)
+ return ret;
+
+ bits = FIELD_GET(ADS7138_OSR_CFG_MASK, ret);
+ *val = ads7138_oversampling_ratios[bits];
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ads7138_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct ads7138_data *data = iio_priv(indio_dev);
+ int bits, ret;
+ u8 value;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ: {
+ bits = ads7138_freq_to_bits(val);
+ if (bits < 0)
+ return bits;
+
+ guard(mutex)(&data->lock);
+ ret = ads7138_i2c_read(data->client, ADS7138_REG_OPMODE_CFG);
+ if (ret < 0)
+ return ret;
+
+ value = ret & ~ADS7138_OPMODE_CFG_FREQ_MASK;
+ value |= FIELD_PREP(ADS7138_OPMODE_CFG_FREQ_MASK, bits);
+ return ads7138_i2c_write(data->client, ADS7138_REG_OPMODE_CFG,
+ value);
+ }
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ bits = ads7138_osr_to_bits(val);
+ if (bits < 0)
+ return bits;
+
+ return ads7138_i2c_write(data->client, ADS7138_REG_OSR_CFG,
+ bits);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ads7138_read_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val, int *val2)
+{
+ struct ads7138_data *data = iio_priv(indio_dev);
+ u8 reg, values[2];
+ int ret;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ reg = (dir == IIO_EV_DIR_RISING) ?
+ ADS7138_REG_HIGH_TH_HYS_CH(chan->channel) :
+ ADS7138_REG_LOW_TH_CNT_CH(chan->channel);
+ ret = ads7138_i2c_read_block(data->client, reg, values,
+ ARRAY_SIZE(values));
+ if (ret)
+ return ret;
+
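+ /*
+ * The 12-bit threshold is split across two registers: bits 11:4
+ * come from the MSB register (values[1]) and bits 3:0 from the
+ * upper nibble of the LSB register (values[0]).
+ */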
+ *val = ((values[1] << 4) | (values[0] >> 4));
+ return IIO_VAL_INT;
+ case IIO_EV_INFO_HYSTERESIS:
+ ret = ads7138_i2c_read(data->client,
+ ADS7138_REG_HIGH_TH_HYS_CH(chan->channel));
+ if (ret < 0)
+ return ret;
+
+ *val = ret & ~ADS7138_THRESHOLD_LSB_MASK;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ads7138_write_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ struct ads7138_data *data = iio_priv(indio_dev);
+ u8 reg, values[2];
+ int ret;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE: {
+ if (val >= BIT(12) || val < 0)
+ return -EINVAL;
+
+ reg = (dir == IIO_EV_DIR_RISING) ?
+ ADS7138_REG_HIGH_TH_HYS_CH(chan->channel) :
+ ADS7138_REG_LOW_TH_CNT_CH(chan->channel);
+
+ guard(mutex)(&data->lock);
+ ret = ads7138_i2c_read(data->client, reg);
+ if (ret < 0)
+ return ret;
+
+ values[0] = ret & ~ADS7138_THRESHOLD_LSB_MASK;
+ values[0] |= FIELD_PREP(ADS7138_THRESHOLD_LSB_MASK, val);
+ values[1] = (val >> 4);
+ return ads7138_i2c_write_block(data->client, reg, values,
+ ARRAY_SIZE(values));
+ }
+ case IIO_EV_INFO_HYSTERESIS: {
+ if (val >= BIT(4) || val < 0)
+ return -EINVAL;
+
+ reg = ADS7138_REG_HIGH_TH_HYS_CH(chan->channel);
+
+ guard(mutex)(&data->lock);
+ ret = ads7138_i2c_read(data->client, reg);
+ if (ret < 0)
+ return ret;
+
+ values[0] = val & ~ADS7138_THRESHOLD_LSB_MASK;
+ values[0] |= FIELD_PREP(ADS7138_THRESHOLD_LSB_MASK, ret >> 4);
+ return ads7138_i2c_write(data->client, reg, values[0]);
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ads7138_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct ads7138_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (dir != IIO_EV_DIR_EITHER)
+ return -EINVAL;
+
+ ret = ads7138_i2c_read(data->client, ADS7138_REG_ALERT_CH_SEL);
+ if (ret < 0)
+ return ret;
+
+ return (ret & BIT(chan->channel)) ? 1 : 0;
+}
+
+static int ads7138_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, bool state)
+{
+ struct ads7138_data *data = iio_priv(indio_dev);
+
+ if (dir != IIO_EV_DIR_EITHER)
+ return -EINVAL;
+
+ if (state)
+ return ads7138_i2c_set_bit(data->client,
+ ADS7138_REG_ALERT_CH_SEL,
+ BIT(chan->channel));
+ else
+ return ads7138_i2c_clear_bit(data->client,
+ ADS7138_REG_ALERT_CH_SEL,
+ BIT(chan->channel));
+}
+
+static int ads7138_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = ads7138_samp_freqs_bits[0];
+ *length = ARRAY_SIZE(ads7138_samp_freqs_bits[0]);
+ *type = IIO_VAL_INT;
+
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *vals = ads7138_oversampling_ratios;
+ *length = ARRAY_SIZE(ads7138_oversampling_ratios);
+ *type = IIO_VAL_INT;
+
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
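+
+/*
+ * The availability lists above back the sysfs *_available attributes that
+ * the IIO core creates for the SAMP_FREQ and OVERSAMPLING_RATIO elements
+ * declared in the channel spec below.
+ */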
+
+static const struct iio_info ti_ads7138_info = {
+ .read_raw = &ads7138_read_raw,
+ .read_avail = &ads7138_read_avail,
+ .write_raw = &ads7138_write_raw,
+ .read_event_value = &ads7138_read_event,
+ .write_event_value = &ads7138_write_event,
+ .read_event_config = &ads7138_read_event_config,
+ .write_event_config = &ads7138_write_event_config,
+};
+
+static const struct iio_event_spec ads7138_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE)
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_HYSTERESIS) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+#define ADS7138_V_CHAN(_chan) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _chan, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_PEAK) | \
+ BIT(IIO_CHAN_INFO_TROUGH), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .datasheet_name = "AIN"#_chan, \
+ .event_spec = ads7138_events, \
+ .num_event_specs = ARRAY_SIZE(ads7138_events), \
+}
+
+static const struct iio_chan_spec ads7138_channels[] = {
+ ADS7138_V_CHAN(0),
+ ADS7138_V_CHAN(1),
+ ADS7138_V_CHAN(2),
+ ADS7138_V_CHAN(3),
+ ADS7138_V_CHAN(4),
+ ADS7138_V_CHAN(5),
+ ADS7138_V_CHAN(6),
+ ADS7138_V_CHAN(7),
+};
+
+static irqreturn_t ads7138_event_handler(int irq, void *priv)
+{
+ struct iio_dev *indio_dev = priv;
+ struct ads7138_data *data = iio_priv(indio_dev);
+ struct device *dev = &data->client->dev;
+ u8 i, events_high, events_low;
+ u64 code;
+ int ret;
+
+ /* Check if the interrupt was triggered by us */
+ ret = ads7138_i2c_read(data->client, ADS7138_REG_EVENT_FLAG);
+ if (ret <= 0)
+ return IRQ_NONE;
+
+ ret = ads7138_i2c_read(data->client, ADS7138_REG_EVENT_HIGH_FLAG);
+ if (ret < 0) {
+ dev_warn(dev, "Failed to read event high flags: %d\n", ret);
+ return IRQ_HANDLED;
+ }
+ events_high = ret;
+
+ ret = ads7138_i2c_read(data->client, ADS7138_REG_EVENT_LOW_FLAG);
+ if (ret < 0) {
+ dev_warn(dev, "Failed to read event low flags: %d\n", ret);
+ return IRQ_HANDLED;
+ }
+ events_low = ret;
+
+ for (i = 0; i < data->chip_data->channel_num; i++) {
+ if (events_high & BIT(i)) {
+ code = IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, i,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING);
+ iio_push_event(indio_dev, code,
+ iio_get_time_ns(indio_dev));
+ }
+ if (events_low & BIT(i)) {
+ code = IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, i,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING);
+ iio_push_event(indio_dev, code,
+ iio_get_time_ns(indio_dev));
+ }
+ }
+
+ /* Try to clear all interrupt flags */
+ ret = ads7138_i2c_write(data->client, ADS7138_REG_EVENT_HIGH_FLAG, 0xFF);
+ if (ret)
+ dev_warn(dev, "Failed to clear event high flags: %d\n", ret);
+
+ ret = ads7138_i2c_write(data->client, ADS7138_REG_EVENT_LOW_FLAG, 0xFF);
+ if (ret)
+ dev_warn(dev, "Failed to clear event low flags: %d\n", ret);
+
+ return IRQ_HANDLED;
+}
+
+static int ads7138_set_conv_mode(struct ads7138_data *data,
+ enum ads7138_modes mode)
+{
+ if (mode == ADS7138_MODE_AUTO)
+ return ads7138_i2c_set_bit(data->client, ADS7138_REG_OPMODE_CFG,
+ ADS7138_OPMODE_CFG_CONV_MODE);
+ return ads7138_i2c_clear_bit(data->client, ADS7138_REG_OPMODE_CFG,
+ ADS7138_OPMODE_CFG_CONV_MODE);
+}
+
+static int ads7138_init_hw(struct ads7138_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int ret;
+
+ data->vref_regu = devm_regulator_get(dev, "avdd");
+ if (IS_ERR(data->vref_regu))
+ return dev_err_probe(dev, PTR_ERR(data->vref_regu),
+ "Failed to get avdd regulator\n");
+
+ ret = regulator_get_voltage(data->vref_regu);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get avdd voltage\n");
+
+ /* Reset the chip to get a defined starting configuration */
+ ret = ads7138_i2c_set_bit(data->client, ADS7138_REG_GENERAL_CFG,
+ ADS7138_GENERAL_CFG_RST);
+ if (ret)
+ return ret;
+
+ ret = ads7138_set_conv_mode(data, ADS7138_MODE_AUTO);
+ if (ret)
+ return ret;
+
+ /* Enable statistics and digital window comparator */
+ ret = ads7138_i2c_set_bit(data->client, ADS7138_REG_GENERAL_CFG,
+ ADS7138_GENERAL_CFG_STATS_EN |
+ ADS7138_GENERAL_CFG_DWC_EN);
+ if (ret)
+ return ret;
+
+ /* Enable all channels for auto sequencing */
+ ret = ads7138_i2c_set_bit(data->client, ADS7138_REG_AUTO_SEQ_CH_SEL, 0xFF);
+ if (ret)
+ return ret;
+
+ /* Set auto sequence mode and start sequencing */
+ return ads7138_i2c_set_bit(data->client, ADS7138_REG_SEQUENCE_CFG,
+ ADS7138_SEQUENCE_CFG_SEQ_START |
+ ADS7138_SEQUENCE_CFG_SEQ_MODE);
+}
+
+static int ads7138_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct iio_dev *indio_dev;
+ struct ads7138_data *data;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ data->chip_data = i2c_get_match_data(client);
+ if (!data->chip_data)
+ return -ENODEV;
+
+ ret = devm_mutex_init(dev, &data->lock);
+ if (ret)
+ return ret;
+
+ indio_dev->name = data->chip_data->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ads7138_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ads7138_channels);
+ indio_dev->info = &ti_ads7138_info;
+
+ i2c_set_clientdata(client, indio_dev);
+
+ if (client->irq > 0) {
+ ret = devm_request_threaded_irq(dev, client->irq,
+ NULL, ads7138_event_handler,
+ IRQF_TRIGGER_LOW |
+ IRQF_ONESHOT | IRQF_SHARED,
+ client->name, indio_dev);
+ if (ret)
+ return ret;
+ }
+
+ ret = ads7138_init_hw(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to initialize device\n");
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register iio device\n");
+
+ return 0;
+}
+
+static int ads7138_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ads7138_data *data = iio_priv(indio_dev);
+
+ return ads7138_set_conv_mode(data, ADS7138_MODE_MANUAL);
+}
+
+static int ads7138_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ads7138_data *data = iio_priv(indio_dev);
+
+ return ads7138_set_conv_mode(data, ADS7138_MODE_AUTO);
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(ads7138_pm_ops,
+ ads7138_runtime_suspend,
+ ads7138_runtime_resume,
+ NULL);
+
+static const struct ads7138_chip_data ads7128_data = {
+ .name = "ads7128",
+ .channel_num = 8,
+};
+
+static const struct ads7138_chip_data ads7138_data = {
+ .name = "ads7138",
+ .channel_num = 8,
+};
+
+static const struct of_device_id ads7138_of_match[] = {
+ { .compatible = "ti,ads7128", .data = &ads7128_data },
+ { .compatible = "ti,ads7138", .data = &ads7138_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ads7138_of_match);
+
+static const struct i2c_device_id ads7138_device_ids[] = {
+ { "ads7128", (kernel_ulong_t)&ads7128_data },
+ { "ads7138", (kernel_ulong_t)&ads7138_data },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ads7138_device_ids);
+
+static struct i2c_driver ads7138_driver = {
+ .driver = {
+ .name = "ads7138",
+ .of_match_table = ads7138_of_match,
+ .pm = pm_ptr(&ads7138_pm_ops),
+ },
+ .id_table = ads7138_device_ids,
+ .probe = ads7138_probe,
+};
+module_i2c_driver(ads7138_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tobias Sperling <tobias.sperling@softing.com>");
+MODULE_DESCRIPTION("Driver for TI ADS7138 ADCs");
diff --git a/drivers/iio/adc/ti-ads7924.c b/drivers/iio/adc/ti-ads7924.c
index 66b54c0d75aa..b1f745f75dbe 100644
--- a/drivers/iio/adc/ti-ads7924.c
+++ b/drivers/iio/adc/ti-ads7924.c
@@ -251,11 +251,8 @@ static const struct iio_info ads7924_info = {
.read_raw = ads7924_read_raw,
};
-static int ads7924_get_channels_config(struct i2c_client *client,
- struct iio_dev *indio_dev)
+static int ads7924_get_channels_config(struct device *dev)
{
- struct ads7924_data *priv = iio_priv(indio_dev);
- struct device *dev = priv->dev;
struct fwnode_handle *node;
int num_channels = 0;
@@ -380,7 +377,7 @@ static int ads7924_probe(struct i2c_client *client)
indio_dev->num_channels = ARRAY_SIZE(ads7924_channels);
indio_dev->info = &ads7924_info;
- ret = ads7924_get_channels_config(client, indio_dev);
+ ret = ads7924_get_channels_config(dev);
if (ret < 0)
return dev_err_probe(dev, ret,
"failed to get channels configuration\n");
diff --git a/drivers/iio/adc/ti-tlc4541.c b/drivers/iio/adc/ti-tlc4541.c
index 08de997584fd..5a138be983ed 100644
--- a/drivers/iio/adc/ti-tlc4541.c
+++ b/drivers/iio/adc/ti-tlc4541.c
@@ -131,11 +131,10 @@ static int tlc4541_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = spi_sync(st->spi, &st->scan_single_msg);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
*val = be16_to_cpu(st->rx_buf[0]);
diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c
index daea2bde7acf..f14d12b03da6 100644
--- a/drivers/iio/addac/ad74413r.c
+++ b/drivers/iio/addac/ad74413r.c
@@ -826,6 +826,8 @@ static int _ad74413r_get_single_adc_result(struct ad74413r_state *st,
unsigned int uval;
int ret;
+ guard(mutex)(&st->lock);
+
reinit_completion(&st->adc_data_completion);
ret = ad74413r_set_adc_channel_enable(st, channel, true);
@@ -865,12 +867,14 @@ static int ad74413r_get_single_adc_result(struct iio_dev *indio_dev,
unsigned int channel, int *val)
{
struct ad74413r_state *st = iio_priv(indio_dev);
+ int ret;
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- guard(mutex)(&st->lock);
- return _ad74413r_get_single_adc_result(st, channel, val);
- }
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = _ad74413r_get_single_adc_result(st, channel, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
}
static void ad74413r_adc_to_resistance_result(int adc_result, int *val)
diff --git a/drivers/iio/amplifiers/hmc425a.c b/drivers/iio/amplifiers/hmc425a.c
index 2ee4c0d70281..d9a359e1388a 100644
--- a/drivers/iio/amplifiers/hmc425a.c
+++ b/drivers/iio/amplifiers/hmc425a.c
@@ -161,8 +161,7 @@ static int hmc425a_write(struct iio_dev *indio_dev, u32 value)
values[0] = value;
- gpiod_set_array_value_cansleep(st->gpios->ndescs, st->gpios->desc,
- NULL, values);
+ gpiod_multi_set_value_cansleep(st->gpios, values);
return 0;
}
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index 7ea784304ffb..ee294a775e8a 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -624,7 +624,7 @@ out_unlock:
/**
* iio_dma_buffer_read() - DMA buffer read callback
- * @buffer: Buffer to read form
+ * @buffer: Buffer to read from
* @n: Number of bytes to read
* @user_buffer: Userspace buffer to copy the data to
*
@@ -640,7 +640,7 @@ EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_read, "IIO_DMA_BUFFER");
/**
* iio_dma_buffer_write() - DMA buffer write callback
- * @buffer: Buffer to read form
+ * @buffer: Buffer to write to
* @n: Number of bytes to read
* @user_buffer: Userspace buffer to copy the data from
*
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index 614e1c4189a9..e9d9a7d39fe1 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -206,39 +206,29 @@ static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
/**
* iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
- * @dev: DMA channel consumer device
- * @channel: DMA channel name, typically "rx".
+ * @chan: DMA channel.
*
* This allocates a new IIO buffer which internally uses the DMAengine framework
- * to perform its transfers. The parent device will be used to request the DMA
- * channel.
+ * to perform its transfers.
*
* Once done using the buffer iio_dmaengine_buffer_free() should be used to
* release it.
*/
-static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
- const char *channel)
+static struct iio_buffer *iio_dmaengine_buffer_alloc(struct dma_chan *chan)
{
struct dmaengine_buffer *dmaengine_buffer;
unsigned int width, src_width, dest_width;
struct dma_slave_caps caps;
- struct dma_chan *chan;
int ret;
+ ret = dma_get_slave_caps(chan, &caps);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
if (!dmaengine_buffer)
return ERR_PTR(-ENOMEM);
- chan = dma_request_chan(dev, channel);
- if (IS_ERR(chan)) {
- ret = PTR_ERR(chan);
- goto err_free;
- }
-
- ret = dma_get_slave_caps(chan, &caps);
- if (ret < 0)
- goto err_release;
-
/* Needs to be aligned to the maximum of the minimums */
if (caps.src_addr_widths)
src_width = __ffs(caps.src_addr_widths);
@@ -262,12 +252,6 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;
return &dmaengine_buffer->queue.buffer;
-
-err_release:
- dma_release_channel(chan);
-err_free:
- kfree(dmaengine_buffer);
- return ERR_PTR(ret);
}
/**
@@ -276,17 +260,57 @@ err_free:
*
* Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
*/
-void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
+static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
struct dmaengine_buffer *dmaengine_buffer =
iio_buffer_to_dmaengine_buffer(buffer);
iio_dma_buffer_exit(&dmaengine_buffer->queue);
- dma_release_channel(dmaengine_buffer->chan);
-
iio_buffer_put(buffer);
}
-EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, "IIO_DMAENGINE_BUFFER");
+
+/**
+ * iio_dmaengine_buffer_teardown() - Releases DMA channel and frees buffer
+ * @buffer: Buffer to free
+ *
+ * Releases the DMA channel and frees the buffer previously setup with
+ * iio_dmaengine_buffer_setup_ext().
+ */
+void iio_dmaengine_buffer_teardown(struct iio_buffer *buffer)
+{
+ struct dmaengine_buffer *dmaengine_buffer =
+ iio_buffer_to_dmaengine_buffer(buffer);
+ struct dma_chan *chan = dmaengine_buffer->chan;
+
+ iio_dmaengine_buffer_free(buffer);
+ dma_release_channel(chan);
+}
+EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_teardown, "IIO_DMAENGINE_BUFFER");
+
+static struct iio_buffer
+*__iio_dmaengine_buffer_setup_ext(struct iio_dev *indio_dev,
+ struct dma_chan *chan,
+ enum iio_buffer_direction dir)
+{
+ struct iio_buffer *buffer;
+ int ret;
+
+ buffer = iio_dmaengine_buffer_alloc(chan);
+ if (IS_ERR(buffer))
+ return ERR_CAST(buffer);
+
+ indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+
+ buffer->direction = dir;
+
+ ret = iio_device_attach_buffer(indio_dev, buffer);
+ if (ret) {
+ iio_dmaengine_buffer_free(buffer);
+ return ERR_PTR(ret);
+ }
+
+ return buffer;
+}
/**
* iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
@@ -300,7 +324,7 @@ EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, "IIO_DMAENGINE_BUFFER");
* It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
* IIO device.
*
- * Once done using the buffer iio_dmaengine_buffer_free() should be used to
+ * Once done using the buffer iio_dmaengine_buffer_teardown() should be used to
* release it.
*/
struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
@@ -308,30 +332,24 @@ struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
const char *channel,
enum iio_buffer_direction dir)
{
+ struct dma_chan *chan;
struct iio_buffer *buffer;
- int ret;
-
- buffer = iio_dmaengine_buffer_alloc(dev, channel);
- if (IS_ERR(buffer))
- return ERR_CAST(buffer);
-
- indio_dev->modes |= INDIO_BUFFER_HARDWARE;
- buffer->direction = dir;
+ chan = dma_request_chan(dev, channel);
+ if (IS_ERR(chan))
+ return ERR_CAST(chan);
- ret = iio_device_attach_buffer(indio_dev, buffer);
- if (ret) {
- iio_dmaengine_buffer_free(buffer);
- return ERR_PTR(ret);
- }
+ buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir);
+ if (IS_ERR(buffer))
+ dma_release_channel(chan);
return buffer;
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup_ext, "IIO_DMAENGINE_BUFFER");
-static void __devm_iio_dmaengine_buffer_free(void *buffer)
+static void devm_iio_dmaengine_buffer_teardown(void *buffer)
{
- iio_dmaengine_buffer_free(buffer);
+ iio_dmaengine_buffer_teardown(buffer);
}
/**
@@ -357,11 +375,49 @@ int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
if (IS_ERR(buffer))
return PTR_ERR(buffer);
- return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
+ return devm_add_action_or_reset(dev, devm_iio_dmaengine_buffer_teardown,
buffer);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, "IIO_DMAENGINE_BUFFER");
+static void devm_iio_dmaengine_buffer_free(void *buffer)
+{
+ iio_dmaengine_buffer_free(buffer);
+}
+
+/**
+ * devm_iio_dmaengine_buffer_setup_with_handle() - Setup a DMA buffer for an
+ * IIO device
+ * @dev: Device for devm ownership
+ * @indio_dev: IIO device to which to attach this buffer.
+ * @chan: DMA channel
+ * @dir: Direction of buffer (in or out)
+ *
+ * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc()
+ * and attaches it to an IIO device with iio_device_attach_buffer().
+ * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
+ * IIO device.
+ *
+ * This is the same as devm_iio_dmaengine_buffer_setup_ext() except that the
+ * caller manages requesting and releasing the DMA channel handle.
+ */
+int devm_iio_dmaengine_buffer_setup_with_handle(struct device *dev,
+ struct iio_dev *indio_dev,
+ struct dma_chan *chan,
+ enum iio_buffer_direction dir)
+{
+ struct iio_buffer *buffer;
+
+ buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
+
+ return devm_add_action_or_reset(dev, devm_iio_dmaengine_buffer_free,
+ buffer);
+}
+EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_with_handle,
+ "IIO_DMAENGINE_BUFFER");
+
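+/*
+ * A minimal usage sketch (caller-side names hypothetical): a driver that
+ * owns the channel handle, e.g. one obtained through an SPI offload API,
+ * attaches it here and stays responsible for releasing it:
+ *
+ *	chan = dma_request_chan(dev, "rx");
+ *	if (IS_ERR(chan))
+ *		return PTR_ERR(chan);
+ *	ret = devm_iio_dmaengine_buffer_setup_with_handle(dev, indio_dev,
+ *						chan, IIO_BUFFER_DIRECTION_IN);
+ */
+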
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/chemical/ens160_core.c b/drivers/iio/chemical/ens160_core.c
index 48d5ad2075b6..152f81ff57e3 100644
--- a/drivers/iio/chemical/ens160_core.c
+++ b/drivers/iio/chemical/ens160_core.c
@@ -100,25 +100,35 @@ static const struct iio_chan_spec ens160_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(2),
};
+static int __ens160_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct ens160_data *data = iio_priv(indio_dev);
+ int ret;
+
+ guard(mutex)(&data->mutex);
+ ret = regmap_bulk_read(data->regmap, chan->address,
+ &data->buf, sizeof(data->buf));
+ if (ret)
+ return ret;
+ *val = le16_to_cpu(data->buf);
+ return IIO_VAL_INT;
+}
+
static int ens160_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
- struct ens160_data *data = iio_priv(indio_dev);
int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- guard(mutex)(&data->mutex);
- ret = regmap_bulk_read(data->regmap, chan->address,
- &data->buf, sizeof(data->buf));
- if (ret)
- return ret;
- *val = le16_to_cpu(data->buf);
- return IIO_VAL_INT;
- }
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = __ens160_read_raw(indio_dev, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SCALE:
switch (chan->channel2) {
case IIO_MOD_CO2:
diff --git a/drivers/iio/chemical/scd30_core.c b/drivers/iio/chemical/scd30_core.c
index d613c54cb28d..3fed6b63710f 100644
--- a/drivers/iio/chemical/scd30_core.c
+++ b/drivers/iio/chemical/scd30_core.c
@@ -5,6 +5,7 @@
* Copyright (c) 2020 Tomasz Duszynski <tomasz.duszynski@octakon.com>
*/
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -198,112 +199,103 @@ static int scd30_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const
int *val, int *val2, long mask)
{
struct scd30_state *state = iio_priv(indio_dev);
- int ret = -EINVAL;
+ int ret;
u16 tmp;
- mutex_lock(&state->lock);
+ guard(mutex)(&state->lock);
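+ /* guard() releases the lock automatically on every return path */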
switch (mask) {
case IIO_CHAN_INFO_RAW:
case IIO_CHAN_INFO_PROCESSED:
if (chan->output) {
*val = state->pressure_comp;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
}
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- break;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = scd30_read(state);
if (ret) {
- iio_device_release_direct_mode(indio_dev);
- break;
+ iio_device_release_direct(indio_dev);
+ return ret;
}
*val = state->meas[chan->address];
- iio_device_release_direct_mode(indio_dev);
- ret = IIO_VAL_INT;
- break;
+ iio_device_release_direct(indio_dev);
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 0;
*val2 = 1;
- ret = IIO_VAL_INT_PLUS_MICRO;
- break;
+ return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SAMP_FREQ:
ret = scd30_command_read(state, CMD_MEAS_INTERVAL, &tmp);
if (ret)
- break;
+ return ret;
*val = 0;
*val2 = 1000000000 / tmp;
- ret = IIO_VAL_INT_PLUS_NANO;
- break;
+ return IIO_VAL_INT_PLUS_NANO;
case IIO_CHAN_INFO_CALIBBIAS:
ret = scd30_command_read(state, CMD_TEMP_OFFSET, &tmp);
if (ret)
- break;
+ return ret;
*val = tmp;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
}
- mutex_unlock(&state->lock);
-
- return ret;
}
static int scd30_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
struct scd30_state *state = iio_priv(indio_dev);
- int ret = -EINVAL;
+ int ret;
- mutex_lock(&state->lock);
+ guard(mutex)(&state->lock);
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
if (val)
- break;
+ return -EINVAL;
val = 1000000000 / val2;
if (val < SCD30_MEAS_INTERVAL_MIN_S || val > SCD30_MEAS_INTERVAL_MAX_S)
- break;
+ return -EINVAL;
ret = scd30_command_write(state, CMD_MEAS_INTERVAL, val);
if (ret)
- break;
+ return ret;
state->meas_interval = val;
- break;
+ return 0;
case IIO_CHAN_INFO_RAW:
switch (chan->type) {
case IIO_PRESSURE:
if (val < SCD30_PRESSURE_COMP_MIN_MBAR ||
val > SCD30_PRESSURE_COMP_MAX_MBAR)
- break;
+ return -EINVAL;
ret = scd30_command_write(state, CMD_START_MEAS, val);
if (ret)
- break;
+ return ret;
state->pressure_comp = val;
- break;
+ return 0;
default:
- break;
+ return -EINVAL;
}
- break;
case IIO_CHAN_INFO_CALIBBIAS:
if (val < 0 || val > SCD30_TEMP_OFFSET_MAX)
- break;
+ return -EINVAL;
/*
* Manufacturer does not explicitly specify min/max sensible
* values hence check is omitted for simplicity.
*/
- ret = scd30_command_write(state, CMD_TEMP_OFFSET / 10, val);
+ return scd30_command_write(state, CMD_TEMP_OFFSET / 10, val);
+ default:
+ return -EINVAL;
}
- mutex_unlock(&state->lock);
-
- return ret;
}
static int scd30_write_raw_get_fmt(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
diff --git a/drivers/iio/common/cros_ec_sensors/Makefile b/drivers/iio/common/cros_ec_sensors/Makefile
index e0a33ab66d21..c358fa0328ab 100644
--- a/drivers/iio/common/cros_ec_sensors/Makefile
+++ b/drivers/iio/common/cros_ec_sensors/Makefile
@@ -3,6 +3,7 @@
# Makefile for sensors seen through the ChromeOS EC sensor hub.
#
-obj-$(CONFIG_IIO_CROS_EC_SENSORS_CORE) += cros_ec_sensors_core.o
+cros-ec-sensors-core-objs += cros_ec_sensors_core.o cros_ec_sensors_trace.o
+obj-$(CONFIG_IIO_CROS_EC_SENSORS_CORE) += cros-ec-sensors-core.o
obj-$(CONFIG_IIO_CROS_EC_SENSORS) += cros_ec_sensors.o
obj-$(CONFIG_IIO_CROS_EC_SENSORS_LID_ANGLE) += cros_ec_lid_angle.o
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index 9fc71a73caa1..7751d6f69b12 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -23,6 +23,8 @@
#include <linux/platform_data/cros_ec_sensorhub.h>
#include <linux/platform_device.h>
+#include "cros_ec_sensors_trace.h"
+
/*
* Hard coded to the first device to support sensor fifo. The EC has a 2048
* byte fifo and will trigger an interrupt when fifo is 2/3 full.
@@ -413,6 +415,7 @@ EXPORT_SYMBOL_GPL(cros_ec_sensors_core_register);
int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *state,
u16 opt_length)
{
+ struct ec_response_motion_sense *resp = (struct ec_response_motion_sense *)state->msg->data;
int ret;
if (opt_length)
@@ -423,12 +426,12 @@ int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *state,
memcpy(state->msg->data, &state->param, sizeof(state->param));
ret = cros_ec_cmd_xfer_status(state->ec, state->msg);
+ trace_cros_ec_motion_host_cmd(&state->param, resp, ret);
if (ret < 0)
return ret;
- if (ret &&
- state->resp != (struct ec_response_motion_sense *)state->msg->data)
- memcpy(state->resp, state->msg->data, ret);
+ if (ret && state->resp != resp)
+ memcpy(state->resp, resp, ret);
return 0;
}
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_trace.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_trace.c
new file mode 100644
index 000000000000..c4db949fa775
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_trace.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+// Trace events for the ChromeOS Embedded Controller
+//
+// Copyright 2025 Google LLC.
+
+#define TRACE_SYMBOL(a) {a, #a}
+
+// Generate the list using the following script:
+// sed -n 's/^.*\(MOTIONSENSE_CMD.*\) = .*,$/\tTRACE_SYMBOL(\1), \\/p' include/linux/platform_data/cros_ec_commands.h
+#define MOTIONSENSE_CMDS \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_DUMP), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_INFO), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_EC_RATE), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_SENSOR_ODR), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_SENSOR_RANGE), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_KB_WAKE_ANGLE), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_DATA), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_FIFO_INFO), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_FIFO_FLUSH), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_FIFO_READ), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_PERFORM_CALIB), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_SENSOR_OFFSET), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_LIST_ACTIVITIES), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_SET_ACTIVITY), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_LID_ANGLE), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_FIFO_INT_ENABLE), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_SPOOF), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_SENSOR_SCALE)
+
+#define CREATE_TRACE_POINTS
+#include "cros_ec_sensors_trace.h"
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_trace.h b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_trace.h
new file mode 100644
index 000000000000..8956f2e8ad08
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_trace.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Trace events for the ChromeOS Embedded Controller
+ *
+ * Copyright 2025 Google LLC.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cros_ec
+
+#if !defined(_CROS_EC_SENSORS_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _CROS_EC_SENSORS_TRACE_H_
+
+#include <linux/bits.h>
+#include <linux/types.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(cros_ec_motion_host_cmd,
+ TP_PROTO(struct ec_params_motion_sense *param,
+ struct ec_response_motion_sense *resp,
+ int retval),
+ TP_ARGS(param, resp, retval),
+ TP_STRUCT__entry(__field(uint8_t, cmd)
+ __field(uint8_t, sensor_id)
+ __field(uint32_t, data)
+ __field(int, retval)
+ __field(int32_t, ret)
+ ),
+ TP_fast_assign(__entry->cmd = param->cmd;
+ __entry->sensor_id = param->sensor_odr.sensor_num;
+ __entry->data = param->sensor_odr.data;
+ __entry->retval = retval;
+ __entry->ret = retval > 0 ? resp->sensor_odr.ret : -1;
+ ),
+ TP_printk("%s, id: %d, data: %u, result: %u, return: %d",
+ __print_symbolic(__entry->cmd, MOTIONSENSE_CMDS),
+ __entry->sensor_id,
+ __entry->data,
+ __entry->retval,
+ __entry->ret)
+);
+
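+/*
+ * Usage note (a sketch; the tracefs mount point may differ): once compiled
+ * in via CREATE_TRACE_POINTS, the event can be enabled at runtime:
+ *
+ *	echo 1 > /sys/kernel/tracing/events/cros_ec/cros_ec_motion_host_cmd/enable
+ *	cat /sys/kernel/tracing/trace_pipe
+ */
+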
+#endif /* _CROS_EC_SENSORS_TRACE_H_ */
+
+/* this part must be outside header guard */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/iio/common/cros_ec_sensors
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cros_ec_sensors_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 5690a37267d8..4811ea973125 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -296,6 +296,9 @@ config AD5770R
config AD5791
tristate "Analog Devices AD5760/AD5780/AD5781/AD5790/AD5791 DAC SPI driver"
depends on SPI
+ select SPI_OFFLOAD
+ select IIO_BUFFER
+ select IIO_BUFFER_DMAENGINE
help
Say yes here to build support for Analog Devices AD5760, AD5780,
AD5781, AD5790, AD5791 High Resolution Voltage Output Digital to
diff --git a/drivers/iio/dac/ad3552r-common.c b/drivers/iio/dac/ad3552r-common.c
index 03e0864f5084..b8807e54fa05 100644
--- a/drivers/iio/dac/ad3552r-common.c
+++ b/drivers/iio/dac/ad3552r-common.c
@@ -11,23 +11,21 @@
#include "ad3552r.h"
-const s32 ad3552r_ch_ranges[AD3552R_MAX_RANGES][2] = {
+static const s32 ad3552r_ch_ranges[AD3552R_MAX_RANGES][2] = {
[AD3552R_CH_OUTPUT_RANGE_0__2P5V] = { 0, 2500 },
[AD3552R_CH_OUTPUT_RANGE_0__5V] = { 0, 5000 },
[AD3552R_CH_OUTPUT_RANGE_0__10V] = { 0, 10000 },
[AD3552R_CH_OUTPUT_RANGE_NEG_5__5V] = { -5000, 5000 },
[AD3552R_CH_OUTPUT_RANGE_NEG_10__10V] = { -10000, 10000 }
};
-EXPORT_SYMBOL_NS_GPL(ad3552r_ch_ranges, "IIO_AD3552R");
-const s32 ad3542r_ch_ranges[AD3542R_MAX_RANGES][2] = {
+static const s32 ad3542r_ch_ranges[AD3542R_MAX_RANGES][2] = {
[AD3542R_CH_OUTPUT_RANGE_0__2P5V] = { 0, 2500 },
[AD3542R_CH_OUTPUT_RANGE_0__5V] = { 0, 5000 },
[AD3542R_CH_OUTPUT_RANGE_0__10V] = { 0, 10000 },
[AD3542R_CH_OUTPUT_RANGE_NEG_5__5V] = { -5000, 5000 },
[AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V] = { -2500, 7500 }
};
-EXPORT_SYMBOL_NS_GPL(ad3542r_ch_ranges, "IIO_AD3552R");
/* Gain * AD3552R_GAIN_SCALE */
static const s32 gains_scaling_table[] = {
@@ -37,6 +35,50 @@ static const s32 gains_scaling_table[] = {
[AD3552R_CH_GAIN_SCALING_0_125] = 125
};
+const struct ad3552r_model_data ad3541r_model_data = {
+ .model_name = "ad3541r",
+ .chip_id = AD3541R_ID,
+ .num_hw_channels = 1,
+ .ranges_table = ad3542r_ch_ranges,
+ .num_ranges = ARRAY_SIZE(ad3542r_ch_ranges),
+ .requires_output_range = true,
+ .num_spi_data_lanes = 2,
+};
+EXPORT_SYMBOL_NS_GPL(ad3541r_model_data, "IIO_AD3552R");
+
+const struct ad3552r_model_data ad3542r_model_data = {
+ .model_name = "ad3542r",
+ .chip_id = AD3542R_ID,
+ .num_hw_channels = 2,
+ .ranges_table = ad3542r_ch_ranges,
+ .num_ranges = ARRAY_SIZE(ad3542r_ch_ranges),
+ .requires_output_range = true,
+ .num_spi_data_lanes = 2,
+};
+EXPORT_SYMBOL_NS_GPL(ad3542r_model_data, "IIO_AD3552R");
+
+const struct ad3552r_model_data ad3551r_model_data = {
+ .model_name = "ad3551r",
+ .chip_id = AD3551R_ID,
+ .num_hw_channels = 1,
+ .ranges_table = ad3552r_ch_ranges,
+ .num_ranges = ARRAY_SIZE(ad3552r_ch_ranges),
+ .requires_output_range = false,
+ .num_spi_data_lanes = 4,
+};
+EXPORT_SYMBOL_NS_GPL(ad3551r_model_data, "IIO_AD3552R");
+
+const struct ad3552r_model_data ad3552r_model_data = {
+ .model_name = "ad3552r",
+ .chip_id = AD3552R_ID,
+ .num_hw_channels = 2,
+ .ranges_table = ad3552r_ch_ranges,
+ .num_ranges = ARRAY_SIZE(ad3552r_ch_ranges),
+ .requires_output_range = false,
+ .num_spi_data_lanes = 4,
+};
+EXPORT_SYMBOL_NS_GPL(ad3552r_model_data, "IIO_AD3552R");
+
u16 ad3552r_calc_custom_gain(u8 p, u8 n, s16 goffs)
{
return FIELD_PREP(AD3552R_MASK_CH_RANGE_OVERRIDE, 1) |
diff --git a/drivers/iio/dac/ad3552r-hs.c b/drivers/iio/dac/ad3552r-hs.c
index 8974df625670..cd8dabb60c55 100644
--- a/drivers/iio/dac/ad3552r-hs.c
+++ b/drivers/iio/dac/ad3552r-hs.c
@@ -19,6 +19,31 @@
#include "ad3552r.h"
#include "ad3552r-hs.h"
+/*
+ * Important notes for register map access:
+ * ========================================
+ *
+ * The register address space is divided into 2 regions, primary (config) and
+ * secondary (DAC). The primary region can only be accessed in simple SPI
+ * mode, except on ad355x models, where driving the QSPI pin high allows QSPI
+ * access to both regions.
+ *
+ * Because ad3541/2r do not implement QSPI, HDL keeps the "QSPI" pin level low
+ * at boot for proper device detection (see ad3552r manual, rev B, table 7,
+ * pin 31, digital input). For this reason the working mode (SPI, DSPI or
+ * QSPI) must be selected in software, by configuring the target DAC
+ * appropriately and using the backend API to configure the bus mode to match.
+ *
+ * Also important: none of the three modes allows reading in DDR.
+ *
+ * In non-buffering operation, the mode is set to simple SPI SDR for all
+ * primary and secondary region r/w accesses, to avoid switching the mode on
+ * every DAC register access (raw r/w) and to keep register dumps possible
+ * (reads work in non-DDR mode only).
+ * In buffering mode, the driver sets the best possible mode, D/QSPI and DDR.
+ */
+
struct ad3552r_hs_state {
const struct ad3552r_model_data *model_data;
struct gpio_desc *reset_gpio;
@@ -27,16 +52,26 @@ struct ad3552r_hs_state {
bool single_channel;
struct ad3552r_ch_data ch_data[AD3552R_MAX_CH];
struct ad3552r_hs_platform_data *data;
+ /* INTERFACE_CONFIG_D register cache; values cannot be read back in DDR. */
+ u32 config_d;
};
-static int ad3552r_qspi_update_reg_bits(struct ad3552r_hs_state *st,
- u32 reg, u32 mask, u32 val,
- size_t xfer_size)
+static int ad3552r_hs_reg_read(struct ad3552r_hs_state *st, u32 reg, u32 *val,
+ size_t xfer_size)
+{
+ /* No chip in the family supports DDR reads; warn if one is attempted. */
+ WARN_ON_ONCE(st->config_d & AD3552R_MASK_SPI_CONFIG_DDR);
+
+ return st->data->bus_reg_read(st->back, reg, val, xfer_size);
+}
+
+static int ad3552r_hs_update_reg_bits(struct ad3552r_hs_state *st, u32 reg,
+ u32 mask, u32 val, size_t xfer_size)
{
u32 rval;
int ret;
- ret = st->data->bus_reg_read(st->back, reg, &rval, xfer_size);
+ ret = ad3552r_hs_reg_read(st, reg, &rval, xfer_size);
if (ret)
return ret;
@@ -56,16 +91,20 @@ static int ad3552r_hs_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
/*
- * Using 4 lanes (QSPI), then using 2 as DDR mode is
- * considered always on (considering buffering mode always).
+ * Use "num_spi_data_lanes" since ad3541/2r have only a DSPI
+ * interface, while ad355x supports QSPI. The factor of 2 accounts
+ * for DDR, which is considered always on (buffering mode is
+ * assumed here).
*/
*val = DIV_ROUND_CLOSEST(st->data->bus_sample_data_clock_hz *
- 4 * 2, chan->scan_type.realbits);
+ st->model_data->num_spi_data_lanes * 2,
+ chan->scan_type.realbits);
return IIO_VAL_INT;
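As a worked example of the expression above, with illustrative numbers (not datasheet values): a 33 MHz bus data clock, 4 lanes (QSPI) and 16-bit samples give:

	/* 33 MHz * 4 lanes * 2 (DDR) / 16 bits = 16.5 MSPS */
	u32 hz = DIV_ROUND_CLOSEST(33000000U * 4 * 2, 16);	/* 16500000 */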
case IIO_CHAN_INFO_RAW:
- ret = st->data->bus_reg_read(st->back,
+ /* RAW accesses always stay in simple SPI. */
+ ret = ad3552r_hs_reg_read(st,
AD3552R_REG_ADDR_CH_DAC_16B(chan->channel),
val, 2);
if (ret)
@@ -90,20 +129,60 @@ static int ad3552r_hs_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct ad3552r_hs_state *st = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- return st->data->bus_reg_write(st->back,
- AD3552R_REG_ADDR_CH_DAC_16B(chan->channel),
- val, 2);
- }
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ /* RAW accesses always stay in simple SPI. */
+ ret = st->data->bus_reg_write(st->back,
+ AD3552R_REG_ADDR_CH_DAC_16B(chan->channel),
+ val, 2);
+
+ iio_device_release_direct(indio_dev);
+ return ret;
default:
return -EINVAL;
}
}
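For reference, the non-scoped claim/release pattern that this series converts drivers to (here, in ad8460 and in iio_simple_dummy further down) has the following general shape; do_direct_access() is a placeholder, not a real symbol:

	int ret;

	if (!iio_device_claim_direct(indio_dev))
		return -EBUSY;

	ret = do_direct_access(st);	/* the work needing direct mode */

	iio_device_release_direct(indio_dev);
	return ret;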
+static int ad3552r_hs_set_bus_io_mode_hs(struct ad3552r_hs_state *st)
+{
+ int bus_mode;
+
+ if (st->model_data->num_spi_data_lanes == 4)
+ bus_mode = AD3552R_IO_MODE_QSPI;
+ else
+ bus_mode = AD3552R_IO_MODE_DSPI;
+
+ return st->data->bus_set_io_mode(st->back, bus_mode);
+}
+
+static int ad3552r_hs_set_target_io_mode_hs(struct ad3552r_hs_state *st)
+{
+ u32 mode_target;
+
+ /*
+ * Use the best access mode for the secondary reg area: QSPI where
+ * possible, DSPI otherwise.
+ */
+ if (st->model_data->num_spi_data_lanes == 4)
+ mode_target = AD3552R_QUAD_SPI;
+ else
+ mode_target = AD3552R_DUAL_SPI;
+
+ /*
+ * Do not use update here: DDR mode is generally already enabled at
+ * this point, and registers cannot be read in DDR mode.
+ */
+ return st->data->bus_reg_write(st->back,
+ AD3552R_REG_ADDR_TRANSFER_REGISTER,
+ FIELD_PREP(AD3552R_MASK_MULTI_IO_MODE,
+ mode_target) |
+ AD3552R_MASK_STREAM_LENGTH_KEEP_VALUE, 1);
+}
+
static int ad3552r_hs_buffer_postenable(struct iio_dev *indio_dev)
{
struct ad3552r_hs_state *st = iio_priv(indio_dev);
@@ -132,48 +211,111 @@ static int ad3552r_hs_buffer_postenable(struct iio_dev *indio_dev)
return -EINVAL;
}
- ret = st->data->bus_reg_write(st->back, AD3552R_REG_ADDR_STREAM_MODE,
- loop_len, 1);
+ /*
+ * Since the HDL holds the QSPI pin low at reset (for ad3541/2r
+ * support), the streaming start sequence must strictly follow the
+ * order below.
+ */
+
+ /* Primary region access: set streaming mode (still in SPI + SDR). */
+ ret = ad3552r_hs_update_reg_bits(st,
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_B,
+ AD3552R_MASK_SINGLE_INST, 0, 1);
if (ret)
return ret;
- /* Inform DAC chip to switch into DDR mode */
- ret = ad3552r_qspi_update_reg_bits(st,
- AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
- AD3552R_MASK_SPI_CONFIG_DDR,
- AD3552R_MASK_SPI_CONFIG_DDR, 1);
+ /*
+ * Set the target loop length, keeping its value: streaming writes at
+ * address 0x2c or 0x2a, in a descending loop (2 or 4 bytes). Keeping
+ * the loop length value prevents it from being cleared later, when
+ * _CS is deasserted.
+ */
+ ret = ad3552r_hs_update_reg_bits(st, AD3552R_REG_ADDR_TRANSFER_REGISTER,
+ AD3552R_MASK_STREAM_LENGTH_KEEP_VALUE,
+ AD3552R_MASK_STREAM_LENGTH_KEEP_VALUE,
+ 1);
if (ret)
- return ret;
+ goto exit_err_streaming;
+
+ ret = st->data->bus_reg_write(st->back,
+ AD3552R_REG_ADDR_STREAM_MODE,
+ loop_len, 1);
+ if (ret)
+ goto exit_err_streaming;
+
+ st->config_d |= AD3552R_MASK_SPI_CONFIG_DDR;
+ ret = st->data->bus_reg_write(st->back,
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
+ st->config_d, 1);
+ if (ret)
+ goto exit_err_streaming;
- /* Inform DAC IP to go for DDR mode from now on */
ret = iio_backend_ddr_enable(st->back);
- if (ret) {
- dev_err(st->dev, "could not set DDR mode, not streaming");
- goto exit_err;
- }
+ if (ret)
+ goto exit_err_ddr_mode_target;
+
+ /*
+ * From here onward the mode is DDR, so no register can be read
+ * anymore; in particular, ad3552r_hs_update_reg_bits() must not be
+ * called.
+ */
+ /* Set target to the best high speed mode (DSPI or QSPI). */
+ ret = ad3552r_hs_set_target_io_mode_hs(st);
+ if (ret)
+ goto exit_err_ddr_mode;
+
+ /* Set bus to the best high speed mode (DSPI or QSPI). */
+ ret = ad3552r_hs_set_bus_io_mode_hs(st);
+ if (ret)
+ goto exit_err_bus_mode_target;
+
+ /*
+ * Backend setup must be done only now, otherwise the related register
+ * values would be disrupted by the preceding bus accesses.
+ */
ret = iio_backend_data_transfer_addr(st->back, val);
if (ret)
- goto exit_err;
+ goto exit_err_bus_mode_target;
ret = iio_backend_data_format_set(st->back, 0, &fmt);
if (ret)
- goto exit_err;
+ goto exit_err_bus_mode_target;
ret = iio_backend_data_stream_enable(st->back);
if (ret)
- goto exit_err;
+ goto exit_err_bus_mode_target;
return 0;
-exit_err:
- ad3552r_qspi_update_reg_bits(st,
- AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
- AD3552R_MASK_SPI_CONFIG_DDR,
- 0, 1);
+exit_err_bus_mode_target:
+ /* Back to simple SPI; avoid update, since it implies a read. */
+ st->data->bus_reg_write(st->back, AD3552R_REG_ADDR_TRANSFER_REGISTER,
+ FIELD_PREP(AD3552R_MASK_MULTI_IO_MODE,
+ AD3552R_SPI) |
+ AD3552R_MASK_STREAM_LENGTH_KEEP_VALUE, 1);
+
+ /*
+ * Return the bus to simple SPI; this must be executed together with
+ * the target mode unwind above, and only after it.
+ */
+ st->data->bus_set_io_mode(st->back, AD3552R_IO_MODE_SPI);
+exit_err_ddr_mode:
iio_backend_ddr_disable(st->back);
+exit_err_ddr_mode_target:
+ /*
+ * Back to SDR. Reads are impossible in DDR whatever the I/O mode is,
+ * so do not use update.
+ */
+ st->config_d &= ~AD3552R_MASK_SPI_CONFIG_DDR;
+ st->data->bus_reg_write(st->back, AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
+ st->config_d, 1);
+
+exit_err_streaming:
+ /* Back to single instruction mode, disabling loop. */
+ st->data->bus_reg_write(st->back, AD3552R_REG_ADDR_INTERFACE_CONFIG_B,
+ AD3552R_MASK_SINGLE_INST |
+ AD3552R_MASK_SHORT_INSTRUCTION, 1);
+
return ret;
}
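To make the strict ordering easier to audit, the enable path and its mirrored unwind can be summarized as follows (a reading aid derived from the code above, not additional driver logic):

/*
 * Enable (steps 1-4 still in simple SPI + SDR, reads legal):
 *  1. INTERFACE_CONFIG_B: clear SINGLE_INST (streaming mode)
 *  2. TRANSFER_REGISTER:  set STREAM_LENGTH_KEEP_VALUE
 *  3. STREAM_MODE:        program the loop length
 *  4. INTERFACE_CONFIG_D: set DDR (cached in st->config_d)
 *  5. backend DDR enable           -- no register reads from here on
 *  6. target to DSPI/QSPI (plain write, never update)
 *  7. bus to DSPI/QSPI
 *  8. backend address/format setup, then stream enable
 *
 * The error labels unwind the same steps in reverse, using plain
 * writes only, since reads are impossible once DDR is on.
 */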
@@ -186,11 +328,22 @@ static int ad3552r_hs_buffer_predisable(struct iio_dev *indio_dev)
if (ret)
return ret;
- /* Inform DAC to set in SDR mode */
- ret = ad3552r_qspi_update_reg_bits(st,
- AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
- AD3552R_MASK_SPI_CONFIG_DDR,
- 0, 1);
+ /*
+ * Switch back to simple SPI, even if still in DDR, so that the
+ * primary region can be written.
+ */
+ ret = st->data->bus_set_io_mode(st->back, AD3552R_IO_MODE_SPI);
+ if (ret)
+ return ret;
+
+ /*
+ * Back to SDR (reads are impossible in DDR whatever the I/O mode is,
+ * so do not use update).
+ */
+ st->config_d &= ~AD3552R_MASK_SPI_CONFIG_DDR;
+ ret = st->data->bus_reg_write(st->back,
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
+ st->config_d, 1);
if (ret)
return ret;
@@ -198,6 +351,24 @@ static int ad3552r_hs_buffer_predisable(struct iio_dev *indio_dev)
if (ret)
return ret;
+ /*
+ * Now return the secondary region to simple SPI as well, so that its
+ * registers can also be dumped/read if needed.
+ */
+ ret = ad3552r_hs_update_reg_bits(st, AD3552R_REG_ADDR_TRANSFER_REGISTER,
+ AD3552R_MASK_MULTI_IO_MODE,
+ AD3552R_SPI, 1);
+ if (ret)
+ return ret;
+
+ /* Back to single instruction mode, disabling loop. */
+ ret = ad3552r_hs_update_reg_bits(st,
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_B,
+ AD3552R_MASK_SINGLE_INST,
+ AD3552R_MASK_SINGLE_INST, 1);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -211,10 +382,10 @@ static inline int ad3552r_hs_set_output_range(struct ad3552r_hs_state *st,
else
val = FIELD_PREP(AD3552R_MASK_CH1_RANGE, mode);
- return ad3552r_qspi_update_reg_bits(st,
- AD3552R_REG_ADDR_CH0_CH1_OUTPUT_RANGE,
- AD3552R_MASK_CH_OUTPUT_RANGE_SEL(ch),
- val, 1);
+ return ad3552r_hs_update_reg_bits(st,
+ AD3552R_REG_ADDR_CH0_CH1_OUTPUT_RANGE,
+ AD3552R_MASK_CH_OUTPUT_RANGE_SEL(ch),
+ val, 1);
}
static int ad3552r_hs_reset(struct ad3552r_hs_state *st)
@@ -230,10 +401,10 @@ static int ad3552r_hs_reset(struct ad3552r_hs_state *st)
fsleep(10);
gpiod_set_value_cansleep(st->reset_gpio, 0);
} else {
- ret = ad3552r_qspi_update_reg_bits(st,
- AD3552R_REG_ADDR_INTERFACE_CONFIG_A,
- AD3552R_MASK_SOFTWARE_RESET,
- AD3552R_MASK_SOFTWARE_RESET, 1);
+ ret = ad3552r_hs_update_reg_bits(st,
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_A,
+ AD3552R_MASK_SOFTWARE_RESET,
+ AD3552R_MASK_SOFTWARE_RESET, 1);
if (ret)
return ret;
}
@@ -304,30 +475,49 @@ static int ad3552r_hs_setup(struct ad3552r_hs_state *st)
if (ret)
return ret;
+ /* The HDL starts with DDR enabled; disable it. */
ret = iio_backend_ddr_disable(st->back);
if (ret)
return ret;
+ ret = st->data->bus_reg_write(st->back,
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_B,
+ AD3552R_MASK_SINGLE_INST |
+ AD3552R_MASK_SHORT_INSTRUCTION, 1);
+ if (ret)
+ return ret;
+
ret = ad3552r_hs_scratch_pad_test(st);
if (ret)
return ret;
- ret = st->data->bus_reg_read(st->back, AD3552R_REG_ADDR_PRODUCT_ID_L,
- &val, 1);
+ /*
+ * Cache config_d: needed to restore it after streaming, and to
+ * detect attempted DDR reads, which are not allowed.
+ */
+ ret = st->data->bus_reg_read(st->back,
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
+ &st->config_d, 1);
+ if (ret)
+ return ret;
+
+ ret = ad3552r_hs_reg_read(st, AD3552R_REG_ADDR_PRODUCT_ID_L, &val, 1);
if (ret)
return ret;
id = val;
- ret = st->data->bus_reg_read(st->back, AD3552R_REG_ADDR_PRODUCT_ID_H,
- &val, 1);
+ ret = ad3552r_hs_reg_read(st, AD3552R_REG_ADDR_PRODUCT_ID_H, &val, 1);
if (ret)
return ret;
id |= val << 8;
if (id != st->model_data->chip_id)
- dev_info(st->dev, "Chip ID error. Expected 0x%x, Read 0x%x\n",
- AD3552R_ID, id);
+ dev_warn(st->dev,
+ "chip ID mismatch, detected 0x%x but expected 0x%x\n",
+ id, st->model_data->chip_id);
+
+ dev_dbg(st->dev, "chip %s detected\n", st->model_data->model_name);
/* Clear reset error flag, see ad3552r manual, rev B table 38. */
ret = st->data->bus_reg_write(st->back, AD3552R_REG_ADDR_ERR_STATUS,
@@ -341,14 +531,6 @@ static int ad3552r_hs_setup(struct ad3552r_hs_state *st)
if (ret)
return ret;
- ret = st->data->bus_reg_write(st->back,
- AD3552R_REG_ADDR_TRANSFER_REGISTER,
- FIELD_PREP(AD3552R_MASK_MULTI_IO_MODE,
- AD3552R_QUAD_SPI) |
- AD3552R_MASK_STREAM_LENGTH_KEEP_VALUE, 1);
- if (ret)
- return ret;
-
ret = iio_backend_data_source_set(st->back, 0, IIO_BACKEND_EXTERNAL);
if (ret)
return ret;
@@ -363,19 +545,21 @@ static int ad3552r_hs_setup(struct ad3552r_hs_state *st)
val = ret;
- ret = ad3552r_qspi_update_reg_bits(st,
- AD3552R_REG_ADDR_SH_REFERENCE_CONFIG,
- AD3552R_MASK_REFERENCE_VOLTAGE_SEL,
- val, 1);
+ ret = ad3552r_hs_update_reg_bits(st,
+ AD3552R_REG_ADDR_SH_REFERENCE_CONFIG,
+ AD3552R_MASK_REFERENCE_VOLTAGE_SEL,
+ val, 1);
if (ret)
return ret;
ret = ad3552r_get_drive_strength(st->dev, &val);
if (!ret) {
- ret = ad3552r_qspi_update_reg_bits(st,
+ st->config_d |=
+ FIELD_PREP(AD3552R_MASK_SDO_DRIVE_STRENGTH, val);
+
+ ret = st->data->bus_reg_write(st->back,
AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
- AD3552R_MASK_SDO_DRIVE_STRENGTH,
- val, 1);
+ st->config_d, 1);
if (ret)
return ret;
}
@@ -504,15 +688,10 @@ static int ad3552r_hs_probe(struct platform_device *pdev)
return devm_iio_device_register(&pdev->dev, indio_dev);
}
-static const struct ad3552r_model_data ad3552r_model_data = {
- .model_name = "ad3552r",
- .chip_id = AD3552R_ID,
- .num_hw_channels = 2,
- .ranges_table = ad3552r_ch_ranges,
- .num_ranges = ARRAY_SIZE(ad3552r_ch_ranges),
-};
-
static const struct of_device_id ad3552r_hs_of_id[] = {
+ { .compatible = "adi,ad3541r", .data = &ad3541r_model_data },
+ { .compatible = "adi,ad3542r", .data = &ad3542r_model_data },
+ { .compatible = "adi,ad3551r", .data = &ad3551r_model_data },
{ .compatible = "adi,ad3552r", .data = &ad3552r_model_data },
{ }
};
diff --git a/drivers/iio/dac/ad3552r-hs.h b/drivers/iio/dac/ad3552r-hs.h
index 724261d38dea..4a9e35234124 100644
--- a/drivers/iio/dac/ad3552r-hs.h
+++ b/drivers/iio/dac/ad3552r-hs.h
@@ -8,11 +8,19 @@
struct iio_backend;
+enum ad3552r_io_mode {
+ AD3552R_IO_MODE_SPI,
+ AD3552R_IO_MODE_DSPI,
+ AD3552R_IO_MODE_QSPI,
+};
+
struct ad3552r_hs_platform_data {
int (*bus_reg_read)(struct iio_backend *back, u32 reg, u32 *val,
size_t data_size);
int (*bus_reg_write)(struct iio_backend *back, u32 reg, u32 val,
size_t data_size);
+ int (*bus_set_io_mode)(struct iio_backend *back,
+ enum ad3552r_io_mode mode);
u32 bus_sample_data_clock_hz;
};
diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c
index 7944f5c1d264..a44b163f3183 100644
--- a/drivers/iio/dac/ad3552r.c
+++ b/drivers/iio/dac/ad3552r.c
@@ -655,42 +655,6 @@ static int ad3552r_probe(struct spi_device *spi)
return devm_iio_device_register(&spi->dev, indio_dev);
}
-static const struct ad3552r_model_data ad3541r_model_data = {
- .model_name = "ad3541r",
- .chip_id = AD3541R_ID,
- .num_hw_channels = 1,
- .ranges_table = ad3542r_ch_ranges,
- .num_ranges = ARRAY_SIZE(ad3542r_ch_ranges),
- .requires_output_range = true,
-};
-
-static const struct ad3552r_model_data ad3542r_model_data = {
- .model_name = "ad3542r",
- .chip_id = AD3542R_ID,
- .num_hw_channels = 2,
- .ranges_table = ad3542r_ch_ranges,
- .num_ranges = ARRAY_SIZE(ad3542r_ch_ranges),
- .requires_output_range = true,
-};
-
-static const struct ad3552r_model_data ad3551r_model_data = {
- .model_name = "ad3551r",
- .chip_id = AD3551R_ID,
- .num_hw_channels = 1,
- .ranges_table = ad3552r_ch_ranges,
- .num_ranges = ARRAY_SIZE(ad3552r_ch_ranges),
- .requires_output_range = false,
-};
-
-static const struct ad3552r_model_data ad3552r_model_data = {
- .model_name = "ad3552r",
- .chip_id = AD3552R_ID,
- .num_hw_channels = 2,
- .ranges_table = ad3552r_ch_ranges,
- .num_ranges = ARRAY_SIZE(ad3552r_ch_ranges),
- .requires_output_range = false,
-};
-
static const struct spi_device_id ad3552r_id[] = {
{
.name = "ad3541r",
diff --git a/drivers/iio/dac/ad3552r.h b/drivers/iio/dac/ad3552r.h
index 4b5581039ae9..768fa264d39e 100644
--- a/drivers/iio/dac/ad3552r.h
+++ b/drivers/iio/dac/ad3552r.h
@@ -132,10 +132,14 @@
#define AD3552R_MAX_RANGES 5
#define AD3542R_MAX_RANGES 5
+#define AD3552R_SPI 0
+#define AD3552R_DUAL_SPI 1
#define AD3552R_QUAD_SPI 2
-extern const s32 ad3552r_ch_ranges[AD3552R_MAX_RANGES][2];
-extern const s32 ad3542r_ch_ranges[AD3542R_MAX_RANGES][2];
+extern const struct ad3552r_model_data ad3541r_model_data;
+extern const struct ad3552r_model_data ad3542r_model_data;
+extern const struct ad3552r_model_data ad3551r_model_data;
+extern const struct ad3552r_model_data ad3552r_model_data;
enum ad3552r_id {
AD3541R_ID = 0x400b,
@@ -151,6 +155,7 @@ struct ad3552r_model_data {
const s32 (*ranges_table)[2];
int num_ranges;
bool requires_output_range;
+ int num_spi_data_lanes;
};
struct ad3552r_ch_data {
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index 57374f78f6b8..07848be3f8d5 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -6,21 +6,24 @@
* Copyright 2011 Analog Devices Inc.
*/
-#include <linux/interrupt.h>
-#include <linux/fs.h>
-#include <linux/device.h>
+#include <linux/bitops.h>
#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/spi/spi.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/spi/offload/consumer.h>
+#include <linux/spi/spi.h>
#include <linux/sysfs.h>
-#include <linux/regulator/consumer.h>
-#include <linux/module.h>
-#include <linux/bitops.h>
+#include <linux/units.h>
+#include <linux/iio/buffer-dmaengine.h>
+#include <linux/iio/dac/ad5791.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#include <linux/iio/dac/ad5791.h>
#define AD5791_DAC_MASK GENMASK(19, 0)
@@ -64,11 +67,13 @@
* struct ad5791_chip_info - chip specific information
* @name: name of the dac chip
* @channel: channel specification
+ * @channel_offload: channel specification for offload
* @get_lin_comp: function pointer to the device specific function
*/
struct ad5791_chip_info {
const char *name;
const struct iio_chan_spec channel;
+ const struct iio_chan_spec channel_offload;
int (*get_lin_comp)(unsigned int span);
};
@@ -81,6 +86,11 @@ struct ad5791_chip_info {
* @gpio_clear: clear gpio
* @gpio_ldac: load dac gpio
* @chip_info: chip model specific constants
+ * @offload_msg: spi message used for offload
+ * @offload_xfer: spi transfer used for offload
+ * @offload: offload device
+ * @offload_trigger: offload trigger
+ * @offload_trigger_hz: offload sample rate
* @vref_mv: actual reference voltage used
* @vref_neg_mv: voltage of the negative supply
* @ctrl: control register cache
@@ -96,6 +106,11 @@ struct ad5791_state {
struct gpio_desc *gpio_clear;
struct gpio_desc *gpio_ldac;
const struct ad5791_chip_info *chip_info;
+ struct spi_message offload_msg;
+ struct spi_transfer offload_xfer;
+ struct spi_offload *offload;
+ struct spi_offload_trigger *offload_trigger;
+ unsigned int offload_trigger_hz;
unsigned short vref_mv;
unsigned int vref_neg_mv;
unsigned ctrl;
@@ -232,6 +247,25 @@ static int ad5780_get_lin_comp(unsigned int span)
return AD5780_LINCOMP_10_20;
}
+static int ad5791_set_sample_freq(struct ad5791_state *st, int val)
+{
+ struct spi_offload_trigger_config config = {
+ .type = SPI_OFFLOAD_TRIGGER_PERIODIC,
+ .periodic = {
+ .frequency_hz = val,
+ },
+ };
+ int ret;
+
+ ret = spi_offload_trigger_validate(st->offload_trigger, &config);
+ if (ret)
+ return ret;
+
+ st->offload_trigger_hz = config.periodic.frequency_hz;
+
+ return 0;
+}
+
static int ad5791_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val,
@@ -259,6 +293,9 @@ static int ad5791_read_raw(struct iio_dev *indio_dev,
do_div(val64, st->vref_mv);
*val = -val64;
return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = st->offload_trigger_hz;
+ return IIO_VAL_INT;
default:
return -EINVAL;
}
@@ -294,7 +331,25 @@ static const struct ad5791_chip_info _name##_chip_info = { \
.scan_type = { \
.sign = 'u', \
.realbits = (bits), \
- .storagebits = 24, \
+ .storagebits = 32, \
+ .shift = (_shift), \
+ }, \
+ .ext_info = ad5791_ext_info, \
+ }, \
+ .channel_offload = { \
+ .type = IIO_VOLTAGE, \
+ .output = 1, \
+ .indexed = 1, \
+ .address = AD5791_ADDR_DAC0, \
+ .channel = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 32, \
.shift = (_shift), \
}, \
.ext_info = ad5791_ext_info, \
@@ -322,14 +377,106 @@ static int ad5791_write_raw(struct iio_dev *indio_dev,
return ad5791_spi_write(st, chan->address, val);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val < 1)
+ return -EINVAL;
+ return ad5791_set_sample_freq(st, val);
default:
return -EINVAL;
}
}
+static int ad5791_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return IIO_VAL_INT;
+ default:
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+}
+
+static int ad5791_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct ad5791_state *st = iio_priv(indio_dev);
+ struct spi_offload_trigger_config config = {
+ .type = SPI_OFFLOAD_TRIGGER_PERIODIC,
+ .periodic = {
+ .frequency_hz = st->offload_trigger_hz,
+ },
+ };
+
+ if (st->pwr_down)
+ return -EINVAL;
+
+ return spi_offload_trigger_enable(st->offload, st->offload_trigger,
+ &config);
+}
+
+static int ad5791_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct ad5791_state *st = iio_priv(indio_dev);
+
+ spi_offload_trigger_disable(st->offload, st->offload_trigger);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops ad5791_buffer_setup_ops = {
+ .preenable = &ad5791_buffer_preenable,
+ .postdisable = &ad5791_buffer_postdisable,
+};
+
+static int ad5791_offload_setup(struct iio_dev *indio_dev)
+{
+ struct ad5791_state *st = iio_priv(indio_dev);
+ struct spi_device *spi = st->spi;
+ struct dma_chan *tx_dma;
+ int ret;
+
+ st->offload_trigger = devm_spi_offload_trigger_get(&spi->dev,
+ st->offload, SPI_OFFLOAD_TRIGGER_PERIODIC);
+ if (IS_ERR(st->offload_trigger))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->offload_trigger),
+ "failed to get offload trigger\n");
+
+ ret = ad5791_set_sample_freq(st, 1 * MEGA);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret,
+ "failed to init sample rate\n");
+
+ tx_dma = devm_spi_offload_tx_stream_request_dma_chan(&spi->dev,
+ st->offload);
+ if (IS_ERR(tx_dma))
+ return dev_err_probe(&spi->dev, PTR_ERR(tx_dma),
+ "failed to get offload TX DMA\n");
+
+ ret = devm_iio_dmaengine_buffer_setup_with_handle(&spi->dev,
+ indio_dev, tx_dma, IIO_BUFFER_DIRECTION_OUT);
+ if (ret)
+ return ret;
+
+ st->offload_xfer.len = 4;
+ st->offload_xfer.bits_per_word = 24;
+ st->offload_xfer.offload_flags = SPI_OFFLOAD_XFER_TX_STREAM;
+
+ spi_message_init_with_transfers(&st->offload_msg, &st->offload_xfer, 1);
+ st->offload_msg.offload = st->offload;
+
+ return devm_spi_optimize_message(&spi->dev, st->spi, &st->offload_msg);
+}
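A note on the len/bits_per_word pairing above: each scan element occupies one 32-bit buffer word (matching storagebits = 32), of which only the low 24 bits are shifted out. Sketched as a reading aid:

/*
 *   31        24 23                      0
 *  +------------+-------------------------+
 *  |   unused   |   24-bit AD5791 frame   |
 *  +------------+-------------------------+
 *
 * bits_per_word = 24 makes the SPI core consume one u32 per word,
 * hence offload_xfer.len = 4 bytes per sample.
 */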
+
static const struct iio_info ad5791_info = {
.read_raw = &ad5791_read_raw,
.write_raw = &ad5791_write_raw,
+ .write_raw_get_fmt = &ad5791_write_raw_get_fmt,
+};
+
+static const struct spi_offload_config ad5791_offload_config = {
+ .capability_flags = SPI_OFFLOAD_CAP_TRIGGER |
+ SPI_OFFLOAD_CAP_TX_STREAM_DMA,
};
static int ad5791_probe(struct spi_device *spi)
@@ -416,6 +563,21 @@ static int ad5791_probe(struct spi_device *spi)
indio_dev->channels = &st->chip_info->channel;
indio_dev->num_channels = 1;
indio_dev->name = st->chip_info->name;
+
+ st->offload = devm_spi_offload_get(&spi->dev, spi, &ad5791_offload_config);
+ ret = PTR_ERR_OR_ZERO(st->offload);
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(&spi->dev, ret, "failed to get offload\n");
+
+ if (ret != -ENODEV) {
+ indio_dev->channels = &st->chip_info->channel_offload;
+ indio_dev->setup_ops = &ad5791_buffer_setup_ops;
+ ret = ad5791_offload_setup(indio_dev);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret,
+ "fail to setup offload\n");
+ }
+
return devm_iio_device_register(&spi->dev, indio_dev);
}
@@ -452,3 +614,4 @@ module_spi_driver(ad5791_driver);
MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD5760/AD5780/AD5781/AD5790/AD5791 DAC");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS("IIO_DMAENGINE_BUFFER");
diff --git a/drivers/iio/dac/ad8460.c b/drivers/iio/dac/ad8460.c
index 535ee3105af6..6e45686902dd 100644
--- a/drivers/iio/dac/ad8460.c
+++ b/drivers/iio/dac/ad8460.c
@@ -264,9 +264,12 @@ static ssize_t ad8460_write_toggle_en(struct iio_dev *indio_dev, uintptr_t priva
if (ret)
return ret;
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return ad8460_enable_apg_mode(state, toggle_en);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = ad8460_enable_apg_mode(state, toggle_en);
+ iio_device_release_direct(indio_dev);
+ return ret;
}
static ssize_t ad8460_read_powerdown(struct iio_dev *indio_dev, uintptr_t private,
@@ -421,14 +424,17 @@ static int ad8460_write_raw(struct iio_dev *indio_dev,
long mask)
{
struct ad8460_state *state = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
switch (chan->type) {
case IIO_VOLTAGE:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return ad8460_set_sample(state, val);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = ad8460_set_sample(state, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CURRENT:
return regmap_write(state->regmap, AD8460_CTRL_REG(0x04),
FIELD_PREP(AD8460_QUIESCENT_CURRENT_MSK, val));
diff --git a/drivers/iio/dac/adi-axi-dac.c b/drivers/iio/dac/adi-axi-dac.c
index b143f7ed6847..892d770aec69 100644
--- a/drivers/iio/dac/adi-axi-dac.c
+++ b/drivers/iio/dac/adi-axi-dac.c
@@ -64,7 +64,7 @@
#define AXI_DAC_UI_STATUS_IF_BUSY BIT(4)
#define AXI_DAC_CUSTOM_CTRL_REG 0x008C
#define AXI_DAC_CUSTOM_CTRL_ADDRESS GENMASK(31, 24)
-#define AXI_DAC_CUSTOM_CTRL_SYNCED_TRANSFER BIT(2)
+#define AXI_DAC_CUSTOM_CTRL_MULTI_IO_MODE GENMASK(3, 2)
#define AXI_DAC_CUSTOM_CTRL_STREAM BIT(1)
#define AXI_DAC_CUSTOM_CTRL_TRANSFER_DATA BIT(0)
@@ -168,7 +168,7 @@ static struct iio_buffer *axi_dac_request_buffer(struct iio_backend *back,
static void axi_dac_free_buffer(struct iio_backend *back,
struct iio_buffer *buffer)
{
- iio_dmaengine_buffer_free(buffer);
+ iio_dmaengine_buffer_teardown(buffer);
}
enum {
@@ -585,6 +585,14 @@ static int axi_dac_ddr_disable(struct iio_backend *back)
static int axi_dac_data_stream_enable(struct iio_backend *back)
{
struct axi_dac_state *st = iio_backend_get_priv(back);
+ int ret, val;
+
+ ret = regmap_read_poll_timeout(st->regmap,
+ AXI_DAC_UI_STATUS_REG, val,
+ FIELD_GET(AXI_DAC_UI_STATUS_IF_BUSY, val) == 0,
+ 10, 100 * KILO);
+ if (ret)
+ return ret;
return regmap_set_bits(st->regmap, AXI_DAC_CUSTOM_CTRL_REG,
AXI_DAC_CUSTOM_CTRL_STREAM_ENABLE);
@@ -714,6 +722,28 @@ static int axi_dac_bus_reg_read(struct iio_backend *back, u32 reg, u32 *val,
return regmap_read(st->regmap, AXI_DAC_CUSTOM_RD_REG, val);
}
+static int axi_dac_bus_set_io_mode(struct iio_backend *back,
+ enum ad3552r_io_mode mode)
+{
+ struct axi_dac_state *st = iio_backend_get_priv(back);
+ int ival, ret;
+
+ if (mode > AD3552R_IO_MODE_QSPI)
+ return -EINVAL;
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_update_bits(st->regmap, AXI_DAC_CUSTOM_CTRL_REG,
+ AXI_DAC_CUSTOM_CTRL_MULTI_IO_MODE,
+ FIELD_PREP(AXI_DAC_CUSTOM_CTRL_MULTI_IO_MODE, mode));
+ if (ret)
+ return ret;
+
+ return regmap_read_poll_timeout(st->regmap, AXI_DAC_UI_STATUS_REG, ival,
+ FIELD_GET(AXI_DAC_UI_STATUS_IF_BUSY, ival) == 0, 10,
+ 100 * KILO);
+}
+
static void axi_dac_child_remove(void *data)
{
platform_device_unregister(data);
@@ -725,6 +755,7 @@ static int axi_dac_create_platform_device(struct axi_dac_state *st,
struct ad3552r_hs_platform_data pdata = {
.bus_reg_read = axi_dac_bus_reg_read,
.bus_reg_write = axi_dac_bus_reg_write,
+ .bus_set_io_mode = axi_dac_bus_set_io_mode,
.bus_sample_data_clock_hz = st->dac_clk_rate,
};
struct platform_device_info pi = {
diff --git a/drivers/iio/dummy/iio_simple_dummy.c b/drivers/iio/dummy/iio_simple_dummy.c
index 09efacaf8f78..8575d4a08963 100644
--- a/drivers/iio/dummy/iio_simple_dummy.c
+++ b/drivers/iio/dummy/iio_simple_dummy.c
@@ -267,6 +267,65 @@ static const struct iio_chan_spec iio_dummy_channels[] = {
},
};
+static int __iio_dummy_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct iio_dummy_state *st = iio_priv(indio_dev);
+
+ guard(mutex)(&st->lock);
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (chan->output) {
+ /* Set integer part to cached value */
+ *val = st->dac_val;
+ return IIO_VAL_INT;
+ } else if (chan->differential) {
+ if (chan->channel == 1)
+ *val = st->differential_adc_val[0];
+ else
+ *val = st->differential_adc_val[1];
+ return IIO_VAL_INT;
+ } else {
+ *val = st->single_ended_adc_val;
+ return IIO_VAL_INT;
+ }
+
+ case IIO_ACCEL:
+ *val = st->accel_val;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int __iio_dummy_read_processed(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct iio_dummy_state *st = iio_priv(indio_dev);
+
+ guard(mutex)(&st->lock);
+ switch (chan->type) {
+ case IIO_STEPS:
+ *val = st->steps;
+ return IIO_VAL_INT;
+ case IIO_ACTIVITY:
+ switch (chan->channel2) {
+ case IIO_MOD_RUNNING:
+ *val = st->activity_running;
+ return IIO_VAL_INT;
+ case IIO_MOD_WALKING:
+ *val = st->activity_walking;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
/**
* iio_dummy_read_raw() - data read function.
* @indio_dev: the struct iio_dev associated with this device instance
@@ -283,59 +342,21 @@ static int iio_dummy_read_raw(struct iio_dev *indio_dev,
long mask)
{
struct iio_dummy_state *st = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW: /* magic value - channel value read */
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- guard(mutex)(&st->lock);
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (chan->output) {
- /* Set integer part to cached value */
- *val = st->dac_val;
- return IIO_VAL_INT;
- } else if (chan->differential) {
- if (chan->channel == 1)
- *val = st->differential_adc_val[0];
- else
- *val = st->differential_adc_val[1];
- return IIO_VAL_INT;
- } else {
- *val = st->single_ended_adc_val;
- return IIO_VAL_INT;
- }
-
- case IIO_ACCEL:
- *val = st->accel_val;
- return IIO_VAL_INT;
- default:
- return -EINVAL;
- }
- }
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = __iio_dummy_read_raw(indio_dev, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_PROCESSED:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- guard(mutex)(&st->lock);
- switch (chan->type) {
- case IIO_STEPS:
- *val = st->steps;
- return IIO_VAL_INT;
- case IIO_ACTIVITY:
- switch (chan->channel2) {
- case IIO_MOD_RUNNING:
- *val = st->activity_running;
- return IIO_VAL_INT;
- case IIO_MOD_WALKING:
- *val = st->activity_walking;
- return IIO_VAL_INT;
- default:
- return -EINVAL;
- }
- default:
- return -EINVAL;
- }
- }
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = __iio_dummy_read_processed(indio_dev, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_OFFSET:
/* only single ended adc -> 7 */
*val = 7;
diff --git a/drivers/iio/frequency/adf4371.c b/drivers/iio/frequency/adf4371.c
index d752507e0c98..9a84e81787b1 100644
--- a/drivers/iio/frequency/adf4371.c
+++ b/drivers/iio/frequency/adf4371.c
@@ -42,6 +42,12 @@
#define ADF4371_MOD2WORD_MSK GENMASK(5, 0)
#define ADF4371_MOD2WORD(x) FIELD_PREP(ADF4371_MOD2WORD_MSK, x)
+/* ADF4371_REG22 */
+#define ADF4371_REFIN_MODE_MASK BIT(6)
+#define ADF4371_REFIN_MODE(x) FIELD_PREP(ADF4371_REFIN_MODE_MASK, x)
+#define ADF4371_REF_DOUB_MASK BIT(5)
+#define ADF4371_REF_DOUB(x) FIELD_PREP(ADF4371_REF_DOUB_MASK, x)
+
/* ADF4371_REG24 */
#define ADF4371_RF_DIV_SEL_MSK GENMASK(6, 4)
#define ADF4371_RF_DIV_SEL(x) FIELD_PREP(ADF4371_RF_DIV_SEL_MSK, x)
@@ -70,6 +76,10 @@
#define ADF4371_MAX_FREQ_PFD 250000000UL /* Hz */
#define ADF4371_MAX_FREQ_REFIN 600000000UL /* Hz */
+#define ADF4371_MAX_FREQ_REFIN_SE 500000000UL /* Hz */
+
+#define ADF4371_MIN_CLKIN_DOUB_FREQ 10000000ULL /* Hz */
+#define ADF4371_MAX_CLKIN_DOUB_FREQ 125000000ULL /* Hz */
/* MOD1 is a 24-bit primary modulus with fixed value of 2^25 */
#define ADF4371_MODULUS1 33554432ULL
@@ -176,6 +186,7 @@ struct adf4371_state {
unsigned int mod2;
unsigned int rf_div_sel;
unsigned int ref_div_factor;
+ bool ref_diff_en;
u8 buf[10] __aligned(IIO_DMA_MINALIGN);
};
@@ -477,7 +488,7 @@ static const struct iio_info adf4371_info = {
static int adf4371_setup(struct adf4371_state *st)
{
unsigned int synth_timeout = 2, timeout = 1, vco_alc_timeout = 1;
- unsigned int vco_band_div, tmp;
+ unsigned int vco_band_div, tmp, ref_doubler_en = 0;
int ret;
/* Perform a software reset */
@@ -505,6 +516,23 @@ static int adf4371_setup(struct adf4371_state *st)
ADF4371_ADDR_ASC(1) | ADF4371_ADDR_ASC_R(1));
if (ret < 0)
return ret;
+
+ if ((st->ref_diff_en && st->clkin_freq > ADF4371_MAX_FREQ_REFIN) ||
+ (!st->ref_diff_en && st->clkin_freq > ADF4371_MAX_FREQ_REFIN_SE))
+ return -EINVAL;
+
+ if (st->clkin_freq < ADF4371_MAX_CLKIN_DOUB_FREQ &&
+ st->clkin_freq > ADF4371_MIN_CLKIN_DOUB_FREQ)
+ ref_doubler_en = 1;
+
+ ret = regmap_update_bits(st->regmap, ADF4371_REG(0x22),
+ ADF4371_REF_DOUB_MASK |
+ ADF4371_REFIN_MODE_MASK,
+ ADF4371_REF_DOUB(ref_doubler_en) |
+ ADF4371_REFIN_MODE(st->ref_diff_en));
+ if (ret < 0)
+ return ret;
+
/*
* Calculate and maximize PFD frequency
* fPFD = REFIN × ((1 + D)/(R × (1 + T)))
@@ -514,7 +542,8 @@ static int adf4371_setup(struct adf4371_state *st)
*/
do {
st->ref_div_factor++;
- st->fpfd = st->clkin_freq / st->ref_div_factor;
+ st->fpfd = st->clkin_freq * (1 + ref_doubler_en) /
+ st->ref_div_factor;
} while (st->fpfd > ADF4371_MAX_FREQ_PFD);
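A worked example of this loop, with illustrative inputs: a 100 MHz reference falls inside the doubler window above, so D = 1 and the first divider value already satisfies the limit, while a 600 MHz reference (doubler off) needs R = 3:

	/* 100 MHz: fPFD = 100 MHz * (1 + 1) / 1 = 200 MHz <= 250 MHz, R = 1 */
	/* 600 MHz: fPFD = 600 MHz / 3           = 200 MHz <= 250 MHz, R = 3 */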
/* Calculate Timeouts */
@@ -574,10 +603,16 @@ static int adf4371_probe(struct spi_device *spi)
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
+ st->ref_diff_en = false;
+
clkin = devm_clk_get_enabled(&spi->dev, "clkin");
- if (IS_ERR(clkin))
- return dev_err_probe(&spi->dev, PTR_ERR(clkin),
- "Failed to get clkin\n");
+ if (IS_ERR(clkin)) {
+ clkin = devm_clk_get_enabled(&spi->dev, "clkin-diff");
+ if (IS_ERR(clkin))
+ return dev_err_probe(&spi->dev, PTR_ERR(clkin),
+ "Failed to get clkin/clkin-diff\n");
+ st->ref_diff_en = true;
+ }
st->clkin_freq = clk_get_rate(clkin);
diff --git a/drivers/iio/gyro/bmg160_i2c.c b/drivers/iio/gyro/bmg160_i2c.c
index 9c5d7e8ee99c..e6caab49f98a 100644
--- a/drivers/iio/gyro/bmg160_i2c.c
+++ b/drivers/iio/gyro/bmg160_i2c.c
@@ -58,6 +58,7 @@ MODULE_DEVICE_TABLE(i2c, bmg160_i2c_id);
static const struct of_device_id bmg160_of_match[] = {
{ .compatible = "bosch,bmg160" },
{ .compatible = "bosch,bmi055_gyro" },
+ { .compatible = "bosch,bmi088_gyro" },
{ }
};
diff --git a/drivers/iio/gyro/bmg160_spi.c b/drivers/iio/gyro/bmg160_spi.c
index fc2e453527b9..ac04b3b1b554 100644
--- a/drivers/iio/gyro/bmg160_spi.c
+++ b/drivers/iio/gyro/bmg160_spi.c
@@ -41,9 +41,19 @@ static const struct spi_device_id bmg160_spi_id[] = {
MODULE_DEVICE_TABLE(spi, bmg160_spi_id);
+static const struct of_device_id bmg160_of_match[] = {
+ { .compatible = "bosch,bmg160" },
+ { .compatible = "bosch,bmi055_gyro" },
+ { .compatible = "bosch,bmi088_gyro" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, bmg160_of_match);
+
static struct spi_driver bmg160_spi_driver = {
.driver = {
.name = "bmg160_spi",
+ .of_match_table = bmg160_of_match,
.pm = &bmg160_pm_ops,
},
.probe = bmg160_spi_probe,
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index c97e25448772..48c59d09eea7 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/sysfs.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
@@ -99,7 +100,7 @@ static void dht11_edges_print(struct dht11 *dht11)
for (i = 1; i < dht11->num_edges; ++i) {
dev_dbg(dht11->dev, "%d: %lld ns %s\n", i,
dht11->edges[i].ts - dht11->edges[i - 1].ts,
- dht11->edges[i - 1].value ? "high" : "low");
+ str_high_low(dht11->edges[i - 1].value));
}
}
#endif /* CONFIG_DYNAMIC_DEBUG */
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index ca0efecb5b5c..15612f0f189b 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -52,6 +52,19 @@ config ADIS16480
Say yes here to build support for Analog Devices ADIS16375, ADIS16480,
ADIS16485, ADIS16488 inertial sensors.
+config ADIS16550
+ tristate "Analog Devices ADIS16550 and similar IMU driver"
+ depends on SPI
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ select CRC32
+ help
+ Say yes here to build support for Analog Devices ADIS16550 inertial
+ sensor containing triaxis gyroscope and triaxis accelerometer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called adis16550.
+
source "drivers/iio/imu/bmi160/Kconfig"
source "drivers/iio/imu/bmi270/Kconfig"
source "drivers/iio/imu/bmi323/Kconfig"
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index 04c77c2c4df8..e901aea498d3 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_ADIS16400) += adis16400.o
obj-$(CONFIG_ADIS16460) += adis16460.o
obj-$(CONFIG_ADIS16475) += adis16475.o
obj-$(CONFIG_ADIS16480) += adis16480.o
+obj-$(CONFIG_ADIS16550) += adis16550.o
adis_lib-y += adis.o
adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_trigger.o
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index 494171844812..0ea072a4c966 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -223,13 +223,13 @@ int __adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask,
int ret;
u32 __val;
- ret = __adis_read_reg(adis, reg, &__val, size);
+ ret = adis->ops->read(adis, reg, &__val, size);
if (ret)
return ret;
__val = (__val & ~mask) | (val & mask);
- return __adis_write_reg(adis, reg, __val, size);
+ return adis->ops->write(adis, reg, __val, size);
}
EXPORT_SYMBOL_NS_GPL(__adis_update_bits_base, "IIO_ADISLIB");
@@ -304,11 +304,20 @@ EXPORT_SYMBOL_NS(__adis_enable_irq, "IIO_ADISLIB");
*/
int __adis_check_status(struct adis *adis)
{
- u16 status;
+ unsigned int status;
+ int diag_stat_bits;
+ u16 status_16 = 0;
int ret;
int i;
- ret = __adis_read_reg_16(adis, adis->data->diag_stat_reg, &status);
+ if (adis->data->diag_stat_size) {
+ ret = adis->ops->read(adis, adis->data->diag_stat_reg, &status,
+ adis->data->diag_stat_size);
+ } else {
+ ret = __adis_read_reg_16(adis, adis->data->diag_stat_reg,
+ &status_16);
+ status = status_16;
+ }
if (ret)
return ret;
@@ -317,7 +326,10 @@ int __adis_check_status(struct adis *adis)
if (status == 0)
return 0;
- for (i = 0; i < 16; ++i) {
+ diag_stat_bits = BITS_PER_BYTE * (adis->data->diag_stat_size ?
+ adis->data->diag_stat_size : 2);
+
+ for (i = 0; i < diag_stat_bits; ++i) {
if (status & BIT(i)) {
dev_err(&adis->spi->dev, "%s.\n",
adis->data->status_error_msgs[i]);
@@ -468,7 +480,7 @@ int adis_single_conversion(struct iio_dev *indio_dev,
guard(mutex)(&adis->state_lock);
- ret = __adis_read_reg(adis, chan->address, &uval,
+ ret = adis->ops->read(adis, chan->address, &uval,
chan->scan_type.storagebits / 8);
if (ret)
return ret;
@@ -488,6 +500,12 @@ int adis_single_conversion(struct iio_dev *indio_dev,
}
EXPORT_SYMBOL_NS_GPL(adis_single_conversion, "IIO_ADISLIB");
+static const struct adis_ops adis_default_ops = {
+ .read = __adis_read_reg,
+ .write = __adis_write_reg,
+ .reset = __adis_reset,
+};
+
/**
* adis_init() - Initialize adis device structure
* @adis: The adis device
@@ -517,6 +535,11 @@ int adis_init(struct adis *adis, struct iio_dev *indio_dev,
adis->spi = spi;
adis->data = data;
+ if (!adis->ops->write && !adis->ops->read && !adis->ops->reset)
+ adis->ops = &adis_default_ops;
+ else if (!adis->ops->write || !adis->ops->read || !adis->ops->reset)
+ return -EINVAL;
+
iio_device_set_drvdata(indio_dev, adis);
if (data->has_paging) {
diff --git a/drivers/iio/imu/adis16550.c b/drivers/iio/imu/adis16550.c
new file mode 100644
index 000000000000..b14ea8937c7f
--- /dev/null
+++ b/drivers/iio/imu/adis16550.c
@@ -0,0 +1,1147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ADIS16550 IMU driver
+ *
+ * Copyright 2024 Analog Devices Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/crc32.h>
+#include <linux/debugfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/imu/adis.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/lcm.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/swab.h>
+#include <linux/unaligned.h>
+
+#define ADIS16550_REG_BURST_GYRO_ACCEL 0x0a
+#define ADIS16550_REG_BURST_DELTA_ANG_VEL 0x0b
+#define ADIS16550_BURST_DATA_GYRO_ACCEL_MASK GENMASK(6, 1)
+#define ADIS16550_BURST_DATA_DELTA_ANG_VEL_MASK GENMASK(12, 7)
+
+#define ADIS16550_REG_STATUS 0x0e
+#define ADIS16550_REG_TEMP 0x10
+#define ADIS16550_REG_X_GYRO 0x12
+#define ADIS16550_REG_Y_GYRO 0x14
+#define ADIS16550_REG_Z_GYRO 0x16
+#define ADIS16550_REG_X_ACCEL 0x18
+#define ADIS16550_REG_Y_ACCEL 0x1a
+#define ADIS16550_REG_Z_ACCEL 0x1c
+#define ADIS16550_REG_X_DELTANG_L 0x1E
+#define ADIS16550_REG_Y_DELTANG_L 0x20
+#define ADIS16550_REG_Z_DELTANG_L 0x22
+#define ADIS16550_REG_X_DELTVEL_L 0x24
+#define ADIS16550_REG_Y_DELTVEL_L 0x26
+#define ADIS16550_REG_Z_DELTVEL_L 0x28
+#define ADIS16550_REG_X_GYRO_SCALE 0x30
+#define ADIS16550_REG_Y_GYRO_SCALE 0x32
+#define ADIS16550_REG_Z_GYRO_SCALE 0x34
+#define ADIS16550_REG_X_ACCEL_SCALE 0x36
+#define ADIS16550_REG_Y_ACCEL_SCALE 0x38
+#define ADIS16550_REG_Z_ACCEL_SCALE 0x3a
+#define ADIS16550_REG_X_GYRO_BIAS 0x40
+#define ADIS16550_REG_Y_GYRO_BIAS 0x42
+#define ADIS16550_REG_Z_GYRO_BIAS 0x44
+#define ADIS16550_REG_X_ACCEL_BIAS 0x46
+#define ADIS16550_REG_Y_ACCEL_BIAS 0x48
+#define ADIS16550_REG_Z_ACCEL_BIAS 0x4a
+#define ADIS16550_REG_COMMAND 0x50
+#define ADIS16550_REG_CONFIG 0x52
+#define ADIS16550_GYRO_FIR_EN_MASK BIT(3)
+#define ADIS16550_ACCL_FIR_EN_MASK BIT(2)
+#define ADIS16550_SYNC_MASK \
+ (ADIS16550_SYNC_EN_MASK | ADIS16550_SYNC_MODE_MASK)
+#define ADIS16550_SYNC_MODE_MASK BIT(1)
+#define ADIS16550_SYNC_EN_MASK BIT(0)
+/* max of 4000 SPS in scaled sync mode */
+#define ADIS16550_SYNC_SCALE_MAX_RATE (4000 * 1000)
+#define ADIS16550_REG_DEC_RATE 0x54
+#define ADIS16550_REG_SYNC_SCALE 0x56
+#define ADIS16550_REG_SERIAL_NUM 0x76
+#define ADIS16550_REG_FW_REV 0x7A
+#define ADIS16550_REG_FW_DATE 0x7C
+#define ADIS16550_REG_PROD_ID 0x7E
+#define ADIS16550_REG_FLASH_CNT 0x72
+/* SPI protocol */
+#define ADIS16550_SPI_DATA_MASK GENMASK(31, 16)
+#define ADIS16550_SPI_REG_MASK GENMASK(14, 8)
+#define ADIS16550_SPI_R_W_MASK BIT(7)
+#define ADIS16550_SPI_CRC_MASK GENMASK(3, 0)
+#define ADIS16550_SPI_SV_MASK GENMASK(7, 6)
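Putting the masks together, the 32-bit frame appears to be laid out as below; this is inferred from the defines above, so treat it as a reading aid rather than a datasheet quote:

/*
 *  31            16  15  14       8   7   6    4  3    0
 * +----------------+---+-----------+----+--------+------+
 * |      data      | 0 |  reg addr | rw | unused | crc4 |
 * +----------------+---+-----------+----+--------+------+
 *
 * In replies (dout), bits [7:6] carry the state vector instead.
 */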
+/* burst read */
+#define ADIS16550_BURST_N_ELEM 12
+#define ADIS16550_BURST_DATA_LEN (ADIS16550_BURST_N_ELEM * 4)
+#define ADIS16550_MAX_SCAN_DATA 12
+
+struct adis16550_sync {
+ u16 sync_mode;
+ u16 min_rate;
+ u16 max_rate;
+};
+
+struct adis16550_chip_info {
+ const struct iio_chan_spec *channels;
+ const struct adis16550_sync *sync_mode;
+ char *name;
+ u32 num_channels;
+ u32 gyro_max_val;
+ u32 gyro_max_scale;
+ u32 accel_max_val;
+ u32 accel_max_scale;
+ u32 temp_scale;
+ u32 deltang_max_val;
+ u32 deltvel_max_val;
+ u32 int_clk;
+ u16 max_dec;
+ u16 num_sync;
+};
+
+struct adis16550 {
+ const struct adis16550_chip_info *info;
+ struct adis adis;
+ unsigned long clk_freq_hz;
+ u32 sync_mode;
+ struct spi_transfer xfer[2];
+ u8 buffer[ADIS16550_BURST_DATA_LEN + sizeof(u32)] __aligned(IIO_DMA_MINALIGN);
+ __be32 din[2];
+ __be32 dout[2];
+};
+
+enum {
+ ADIS16550_SV_INIT,
+ ADIS16550_SV_OK,
+ ADIS16550_SV_NOK,
+ ADIS16550_SV_SPI_ERROR,
+};
+
+/*
+ * This is a simplified implementation of lib/crc4.c. It cannot be used
+ * directly, since lib/crc4.c uses a polynomial different from the one used
+ * by the adis16550, which is 0b10001.
+ */
+static u8 spi_crc4(const u32 val)
+{
+ int i;
+ const int bits = 28;
+ u8 crc = 0xa;
+ /* ignore the 4 LSBs */
+ const u32 __val = val >> 4;
+
+ /* Calculate crc4 over four-bit nibbles, starting at the most significant nibble */
+ for (i = bits - 4; i >= 0; i -= 4)
+ crc = crc ^ ((__val >> i) & 0xf);
+
+ return crc;
+}
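A worked example, easy to check by hand since the routine reduces to XORing the seven nibbles above bit 4 into the 0xa seed: a read request for reg 0x7e (PROD_ID) starts as __din = 0x00007e00, so:

	/*
	 * val >> 4  = 0x000007e0, nibbles = 0 0 0 0 7 e 0
	 * crc       = 0xa ^ 0x7 ^ 0xe = 0x3
	 * wire word = 0x00007e03
	 */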
+
+static int adis16550_spi_validate(const struct adis *adis, __be32 dout,
+ u16 *data)
+{
+ u32 __dout;
+ u8 crc, crc_rcv, sv;
+
+ __dout = be32_to_cpu(dout);
+
+ /* validate received message */
+ crc_rcv = FIELD_GET(ADIS16550_SPI_CRC_MASK, __dout);
+ crc = spi_crc4(__dout);
+ if (crc_rcv != crc) {
+ dev_err(&adis->spi->dev,
+ "Invalid crc, rcv: 0x%02x, calc: 0x%02x!\n",
+ crc_rcv, crc);
+ return -EIO;
+ }
+ sv = FIELD_GET(ADIS16550_SPI_SV_MASK, __dout);
+ if (sv >= ADIS16550_SV_NOK) {
+ dev_err(&adis->spi->dev,
+ "State vector error detected: %02X", sv);
+ return -EIO;
+ }
+ *data = FIELD_GET(ADIS16550_SPI_DATA_MASK, __dout);
+
+ return 0;
+}
+
+static void adis16550_spi_msg_prepare(const u32 reg, const bool write,
+ const u16 data, __be32 *din)
+{
+ u8 crc;
+ u32 __din;
+
+ __din = FIELD_PREP(ADIS16550_SPI_REG_MASK, reg);
+
+ if (write) {
+ __din |= FIELD_PREP(ADIS16550_SPI_R_W_MASK, 1);
+ __din |= FIELD_PREP(ADIS16550_SPI_DATA_MASK, data);
+ }
+
+ crc = spi_crc4(__din);
+ __din |= FIELD_PREP(ADIS16550_SPI_CRC_MASK, crc);
+
+ *din = cpu_to_be32(__din);
+}
+
+static int adis16550_spi_xfer(const struct adis *adis, u32 reg, u32 len,
+ u32 *readval, u32 writeval)
+{
+ int ret;
+ u16 data = 0;
+ struct spi_message msg;
+ bool wr = !readval;
+ struct spi_device *spi = adis->spi;
+ struct adis16550 *st = container_of(adis, struct adis16550, adis);
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = &st->din[0],
+ .len = 4,
+ .cs_change = 1,
+ }, {
+ .tx_buf = &st->din[1],
+ .len = 4,
+ .cs_change = 1,
+ .rx_buf = st->dout,
+ }, {
+ .tx_buf = &st->din[1],
+ .rx_buf = &st->dout[1],
+ .len = 4,
+ },
+ };
+
+ spi_message_init(&msg);
+
+ switch (len) {
+ case 4:
+ adis16550_spi_msg_prepare(reg + 1, wr, writeval >> 16,
+ &st->din[0]);
+ spi_message_add_tail(&xfers[0], &msg);
+ fallthrough;
+ case 2:
+ adis16550_spi_msg_prepare(reg, wr, writeval, &st->din[1]);
+ spi_message_add_tail(&xfers[1], &msg);
+ spi_message_add_tail(&xfers[2], &msg);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = spi_sync(spi, &msg);
+ if (ret) {
+ dev_err(&spi->dev, "Spi failure %d\n", ret);
+ return ret;
+ }
+ /*
+ * When writing a register, the device replies with a readback on the
+ * transfer so that we can validate that our data was actually written.
+ */
+ switch (len) {
+ case 4:
+ ret = adis16550_spi_validate(adis, st->dout[0], &data);
+ if (ret)
+ return ret;
+
+ if (readval) {
+ *readval = data << 16;
+ } else if ((writeval >> 16) != data && reg != ADIS16550_REG_COMMAND) {
+ dev_err(&spi->dev,
+ "Data not written: wr: 0x%04X, rcv: 0x%04X\n",
+ writeval >> 16, data);
+ return -EIO;
+ }
+
+ fallthrough;
+ case 2:
+ ret = adis16550_spi_validate(adis, st->dout[1], &data);
+ if (ret)
+ return ret;
+
+ if (readval) {
+ *readval = (*readval & GENMASK(31, 16)) | data;
+ } else if ((writeval & GENMASK(15, 0)) != data && reg != ADIS16550_REG_COMMAND) {
+ dev_err(&spi->dev,
+ "Data not written: wr: 0x%04X, rcv: 0x%04X\n",
+ (u16)writeval, data);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int adis16550_spi_read(struct adis *adis, const u32 reg,
+ u32 *value, const u32 len)
+{
+ return adis16550_spi_xfer(adis, reg, len, value, 0);
+}
+
+static int adis16550_spi_write(struct adis *adis, const u32 reg,
+ const u32 value, const u32 len)
+{
+ return adis16550_spi_xfer(adis, reg, len, NULL, value);
+}
+
+static ssize_t adis16550_show_firmware_revision(struct file *file,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct adis16550 *st = file->private_data;
+ char buf[7];
+ size_t len;
+ u16 rev;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16550_REG_FW_REV, &rev);
+ if (ret)
+ return ret;
+
+ len = scnprintf(buf, sizeof(buf), "%x.%x\n", rev >> 8, rev & 0xff);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations adis16550_firmware_revision_fops = {
+ .open = simple_open,
+ .read = adis16550_show_firmware_revision,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t adis16550_show_firmware_date(struct file *file,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct adis16550 *st = file->private_data;
+ char buf[12];
+ size_t len;
+ u32 date;
+ int ret;
+
+ ret = adis_read_reg_32(&st->adis, ADIS16550_REG_FW_DATE, &date);
+ if (ret)
+ return ret;
+
+ len = scnprintf(buf, sizeof(buf), "%.2x-%.2x-%.4x\n", date & 0xff,
+ (date >> 8) & 0xff, date >> 16);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations adis16550_firmware_date_fops = {
+ .open = simple_open,
+ .read = adis16550_show_firmware_date,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+static int adis16550_show_serial_number(void *arg, u64 *val)
+{
+ struct adis16550 *st = arg;
+ u32 serial;
+ int ret;
+
+ ret = adis_read_reg_32(&st->adis, ADIS16550_REG_SERIAL_NUM, &serial);
+ if (ret)
+ return ret;
+
+ *val = serial;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(adis16550_serial_number_fops,
+ adis16550_show_serial_number, NULL, "0x%.8llx\n");
+
+static int adis16550_show_product_id(void *arg, u64 *val)
+{
+ struct adis16550 *st = arg;
+ u16 prod_id;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16550_REG_PROD_ID, &prod_id);
+ if (ret)
+ return ret;
+
+ *val = prod_id;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(adis16550_product_id_fops,
+ adis16550_show_product_id, NULL, "%llu\n");
+
+static int adis16550_show_flash_count(void *arg, u64 *val)
+{
+ struct adis16550 *st = arg;
+ u16 flash_count;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16550_REG_FLASH_CNT, &flash_count);
+ if (ret)
+ return ret;
+
+ *val = flash_count;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(adis16550_flash_count_fops,
+ adis16550_show_flash_count, NULL, "%lld\n");
+
+static void adis16550_debugfs_init(struct iio_dev *indio_dev)
+{
+ struct adis16550 *st = iio_priv(indio_dev);
+ struct dentry *d = iio_get_debugfs_dentry(indio_dev);
+
+ debugfs_create_file_unsafe("serial_number", 0400, d, st,
+ &adis16550_serial_number_fops);
+ debugfs_create_file_unsafe("product_id", 0400, d, st,
+ &adis16550_product_id_fops);
+ debugfs_create_file("firmware_revision", 0400, d, st,
+ &adis16550_firmware_revision_fops);
+ debugfs_create_file("firmware_date", 0400, d, st,
+ &adis16550_firmware_date_fops);
+ debugfs_create_file_unsafe("flash_count", 0400, d, st,
+ &adis16550_flash_count_fops);
+}
+
+enum {
+ ADIS16550_SYNC_MODE_DIRECT,
+ ADIS16550_SYNC_MODE_SCALED,
+};
+
+static int adis16550_get_freq(struct adis16550 *st, u32 *freq)
+{
+ int ret;
+ u16 dec = 0;
+ u32 sample_rate = st->clk_freq_hz;
+
+ adis_dev_auto_lock(&st->adis);
+
+ if (st->sync_mode == ADIS16550_SYNC_MODE_SCALED) {
+ u16 sync_scale;
+
+ ret = __adis_read_reg_16(&st->adis, ADIS16550_REG_SYNC_SCALE, &sync_scale);
+ if (ret)
+ return ret;
+
+ sample_rate = st->clk_freq_hz * sync_scale;
+ }
+
+ ret = __adis_read_reg_16(&st->adis, ADIS16550_REG_DEC_RATE, &dec);
+ if (ret)
+ return ret;
+ *freq = DIV_ROUND_CLOSEST(sample_rate, dec + 1);
+
+ return 0;
+}
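The /1000 and *1000 conversions in read_raw()/write_raw() below suggest rates are tracked in millihertz; assuming so, a worked example with the internal 4 kHz clock (sample_rate = 4000000) and DEC_RATE = 3:

	/* freq = DIV_ROUND_CLOSEST(4000000, 3 + 1) = 1000000, i.e. 1 kHz */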
+
+static int adis16550_set_freq_hz(struct adis16550 *st, u32 freq_hz)
+{
+ u16 dec;
+ int ret;
+ u32 sample_rate = st->clk_freq_hz;
+ /*
+ * The optimal sample rate for the supported IMUs is between
+ * int_clk - 1000 Hz and int_clk + 500 Hz (values below are in
+ * millihertz).
+ */
+ u32 max_sample_rate = st->info->int_clk * 1000 + 500000;
+ u32 min_sample_rate = st->info->int_clk * 1000 - 1000000;
+
+ if (!freq_hz)
+ return -EINVAL;
+
+ adis_dev_auto_lock(&st->adis);
+
+ if (st->sync_mode == ADIS16550_SYNC_MODE_SCALED) {
+ unsigned long scaled_rate = lcm(st->clk_freq_hz, freq_hz);
+ int sync_scale;
+
+ if (scaled_rate > max_sample_rate)
+ scaled_rate = max_sample_rate / st->clk_freq_hz * st->clk_freq_hz;
+ else
+ scaled_rate = max_sample_rate / scaled_rate * scaled_rate;
+
+ if (scaled_rate < min_sample_rate)
+ scaled_rate = roundup(min_sample_rate, st->clk_freq_hz);
+
+ sync_scale = scaled_rate / st->clk_freq_hz;
+ ret = __adis_write_reg_16(&st->adis, ADIS16550_REG_SYNC_SCALE,
+ sync_scale);
+ if (ret)
+ return ret;
+
+ sample_rate = scaled_rate;
+ }
+
+ dec = DIV_ROUND_CLOSEST(sample_rate, freq_hz);
+
+ if (dec)
+ dec--;
+
+ dec = min(dec, st->info->max_dec);
+
+ return __adis_write_reg_16(&st->adis, ADIS16550_REG_DEC_RATE, dec);
+}
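A worked example of the scaled-sync branch, under the same millihertz assumption and with illustrative inputs: a 100 Hz external sync (clk_freq_hz = 100000) and a requested 4 kHz rate (freq_hz = 4000000):

	/*
	 * lcm(100000, 4000000)        = 4000000 (<= max 4500000)
	 * 4500000 / 4000000 * 4000000 = 4000000 (>= min 3000000)
	 * sync_scale = 4000000 / 100000 = 40
	 * dec = DIV_ROUND_CLOSEST(4000000, 4000000) - 1 = 0
	 * => SYNC_SCALE = 40, DEC_RATE = 0, i.e. sampling at 4 kHz
	 */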
+
+static int adis16550_get_accl_filter_freq(struct adis16550 *st, int *freq_hz)
+{
+ int ret;
+ u16 config = 0;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16550_REG_CONFIG, &config);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(ADIS16550_ACCL_FIR_EN_MASK, config))
+ *freq_hz = 100;
+ else
+ *freq_hz = 0;
+
+ return 0;
+}
+
+static int adis16550_set_accl_filter_freq(struct adis16550 *st, int freq_hz)
+{
+ u8 en = freq_hz ? 1 : 0;
+ u16 val = FIELD_PREP(ADIS16550_ACCL_FIR_EN_MASK, en);
+
+ return __adis_update_bits(&st->adis, ADIS16550_REG_CONFIG,
+ ADIS16550_ACCL_FIR_EN_MASK, val);
+}
+
+static int adis16550_get_gyro_filter_freq(struct adis16550 *st, int *freq_hz)
+{
+ int ret;
+ u16 config = 0;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16550_REG_CONFIG, &config);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(ADIS16550_GYRO_FIR_EN_MASK, config))
+ *freq_hz = 100;
+ else
+ *freq_hz = 0;
+
+ return 0;
+}
+
+static int adis16550_set_gyro_filter_freq(struct adis16550 *st, int freq_hz)
+{
+ u8 en = freq_hz ? 1 : 0;
+ u16 val = FIELD_PREP(ADIS16550_GYRO_FIR_EN_MASK, en);
+
+ return __adis_update_bits(&st->adis, ADIS16550_REG_CONFIG,
+ ADIS16550_GYRO_FIR_EN_MASK, val);
+}
+
+enum {
+ ADIS16550_SCAN_TEMP,
+ ADIS16550_SCAN_GYRO_X,
+ ADIS16550_SCAN_GYRO_Y,
+ ADIS16550_SCAN_GYRO_Z,
+ ADIS16550_SCAN_ACCEL_X,
+ ADIS16550_SCAN_ACCEL_Y,
+ ADIS16550_SCAN_ACCEL_Z,
+ ADIS16550_SCAN_DELTANG_X,
+ ADIS16550_SCAN_DELTANG_Y,
+ ADIS16550_SCAN_DELTANG_Z,
+ ADIS16550_SCAN_DELTVEL_X,
+ ADIS16550_SCAN_DELTVEL_Y,
+ ADIS16550_SCAN_DELTVEL_Z,
+};
+
+static const u32 adis16550_calib_bias[] = {
+ [ADIS16550_SCAN_GYRO_X] = ADIS16550_REG_X_GYRO_BIAS,
+ [ADIS16550_SCAN_GYRO_Y] = ADIS16550_REG_Y_GYRO_BIAS,
+ [ADIS16550_SCAN_GYRO_Z] = ADIS16550_REG_Z_GYRO_BIAS,
+ [ADIS16550_SCAN_ACCEL_X] = ADIS16550_REG_X_ACCEL_BIAS,
+ [ADIS16550_SCAN_ACCEL_Y] = ADIS16550_REG_Y_ACCEL_BIAS,
+ [ADIS16550_SCAN_ACCEL_Z] = ADIS16550_REG_Z_ACCEL_BIAS,
+
+};
+
+static const u32 adis16550_calib_scale[] = {
+ [ADIS16550_SCAN_GYRO_X] = ADIS16550_REG_X_GYRO_SCALE,
+ [ADIS16550_SCAN_GYRO_Y] = ADIS16550_REG_Y_GYRO_SCALE,
+ [ADIS16550_SCAN_GYRO_Z] = ADIS16550_REG_Z_GYRO_SCALE,
+ [ADIS16550_SCAN_ACCEL_X] = ADIS16550_REG_X_ACCEL_SCALE,
+ [ADIS16550_SCAN_ACCEL_Y] = ADIS16550_REG_Y_ACCEL_SCALE,
+ [ADIS16550_SCAN_ACCEL_Z] = ADIS16550_REG_Z_ACCEL_SCALE,
+};
+
+static int adis16550_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long info)
+{
+ struct adis16550 *st = iio_priv(indio_dev);
+ const int idx = chan->scan_index;
+ u16 scale;
+ int ret;
+ u32 tmp;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ return adis_single_conversion(indio_dev, chan, 0, val);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val = st->info->gyro_max_val;
+ *val2 = st->info->gyro_max_scale;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_ACCEL:
+ *val = st->info->accel_max_val;
+ *val2 = st->info->accel_max_scale;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_TEMP:
+ *val = st->info->temp_scale;
+ return IIO_VAL_INT;
+ case IIO_DELTA_ANGL:
+ *val = st->info->deltang_max_val;
+ *val2 = 31;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_DELTA_VELOCITY:
+ *val = st->info->deltvel_max_val;
+ *val2 = 31;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ /* temperature centered at 25°C */
+ *val = DIV_ROUND_CLOSEST(25000, st->info->temp_scale);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ ret = adis_read_reg_32(&st->adis,
+ adis16550_calib_bias[idx], val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ ret = adis_read_reg_16(&st->adis,
+ adis16550_calib_scale[idx], &scale);
+ if (ret)
+ return ret;
+
+ *val = scale;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = adis16550_get_freq(st, &tmp);
+ if (ret)
+ return ret;
+
+ *val = tmp / 1000;
+ *val2 = (tmp % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ ret = adis16550_get_gyro_filter_freq(st, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_ACCEL:
+ ret = adis16550_get_accl_filter_freq(st, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adis16550_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int val, int val2, long info)
+{
+ struct adis16550 *st = iio_priv(indio_dev);
+ const int idx = chan->scan_index;
+ u32 tmp;
+
+ switch (info) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ tmp = val * 1000 + val2 / 1000;
+ return adis16550_set_freq_hz(st, tmp);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return adis_write_reg_32(&st->adis, adis16550_calib_bias[idx],
+ val);
+ case IIO_CHAN_INFO_CALIBSCALE:
+ return adis_write_reg_16(&st->adis, adis16550_calib_scale[idx],
+ val);
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ return adis16550_set_gyro_filter_freq(st, val);
+ case IIO_ACCEL:
+ return adis16550_set_accl_filter_freq(st, val);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+#define ADIS16550_MOD_CHAN(_type, _mod, _address, _si) \
+ { \
+ .type = (_type), \
+ .modified = 1, \
+ .channel2 = (_mod), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS) | \
+ BIT(IIO_CHAN_INFO_CALIBSCALE), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .address = (_address), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 32, \
+ .storagebits = 32, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define ADIS16550_GYRO_CHANNEL(_mod) \
+ ADIS16550_MOD_CHAN(IIO_ANGL_VEL, IIO_MOD_ ## _mod, \
+ ADIS16550_REG_ ## _mod ## _GYRO, ADIS16550_SCAN_GYRO_ ## _mod)
+
+#define ADIS16550_ACCEL_CHANNEL(_mod) \
+ ADIS16550_MOD_CHAN(IIO_ACCEL, IIO_MOD_ ## _mod, \
+ ADIS16550_REG_ ## _mod ## _ACCEL, ADIS16550_SCAN_ACCEL_ ## _mod)
+
+#define ADIS16550_TEMP_CHANNEL() { \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .channel = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .address = ADIS16550_REG_TEMP, \
+ .scan_index = ADIS16550_SCAN_TEMP, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 32, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define ADIS16550_MOD_CHAN_DELTA(_type, _mod, _address, _si) { \
+ .type = (_type), \
+ .modified = 1, \
+ .channel2 = (_mod), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .address = (_address), \
+ .scan_index = _si, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 32, \
+ .storagebits = 32, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define ADIS16550_DELTANG_CHAN(_mod) \
+ ADIS16550_MOD_CHAN_DELTA(IIO_DELTA_ANGL, IIO_MOD_ ## _mod, \
+ ADIS16550_REG_ ## _mod ## _DELTANG_L, ADIS16550_SCAN_DELTANG_ ## _mod)
+
+#define ADIS16550_DELTVEL_CHAN(_mod) \
+ ADIS16550_MOD_CHAN_DELTA(IIO_DELTA_VELOCITY, IIO_MOD_ ## _mod, \
+ ADIS16550_REG_ ## _mod ## _DELTVEL_L, ADIS16550_SCAN_DELTVEL_ ## _mod)
+
+#define ADIS16550_DELTANG_CHAN_NO_SCAN(_mod) \
+ ADIS16550_MOD_CHAN_DELTA(IIO_DELTA_ANGL, IIO_MOD_ ## _mod, \
+ ADIS16550_REG_ ## _mod ## _DELTANG_L, -1)
+
+#define ADIS16550_DELTVEL_CHAN_NO_SCAN(_mod) \
+ ADIS16550_MOD_CHAN_DELTA(IIO_DELTA_VELOCITY, IIO_MOD_ ## _mod, \
+ ADIS16550_REG_ ## _mod ## _DELTVEL_L, -1)
+
+static const struct iio_chan_spec adis16550_channels[] = {
+ ADIS16550_TEMP_CHANNEL(),
+ ADIS16550_GYRO_CHANNEL(X),
+ ADIS16550_GYRO_CHANNEL(Y),
+ ADIS16550_GYRO_CHANNEL(Z),
+ ADIS16550_ACCEL_CHANNEL(X),
+ ADIS16550_ACCEL_CHANNEL(Y),
+ ADIS16550_ACCEL_CHANNEL(Z),
+ ADIS16550_DELTANG_CHAN(X),
+ ADIS16550_DELTANG_CHAN(Y),
+ ADIS16550_DELTANG_CHAN(Z),
+ ADIS16550_DELTVEL_CHAN(X),
+ ADIS16550_DELTVEL_CHAN(Y),
+ ADIS16550_DELTVEL_CHAN(Z),
+ IIO_CHAN_SOFT_TIMESTAMP(13),
+};
+
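+/* valid external sync clock ranges, in Hz, for each sync mode */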
+static const struct adis16550_sync adis16550_sync_modes[] = {
+ { ADIS16550_SYNC_MODE_DIRECT, 3000, 4500 },
+ { ADIS16550_SYNC_MODE_SCALED, 1, 128 },
+};
+
+static const struct adis16550_chip_info adis16550_chip_info = {
+ .num_channels = ARRAY_SIZE(adis16550_channels),
+ .channels = adis16550_channels,
+ .name = "adis16550",
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(80 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(102400000),
+ .temp_scale = 4,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 125,
+ .int_clk = 4000,
+ .max_dec = 4095,
+ .sync_mode = adis16550_sync_modes,
+ .num_sync = ARRAY_SIZE(adis16550_sync_modes),
+};
+
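+/*
+ * Compute a CRC32 over the first n_elem words of the burst frame and
+ * compare it against the CRC reported in the last word of the frame.
+ */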
+static bool adis16550_validate_crc(__be32 *buffer, const u8 n_elem)
+{
+ int i;
+ u32 crc_calc;
+ u32 crc_buf[ADIS16550_BURST_N_ELEM - 2];
+ u32 crc = be32_to_cpu(buffer[ADIS16550_BURST_N_ELEM - 1]);
+ /*
+ * The CRC is calculated over little-endian data, so always swap the
+ * 32-bit elements to make sure the data LSB ends up at address 0.
+ */
+ for (i = 0; i < n_elem; i++)
+ crc_buf[i] = be32_to_cpu(buffer[i]);
+
+ crc_calc = crc32(~0, crc_buf, n_elem * 4);
+ crc_calc ^= ~0;
+
+ return crc_calc == crc;
+}
+
+static irqreturn_t adis16550_trigger_handler(int irq, void *p)
+{
+ int ret;
+ u16 dummy;
+ bool valid;
+ struct iio_poll_func *pf = p;
+ __be32 data[ADIS16550_MAX_SCAN_DATA];
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adis16550 *st = iio_priv(indio_dev);
+ struct adis *adis = iio_device_get_drvdata(indio_dev);
+ __be32 *buffer = (__be32 *)st->buffer;
+
+ ret = spi_sync(adis->spi, &adis->msg);
+ if (ret)
+ goto done;
+ /*
+ * Validate the header. The header is a normal spi reply with state
+ * vector and crc4.
+ */
+ ret = adis16550_spi_validate(&st->adis, buffer[0], &dummy);
+ if (ret)
+ goto done;
+
+ /* the header is not included in the crc */
+ valid = adis16550_validate_crc(buffer, ADIS16550_BURST_N_ELEM - 2);
+ if (!valid) {
+ dev_err(&adis->spi->dev, "Invalid burst CRC!\n");
+ goto done;
+ }
+
+ /* copy the temperature together with sensor data */
+ memcpy(data, &buffer[3],
+ (ADIS16550_SCAN_ACCEL_Z - ADIS16550_SCAN_GYRO_X + 2) *
+ sizeof(__be32));
+ iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp);
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
+static const unsigned long adis16550_channel_masks[] = {
+ ADIS16550_BURST_DATA_GYRO_ACCEL_MASK | BIT(ADIS16550_SCAN_TEMP),
+ ADIS16550_BURST_DATA_DELTA_ANG_VEL_MASK | BIT(ADIS16550_SCAN_TEMP),
+ 0
+};
+
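+/*
+ * Prepare the burst command matching the selected scan mask. The gyro/accel
+ * and delta angle/velocity bursts are mutually exclusive, which is why
+ * adis16550_channel_masks only exposes one group (plus temperature) at a
+ * time.
+ */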
+static int adis16550_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ u16 burst_length = ADIS16550_BURST_DATA_LEN;
+ struct adis16550 *st = iio_priv(indio_dev);
+ u8 burst_cmd;
+ u8 *tx;
+
+ memset(st->buffer, 0, burst_length + sizeof(u32));
+
+ if (*scan_mask & ADIS16550_BURST_DATA_GYRO_ACCEL_MASK)
+ burst_cmd = ADIS16550_REG_BURST_GYRO_ACCEL;
+ else
+ burst_cmd = ADIS16550_REG_BURST_DELTA_ANG_VEL;
+
+ tx = st->buffer + burst_length;
+ tx[0] = 0x00;
+ tx[1] = 0x00;
+ tx[2] = burst_cmd;
+ /* crc4 is 0 on burst command */
+ tx[3] = spi_crc4(get_unaligned_le32(tx));
+
+ return 0;
+}
+
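+/* a software reset is requested via BIT(15) of the command register */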
+static int adis16550_reset(struct adis *adis)
+{
+ return __adis_write_reg_16(adis, ADIS16550_REG_COMMAND, BIT(15));
+}
+
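+/*
+ * With no external clock the internal 4kHz clock is used. Otherwise, pick
+ * the sync mode whose rate window contains the external clock; in scaled
+ * mode the SYNC_SCALE register multiplies the external clock into the
+ * internal [3000, 4500] sps window.
+ */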
+static int adis16550_config_sync(struct adis16550 *st)
+{
+ struct device *dev = &st->adis.spi->dev;
+ const struct adis16550_sync *sync_mode_data;
+ struct clk *clk;
+ int ret, i;
+ u16 mode;
+
+ clk = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ if (!clk) {
+ st->clk_freq_hz = st->info->int_clk * 1000;
+ return 0;
+ }
+
+ st->clk_freq_hz = clk_get_rate(clk);
+
+ for (i = 0; i < st->info->num_sync; i++) {
+ if (st->clk_freq_hz >= st->info->sync_mode[i].min_rate &&
+ st->clk_freq_hz <= st->info->sync_mode[i].max_rate) {
+ sync_mode_data = &st->info->sync_mode[i];
+ break;
+ }
+ }
+
+ if (i == st->info->num_sync)
+ return dev_err_probe(dev, -EINVAL, "Clk rate: %lu not in a valid range\n",
+ st->clk_freq_hz);
+
+ if (sync_mode_data->sync_mode == ADIS16550_SYNC_MODE_SCALED) {
+ u16 sync_scale;
+ /*
+ * In sps scaled sync we must scale the input clock to a range
+ * of [3000 4500].
+ */
+
+ sync_scale = DIV_ROUND_CLOSEST(st->info->int_clk, st->clk_freq_hz);
+
+ /* it is the scaled rate, not the scale factor, that must fit */
+ if (st->clk_freq_hz * sync_scale < 3000 ||
+ st->clk_freq_hz * sync_scale > 4500)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid value:%u for sync_scale\n",
+ sync_scale);
+
+ ret = adis_write_reg_16(&st->adis, ADIS16550_REG_SYNC_SCALE,
+ sync_scale);
+ if (ret)
+ return ret;
+
+ st->clk_freq_hz = st->info->int_clk;
+ }
+
+ st->clk_freq_hz *= 1000;
+
+ mode = FIELD_PREP(ADIS16550_SYNC_MODE_MASK, sync_mode_data->sync_mode) |
+ FIELD_PREP(ADIS16550_SYNC_EN_MASK, true);
+
+ return __adis_update_bits(&st->adis, ADIS16550_REG_CONFIG,
+ ADIS16550_SYNC_MASK, mode);
+}
+
+static const struct iio_info adis16550_info = {
+ .read_raw = &adis16550_read_raw,
+ .write_raw = &adis16550_write_raw,
+ .update_scan_mode = adis16550_update_scan_mode,
+ .debugfs_reg_access = adis_debugfs_reg_access,
+};
+
+enum {
+ ADIS16550_STATUS_CRC_CODE,
+ ADIS16550_STATUS_CRC_CONFIG,
+ ADIS16550_STATUS_FLASH_UPDATE,
+ ADIS16550_STATUS_INERTIAL,
+ ADIS16550_STATUS_SENSOR,
+ ADIS16550_STATUS_TEMPERATURE,
+ ADIS16550_STATUS_SPI,
+ ADIS16550_STATUS_PROCESSING,
+ ADIS16550_STATUS_POWER,
+ ADIS16550_STATUS_BOOT,
+ ADIS16550_STATUS_WATCHDOG = 15,
+ ADIS16550_STATUS_REGULATOR = 28,
+ ADIS16550_STATUS_SENSOR_SUPPLY,
+ ADIS16550_STATUS_CPU_SUPPLY,
+ ADIS16550_STATUS_5V_SUPPLY,
+};
+
+static const char * const adis16550_status_error_msgs[] = {
+ [ADIS16550_STATUS_CRC_CODE] = "Code CRC Error",
+ [ADIS16550_STATUS_CRC_CONFIG] = "Configuration/Calibration CRC Error",
+ [ADIS16550_STATUS_FLASH_UPDATE] = "Flash Update Error",
+ [ADIS16550_STATUS_INERTIAL] = "Overrange for Inertial Signals",
+ [ADIS16550_STATUS_SENSOR] = "Sensor failure",
+ [ADIS16550_STATUS_TEMPERATURE] = "Temperature Error",
+ [ADIS16550_STATUS_SPI] = "SPI Communication Error",
+ [ADIS16550_STATUS_PROCESSING] = "Processing Overrun Error",
+ [ADIS16550_STATUS_POWER] = "Power Supply Failure",
+ [ADIS16550_STATUS_BOOT] = "Boot Memory Failure",
+ [ADIS16550_STATUS_WATCHDOG] = "Watchdog timer flag",
+ [ADIS16550_STATUS_REGULATOR] = "Internal Regulator Error",
+ [ADIS16550_STATUS_SENSOR_SUPPLY] = "Internal Sensor Supply Error",
+ [ADIS16550_STATUS_CPU_SUPPLY] = "Internal Processor Supply Error",
+ [ADIS16550_STATUS_5V_SUPPLY] = "External 5V Supply Error",
+};
+
+static const struct adis_timeout adis16550_timeouts = {
+ .reset_ms = 1000,
+ .sw_reset_ms = 1000,
+ .self_test_ms = 1000,
+};
+
+static const struct adis_data adis16550_data = {
+ .diag_stat_reg = ADIS16550_REG_STATUS,
+ .diag_stat_size = 4,
+ .prod_id_reg = ADIS16550_REG_PROD_ID,
+ .prod_id = 16550,
+ .self_test_mask = BIT(1),
+ .self_test_reg = ADIS16550_REG_COMMAND,
+ .cs_change_delay = 5,
+ .unmasked_drdy = true,
+ .status_error_msgs = adis16550_status_error_msgs,
+ .status_error_mask = BIT(ADIS16550_STATUS_CRC_CODE) |
+ BIT(ADIS16550_STATUS_CRC_CONFIG) |
+ BIT(ADIS16550_STATUS_FLASH_UPDATE) |
+ BIT(ADIS16550_STATUS_INERTIAL) |
+ BIT(ADIS16550_STATUS_SENSOR) |
+ BIT(ADIS16550_STATUS_TEMPERATURE) |
+ BIT(ADIS16550_STATUS_SPI) |
+ BIT(ADIS16550_STATUS_PROCESSING) |
+ BIT(ADIS16550_STATUS_POWER) |
+ BIT(ADIS16550_STATUS_BOOT) |
+ BIT(ADIS16550_STATUS_WATCHDOG) |
+ BIT(ADIS16550_STATUS_REGULATOR) |
+ BIT(ADIS16550_STATUS_SENSOR_SUPPLY) |
+ BIT(ADIS16550_STATUS_CPU_SUPPLY) |
+ BIT(ADIS16550_STATUS_5V_SUPPLY),
+ .timeouts = &adis16550_timeouts,
+};
+
+static const struct adis_ops adis16550_ops = {
+ .write = adis16550_spi_write,
+ .read = adis16550_spi_read,
+ .reset = adis16550_reset,
+};
+
+static int adis16550_probe(struct spi_device *spi)
+{
+ u16 burst_length = ADIS16550_BURST_DATA_LEN;
+ struct device *dev = &spi->dev;
+ struct iio_dev *indio_dev;
+ struct adis16550 *st;
+ struct adis *adis;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ st->info = spi_get_device_match_data(spi);
+ if (!st->info)
+ return -EINVAL;
+ adis = &st->adis;
+ indio_dev->name = st->info->name;
+ indio_dev->channels = st->info->channels;
+ indio_dev->num_channels = st->info->num_channels;
+ indio_dev->available_scan_masks = adis16550_channel_masks;
+ indio_dev->info = &adis16550_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ st->adis.ops = &adis16550_ops;
+ st->xfer[0].tx_buf = st->buffer + burst_length;
+ st->xfer[0].len = 4;
+ st->xfer[0].cs_change = 1;
+ st->xfer[0].delay.value = 8;
+ st->xfer[0].delay.unit = SPI_DELAY_UNIT_USECS;
+ st->xfer[1].rx_buf = st->buffer;
+ st->xfer[1].len = burst_length;
+
+ spi_message_init_with_transfers(&adis->msg, st->xfer, 2);
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get vdd regulator\n");
+
+ ret = adis_init(&st->adis, indio_dev, spi, &adis16550_data);
+ if (ret)
+ return ret;
+
+ ret = __adis_initial_startup(&st->adis);
+ if (ret)
+ return ret;
+
+ ret = adis16550_config_sync(st);
+ if (ret)
+ return ret;
+
+ ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev,
+ adis16550_trigger_handler);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return ret;
+
+ adis16550_debugfs_init(indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id adis16550_id[] = {
+ { "adis16550", (kernel_ulong_t)&adis16550_chip_info},
+ { }
+};
+MODULE_DEVICE_TABLE(spi, adis16550_id);
+
+static const struct of_device_id adis16550_of_match[] = {
+ { .compatible = "adi,adis16550", .data = &adis16550_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adis16550_of_match);
+
+static struct spi_driver adis16550_driver = {
+ .driver = {
+ .name = "adis16550",
+ .of_match_table = adis16550_of_match,
+ },
+ .probe = adis16550_probe,
+ .id_table = adis16550_id,
+};
+module_spi_driver(adis16550_driver);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_AUTHOR("Ramona Gradinariu <ramona.gradinariu@analog.com>");
+MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>");
+MODULE_AUTHOR("Robert Budai <robert.budai@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16550 IMU driver");
+MODULE_IMPORT_NS("IIO_ADISLIB");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/bmi270/bmi270.h b/drivers/iio/imu/bmi270/bmi270.h
index fdfad5784cc5..d94525f6aee8 100644
--- a/drivers/iio/imu/bmi270/bmi270.h
+++ b/drivers/iio/imu/bmi270/bmi270.h
@@ -6,22 +6,6 @@
#include <linux/regmap.h>
#include <linux/iio/iio.h>
-struct device;
-struct bmi270_data {
- struct device *dev;
- struct regmap *regmap;
- const struct bmi270_chip_info *chip_info;
-
- /*
- * Where IIO_DMA_MINALIGN may be larger than 8 bytes, align to
- * that to ensure a DMA safe buffer.
- */
- struct {
- __le16 channels[6];
- aligned_s64 timestamp;
- } data __aligned(IIO_DMA_MINALIGN);
-};
-
struct bmi270_chip_info {
const char *name;
int chip_id;
@@ -32,6 +16,7 @@ extern const struct regmap_config bmi270_regmap_config;
extern const struct bmi270_chip_info bmi260_chip_info;
extern const struct bmi270_chip_info bmi270_chip_info;
+struct device;
int bmi270_core_probe(struct device *dev, struct regmap *regmap,
const struct bmi270_chip_info *chip_info);
diff --git a/drivers/iio/imu/bmi270/bmi270_core.c b/drivers/iio/imu/bmi270/bmi270_core.c
index 7fec52e0b486..a86be5af5ccb 100644
--- a/drivers/iio/imu/bmi270/bmi270_core.c
+++ b/drivers/iio/imu/bmi270/bmi270_core.c
@@ -4,10 +4,13 @@
#include <linux/firmware.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/regmap.h>
+#include <linux/units.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
@@ -25,13 +28,17 @@
#define BMI270_ACCEL_X_REG 0x0c
#define BMI270_ANG_VEL_X_REG 0x12
+#define BMI270_INT_STATUS_1_REG 0x1d
+#define BMI270_INT_STATUS_1_ACC_GYR_DRDY_MSK GENMASK(7, 6)
+
#define BMI270_INTERNAL_STATUS_REG 0x21
#define BMI270_INTERNAL_STATUS_MSG_MSK GENMASK(3, 0)
#define BMI270_INTERNAL_STATUS_MSG_INIT_OK 0x01
-
#define BMI270_INTERNAL_STATUS_AXES_REMAP_ERR_MSK BIT(5)
#define BMI270_INTERNAL_STATUS_ODR_50HZ_ERR_MSK BIT(6)
+#define BMI270_TEMPERATURE_0_REG 0x22
+
#define BMI270_ACC_CONF_REG 0x40
#define BMI270_ACC_CONF_ODR_MSK GENMASK(3, 0)
#define BMI270_ACC_CONF_ODR_100HZ 0x08
@@ -53,6 +60,20 @@
#define BMI270_GYR_CONF_RANGE_REG 0x43
#define BMI270_GYR_CONF_RANGE_MSK GENMASK(2, 0)
+#define BMI270_INT1_IO_CTRL_REG 0x53
+#define BMI270_INT2_IO_CTRL_REG 0x54
+#define BMI270_INT_IO_CTRL_LVL_MSK BIT(1)
+#define BMI270_INT_IO_CTRL_OD_MSK BIT(2)
+#define BMI270_INT_IO_CTRL_OP_MSK BIT(3)
+#define BMI270_INT_IO_LVL_OD_OP_MSK GENMASK(3, 1)
+
+#define BMI270_INT_LATCH_REG 0x55
+#define BMI270_INT_LATCH_REG_MSK BIT(0)
+
+#define BMI270_INT_MAP_DATA_REG 0x58
+#define BMI270_INT_MAP_DATA_DRDY_INT1_MSK BIT(2)
+#define BMI270_INT_MAP_DATA_DRDY_INT2_MSK BIT(6)
+
#define BMI270_INIT_CTRL_REG 0x59
#define BMI270_INIT_CTRL_LOAD_DONE_MSK BIT(0)
@@ -69,9 +90,38 @@
#define BMI270_PWR_CTRL_ACCEL_EN_MSK BIT(2)
#define BMI270_PWR_CTRL_TEMP_EN_MSK BIT(3)
+/* See datasheet section 4.6.14, Temperature Sensor */
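+/*
+ * Resolution is 1/512 K (1.953125 m°C) per LSB, with a raw value of 0
+ * mapping to 23 degrees Celsius, hence the offset of
+ * 23000 / 1.953125 = 11776 LSB.
+ */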
+#define BMI270_TEMP_OFFSET 11776
+#define BMI270_TEMP_SCALE 1953125
+
#define BMI260_INIT_DATA_FILE "bmi260-init-data.fw"
#define BMI270_INIT_DATA_FILE "bmi270-init-data.fw"
+enum bmi270_irq_pin {
+ BMI270_IRQ_DISABLED,
+ BMI270_IRQ_INT1,
+ BMI270_IRQ_INT2,
+};
+
+struct bmi270_data {
+ struct device *dev;
+ struct regmap *regmap;
+ const struct bmi270_chip_info *chip_info;
+ enum bmi270_irq_pin irq_pin;
+ struct iio_trigger *trig;
+ /* Protect device's private data from concurrent access */
+ struct mutex mutex;
+
+ /*
+ * Where IIO_DMA_MINALIGN may be larger than 8 bytes, align to
+ * that to ensure a DMA safe buffer.
+ */
+ struct {
+ __le16 channels[6];
+ aligned_s64 timestamp;
+ } buffer __aligned(IIO_DMA_MINALIGN);
+};
+
enum bmi270_scan {
BMI270_SCAN_ACCEL_X,
BMI270_SCAN_ACCEL_Y,
@@ -109,6 +159,7 @@ EXPORT_SYMBOL_NS_GPL(bmi270_chip_info, "IIO_BMI270");
enum bmi270_sensor_type {
BMI270_ACCEL = 0,
BMI270_GYRO,
+ BMI270_TEMP,
};
struct bmi270_scale {
@@ -136,6 +187,10 @@ static const struct bmi270_scale bmi270_gyro_scale[] = {
{ 0, 66 },
};
+static const struct bmi270_scale bmi270_temp_scale[] = {
+ { BMI270_TEMP_SCALE / MICRO, BMI270_TEMP_SCALE % MICRO },
+};
+
struct bmi270_scale_item {
const struct bmi270_scale *tbl;
int num;
@@ -150,6 +205,10 @@ static const struct bmi270_scale_item bmi270_scale_table[] = {
.tbl = bmi270_gyro_scale,
.num = ARRAY_SIZE(bmi270_gyro_scale),
},
+ [BMI270_TEMP] = {
+ .tbl = bmi270_temp_scale,
+ .num = ARRAY_SIZE(bmi270_temp_scale),
+ },
};
static const struct bmi270_odr bmi270_accel_odr[] = {
@@ -244,6 +303,8 @@ static int bmi270_set_scale(struct bmi270_data *data, int chan_type, int uscale)
return -EINVAL;
}
+ guard(mutex)(&data->mutex);
+
for (i = 0; i < bmi270_scale_item.num; i++) {
if (bmi270_scale_item.tbl[i].uscale != uscale)
continue;
@@ -254,17 +315,18 @@ static int bmi270_set_scale(struct bmi270_data *data, int chan_type, int uscale)
return -EINVAL;
}
-static int bmi270_get_scale(struct bmi270_data *bmi270_device, int chan_type,
+static int bmi270_get_scale(struct bmi270_data *data, int chan_type, int *scale,
int *uscale)
{
int ret;
unsigned int val;
struct bmi270_scale_item bmi270_scale_item;
+ guard(mutex)(&data->mutex);
+
switch (chan_type) {
case IIO_ACCEL:
- ret = regmap_read(bmi270_device->regmap,
- BMI270_ACC_CONF_RANGE_REG, &val);
+ ret = regmap_read(data->regmap, BMI270_ACC_CONF_RANGE_REG, &val);
if (ret)
return ret;
@@ -272,14 +334,17 @@ static int bmi270_get_scale(struct bmi270_data *bmi270_device, int chan_type,
bmi270_scale_item = bmi270_scale_table[BMI270_ACCEL];
break;
case IIO_ANGL_VEL:
- ret = regmap_read(bmi270_device->regmap,
- BMI270_GYR_CONF_RANGE_REG, &val);
+ ret = regmap_read(data->regmap, BMI270_GYR_CONF_RANGE_REG, &val);
if (ret)
return ret;
val = FIELD_GET(BMI270_GYR_CONF_RANGE_MSK, val);
bmi270_scale_item = bmi270_scale_table[BMI270_GYRO];
break;
+ case IIO_TEMP:
+ val = 0;
+ bmi270_scale_item = bmi270_scale_table[BMI270_TEMP];
+ break;
default:
return -EINVAL;
}
@@ -287,6 +352,7 @@ static int bmi270_get_scale(struct bmi270_data *bmi270_device, int chan_type,
if (val >= bmi270_scale_item.num)
return -EINVAL;
+ *scale = bmi270_scale_item.tbl[val].scale;
*uscale = bmi270_scale_item.tbl[val].uscale;
return 0;
}
@@ -313,6 +379,8 @@ static int bmi270_set_odr(struct bmi270_data *data, int chan_type, int odr,
return -EINVAL;
}
+ guard(mutex)(&data->mutex);
+
for (i = 0; i < bmi270_odr_item.num; i++) {
if (bmi270_odr_item.tbl[i].odr != odr ||
bmi270_odr_item.tbl[i].uodr != uodr)
@@ -331,6 +399,8 @@ static int bmi270_get_odr(struct bmi270_data *data, int chan_type, int *odr,
int i, val, ret;
struct bmi270_odr_item bmi270_odr_item;
+ guard(mutex)(&data->mutex);
+
switch (chan_type) {
case IIO_ACCEL:
ret = regmap_read(data->regmap, BMI270_ACC_CONF_REG, &val);
@@ -364,29 +434,85 @@ static int bmi270_get_odr(struct bmi270_data *data, int chan_type, int *odr,
return -EINVAL;
}
+static irqreturn_t bmi270_irq_thread_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct bmi270_data *data = iio_priv(indio_dev);
+ unsigned int status;
+ int ret;
+
+ scoped_guard(mutex, &data->mutex) {
+ ret = regmap_read(data->regmap, BMI270_INT_STATUS_1_REG,
+ &status);
+ if (ret)
+ return IRQ_NONE;
+ }
+
+ if (FIELD_GET(BMI270_INT_STATUS_1_ACC_GYR_DRDY_MSK, status))
+ iio_trigger_poll_nested(data->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int bmi270_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct bmi270_data *data = iio_trigger_get_drvdata(trig);
+ unsigned int field_value = 0;
+ unsigned int mask;
+
+ guard(mutex)(&data->mutex);
+
+ switch (data->irq_pin) {
+ case BMI270_IRQ_INT1:
+ mask = BMI270_INT_MAP_DATA_DRDY_INT1_MSK;
+ set_mask_bits(&field_value, BMI270_INT_MAP_DATA_DRDY_INT1_MSK,
+ FIELD_PREP(BMI270_INT_MAP_DATA_DRDY_INT1_MSK,
+ state));
+ break;
+ case BMI270_IRQ_INT2:
+ mask = BMI270_INT_MAP_DATA_DRDY_INT2_MSK;
+ set_mask_bits(&field_value, BMI270_INT_MAP_DATA_DRDY_INT2_MSK,
+ FIELD_PREP(BMI270_INT_MAP_DATA_DRDY_INT2_MSK,
+ state));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(data->regmap, BMI270_INT_MAP_DATA_REG, mask,
+ field_value);
+}
+
+static const struct iio_trigger_ops bmi270_trigger_ops = {
+ .set_trigger_state = &bmi270_data_rdy_trigger_set_state,
+};
+
static irqreturn_t bmi270_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
- struct bmi270_data *bmi270_device = iio_priv(indio_dev);
+ struct bmi270_data *data = iio_priv(indio_dev);
int ret;
- ret = regmap_bulk_read(bmi270_device->regmap, BMI270_ACCEL_X_REG,
- &bmi270_device->data.channels,
- sizeof(bmi270_device->data.channels));
+ guard(mutex)(&data->mutex);
+
+ ret = regmap_bulk_read(data->regmap, BMI270_ACCEL_X_REG,
+ &data->buffer.channels,
+ sizeof(data->buffer.channels));
if (ret)
goto done;
- iio_push_to_buffers_with_timestamp(indio_dev, &bmi270_device->data,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
pf->timestamp);
done:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
}
-static int bmi270_get_data(struct bmi270_data *bmi270_device,
- int chan_type, int axis, int *val)
+static int bmi270_get_data(struct bmi270_data *data, int chan_type, int axis,
+ int *val)
{
__le16 sample;
int reg;
@@ -399,17 +525,22 @@ static int bmi270_get_data(struct bmi270_data *bmi270_device,
case IIO_ANGL_VEL:
reg = BMI270_ANG_VEL_X_REG + (axis - IIO_MOD_X) * 2;
break;
+ case IIO_TEMP:
+ reg = BMI270_TEMPERATURE_0_REG;
+ break;
default:
return -EINVAL;
}
- ret = regmap_bulk_read(bmi270_device->regmap, reg, &sample, sizeof(sample));
+ guard(mutex)(&data->mutex);
+
+ ret = regmap_bulk_read(data->regmap, reg, &sample, sizeof(sample));
if (ret)
return ret;
*val = sign_extend32(le16_to_cpu(sample), 15);
- return 0;
+ return IIO_VAL_INT;
}
static int bmi270_read_raw(struct iio_dev *indio_dev,
@@ -417,21 +548,28 @@ static int bmi270_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
int ret;
- struct bmi270_data *bmi270_device = iio_priv(indio_dev);
+ struct bmi270_data *data = iio_priv(indio_dev);
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = bmi270_get_data(bmi270_device, chan->type, chan->channel2, val);
- if (ret)
- return ret;
-
- return IIO_VAL_INT;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = bmi270_get_data(data, chan->type, chan->channel2, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SCALE:
- *val = 0;
- ret = bmi270_get_scale(bmi270_device, chan->type, val2);
+ ret = bmi270_get_scale(data, chan->type, val, val2);
return ret ? ret : IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = BMI270_TEMP_OFFSET;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = bmi270_get_odr(bmi270_device, chan->type, val, val2);
+ ret = bmi270_get_odr(data, chan->type, val, val2);
return ret ? ret : IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
@@ -443,12 +581,21 @@ static int bmi270_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct bmi270_data *data = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
- return bmi270_set_scale(data, chan->type, val2);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = bmi270_set_scale(data, chan->type, val2);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
- return bmi270_set_odr(data, chan->type, val, val2);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = bmi270_set_odr(data, chan->type, val, val2);
+ iio_device_release_direct(indio_dev);
+ return ret;
default:
return -EINVAL;
}
@@ -544,15 +691,132 @@ static const struct iio_chan_spec bmi270_channels[] = {
BMI270_ANG_VEL_CHANNEL(X),
BMI270_ANG_VEL_CHANNEL(Y),
BMI270_ANG_VEL_CHANNEL(Z),
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .scan_index = -1, /* No buffer support */
+ },
IIO_CHAN_SOFT_TIMESTAMP(BMI270_SCAN_TIMESTAMP),
};
-static int bmi270_validate_chip_id(struct bmi270_data *bmi270_device)
+static int bmi270_int_pin_config(struct bmi270_data *data,
+ enum bmi270_irq_pin irq_pin,
+ bool active_high, bool open_drain, bool latch)
+{
+ unsigned int reg, field_value;
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, BMI270_INT_LATCH_REG,
+ BMI270_INT_LATCH_REG_MSK,
+ FIELD_PREP(BMI270_INT_LATCH_REG_MSK, latch));
+ if (ret)
+ return ret;
+
+ switch (irq_pin) {
+ case BMI270_IRQ_INT1:
+ reg = BMI270_INT1_IO_CTRL_REG;
+ break;
+ case BMI270_IRQ_INT2:
+ reg = BMI270_INT2_IO_CTRL_REG;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ field_value = FIELD_PREP(BMI270_INT_IO_CTRL_LVL_MSK, active_high) |
+ FIELD_PREP(BMI270_INT_IO_CTRL_OD_MSK, open_drain) |
+ FIELD_PREP(BMI270_INT_IO_CTRL_OP_MSK, 1);
+ return regmap_update_bits(data->regmap, reg,
+ BMI270_INT_IO_LVL_OD_OP_MSK, field_value);
+}
+
+static int bmi270_trigger_probe(struct bmi270_data *data,
+ struct iio_dev *indio_dev)
+{
+ bool open_drain, active_high, latch;
+ struct fwnode_handle *fwnode;
+ enum bmi270_irq_pin irq_pin;
+ int ret, irq, irq_type;
+
+ fwnode = dev_fwnode(data->dev);
+ if (!fwnode)
+ return -ENODEV;
+
+ irq = fwnode_irq_get_byname(fwnode, "INT1");
+ if (irq > 0) {
+ irq_pin = BMI270_IRQ_INT1;
+ } else {
+ irq = fwnode_irq_get_byname(fwnode, "INT2");
+ if (irq < 0)
+ return 0;
+
+ irq_pin = BMI270_IRQ_INT2;
+ }
+
+ irq_type = irq_get_trigger_type(irq);
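+ /* level-triggered IRQs use the latched mode, edge-triggered the pulsed one */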
+ switch (irq_type) {
+ case IRQF_TRIGGER_RISING:
+ latch = false;
+ active_high = true;
+ break;
+ case IRQF_TRIGGER_HIGH:
+ latch = true;
+ active_high = true;
+ break;
+ case IRQF_TRIGGER_FALLING:
+ latch = false;
+ active_high = false;
+ break;
+ case IRQF_TRIGGER_LOW:
+ latch = true;
+ active_high = false;
+ break;
+ default:
+ return dev_err_probe(data->dev, -EINVAL,
+ "Invalid interrupt type 0x%x specified\n",
+ irq_type);
+ }
+
+ open_drain = fwnode_property_read_bool(fwnode, "drive-open-drain");
+
+ ret = bmi270_int_pin_config(data, irq_pin, active_high, open_drain,
+ latch);
+ if (ret)
+ return dev_err_probe(data->dev, ret,
+ "Failed to configure irq line\n");
+
+ data->trig = devm_iio_trigger_alloc(data->dev, "%s-trig-%d",
+ indio_dev->name, irq_pin);
+ if (!data->trig)
+ return -ENOMEM;
+
+ data->trig->ops = &bmi270_trigger_ops;
+ iio_trigger_set_drvdata(data->trig, data);
+
+ ret = devm_request_threaded_irq(data->dev, irq, NULL,
+ bmi270_irq_thread_handler,
+ IRQF_ONESHOT, "bmi270-int", indio_dev);
+ if (ret)
+ return dev_err_probe(data->dev, ret, "Failed to request IRQ\n");
+
+ ret = devm_iio_trigger_register(data->dev, data->trig);
+ if (ret)
+ return dev_err_probe(data->dev, ret,
+ "Trigger registration failed\n");
+
+ data->irq_pin = irq_pin;
+
+ return 0;
+}
+
+static int bmi270_validate_chip_id(struct bmi270_data *data)
{
int chip_id;
int ret;
- struct device *dev = bmi270_device->dev;
- struct regmap *regmap = bmi270_device->regmap;
+ struct device *dev = data->dev;
+ struct regmap *regmap = data->regmap;
ret = regmap_read(regmap, BMI270_CHIP_ID_REG, &chip_id);
if (ret)
@@ -566,24 +830,24 @@ static int bmi270_validate_chip_id(struct bmi270_data *bmi270_device)
if (chip_id == BMI160_CHIP_ID_VAL)
return -ENODEV;
- if (chip_id != bmi270_device->chip_info->chip_id)
+ if (chip_id != data->chip_info->chip_id)
dev_info(dev, "Unexpected chip id 0x%x", chip_id);
if (chip_id == bmi260_chip_info.chip_id)
- bmi270_device->chip_info = &bmi260_chip_info;
+ data->chip_info = &bmi260_chip_info;
else if (chip_id == bmi270_chip_info.chip_id)
- bmi270_device->chip_info = &bmi270_chip_info;
+ data->chip_info = &bmi270_chip_info;
return 0;
}
-static int bmi270_write_calibration_data(struct bmi270_data *bmi270_device)
+static int bmi270_write_calibration_data(struct bmi270_data *data)
{
int ret;
int status = 0;
const struct firmware *init_data;
- struct device *dev = bmi270_device->dev;
- struct regmap *regmap = bmi270_device->regmap;
+ struct device *dev = data->dev;
+ struct regmap *regmap = data->regmap;
ret = regmap_clear_bits(regmap, BMI270_PWR_CONF_REG,
BMI270_PWR_CONF_ADV_PWR_SAVE_MSK);
@@ -604,8 +868,7 @@ static int bmi270_write_calibration_data(struct bmi270_data *bmi270_device)
return dev_err_probe(dev, ret,
"Failed to prepare device to load init data");
- ret = request_firmware(&init_data,
- bmi270_device->chip_info->fw_name, dev);
+ ret = request_firmware(&init_data, data->chip_info->fw_name, dev);
if (ret)
return dev_err_probe(dev, ret, "Failed to load init data file");
@@ -637,16 +900,17 @@ static int bmi270_write_calibration_data(struct bmi270_data *bmi270_device)
return 0;
}
-static int bmi270_configure_imu(struct bmi270_data *bmi270_device)
+static int bmi270_configure_imu(struct bmi270_data *data)
{
int ret;
- struct device *dev = bmi270_device->dev;
- struct regmap *regmap = bmi270_device->regmap;
+ struct device *dev = data->dev;
+ struct regmap *regmap = data->regmap;
ret = regmap_set_bits(regmap, BMI270_PWR_CTRL_REG,
BMI270_PWR_CTRL_AUX_EN_MSK |
BMI270_PWR_CTRL_GYR_EN_MSK |
- BMI270_PWR_CTRL_ACCEL_EN_MSK);
+ BMI270_PWR_CTRL_ACCEL_EN_MSK |
+ BMI270_PWR_CTRL_TEMP_EN_MSK);
if (ret)
return dev_err_probe(dev, ret, "Failed to enable accelerometer and gyroscope");
@@ -677,38 +941,40 @@ static int bmi270_configure_imu(struct bmi270_data *bmi270_device)
return 0;
}
-static int bmi270_chip_init(struct bmi270_data *bmi270_device)
+static int bmi270_chip_init(struct bmi270_data *data)
{
int ret;
- ret = bmi270_validate_chip_id(bmi270_device);
+ ret = bmi270_validate_chip_id(data);
if (ret)
return ret;
- ret = bmi270_write_calibration_data(bmi270_device);
+ ret = bmi270_write_calibration_data(data);
if (ret)
return ret;
- return bmi270_configure_imu(bmi270_device);
+ return bmi270_configure_imu(data);
}
int bmi270_core_probe(struct device *dev, struct regmap *regmap,
const struct bmi270_chip_info *chip_info)
{
int ret;
- struct bmi270_data *bmi270_device;
+ struct bmi270_data *data;
struct iio_dev *indio_dev;
- indio_dev = devm_iio_device_alloc(dev, sizeof(*bmi270_device));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
- bmi270_device = iio_priv(indio_dev);
- bmi270_device->dev = dev;
- bmi270_device->regmap = regmap;
- bmi270_device->chip_info = chip_info;
+ data = iio_priv(indio_dev);
+ data->dev = dev;
+ data->regmap = regmap;
+ data->chip_info = chip_info;
+ data->irq_pin = BMI270_IRQ_DISABLED;
+ mutex_init(&data->mutex);
- ret = bmi270_chip_init(bmi270_device);
+ ret = bmi270_chip_init(data);
if (ret)
return ret;
@@ -719,6 +985,10 @@ int bmi270_core_probe(struct device *dev, struct regmap *regmap,
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &bmi270_info;
+ ret = bmi270_trigger_probe(data, indio_dev);
+ if (ret)
+ return ret;
+
ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
iio_pollfunc_store_time,
bmi270_trigger_handler, NULL);
diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c
index 7f386c5e58b4..fc54d464a3ae 100644
--- a/drivers/iio/imu/bmi323/bmi323_core.c
+++ b/drivers/iio/imu/bmi323/bmi323_core.c
@@ -1702,26 +1702,30 @@ static int bmi323_write_raw(struct iio_dev *indio_dev,
int val2, long mask)
{
struct bmi323_data *data = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return bmi323_set_odr(data,
- bmi323_iio_to_sensor(chan->type),
- val, val2);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = bmi323_set_odr(data, bmi323_iio_to_sensor(chan->type),
+ val, val2);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SCALE:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return bmi323_set_scale(data,
- bmi323_iio_to_sensor(chan->type),
- val, val2);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = bmi323_set_scale(data, bmi323_iio_to_sensor(chan->type),
+ val, val2);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return bmi323_set_average(data,
- bmi323_iio_to_sensor(chan->type),
- val);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = bmi323_set_average(data, bmi323_iio_to_sensor(chan->type),
+ val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_ENABLE:
return bmi323_enable_steps(data, val);
case IIO_CHAN_INFO_PROCESSED: {
@@ -1747,6 +1751,7 @@ static int bmi323_read_raw(struct iio_dev *indio_dev,
int *val2, long mask)
{
struct bmi323_data *data = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_PROCESSED:
@@ -1755,10 +1760,11 @@ static int bmi323_read_raw(struct iio_dev *indio_dev,
switch (chan->type) {
case IIO_ACCEL:
case IIO_ANGL_VEL:
- iio_device_claim_direct_scoped(return -EBUSY,
- indio_dev)
- return bmi323_read_axis(data, chan, val);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = bmi323_read_axis(data, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_TEMP:
return bmi323_get_temp_data(data, val);
default:
diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c
index 363281272035..a43c8d1bb3d0 100644
--- a/drivers/iio/industrialio-backend.c
+++ b/drivers/iio/industrialio-backend.c
@@ -155,10 +155,12 @@ static ssize_t iio_backend_debugfs_write_reg(struct file *file,
ssize_t rc;
int ret;
- rc = simple_write_to_buffer(buf, sizeof(buf), ppos, userbuf, count);
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf, count);
if (rc < 0)
return rc;
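+ /* simple_write_to_buffer() does not NUL-terminate; rc bytes were copied */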
+ buf[rc] = '\0';
+
ret = sscanf(buf, "%i %i", &back->cached_reg_addr, &val);
switch (ret) {
@@ -637,6 +639,66 @@ ssize_t iio_backend_ext_info_set(struct iio_dev *indio_dev, uintptr_t private,
EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_set, "IIO_BACKEND");
/**
+ * iio_backend_interface_type_get - get the interface type used.
+ * @back: Backend device
+ * @type: Interface type
+ *
+ * Return:
+ * 0 on success, negative error number on failure.
+ */
+int iio_backend_interface_type_get(struct iio_backend *back,
+ enum iio_backend_interface_type *type)
+{
+ int ret;
+
+ ret = iio_backend_op_call(back, interface_type_get, type);
+ if (ret)
+ return ret;
+
+ if (*type >= IIO_BACKEND_INTERFACE_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_interface_type_get, "IIO_BACKEND");
+
+/**
+ * iio_backend_data_size_set - set the data width/size in the data bus.
+ * @back: Backend device
+ * @size: Size in bits
+ *
+ * Some frontend devices can dynamically control the word/data size on the
+ * interface/data bus. Hence, the backend device needs to be aware of it so
+ * data can be correctly transferred.
+ *
+ * Return:
+ * 0 on success, negative error number on failure.
+ */
+int iio_backend_data_size_set(struct iio_backend *back, unsigned int size)
+{
+ if (!size)
+ return -EINVAL;
+
+ return iio_backend_op_call(back, data_size_set, size);
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_data_size_set, "IIO_BACKEND");
+
+/**
+ * iio_backend_oversampling_ratio_set - set the oversampling ratio
+ * @back: Backend device
+ * @ratio: The oversampling ratio - value 1 corresponds to no oversampling.
+ *
+ * Return:
+ * 0 on success, negative error number on failure.
+ */
+int iio_backend_oversampling_ratio_set(struct iio_backend *back,
+ unsigned int ratio)
+{
+ return iio_backend_op_call(back, oversampling_ratio_set, ratio);
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_oversampling_ratio_set, "IIO_BACKEND");
+
+/**
* iio_backend_extend_chan_spec - Extend an IIO channel
* @back: Backend device
* @chan: IIO channel
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index a2117ad1337d..b9f4113ae5fc 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -410,11 +410,12 @@ static ssize_t iio_debugfs_write_reg(struct file *file,
char buf[80];
int ret;
- count = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, userbuf, count))
- return -EFAULT;
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf,
+ count);
+ if (ret < 0)
+ return ret;
- buf[count] = 0;
+ buf[ret] = '\0';
ret = sscanf(buf, "%i %i", &reg, &val);
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index db06501b0e61..06295cfc2da8 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -232,6 +232,7 @@ static const char * const iio_ev_type_text[] = {
[IIO_EV_TYPE_CHANGE] = "change",
[IIO_EV_TYPE_MAG_REFERENCED] = "mag_referenced",
[IIO_EV_TYPE_GESTURE] = "gesture",
+ [IIO_EV_TYPE_FAULT] = "fault",
};
static const char * const iio_ev_dir_text[] = {
@@ -240,6 +241,7 @@ static const char * const iio_ev_dir_text[] = {
[IIO_EV_DIR_FALLING] = "falling",
[IIO_EV_DIR_SINGLETAP] = "singletap",
[IIO_EV_DIR_DOUBLETAP] = "doubletap",
+ [IIO_EV_DIR_FAULT_OPENWIRE] = "openwire",
};
static const char * const iio_ev_info_text[] = {
diff --git a/drivers/iio/industrialio-gts-helper.c b/drivers/iio/industrialio-gts-helper.c
index d70ebe3bf774..f35c36fd4a55 100644
--- a/drivers/iio/industrialio-gts-helper.c
+++ b/drivers/iio/industrialio-gts-helper.c
@@ -160,16 +160,123 @@ static void iio_gts_purge_avail_scale_table(struct iio_gts *gts)
gts->num_avail_all_scales = 0;
}
+static int scale_eq(int *sc1, int *sc2)
+{
+ return sc1[0] == sc2[0] && sc1[1] == sc2[1];
+}
+
+static int scale_smaller(int *sc1, int *sc2)
+{
+ if (sc1[0] != sc2[0])
+ return sc1[0] < sc2[0];
+
+ /* integer parts are equal, compare the fractional parts */
+ return sc1[1] < sc2[1];
+}
+
+/*
+ * Build a single sorted table listing all the unique scales that any
+ * combination of supported gains and times can provide.
+ */
+static int do_combined_scaletable(struct iio_gts *gts,
+ size_t all_scales_tbl_bytes)
+{
+ int t_idx, i, new_idx;
+ int **scales = gts->per_time_avail_scale_tables;
+ int *all_scales = kcalloc(gts->num_itime, all_scales_tbl_bytes,
+ GFP_KERNEL);
+
+ if (!all_scales)
+ return -ENOMEM;
+
+ /*
+ * Create a table containing all of the supported scales by looping
+ * through all of the per-time scales and copying the unique scales
+ * into one sorted table.
+ *
+ * We assume all the gains for the same integration time are unique.
+ * The first time table likely has the greatest time multiplier, as
+ * the times are listed in order of preference and greater times are
+ * usually preferred. Hence we start from the last table, which is
+ * likely to have the smallest total gains.
+ */
+ t_idx = gts->num_itime - 1;
+ memcpy(all_scales, scales[t_idx], all_scales_tbl_bytes);
+ new_idx = gts->num_hwgain * 2;
+
+ while (t_idx-- > 0) {
+ for (i = 0; i < gts->num_hwgain; i++) {
+ int *candidate = &scales[t_idx][i * 2];
+ int chk;
+
+ if (scale_smaller(candidate, &all_scales[new_idx - 2])) {
+ all_scales[new_idx] = candidate[0];
+ all_scales[new_idx + 1] = candidate[1];
+ new_idx += 2;
+
+ continue;
+ }
+ for (chk = 0; chk < new_idx; chk += 2)
+ if (!scale_smaller(candidate, &all_scales[chk]))
+ break;
+
+ if (scale_eq(candidate, &all_scales[chk]))
+ continue;
+
+ memmove(&all_scales[chk + 2], &all_scales[chk],
+ (new_idx - chk) * sizeof(int));
+ all_scales[chk] = candidate[0];
+ all_scales[chk + 1] = candidate[1];
+ new_idx += 2;
+ }
+ }
+
+ gts->num_avail_all_scales = new_idx / 2;
+ gts->avail_all_scales_table = all_scales;
+
+ return 0;
+}
+
+static void iio_gts_free_int_table_array(int **arr, int num_tables)
+{
+ int i;
+
+ for (i = 0; i < num_tables; i++)
+ kfree(arr[i]);
+
+ kfree(arr);
+}
+
+static int iio_gts_alloc_int_table_array(int ***arr, int num_tables, int num_table_items)
+{
+ int i, **tmp;
+
+ tmp = kcalloc(num_tables, sizeof(**arr), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ for (i = 0; i < num_tables; i++) {
+ tmp[i] = kcalloc(num_table_items, sizeof(int), GFP_KERNEL);
+ if (!tmp[i])
+ goto err_free;
+ }
+
+ *arr = tmp;
+
+ return 0;
+err_free:
+ iio_gts_free_int_table_array(tmp, i);
+
+ return -ENOMEM;
+}
+
static int iio_gts_gain_cmp(const void *a, const void *b)
{
return *(int *)a - *(int *)b;
}
-static int gain_to_scaletables(struct iio_gts *gts, int **gains, int **scales)
+static int fill_and_sort_scaletables(struct iio_gts *gts, int **gains, int **scales)
{
- int i, j, new_idx, time_idx, ret = 0;
- int *all_gains;
- size_t gain_bytes;
+ int i, j, ret;
for (i = 0; i < gts->num_itime; i++) {
/*
@@ -189,71 +296,69 @@ static int gain_to_scaletables(struct iio_gts *gts, int **gains, int **scales)
}
}
- gain_bytes = array_size(gts->num_hwgain, sizeof(int));
- all_gains = kcalloc(gts->num_itime, gain_bytes, GFP_KERNEL);
- if (!all_gains)
- return -ENOMEM;
+ return 0;
+}
+
+static void compute_per_time_gains(struct iio_gts *gts, int **gains)
+{
+ int i, j;
+
+ for (i = 0; i < gts->num_itime; i++) {
+ for (j = 0; j < gts->num_hwgain; j++)
+ gains[i][j] = gts->hwgain_table[j].gain *
+ gts->itime_table[i].mul;
+ }
+}
+
+static int compute_per_time_tables(struct iio_gts *gts, int **scales)
+{
+ int **per_time_gains;
+ int ret;
/*
- * We assume all the gains for same integration time were unique.
- * It is likely the first time table had greatest time multiplier as
- * the times are in the order of preference and greater times are
- * usually preferred. Hence we start from the last table which is likely
- * to have the smallest total gains.
+ * Create a temporary array of the 'total gains' for each integration
+ * time.
*/
- time_idx = gts->num_itime - 1;
- memcpy(all_gains, gains[time_idx], gain_bytes);
- new_idx = gts->num_hwgain;
+ ret = iio_gts_alloc_int_table_array(&per_time_gains, gts->num_itime,
+ gts->num_hwgain);
+ if (ret)
+ return ret;
- while (time_idx-- > 0) {
- for (j = 0; j < gts->num_hwgain; j++) {
- int candidate = gains[time_idx][j];
- int chk;
+ compute_per_time_gains(gts, per_time_gains);
- if (candidate > all_gains[new_idx - 1]) {
- all_gains[new_idx] = candidate;
- new_idx++;
+ /* Convert the gains to scales and populate the scale tables */
+ ret = fill_and_sort_scaletables(gts, per_time_gains, scales);
- continue;
- }
- for (chk = 0; chk < new_idx; chk++)
- if (candidate <= all_gains[chk])
- break;
+ iio_gts_free_int_table_array(per_time_gains, gts->num_itime);
- if (candidate == all_gains[chk])
- continue;
+ return ret;
+}
- memmove(&all_gains[chk + 1], &all_gains[chk],
- (new_idx - chk) * sizeof(int));
- all_gains[chk] = candidate;
- new_idx++;
- }
- }
+/*
+ * Create a table of supported scales for each supported integration time.
+ * Drivers which don't allow the scale setting to change the integration
+ * time can use these as their available_scales, so that the correct set
+ * of scales is shown for the integration time in use.
+ */
+static int **create_per_time_scales(struct iio_gts *gts)
+{
+ int **per_time_scales, ret;
- gts->avail_all_scales_table = kcalloc(new_idx, 2 * sizeof(int),
- GFP_KERNEL);
- if (!gts->avail_all_scales_table) {
- ret = -ENOMEM;
- goto free_out;
- }
- gts->num_avail_all_scales = new_idx;
+ ret = iio_gts_alloc_int_table_array(&per_time_scales, gts->num_itime,
+ gts->num_hwgain * 2);
+ if (ret)
+ return ERR_PTR(ret);
- for (i = 0; i < gts->num_avail_all_scales; i++) {
- ret = iio_gts_total_gain_to_scale(gts, all_gains[i],
- &gts->avail_all_scales_table[i * 2],
- &gts->avail_all_scales_table[i * 2 + 1]);
+ ret = compute_per_time_tables(gts, per_time_scales);
+ if (ret)
+ goto err_out;
- if (ret) {
- kfree(gts->avail_all_scales_table);
- gts->num_avail_all_scales = 0;
- goto free_out;
- }
- }
+ return per_time_scales;
-free_out:
- kfree(all_gains);
+err_out:
+ iio_gts_free_int_table_array(per_time_scales, gts->num_itime);
- return ret;
+ return ERR_PTR(ret);
}
/**
@@ -275,55 +380,26 @@ free_out:
*/
static int iio_gts_build_avail_scale_table(struct iio_gts *gts)
{
- int **per_time_gains, **per_time_scales, i, j, ret = -ENOMEM;
+ int ret, all_scales_tbl_bytes;
+ int **per_time_scales;
- per_time_gains = kcalloc(gts->num_itime, sizeof(*per_time_gains), GFP_KERNEL);
- if (!per_time_gains)
- return ret;
-
- per_time_scales = kcalloc(gts->num_itime, sizeof(*per_time_scales), GFP_KERNEL);
- if (!per_time_scales)
- goto free_gains;
-
- for (i = 0; i < gts->num_itime; i++) {
- per_time_scales[i] = kcalloc(gts->num_hwgain, 2 * sizeof(int),
- GFP_KERNEL);
- if (!per_time_scales[i])
- goto err_free_out;
-
- per_time_gains[i] = kcalloc(gts->num_hwgain, sizeof(int),
- GFP_KERNEL);
- if (!per_time_gains[i]) {
- kfree(per_time_scales[i]);
- goto err_free_out;
- }
-
- for (j = 0; j < gts->num_hwgain; j++)
- per_time_gains[i][j] = gts->hwgain_table[j].gain *
- gts->itime_table[i].mul;
- }
+ if (unlikely(check_mul_overflow(gts->num_hwgain, 2 * sizeof(int),
+ &all_scales_tbl_bytes)))
+ return -EOVERFLOW;
- ret = gain_to_scaletables(gts, per_time_gains, per_time_scales);
- if (ret)
- goto err_free_out;
+ per_time_scales = create_per_time_scales(gts);
+ if (IS_ERR(per_time_scales))
+ return PTR_ERR(per_time_scales);
- for (i = 0; i < gts->num_itime; i++)
- kfree(per_time_gains[i]);
- kfree(per_time_gains);
gts->per_time_avail_scale_tables = per_time_scales;
- return 0;
-
-err_free_out:
- for (i--; i >= 0; i--) {
- kfree(per_time_scales[i]);
- kfree(per_time_gains[i]);
+ ret = do_combined_scaletable(gts, all_scales_tbl_bytes);
+ if (ret) {
+ iio_gts_free_int_table_array(per_time_scales, gts->num_itime);
+ return ret;
}
- kfree(per_time_scales);
-free_gains:
- kfree(per_time_gains);
- return ret;
+ return 0;
}
static void iio_gts_us_to_int_micro(int *time_us, int *int_micro_times,
@@ -950,7 +1026,15 @@ int iio_gts_find_gain_time_sel_for_scale(struct iio_gts *gts, int scale_int,
}
EXPORT_SYMBOL_NS_GPL(iio_gts_find_gain_time_sel_for_scale, "IIO_GTS_HELPER");
-static int iio_gts_get_total_gain(struct iio_gts *gts, int gain, int time)
+/**
+ * iio_gts_get_total_gain - Fetch total gain for given HW-gain and time
+ * @gts: Gain time scale descriptor
+ * @gain: HW-gain for which the total gain is searched for
+ * @time: Integration time for which the total gain is searched for
+ *
+ * Return: total gain on success and -EINVAL on error.
+ */
+int iio_gts_get_total_gain(struct iio_gts *gts, int gain, int time)
{
const struct iio_itime_sel_mul *itime;
@@ -966,6 +1050,7 @@ static int iio_gts_get_total_gain(struct iio_gts *gts, int gain, int time)
return gain * itime->mul;
}
+EXPORT_SYMBOL_NS_GPL(iio_gts_get_total_gain, "IIO_GTS_HELPER");
static int iio_gts_get_scale_linear(struct iio_gts *gts, int gain, int time,
u64 *scale)
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index e34e551eef3e..4a7d983c9cd4 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -43,6 +43,16 @@ config ADUX1020
To compile this driver as a module, choose M here: the
module will be called adux1020.
+config AL3000A
+ tristate "AL3000a ambient light sensor"
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Dyna Image AL3000a
+ ambient light sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called al3000a.
+
config AL3010
tristate "AL3010 ambient light sensor"
depends on I2C
@@ -63,6 +73,17 @@ config AL3320A
To compile this driver as a module, choose M here: the
module will be called al3320a.
+config APDS9160
+ tristate "APDS9160 combined als and proximity sensor"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y here if you want to build support for a Broadcom APDS9160
+ combined ambient light and proximity sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called apds9160.
+
config APDS9300
tristate "APDS9300 ambient light sensor"
depends on I2C
@@ -683,6 +704,7 @@ config VEML6030
select REGMAP_I2C
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
+ select IIO_GTS_HELPER
depends on I2C
help
Say Y here if you want to build a driver for the Vishay VEML6030
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 11a4041b918a..8229ebe6edc4 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -7,8 +7,10 @@
obj-$(CONFIG_ACPI_ALS) += acpi-als.o
obj-$(CONFIG_ADJD_S311) += adjd_s311.o
obj-$(CONFIG_ADUX1020) += adux1020.o
+obj-$(CONFIG_AL3000A) += al3000a.o
obj-$(CONFIG_AL3010) += al3010.o
obj-$(CONFIG_AL3320A) += al3320a.o
+obj-$(CONFIG_APDS9160) += apds9160.o
obj-$(CONFIG_APDS9300) += apds9300.o
obj-$(CONFIG_APDS9306) += apds9306.o
obj-$(CONFIG_APDS9960) += apds9960.o
diff --git a/drivers/iio/light/adux1020.c b/drivers/iio/light/adux1020.c
index 593d614b1689..9240983a6cc4 100644
--- a/drivers/iio/light/adux1020.c
+++ b/drivers/iio/light/adux1020.c
@@ -118,7 +118,6 @@ static const struct regmap_config adux1020_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
.max_register = 0x6F,
- .cache_type = REGCACHE_NONE,
};
static const struct reg_sequence adux1020_def_conf[] = {
diff --git a/drivers/iio/light/al3000a.c b/drivers/iio/light/al3000a.c
new file mode 100644
index 000000000000..e2fbb1270040
--- /dev/null
+++ b/drivers/iio/light/al3000a.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+
+#include <linux/iio/iio.h>
+
+#define AL3000A_REG_SYSTEM 0x00
+#define AL3000A_REG_DATA 0x05
+
+#define AL3000A_CONFIG_ENABLE 0x00
+#define AL3000A_CONFIG_DISABLE 0x0b
+#define AL3000A_CONFIG_RESET 0x0f
+#define AL3000A_GAIN_MASK GENMASK(5, 0)
+
+/*
+ * These are pre-calculated lux values based on possible output of sensor
+ * (range 0x00 - 0x3F)
+ */
+static const u32 lux_table[] = {
+ 1, 1, 1, 2, 2, 2, 3, 4, /* 0 - 7 */
+ 4, 5, 6, 7, 9, 11, 13, 16, /* 8 - 15 */
+ 19, 22, 27, 32, 39, 46, 56, 67, /* 16 - 23 */
+ 80, 96, 116, 139, 167, 200, 240, 289, /* 24 - 31 */
+ 347, 416, 499, 600, 720, 864, 1037, 1245, /* 32 - 39 */
+ 1495, 1795, 2155, 2587, 3105, 3728, 4475, 5373, /* 40 - 47 */
+ 6450, 7743, 9296, 11160, 13397, 16084, 19309, 23180, /* 48 - 55 */
+ 27828, 33408, 40107, 48148, 57803, 69393, 83306, 100000 /* 56 - 63 */
+};
+
+static const struct regmap_config al3000a_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = AL3000A_REG_DATA,
+};
+
+struct al3000a_data {
+ struct regmap *regmap;
+ struct regulator *vdd_supply;
+};
+
+static const struct iio_chan_spec al3000a_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ },
+};
+
+static int al3000a_set_pwr_on(struct al3000a_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+
+ ret = regulator_enable(data->vdd_supply);
+ if (ret) {
+ dev_err(dev, "failed to enable vdd power supply\n");
+ return ret;
+ }
+
+ return regmap_write(data->regmap, AL3000A_REG_SYSTEM, AL3000A_CONFIG_ENABLE);
+}
+
+static void al3000a_set_pwr_off(void *_data)
+{
+ struct al3000a_data *data = _data;
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+
+ ret = regmap_write(data->regmap, AL3000A_REG_SYSTEM, AL3000A_CONFIG_DISABLE);
+ if (ret)
+ dev_err(dev, "failed to write system register\n");
+
+ ret = regulator_disable(data->vdd_supply);
+ if (ret)
+ dev_err(dev, "failed to disable vdd power supply\n");
+}
+
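+/* power up the sensor, pulse a software reset and re-enable measurements */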
+static int al3000a_init(struct al3000a_data *data)
+{
+ int ret;
+
+ ret = al3000a_set_pwr_on(data);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(data->regmap, AL3000A_REG_SYSTEM, AL3000A_CONFIG_RESET);
+ if (ret)
+ return ret;
+
+ return regmap_write(data->regmap, AL3000A_REG_SYSTEM, AL3000A_CONFIG_ENABLE);
+}
+
+static int al3000a_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct al3000a_data *data = iio_priv(indio_dev);
+ int ret, gain;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ ret = regmap_read(data->regmap, AL3000A_REG_DATA, &gain);
+ if (ret)
+ return ret;
+
+ *val = lux_table[gain & AL3000A_GAIN_MASK];
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info al3000a_info = {
+ .read_raw = al3000a_read_raw,
+};
+
+static int al3000a_probe(struct i2c_client *client)
+{
+ struct al3000a_data *data;
+ struct device *dev = &client->dev;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+
+ data->regmap = devm_regmap_init_i2c(client, &al3000a_regmap_config);
+ if (IS_ERR(data->regmap))
+ return dev_err_probe(dev, PTR_ERR(data->regmap),
+ "cannot allocate regmap\n");
+
+ data->vdd_supply = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(data->vdd_supply))
+ return dev_err_probe(dev, PTR_ERR(data->vdd_supply),
+ "failed to get vdd regulator\n");
+
+ indio_dev->info = &al3000a_info;
+ indio_dev->name = "al3000a";
+ indio_dev->channels = al3000a_channels;
+ indio_dev->num_channels = ARRAY_SIZE(al3000a_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = al3000a_init(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init ALS\n");
+
+ ret = devm_add_action_or_reset(dev, al3000a_set_pwr_off, data);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add action\n");
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static int al3000a_suspend(struct device *dev)
+{
+ struct al3000a_data *data = iio_priv(dev_get_drvdata(dev));
+
+ al3000a_set_pwr_off(data);
+ return 0;
+}
+
+static int al3000a_resume(struct device *dev)
+{
+ struct al3000a_data *data = iio_priv(dev_get_drvdata(dev));
+
+ return al3000a_set_pwr_on(data);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(al3000a_pm_ops, al3000a_suspend, al3000a_resume);
+
+static const struct i2c_device_id al3000a_id[] = {
+ { "al3000a" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, al3000a_id);
+
+static const struct of_device_id al3000a_of_match[] = {
+ { .compatible = "dynaimage,al3000a" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, al3000a_of_match);
+
+static struct i2c_driver al3000a_driver = {
+ .driver = {
+ .name = "al3000a",
+ .of_match_table = al3000a_of_match,
+ .pm = pm_sleep_ptr(&al3000a_pm_ops),
+ },
+ .probe = al3000a_probe,
+ .id_table = al3000a_id,
+};
+module_i2c_driver(al3000a_driver);
+
+MODULE_AUTHOR("Svyatolsav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("al3000a Ambient Light Sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/apds9160.c b/drivers/iio/light/apds9160.c
new file mode 100644
index 000000000000..d3f415930ec9
--- /dev/null
+++ b/drivers/iio/light/apds9160.c
@@ -0,0 +1,1594 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * APDS9160 sensor driver.
+ * The chip is a combined proximity and ambient light sensor.
+ * Copyright 2024 Mikael Gonella-Bolduc <m.gonella.bolduc@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/events.h>
+#include <linux/iio/sysfs.h>
+
+#include <linux/unaligned.h>
+
+#define APDS9160_REGMAP_NAME "apds9160_regmap"
+
+/* Main control register */
+#define APDS9160_REG_CTRL 0x00
+#define APDS9160_CTRL_SWRESET BIT(4) /* 1: Activate reset */
+#define APDS9160_CTRL_MODE_RGB BIT(2) /* 0: ALS & IR, 1: RGB & IR */
+#define APDS9160_CTRL_EN_ALS BIT(1) /* 1: ALS active */
+#define APDS9160_CTLR_EN_PS BIT(0) /* 1: PS active */
+
+/* Status register bits */
+#define APDS9160_SR_LS_INT BIT(4)
+#define APDS9160_SR_LS_NEW_DATA BIT(3)
+#define APDS9160_SR_PS_INT BIT(1)
+#define APDS9160_SR_PS_NEW_DATA BIT(0)
+
+/* Interrupt configuration registers */
+#define APDS9160_REG_INT_CFG 0x19
+#define APDS9160_REG_INT_PST 0x1A
+#define APDS9160_INT_CFG_EN_LS BIT(2) /* LS int enable */
+#define APDS9160_INT_CFG_EN_PS BIT(0) /* PS int enable */
+
+/* Proximity registers */
+#define APDS9160_REG_PS_LED 0x01
+#define APDS9160_REG_PS_PULSES 0x02
+#define APDS9160_REG_PS_MEAS_RATE 0x03
+#define APDS9160_REG_PS_THRES_HI_LSB 0x1B
+#define APDS9160_REG_PS_THRES_HI_MSB 0x1C
+#define APDS9160_REG_PS_THRES_LO_LSB 0x1D
+#define APDS9160_REG_PS_THRES_LO_MSB 0x1E
+#define APDS9160_REG_PS_DATA_LSB 0x08
+#define APDS9160_REG_PS_DATA_MSB 0x09
+#define APDS9160_REG_PS_CAN_LEVEL_DIG_LSB 0x1F
+#define APDS9160_REG_PS_CAN_LEVEL_DIG_MSB 0x20
+#define APDS9160_REG_PS_CAN_LEVEL_ANA_DUR 0x21
+#define APDS9160_REG_PS_CAN_LEVEL_ANA_CURRENT 0x22
+
+/* Light sensor registers */
+#define APDS9160_REG_LS_MEAS_RATE 0x04
+#define APDS9160_REG_LS_GAIN 0x05
+#define APDS9160_REG_LS_DATA_CLEAR_LSB 0x0A
+#define APDS9160_REG_LS_DATA_CLEAR 0x0B
+#define APDS9160_REG_LS_DATA_CLEAR_MSB 0x0C
+#define APDS9160_REG_LS_DATA_ALS_LSB 0x0D
+#define APDS9160_REG_LS_DATA_ALS 0x0E
+#define APDS9160_REG_LS_DATA_ALS_MSB 0x0F
+#define APDS9160_REG_LS_THRES_UP_LSB 0x24
+#define APDS9160_REG_LS_THRES_UP 0x25
+#define APDS9160_REG_LS_THRES_UP_MSB 0x26
+#define APDS9160_REG_LS_THRES_LO_LSB 0x27
+#define APDS9160_REG_LS_THRES_LO 0x28
+#define APDS9160_REG_LS_THRES_LO_MSB 0x29
+#define APDS9160_REG_LS_THRES_VAR 0x2A
+
+/* Part identification number register */
+#define APDS9160_REG_ID 0x06
+
+/* Status register */
+#define APDS9160_REG_SR 0x07
+#define APDS9160_SR_DATA_ALS BIT(3)
+#define APDS9160_SR_DATA_PS BIT(0)
+
+/* Supported IDs */
+#define APDS9160_PART_ID_0 0x03
+
+#define APDS9160_PS_THRES_MAX 0x7FF
+#define APDS9160_LS_THRES_MAX 0xFFFFF
+#define APDS9160_CMD_LS_RESOLUTION_25MS 0x04
+#define APDS9160_CMD_LS_RESOLUTION_50MS 0x03
+#define APDS9160_CMD_LS_RESOLUTION_100MS 0x02
+#define APDS9160_CMD_LS_RESOLUTION_200MS 0x01
+#define APDS9160_PS_DATA_MASK 0x7FF
+
+#define APDS9160_DEFAULT_LS_GAIN 3
+#define APDS9160_DEFAULT_LS_RATE 100
+#define APDS9160_DEFAULT_PS_RATE 100
+#define APDS9160_DEFAULT_PS_CANCELLATION_LEVEL 0
+#define APDS9160_DEFAULT_PS_ANALOG_CANCELLATION 0
+#define APDS9160_DEFAULT_PS_GAIN 1
+#define APDS9160_DEFAULT_PS_CURRENT 100
+#define APDS9160_DEFAULT_PS_RESOLUTION_11BITS 0x03
+
+static const struct reg_default apds9160_reg_defaults[] = {
+ { APDS9160_REG_CTRL, 0x00 }, /* Sensors disabled by default */
+ { APDS9160_REG_PS_LED, 0x33 }, /* 60 kHz frequency, 100 mA */
+ { APDS9160_REG_PS_PULSES, 0x08 }, /* 8 pulses */
+ { APDS9160_REG_PS_MEAS_RATE, 0x05 }, /* 100ms */
+ { APDS9160_REG_LS_MEAS_RATE, 0x22 }, /* 100ms */
+ { APDS9160_REG_LS_GAIN, 0x01 }, /* 3x */
+ { APDS9160_REG_INT_CFG, 0x10 }, /* Interrupts disabled */
+ { APDS9160_REG_INT_PST, 0x00 },
+ { APDS9160_REG_PS_THRES_HI_LSB, 0xFF },
+ { APDS9160_REG_PS_THRES_HI_MSB, 0x07 },
+ { APDS9160_REG_PS_THRES_LO_LSB, 0x00 },
+ { APDS9160_REG_PS_THRES_LO_MSB, 0x00 },
+ { APDS9160_REG_PS_CAN_LEVEL_DIG_LSB, 0x00 },
+ { APDS9160_REG_PS_CAN_LEVEL_DIG_MSB, 0x00 },
+ { APDS9160_REG_PS_CAN_LEVEL_ANA_DUR, 0x00 },
+ { APDS9160_REG_PS_CAN_LEVEL_ANA_CURRENT, 0x00 },
+ { APDS9160_REG_LS_THRES_UP_LSB, 0xFF },
+ { APDS9160_REG_LS_THRES_UP, 0xFF },
+ { APDS9160_REG_LS_THRES_UP_MSB, 0x0F },
+ { APDS9160_REG_LS_THRES_LO_LSB, 0x00 },
+ { APDS9160_REG_LS_THRES_LO, 0x00 },
+ { APDS9160_REG_LS_THRES_LO_MSB, 0x00 },
+ { APDS9160_REG_LS_THRES_VAR, 0x00 },
+};
+
+static const struct regmap_range apds9160_readable_ranges[] = {
+ regmap_reg_range(APDS9160_REG_CTRL, APDS9160_REG_LS_THRES_VAR),
+};
+
+static const struct regmap_access_table apds9160_readable_table = {
+ .yes_ranges = apds9160_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(apds9160_readable_ranges),
+};
+
+static const struct regmap_range apds9160_writeable_ranges[] = {
+ regmap_reg_range(APDS9160_REG_CTRL, APDS9160_REG_LS_GAIN),
+ regmap_reg_range(APDS9160_REG_INT_CFG, APDS9160_REG_LS_THRES_VAR),
+};
+
+static const struct regmap_access_table apds9160_writeable_table = {
+ .yes_ranges = apds9160_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(apds9160_writeable_ranges),
+};
+
+static const struct regmap_range apds9160_volatile_ranges[] = {
+ regmap_reg_range(APDS9160_REG_SR, APDS9160_REG_LS_DATA_ALS_MSB),
+};
+
+static const struct regmap_access_table apds9160_volatile_table = {
+ .yes_ranges = apds9160_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(apds9160_volatile_ranges),
+};
+
+static const struct regmap_config apds9160_regmap_config = {
+ .name = APDS9160_REGMAP_NAME,
+ .reg_bits = 8,
+ .val_bits = 8,
+ .use_single_read = true,
+ .use_single_write = true,
+
+ .rd_table = &apds9160_readable_table,
+ .wr_table = &apds9160_writeable_table,
+ .volatile_table = &apds9160_volatile_table,
+
+ .reg_defaults = apds9160_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(apds9160_reg_defaults),
+ .max_register = APDS9160_REG_LS_THRES_VAR,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct iio_event_spec apds9160_event_spec[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_chan_spec apds9160_channels[] = {
+ {
+ /* Proximity sensor channel */
+ .type = IIO_PROXIMITY,
+ .address = APDS9160_REG_PS_DATA_LSB,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .event_spec = apds9160_event_spec,
+ .num_event_specs = ARRAY_SIZE(apds9160_event_spec),
+ },
+ {
+ /* Proximity sensor led current */
+ .type = IIO_CURRENT,
+ .output = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_RAW),
+ },
+ {
+ /* Illuminance */
+ .type = IIO_LIGHT,
+ .address = APDS9160_REG_LS_DATA_ALS_LSB,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_HARDWAREGAIN) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .event_spec = apds9160_event_spec,
+ .num_event_specs = ARRAY_SIZE(apds9160_event_spec),
+ },
+ {
+ /* Clear channel */
+ .type = IIO_INTENSITY,
+ .address = APDS9160_REG_LS_DATA_CLEAR_LSB,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .channel2 = IIO_MOD_LIGHT_CLEAR,
+ .modified = 1,
+ },
+};
+
+static const struct iio_chan_spec apds9160_channels_without_events[] = {
+ {
+ /* Proximity sensor channel */
+ .type = IIO_PROXIMITY,
+ .address = APDS9160_REG_PS_DATA_LSB,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+ {
+ /* Proximity sensor led current */
+ .type = IIO_CURRENT,
+ .output = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_RAW),
+ },
+ {
+ /* Illuminance */
+ .type = IIO_LIGHT,
+ .address = APDS9160_REG_LS_DATA_ALS_LSB,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_HARDWAREGAIN) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+ {
+ /* Clear channel */
+ .type = IIO_INTENSITY,
+ .address = APDS9160_REG_LS_DATA_CLEAR_LSB,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .channel2 = IIO_MOD_LIGHT_CLEAR,
+ .modified = 1,
+ },
+};
+
+static const int apds9160_als_rate_avail[] = {
+ 25, 50, 100, 200
+};
+
+static const int apds9160_als_rate_map[][2] = {
+ { 25, 0x00 },
+ { 50, 0x01 },
+ { 100, 0x02 },
+ { 200, 0x03 },
+};
+
+static const int apds9160_als_gain_map[][2] = {
+ { 1, 0x00 },
+ { 3, 0x01 },
+ { 6, 0x02 },
+ { 18, 0x03 },
+ { 54, 0x04 },
+};
+
+static const int apds9160_ps_gain_avail[] = {
+ 1, 2, 4, 8
+};
+
+static const int apds9160_ps_gain_map[][2] = {
+ { 1, 0x00 },
+ { 2, 0x01 },
+ { 4, 0x02 },
+ { 8, 0x03 },
+};
+
+static const int apds9160_ps_rate_avail[] = {
+ 25, 50, 100, 200, 400
+};
+
+static const int apds9160_ps_rate_map[][2] = {
+ { 25, 0x03 },
+ { 50, 0x04 },
+ { 100, 0x05 },
+ { 200, 0x06 },
+ { 400, 0x07 },
+};
+
+static const int apds9160_ps_led_current_avail[] = {
+ 10, 25, 50, 100, 150, 175, 200
+};
+
+static const int apds9160_ps_led_current_map[][2] = {
+ { 10, 0x00 },
+ { 25, 0x01 },
+ { 50, 0x02 },
+ { 100, 0x03 },
+ { 150, 0x04 },
+ { 175, 0x05 },
+ { 200, 0x06 },
+};
+
+/**
+ * struct apds9160_scale - apds9160 scale mapping definition
+ *
+ * @itime: Integration time in ms
+ * @gain: Gain multiplier
+ * @scale1: integer part of the lux/count resolution
+ * @scale2: micro lux/count (fractional part of the resolution)
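+ *
+ * For example, a resolution of 0.269 lux/count is encoded as
+ * scale1 = 0 and scale2 = 269000, following the
+ * IIO_VAL_INT_PLUS_MICRO convention.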
+ */
+struct apds9160_scale {
+ int itime;
+ int gain;
+ int scale1;
+ int scale2;
+};
+
+/* Scale mapping extracted from datasheet */
+static const struct apds9160_scale apds9160_als_scale_map[] = {
+ {
+ .itime = 25,
+ .gain = 1,
+ .scale1 = 3,
+ .scale2 = 272000,
+ },
+ {
+ .itime = 25,
+ .gain = 3,
+ .scale1 = 1,
+ .scale2 = 77000,
+ },
+ {
+ .itime = 25,
+ .gain = 6,
+ .scale1 = 0,
+ .scale2 = 525000,
+ },
+ {
+ .itime = 25,
+ .gain = 18,
+ .scale1 = 0,
+ .scale2 = 169000,
+ },
+ {
+ .itime = 25,
+ .gain = 54,
+ .scale1 = 0,
+ .scale2 = 49000,
+ },
+ {
+ .itime = 50,
+ .gain = 1,
+ .scale1 = 1,
+ .scale2 = 639000,
+ },
+ {
+ .itime = 50,
+ .gain = 3,
+ .scale1 = 0,
+ .scale2 = 538000,
+ },
+ {
+ .itime = 50,
+ .gain = 6,
+ .scale1 = 0,
+ .scale2 = 263000,
+ },
+ {
+ .itime = 50,
+ .gain = 18,
+ .scale1 = 0,
+ .scale2 = 84000,
+ },
+ {
+ .itime = 50,
+ .gain = 54,
+ .scale1 = 0,
+ .scale2 = 25000,
+ },
+ {
+ .itime = 100,
+ .gain = 1,
+ .scale1 = 0,
+ .scale2 = 819000,
+ },
+ {
+ .itime = 100,
+ .gain = 3,
+ .scale1 = 0,
+ .scale2 = 269000,
+ },
+ {
+ .itime = 100,
+ .gain = 6,
+ .scale1 = 0,
+ .scale2 = 131000,
+ },
+ {
+ .itime = 100,
+ .gain = 18,
+ .scale1 = 0,
+ .scale2 = 42000,
+ },
+ {
+ .itime = 100,
+ .gain = 54,
+ .scale1 = 0,
+ .scale2 = 12000,
+ },
+ {
+ .itime = 200,
+ .gain = 1,
+ .scale1 = 0,
+ .scale2 = 409000,
+ },
+ {
+ .itime = 200,
+ .gain = 3,
+ .scale1 = 0,
+ .scale2 = 135000,
+ },
+ {
+ .itime = 200,
+ .gain = 6,
+ .scale1 = 0,
+ .scale2 = 66000,
+ },
+ {
+ .itime = 200,
+ .gain = 18,
+ .scale1 = 0,
+ .scale2 = 21000,
+ },
+ {
+ .itime = 200,
+ .gain = 54,
+ .scale1 = 0,
+ .scale2 = 6000,
+ },
+};
+
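+/*
+ * Example: with itime = 100 ms and gain = 3 the resolution is
+ * 0.269 lux/count, so a raw ALS count of 1000 corresponds to
+ * roughly 269 lux once userspace applies the reported scale.
+ */
+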
+static const int apds9160_25ms_avail[][2] = {
+ { 3, 272000 },
+ { 1, 77000 },
+ { 0, 525000 },
+ { 0, 169000 },
+ { 0, 49000 },
+};
+
+static const int apds9160_50ms_avail[][2] = {
+ { 1, 639000 },
+ { 0, 538000 },
+ { 0, 263000 },
+ { 0, 84000 },
+ { 0, 25000 },
+};
+
+static const int apds9160_100ms_avail[][2] = {
+ { 0, 819000 },
+ { 0, 269000 },
+ { 0, 131000 },
+ { 0, 42000 },
+ { 0, 12000 },
+};
+
+static const int apds9160_200ms_avail[][2] = {
+ { 0, 409000 },
+ { 0, 135000 },
+ { 0, 66000 },
+ { 0, 21000 },
+ { 0, 6000 },
+};
+
+static const struct reg_field apds9160_reg_field_ls_en =
+ REG_FIELD(APDS9160_REG_CTRL, 1, 1);
+
+static const struct reg_field apds9160_reg_field_ps_en =
+ REG_FIELD(APDS9160_REG_CTRL, 0, 0);
+
+static const struct reg_field apds9160_reg_field_int_ps =
+ REG_FIELD(APDS9160_REG_INT_CFG, 0, 0);
+
+static const struct reg_field apds9160_reg_field_int_als =
+ REG_FIELD(APDS9160_REG_INT_CFG, 2, 2);
+
+static const struct reg_field apds9160_reg_field_ps_overflow =
+ REG_FIELD(APDS9160_REG_PS_DATA_MSB, 3, 3);
+
+static const struct reg_field apds9160_reg_field_als_rate =
+ REG_FIELD(APDS9160_REG_LS_MEAS_RATE, 0, 2);
+
+static const struct reg_field apds9160_reg_field_als_gain =
+ REG_FIELD(APDS9160_REG_LS_GAIN, 0, 2);
+
+static const struct reg_field apds9160_reg_field_ps_rate =
+ REG_FIELD(APDS9160_REG_PS_MEAS_RATE, 0, 2);
+
+static const struct reg_field apds9160_reg_field_als_res =
+ REG_FIELD(APDS9160_REG_LS_MEAS_RATE, 4, 6);
+
+static const struct reg_field apds9160_reg_field_ps_current =
+ REG_FIELD(APDS9160_REG_PS_LED, 0, 2);
+
+static const struct reg_field apds9160_reg_field_ps_gain =
+ REG_FIELD(APDS9160_REG_PS_MEAS_RATE, 6, 7);
+
+static const struct reg_field apds9160_reg_field_ps_resolution =
+ REG_FIELD(APDS9160_REG_PS_MEAS_RATE, 3, 4);
+
+struct apds9160_chip {
+ struct i2c_client *client;
+ struct regmap *regmap;
+
+ struct regmap_field *reg_enable_ps;
+ struct regmap_field *reg_enable_als;
+ struct regmap_field *reg_int_ps;
+ struct regmap_field *reg_int_als;
+ struct regmap_field *reg_ps_overflow;
+ struct regmap_field *reg_als_rate;
+ struct regmap_field *reg_als_resolution;
+ struct regmap_field *reg_ps_rate;
+ struct regmap_field *reg_als_gain;
+ struct regmap_field *reg_ps_current;
+ struct regmap_field *reg_ps_gain;
+ struct regmap_field *reg_ps_resolution;
+
+ struct mutex lock; /* protects state and config data */
+
+ /* State data */
+ int als_int;
+ int ps_int;
+
+ /* Configuration values */
+ int als_itime;
+ int als_hwgain;
+ int als_scale1;
+ int als_scale2;
+ int ps_rate;
+ int ps_cancellation_level;
+ int ps_current;
+ int ps_gain;
+};
+
+static int apds9160_set_ps_rate(struct apds9160_chip *data, int val)
+{
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(apds9160_ps_rate_map); idx++) {
+ int ret;
+
+ if (apds9160_ps_rate_map[idx][0] != val)
+ continue;
+
+ ret = regmap_field_write(data->reg_ps_rate,
+ apds9160_ps_rate_map[idx][1]);
+ if (ret)
+ return ret;
+ data->ps_rate = val;
+
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
+static int apds9160_set_ps_gain(struct apds9160_chip *data, int val)
+{
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(apds9160_ps_gain_map); idx++) {
+ int ret;
+
+ if (apds9160_ps_gain_map[idx][0] != val)
+ continue;
+
+ ret = regmap_field_write(data->reg_ps_gain,
+ apds9160_ps_gain_map[idx][1]);
+ if (ret)
+ return ret;
+ data->ps_gain = val;
+
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * The PS intelligent cancellation level register allows
+ * for an on-chip subtraction of the ADC count caused by
+ * unwanted reflected light from the PS ADC output.
+ */
+static int apds9160_set_ps_cancellation_level(struct apds9160_chip *data,
+ int val)
+{
+ int ret;
+ __le16 buf;
+
+ if (val < 0 || val > 0xFFFF)
+ return -EINVAL;
+
+ buf = cpu_to_le16(val);
+ ret = regmap_bulk_write(data->regmap, APDS9160_REG_PS_CAN_LEVEL_DIG_LSB,
+ &buf, 2);
+ if (ret)
+ return ret;
+
+ data->ps_cancellation_level = val;
+
+ return ret;
+}
+
+/*
+ * This parameter determines the cancellation pulse duration
+ * in each of the PWM pulses. The cancellation is applied during
+ * the integration phase of the PS measurement.
+ * The duration is programmed in half clock cycles: e.g. a value of 4
+ * yields two clock cycles, and a value of 0 or 1 does not generate
+ * any cancellation pulse.
+ */
+static int apds9160_set_ps_analog_cancellation(struct apds9160_chip *data,
+ int val)
+{
+ if (val < 0 || val > 63)
+ return -EINVAL;
+
+ return regmap_write(data->regmap, APDS9160_REG_PS_CAN_LEVEL_ANA_DUR,
+ val);
+}
+
+/*
+ * This parameter works in conjunction with the cancellation pulse
+ * duration. The value determines the current used for crosstalk
+ * cancellation: the coarse value is in steps of 60 nA and the fine
+ * value is in steps of 2.4 nA.
+ */
+static int apds9160_set_ps_cancellation_current(struct apds9160_chip *data,
+ int coarse_val,
+ int fine_val)
+{
+ int val;
+
+ if (coarse_val < 0 || coarse_val > 4)
+ return -EINVAL;
+
+ if (fine_val < 0 || fine_val > 15)
+ return -EINVAL;
+
+ /* Coarse value at B4:B5 and fine value at B0:B3 */
+ val = (coarse_val << 4) | fine_val;
+
+ return regmap_write(data->regmap, APDS9160_REG_PS_CAN_LEVEL_ANA_CURRENT,
+ val);
+}
+
+static int apds9160_ps_init_analog_cancellation(struct device *dev,
+ struct apds9160_chip *data)
+{
+ int ret, duration, picoamp, idx, coarse, fine;
+
+ ret = device_property_read_u32(dev,
+ "ps-cancellation-duration", &duration);
+ if (ret || duration == 0) {
+ /* Don't fail since this is not required */
+ return 0;
+ }
+
+ ret = device_property_read_u32(dev,
+ "ps-cancellation-current-picoamp", &picoamp);
+ if (ret)
+ return ret;
+
+ if (picoamp < 60000 || picoamp > 276000 || picoamp % 2400 != 0)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid cancellation current\n");
+
+ /* Compute required coarse and fine value from requested current */
+ fine = 0;
+ coarse = 0;
+ for (idx = 60000; idx < picoamp; idx += 2400) {
+ if (fine == 15) {
+ fine = 0;
+ coarse++;
+ idx += 21600;
+ } else {
+ fine++;
+ }
+ }
+
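+ /*
+ * Example: a request of 100800 pA walks fine = 0..15
+ * (60000..96000 pA at coarse = 0), then wraps to coarse = 1,
+ * fine = 0 (120000 pA); the request is therefore rounded up
+ * and the warning below is emitted.
+ */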
+ if (picoamp != idx)
+ dev_warn(dev,
+ "Invalid cancellation current %i, rounding to %i\n",
+ picoamp, idx);
+
+ ret = apds9160_set_ps_analog_cancellation(data, duration);
+ if (ret)
+ return ret;
+
+ return apds9160_set_ps_cancellation_current(data, coarse, fine);
+}
+
+static int apds9160_set_ps_current(struct apds9160_chip *data, int val)
+{
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(apds9160_ps_led_current_map); idx++) {
+ int ret;
+
+ if (apds9160_ps_led_current_map[idx][0] != val)
+ continue;
+
+ ret = regmap_field_write(
+ data->reg_ps_current,
+ apds9160_ps_led_current_map[idx][1]);
+ if (ret)
+ return ret;
+ data->ps_current = val;
+
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
+static int apds9160_set_als_gain(struct apds9160_chip *data, int gain)
+{
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(apds9160_als_gain_map); idx++) {
+ int ret;
+
+ if (gain != apds9160_als_gain_map[idx][0])
+ continue;
+
+ ret = regmap_field_write(data->reg_als_gain,
+ apds9160_als_gain_map[idx][1]);
+ if (ret)
+ return ret;
+ data->als_hwgain = gain;
+
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
+static int apds9160_set_als_scale(struct apds9160_chip *data, int val, int val2)
+{
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(apds9160_als_scale_map); idx++) {
+ if (apds9160_als_scale_map[idx].itime == data->als_itime &&
+ apds9160_als_scale_map[idx].scale1 == val &&
+ apds9160_als_scale_map[idx].scale2 == val2) {
+ int ret = apds9160_set_als_gain(data,
+ apds9160_als_scale_map[idx].gain);
+ if (ret)
+ return ret;
+ data->als_scale1 = val;
+ data->als_scale2 = val2;
+
+ return ret;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int apds9160_set_als_resolution(struct apds9160_chip *data, int val)
+{
+ switch (val) {
+ case 25:
+ return regmap_field_write(data->reg_als_resolution,
+ APDS9160_CMD_LS_RESOLUTION_25MS);
+ case 50:
+ return regmap_field_write(data->reg_als_resolution,
+ APDS9160_CMD_LS_RESOLUTION_50MS);
+ case 200:
+ return regmap_field_write(data->reg_als_resolution,
+ APDS9160_CMD_LS_RESOLUTION_200MS);
+ default:
+ return regmap_field_write(data->reg_als_resolution,
+ APDS9160_CMD_LS_RESOLUTION_100MS);
+ }
+}
+
+static int apds9160_set_als_rate(struct apds9160_chip *data, int val)
+{
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(apds9160_als_rate_map); idx++) {
+ if (apds9160_als_rate_map[idx][0] != val)
+ continue;
+
+ return regmap_field_write(data->reg_als_rate,
+ apds9160_als_rate_map[idx][1]);
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * Setting the integration time adjusts resolution, rate, scale and
+ * gain; e.g. selecting 200 ms picks the 200 ms scale table and resets
+ * the gain to that table's minimum-gain entry.
+ */
+static int apds9160_set_als_int_time(struct apds9160_chip *data, int val)
+{
+ int ret;
+ int idx;
+
+ ret = apds9160_set_als_rate(data, val);
+ if (ret)
+ return ret;
+
+ /* Match resolution register with rate */
+ ret = apds9160_set_als_resolution(data, val);
+ if (ret)
+ return ret;
+
+ data->als_itime = val;
+
+ /* Set the scale minimum gain */
+ for (idx = 0; idx < ARRAY_SIZE(apds9160_als_scale_map); idx++) {
+ if (data->als_itime != apds9160_als_scale_map[idx].itime)
+ continue;
+
+ return apds9160_set_als_scale(data,
+ apds9160_als_scale_map[idx].scale1,
+ apds9160_als_scale_map[idx].scale2);
+ }
+
+ return -EINVAL;
+}
+
+static int apds9160_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct apds9160_chip *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ *length = ARRAY_SIZE(apds9160_als_rate_avail);
+ *vals = (const int *)apds9160_als_rate_avail;
+ *type = IIO_VAL_INT;
+
+ return IIO_AVAIL_LIST;
+ case IIO_PROXIMITY:
+ *length = ARRAY_SIZE(apds9160_ps_rate_avail);
+ *vals = (const int *)apds9160_ps_rate_avail;
+ *type = IIO_VAL_INT;
+
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ *length = ARRAY_SIZE(apds9160_ps_gain_avail);
+ *vals = (const int *)apds9160_ps_gain_avail;
+ *type = IIO_VAL_INT;
+
+ return IIO_AVAIL_LIST;
+ case IIO_LIGHT:
+ /* The available scales change depending on itime */
+ switch (data->als_itime) {
+ case 25:
+ *length = ARRAY_SIZE(apds9160_25ms_avail) * 2;
+ *vals = (const int *)apds9160_25ms_avail;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+
+ return IIO_AVAIL_LIST;
+ case 50:
+ *length = ARRAY_SIZE(apds9160_50ms_avail) * 2;
+ *vals = (const int *)apds9160_50ms_avail;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+
+ return IIO_AVAIL_LIST;
+ case 100:
+ *length = ARRAY_SIZE(apds9160_100ms_avail) * 2;
+ *vals = (const int *)apds9160_100ms_avail;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+
+ return IIO_AVAIL_LIST;
+ case 200:
+ *length = ARRAY_SIZE(apds9160_200ms_avail) * 2;
+ *vals = (const int *)apds9160_200ms_avail;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_CURRENT:
+ *length = ARRAY_SIZE(apds9160_ps_led_current_avail);
+ *vals = (const int *)apds9160_ps_led_current_avail;
+ *type = IIO_VAL_INT;
+
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int apds9160_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_RAW:
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int apds9160_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct apds9160_chip *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_PROXIMITY: {
+ __le16 buf;
+
+ ret = regmap_bulk_read(data->regmap, chan->address,
+ &buf, 2);
+ if (ret)
+ return ret;
+ *val = le16_to_cpu(buf);
+ /* Remove overflow bits from result */
+ *val = FIELD_GET(APDS9160_PS_DATA_MASK, *val);
+
+ return IIO_VAL_INT;
+ }
+ case IIO_LIGHT:
+ case IIO_INTENSITY: {
+ u8 buf[3];
+
+ ret = regmap_bulk_read(data->regmap, chan->address,
+ &buf, 3);
+ if (ret)
+ return ret;
+ *val = get_unaligned_le24(buf);
+
+ return IIO_VAL_INT;
+ }
+ case IIO_CURRENT:
+ *val = data->ps_current;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ *val = data->als_hwgain;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_INT_TIME:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ *val = data->ps_rate;
+
+ return IIO_VAL_INT;
+ case IIO_LIGHT:
+ *val = data->als_itime;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ *val = data->ps_cancellation_level;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ *val = data->ps_gain;
+
+ return IIO_VAL_INT;
+ case IIO_LIGHT:
+ *val = data->als_scale1;
+ *val2 = data->als_scale2;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int apds9160_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct apds9160_chip *data = iio_priv(indio_dev);
+
+ guard(mutex)(&data->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ if (val2 != 0)
+ return -EINVAL;
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ return apds9160_set_ps_rate(data, val);
+ case IIO_LIGHT:
+ return apds9160_set_als_int_time(data, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ return apds9160_set_ps_gain(data, val);
+ case IIO_LIGHT:
+ return apds9160_set_als_scale(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (val2 != 0)
+ return -EINVAL;
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ return apds9160_set_ps_cancellation_level(data, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_RAW:
+ if (val2 != 0)
+ return -EINVAL;
+ switch (chan->type) {
+ case IIO_CURRENT:
+ return apds9160_set_ps_current(data, val);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline int apds9160_get_thres_reg(const struct iio_chan_spec *chan,
+ enum iio_event_direction dir, u8 *reg)
+{
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ *reg = APDS9160_REG_PS_THRES_HI_LSB;
+ break;
+ case IIO_LIGHT:
+ *reg = APDS9160_REG_LS_THRES_UP_LSB;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IIO_EV_DIR_FALLING:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ *reg = APDS9160_REG_PS_THRES_LO_LSB;
+ break;
+ case IIO_LIGHT:
+ *reg = APDS9160_REG_LS_THRES_LO_LSB;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int apds9160_read_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val, int *val2)
+{
+ u8 reg;
+ int ret;
+ struct apds9160_chip *data = iio_priv(indio_dev);
+
+ if (info != IIO_EV_INFO_VALUE)
+ return -EINVAL;
+
+ ret = apds9160_get_thres_reg(chan, dir, &reg);
+ if (ret < 0)
+ return ret;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY: {
+ __le16 buf;
+
+ ret = regmap_bulk_read(data->regmap, reg, &buf, 2);
+ if (ret < 0)
+ return ret;
+ *val = le16_to_cpu(buf);
+ return IIO_VAL_INT;
+ }
+ case IIO_LIGHT: {
+ u8 buf[3];
+
+ ret = regmap_bulk_read(data->regmap, reg, &buf, 3);
+ if (ret < 0)
+ return ret;
+ *val = get_unaligned_le24(buf);
+ return IIO_VAL_INT;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int apds9160_write_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ u8 reg;
+ int ret = 0;
+ struct apds9160_chip *data = iio_priv(indio_dev);
+
+ if (info != IIO_EV_INFO_VALUE)
+ return -EINVAL;
+
+ ret = apds9160_get_thres_reg(chan, dir, &reg);
+ if (ret < 0)
+ return ret;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY: {
+ __le16 buf;
+
+ if (val < 0 || val > APDS9160_PS_THRES_MAX)
+ return -EINVAL;
+
+ buf = cpu_to_le16(val);
+ return regmap_bulk_write(data->regmap, reg, &buf, 2);
+ }
+ case IIO_LIGHT: {
+ u8 buf[3];
+
+ if (val < 0 || val > APDS9160_LS_THRES_MAX)
+ return -EINVAL;
+
+ put_unaligned_le24(val, buf);
+ return regmap_bulk_write(data->regmap, reg, &buf, 3);
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int apds9160_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct apds9160_chip *data = iio_priv(indio_dev);
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ return data->ps_int;
+ case IIO_LIGHT:
+ return data->als_int;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int apds9160_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, bool state)
+{
+ struct apds9160_chip *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ ret = regmap_field_write(data->reg_int_ps, state);
+ if (ret)
+ return ret;
+ data->ps_int = state;
+
+ return 0;
+ case IIO_LIGHT:
+ ret = regmap_field_write(data->reg_int_als, state);
+ if (ret)
+ return ret;
+ data->als_int = state;
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static irqreturn_t apds9160_irq_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct apds9160_chip *data = iio_priv(indio_dev);
+ int ret, status;
+
+ /* Reading status register clears the interrupt flag */
+ ret = regmap_read(data->regmap, APDS9160_REG_SR, &status);
+ if (ret < 0) {
+ dev_err_ratelimited(&data->client->dev,
+ "irq status reg read failed\n");
+ return IRQ_HANDLED;
+ }
+
+ if ((status & APDS9160_SR_LS_INT) &&
+ (status & APDS9160_SR_LS_NEW_DATA) && data->als_int) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ iio_get_time_ns(indio_dev));
+ }
+
+ if ((status & APDS9160_SR_PS_INT) &&
+ (status & APDS9160_SR_PS_NEW_DATA) && data->ps_int) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ iio_get_time_ns(indio_dev));
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int apds9160_detect(struct apds9160_chip *chip)
+{
+ struct i2c_client *client = chip->client;
+ int ret;
+ u32 val;
+
+ ret = regmap_read(chip->regmap, APDS9160_REG_ID, &val);
+ if (ret < 0) {
+ dev_err(&client->dev, "ID read failed\n");
+ return ret;
+ }
+
+ if (val != APDS9160_PART_ID_0)
+ dev_info(&client->dev, "Unknown part id %u\n", val);
+
+ return 0;
+}
+
+static void apds9160_disable(void *chip)
+{
+ struct apds9160_chip *data = chip;
+ int ret;
+
+ ret = regmap_field_write(data->reg_enable_als, 0);
+ if (ret)
+ return;
+
+ regmap_field_write(data->reg_enable_ps, 0);
+}
+
+static int apds9160_chip_init(struct apds9160_chip *chip)
+{
+ int ret;
+
+ /* Write default values to interrupt register */
+ ret = regmap_field_write(chip->reg_int_ps, 0);
+ if (ret)
+ return ret;
+ chip->ps_int = 0;
+
+ ret = regmap_field_write(chip->reg_int_als, 0);
+ if (ret)
+ return ret;
+ chip->als_int = 0;
+
+ /* Write default values to control register */
+ ret = regmap_field_write(chip->reg_enable_als, 1);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(chip->reg_enable_ps, 1);
+ if (ret)
+ return ret;
+
+ /* Write other default values */
+ ret = regmap_field_write(chip->reg_ps_resolution,
+ APDS9160_DEFAULT_PS_RESOLUTION_11BITS);
+ if (ret)
+ return ret;
+
+ /* Write default values to configuration registers */
+ ret = apds9160_set_ps_current(chip, APDS9160_DEFAULT_PS_CURRENT);
+ if (ret)
+ return ret;
+
+ ret = apds9160_set_ps_rate(chip, APDS9160_DEFAULT_PS_RATE);
+ if (ret)
+ return ret;
+
+ ret = apds9160_set_als_int_time(chip, APDS9160_DEFAULT_LS_RATE);
+ if (ret)
+ return ret;
+
+ ret = apds9160_set_als_scale(chip,
+ apds9160_100ms_avail[0][0],
+ apds9160_100ms_avail[0][1]);
+ if (ret)
+ return ret;
+
+ ret = apds9160_set_ps_gain(chip, APDS9160_DEFAULT_PS_GAIN);
+ if (ret)
+ return ret;
+
+ ret = apds9160_set_ps_analog_cancellation(
+ chip, APDS9160_DEFAULT_PS_ANALOG_CANCELLATION);
+ if (ret)
+ return ret;
+
+ ret = apds9160_set_ps_cancellation_level(
+ chip, APDS9160_DEFAULT_PS_CANCELLATION_LEVEL);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(&chip->client->dev, apds9160_disable,
+ chip);
+}
+
+static int apds9160_regfield_init(struct apds9160_chip *data)
+{
+ struct device *dev = &data->client->dev;
+ struct regmap *regmap = data->regmap;
+ struct regmap_field *tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap, apds9160_reg_field_int_als);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_int_als = tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap, apds9160_reg_field_int_ps);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_int_ps = tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap, apds9160_reg_field_ls_en);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_enable_als = tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap, apds9160_reg_field_ps_en);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_enable_ps = tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap,
+ apds9160_reg_field_ps_overflow);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_ps_overflow = tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap, apds9160_reg_field_als_rate);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_als_rate = tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap, apds9160_reg_field_als_res);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_als_resolution = tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap, apds9160_reg_field_ps_rate);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_ps_rate = tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap, apds9160_reg_field_als_gain);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_als_gain = tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap,
+ apds9160_reg_field_ps_current);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_ps_current = tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap, apds9160_reg_field_ps_gain);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_ps_gain = tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap,
+ apds9160_reg_field_ps_resolution);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_ps_resolution = tmp;
+
+ return 0;
+}
+
+static const struct iio_info apds9160_info = {
+ .read_avail = apds9160_read_avail,
+ .read_raw = apds9160_read_raw,
+ .write_raw = apds9160_write_raw,
+ .write_raw_get_fmt = apds9160_write_raw_get_fmt,
+ .read_event_value = apds9160_read_event,
+ .write_event_value = apds9160_write_event,
+ .read_event_config = apds9160_read_event_config,
+ .write_event_config = apds9160_write_event_config,
+};
+
+static const struct iio_info apds9160_info_no_events = {
+ .read_avail = apds9160_read_avail,
+ .read_raw = apds9160_read_raw,
+ .write_raw = apds9160_write_raw,
+ .write_raw_get_fmt = apds9160_write_raw_get_fmt,
+};
+
+static int apds9160_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct apds9160_chip *chip;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable vdd supply\n");
+
+ indio_dev->name = "apds9160";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ chip = iio_priv(indio_dev);
+ chip->client = client;
+ chip->regmap = devm_regmap_init_i2c(client, &apds9160_regmap_config);
+ if (IS_ERR(chip->regmap))
+ return dev_err_probe(dev, PTR_ERR(chip->regmap),
+ "regmap initialization failed.\n");
+
+ mutex_init(&chip->lock);
+
+ ret = apds9160_detect(chip);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "apds9160 not found\n");
+
+ ret = apds9160_regfield_init(chip);
+ if (ret)
+ return ret;
+
+ ret = apds9160_chip_init(chip);
+ if (ret)
+ return ret;
+
+ ret = apds9160_ps_init_analog_cancellation(dev, chip);
+ if (ret)
+ return ret;
+
+ if (client->irq > 0) {
+ indio_dev->info = &apds9160_info;
+ indio_dev->channels = apds9160_channels;
+ indio_dev->num_channels = ARRAY_SIZE(apds9160_channels);
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ apds9160_irq_handler,
+ IRQF_ONESHOT, "apds9160_event",
+ indio_dev);
+ if (ret) {
+ return dev_err_probe(dev, ret,
+ "request irq (%d) failed\n",
+ client->irq);
+ }
+ } else {
+ indio_dev->info = &apds9160_info_no_events;
+ indio_dev->channels = apds9160_channels_without_events;
+ indio_dev->num_channels =
+ ARRAY_SIZE(apds9160_channels_without_events);
+ }
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed iio device registration\n");
+
+ return ret;
+}
+
+static const struct of_device_id apds9160_of_match[] = {
+ { .compatible = "brcm,apds9160" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, apds9160_of_match);
+
+static const struct i2c_device_id apds9160_id[] = {
+ { "apds9160", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, apds9160_id);
+
+static struct i2c_driver apds9160_driver = {
+ .driver = {
+ .name = "apds9160",
+ .of_match_table = apds9160_of_match,
+ },
+ .probe = apds9160_probe,
+ .id_table = apds9160_id,
+};
+module_i2c_driver(apds9160_driver);
+
+MODULE_DESCRIPTION("APDS9160 combined ALS and proximity sensor");
+MODULE_AUTHOR("Mikael Gonella-Bolduc <m.gonella.bolduc@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/bh1745.c b/drivers/iio/light/bh1745.c
index 3b4056be54a0..56ab5fe90ff9 100644
--- a/drivers/iio/light/bh1745.c
+++ b/drivers/iio/light/bh1745.c
@@ -426,16 +426,16 @@ static int bh1745_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- ret = regmap_bulk_read(data->regmap, chan->address,
- &value, 2);
- if (ret)
- return ret;
- *val = value;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
- return IIO_VAL_INT;
- }
- unreachable();
+ ret = regmap_bulk_read(data->regmap, chan->address, &value, 2);
+ iio_device_release_direct(indio_dev);
+ if (ret)
+ return ret;
+ *val = value;
+
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE: {
guard(mutex)(&data->lock);
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index aeae0566ec12..bb90f738312a 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -492,7 +492,7 @@ static int cm32181_probe(struct i2c_client *client)
ret = devm_iio_device_register(dev, indio_dev);
if (ret) {
- dev_err(dev, "%s: regist device failed\n", __func__);
+ dev_err(dev, "%s: register device failed\n", __func__);
return ret;
}
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index ae3fc3299eec..446dd54d5037 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -683,7 +683,7 @@ static int cm36651_probe(struct i2c_client *client)
ret = iio_device_register(indio_dev);
if (ret) {
- dev_err(&client->dev, "%s: regist device failed\n", __func__);
+ dev_err(&client->dev, "%s: register device failed\n", __func__);
goto error_free_irq;
}
diff --git a/drivers/iio/light/veml6030.c b/drivers/iio/light/veml6030.c
index 9b71825eea9b..473a9c3e32a3 100644
--- a/drivers/iio/light/veml6030.c
+++ b/drivers/iio/light/veml6030.c
@@ -24,10 +24,12 @@
#include <linux/regmap.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
+#include <linux/units.h>
#include <linux/regulator/consumer.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
+#include <linux/iio/iio-gts-helper.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
@@ -59,22 +61,36 @@
#define VEML6035_INT_CHAN BIT(3)
#define VEML6035_CHAN_EN BIT(2)
+/* Regfields */
+#define VEML6030_GAIN_RF REG_FIELD(VEML6030_REG_ALS_CONF, 11, 12)
+#define VEML6030_IT_RF REG_FIELD(VEML6030_REG_ALS_CONF, 6, 9)
+
+#define VEML6035_GAIN_RF REG_FIELD(VEML6030_REG_ALS_CONF, 10, 12)
+
+/* Maximum scales x 10000 to work with integers (21504 -> 2.1504 lux/count) */
+#define VEML6030_MAX_SCALE 21504
+#define VEML6035_MAX_SCALE 4096
+
enum veml6030_scan {
VEML6030_SCAN_ALS,
VEML6030_SCAN_WH,
VEML6030_SCAN_TIMESTAMP,
};
+struct veml6030_rf {
+ struct regmap_field *it;
+ struct regmap_field *gain;
+};
+
struct veml603x_chip {
const char *name;
- const int(*scale_vals)[][2];
- const int num_scale_vals;
const struct iio_chan_spec *channels;
const int num_channels;
+ const struct reg_field gain_rf;
+ const struct reg_field it_rf;
+ const int max_scale;
int (*hw_init)(struct iio_dev *indio_dev, struct device *dev);
int (*set_info)(struct iio_dev *indio_dev);
- int (*set_als_gain)(struct iio_dev *indio_dev, int val, int val2);
- int (*get_als_gain)(struct iio_dev *indio_dev, int *val, int *val2);
};
/*
@@ -91,40 +107,56 @@ struct veml603x_chip {
struct veml6030_data {
struct i2c_client *client;
struct regmap *regmap;
- int cur_resolution;
- int cur_gain;
- int cur_integration_time;
+ struct veml6030_rf rf;
const struct veml603x_chip *chip;
+ struct iio_gts gts;
+
};
-static const int veml6030_it_times[][2] = {
- { 0, 25000 },
- { 0, 50000 },
- { 0, 100000 },
- { 0, 200000 },
- { 0, 400000 },
- { 0, 800000 },
+#define VEML6030_SEL_IT_25MS 0x0C
+#define VEML6030_SEL_IT_50MS 0x08
+#define VEML6030_SEL_IT_100MS 0x00
+#define VEML6030_SEL_IT_200MS 0x01
+#define VEML6030_SEL_IT_400MS 0x02
+#define VEML6030_SEL_IT_800MS 0x03
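+/*
+ * The last GAIN_SCALE_ITIME_US() argument is the total-gain multiplier
+ * relative to the shortest 25 ms integration time, doubling at each
+ * step up to x32 for 800 ms.
+ */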
+static const struct iio_itime_sel_mul veml6030_it_sel[] = {
+ GAIN_SCALE_ITIME_US(25000, VEML6030_SEL_IT_25MS, 1),
+ GAIN_SCALE_ITIME_US(50000, VEML6030_SEL_IT_50MS, 2),
+ GAIN_SCALE_ITIME_US(100000, VEML6030_SEL_IT_100MS, 4),
+ GAIN_SCALE_ITIME_US(200000, VEML6030_SEL_IT_200MS, 8),
+ GAIN_SCALE_ITIME_US(400000, VEML6030_SEL_IT_400MS, 16),
+ GAIN_SCALE_ITIME_US(800000, VEML6030_SEL_IT_800MS, 32),
};
-/*
- * Scale is 1/gain. Value 0.125 is ALS gain x (1/8), 0.25 is
- * ALS gain x (1/4), 0.5 is ALS gain x (1/2), 1.0 is ALS gain x 1,
- * 2.0 is ALS gain x2, and 4.0 is ALS gain x 4.
+/*
+ * Gains are multiplied by 8 to work with integers. The values in the
+ * iio-gts tables don't need corrections because the maximum value of
+ * the scale refers to GAIN = x1, and the rest of the values are
+ * obtained from the resulting linear function.
*/
-static const int veml6030_scale_vals[][2] = {
- { 0, 125000 },
- { 0, 250000 },
- { 1, 0 },
- { 2, 0 },
+#define VEML6030_SEL_MILLI_GAIN_X125 2
+#define VEML6030_SEL_MILLI_GAIN_X250 3
+#define VEML6030_SEL_MILLI_GAIN_X1000 0
+#define VEML6030_SEL_MILLI_GAIN_X2000 1
+static const struct iio_gain_sel_pair veml6030_gain_sel[] = {
+ GAIN_SCALE_GAIN(1, VEML6030_SEL_MILLI_GAIN_X125),
+ GAIN_SCALE_GAIN(2, VEML6030_SEL_MILLI_GAIN_X250),
+ GAIN_SCALE_GAIN(8, VEML6030_SEL_MILLI_GAIN_X1000),
+ GAIN_SCALE_GAIN(16, VEML6030_SEL_MILLI_GAIN_X2000),
};
-static const int veml6035_scale_vals[][2] = {
- { 0, 125000 },
- { 0, 250000 },
- { 0, 500000 },
- { 1, 0 },
- { 2, 0 },
- { 4, 0 },
+#define VEML6035_SEL_MILLI_GAIN_X125 4
+#define VEML6035_SEL_MILLI_GAIN_X250 5
+#define VEML6035_SEL_MILLI_GAIN_X500 7
+#define VEML6035_SEL_MILLI_GAIN_X1000 0
+#define VEML6035_SEL_MILLI_GAIN_X2000 1
+#define VEML6035_SEL_MILLI_GAIN_X4000 3
+static const struct iio_gain_sel_pair veml6035_gain_sel[] = {
+ GAIN_SCALE_GAIN(1, VEML6035_SEL_MILLI_GAIN_X125),
+ GAIN_SCALE_GAIN(2, VEML6035_SEL_MILLI_GAIN_X250),
+ GAIN_SCALE_GAIN(4, VEML6035_SEL_MILLI_GAIN_X500),
+ GAIN_SCALE_GAIN(8, VEML6035_SEL_MILLI_GAIN_X1000),
+ GAIN_SCALE_GAIN(16, VEML6035_SEL_MILLI_GAIN_X2000),
+ GAIN_SCALE_GAIN(32, VEML6035_SEL_MILLI_GAIN_X4000),
};
/*
@@ -319,113 +351,112 @@ static const struct iio_chan_spec veml7700_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(VEML6030_SCAN_TIMESTAMP),
};
+static const struct regmap_range veml6030_readable_ranges[] = {
+ regmap_reg_range(VEML6030_REG_ALS_CONF, VEML6030_REG_ALS_INT),
+};
+
+static const struct regmap_access_table veml6030_readable_table = {
+ .yes_ranges = veml6030_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(veml6030_readable_ranges),
+};
+
+static const struct regmap_range veml6030_writable_ranges[] = {
+ regmap_reg_range(VEML6030_REG_ALS_CONF, VEML6030_REG_ALS_PSM),
+};
+
+static const struct regmap_access_table veml6030_writable_table = {
+ .yes_ranges = veml6030_writable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(veml6030_writable_ranges),
+};
+
+static const struct regmap_range veml6030_volatile_ranges[] = {
+ regmap_reg_range(VEML6030_REG_ALS_DATA, VEML6030_REG_WH_DATA),
+};
+
+static const struct regmap_access_table veml6030_volatile_table = {
+ .yes_ranges = veml6030_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(veml6030_volatile_ranges),
+};
+
static const struct regmap_config veml6030_regmap_config = {
.name = "veml6030_regmap",
.reg_bits = 8,
.val_bits = 16,
.max_register = VEML6030_REG_ALS_INT,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .rd_table = &veml6030_readable_table,
+ .wr_table = &veml6030_writable_table,
+ .volatile_table = &veml6030_volatile_table,
+ .cache_type = REGCACHE_RBTREE,
};
-static int veml6030_get_intgrn_tm(struct iio_dev *indio_dev,
- int *val, int *val2)
+static int veml6030_get_it(struct veml6030_data *data, int *val, int *val2)
{
- int ret, reg;
- struct veml6030_data *data = iio_priv(indio_dev);
+ int ret, it_idx;
- ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
- if (ret) {
- dev_err(&data->client->dev,
- "can't read als conf register %d\n", ret);
+ ret = regmap_field_read(data->rf.it, &it_idx);
+ if (ret)
return ret;
- }
- switch ((reg >> 6) & 0xF) {
- case 0:
- *val2 = 100000;
- break;
- case 1:
- *val2 = 200000;
- break;
- case 2:
- *val2 = 400000;
- break;
- case 3:
- *val2 = 800000;
- break;
- case 8:
- *val2 = 50000;
- break;
- case 12:
- *val2 = 25000;
- break;
- default:
- return -EINVAL;
- }
+ ret = iio_gts_find_int_time_by_sel(&data->gts, it_idx);
+ if (ret < 0)
+ return ret;
+ *val2 = ret;
*val = 0;
+
return IIO_VAL_INT_PLUS_MICRO;
}
-static int veml6030_set_intgrn_tm(struct iio_dev *indio_dev,
- int val, int val2)
+static int veml6030_set_it(struct iio_dev *indio_dev, int val, int val2)
{
- int ret, new_int_time, int_idx;
struct veml6030_data *data = iio_priv(indio_dev);
+ int ret, gain_idx, it_idx, new_gain, prev_gain, prev_it;
+ bool in_range;
- if (val)
+ if (val || !iio_gts_valid_time(&data->gts, val2))
return -EINVAL;
- switch (val2) {
- case 25000:
- new_int_time = 0x300;
- int_idx = 5;
- break;
- case 50000:
- new_int_time = 0x200;
- int_idx = 4;
- break;
- case 100000:
- new_int_time = 0x00;
- int_idx = 3;
- break;
- case 200000:
- new_int_time = 0x40;
- int_idx = 2;
- break;
- case 400000:
- new_int_time = 0x80;
- int_idx = 1;
- break;
- case 800000:
- new_int_time = 0xC0;
- int_idx = 0;
- break;
- default:
- return -EINVAL;
- }
+ ret = regmap_field_read(data->rf.it, &it_idx);
+ if (ret)
+ return ret;
- ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
- VEML6030_ALS_IT, new_int_time);
- if (ret) {
- dev_err(&data->client->dev,
- "can't update als integration time %d\n", ret);
+ ret = regmap_field_read(data->rf.gain, &gain_idx);
+ if (ret)
return ret;
- }
- /*
- * Cache current integration time and update resolution. For every
- * increase in integration time to next level, resolution is halved
- * and vice-versa.
- */
- if (data->cur_integration_time < int_idx)
- data->cur_resolution <<= int_idx - data->cur_integration_time;
- else if (data->cur_integration_time > int_idx)
- data->cur_resolution >>= data->cur_integration_time - int_idx;
+ prev_it = iio_gts_find_int_time_by_sel(&data->gts, it_idx);
+ if (prev_it < 0)
+ return prev_it;
+
+ if (prev_it == val2)
+ return 0;
- data->cur_integration_time = int_idx;
+ prev_gain = iio_gts_find_gain_by_sel(&data->gts, gain_idx);
+ if (prev_gain < 0)
+ return prev_gain;
- return ret;
+ ret = iio_gts_find_new_gain_by_gain_time_min(&data->gts, prev_gain, prev_it,
+ val2, &new_gain, &in_range);
+ if (ret)
+ return ret;
+
+ if (!in_range)
+ dev_dbg(&data->client->dev, "Optimal gain out of range\n");
+
+ ret = iio_gts_find_sel_by_int_time(&data->gts, val2);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_field_write(data->rf.it, ret);
+ if (ret)
+ return ret;
+
+ ret = iio_gts_find_sel_by_gain(&data->gts, new_gain);
+ if (ret < 0)
+ return ret;
+
+ return regmap_field_write(data->rf.gain, ret);
}
static int veml6030_read_persistence(struct iio_dev *indio_dev,
@@ -434,7 +465,7 @@ static int veml6030_read_persistence(struct iio_dev *indio_dev,
int ret, reg, period, x, y;
struct veml6030_data *data = iio_priv(indio_dev);
- ret = veml6030_get_intgrn_tm(indio_dev, &x, &y);
+ ret = veml6030_get_it(data, &x, &y);
if (ret < 0)
return ret;
@@ -459,7 +490,7 @@ static int veml6030_write_persistence(struct iio_dev *indio_dev,
int ret, period, x, y;
struct veml6030_data *data = iio_priv(indio_dev);
- ret = veml6030_get_intgrn_tm(indio_dev, &x, &y);
+ ret = veml6030_get_it(data, &x, &y);
if (ret < 0)
return ret;
@@ -488,177 +519,29 @@ static int veml6030_write_persistence(struct iio_dev *indio_dev,
return ret;
}
-/*
- * Cache currently set gain & update resolution. For every
- * increase in the gain to next level, resolution is halved
- * and vice-versa.
- */
-static void veml6030_update_gain_res(struct veml6030_data *data, int gain_idx)
-{
- if (data->cur_gain < gain_idx)
- data->cur_resolution <<= gain_idx - data->cur_gain;
- else if (data->cur_gain > gain_idx)
- data->cur_resolution >>= data->cur_gain - gain_idx;
-
- data->cur_gain = gain_idx;
-}
-
-static int veml6030_set_als_gain(struct iio_dev *indio_dev,
- int val, int val2)
+static int veml6030_set_scale(struct iio_dev *indio_dev, int val, int val2)
{
- int ret, new_gain, gain_idx;
+ int ret, gain_sel, it_idx, it_sel;
struct veml6030_data *data = iio_priv(indio_dev);
- if (val == 0 && val2 == 125000) {
- new_gain = 0x1000; /* 0x02 << 11 */
- gain_idx = 3;
- } else if (val == 0 && val2 == 250000) {
- new_gain = 0x1800;
- gain_idx = 2;
- } else if (val == 1 && val2 == 0) {
- new_gain = 0x00;
- gain_idx = 1;
- } else if (val == 2 && val2 == 0) {
- new_gain = 0x800;
- gain_idx = 0;
- } else {
- return -EINVAL;
- }
-
- ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
- VEML6030_ALS_GAIN, new_gain);
- if (ret) {
- dev_err(&data->client->dev,
- "can't set als gain %d\n", ret);
+ ret = regmap_field_read(data->rf.it, &it_idx);
+ if (ret)
return ret;
- }
-
- veml6030_update_gain_res(data, gain_idx);
-
- return 0;
-}
-
-static int veml6035_set_als_gain(struct iio_dev *indio_dev, int val, int val2)
-{
- int ret, new_gain, gain_idx;
- struct veml6030_data *data = iio_priv(indio_dev);
- if (val == 0 && val2 == 125000) {
- new_gain = VEML6035_SENS;
- gain_idx = 5;
- } else if (val == 0 && val2 == 250000) {
- new_gain = VEML6035_SENS | VEML6035_GAIN;
- gain_idx = 4;
- } else if (val == 0 && val2 == 500000) {
- new_gain = VEML6035_SENS | VEML6035_GAIN |
- VEML6035_DG;
- gain_idx = 3;
- } else if (val == 1 && val2 == 0) {
- new_gain = 0x0000;
- gain_idx = 2;
- } else if (val == 2 && val2 == 0) {
- new_gain = VEML6035_GAIN;
- gain_idx = 1;
- } else if (val == 4 && val2 == 0) {
- new_gain = VEML6035_GAIN | VEML6035_DG;
- gain_idx = 0;
- } else {
- return -EINVAL;
- }
-
- ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
- VEML6035_GAIN_M, new_gain);
- if (ret) {
- dev_err(&data->client->dev, "can't set als gain %d\n", ret);
+ ret = iio_gts_find_gain_time_sel_for_scale(&data->gts, val, val2,
+ &gain_sel, &it_sel);
+ if (ret)
return ret;
- }
-
- veml6030_update_gain_res(data, gain_idx);
- return 0;
-}
-
-static int veml6030_get_als_gain(struct iio_dev *indio_dev,
- int *val, int *val2)
-{
- int ret, reg;
- struct veml6030_data *data = iio_priv(indio_dev);
-
- ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
- if (ret) {
- dev_err(&data->client->dev,
- "can't read als conf register %d\n", ret);
+ ret = regmap_field_write(data->rf.it, it_sel);
+ if (ret)
return ret;
- }
- switch ((reg >> 11) & 0x03) {
- case 0:
- *val = 1;
- *val2 = 0;
- break;
- case 1:
- *val = 2;
- *val2 = 0;
- break;
- case 2:
- *val = 0;
- *val2 = 125000;
- break;
- case 3:
- *val = 0;
- *val2 = 250000;
- break;
- default:
- return -EINVAL;
- }
-
- return IIO_VAL_INT_PLUS_MICRO;
-}
-
-static int veml6035_get_als_gain(struct iio_dev *indio_dev, int *val, int *val2)
-{
- int ret, reg;
- struct veml6030_data *data = iio_priv(indio_dev);
-
- ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
- if (ret) {
- dev_err(&data->client->dev,
- "can't read als conf register %d\n", ret);
+ ret = regmap_field_write(data->rf.gain, gain_sel);
+ if (ret)
return ret;
- }
- switch (FIELD_GET(VEML6035_GAIN_M, reg)) {
- case 0:
- *val = 1;
- *val2 = 0;
- break;
- case 1:
- case 2:
- *val = 2;
- *val2 = 0;
- break;
- case 3:
- *val = 4;
- *val2 = 0;
- break;
- case 4:
- *val = 0;
- *val2 = 125000;
- break;
- case 5:
- case 6:
- *val = 0;
- *val2 = 250000;
- break;
- case 7:
- *val = 0;
- *val2 = 500000;
- break;
- default:
- return -EINVAL;
- }
-
- return IIO_VAL_INT_PLUS_MICRO;
+ return 0;
}
static int veml6030_read_thresh(struct iio_dev *indio_dev,
@@ -705,6 +588,71 @@ static int veml6030_write_thresh(struct iio_dev *indio_dev,
return ret;
}
+static int veml6030_get_total_gain(struct veml6030_data *data)
+{
+ int gain, it, reg, ret;
+
+ ret = regmap_field_read(data->rf.gain, &reg);
+ if (ret)
+ return ret;
+
+ gain = iio_gts_find_gain_by_sel(&data->gts, reg);
+ if (gain < 0)
+ return gain;
+
+ ret = regmap_field_read(data->rf.it, &reg);
+ if (ret)
+ return ret;
+
+ it = iio_gts_find_int_time_by_sel(&data->gts, reg);
+ if (it < 0)
+ return it;
+
+ return iio_gts_get_total_gain(&data->gts, gain, it);
+}
+
+static int veml6030_get_scale(struct veml6030_data *data, int *val, int *val2)
+{
+ int gain, it, reg, ret;
+
+ ret = regmap_field_read(data->rf.gain, &reg);
+ if (ret)
+ return ret;
+
+ gain = iio_gts_find_gain_by_sel(&data->gts, reg);
+ if (gain < 0)
+ return gain;
+
+ ret = regmap_field_read(data->rf.it, &reg);
+ if (ret)
+ return ret;
+
+ it = iio_gts_find_int_time_by_sel(&data->gts, reg);
+ if (it < 0)
+ return it;
+
+ ret = iio_gts_get_scale(&data->gts, gain, it, val, val2);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
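+/*
+ * Example: at gain x1 (stored as 8 in the gain table) with 100 ms
+ * integration (multiplier 4), the total gain is 32; a raw count of
+ * 1000 on a VEML6030 gives 1000 * 21504 / 32 / 10000 = 67 lux and
+ * 672000 % 10000 * 100 = 200000 ulux, i.e. 67.2 lux.
+ */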
+static int veml6030_process_als(struct veml6030_data *data, int raw,
+ int *val, int *val2)
+{
+ int total_gain;
+
+ total_gain = veml6030_get_total_gain(data);
+ if (total_gain < 0)
+ return total_gain;
+
+ *val = raw * data->chip->max_scale / total_gain / 10000;
+ *val2 = raw * data->chip->max_scale / total_gain % 10000 * 100;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
/*
* Provide both raw as well as light reading in lux.
* light (in lux) = resolution * raw reading
@@ -728,11 +676,9 @@ static int veml6030_read_raw(struct iio_dev *indio_dev,
dev_err(dev, "can't read als data %d\n", ret);
return ret;
}
- if (mask == IIO_CHAN_INFO_PROCESSED) {
- *val = (reg * data->cur_resolution) / 10000;
- *val2 = (reg * data->cur_resolution) % 10000 * 100;
- return IIO_VAL_INT_PLUS_MICRO;
- }
+ if (mask == IIO_CHAN_INFO_PROCESSED)
+ return veml6030_process_als(data, reg, val, val2);
+
*val = reg;
return IIO_VAL_INT;
case IIO_INTENSITY:
@@ -747,9 +693,9 @@ static int veml6030_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
case IIO_CHAN_INFO_INT_TIME:
- return veml6030_get_intgrn_tm(indio_dev, val, val2);
+ return veml6030_get_it(data, val, val2);
case IIO_CHAN_INFO_SCALE:
- return data->chip->get_als_gain(indio_dev, val, val2);
+ return veml6030_get_scale(data, val, val2);
default:
return -EINVAL;
}
@@ -764,15 +710,9 @@ static int veml6030_read_avail(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_INT_TIME:
- *vals = (int *)&veml6030_it_times;
- *length = 2 * ARRAY_SIZE(veml6030_it_times);
- *type = IIO_VAL_INT_PLUS_MICRO;
- return IIO_AVAIL_LIST;
+ return iio_gts_avail_times(&data->gts, vals, type, length);
case IIO_CHAN_INFO_SCALE:
- *vals = (int *)*data->chip->scale_vals;
- *length = 2 * data->chip->num_scale_vals;
- *type = IIO_VAL_INT_PLUS_MICRO;
- return IIO_AVAIL_LIST;
+ return iio_gts_all_avail_scales(&data->gts, vals, type, length);
}
return -EINVAL;
@@ -782,13 +722,25 @@ static int veml6030_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
- struct veml6030_data *data = iio_priv(indio_dev);
-
switch (mask) {
case IIO_CHAN_INFO_INT_TIME:
- return veml6030_set_intgrn_tm(indio_dev, val, val2);
+ return veml6030_set_it(indio_dev, val, val2);
case IIO_CHAN_INFO_SCALE:
- return data->chip->set_als_gain(indio_dev, val, val2);
+ return veml6030_set_scale(indio_dev, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6030_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_INT_TIME:
+ return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
@@ -886,6 +838,7 @@ static const struct iio_info veml6030_info = {
.read_raw = veml6030_read_raw,
.read_avail = veml6030_read_avail,
.write_raw = veml6030_write_raw,
+ .write_raw_get_fmt = veml6030_write_raw_get_fmt,
.read_event_value = veml6030_read_event_val,
.write_event_value = veml6030_write_event_val,
.read_event_config = veml6030_read_interrupt_config,
@@ -897,6 +850,7 @@ static const struct iio_info veml6030_info_no_irq = {
.read_raw = veml6030_read_raw,
.read_avail = veml6030_read_avail,
.write_raw = veml6030_write_raw,
+ .write_raw_get_fmt = veml6030_write_raw_get_fmt,
};
static irqreturn_t veml6030_event_handler(int irq, void *private)
@@ -990,6 +944,27 @@ static int veml7700_set_info(struct iio_dev *indio_dev)
return 0;
}
+static int veml6030_regfield_init(struct iio_dev *indio_dev)
+{
+ struct veml6030_data *data = iio_priv(indio_dev);
+ struct regmap *regmap = data->regmap;
+ struct device *dev = &data->client->dev;
+ struct regmap_field *rm_field;
+ struct veml6030_rf *rf = &data->rf;
+
+ rm_field = devm_regmap_field_alloc(dev, regmap, data->chip->it_rf);
+ if (IS_ERR(rm_field))
+ return PTR_ERR(rm_field);
+ rf->it = rm_field;
+
+ rm_field = devm_regmap_field_alloc(dev, regmap, data->chip->gain_rf);
+ if (IS_ERR(rm_field))
+ return PTR_ERR(rm_field);
+ rf->gain = rm_field;
+
+ return 0;
+}
+
/*
* Set ALS gain to 1/8, integration time to 100 ms, PSM to mode 2,
* persistence to 1 x integration time and the threshold
@@ -1001,6 +976,13 @@ static int veml6030_hw_init(struct iio_dev *indio_dev, struct device *dev)
int ret, val;
struct veml6030_data *data = iio_priv(indio_dev);
+ ret = devm_iio_init_iio_gts(dev, 2, 150400000,
+ veml6030_gain_sel, ARRAY_SIZE(veml6030_gain_sel),
+ veml6030_it_sel, ARRAY_SIZE(veml6030_it_sel),
+ &data->gts);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init iio gts\n");
+
ret = veml6030_als_shut_down(data);
if (ret)
return dev_err_probe(dev, ret, "can't shutdown als\n");
@@ -1036,11 +1018,6 @@ static int veml6030_hw_init(struct iio_dev *indio_dev, struct device *dev)
return dev_err_probe(dev, ret,
"can't clear als interrupt status\n");
- /* Cache currently active measurement parameters */
- data->cur_gain = 3;
- data->cur_resolution = 5376;
- data->cur_integration_time = 3;
-
return ret;
}
@@ -1056,6 +1033,13 @@ static int veml6035_hw_init(struct iio_dev *indio_dev, struct device *dev)
int ret, val;
struct veml6030_data *data = iio_priv(indio_dev);
+ ret = devm_iio_init_iio_gts(dev, 0, 409600000,
+ veml6035_gain_sel, ARRAY_SIZE(veml6035_gain_sel),
+ veml6030_it_sel, ARRAY_SIZE(veml6030_it_sel),
+ &data->gts);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init iio gts\n");
+
ret = veml6030_als_shut_down(data);
if (ret)
return dev_err_probe(dev, ret, "can't shutdown als\n");
@@ -1092,11 +1076,6 @@ static int veml6035_hw_init(struct iio_dev *indio_dev, struct device *dev)
return dev_err_probe(dev, ret,
"can't clear als interrupt status\n");
- /* Cache currently active measurement parameters */
- data->cur_gain = 5;
- data->cur_resolution = 1024;
- data->cur_integration_time = 3;
-
return 0;
}
@@ -1143,6 +1122,11 @@ static int veml6030_probe(struct i2c_client *client)
if (ret < 0)
return ret;
+ ret = veml6030_regfield_init(indio_dev);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "failed to init regfields\n");
+
ret = data->chip->hw_init(indio_dev, &client->dev);
if (ret < 0)
return ret;
@@ -1187,38 +1171,35 @@ static DEFINE_RUNTIME_DEV_PM_OPS(veml6030_pm_ops, veml6030_runtime_suspend,
static const struct veml603x_chip veml6030_chip = {
.name = "veml6030",
- .scale_vals = &veml6030_scale_vals,
- .num_scale_vals = ARRAY_SIZE(veml6030_scale_vals),
.channels = veml6030_channels,
.num_channels = ARRAY_SIZE(veml6030_channels),
+ .gain_rf = VEML6030_GAIN_RF,
+ .it_rf = VEML6030_IT_RF,
+ .max_scale = VEML6030_MAX_SCALE,
.hw_init = veml6030_hw_init,
.set_info = veml6030_set_info,
- .set_als_gain = veml6030_set_als_gain,
- .get_als_gain = veml6030_get_als_gain,
};
static const struct veml603x_chip veml6035_chip = {
.name = "veml6035",
- .scale_vals = &veml6035_scale_vals,
- .num_scale_vals = ARRAY_SIZE(veml6035_scale_vals),
.channels = veml6030_channels,
.num_channels = ARRAY_SIZE(veml6030_channels),
+ .gain_rf = VEML6035_GAIN_RF,
+ .it_rf = VEML6030_IT_RF,
+ .max_scale = VEML6035_MAX_SCALE,
.hw_init = veml6035_hw_init,
.set_info = veml6030_set_info,
- .set_als_gain = veml6035_set_als_gain,
- .get_als_gain = veml6035_get_als_gain,
};
static const struct veml603x_chip veml7700_chip = {
.name = "veml7700",
- .scale_vals = &veml6030_scale_vals,
- .num_scale_vals = ARRAY_SIZE(veml6030_scale_vals),
.channels = veml7700_channels,
.num_channels = ARRAY_SIZE(veml7700_channels),
+ .gain_rf = VEML6030_GAIN_RF,
+ .it_rf = VEML6030_IT_RF,
+ .max_scale = VEML6030_MAX_SCALE,
.hw_init = veml6030_hw_init,
.set_info = veml7700_set_info,
- .set_als_gain = veml6030_set_als_gain,
- .get_als_gain = veml6030_get_als_gain,
};
static const struct of_device_id veml6030_of_match[] = {
@@ -1260,3 +1241,4 @@ module_i2c_driver(veml6030_driver);
MODULE_AUTHOR("Rishi Gupta <gupt21@gmail.com>");
MODULE_DESCRIPTION("VEML6030 Ambient Light Sensor");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS("IIO_GTS_HELPER");
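
The veml6030 conversion above retires the driver's private gain/resolution bookkeeping in favour of the shared IIO gain-time-scale (GTS) helper. As a rough sketch of how a GTS-based driver is wired up (the table values and all example_* names below are illustrative assumptions, not taken from this patch):

#include <linux/device.h>
#include <linux/iio/iio.h>
#include <linux/iio/iio-gts-helper.h>

/* Hardware gain selector values paired with their multipliers */
static const struct iio_gain_sel_pair example_gains[] = {
	GAIN_SCALE_GAIN(1, 0x0),
	GAIN_SCALE_GAIN(2, 0x1),
};

/* Integration times: 100 ms is the reference, 200 ms doubles sensitivity */
static const struct iio_itime_sel_mul example_itimes[] = {
	GAIN_SCALE_ITIME_US(200000, 0x1, 2),
	GAIN_SCALE_ITIME_US(100000, 0x0, 1),
};

static int example_gts_init(struct device *dev, struct iio_gts *gts)
{
	/* Overall maximum scale for the hypothetical part: 2.150400000 */
	return devm_iio_init_iio_gts(dev, 2, 150400000,
				     example_gains, ARRAY_SIZE(example_gains),
				     example_itimes, ARRAY_SIZE(example_itimes),
				     gts);
}

/* read_raw(IIO_CHAN_INFO_SCALE) then reduces to a single helper call */
static int example_get_scale(struct iio_gts *gts, int gain, int time_us,
			     int *val, int *val2)
{
	int ret = iio_gts_get_scale(gts, gain, time_us, val, val2);

	return ret ?: IIO_VAL_INT_PLUS_NANO;
}

With the tables registered once, the available-scales and available-times sysfs lists also come for free from iio_gts_all_avail_scales() and iio_gts_avail_times(), as the read_avail() change above shows.
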
diff --git a/drivers/iio/light/veml6075.c b/drivers/iio/light/veml6075.c
index 05d4c0e9015d..859891e8f115 100644
--- a/drivers/iio/light/veml6075.c
+++ b/drivers/iio/light/veml6075.c
@@ -195,13 +195,17 @@ static int veml6075_read_uv_direct(struct veml6075_data *data, int chan,
static int veml6075_read_int_time_index(struct veml6075_data *data)
{
- int ret, conf;
+ int ret, conf, int_index;
ret = regmap_read(data->regmap, VEML6075_CMD_CONF, &conf);
if (ret < 0)
return ret;
- return FIELD_GET(VEML6075_CONF_IT, conf);
+ int_index = FIELD_GET(VEML6075_CONF_IT, conf);
+ if (int_index >= ARRAY_SIZE(veml6075_it_ms))
+ return -EINVAL;
+
+ return int_index;
}
static int veml6075_read_int_time_ms(struct veml6075_data *data, int *val)
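
The veml6075 fix above is a standard hardening pattern: a field read back from hardware is range-checked against the lookup table it indexes before use. In isolation, with a hypothetical table and names:

/* Reject out-of-range selector values before indexing the table */
static int example_it_ms_from_reg(unsigned int it_sel)
{
	static const int example_it_ms[] = { 50, 100, 200, 400, 800 };

	if (it_sel >= ARRAY_SIZE(example_it_ms))
		return -EINVAL;

	return example_it_ms[it_sel];
}
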
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 7177cd1d67cb..3debf1320ad1 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -235,6 +235,17 @@ config SENSORS_RM3100_SPI
To compile this driver as a module, choose M here: the module
will be called rm3100-spi.
+config SI7210
+ tristate "SI7210 Hall effect sensor"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y here to add support for the Silicon Labs Si7210 Hall effect
+ sensor.
+
+ To compile this driver as a module, choose M here: the module
+ will be called si7210.
+
config TI_TMAG5273
tristate "TI TMAG5273 Low-Power Linear 3D Hall-Effect Sensor"
depends on I2C
diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile
index 3e4c2ecd9adf..9297723a97d8 100644
--- a/drivers/iio/magnetometer/Makefile
+++ b/drivers/iio/magnetometer/Makefile
@@ -31,6 +31,8 @@ obj-$(CONFIG_SENSORS_RM3100) += rm3100-core.o
obj-$(CONFIG_SENSORS_RM3100_I2C) += rm3100-i2c.o
obj-$(CONFIG_SENSORS_RM3100_SPI) += rm3100-spi.o
+obj-$(CONFIG_SI7210) += si7210.o
+
obj-$(CONFIG_TI_TMAG5273) += tmag5273.o
obj-$(CONFIG_YAMAHA_YAS530) += yamaha-yas530.o
diff --git a/drivers/iio/magnetometer/af8133j.c b/drivers/iio/magnetometer/af8133j.c
index a70bf8a3c73b..c1fc339e85b4 100644
--- a/drivers/iio/magnetometer/af8133j.c
+++ b/drivers/iio/magnetometer/af8133j.c
@@ -383,7 +383,6 @@ static const struct regmap_config af8133j_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = AF8133J_REG_SWR,
- .cache_type = REGCACHE_NONE,
};
static void af8133j_power_down_action(void *ptr)
diff --git a/drivers/iio/magnetometer/si7210.c b/drivers/iio/magnetometer/si7210.c
new file mode 100644
index 000000000000..27e3feba7a0f
--- /dev/null
+++ b/drivers/iio/magnetometer/si7210.c
@@ -0,0 +1,446 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Silicon Labs Si7210 Hall Effect sensor driver
+ *
+ * Copyright (c) 2024 Antoni Pokusinski <apokusinski01@gmail.com>
+ *
+ * Datasheet:
+ * https://www.silabs.com/documents/public/data-sheets/si7210-datasheet.pdf
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+#include <linux/units.h>
+#include <asm/byteorder.h>
+
+/* Registers offsets and masks */
+#define SI7210_REG_DSPSIGM 0xC1
+#define SI7210_REG_DSPSIGL 0xC2
+
+#define SI7210_MASK_DSPSIGSEL GENMASK(2, 0)
+#define SI7210_REG_DSPSIGSEL 0xC3
+
+#define SI7210_MASK_STOP BIT(1)
+#define SI7210_MASK_ONEBURST BIT(2)
+#define SI7210_REG_POWER_CTRL 0xC4
+
+#define SI7210_MASK_ARAUTOINC BIT(0)
+#define SI7210_REG_ARAUTOINC 0xC5
+
+#define SI7210_REG_A0 0xCA
+#define SI7210_REG_A1 0xCB
+#define SI7210_REG_A2 0xCC
+#define SI7210_REG_A3 0xCE
+#define SI7210_REG_A4 0xCF
+#define SI7210_REG_A5 0xD0
+
+#define SI7210_REG_OTP_ADDR 0xE1
+#define SI7210_REG_OTP_DATA 0xE2
+
+#define SI7210_MASK_OTP_READ_EN BIT(1)
+#define SI7210_REG_OTP_CTRL 0xE3
+
+/* OTP data registers offsets */
+#define SI7210_OTPREG_TMP_OFF 0x1D
+#define SI7210_OTPREG_TMP_GAIN 0x1E
+
+#define SI7210_OTPREG_A0_20 0x21
+#define SI7210_OTPREG_A1_20 0x22
+#define SI7210_OTPREG_A2_20 0x23
+#define SI7210_OTPREG_A3_20 0x24
+#define SI7210_OTPREG_A4_20 0x25
+#define SI7210_OTPREG_A5_20 0x26
+
+#define SI7210_OTPREG_A0_200 0x27
+#define SI7210_OTPREG_A1_200 0x28
+#define SI7210_OTPREG_A2_200 0x29
+#define SI7210_OTPREG_A3_200 0x2A
+#define SI7210_OTPREG_A4_200 0x2B
+#define SI7210_OTPREG_A5_200 0x2C
+
+#define A_REGS_COUNT 6
+
+static const unsigned int a20_otp_regs[A_REGS_COUNT] = {
+ SI7210_OTPREG_A0_20, SI7210_OTPREG_A1_20, SI7210_OTPREG_A2_20,
+ SI7210_OTPREG_A3_20, SI7210_OTPREG_A4_20, SI7210_OTPREG_A5_20,
+};
+
+static const unsigned int a200_otp_regs[A_REGS_COUNT] = {
+ SI7210_OTPREG_A0_200, SI7210_OTPREG_A1_200, SI7210_OTPREG_A2_200,
+ SI7210_OTPREG_A3_200, SI7210_OTPREG_A4_200, SI7210_OTPREG_A5_200,
+};
+
+static const struct regmap_range si7210_read_reg_ranges[] = {
+ regmap_reg_range(SI7210_REG_DSPSIGM, SI7210_REG_ARAUTOINC),
+ regmap_reg_range(SI7210_REG_A0, SI7210_REG_A2),
+ regmap_reg_range(SI7210_REG_A3, SI7210_REG_A5),
+ regmap_reg_range(SI7210_REG_OTP_ADDR, SI7210_REG_OTP_CTRL),
+};
+
+static const struct regmap_access_table si7210_readable_regs = {
+ .yes_ranges = si7210_read_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(si7210_read_reg_ranges),
+};
+
+static const struct regmap_range si7210_write_reg_ranges[] = {
+ regmap_reg_range(SI7210_REG_DSPSIGSEL, SI7210_REG_ARAUTOINC),
+ regmap_reg_range(SI7210_REG_A0, SI7210_REG_A2),
+ regmap_reg_range(SI7210_REG_A3, SI7210_REG_A5),
+ regmap_reg_range(SI7210_REG_OTP_ADDR, SI7210_REG_OTP_CTRL),
+};
+
+static const struct regmap_access_table si7210_writeable_regs = {
+ .yes_ranges = si7210_write_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(si7210_write_reg_ranges),
+};
+
+static const struct regmap_range si7210_volatile_reg_ranges[] = {
+ regmap_reg_range(SI7210_REG_DSPSIGM, SI7210_REG_DSPSIGL),
+ regmap_reg_range(SI7210_REG_POWER_CTRL, SI7210_REG_POWER_CTRL),
+};
+
+static const struct regmap_access_table si7210_volatile_regs = {
+ .yes_ranges = si7210_volatile_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(si7210_volatile_reg_ranges),
+};
+
+static const struct regmap_config si7210_regmap_conf = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = SI7210_REG_OTP_CTRL,
+
+ .rd_table = &si7210_readable_regs,
+ .wr_table = &si7210_writeable_regs,
+ .volatile_table = &si7210_volatile_regs,
+};
+
+struct si7210_data {
+ struct regmap *regmap;
+ struct i2c_client *client;
+ struct regulator *vdd;
+ struct mutex fetch_lock; /* lock for a single measurement fetch */
+ s8 temp_offset;
+ s8 temp_gain;
+ s8 scale_20_a[A_REGS_COUNT];
+ s8 scale_200_a[A_REGS_COUNT];
+ u8 curr_scale;
+};
+
+static const struct iio_chan_spec si7210_channels[] = {
+ {
+ .type = IIO_MAGN,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET),
+ }, {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ },
+};
+
+static int si7210_fetch_measurement(struct si7210_data *data,
+ struct iio_chan_spec const *chan,
+ u16 *buf)
+{
+ u8 dspsigsel = chan->type == IIO_MAGN ? 0 : 1;
+ int ret;
+ __be16 result;
+
+ guard(mutex)(&data->fetch_lock);
+
+ ret = regmap_update_bits(data->regmap, SI7210_REG_DSPSIGSEL,
+ SI7210_MASK_DSPSIGSEL, dspsigsel);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap, SI7210_REG_POWER_CTRL,
+ SI7210_MASK_ONEBURST | SI7210_MASK_STOP,
+ SI7210_MASK_ONEBURST & ~SI7210_MASK_STOP);
+ if (ret)
+ return ret;
+
+ /* Read the two result registers, DSPSIGM then DSPSIGL */
+ ret = regmap_bulk_read(data->regmap, SI7210_REG_DSPSIGM,
+ &result, sizeof(result));
+ if (ret)
+ return ret;
+
+ *buf = be16_to_cpu(result);
+
+ return 0;
+}
+
+static int si7210_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct si7210_data *data = iio_priv(indio_dev);
+ long long temp;
+ u16 dspsig;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = si7210_fetch_measurement(data, chan, &dspsig);
+ if (ret)
+ return ret;
+
+ *val = dspsig & GENMASK(14, 0);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ if (data->curr_scale == 20)
+ *val2 = 12500;
+ else /* data->curr_scale == 200 */
+ *val2 = 125000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = -16384;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_PROCESSED:
+ ret = si7210_fetch_measurement(data, chan, &dspsig);
+ if (ret)
+ return ret;
+
+ /* temp = 32 * Dspsigm[6:0] + (Dspsigl[7:0] >> 3) */
+ temp = FIELD_GET(GENMASK(14, 3), dspsig);
+ temp = div_s64(-383 * temp * temp, 100) + 160940 * temp - 279800000;
+ /* Apply the OTP gain trim: temp *= 1 + temp_gain / 2048, in integer form */
+ temp += div_s64(temp * data->temp_gain, 2048);
+ temp += (int)(MICRO / 16) * data->temp_offset;
+
+ ret = regulator_get_voltage(data->vdd);
+ if (ret < 0)
+ return ret;
+
+ /* temp -= 0.222 * VDD */
+ temp -= 222 * div_s64(ret, MILLI);
+
+ *val = div_s64(temp, MILLI);
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int si7210_set_scale(struct si7210_data *data, unsigned int scale)
+{
+ s8 *a_otp_values;
+ int ret;
+
+ if (scale == 20)
+ a_otp_values = data->scale_20_a;
+ else if (scale == 200)
+ a_otp_values = data->scale_200_a;
+ else
+ return -EINVAL;
+
+ guard(mutex)(&data->fetch_lock);
+
+ /* Write the registers 0xCA - 0xCC */
+ ret = regmap_bulk_write(data->regmap, SI7210_REG_A0, a_otp_values, 3);
+ if (ret)
+ return ret;
+
+ /* Write the registers 0xCE - 0xD0 */
+ ret = regmap_bulk_write(data->regmap, SI7210_REG_A3, &a_otp_values[3], 3);
+ if (ret)
+ return ret;
+
+ data->curr_scale = scale;
+
+ return 0;
+}
+
+static int si7210_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct si7210_data *data = iio_priv(indio_dev);
+ unsigned int scale;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ if (val == 0 && val2 == 12500)
+ scale = 20;
+ else if (val == 0 && val2 == 125000)
+ scale = 200;
+ else
+ return -EINVAL;
+
+ return si7210_set_scale(data, scale);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int si7210_read_otpreg_val(struct si7210_data *data, unsigned int otpreg, u8 *val)
+{
+ int ret;
+ unsigned int otpdata;
+
+ ret = regmap_write(data->regmap, SI7210_REG_OTP_ADDR, otpreg);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap, SI7210_REG_OTP_CTRL,
+ SI7210_MASK_OTP_READ_EN, SI7210_MASK_OTP_READ_EN);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(data->regmap, SI7210_REG_OTP_DATA, &otpdata);
+ if (ret)
+ return ret;
+
+ *val = otpdata;
+
+ return 0;
+}
+
+/*
+ * According to the datasheet, the primary method to wake up the
+ * device is to send an empty write. However, this is not feasible
+ * with the current I2C API, so we use the alternative method of
+ * reading a single byte. The device should respond with 0xFF.
+ */
+static int si7210_device_wake(struct si7210_data *data)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte(data->client);
+ if (ret < 0)
+ return ret;
+
+ if (ret != 0xFF)
+ return -EIO;
+
+ return 0;
+}
+
+static int si7210_device_init(struct si7210_data *data)
+{
+ int ret;
+ unsigned int i;
+
+ ret = si7210_device_wake(data);
+ if (ret)
+ return ret;
+
+ fsleep(1000);
+
+ ret = si7210_read_otpreg_val(data, SI7210_OTPREG_TMP_GAIN, &data->temp_gain);
+ if (ret)
+ return ret;
+
+ ret = si7210_read_otpreg_val(data, SI7210_OTPREG_TMP_OFF, &data->temp_offset);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < A_REGS_COUNT; i++) {
+ ret = si7210_read_otpreg_val(data, a20_otp_regs[i], &data->scale_20_a[i]);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < A_REGS_COUNT; i++) {
+ ret = si7210_read_otpreg_val(data, a200_otp_regs[i], &data->scale_200_a[i]);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_update_bits(data->regmap, SI7210_REG_ARAUTOINC,
+ SI7210_MASK_ARAUTOINC, SI7210_MASK_ARAUTOINC);
+ if (ret)
+ return ret;
+
+ return si7210_set_scale(data, 20);
+}
+
+static const struct iio_info si7210_info = {
+ .read_raw = si7210_read_raw,
+ .write_raw = si7210_write_raw,
+};
+
+static int si7210_probe(struct i2c_client *client)
+{
+ struct si7210_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+
+ ret = devm_mutex_init(&client->dev, &data->fetch_lock);
+ if (ret)
+ return ret;
+
+ data->regmap = devm_regmap_init_i2c(client, &si7210_regmap_conf);
+ if (IS_ERR(data->regmap))
+ return dev_err_probe(&client->dev, PTR_ERR(data->regmap),
+ "failed to register regmap\n");
+
+ data->vdd = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(data->vdd))
+ return dev_err_probe(&client->dev, PTR_ERR(data->vdd),
+ "failed to get VDD regulator\n");
+
+ ret = regulator_enable(data->vdd);
+ if (ret)
+ return ret;
+
+ indio_dev->name = dev_name(&client->dev);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &si7210_info;
+ indio_dev->channels = si7210_channels;
+ indio_dev->num_channels = ARRAY_SIZE(si7210_channels);
+
+ ret = si7210_device_init(data);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "device initialization failed\n");
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id si7210_id[] = {
+ { "si7210" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si7210_id);
+
+static const struct of_device_id si7210_dt_ids[] = {
+ { .compatible = "silabs,si7210" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, si7210_dt_ids);
+
+static struct i2c_driver si7210_driver = {
+ .driver = {
+ .name = "si7210",
+ .of_match_table = si7210_dt_ids,
+ },
+ .probe = si7210_probe,
+ .id_table = si7210_id,
+};
+module_i2c_driver(si7210_driver);
+
+MODULE_AUTHOR("Antoni Pokusinski <apokusinski01@gmail.com>");
+MODULE_DESCRIPTION("Silicon Labs Si7210 Hall Effect sensor I2C driver");
+MODULE_LICENSE("GPL");
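
For reference, the si7210 temperature path above evaluates the datasheet polynomial in integer micro-degrees Celsius before scaling down to the milli-degree units IIO reports. A standalone sketch of the core arithmetic, ignoring the OTP trim and supply-voltage compensation (names are hypothetical):

#include <linux/math64.h>
#include <linux/types.h>

/*
 * T[C] = -3.83e-6 * raw^2 + 0.16094 * raw - 279.8, computed in
 * micro-degrees so no floating point is needed.
 */
static s64 example_si7210_temp_microcelsius(u16 raw12)
{
	s64 t = raw12;	/* 12-bit field taken from DSPSIG bits [14:3] */

	return div_s64(-383 * t * t, 100) + 160940 * t - 279800000;
}

/* e.g. raw12 = 2000: -15320000 + 321880000 - 279800000 = 26760000, ~26.8 C */
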
diff --git a/drivers/iio/pressure/zpa2326_i2c.c b/drivers/iio/pressure/zpa2326_i2c.c
index 49a239ebdabf..a6034bf05d97 100644
--- a/drivers/iio/pressure/zpa2326_i2c.c
+++ b/drivers/iio/pressure/zpa2326_i2c.c
@@ -25,7 +25,6 @@ static const struct regmap_config zpa2326_regmap_i2c_config = {
.precious_reg = zpa2326_isreg_precious,
.max_register = ZPA2326_TEMP_OUT_H_REG,
.read_flag_mask = BIT(7),
- .cache_type = REGCACHE_NONE,
};
static unsigned int zpa2326_i2c_hwid(const struct i2c_client *client)
diff --git a/drivers/iio/pressure/zpa2326_spi.c b/drivers/iio/pressure/zpa2326_spi.c
index 317270fa1c43..c678f5b96266 100644
--- a/drivers/iio/pressure/zpa2326_spi.c
+++ b/drivers/iio/pressure/zpa2326_spi.c
@@ -26,7 +26,6 @@ static const struct regmap_config zpa2326_regmap_spi_config = {
.precious_reg = zpa2326_isreg_precious,
.max_register = ZPA2326_TEMP_OUT_H_REG,
.read_flag_mask = BIT(7) | BIT(6),
- .cache_type = REGCACHE_NONE,
};
static int zpa2326_probe_spi(struct spi_device *spi)
diff --git a/drivers/iio/proximity/irsd200.c b/drivers/iio/proximity/irsd200.c
index b09d15230111..b0ffd3574013 100644
--- a/drivers/iio/proximity/irsd200.c
+++ b/drivers/iio/proximity/irsd200.c
@@ -10,6 +10,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include <linux/iio/buffer.h>
#include <linux/iio/events.h>
@@ -783,7 +784,7 @@ static int irsd200_set_trigger_state(struct iio_trigger *trig, bool state)
ret = regmap_field_write(data->regfields[IRS_REGF_INTR_DATA], state);
if (ret) {
dev_err(data->dev, "Could not %s data interrupt source (%d)\n",
- state ? "enable" : "disable", ret);
+ str_enable_disable(state), ret);
}
return ret;
diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
index 0d7f0518d4fb..b60707eba39d 100644
--- a/drivers/iio/proximity/sx9310.c
+++ b/drivers/iio/proximity/sx9310.c
@@ -337,19 +337,26 @@ static int sx9310_read_raw(struct iio_dev *indio_dev,
int *val2, long mask)
{
struct sx_common_data *data = iio_priv(indio_dev);
+ int ret;
if (chan->type != IIO_PROXIMITY)
return -EINVAL;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return sx_common_read_proximity(data, chan, val);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = sx_common_read_proximity(data, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_HARDWAREGAIN:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return sx9310_read_gain(data, chan, val);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = sx9310_read_gain(data, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
return sx9310_read_samp_freq(data, val, val2);
default:
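
This sx9310 hunk, and the sx9324/sx9360/tmp006 hunks that follow, all perform the same migration: the scoped claim helper is replaced by an explicit iio_device_claim_direct()/iio_device_release_direct() pair, where the claim now returns a bool rather than an error code. The resulting shape, sketched for a hypothetical driver (example_read_sensor() is a stand-in):

static int example_read_raw(struct iio_dev *indio_dev,
			    struct iio_chan_spec const *chan,
			    int *val, int *val2, long mask)
{
	int ret;

	/* false means the device is busy in buffered mode */
	if (!iio_device_claim_direct(indio_dev))
		return -EBUSY;

	ret = example_read_sensor(indio_dev, chan, val);
	iio_device_release_direct(indio_dev);

	return ret ?: IIO_VAL_INT;
}
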
diff --git a/drivers/iio/proximity/sx9324.c b/drivers/iio/proximity/sx9324.c
index f7819dd2775c..73d972416c01 100644
--- a/drivers/iio/proximity/sx9324.c
+++ b/drivers/iio/proximity/sx9324.c
@@ -429,16 +429,23 @@ static int sx9324_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct sx_common_data *data = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return sx_common_read_proximity(data, chan, val);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = sx_common_read_proximity(data, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_HARDWAREGAIN:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return sx9324_read_gain(data, chan, val);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = sx9324_read_gain(data, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
return sx9324_read_samp_freq(data, val, val2);
default:
diff --git a/drivers/iio/proximity/sx9360.c b/drivers/iio/proximity/sx9360.c
index a6ff16e33c1e..4448988d4e7e 100644
--- a/drivers/iio/proximity/sx9360.c
+++ b/drivers/iio/proximity/sx9360.c
@@ -321,16 +321,23 @@ static int sx9360_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct sx_common_data *data = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return sx_common_read_proximity(data, chan, val);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = sx_common_read_proximity(data, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_HARDWAREGAIN:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return sx9360_read_gain(data, chan, val);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = sx9360_read_gain(data, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
return sx9360_read_samp_freq(data, val, val2);
default:
diff --git a/drivers/iio/resolver/ad2s1210.c b/drivers/iio/resolver/ad2s1210.c
index b681129a99b6..ab860cedecd1 100644
--- a/drivers/iio/resolver/ad2s1210.c
+++ b/drivers/iio/resolver/ad2s1210.c
@@ -46,6 +46,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/bitmap.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
@@ -175,15 +176,14 @@ struct ad2s1210_state {
static int ad2s1210_set_mode(struct ad2s1210_state *st, enum ad2s1210_mode mode)
{
struct gpio_descs *gpios = st->mode_gpios;
- DECLARE_BITMAP(bitmap, 2);
+ DECLARE_BITMAP(bitmap, 2) = { };
if (!gpios)
return mode == st->fixed_mode ? 0 : -EOPNOTSUPP;
- bitmap[0] = mode;
+ bitmap_write(bitmap, mode, 0, 2);
- return gpiod_set_array_value(gpios->ndescs, gpios->desc, gpios->info,
- bitmap);
+ return gpiod_multi_set_value_cansleep(gpios, bitmap);
}
/*
@@ -1427,7 +1427,7 @@ static int ad2s1210_setup_gpios(struct ad2s1210_state *st)
struct device *dev = &st->sdev->dev;
struct gpio_descs *resolution_gpios;
struct gpio_desc *reset_gpio;
- DECLARE_BITMAP(bitmap, 2);
+ DECLARE_BITMAP(bitmap, 2) = { };
int ret;
/* should not be sampling on startup */
@@ -1471,12 +1471,9 @@ static int ad2s1210_setup_gpios(struct ad2s1210_state *st)
return dev_err_probe(dev, -EINVAL,
"requires exactly 2 resolution-gpios\n");
- bitmap[0] = st->resolution;
+ bitmap_write(bitmap, st->resolution, 0, 2);
- ret = gpiod_set_array_value(resolution_gpios->ndescs,
- resolution_gpios->desc,
- resolution_gpios->info,
- bitmap);
+ ret = gpiod_multi_set_value_cansleep(resolution_gpios, bitmap);
if (ret < 0)
return dev_err_probe(dev, ret,
"failed to set resolution gpios\n");
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
index 1998047a1f24..b5c94b7492f5 100644
--- a/drivers/iio/temperature/tmp006.c
+++ b/drivers/iio/temperature/tmp006.c
@@ -85,19 +85,25 @@ static int tmp006_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW:
if (channel->type == IIO_VOLTAGE) {
/* LSB is 156.25 nV */
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- ret = tmp006_read_measurement(data, TMP006_VOBJECT);
- if (ret < 0)
- return ret;
- }
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = tmp006_read_measurement(data, TMP006_VOBJECT);
+ iio_device_release_direct(indio_dev);
+ if (ret < 0)
+ return ret;
+
*val = sign_extend32(ret, 15);
} else if (channel->type == IIO_TEMP) {
/* LSB is 0.03125 degrees Celsius */
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- ret = tmp006_read_measurement(data, TMP006_TAMBIENT);
- if (ret < 0)
- return ret;
- }
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = tmp006_read_measurement(data, TMP006_TAMBIENT);
+ iio_device_release_direct(indio_dev);
+ if (ret < 0)
+ return ret;
+
*val = sign_extend32(ret, 15) >> TMP006_TAMBIENT_SHIFT;
} else {
break;
@@ -142,9 +148,8 @@ static int tmp006_write_raw(struct iio_dev *indio_dev,
for (i = 0; i < ARRAY_SIZE(tmp006_freqs); i++)
if ((val == tmp006_freqs[i][0]) &&
(val2 == tmp006_freqs[i][1])) {
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
data->config &= ~TMP006_CONFIG_CR_MASK;
data->config |= i << TMP006_CONFIG_CR_SHIFT;
@@ -153,7 +158,7 @@ static int tmp006_write_raw(struct iio_dev *indio_dev,
TMP006_CONFIG,
data->config);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
return -EINVAL;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index d23074383428..804bc773b4ef 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -160,7 +160,7 @@ static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
wait_event_timeout(cmdq->waitq,
!crsqe->is_in_used ||
test_bit(ERR_DEVICE_DETACHED, &cmdq->flags),
- msecs_to_jiffies(rcfw->max_timeout * 1000));
+ secs_to_jiffies(rcfw->max_timeout));
if (!crsqe->is_in_used)
return 0;
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index b27791029fa9..b9f4a2937c3a 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -55,6 +55,7 @@ static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
struct inode *inode = new_inode(dir->i_sb);
if (!inode) {
+ dput(dentry);
error = -EPERM;
goto bail;
}
diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
index 2f2d925a55d7..00c87c0532a6 100644
--- a/drivers/input/mouse/cyapa.c
+++ b/drivers/input/mouse/cyapa.c
@@ -1080,8 +1080,8 @@ static ssize_t cyapa_update_fw_store(struct device *dev,
char fw_name[NAME_MAX];
int ret, error;
- if (count >= NAME_MAX) {
- dev_err(dev, "File name too long\n");
+ if (!count || count >= NAME_MAX) {
+ dev_err(dev, "Bad file name size\n");
return -EINVAL;
}
diff --git a/drivers/input/touchscreen/goodix_berlin_core.c b/drivers/input/touchscreen/goodix_berlin_core.c
index 7f8cfdd106fa..f7ea443b152e 100644
--- a/drivers/input/touchscreen/goodix_berlin_core.c
+++ b/drivers/input/touchscreen/goodix_berlin_core.c
@@ -673,7 +673,7 @@ static void goodix_berlin_power_off_act(void *data)
}
static ssize_t registers_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -686,7 +686,7 @@ static ssize_t registers_read(struct file *filp, struct kobject *kobj,
}
static ssize_t registers_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -698,15 +698,15 @@ static ssize_t registers_write(struct file *filp, struct kobject *kobj,
return error ? error : count;
}
-static BIN_ATTR_ADMIN_RW(registers, 0);
+static const BIN_ATTR_ADMIN_RW(registers, 0);
-static struct bin_attribute *goodix_berlin_bin_attrs[] = {
+static const struct bin_attribute *const goodix_berlin_bin_attrs[] = {
&bin_attr_registers,
NULL,
};
static const struct attribute_group goodix_berlin_attr_group = {
- .bin_attrs = goodix_berlin_bin_attrs,
+ .bin_attrs_new = goodix_berlin_bin_attrs,
};
const struct attribute_group *goodix_berlin_groups[] = {
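
The goodix change above follows the tree-wide sysfs constification: binary attribute callbacks take a const struct bin_attribute, and the const attribute array is registered through the group's .bin_attrs_new pointer. A minimal hypothetical group of the same shape:

static ssize_t blob_read(struct file *filp, struct kobject *kobj,
			 const struct bin_attribute *attr,
			 char *buf, loff_t off, size_t count)
{
	/* fill @buf with up to @count bytes of device data starting at @off */
	return 0;
}

static const BIN_ATTR_ADMIN_RO(blob, 0);

static const struct bin_attribute *const example_bin_attrs[] = {
	&bin_attr_blob,
	NULL,
};

static const struct attribute_group example_group = {
	.bin_attrs_new = example_bin_attrs,
};
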
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
index 5aa2e7af58b4..e4fd8d522af8 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
@@ -43,6 +43,8 @@ static void arm_smmu_make_nested_cd_table_ste(
target->data[0] |= nested_domain->ste[0] &
~cpu_to_le64(STRTAB_STE_0_CFG);
target->data[1] |= nested_domain->ste[1];
+ /* Merge events for DoS mitigations on eventq */
+ target->data[1] |= cpu_to_le64(STRTAB_STE_1_MEV);
}
/*
@@ -85,6 +87,47 @@ static void arm_smmu_make_nested_domain_ste(
}
}
+int arm_smmu_attach_prepare_vmaster(struct arm_smmu_attach_state *state,
+ struct arm_smmu_nested_domain *nested_domain)
+{
+ struct arm_smmu_vmaster *vmaster;
+ unsigned long vsid;
+ int ret;
+
+ iommu_group_mutex_assert(state->master->dev);
+
+ ret = iommufd_viommu_get_vdev_id(&nested_domain->vsmmu->core,
+ state->master->dev, &vsid);
+ if (ret)
+ return ret;
+
+ vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
+ if (!vmaster)
+ return -ENOMEM;
+ vmaster->vsmmu = nested_domain->vsmmu;
+ vmaster->vsid = vsid;
+ state->vmaster = vmaster;
+
+ return 0;
+}
+
+void arm_smmu_attach_commit_vmaster(struct arm_smmu_attach_state *state)
+{
+ struct arm_smmu_master *master = state->master;
+
+ mutex_lock(&master->smmu->streams_mutex);
+ kfree(master->vmaster);
+ master->vmaster = state->vmaster;
+ mutex_unlock(&master->smmu->streams_mutex);
+}
+
+void arm_smmu_master_clear_vmaster(struct arm_smmu_master *master)
+{
+ struct arm_smmu_attach_state state = { .master = master };
+
+ arm_smmu_attach_commit_vmaster(&state);
+}
+
static int arm_smmu_attach_dev_nested(struct iommu_domain *domain,
struct device *dev)
{
@@ -392,4 +435,21 @@ struct iommufd_viommu *arm_vsmmu_alloc(struct device *dev,
return &vsmmu->core;
}
+int arm_vmaster_report_event(struct arm_smmu_vmaster *vmaster, u64 *evt)
+{
+ struct iommu_vevent_arm_smmuv3 vevt;
+ int i;
+
+ lockdep_assert_held(&vmaster->vsmmu->smmu->streams_mutex);
+
+ vevt.evt[0] = cpu_to_le64((evt[0] & ~EVTQ_0_SID) |
+ FIELD_PREP(EVTQ_0_SID, vmaster->vsid));
+ for (i = 1; i < EVTQ_ENT_DWORDS; i++)
+ vevt.evt[i] = cpu_to_le64(evt[i]);
+
+ return iommufd_viommu_report_event(&vmaster->vsmmu->core,
+ IOMMU_VEVENTQ_TYPE_ARM_SMMUV3, &vevt,
+ sizeof(vevt));
+}
+
MODULE_IMPORT_NS("IOMMUFD");
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 358072b4e293..b4c21aaed126 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1052,7 +1052,7 @@ void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits)
cpu_to_le64(STRTAB_STE_1_S1DSS | STRTAB_STE_1_S1CIR |
STRTAB_STE_1_S1COR | STRTAB_STE_1_S1CSH |
STRTAB_STE_1_S1STALLD | STRTAB_STE_1_STRW |
- STRTAB_STE_1_EATS);
+ STRTAB_STE_1_EATS | STRTAB_STE_1_MEV);
used_bits[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID);
/*
@@ -1068,7 +1068,7 @@ void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits)
if (cfg & BIT(1)) {
used_bits[1] |=
cpu_to_le64(STRTAB_STE_1_S2FWB | STRTAB_STE_1_EATS |
- STRTAB_STE_1_SHCFG);
+ STRTAB_STE_1_SHCFG | STRTAB_STE_1_MEV);
used_bits[2] |=
cpu_to_le64(STRTAB_STE_2_S2VMID | STRTAB_STE_2_VTCR |
STRTAB_STE_2_S2AA64 | STRTAB_STE_2_S2ENDI |
@@ -1813,8 +1813,8 @@ static void arm_smmu_decode_event(struct arm_smmu_device *smmu, u64 *raw,
mutex_unlock(&smmu->streams_mutex);
}
-static int arm_smmu_handle_event(struct arm_smmu_device *smmu,
- struct arm_smmu_event *event)
+static int arm_smmu_handle_event(struct arm_smmu_device *smmu, u64 *evt,
+ struct arm_smmu_event *event)
{
int ret = 0;
u32 perm = 0;
@@ -1823,6 +1823,10 @@ static int arm_smmu_handle_event(struct arm_smmu_device *smmu,
struct iommu_fault *flt = &fault_evt.fault;
switch (event->id) {
+ case EVT_ID_BAD_STE_CONFIG:
+ case EVT_ID_STREAM_DISABLED_FAULT:
+ case EVT_ID_BAD_SUBSTREAMID_CONFIG:
+ case EVT_ID_BAD_CD_CONFIG:
case EVT_ID_TRANSLATION_FAULT:
case EVT_ID_ADDR_SIZE_FAULT:
case EVT_ID_ACCESS_FAULT:
@@ -1832,31 +1836,30 @@ static int arm_smmu_handle_event(struct arm_smmu_device *smmu,
return -EOPNOTSUPP;
}
- if (!event->stall)
- return -EOPNOTSUPP;
-
- if (event->read)
- perm |= IOMMU_FAULT_PERM_READ;
- else
- perm |= IOMMU_FAULT_PERM_WRITE;
+ if (event->stall) {
+ if (event->read)
+ perm |= IOMMU_FAULT_PERM_READ;
+ else
+ perm |= IOMMU_FAULT_PERM_WRITE;
- if (event->instruction)
- perm |= IOMMU_FAULT_PERM_EXEC;
+ if (event->instruction)
+ perm |= IOMMU_FAULT_PERM_EXEC;
- if (event->privileged)
- perm |= IOMMU_FAULT_PERM_PRIV;
+ if (event->privileged)
+ perm |= IOMMU_FAULT_PERM_PRIV;
- flt->type = IOMMU_FAULT_PAGE_REQ;
- flt->prm = (struct iommu_fault_page_request) {
- .flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
- .grpid = event->stag,
- .perm = perm,
- .addr = event->iova,
- };
+ flt->type = IOMMU_FAULT_PAGE_REQ;
+ flt->prm = (struct iommu_fault_page_request){
+ .flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
+ .grpid = event->stag,
+ .perm = perm,
+ .addr = event->iova,
+ };
- if (event->ssv) {
- flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
- flt->prm.pasid = event->ssid;
+ if (event->ssv) {
+ flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+ flt->prm.pasid = event->ssid;
+ }
}
mutex_lock(&smmu->streams_mutex);
@@ -1866,7 +1869,12 @@ static int arm_smmu_handle_event(struct arm_smmu_device *smmu,
goto out_unlock;
}
- ret = iommu_report_device_fault(master->dev, &fault_evt);
+ if (event->stall)
+ ret = iommu_report_device_fault(master->dev, &fault_evt);
+ else if (master->vmaster && !event->s2)
+ ret = arm_vmaster_report_event(master->vmaster, evt);
+ else
+ ret = -EOPNOTSUPP; /* Unhandled events should be printed */
out_unlock:
mutex_unlock(&smmu->streams_mutex);
return ret;
@@ -1944,7 +1952,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
do {
while (!queue_remove_raw(q, evt)) {
arm_smmu_decode_event(smmu, evt, &event);
- if (arm_smmu_handle_event(smmu, &event))
+ if (arm_smmu_handle_event(smmu, evt, &event))
arm_smmu_dump_event(smmu, evt, &event, &rs);
put_device(event.dev);
@@ -2803,6 +2811,7 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
struct arm_smmu_domain *smmu_domain =
to_smmu_domain_devices(new_domain);
unsigned long flags;
+ int ret;
/*
* arm_smmu_share_asid() must not see two domains pointing to the same
@@ -2832,9 +2841,18 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
}
if (smmu_domain) {
+ if (new_domain->type == IOMMU_DOMAIN_NESTED) {
+ ret = arm_smmu_attach_prepare_vmaster(
+ state, to_smmu_nested_domain(new_domain));
+ if (ret)
+ return ret;
+ }
+
master_domain = kzalloc(sizeof(*master_domain), GFP_KERNEL);
- if (!master_domain)
+ if (!master_domain) {
+ kfree(state->vmaster);
return -ENOMEM;
+ }
master_domain->master = master;
master_domain->ssid = state->ssid;
if (new_domain->type == IOMMU_DOMAIN_NESTED)
@@ -2861,6 +2879,7 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
spin_unlock_irqrestore(&smmu_domain->devices_lock,
flags);
kfree(master_domain);
+ kfree(state->vmaster);
return -EINVAL;
}
@@ -2893,6 +2912,8 @@ void arm_smmu_attach_commit(struct arm_smmu_attach_state *state)
lockdep_assert_held(&arm_smmu_asid_lock);
+ arm_smmu_attach_commit_vmaster(state);
+
if (state->ats_enabled && !master->ats_enabled) {
arm_smmu_enable_ats(master);
} else if (state->ats_enabled && master->ats_enabled) {
@@ -3162,6 +3183,7 @@ static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
struct arm_smmu_ste ste;
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+ arm_smmu_master_clear_vmaster(master);
arm_smmu_make_bypass_ste(master->smmu, &ste);
arm_smmu_attach_dev_ste(domain, dev, &ste, STRTAB_STE_1_S1DSS_BYPASS);
return 0;
@@ -3180,7 +3202,9 @@ static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain,
struct device *dev)
{
struct arm_smmu_ste ste;
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+ arm_smmu_master_clear_vmaster(master);
arm_smmu_make_abort_ste(&ste);
arm_smmu_attach_dev_ste(domain, dev, &ste,
STRTAB_STE_1_S1DSS_TERMINATE);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index bd9d7c85576a..dd1ad56ce863 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -266,6 +266,7 @@ static inline u32 arm_smmu_strtab_l2_idx(u32 sid)
#define STRTAB_STE_1_S1COR GENMASK_ULL(5, 4)
#define STRTAB_STE_1_S1CSH GENMASK_ULL(7, 6)
+#define STRTAB_STE_1_MEV (1UL << 19)
#define STRTAB_STE_1_S2FWB (1UL << 25)
#define STRTAB_STE_1_S1STALLD (1UL << 27)
@@ -799,6 +800,11 @@ struct arm_smmu_stream {
struct rb_node node;
};
+struct arm_smmu_vmaster {
+ struct arm_vsmmu *vsmmu;
+ unsigned long vsid;
+};
+
struct arm_smmu_event {
u8 stall : 1,
ssv : 1,
@@ -824,6 +830,7 @@ struct arm_smmu_master {
struct arm_smmu_device *smmu;
struct device *dev;
struct arm_smmu_stream *streams;
+ struct arm_smmu_vmaster *vmaster; /* use smmu->streams_mutex */
/* Locked by the iommu core using the group mutex */
struct arm_smmu_ctx_desc_cfg cd_table;
unsigned int num_streams;
@@ -972,6 +979,7 @@ struct arm_smmu_attach_state {
bool disable_ats;
ioasid_t ssid;
/* Resulting state */
+ struct arm_smmu_vmaster *vmaster;
bool ats_enabled;
};
@@ -1055,9 +1063,37 @@ struct iommufd_viommu *arm_vsmmu_alloc(struct device *dev,
struct iommu_domain *parent,
struct iommufd_ctx *ictx,
unsigned int viommu_type);
+int arm_smmu_attach_prepare_vmaster(struct arm_smmu_attach_state *state,
+ struct arm_smmu_nested_domain *nested_domain);
+void arm_smmu_attach_commit_vmaster(struct arm_smmu_attach_state *state);
+void arm_smmu_master_clear_vmaster(struct arm_smmu_master *master);
+int arm_vmaster_report_event(struct arm_smmu_vmaster *vmaster, u64 *evt);
#else
#define arm_smmu_hw_info NULL
#define arm_vsmmu_alloc NULL
+
+static inline int
+arm_smmu_attach_prepare_vmaster(struct arm_smmu_attach_state *state,
+ struct arm_smmu_nested_domain *nested_domain)
+{
+ return 0;
+}
+
+static inline void
+arm_smmu_attach_commit_vmaster(struct arm_smmu_attach_state *state)
+{
+}
+
+static inline void
+arm_smmu_master_clear_vmaster(struct arm_smmu_master *master)
+{
+}
+
+static inline int arm_vmaster_report_event(struct arm_smmu_vmaster *vmaster,
+ u64 *evt)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_ARM_SMMU_V3_IOMMUFD */
#endif /* _ARM_SMMU_V3_H */
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 0832998eca38..6054d0ab8023 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -42,11 +42,6 @@ struct iommu_dma_msi_page {
phys_addr_t phys;
};
-enum iommu_dma_cookie_type {
- IOMMU_DMA_IOVA_COOKIE,
- IOMMU_DMA_MSI_COOKIE,
-};
-
enum iommu_dma_queue_type {
IOMMU_DMA_OPTS_PER_CPU_QUEUE,
IOMMU_DMA_OPTS_SINGLE_QUEUE,
@@ -59,34 +54,30 @@ struct iommu_dma_options {
};
struct iommu_dma_cookie {
- enum iommu_dma_cookie_type type;
+ struct iova_domain iovad;
+ struct list_head msi_page_list;
+ /* Flush queue */
union {
- /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
- struct {
- struct iova_domain iovad;
- /* Flush queue */
- union {
- struct iova_fq *single_fq;
- struct iova_fq __percpu *percpu_fq;
- };
- /* Number of TLB flushes that have been started */
- atomic64_t fq_flush_start_cnt;
- /* Number of TLB flushes that have been finished */
- atomic64_t fq_flush_finish_cnt;
- /* Timer to regularily empty the flush queues */
- struct timer_list fq_timer;
- /* 1 when timer is active, 0 when not */
- atomic_t fq_timer_on;
- };
- /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
- dma_addr_t msi_iova;
+ struct iova_fq *single_fq;
+ struct iova_fq __percpu *percpu_fq;
};
- struct list_head msi_page_list;
-
+ /* Number of TLB flushes that have been started */
+ atomic64_t fq_flush_start_cnt;
+ /* Number of TLB flushes that have been finished */
+ atomic64_t fq_flush_finish_cnt;
+ /* Timer to regularly empty the flush queues */
+ struct timer_list fq_timer;
+ /* 1 when timer is active, 0 when not */
+ atomic_t fq_timer_on;
/* Domain for flush queue callback; NULL if flush queue not in use */
- struct iommu_domain *fq_domain;
+ struct iommu_domain *fq_domain;
/* Options for dma-iommu use */
- struct iommu_dma_options options;
+ struct iommu_dma_options options;
+};
+
+struct iommu_dma_msi_cookie {
+ dma_addr_t msi_iova;
+ struct list_head msi_page_list;
};
static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
@@ -102,9 +93,6 @@ static int __init iommu_dma_forcedac_setup(char *str)
}
early_param("iommu.forcedac", iommu_dma_forcedac_setup);
-static int iommu_dma_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
- phys_addr_t msi_addr);
-
/* Number of entries per flush queue */
#define IOVA_DEFAULT_FQ_SIZE 256
#define IOVA_SINGLE_FQ_SIZE 32768
@@ -368,39 +356,24 @@ int iommu_dma_init_fq(struct iommu_domain *domain)
return 0;
}
-static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
-{
- if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
- return cookie->iovad.granule;
- return PAGE_SIZE;
-}
-
-static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
-{
- struct iommu_dma_cookie *cookie;
-
- cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
- if (cookie) {
- INIT_LIST_HEAD(&cookie->msi_page_list);
- cookie->type = type;
- }
- return cookie;
-}
-
/**
* iommu_get_dma_cookie - Acquire DMA-API resources for a domain
* @domain: IOMMU domain to prepare for DMA-API usage
*/
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
- if (domain->iova_cookie)
+ struct iommu_dma_cookie *cookie;
+
+ if (domain->cookie_type != IOMMU_COOKIE_NONE)
return -EEXIST;
- domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
- if (!domain->iova_cookie)
+ cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+ if (!cookie)
return -ENOMEM;
- iommu_domain_set_sw_msi(domain, iommu_dma_sw_msi);
+ INIT_LIST_HEAD(&cookie->msi_page_list);
+ domain->cookie_type = IOMMU_COOKIE_DMA_IOVA;
+ domain->iova_cookie = cookie;
return 0;
}
@@ -418,54 +391,56 @@ int iommu_get_dma_cookie(struct iommu_domain *domain)
*/
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
- struct iommu_dma_cookie *cookie;
+ struct iommu_dma_msi_cookie *cookie;
if (domain->type != IOMMU_DOMAIN_UNMANAGED)
return -EINVAL;
- if (domain->iova_cookie)
+ if (domain->cookie_type != IOMMU_COOKIE_NONE)
return -EEXIST;
- cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
+ cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
if (!cookie)
return -ENOMEM;
cookie->msi_iova = base;
- domain->iova_cookie = cookie;
- iommu_domain_set_sw_msi(domain, iommu_dma_sw_msi);
+ INIT_LIST_HEAD(&cookie->msi_page_list);
+ domain->cookie_type = IOMMU_COOKIE_DMA_MSI;
+ domain->msi_cookie = cookie;
return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
/**
* iommu_put_dma_cookie - Release a domain's DMA mapping resources
- * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
- * iommu_get_msi_cookie()
+ * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
*/
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iommu_dma_msi_page *msi, *tmp;
-#if IS_ENABLED(CONFIG_IRQ_MSI_IOMMU)
- if (domain->sw_msi != iommu_dma_sw_msi)
- return;
-#endif
-
- if (!cookie)
- return;
-
- if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {
+ if (cookie->iovad.granule) {
iommu_dma_free_fq(cookie);
put_iova_domain(&cookie->iovad);
}
+ list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list)
+ kfree(msi);
+ kfree(cookie);
+}
+
+/**
+ * iommu_put_msi_cookie - Release a domain's MSI mapping resources
+ * @domain: IOMMU domain previously prepared by iommu_get_msi_cookie()
+ */
+void iommu_put_msi_cookie(struct iommu_domain *domain)
+{
+ struct iommu_dma_msi_cookie *cookie = domain->msi_cookie;
+ struct iommu_dma_msi_page *msi, *tmp;
- list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
- list_del(&msi->list);
+ list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list)
kfree(msi);
- }
kfree(cookie);
- domain->iova_cookie = NULL;
}
/**
@@ -685,7 +660,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev
struct iova_domain *iovad;
int ret;
- if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
+ if (!cookie || domain->cookie_type != IOMMU_COOKIE_DMA_IOVA)
return -EINVAL;
iovad = &cookie->iovad;
@@ -768,9 +743,9 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
struct iova_domain *iovad = &cookie->iovad;
unsigned long shift, iova_len, iova;
- if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
- cookie->msi_iova += size;
- return cookie->msi_iova - size;
+ if (domain->cookie_type == IOMMU_COOKIE_DMA_MSI) {
+ domain->msi_cookie->msi_iova += size;
+ return domain->msi_cookie->msi_iova - size;
}
shift = iova_shift(iovad);
@@ -807,16 +782,16 @@ done:
return (dma_addr_t)iova << shift;
}
-static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
- dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
+static void iommu_dma_free_iova(struct iommu_domain *domain, dma_addr_t iova,
+ size_t size, struct iommu_iotlb_gather *gather)
{
- struct iova_domain *iovad = &cookie->iovad;
+ struct iova_domain *iovad = &domain->iova_cookie->iovad;
/* The MSI case is only ever cleaning up its most recent allocation */
- if (cookie->type == IOMMU_DMA_MSI_COOKIE)
- cookie->msi_iova -= size;
+ if (domain->cookie_type == IOMMU_COOKIE_DMA_MSI)
+ domain->msi_cookie->msi_iova -= size;
else if (gather && gather->queued)
- queue_iova(cookie, iova_pfn(iovad, iova),
+ queue_iova(domain->iova_cookie, iova_pfn(iovad, iova),
size >> iova_shift(iovad),
&gather->freelist);
else
@@ -844,7 +819,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
if (!iotlb_gather.queued)
iommu_iotlb_sync(domain, &iotlb_gather);
- iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
+ iommu_dma_free_iova(domain, dma_addr, size, &iotlb_gather);
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
@@ -872,7 +847,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
return DMA_MAPPING_ERROR;
if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) {
- iommu_dma_free_iova(cookie, iova, size, NULL);
+ iommu_dma_free_iova(domain, iova, size, NULL);
return DMA_MAPPING_ERROR;
}
return iova + iova_off;
@@ -1009,7 +984,7 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
out_free_sg:
sg_free_table(sgt);
out_free_iova:
- iommu_dma_free_iova(cookie, iova, size, NULL);
+ iommu_dma_free_iova(domain, iova, size, NULL);
out_free_pages:
__iommu_dma_free_pages(pages, count);
return NULL;
@@ -1486,7 +1461,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
return __finalise_sg(dev, sg, nents, iova);
out_free_iova:
- iommu_dma_free_iova(cookie, iova, iova_len, NULL);
+ iommu_dma_free_iova(domain, iova, iova_len, NULL);
out_restore_sg:
__invalidate_sg(sg, nents);
out:
@@ -1764,17 +1739,47 @@ out_err:
dev->dma_iommu = false;
}
+static bool has_msi_cookie(const struct iommu_domain *domain)
+{
+ return domain && (domain->cookie_type == IOMMU_COOKIE_DMA_IOVA ||
+ domain->cookie_type == IOMMU_COOKIE_DMA_MSI);
+}
+
+static size_t cookie_msi_granule(const struct iommu_domain *domain)
+{
+ switch (domain->cookie_type) {
+ case IOMMU_COOKIE_DMA_IOVA:
+ return domain->iova_cookie->iovad.granule;
+ case IOMMU_COOKIE_DMA_MSI:
+ return PAGE_SIZE;
+ default:
+ BUG();
+ }
+}
+
+static struct list_head *cookie_msi_pages(const struct iommu_domain *domain)
+{
+ switch (domain->cookie_type) {
+ case IOMMU_COOKIE_DMA_IOVA:
+ return &domain->iova_cookie->msi_page_list;
+ case IOMMU_COOKIE_DMA_MSI:
+ return &domain->msi_cookie->msi_page_list;
+ default:
+ BUG();
+ }
+}
+
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
phys_addr_t msi_addr, struct iommu_domain *domain)
{
- struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct list_head *msi_page_list = cookie_msi_pages(domain);
struct iommu_dma_msi_page *msi_page;
dma_addr_t iova;
int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
- size_t size = cookie_msi_granule(cookie);
+ size_t size = cookie_msi_granule(domain);
msi_addr &= ~(phys_addr_t)(size - 1);
- list_for_each_entry(msi_page, &cookie->msi_page_list, list)
+ list_for_each_entry(msi_page, msi_page_list, list)
if (msi_page->phys == msi_addr)
return msi_page;
@@ -1792,23 +1797,23 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
INIT_LIST_HEAD(&msi_page->list);
msi_page->phys = msi_addr;
msi_page->iova = iova;
- list_add(&msi_page->list, &cookie->msi_page_list);
+ list_add(&msi_page->list, msi_page_list);
return msi_page;
out_free_iova:
- iommu_dma_free_iova(cookie, iova, size, NULL);
+ iommu_dma_free_iova(domain, iova, size, NULL);
out_free_page:
kfree(msi_page);
return NULL;
}
-static int iommu_dma_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
- phys_addr_t msi_addr)
+int iommu_dma_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
+ phys_addr_t msi_addr)
{
struct device *dev = msi_desc_to_dev(desc);
const struct iommu_dma_msi_page *msi_page;
- if (!domain->iova_cookie) {
+ if (!has_msi_cookie(domain)) {
msi_desc_set_iommu_msi_iova(desc, 0, 0);
return 0;
}
@@ -1818,9 +1823,8 @@ static int iommu_dma_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
if (!msi_page)
return -ENOMEM;
- msi_desc_set_iommu_msi_iova(
- desc, msi_page->iova,
- ilog2(cookie_msi_granule(domain->iova_cookie)));
+ msi_desc_set_iommu_msi_iova(desc, msi_page->iova,
+ ilog2(cookie_msi_granule(domain)));
return 0;
}
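
The cookie rework above replaces one struct carrying an internal type tag and a large union with two separate cookie structs, discriminated by an explicit cookie_type on the domain itself. The design in miniature (hypothetical names, not the kernel's actual types):

enum example_cookie_type {
	EXAMPLE_COOKIE_NONE,
	EXAMPLE_COOKIE_IOVA,	/* full IOVA allocator state */
	EXAMPLE_COOKIE_MSI,	/* trivial linear MSI window */
};

struct example_iova_cookie;
struct example_msi_cookie;

struct example_domain {
	enum example_cookie_type cookie_type;
	union {	/* the valid member follows cookie_type */
		struct example_iova_cookie *iova_cookie;
		struct example_msi_cookie *msi_cookie;
	};
};

/* Stand-ins for the per-type teardown helpers */
static void example_put_iova_cookie(struct example_iova_cookie *c) { }
static void example_put_msi_cookie(struct example_msi_cookie *c) { }

static void example_put_cookie(struct example_domain *d)
{
	switch (d->cookie_type) {
	case EXAMPLE_COOKIE_IOVA:
		example_put_iova_cookie(d->iova_cookie);
		break;
	case EXAMPLE_COOKIE_MSI:
		example_put_msi_cookie(d->msi_cookie);
		break;
	default:
		break;
	}
}

Each cookie allocation is now exactly the size its type needs, and teardown dispatches on the tag rather than probing union members.
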
diff --git a/drivers/iommu/dma-iommu.h b/drivers/iommu/dma-iommu.h
index c12d63457c76..eca201c1f963 100644
--- a/drivers/iommu/dma-iommu.h
+++ b/drivers/iommu/dma-iommu.h
@@ -13,11 +13,15 @@ void iommu_setup_dma_ops(struct device *dev);
int iommu_get_dma_cookie(struct iommu_domain *domain);
void iommu_put_dma_cookie(struct iommu_domain *domain);
+void iommu_put_msi_cookie(struct iommu_domain *domain);
int iommu_dma_init_fq(struct iommu_domain *domain);
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
+int iommu_dma_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
+ phys_addr_t msi_addr);
+
extern bool iommu_dma_forcedac;
#else /* CONFIG_IOMMU_DMA */
@@ -40,9 +44,19 @@ static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
{
}
+static inline void iommu_put_msi_cookie(struct iommu_domain *domain)
+{
+}
+
static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
}
+static inline int iommu_dma_sw_msi(struct iommu_domain *domain,
+ struct msi_desc *desc, phys_addr_t msi_addr)
+{
+ return -ENODEV;
+}
+
#endif /* CONFIG_IOMMU_DMA */
#endif /* __DMA_IOMMU_H */
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index ec2f385ae25b..6e67cc66a204 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -3383,7 +3383,8 @@ intel_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
bool first_stage;
if (flags &
- (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING)))
+ (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
+ IOMMU_HWPT_ALLOC_PASID)))
return ERR_PTR(-EOPNOTSUPP);
if (nested_parent && !nested_supported(iommu))
return ERR_PTR(-EOPNOTSUPP);
diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c
index aba92c00b427..6ac5c534bef4 100644
--- a/drivers/iommu/intel/nested.c
+++ b/drivers/iommu/intel/nested.c
@@ -198,7 +198,7 @@ intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
struct dmar_domain *domain;
int ret;
- if (!nested_supported(iommu) || flags)
+ if (!nested_supported(iommu) || flags & ~IOMMU_HWPT_ALLOC_PASID)
return ERR_PTR(-EOPNOTSUPP);
/* Must be nested domain */
diff --git a/drivers/iommu/iommu-priv.h b/drivers/iommu/iommu-priv.h
index 05fa6e682e88..e236b932e766 100644
--- a/drivers/iommu/iommu-priv.h
+++ b/drivers/iommu/iommu-priv.h
@@ -5,6 +5,7 @@
#define __LINUX_IOMMU_PRIV_H
#include <linux/iommu.h>
+#include <linux/msi.h>
static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
{
@@ -47,4 +48,19 @@ void iommu_detach_group_handle(struct iommu_domain *domain,
int iommu_replace_group_handle(struct iommu_group *group,
struct iommu_domain *new_domain,
struct iommu_attach_handle *handle);
+
+#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER_CORE) && IS_ENABLED(CONFIG_IRQ_MSI_IOMMU)
+int iommufd_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
+ phys_addr_t msi_addr);
+#else /* !CONFIG_IOMMUFD_DRIVER_CORE || !CONFIG_IRQ_MSI_IOMMU */
+static inline int iommufd_sw_msi(struct iommu_domain *domain,
+ struct msi_desc *desc, phys_addr_t msi_addr)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_IOMMUFD_DRIVER_CORE && CONFIG_IRQ_MSI_IOMMU */
+
+int iommu_replace_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid,
+ struct iommu_attach_handle *handle);
#endif /* __LINUX_IOMMU_PRIV_H */
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index 503c5d23c1ea..ab18bc494eef 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -310,6 +310,7 @@ static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
}
domain->type = IOMMU_DOMAIN_SVA;
+ domain->cookie_type = IOMMU_COOKIE_SVA;
mmgrab(mm);
domain->mm = mm;
domain->owner = ops;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 9e1b444246f8..c8033ca66377 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -18,6 +18,7 @@
#include <linux/errno.h>
#include <linux/host1x_context_bus.h>
#include <linux/iommu.h>
+#include <linux/iommufd.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/pci.h>
@@ -539,6 +540,13 @@ static void iommu_deinit_device(struct device *dev)
dev_iommu_free(dev);
}
+static struct iommu_domain *pasid_array_entry_to_domain(void *entry)
+{
+ if (xa_pointer_tag(entry) == IOMMU_PASID_ARRAY_DOMAIN)
+ return xa_untag_pointer(entry);
+ return ((struct iommu_attach_handle *)xa_untag_pointer(entry))->domain;
+}
+
DEFINE_MUTEX(iommu_probe_device_lock);
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
@@ -1973,8 +1981,10 @@ void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler,
void *token)
{
- BUG_ON(!domain);
+ if (WARN_ON(!domain || domain->cookie_type != IOMMU_COOKIE_NONE))
+ return;
+ domain->cookie_type = IOMMU_COOKIE_FAULT_HANDLER;
domain->handler = handler;
domain->handler_token = token;
}
@@ -2044,9 +2054,19 @@ EXPORT_SYMBOL_GPL(iommu_paging_domain_alloc_flags);
void iommu_domain_free(struct iommu_domain *domain)
{
- if (domain->type == IOMMU_DOMAIN_SVA)
+ switch (domain->cookie_type) {
+ case IOMMU_COOKIE_DMA_IOVA:
+ iommu_put_dma_cookie(domain);
+ break;
+ case IOMMU_COOKIE_DMA_MSI:
+ iommu_put_msi_cookie(domain);
+ break;
+ case IOMMU_COOKIE_SVA:
mmdrop(domain->mm);
- iommu_put_dma_cookie(domain);
+ break;
+ default:
+ break;
+ }
if (domain->ops->free)
domain->ops->free(domain);
}
@@ -3335,14 +3355,15 @@ static void iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
}
static int __iommu_set_group_pasid(struct iommu_domain *domain,
- struct iommu_group *group, ioasid_t pasid)
+ struct iommu_group *group, ioasid_t pasid,
+ struct iommu_domain *old)
{
struct group_device *device, *last_gdev;
int ret;
for_each_group_device(group, device) {
ret = domain->ops->set_dev_pasid(domain, device->dev,
- pasid, NULL);
+ pasid, old);
if (ret)
goto err_revert;
}
@@ -3354,7 +3375,15 @@ err_revert:
for_each_group_device(group, device) {
if (device == last_gdev)
break;
- iommu_remove_dev_pasid(device->dev, pasid, domain);
+ /*
+ * If there is no old domain, undo the devices/pasid that
+ * succeeded. Otherwise, roll the succeeded devices/pasid back
+ * to the old domain; it is a driver bug if attaching a
+ * previously working domain fails.
+ */
+ if (!old || WARN_ON(old->ops->set_dev_pasid(old, device->dev,
+ pasid, domain)))
+ iommu_remove_dev_pasid(device->dev, pasid, domain);
}
return ret;
}
@@ -3376,6 +3405,9 @@ static void __iommu_remove_group_pasid(struct iommu_group *group,
* @pasid: the pasid of the device.
* @handle: the attach handle.
*
+ * If the caller intends to pass a valid handle, it must always be a new
+ * one, to avoid racing with paths that reference the handle without locking.
+ *
* Return: 0 on success, or an error.
*/
int iommu_attach_device_pasid(struct iommu_domain *domain,
@@ -3420,7 +3452,7 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
if (ret)
goto out_unlock;
- ret = __iommu_set_group_pasid(domain, group, pasid);
+ ret = __iommu_set_group_pasid(domain, group, pasid, NULL);
if (ret) {
xa_release(&group->pasid_array, pasid);
goto out_unlock;
@@ -3441,6 +3473,97 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(iommu_attach_device_pasid);
+/**
+ * iommu_replace_device_pasid - Replace the domain that a specific pasid
+ * of the device is attached to
+ * @domain: the new iommu domain
+ * @dev: the attached device.
+ * @pasid: the pasid of the device.
+ * @handle: the attach handle.
+ *
+ * This API allows a pasid to switch domains. The @pasid must already be
+ * attached, otherwise this fails. The pasid keeps its old configuration
+ * if the replacement fails.
+ *
+ * If the caller intends to pass a valid handle, it must always be a new
+ * one, to avoid racing with paths that reference the handle without locking.
+ *
+ * Return 0 on success, or an error.
+ */
+int iommu_replace_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid,
+ struct iommu_attach_handle *handle)
+{
+ /* Caller must be a probed driver on dev */
+ struct iommu_group *group = dev->iommu_group;
+ struct iommu_attach_handle *entry;
+ struct iommu_domain *curr_domain;
+ void *curr;
+ int ret;
+
+ if (!group)
+ return -ENODEV;
+
+ if (!domain->ops->set_dev_pasid)
+ return -EOPNOTSUPP;
+
+ if (dev_iommu_ops(dev) != domain->owner ||
+ pasid == IOMMU_NO_PASID || !handle)
+ return -EINVAL;
+
+ mutex_lock(&group->mutex);
+ entry = iommu_make_pasid_array_entry(domain, handle);
+ curr = xa_cmpxchg(&group->pasid_array, pasid, NULL,
+ XA_ZERO_ENTRY, GFP_KERNEL);
+ if (xa_is_err(curr)) {
+ ret = xa_err(curr);
+ goto out_unlock;
+ }
+
+ /*
+ * No domain (with or without a handle) is attached, so this
+ * is not a replace case.
+ */
+ if (!curr) {
+ xa_release(&group->pasid_array, pasid);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ /*
+ * Reusing a handle is problematic because some paths reference
+ * the handle without locking. To avoid races, reject callers
+ * that attempt it.
+ */
+ if (curr == entry) {
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ curr_domain = pasid_array_entry_to_domain(curr);
+ ret = 0;
+
+ if (curr_domain != domain) {
+ ret = __iommu_set_group_pasid(domain, group,
+ pasid, curr_domain);
+ if (ret)
+ goto out_unlock;
+ }
+
+ /*
+ * The xa_cmpxchg() above reserved the memory and the
+ * group->mutex is held, so this cannot fail.
+ */
+ WARN_ON(xa_is_err(xa_store(&group->pasid_array,
+ pasid, entry, GFP_KERNEL)));
+
+out_unlock:
+ mutex_unlock(&group->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(iommu_replace_device_pasid, "IOMMUFD_INTERNAL");
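As a rough sketch of the contract above, a hypothetical in-kernel caller (the helper name is invented, and real users such as iommufd embed the handle in a larger structure rather than allocating it bare):

/* Hypothetical sketch: switch the domain bound to one PASID. */
static int example_switch_pasid_domain(struct iommu_domain *new_domain,
				       struct device *dev, ioasid_t pasid)
{
	struct iommu_attach_handle *handle;
	int rc;

	/* Must be a fresh handle; reusing the current one is rejected above */
	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	rc = iommu_replace_device_pasid(new_domain, dev, pasid, handle);
	if (rc)
		kfree(handle);	/* the PASID keeps its previous domain */
	/* On success the previous handle is unused and its owner frees it */
	return rc;
}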
+
/*
* iommu_detach_device_pasid() - Detach the domain from pasid of device
* @domain: the iommu domain.
@@ -3536,6 +3659,9 @@ EXPORT_SYMBOL_NS_GPL(iommu_attach_handle_get, "IOMMUFD_INTERNAL");
* This is a variant of iommu_attach_group(). It allows the caller to provide
* an attach handle and use it when the domain is attached. This is currently
* used by IOMMUFD to deliver the I/O page faults.
+ *
+ * Callers must always provide a new handle, to avoid racing with paths
+ * that reference the handle without locking.
*/
int iommu_attach_group_handle(struct iommu_domain *domain,
struct iommu_group *group,
@@ -3605,6 +3731,9 @@ EXPORT_SYMBOL_NS_GPL(iommu_detach_group_handle, "IOMMUFD_INTERNAL");
*
* If the currently attached domain is a core domain (e.g. a default_domain),
* it will act just like the iommu_attach_group_handle().
+ *
+ * Callers must always provide a new handle, to avoid racing with paths
+ * that reference the handle without locking.
*/
int iommu_replace_group_handle(struct iommu_group *group,
struct iommu_domain *new_domain,
@@ -3662,8 +3791,21 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
return 0;
mutex_lock(&group->mutex);
- if (group->domain && group->domain->sw_msi)
- ret = group->domain->sw_msi(group->domain, desc, msi_addr);
+ /* An IDENTITY domain must pass through */
+ if (group->domain && group->domain->type != IOMMU_DOMAIN_IDENTITY) {
+ switch (group->domain->cookie_type) {
+ case IOMMU_COOKIE_DMA_MSI:
+ case IOMMU_COOKIE_DMA_IOVA:
+ ret = iommu_dma_sw_msi(group->domain, desc, msi_addr);
+ break;
+ case IOMMU_COOKIE_IOMMUFD:
+ ret = iommufd_sw_msi(group->domain, desc, msi_addr);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ }
mutex_unlock(&group->mutex);
return ret;
}
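For context, a minimal sketch (an assumption about the iommufd side, not code from this hunk) of how a domain lands in the IOMMU_COOKIE_IOMMUFD branch of the dispatch above:

/* Sketch: set when iommufd creates the domain for a HWPT, so that
 * iommu_dma_prepare_msi() routes MSI setup to iommufd_sw_msi(). */
domain->cookie_type = IOMMU_COOKIE_IOMMUFD;
domain->iommufd_hwpt = hwpt;	/* back-pointer consumed by iommufd_sw_msi() */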
diff --git a/drivers/iommu/iommufd/Kconfig b/drivers/iommu/iommufd/Kconfig
index 0a07f9449fd9..2beeb4f60ee5 100644
--- a/drivers/iommu/iommufd/Kconfig
+++ b/drivers/iommu/iommufd/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config IOMMUFD_DRIVER_CORE
- tristate
+ bool
default (IOMMUFD_DRIVER || IOMMUFD) if IOMMUFD!=n
config IOMMUFD
diff --git a/drivers/iommu/iommufd/Makefile b/drivers/iommu/iommufd/Makefile
index cb784da6cddc..71d692c9a8f4 100644
--- a/drivers/iommu/iommufd/Makefile
+++ b/drivers/iommu/iommufd/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
iommufd-y := \
device.o \
- fault.o \
+ eventq.o \
hw_pagetable.o \
io_pagetable.o \
ioas.o \
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index 4e107f69f951..2111bad72c72 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -3,9 +3,9 @@
*/
#include <linux/iommu.h>
#include <linux/iommufd.h>
+#include <linux/pci-ats.h>
#include <linux/slab.h>
#include <uapi/linux/iommufd.h>
-#include <linux/msi.h>
#include "../iommu-priv.h"
#include "io_pagetable.h"
@@ -18,12 +18,17 @@ MODULE_PARM_DESC(
"Allow IOMMUFD to bind to devices even if the platform cannot isolate "
"the MSI interrupt window. Enabling this is a security weakness.");
+struct iommufd_attach {
+ struct iommufd_hw_pagetable *hwpt;
+ struct xarray device_array;
+};
+
static void iommufd_group_release(struct kref *kref)
{
struct iommufd_group *igroup =
container_of(kref, struct iommufd_group, ref);
- WARN_ON(igroup->hwpt || !list_empty(&igroup->device_list));
+ WARN_ON(!xa_empty(&igroup->pasid_attach));
xa_cmpxchg(&igroup->ictx->groups, iommu_group_id(igroup->group), igroup,
NULL, GFP_KERNEL);
@@ -90,7 +95,7 @@ static struct iommufd_group *iommufd_get_group(struct iommufd_ctx *ictx,
kref_init(&new_igroup->ref);
mutex_init(&new_igroup->lock);
- INIT_LIST_HEAD(&new_igroup->device_list);
+ xa_init(&new_igroup->pasid_attach);
new_igroup->sw_msi_start = PHYS_ADDR_MAX;
/* group reference moves into new_igroup */
new_igroup->group = group;
@@ -294,129 +299,24 @@ u32 iommufd_device_to_id(struct iommufd_device *idev)
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_to_id, "IOMMUFD");
-/*
- * Get a iommufd_sw_msi_map for the msi physical address requested by the irq
- * layer. The mapping to IOVA is global to the iommufd file descriptor, every
- * domain that is attached to a device using the same MSI parameters will use
- * the same IOVA.
- */
-static __maybe_unused struct iommufd_sw_msi_map *
-iommufd_sw_msi_get_map(struct iommufd_ctx *ictx, phys_addr_t msi_addr,
- phys_addr_t sw_msi_start)
-{
- struct iommufd_sw_msi_map *cur;
- unsigned int max_pgoff = 0;
-
- lockdep_assert_held(&ictx->sw_msi_lock);
-
- list_for_each_entry(cur, &ictx->sw_msi_list, sw_msi_item) {
- if (cur->sw_msi_start != sw_msi_start)
- continue;
- max_pgoff = max(max_pgoff, cur->pgoff + 1);
- if (cur->msi_addr == msi_addr)
- return cur;
- }
-
- if (ictx->sw_msi_id >=
- BITS_PER_BYTE * sizeof_field(struct iommufd_sw_msi_maps, bitmap))
- return ERR_PTR(-EOVERFLOW);
-
- cur = kzalloc(sizeof(*cur), GFP_KERNEL);
- if (!cur)
- return ERR_PTR(-ENOMEM);
-
- cur->sw_msi_start = sw_msi_start;
- cur->msi_addr = msi_addr;
- cur->pgoff = max_pgoff;
- cur->id = ictx->sw_msi_id++;
- list_add_tail(&cur->sw_msi_item, &ictx->sw_msi_list);
- return cur;
-}
-
-static int iommufd_sw_msi_install(struct iommufd_ctx *ictx,
- struct iommufd_hwpt_paging *hwpt_paging,
- struct iommufd_sw_msi_map *msi_map)
+static unsigned int iommufd_group_device_num(struct iommufd_group *igroup,
+ ioasid_t pasid)
{
- unsigned long iova;
-
- lockdep_assert_held(&ictx->sw_msi_lock);
+ struct iommufd_attach *attach;
+ struct iommufd_device *idev;
+ unsigned int count = 0;
+ unsigned long index;
- iova = msi_map->sw_msi_start + msi_map->pgoff * PAGE_SIZE;
- if (!test_bit(msi_map->id, hwpt_paging->present_sw_msi.bitmap)) {
- int rc;
+ lockdep_assert_held(&igroup->lock);
- rc = iommu_map(hwpt_paging->common.domain, iova,
- msi_map->msi_addr, PAGE_SIZE,
- IOMMU_WRITE | IOMMU_READ | IOMMU_MMIO,
- GFP_KERNEL_ACCOUNT);
- if (rc)
- return rc;
- __set_bit(msi_map->id, hwpt_paging->present_sw_msi.bitmap);
- }
- return 0;
+ attach = xa_load(&igroup->pasid_attach, pasid);
+ if (attach)
+ xa_for_each(&attach->device_array, index, idev)
+ count++;
+ return count;
}
-/*
- * Called by the irq code if the platform translates the MSI address through the
- * IOMMU. msi_addr is the physical address of the MSI page. iommufd will
- * allocate a fd global iova for the physical page that is the same on all
- * domains and devices.
- */
#ifdef CONFIG_IRQ_MSI_IOMMU
-int iommufd_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
- phys_addr_t msi_addr)
-{
- struct device *dev = msi_desc_to_dev(desc);
- struct iommufd_hwpt_paging *hwpt_paging;
- struct iommu_attach_handle *raw_handle;
- struct iommufd_attach_handle *handle;
- struct iommufd_sw_msi_map *msi_map;
- struct iommufd_ctx *ictx;
- unsigned long iova;
- int rc;
-
- /*
- * It is safe to call iommu_attach_handle_get() here because the iommu
- * core code invokes this under the group mutex which also prevents any
- * change of the attach handle for the duration of this function.
- */
- iommu_group_mutex_assert(dev);
-
- raw_handle =
- iommu_attach_handle_get(dev->iommu_group, IOMMU_NO_PASID, 0);
- if (IS_ERR(raw_handle))
- return 0;
- hwpt_paging = find_hwpt_paging(domain->iommufd_hwpt);
-
- handle = to_iommufd_handle(raw_handle);
- /* No IOMMU_RESV_SW_MSI means no change to the msi_msg */
- if (handle->idev->igroup->sw_msi_start == PHYS_ADDR_MAX)
- return 0;
-
- ictx = handle->idev->ictx;
- guard(mutex)(&ictx->sw_msi_lock);
- /*
- * The input msi_addr is the exact byte offset of the MSI doorbell, we
- * assume the caller has checked that it is contained with a MMIO region
- * that is secure to map at PAGE_SIZE.
- */
- msi_map = iommufd_sw_msi_get_map(handle->idev->ictx,
- msi_addr & PAGE_MASK,
- handle->idev->igroup->sw_msi_start);
- if (IS_ERR(msi_map))
- return PTR_ERR(msi_map);
-
- rc = iommufd_sw_msi_install(ictx, hwpt_paging, msi_map);
- if (rc)
- return rc;
- __set_bit(msi_map->id, handle->idev->igroup->required_sw_msi.bitmap);
-
- iova = msi_map->sw_msi_start + msi_map->pgoff * PAGE_SIZE;
- msi_desc_set_iommu_msi_iova(desc, iova, PAGE_SHIFT);
- return 0;
-}
-#endif
-
static int iommufd_group_setup_msi(struct iommufd_group *igroup,
struct iommufd_hwpt_paging *hwpt_paging)
{
@@ -443,23 +343,39 @@ static int iommufd_group_setup_msi(struct iommufd_group *igroup,
}
return 0;
}
+#else
+static inline int
+iommufd_group_setup_msi(struct iommufd_group *igroup,
+ struct iommufd_hwpt_paging *hwpt_paging)
+{
+ return 0;
+}
+#endif
+
+static bool
+iommufd_group_first_attach(struct iommufd_group *igroup, ioasid_t pasid)
+{
+ lockdep_assert_held(&igroup->lock);
+ return !xa_load(&igroup->pasid_attach, pasid);
+}
static int
iommufd_device_attach_reserved_iova(struct iommufd_device *idev,
struct iommufd_hwpt_paging *hwpt_paging)
{
+ struct iommufd_group *igroup = idev->igroup;
int rc;
- lockdep_assert_held(&idev->igroup->lock);
+ lockdep_assert_held(&igroup->lock);
rc = iopt_table_enforce_dev_resv_regions(&hwpt_paging->ioas->iopt,
idev->dev,
- &idev->igroup->sw_msi_start);
+ &igroup->sw_msi_start);
if (rc)
return rc;
- if (list_empty(&idev->igroup->device_list)) {
- rc = iommufd_group_setup_msi(idev->igroup, hwpt_paging);
+ if (iommufd_group_first_attach(igroup, IOMMU_NO_PASID)) {
+ rc = iommufd_group_setup_msi(igroup, hwpt_paging);
if (rc) {
iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt,
idev->dev);
@@ -471,13 +387,54 @@ iommufd_device_attach_reserved_iova(struct iommufd_device *idev,
/* The device attach/detach/replace helpers for attach_handle */
+static bool iommufd_device_is_attached(struct iommufd_device *idev,
+ ioasid_t pasid)
+{
+ struct iommufd_attach *attach;
+
+ attach = xa_load(&idev->igroup->pasid_attach, pasid);
+ return xa_load(&attach->device_array, idev->obj.id);
+}
+
+static int iommufd_hwpt_pasid_compat(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_device *idev,
+ ioasid_t pasid)
+{
+ struct iommufd_group *igroup = idev->igroup;
+
+ lockdep_assert_held(&igroup->lock);
+
+ if (pasid == IOMMU_NO_PASID) {
+ unsigned long start = IOMMU_NO_PASID;
+
+ if (!hwpt->pasid_compat &&
+ xa_find_after(&igroup->pasid_attach,
+ &start, UINT_MAX, XA_PRESENT))
+ return -EINVAL;
+ } else {
+ struct iommufd_attach *attach;
+
+ if (!hwpt->pasid_compat)
+ return -EINVAL;
+
+ attach = xa_load(&igroup->pasid_attach, IOMMU_NO_PASID);
+ if (attach && attach->hwpt && !attach->hwpt->pasid_compat)
+ return -EINVAL;
+ }
+
+ return 0;
+}
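To make the checks above concrete, a hypothetical call sequence (object names assumed; nocompat_hwpt has pasid_compat == false) that the PASID branch rejects:

rc = iommufd_hw_pagetable_attach(nocompat_hwpt, idev, IOMMU_NO_PASID); /* ok */
rc = iommufd_hw_pagetable_attach(compat_hwpt, idev, 5);
/* -EINVAL: the IOMMU_NO_PASID attachment is not pasid_compat */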
+
static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev)
+ struct iommufd_device *idev,
+ ioasid_t pasid)
{
struct iommufd_attach_handle *handle;
int rc;
- lockdep_assert_held(&idev->igroup->lock);
+ rc = iommufd_hwpt_pasid_compat(hwpt, idev, pasid);
+ if (rc)
+ return rc;
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
@@ -490,8 +447,12 @@ static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
}
handle->idev = idev;
- rc = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
- &handle->handle);
+ if (pasid == IOMMU_NO_PASID)
+ rc = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
+ &handle->handle);
+ else
+ rc = iommu_attach_device_pasid(hwpt->domain, idev->dev, pasid,
+ &handle->handle);
if (rc)
goto out_disable_iopf;
@@ -506,26 +467,31 @@ out_free_handle:
}
static struct iommufd_attach_handle *
-iommufd_device_get_attach_handle(struct iommufd_device *idev)
+iommufd_device_get_attach_handle(struct iommufd_device *idev, ioasid_t pasid)
{
struct iommu_attach_handle *handle;
lockdep_assert_held(&idev->igroup->lock);
handle =
- iommu_attach_handle_get(idev->igroup->group, IOMMU_NO_PASID, 0);
+ iommu_attach_handle_get(idev->igroup->group, pasid, 0);
if (IS_ERR(handle))
return NULL;
return to_iommufd_handle(handle);
}
static void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev)
+ struct iommufd_device *idev,
+ ioasid_t pasid)
{
struct iommufd_attach_handle *handle;
- handle = iommufd_device_get_attach_handle(idev);
- iommu_detach_group_handle(hwpt->domain, idev->igroup->group);
+ handle = iommufd_device_get_attach_handle(idev, pasid);
+ if (pasid == IOMMU_NO_PASID)
+ iommu_detach_group_handle(hwpt->domain, idev->igroup->group);
+ else
+ iommu_detach_device_pasid(hwpt->domain, idev->dev, pasid);
+
if (hwpt->fault) {
iommufd_auto_response_faults(hwpt, handle);
iommufd_fault_iopf_disable(idev);
@@ -534,13 +500,19 @@ static void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
}
static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
+ ioasid_t pasid,
struct iommufd_hw_pagetable *hwpt,
struct iommufd_hw_pagetable *old)
{
- struct iommufd_attach_handle *handle, *old_handle =
- iommufd_device_get_attach_handle(idev);
+ struct iommufd_attach_handle *handle, *old_handle;
int rc;
+ rc = iommufd_hwpt_pasid_compat(hwpt, idev, pasid);
+ if (rc)
+ return rc;
+
+ old_handle = iommufd_device_get_attach_handle(idev, pasid);
+
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
@@ -552,8 +524,12 @@ static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
}
handle->idev = idev;
- rc = iommu_replace_group_handle(idev->igroup->group, hwpt->domain,
- &handle->handle);
+ if (pasid == IOMMU_NO_PASID)
+ rc = iommu_replace_group_handle(idev->igroup->group,
+ hwpt->domain, &handle->handle);
+ else
+ rc = iommu_replace_device_pasid(hwpt->domain, idev->dev,
+ pasid, &handle->handle);
if (rc)
goto out_disable_iopf;
@@ -575,22 +551,51 @@ out_free_handle:
}
int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev)
+ struct iommufd_device *idev, ioasid_t pasid)
{
struct iommufd_hwpt_paging *hwpt_paging = find_hwpt_paging(hwpt);
+ bool attach_resv = hwpt_paging && pasid == IOMMU_NO_PASID;
+ struct iommufd_group *igroup = idev->igroup;
+ struct iommufd_hw_pagetable *old_hwpt;
+ struct iommufd_attach *attach;
int rc;
- mutex_lock(&idev->igroup->lock);
+ mutex_lock(&igroup->lock);
- if (idev->igroup->hwpt != NULL && idev->igroup->hwpt != hwpt) {
- rc = -EINVAL;
+ attach = xa_cmpxchg(&igroup->pasid_attach, pasid, NULL,
+ XA_ZERO_ENTRY, GFP_KERNEL);
+ if (xa_is_err(attach)) {
+ rc = xa_err(attach);
goto err_unlock;
}
- if (hwpt_paging) {
+ if (!attach) {
+ attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+ if (!attach) {
+ rc = -ENOMEM;
+ goto err_release_pasid;
+ }
+ xa_init(&attach->device_array);
+ }
+
+ old_hwpt = attach->hwpt;
+
+ rc = xa_insert(&attach->device_array, idev->obj.id, XA_ZERO_ENTRY,
+ GFP_KERNEL);
+ if (rc) {
+ WARN_ON(rc == -EBUSY && !old_hwpt);
+ goto err_free_attach;
+ }
+
+ if (old_hwpt && old_hwpt != hwpt) {
+ rc = -EINVAL;
+ goto err_release_devid;
+ }
+
+ if (attach_resv) {
rc = iommufd_device_attach_reserved_iova(idev, hwpt_paging);
if (rc)
- goto err_unlock;
+ goto err_release_devid;
}
/*
@@ -600,51 +605,74 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
* reserved regions are only updated during individual device
* attachment.
*/
- if (list_empty(&idev->igroup->device_list)) {
- rc = iommufd_hwpt_attach_device(hwpt, idev);
+ if (iommufd_group_first_attach(igroup, pasid)) {
+ rc = iommufd_hwpt_attach_device(hwpt, idev, pasid);
if (rc)
goto err_unresv;
- idev->igroup->hwpt = hwpt;
+ attach->hwpt = hwpt;
+ WARN_ON(xa_is_err(xa_store(&igroup->pasid_attach, pasid, attach,
+ GFP_KERNEL)));
}
refcount_inc(&hwpt->obj.users);
- list_add_tail(&idev->group_item, &idev->igroup->device_list);
- mutex_unlock(&idev->igroup->lock);
+ WARN_ON(xa_is_err(xa_store(&attach->device_array, idev->obj.id,
+ idev, GFP_KERNEL)));
+ mutex_unlock(&igroup->lock);
return 0;
err_unresv:
- if (hwpt_paging)
+ if (attach_resv)
iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, idev->dev);
+err_release_devid:
+ xa_release(&attach->device_array, idev->obj.id);
+err_free_attach:
+ if (iommufd_group_first_attach(igroup, pasid))
+ kfree(attach);
+err_release_pasid:
+ if (iommufd_group_first_attach(igroup, pasid))
+ xa_release(&igroup->pasid_attach, pasid);
err_unlock:
- mutex_unlock(&idev->igroup->lock);
+ mutex_unlock(&igroup->lock);
return rc;
}
struct iommufd_hw_pagetable *
-iommufd_hw_pagetable_detach(struct iommufd_device *idev)
+iommufd_hw_pagetable_detach(struct iommufd_device *idev, ioasid_t pasid)
{
- struct iommufd_hw_pagetable *hwpt = idev->igroup->hwpt;
- struct iommufd_hwpt_paging *hwpt_paging = find_hwpt_paging(hwpt);
+ struct iommufd_group *igroup = idev->igroup;
+ struct iommufd_hwpt_paging *hwpt_paging;
+ struct iommufd_hw_pagetable *hwpt;
+ struct iommufd_attach *attach;
+
+ mutex_lock(&igroup->lock);
+ attach = xa_load(&igroup->pasid_attach, pasid);
+ if (!attach) {
+ mutex_unlock(&igroup->lock);
+ return NULL;
+ }
- mutex_lock(&idev->igroup->lock);
- list_del(&idev->group_item);
- if (list_empty(&idev->igroup->device_list)) {
- iommufd_hwpt_detach_device(hwpt, idev);
- idev->igroup->hwpt = NULL;
+ hwpt = attach->hwpt;
+ hwpt_paging = find_hwpt_paging(hwpt);
+
+ xa_erase(&attach->device_array, idev->obj.id);
+ if (xa_empty(&attach->device_array)) {
+ iommufd_hwpt_detach_device(hwpt, idev, pasid);
+ xa_erase(&igroup->pasid_attach, pasid);
+ kfree(attach);
}
- if (hwpt_paging)
+ if (hwpt_paging && pasid == IOMMU_NO_PASID)
iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, idev->dev);
- mutex_unlock(&idev->igroup->lock);
+ mutex_unlock(&igroup->lock);
/* Caller must destroy hwpt */
return hwpt;
}
static struct iommufd_hw_pagetable *
-iommufd_device_do_attach(struct iommufd_device *idev,
+iommufd_device_do_attach(struct iommufd_device *idev, ioasid_t pasid,
struct iommufd_hw_pagetable *hwpt)
{
int rc;
- rc = iommufd_hw_pagetable_attach(hwpt, idev);
+ rc = iommufd_hw_pagetable_attach(hwpt, idev, pasid);
if (rc)
return ERR_PTR(rc);
return NULL;
@@ -654,11 +682,14 @@ static void
iommufd_group_remove_reserved_iova(struct iommufd_group *igroup,
struct iommufd_hwpt_paging *hwpt_paging)
{
+ struct iommufd_attach *attach;
struct iommufd_device *cur;
+ unsigned long index;
lockdep_assert_held(&igroup->lock);
- list_for_each_entry(cur, &igroup->device_list, group_item)
+ attach = xa_load(&igroup->pasid_attach, IOMMU_NO_PASID);
+ xa_for_each(&attach->device_array, index, cur)
iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, cur->dev);
}
@@ -667,14 +698,17 @@ iommufd_group_do_replace_reserved_iova(struct iommufd_group *igroup,
struct iommufd_hwpt_paging *hwpt_paging)
{
struct iommufd_hwpt_paging *old_hwpt_paging;
+ struct iommufd_attach *attach;
struct iommufd_device *cur;
+ unsigned long index;
int rc;
lockdep_assert_held(&igroup->lock);
- old_hwpt_paging = find_hwpt_paging(igroup->hwpt);
+ attach = xa_load(&igroup->pasid_attach, IOMMU_NO_PASID);
+ old_hwpt_paging = find_hwpt_paging(attach->hwpt);
if (!old_hwpt_paging || hwpt_paging->ioas != old_hwpt_paging->ioas) {
- list_for_each_entry(cur, &igroup->device_list, group_item) {
+ xa_for_each(&attach->device_array, index, cur) {
rc = iopt_table_enforce_dev_resv_regions(
&hwpt_paging->ioas->iopt, cur->dev, NULL);
if (rc)
@@ -693,69 +727,81 @@ err_unresv:
}
static struct iommufd_hw_pagetable *
-iommufd_device_do_replace(struct iommufd_device *idev,
+iommufd_device_do_replace(struct iommufd_device *idev, ioasid_t pasid,
struct iommufd_hw_pagetable *hwpt)
{
struct iommufd_hwpt_paging *hwpt_paging = find_hwpt_paging(hwpt);
+ bool attach_resv = hwpt_paging && pasid == IOMMU_NO_PASID;
struct iommufd_hwpt_paging *old_hwpt_paging;
struct iommufd_group *igroup = idev->igroup;
struct iommufd_hw_pagetable *old_hwpt;
+ struct iommufd_attach *attach;
unsigned int num_devices;
int rc;
- mutex_lock(&idev->igroup->lock);
+ mutex_lock(&igroup->lock);
+
+ attach = xa_load(&igroup->pasid_attach, pasid);
+ if (!attach) {
+ rc = -EINVAL;
+ goto err_unlock;
+ }
+
+ old_hwpt = attach->hwpt;
- if (igroup->hwpt == NULL) {
+ WARN_ON(!old_hwpt || xa_empty(&attach->device_array));
+
+ if (!iommufd_device_is_attached(idev, pasid)) {
rc = -EINVAL;
goto err_unlock;
}
- if (hwpt == igroup->hwpt) {
- mutex_unlock(&idev->igroup->lock);
+ if (hwpt == old_hwpt) {
+ mutex_unlock(&igroup->lock);
return NULL;
}
- old_hwpt = igroup->hwpt;
- if (hwpt_paging) {
+ if (attach_resv) {
rc = iommufd_group_do_replace_reserved_iova(igroup, hwpt_paging);
if (rc)
goto err_unlock;
}
- rc = iommufd_hwpt_replace_device(idev, hwpt, old_hwpt);
+ rc = iommufd_hwpt_replace_device(idev, pasid, hwpt, old_hwpt);
if (rc)
goto err_unresv;
old_hwpt_paging = find_hwpt_paging(old_hwpt);
- if (old_hwpt_paging &&
+ if (old_hwpt_paging && pasid == IOMMU_NO_PASID &&
(!hwpt_paging || hwpt_paging->ioas != old_hwpt_paging->ioas))
iommufd_group_remove_reserved_iova(igroup, old_hwpt_paging);
- igroup->hwpt = hwpt;
+ attach->hwpt = hwpt;
- num_devices = list_count_nodes(&igroup->device_list);
+ num_devices = iommufd_group_device_num(igroup, pasid);
/*
- * Move the refcounts held by the device_list to the new hwpt. Retain a
+ * Move the refcounts held by the device_array to the new hwpt. Retain a
* refcount for this thread as the caller will free it.
*/
refcount_add(num_devices, &hwpt->obj.users);
if (num_devices > 1)
WARN_ON(refcount_sub_and_test(num_devices - 1,
&old_hwpt->obj.users));
- mutex_unlock(&idev->igroup->lock);
+ mutex_unlock(&igroup->lock);
/* Caller must destroy old_hwpt */
return old_hwpt;
err_unresv:
- if (hwpt_paging)
+ if (attach_resv)
iommufd_group_remove_reserved_iova(igroup, hwpt_paging);
err_unlock:
- mutex_unlock(&idev->igroup->lock);
+ mutex_unlock(&igroup->lock);
return ERR_PTR(rc);
}
typedef struct iommufd_hw_pagetable *(*attach_fn)(
- struct iommufd_device *idev, struct iommufd_hw_pagetable *hwpt);
+ struct iommufd_device *idev, ioasid_t pasid,
+ struct iommufd_hw_pagetable *hwpt);
/*
* When automatically managing the domains we search for a compatible domain in
@@ -763,7 +809,7 @@ typedef struct iommufd_hw_pagetable *(*attach_fn)(
* Automatic domain selection will never pick a manually created domain.
*/
static struct iommufd_hw_pagetable *
-iommufd_device_auto_get_domain(struct iommufd_device *idev,
+iommufd_device_auto_get_domain(struct iommufd_device *idev, ioasid_t pasid,
struct iommufd_ioas *ioas, u32 *pt_id,
attach_fn do_attach)
{
@@ -792,7 +838,7 @@ iommufd_device_auto_get_domain(struct iommufd_device *idev,
hwpt = &hwpt_paging->common;
if (!iommufd_lock_obj(&hwpt->obj))
continue;
- destroy_hwpt = (*do_attach)(idev, hwpt);
+ destroy_hwpt = (*do_attach)(idev, pasid, hwpt);
if (IS_ERR(destroy_hwpt)) {
iommufd_put_object(idev->ictx, &hwpt->obj);
/*
@@ -810,8 +856,8 @@ iommufd_device_auto_get_domain(struct iommufd_device *idev,
goto out_unlock;
}
- hwpt_paging = iommufd_hwpt_paging_alloc(idev->ictx, ioas, idev, 0,
- immediate_attach, NULL);
+ hwpt_paging = iommufd_hwpt_paging_alloc(idev->ictx, ioas, idev, pasid,
+ 0, immediate_attach, NULL);
if (IS_ERR(hwpt_paging)) {
destroy_hwpt = ERR_CAST(hwpt_paging);
goto out_unlock;
@@ -819,7 +865,7 @@ iommufd_device_auto_get_domain(struct iommufd_device *idev,
hwpt = &hwpt_paging->common;
if (!immediate_attach) {
- destroy_hwpt = (*do_attach)(idev, hwpt);
+ destroy_hwpt = (*do_attach)(idev, pasid, hwpt);
if (IS_ERR(destroy_hwpt))
goto out_abort;
} else {
@@ -840,8 +886,9 @@ out_unlock:
return destroy_hwpt;
}
-static int iommufd_device_change_pt(struct iommufd_device *idev, u32 *pt_id,
- attach_fn do_attach)
+static int iommufd_device_change_pt(struct iommufd_device *idev,
+ ioasid_t pasid,
+ u32 *pt_id, attach_fn do_attach)
{
struct iommufd_hw_pagetable *destroy_hwpt;
struct iommufd_object *pt_obj;
@@ -856,7 +903,7 @@ static int iommufd_device_change_pt(struct iommufd_device *idev, u32 *pt_id,
struct iommufd_hw_pagetable *hwpt =
container_of(pt_obj, struct iommufd_hw_pagetable, obj);
- destroy_hwpt = (*do_attach)(idev, hwpt);
+ destroy_hwpt = (*do_attach)(idev, pasid, hwpt);
if (IS_ERR(destroy_hwpt))
goto out_put_pt_obj;
break;
@@ -865,8 +912,8 @@ static int iommufd_device_change_pt(struct iommufd_device *idev, u32 *pt_id,
struct iommufd_ioas *ioas =
container_of(pt_obj, struct iommufd_ioas, obj);
- destroy_hwpt = iommufd_device_auto_get_domain(idev, ioas, pt_id,
- do_attach);
+ destroy_hwpt = iommufd_device_auto_get_domain(idev, pasid, ioas,
+ pt_id, do_attach);
if (IS_ERR(destroy_hwpt))
goto out_put_pt_obj;
break;
@@ -888,22 +935,26 @@ out_put_pt_obj:
}
/**
- * iommufd_device_attach - Connect a device to an iommu_domain
+ * iommufd_device_attach - Connect a device/pasid to an iommu_domain
* @idev: device to attach
+ * @pasid: pasid to attach
* @pt_id: Input a IOMMUFD_OBJ_IOAS, or IOMMUFD_OBJ_HWPT_PAGING
* Output the IOMMUFD_OBJ_HWPT_PAGING ID
*
- * This connects the device to an iommu_domain, either automatically or manually
- * selected. Once this completes the device could do DMA.
+ * This connects the device/pasid to an iommu_domain, either automatically
+ * or manually selected. Once this completes the device can do DMA with
+ * @pasid. @pasid is IOMMU_NO_PASID if this attach is for no pasid usage.
*
* The caller should return the resulting pt_id back to userspace.
* This function is undone by calling iommufd_device_detach().
*/
-int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id)
+int iommufd_device_attach(struct iommufd_device *idev, ioasid_t pasid,
+ u32 *pt_id)
{
int rc;
- rc = iommufd_device_change_pt(idev, pt_id, &iommufd_device_do_attach);
+ rc = iommufd_device_change_pt(idev, pasid, pt_id,
+ &iommufd_device_do_attach);
if (rc)
return rc;
@@ -917,8 +968,9 @@ int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id)
EXPORT_SYMBOL_NS_GPL(iommufd_device_attach, "IOMMUFD");
/**
- * iommufd_device_replace - Change the device's iommu_domain
+ * iommufd_device_replace - Change the device/pasid's iommu_domain
* @idev: device to change
+ * @pasid: pasid to change
* @pt_id: Input a IOMMUFD_OBJ_IOAS, or IOMMUFD_OBJ_HWPT_PAGING
* Output the IOMMUFD_OBJ_HWPT_PAGING ID
*
@@ -929,27 +981,33 @@ EXPORT_SYMBOL_NS_GPL(iommufd_device_attach, "IOMMUFD");
*
* If it fails then no change is made to the attachment. The iommu driver may
* implement this so there is no disruption in translation. This can only be
- * called if iommufd_device_attach() has already succeeded.
+ * called if iommufd_device_attach() has already succeeded. @pasid is
+ * IOMMU_NO_PASID for no pasid usage.
*/
-int iommufd_device_replace(struct iommufd_device *idev, u32 *pt_id)
+int iommufd_device_replace(struct iommufd_device *idev, ioasid_t pasid,
+ u32 *pt_id)
{
- return iommufd_device_change_pt(idev, pt_id,
+ return iommufd_device_change_pt(idev, pasid, pt_id,
&iommufd_device_do_replace);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_replace, "IOMMUFD");
/**
- * iommufd_device_detach - Disconnect a device to an iommu_domain
+ * iommufd_device_detach - Disconnect a device/pasid from an iommu_domain
* @idev: device to detach
+ * @pasid: pasid to detach
*
* Undo iommufd_device_attach(). This disconnects the idev from the previously
* attached pt_id. The device returns to blocked DMA translation.
+ * @pasid is IOMMU_NO_PASID for no pasid usage.
*/
-void iommufd_device_detach(struct iommufd_device *idev)
+void iommufd_device_detach(struct iommufd_device *idev, ioasid_t pasid)
{
struct iommufd_hw_pagetable *hwpt;
- hwpt = iommufd_hw_pagetable_detach(idev);
+ hwpt = iommufd_hw_pagetable_detach(idev, pasid);
+ if (!hwpt)
+ return;
iommufd_hw_pagetable_put(idev->ictx, hwpt);
refcount_dec(&idev->obj.users);
}
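Taken together, the per-PASID lifecycle these exports now support looks roughly like this kernel-side fragment (variable names invented, error handling trimmed):

u32 pt_id = user_pt_id;	/* assumed: an IOAS or HWPT object ID from userspace */
int rc;

rc = iommufd_device_attach(idev, pasid, &pt_id);   /* pasid may be IOMMU_NO_PASID */
if (rc)
	return rc;
rc = iommufd_device_replace(idev, pasid, &new_pt_id); /* optional domain switch */
iommufd_device_detach(idev, pasid);                /* undoes the attach */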
@@ -1349,7 +1407,7 @@ int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
struct io_pagetable *iopt;
struct iopt_area *area;
unsigned long last_iova;
- int rc;
+ int rc = -EINVAL;
if (!length)
return -EINVAL;
@@ -1405,7 +1463,8 @@ int iommufd_get_hw_info(struct iommufd_ucmd *ucmd)
void *data;
int rc;
- if (cmd->flags || cmd->__reserved)
+ if (cmd->flags || cmd->__reserved[0] || cmd->__reserved[1] ||
+ cmd->__reserved[2])
return -EOPNOTSUPP;
idev = iommufd_get_device(ucmd, cmd->dev_id);
@@ -1462,6 +1521,36 @@ int iommufd_get_hw_info(struct iommufd_ucmd *ucmd)
if (device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING))
cmd->out_capabilities |= IOMMU_HW_CAP_DIRTY_TRACKING;
+ cmd->out_max_pasid_log2 = 0;
+ /*
+ * Currently, all iommu drivers enable PASID in the probe_device()
+ * op if both the iommu and the device support it. So the max_pasids
+ * stored in dev->iommu indicates both PASID support and enablement
+ * status: a non-zero dev->iommu->max_pasids means PASID is supported
+ * and enabled. iommufd only reports the PASID capability to userspace
+ * if it is enabled.
+ */
+ if (idev->dev->iommu->max_pasids) {
+ cmd->out_max_pasid_log2 = ilog2(idev->dev->iommu->max_pasids);
+
+ if (dev_is_pci(idev->dev)) {
+ struct pci_dev *pdev = to_pci_dev(idev->dev);
+ int ctrl;
+
+ ctrl = pci_pasid_status(pdev);
+
+ WARN_ON_ONCE(ctrl < 0 ||
+ !(ctrl & PCI_PASID_CTRL_ENABLE));
+
+ if (ctrl & PCI_PASID_CTRL_EXEC)
+ cmd->out_capabilities |=
+ IOMMU_HW_CAP_PCI_PASID_EXEC;
+ if (ctrl & PCI_PASID_CTRL_PRIV)
+ cmd->out_capabilities |=
+ IOMMU_HW_CAP_PCI_PASID_PRIV;
+ }
+ }
+
rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_free:
kfree(data);
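On the userspace side the new fields might be decoded like this sketch (struct iommu_hw_info and IOMMU_GET_HW_INFO are the existing uAPI; recall the kernel stores ilog2() of the enabled PASID count):

#include <err.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

/* iommufd: an open /dev/iommu fd; dev_id: a bound device's object ID */
static void show_pasid_caps(int iommufd, __u32 dev_id)
{
	struct iommu_hw_info info = {
		.size = sizeof(info),
		.dev_id = dev_id,
	};

	if (ioctl(iommufd, IOMMU_GET_HW_INFO, &info))
		err(1, "IOMMU_GET_HW_INFO");
	if (info.out_max_pasid_log2)
		printf("~2^%u PASIDs, exec=%d priv=%d\n",
		       info.out_max_pasid_log2,
		       !!(info.out_capabilities & IOMMU_HW_CAP_PCI_PASID_EXEC),
		       !!(info.out_capabilities & IOMMU_HW_CAP_PCI_PASID_PRIV));
}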
diff --git a/drivers/iommu/iommufd/driver.c b/drivers/iommu/iommufd/driver.c
index 2d98b04ff1cb..922cd1fe7ec2 100644
--- a/drivers/iommu/iommufd/driver.c
+++ b/drivers/iommu/iommufd/driver.c
@@ -49,5 +49,203 @@ struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
}
EXPORT_SYMBOL_NS_GPL(iommufd_viommu_find_dev, "IOMMUFD");
+/* Return -ENOENT if the device is not associated with the vIOMMU */
+int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu,
+ struct device *dev, unsigned long *vdev_id)
+{
+ struct iommufd_vdevice *vdev;
+ unsigned long index;
+ int rc = -ENOENT;
+
+ if (WARN_ON_ONCE(!vdev_id))
+ return -EINVAL;
+
+ xa_lock(&viommu->vdevs);
+ xa_for_each(&viommu->vdevs, index, vdev) {
+ if (vdev->dev == dev) {
+ *vdev_id = vdev->id;
+ rc = 0;
+ break;
+ }
+ }
+ xa_unlock(&viommu->vdevs);
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_viommu_get_vdev_id, "IOMMUFD");
+
+/*
+ * Typically called from a driver's threaded IRQ handler.
+ * The @type and @event_data must be defined in include/uapi/linux/iommufd.h
+ */
+int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
+ enum iommu_veventq_type type, void *event_data,
+ size_t data_len)
+{
+ struct iommufd_veventq *veventq;
+ struct iommufd_vevent *vevent;
+ int rc = 0;
+
+ if (WARN_ON_ONCE(!data_len || !event_data))
+ return -EINVAL;
+
+ down_read(&viommu->veventqs_rwsem);
+
+ veventq = iommufd_viommu_find_veventq(viommu, type);
+ if (!veventq) {
+ rc = -EOPNOTSUPP;
+ goto out_unlock_veventqs;
+ }
+
+ spin_lock(&veventq->common.lock);
+ if (veventq->num_events == veventq->depth) {
+ vevent = &veventq->lost_events_header;
+ goto out_set_header;
+ }
+
+ vevent = kzalloc(struct_size(vevent, event_data, data_len), GFP_ATOMIC);
+ if (!vevent) {
+ rc = -ENOMEM;
+ vevent = &veventq->lost_events_header;
+ goto out_set_header;
+ }
+ memcpy(vevent->event_data, event_data, data_len);
+ vevent->data_len = data_len;
+ veventq->num_events++;
+
+out_set_header:
+ iommufd_vevent_handler(veventq, vevent);
+ spin_unlock(&veventq->common.lock);
+out_unlock_veventqs:
+ up_read(&viommu->veventqs_rwsem);
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_viommu_report_event, "IOMMUFD");
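A hedged sketch of how a driver might combine the two helpers above in its threaded IRQ handler (my_event and MY_VEVENTQ_TYPE are placeholders; real payloads are the per-type structs in include/uapi/linux/iommufd.h):

/* Fragment: translate the faulting device to its virtual ID, then queue
 * the event. The helper takes viommu->veventqs_rwsem and may sleep, hence
 * the threaded (not hard) IRQ context; the event itself is allocated
 * GFP_ATOMIC under a spinlock. */
unsigned long vdev_id;

if (iommufd_viommu_get_vdev_id(viommu, dev, &vdev_id))
	return;				/* device not part of this vIOMMU */
my_event.vdev_id = vdev_id;		/* placeholder payload field */
iommufd_viommu_report_event(viommu, MY_VEVENTQ_TYPE, &my_event,
			    sizeof(my_event));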
+
+#ifdef CONFIG_IRQ_MSI_IOMMU
+/*
+ * Get an iommufd_sw_msi_map for the MSI physical address requested by the irq
+ * layer. The mapping to IOVA is global to the iommufd file descriptor; every
+ * domain that is attached to a device using the same MSI parameters will use
+ * the same IOVA.
+ */
+static struct iommufd_sw_msi_map *
+iommufd_sw_msi_get_map(struct iommufd_ctx *ictx, phys_addr_t msi_addr,
+ phys_addr_t sw_msi_start)
+{
+ struct iommufd_sw_msi_map *cur;
+ unsigned int max_pgoff = 0;
+
+ lockdep_assert_held(&ictx->sw_msi_lock);
+
+ list_for_each_entry(cur, &ictx->sw_msi_list, sw_msi_item) {
+ if (cur->sw_msi_start != sw_msi_start)
+ continue;
+ max_pgoff = max(max_pgoff, cur->pgoff + 1);
+ if (cur->msi_addr == msi_addr)
+ return cur;
+ }
+
+ if (ictx->sw_msi_id >=
+ BITS_PER_BYTE * sizeof_field(struct iommufd_sw_msi_maps, bitmap))
+ return ERR_PTR(-EOVERFLOW);
+
+ cur = kzalloc(sizeof(*cur), GFP_KERNEL);
+ if (!cur)
+ return ERR_PTR(-ENOMEM);
+
+ cur->sw_msi_start = sw_msi_start;
+ cur->msi_addr = msi_addr;
+ cur->pgoff = max_pgoff;
+ cur->id = ictx->sw_msi_id++;
+ list_add_tail(&cur->sw_msi_item, &ictx->sw_msi_list);
+ return cur;
+}
+
+int iommufd_sw_msi_install(struct iommufd_ctx *ictx,
+ struct iommufd_hwpt_paging *hwpt_paging,
+ struct iommufd_sw_msi_map *msi_map)
+{
+ unsigned long iova;
+
+ lockdep_assert_held(&ictx->sw_msi_lock);
+
+ iova = msi_map->sw_msi_start + msi_map->pgoff * PAGE_SIZE;
+ if (!test_bit(msi_map->id, hwpt_paging->present_sw_msi.bitmap)) {
+ int rc;
+
+ rc = iommu_map(hwpt_paging->common.domain, iova,
+ msi_map->msi_addr, PAGE_SIZE,
+ IOMMU_WRITE | IOMMU_READ | IOMMU_MMIO,
+ GFP_KERNEL_ACCOUNT);
+ if (rc)
+ return rc;
+ __set_bit(msi_map->id, hwpt_paging->present_sw_msi.bitmap);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_sw_msi_install, "IOMMUFD_INTERNAL");
+
+/*
+ * Called by the irq code if the platform translates the MSI address through
+ * the IOMMU. msi_addr is the physical address of the MSI page. iommufd will
+ * allocate an fd-global IOVA for the physical page that is the same on all
+ * domains and devices.
+ */
+int iommufd_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
+ phys_addr_t msi_addr)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct iommufd_hwpt_paging *hwpt_paging;
+ struct iommu_attach_handle *raw_handle;
+ struct iommufd_attach_handle *handle;
+ struct iommufd_sw_msi_map *msi_map;
+ struct iommufd_ctx *ictx;
+ unsigned long iova;
+ int rc;
+
+ /*
+ * It is safe to call iommu_attach_handle_get() here because the iommu
+ * core code invokes this under the group mutex which also prevents any
+ * change of the attach handle for the duration of this function.
+ */
+ iommu_group_mutex_assert(dev);
+
+ raw_handle =
+ iommu_attach_handle_get(dev->iommu_group, IOMMU_NO_PASID, 0);
+ if (IS_ERR(raw_handle))
+ return 0;
+ hwpt_paging = find_hwpt_paging(domain->iommufd_hwpt);
+
+ handle = to_iommufd_handle(raw_handle);
+ /* No IOMMU_RESV_SW_MSI means no change to the msi_msg */
+ if (handle->idev->igroup->sw_msi_start == PHYS_ADDR_MAX)
+ return 0;
+
+ ictx = handle->idev->ictx;
+ guard(mutex)(&ictx->sw_msi_lock);
+ /*
+ * The input msi_addr is the exact byte offset of the MSI doorbell; we
+ * assume the caller has checked that it is contained within an MMIO
+ * region that is safe to map at PAGE_SIZE.
+ */
+ msi_map = iommufd_sw_msi_get_map(handle->idev->ictx,
+ msi_addr & PAGE_MASK,
+ handle->idev->igroup->sw_msi_start);
+ if (IS_ERR(msi_map))
+ return PTR_ERR(msi_map);
+
+ rc = iommufd_sw_msi_install(ictx, hwpt_paging, msi_map);
+ if (rc)
+ return rc;
+ __set_bit(msi_map->id, handle->idev->igroup->required_sw_msi.bitmap);
+
+ iova = msi_map->sw_msi_start + msi_map->pgoff * PAGE_SIZE;
+ msi_desc_set_iommu_msi_iova(desc, iova, PAGE_SHIFT);
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_sw_msi, "IOMMUFD");
+#endif
+
MODULE_DESCRIPTION("iommufd code shared with builtin modules");
+MODULE_IMPORT_NS("IOMMUFD_INTERNAL");
MODULE_LICENSE("GPL");
diff --git a/drivers/iommu/iommufd/eventq.c b/drivers/iommu/iommufd/eventq.c
new file mode 100644
index 000000000000..f39cf0797347
--- /dev/null
+++ b/drivers/iommu/iommufd/eventq.c
@@ -0,0 +1,598 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2024 Intel Corporation
+ */
+#define pr_fmt(fmt) "iommufd: " fmt
+
+#include <linux/anon_inodes.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/iommufd.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pci-ats.h>
+#include <linux/poll.h>
+#include <uapi/linux/iommufd.h>
+
+#include "../iommu-priv.h"
+#include "iommufd_private.h"
+
+/* IOMMUFD_OBJ_FAULT Functions */
+
+int iommufd_fault_iopf_enable(struct iommufd_device *idev)
+{
+ struct device *dev = idev->dev;
+ int ret;
+
+ /*
+ * Once PCI/PRI support is turned on for a VF, a response failure code
+ * must not be forwarded to the hardware because PRI is a resource
+ * shared between the PF and its VFs, with no coordination for this
+ * shared capability. Recovery waits for a vPRI reset.
+ */
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (pdev->is_virtfn && pci_pri_supported(pdev))
+ return -EINVAL;
+ }
+
+ mutex_lock(&idev->iopf_lock);
+ /* Device iopf has already been on. */
+ if (++idev->iopf_enabled > 1) {
+ mutex_unlock(&idev->iopf_lock);
+ return 0;
+ }
+
+ ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
+ if (ret)
+ --idev->iopf_enabled;
+ mutex_unlock(&idev->iopf_lock);
+
+ return ret;
+}
+
+void iommufd_fault_iopf_disable(struct iommufd_device *idev)
+{
+ mutex_lock(&idev->iopf_lock);
+ if (!WARN_ON(idev->iopf_enabled == 0)) {
+ if (--idev->iopf_enabled == 0)
+ iommu_dev_disable_feature(idev->dev, IOMMU_DEV_FEAT_IOPF);
+ }
+ mutex_unlock(&idev->iopf_lock);
+}
+
+void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_attach_handle *handle)
+{
+ struct iommufd_fault *fault = hwpt->fault;
+ struct iopf_group *group, *next;
+ struct list_head free_list;
+ unsigned long index;
+
+ if (!fault)
+ return;
+ INIT_LIST_HEAD(&free_list);
+
+ mutex_lock(&fault->mutex);
+ spin_lock(&fault->common.lock);
+ list_for_each_entry_safe(group, next, &fault->common.deliver, node) {
+ if (group->attach_handle != &handle->handle)
+ continue;
+ list_move(&group->node, &free_list);
+ }
+ spin_unlock(&fault->common.lock);
+
+ list_for_each_entry_safe(group, next, &free_list, node) {
+ list_del(&group->node);
+ iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
+ iopf_free_group(group);
+ }
+
+ xa_for_each(&fault->response, index, group) {
+ if (group->attach_handle != &handle->handle)
+ continue;
+ xa_erase(&fault->response, index);
+ iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
+ iopf_free_group(group);
+ }
+ mutex_unlock(&fault->mutex);
+}
+
+void iommufd_fault_destroy(struct iommufd_object *obj)
+{
+ struct iommufd_eventq *eventq =
+ container_of(obj, struct iommufd_eventq, obj);
+ struct iommufd_fault *fault = eventq_to_fault(eventq);
+ struct iopf_group *group, *next;
+ unsigned long index;
+
+ /*
+ * The iommufd object's reference count is zero at this point.
+ * We can be confident that no other threads are currently
+ * accessing this pointer. Therefore, acquiring the mutex here
+ * is unnecessary.
+ */
+ list_for_each_entry_safe(group, next, &fault->common.deliver, node) {
+ list_del(&group->node);
+ iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
+ iopf_free_group(group);
+ }
+ xa_for_each(&fault->response, index, group) {
+ xa_erase(&fault->response, index);
+ iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
+ iopf_free_group(group);
+ }
+ xa_destroy(&fault->response);
+ mutex_destroy(&fault->mutex);
+}
+
+static void iommufd_compose_fault_message(struct iommu_fault *fault,
+ struct iommu_hwpt_pgfault *hwpt_fault,
+ struct iommufd_device *idev,
+ u32 cookie)
+{
+ hwpt_fault->flags = fault->prm.flags;
+ hwpt_fault->dev_id = idev->obj.id;
+ hwpt_fault->pasid = fault->prm.pasid;
+ hwpt_fault->grpid = fault->prm.grpid;
+ hwpt_fault->perm = fault->prm.perm;
+ hwpt_fault->addr = fault->prm.addr;
+ hwpt_fault->length = 0;
+ hwpt_fault->cookie = cookie;
+}
+
+/* Fetch the first node out of the fault->deliver list */
+static struct iopf_group *
+iommufd_fault_deliver_fetch(struct iommufd_fault *fault)
+{
+ struct list_head *list = &fault->common.deliver;
+ struct iopf_group *group = NULL;
+
+ spin_lock(&fault->common.lock);
+ if (!list_empty(list)) {
+ group = list_first_entry(list, struct iopf_group, node);
+ list_del(&group->node);
+ }
+ spin_unlock(&fault->common.lock);
+ return group;
+}
+
+/* Restore a node back to the head of the fault->deliver list */
+static void iommufd_fault_deliver_restore(struct iommufd_fault *fault,
+ struct iopf_group *group)
+{
+ spin_lock(&fault->common.lock);
+ list_add(&group->node, &fault->common.deliver);
+ spin_unlock(&fault->common.lock);
+}
+
+static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ size_t fault_size = sizeof(struct iommu_hwpt_pgfault);
+ struct iommufd_eventq *eventq = filep->private_data;
+ struct iommufd_fault *fault = eventq_to_fault(eventq);
+ struct iommu_hwpt_pgfault data = {};
+ struct iommufd_device *idev;
+ struct iopf_group *group;
+ struct iopf_fault *iopf;
+ size_t done = 0;
+ int rc = 0;
+
+ if (*ppos || count % fault_size)
+ return -ESPIPE;
+
+ mutex_lock(&fault->mutex);
+ while ((group = iommufd_fault_deliver_fetch(fault))) {
+ if (done >= count ||
+ group->fault_count * fault_size > count - done) {
+ iommufd_fault_deliver_restore(fault, group);
+ break;
+ }
+
+ rc = xa_alloc(&fault->response, &group->cookie, group,
+ xa_limit_32b, GFP_KERNEL);
+ if (rc) {
+ iommufd_fault_deliver_restore(fault, group);
+ break;
+ }
+
+ idev = to_iommufd_handle(group->attach_handle)->idev;
+ list_for_each_entry(iopf, &group->faults, list) {
+ iommufd_compose_fault_message(&iopf->fault,
+ &data, idev,
+ group->cookie);
+ if (copy_to_user(buf + done, &data, fault_size)) {
+ xa_erase(&fault->response, group->cookie);
+ iommufd_fault_deliver_restore(fault, group);
+ rc = -EFAULT;
+ break;
+ }
+ done += fault_size;
+ }
+ }
+ mutex_unlock(&fault->mutex);
+
+ return done == 0 ? rc : done;
+}
+
+static ssize_t iommufd_fault_fops_write(struct file *filep, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ size_t response_size = sizeof(struct iommu_hwpt_page_response);
+ struct iommufd_eventq *eventq = filep->private_data;
+ struct iommufd_fault *fault = eventq_to_fault(eventq);
+ struct iommu_hwpt_page_response response;
+ struct iopf_group *group;
+ size_t done = 0;
+ int rc = 0;
+
+ if (*ppos || count % response_size)
+ return -ESPIPE;
+
+ mutex_lock(&fault->mutex);
+ while (count > done) {
+ rc = copy_from_user(&response, buf + done, response_size);
+ if (rc)
+ break;
+
+ static_assert((int)IOMMUFD_PAGE_RESP_SUCCESS ==
+ (int)IOMMU_PAGE_RESP_SUCCESS);
+ static_assert((int)IOMMUFD_PAGE_RESP_INVALID ==
+ (int)IOMMU_PAGE_RESP_INVALID);
+ if (response.code != IOMMUFD_PAGE_RESP_SUCCESS &&
+ response.code != IOMMUFD_PAGE_RESP_INVALID) {
+ rc = -EINVAL;
+ break;
+ }
+
+ group = xa_erase(&fault->response, response.cookie);
+ if (!group) {
+ rc = -EINVAL;
+ break;
+ }
+
+ iopf_group_response(group, response.code);
+ iopf_free_group(group);
+ done += response_size;
+ }
+ mutex_unlock(&fault->mutex);
+
+ return done == 0 ? rc : done;
+}
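The read/write semantics above imply a simple userspace loop; a hypothetical sketch (records of one fault group share a cookie and arrive contiguously, so respond once per group):

#include <unistd.h>
#include <linux/iommufd.h>

static int drain_faults(int fault_fd)
{
	struct iommu_hwpt_pgfault ev[16];
	ssize_t i, n = read(fault_fd, ev, sizeof(ev)); /* multiple of record size */

	for (i = 0; i < n / (ssize_t)sizeof(ev[0]); i++) {
		struct iommu_hwpt_page_response resp = {
			.cookie = ev[i].cookie,
			.code = IOMMUFD_PAGE_RESP_SUCCESS, /* after servicing it */
		};

		if (i && ev[i].cookie == ev[i - 1].cookie)
			continue;	/* same group: only one response */
		if (write(fault_fd, &resp, sizeof(resp)) != sizeof(resp))
			return -1;
	}
	return n < 0 ? -1 : 0;
}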
+
+/* IOMMUFD_OBJ_VEVENTQ Functions */
+
+void iommufd_veventq_abort(struct iommufd_object *obj)
+{
+ struct iommufd_eventq *eventq =
+ container_of(obj, struct iommufd_eventq, obj);
+ struct iommufd_veventq *veventq = eventq_to_veventq(eventq);
+ struct iommufd_viommu *viommu = veventq->viommu;
+ struct iommufd_vevent *cur, *next;
+
+ lockdep_assert_held_write(&viommu->veventqs_rwsem);
+
+ list_for_each_entry_safe(cur, next, &eventq->deliver, node) {
+ list_del(&cur->node);
+ if (cur != &veventq->lost_events_header)
+ kfree(cur);
+ }
+
+ refcount_dec(&viommu->obj.users);
+ list_del(&veventq->node);
+}
+
+void iommufd_veventq_destroy(struct iommufd_object *obj)
+{
+ struct iommufd_veventq *veventq = eventq_to_veventq(
+ container_of(obj, struct iommufd_eventq, obj));
+
+ down_write(&veventq->viommu->veventqs_rwsem);
+ iommufd_veventq_abort(obj);
+ up_write(&veventq->viommu->veventqs_rwsem);
+}
+
+static struct iommufd_vevent *
+iommufd_veventq_deliver_fetch(struct iommufd_veventq *veventq)
+{
+ struct iommufd_eventq *eventq = &veventq->common;
+ struct list_head *list = &eventq->deliver;
+ struct iommufd_vevent *vevent = NULL;
+
+ spin_lock(&eventq->lock);
+ if (!list_empty(list)) {
+ struct iommufd_vevent *next;
+
+ next = list_first_entry(list, struct iommufd_vevent, node);
+ /* Make a copy of the lost_events_header for copy_to_user */
+ if (next == &veventq->lost_events_header) {
+ vevent = kzalloc(sizeof(*vevent), GFP_ATOMIC);
+ if (!vevent)
+ goto out_unlock;
+ }
+ list_del(&next->node);
+ if (vevent)
+ memcpy(vevent, next, sizeof(*vevent));
+ else
+ vevent = next;
+ }
+out_unlock:
+ spin_unlock(&eventq->lock);
+ return vevent;
+}
+
+static void iommufd_veventq_deliver_restore(struct iommufd_veventq *veventq,
+ struct iommufd_vevent *vevent)
+{
+ struct iommufd_eventq *eventq = &veventq->common;
+ struct list_head *list = &eventq->deliver;
+
+ spin_lock(&eventq->lock);
+ if (vevent_for_lost_events_header(vevent)) {
+ /* Remove the copy of the lost_events_header */
+ kfree(vevent);
+ vevent = NULL;
+ /* An empty list needs the lost_events_header back */
+ if (list_empty(list))
+ vevent = &veventq->lost_events_header;
+ }
+ if (vevent)
+ list_add(&vevent->node, list);
+ spin_unlock(&eventq->lock);
+}
+
+static ssize_t iommufd_veventq_fops_read(struct file *filep, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iommufd_eventq *eventq = filep->private_data;
+ struct iommufd_veventq *veventq = eventq_to_veventq(eventq);
+ struct iommufd_vevent_header *hdr;
+ struct iommufd_vevent *cur;
+ size_t done = 0;
+ int rc = 0;
+
+ if (*ppos)
+ return -ESPIPE;
+
+ while ((cur = iommufd_veventq_deliver_fetch(veventq))) {
+ /* Validate the remaining bytes against the header size */
+ if (done >= count || sizeof(*hdr) > count - done) {
+ iommufd_veventq_deliver_restore(veventq, cur);
+ break;
+ }
+ hdr = &cur->header;
+
+ /* For a normal vEVENT, validate against the full record size */
+ if (!vevent_for_lost_events_header(cur) &&
+ sizeof(*hdr) + cur->data_len > count - done) {
+ iommufd_veventq_deliver_restore(veventq, cur);
+ break;
+ }
+
+ if (copy_to_user(buf + done, hdr, sizeof(*hdr))) {
+ iommufd_veventq_deliver_restore(veventq, cur);
+ rc = -EFAULT;
+ break;
+ }
+ done += sizeof(*hdr);
+
+ if (cur->data_len &&
+ copy_to_user(buf + done, cur->event_data, cur->data_len)) {
+ iommufd_veventq_deliver_restore(veventq, cur);
+ rc = -EFAULT;
+ break;
+ }
+ spin_lock(&eventq->lock);
+ if (!vevent_for_lost_events_header(cur))
+ veventq->num_events--;
+ spin_unlock(&eventq->lock);
+ done += cur->data_len;
+ kfree(cur);
+ }
+
+ return done == 0 ? rc : done;
+}
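Correspondingly, a userspace parser for one read() batch might look like this sketch (each per-type payload size is fixed by the IOMMU_VEVENTQ_TYPE_* chosen at allocation and is an assumed parameter here):

#include <stddef.h>
#include <linux/iommufd.h>

static void parse_vevents(const char *buf, size_t n, size_t data_len)
{
	size_t off = 0;

	while (off + sizeof(struct iommufd_vevent_header) <= n) {
		const struct iommufd_vevent_header *hdr =
			(const void *)(buf + off);

		off += sizeof(*hdr);
		if (hdr->flags & IOMMU_VEVENTQ_FLAG_LOST_EVENTS)
			continue;	/* overflow marker carries no payload */
		/* buf + off .. buf + off + data_len is one event payload */
		off += data_len;	/* the kernel never splits a record */
	}
}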
+
+/* Common Event Queue Functions */
+
+static __poll_t iommufd_eventq_fops_poll(struct file *filep,
+ struct poll_table_struct *wait)
+{
+ struct iommufd_eventq *eventq = filep->private_data;
+ __poll_t pollflags = 0;
+
+ if (eventq->obj.type == IOMMUFD_OBJ_FAULT)
+ pollflags |= EPOLLOUT;
+
+ poll_wait(filep, &eventq->wait_queue, wait);
+ spin_lock(&eventq->lock);
+ if (!list_empty(&eventq->deliver))
+ pollflags |= EPOLLIN | EPOLLRDNORM;
+ spin_unlock(&eventq->lock);
+
+ return pollflags;
+}
+
+static int iommufd_eventq_fops_release(struct inode *inode, struct file *filep)
+{
+ struct iommufd_eventq *eventq = filep->private_data;
+
+ refcount_dec(&eventq->obj.users);
+ iommufd_ctx_put(eventq->ictx);
+ return 0;
+}
+
+#define INIT_EVENTQ_FOPS(read_op, write_op) \
+ ((const struct file_operations){ \
+ .owner = THIS_MODULE, \
+ .open = nonseekable_open, \
+ .read = read_op, \
+ .write = write_op, \
+ .poll = iommufd_eventq_fops_poll, \
+ .release = iommufd_eventq_fops_release, \
+ })
+
+static int iommufd_eventq_init(struct iommufd_eventq *eventq, char *name,
+ struct iommufd_ctx *ictx,
+ const struct file_operations *fops)
+{
+ struct file *filep;
+ int fdno;
+
+ spin_lock_init(&eventq->lock);
+ INIT_LIST_HEAD(&eventq->deliver);
+ init_waitqueue_head(&eventq->wait_queue);
+
+ filep = anon_inode_getfile(name, fops, eventq, O_RDWR);
+ if (IS_ERR(filep))
+ return PTR_ERR(filep);
+
+ eventq->ictx = ictx;
+ iommufd_ctx_get(eventq->ictx);
+ eventq->filep = filep;
+ refcount_inc(&eventq->obj.users);
+
+ fdno = get_unused_fd_flags(O_CLOEXEC);
+ if (fdno < 0)
+ fput(filep);
+ return fdno;
+}
+
+static const struct file_operations iommufd_fault_fops =
+ INIT_EVENTQ_FOPS(iommufd_fault_fops_read, iommufd_fault_fops_write);
+
+int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
+{
+ struct iommu_fault_alloc *cmd = ucmd->cmd;
+ struct iommufd_fault *fault;
+ int fdno;
+ int rc;
+
+ if (cmd->flags)
+ return -EOPNOTSUPP;
+
+ fault = __iommufd_object_alloc(ucmd->ictx, fault, IOMMUFD_OBJ_FAULT,
+ common.obj);
+ if (IS_ERR(fault))
+ return PTR_ERR(fault);
+
+ xa_init_flags(&fault->response, XA_FLAGS_ALLOC1);
+ mutex_init(&fault->mutex);
+
+ fdno = iommufd_eventq_init(&fault->common, "[iommufd-pgfault]",
+ ucmd->ictx, &iommufd_fault_fops);
+ if (fdno < 0) {
+ rc = fdno;
+ goto out_abort;
+ }
+
+ cmd->out_fault_id = fault->common.obj.id;
+ cmd->out_fault_fd = fdno;
+
+ rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+ if (rc)
+ goto out_put_fdno;
+ iommufd_object_finalize(ucmd->ictx, &fault->common.obj);
+
+ fd_install(fdno, fault->common.filep);
+
+ return 0;
+out_put_fdno:
+ put_unused_fd(fdno);
+ fput(fault->common.filep);
+out_abort:
+ iommufd_object_abort_and_destroy(ucmd->ictx, &fault->common.obj);
+
+ return rc;
+}
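From userspace the object is created with the matching ioctl; a sketch using the fields filled in above (IOMMU_FAULT_QUEUE_ALLOC is the existing uAPI command name):

#include <err.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

struct iommu_fault_alloc cmd = { .size = sizeof(cmd) };

if (ioctl(iommufd, IOMMU_FAULT_QUEUE_ALLOC, &cmd))
	err(1, "IOMMU_FAULT_QUEUE_ALLOC");
/* cmd.out_fault_id is attached to a HWPT at allocation time;
 * cmd.out_fault_fd is the poll()/read()/write()able queue */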
+
+int iommufd_fault_iopf_handler(struct iopf_group *group)
+{
+ struct iommufd_hw_pagetable *hwpt;
+ struct iommufd_fault *fault;
+
+ hwpt = group->attach_handle->domain->iommufd_hwpt;
+ fault = hwpt->fault;
+
+ spin_lock(&fault->common.lock);
+ list_add_tail(&group->node, &fault->common.deliver);
+ spin_unlock(&fault->common.lock);
+
+ wake_up_interruptible(&fault->common.wait_queue);
+
+ return 0;
+}
+
+static const struct file_operations iommufd_veventq_fops =
+ INIT_EVENTQ_FOPS(iommufd_veventq_fops_read, NULL);
+
+int iommufd_veventq_alloc(struct iommufd_ucmd *ucmd)
+{
+ struct iommu_veventq_alloc *cmd = ucmd->cmd;
+ struct iommufd_veventq *veventq;
+ struct iommufd_viommu *viommu;
+ int fdno;
+ int rc;
+
+ if (cmd->flags || cmd->__reserved ||
+ cmd->type == IOMMU_VEVENTQ_TYPE_DEFAULT)
+ return -EOPNOTSUPP;
+ if (!cmd->veventq_depth)
+ return -EINVAL;
+
+ viommu = iommufd_get_viommu(ucmd, cmd->viommu_id);
+ if (IS_ERR(viommu))
+ return PTR_ERR(viommu);
+
+ down_write(&viommu->veventqs_rwsem);
+
+ if (iommufd_viommu_find_veventq(viommu, cmd->type)) {
+ rc = -EEXIST;
+ goto out_unlock_veventqs;
+ }
+
+ veventq = __iommufd_object_alloc(ucmd->ictx, veventq,
+ IOMMUFD_OBJ_VEVENTQ, common.obj);
+ if (IS_ERR(veventq)) {
+ rc = PTR_ERR(veventq);
+ goto out_unlock_veventqs;
+ }
+
+ veventq->type = cmd->type;
+ veventq->viommu = viommu;
+ refcount_inc(&viommu->obj.users);
+ veventq->depth = cmd->veventq_depth;
+ list_add_tail(&veventq->node, &viommu->veventqs);
+ veventq->lost_events_header.header.flags =
+ IOMMU_VEVENTQ_FLAG_LOST_EVENTS;
+
+ fdno = iommufd_eventq_init(&veventq->common, "[iommufd-viommu-event]",
+ ucmd->ictx, &iommufd_veventq_fops);
+ if (fdno < 0) {
+ rc = fdno;
+ goto out_abort;
+ }
+
+ cmd->out_veventq_id = veventq->common.obj.id;
+ cmd->out_veventq_fd = fdno;
+
+ rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+ if (rc)
+ goto out_put_fdno;
+
+ iommufd_object_finalize(ucmd->ictx, &veventq->common.obj);
+ fd_install(fdno, veventq->common.filep);
+ goto out_unlock_veventqs;
+
+out_put_fdno:
+ put_unused_fd(fdno);
+ fput(veventq->common.filep);
+out_abort:
+ iommufd_object_abort_and_destroy(ucmd->ictx, &veventq->common.obj);
+out_unlock_veventqs:
+ up_write(&viommu->veventqs_rwsem);
+ iommufd_put_object(ucmd->ictx, &viommu->obj);
+ return rc;
+}
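And the matching userspace allocation, as a sketch (IOMMU_VEVENTQ_ALLOC is the uAPI command; MY_VEVENTQ_TYPE stands in for a hardware-specific type, since ..._DEFAULT is rejected above):

#include <err.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

struct iommu_veventq_alloc cmd = {
	.size = sizeof(cmd),
	.viommu_id = viommu_id,		/* assumed: an existing vIOMMU object */
	.type = MY_VEVENTQ_TYPE,	/* placeholder, hardware specific */
	.veventq_depth = 128,		/* events held before marking loss */
};

if (ioctl(iommufd, IOMMU_VEVENTQ_ALLOC, &cmd))
	err(1, "IOMMU_VEVENTQ_ALLOC");
/* cmd.out_veventq_fd streams struct iommufd_vevent_header records */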
diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c
deleted file mode 100644
index c48d72c9668c..000000000000
--- a/drivers/iommu/iommufd/fault.c
+++ /dev/null
@@ -1,342 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (C) 2024 Intel Corporation
- */
-#define pr_fmt(fmt) "iommufd: " fmt
-
-#include <linux/anon_inodes.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/iommufd.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/pci.h>
-#include <linux/pci-ats.h>
-#include <linux/poll.h>
-#include <uapi/linux/iommufd.h>
-
-#include "../iommu-priv.h"
-#include "iommufd_private.h"
-
-int iommufd_fault_iopf_enable(struct iommufd_device *idev)
-{
- struct device *dev = idev->dev;
- int ret;
-
- /*
- * Once we turn on PCI/PRI support for VF, the response failure code
- * should not be forwarded to the hardware due to PRI being a shared
- * resource between PF and VFs. There is no coordination for this
- * shared capability. This waits for a vPRI reset to recover.
- */
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
-
- if (pdev->is_virtfn && pci_pri_supported(pdev))
- return -EINVAL;
- }
-
- mutex_lock(&idev->iopf_lock);
- /* Device iopf has already been on. */
- if (++idev->iopf_enabled > 1) {
- mutex_unlock(&idev->iopf_lock);
- return 0;
- }
-
- ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
- if (ret)
- --idev->iopf_enabled;
- mutex_unlock(&idev->iopf_lock);
-
- return ret;
-}
-
-void iommufd_fault_iopf_disable(struct iommufd_device *idev)
-{
- mutex_lock(&idev->iopf_lock);
- if (!WARN_ON(idev->iopf_enabled == 0)) {
- if (--idev->iopf_enabled == 0)
- iommu_dev_disable_feature(idev->dev, IOMMU_DEV_FEAT_IOPF);
- }
- mutex_unlock(&idev->iopf_lock);
-}
-
-void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_attach_handle *handle)
-{
- struct iommufd_fault *fault = hwpt->fault;
- struct iopf_group *group, *next;
- struct list_head free_list;
- unsigned long index;
-
- if (!fault)
- return;
- INIT_LIST_HEAD(&free_list);
-
- mutex_lock(&fault->mutex);
- spin_lock(&fault->lock);
- list_for_each_entry_safe(group, next, &fault->deliver, node) {
- if (group->attach_handle != &handle->handle)
- continue;
- list_move(&group->node, &free_list);
- }
- spin_unlock(&fault->lock);
-
- list_for_each_entry_safe(group, next, &free_list, node) {
- list_del(&group->node);
- iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
- iopf_free_group(group);
- }
-
- xa_for_each(&fault->response, index, group) {
- if (group->attach_handle != &handle->handle)
- continue;
- xa_erase(&fault->response, index);
- iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
- iopf_free_group(group);
- }
- mutex_unlock(&fault->mutex);
-}
-
-void iommufd_fault_destroy(struct iommufd_object *obj)
-{
- struct iommufd_fault *fault = container_of(obj, struct iommufd_fault, obj);
- struct iopf_group *group, *next;
- unsigned long index;
-
- /*
- * The iommufd object's reference count is zero at this point.
- * We can be confident that no other threads are currently
- * accessing this pointer. Therefore, acquiring the mutex here
- * is unnecessary.
- */
- list_for_each_entry_safe(group, next, &fault->deliver, node) {
- list_del(&group->node);
- iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
- iopf_free_group(group);
- }
- xa_for_each(&fault->response, index, group) {
- xa_erase(&fault->response, index);
- iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
- iopf_free_group(group);
- }
- xa_destroy(&fault->response);
- mutex_destroy(&fault->mutex);
-}
-
-static void iommufd_compose_fault_message(struct iommu_fault *fault,
- struct iommu_hwpt_pgfault *hwpt_fault,
- struct iommufd_device *idev,
- u32 cookie)
-{
- hwpt_fault->flags = fault->prm.flags;
- hwpt_fault->dev_id = idev->obj.id;
- hwpt_fault->pasid = fault->prm.pasid;
- hwpt_fault->grpid = fault->prm.grpid;
- hwpt_fault->perm = fault->prm.perm;
- hwpt_fault->addr = fault->prm.addr;
- hwpt_fault->length = 0;
- hwpt_fault->cookie = cookie;
-}
-
-static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
- size_t count, loff_t *ppos)
-{
- size_t fault_size = sizeof(struct iommu_hwpt_pgfault);
- struct iommufd_fault *fault = filep->private_data;
- struct iommu_hwpt_pgfault data = {};
- struct iommufd_device *idev;
- struct iopf_group *group;
- struct iopf_fault *iopf;
- size_t done = 0;
- int rc = 0;
-
- if (*ppos || count % fault_size)
- return -ESPIPE;
-
- mutex_lock(&fault->mutex);
- while ((group = iommufd_fault_deliver_fetch(fault))) {
- if (done >= count ||
- group->fault_count * fault_size > count - done) {
- iommufd_fault_deliver_restore(fault, group);
- break;
- }
-
- rc = xa_alloc(&fault->response, &group->cookie, group,
- xa_limit_32b, GFP_KERNEL);
- if (rc) {
- iommufd_fault_deliver_restore(fault, group);
- break;
- }
-
- idev = to_iommufd_handle(group->attach_handle)->idev;
- list_for_each_entry(iopf, &group->faults, list) {
- iommufd_compose_fault_message(&iopf->fault,
- &data, idev,
- group->cookie);
- if (copy_to_user(buf + done, &data, fault_size)) {
- xa_erase(&fault->response, group->cookie);
- iommufd_fault_deliver_restore(fault, group);
- rc = -EFAULT;
- break;
- }
- done += fault_size;
- }
- }
- mutex_unlock(&fault->mutex);
-
- return done == 0 ? rc : done;
-}
-
-static ssize_t iommufd_fault_fops_write(struct file *filep, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- size_t response_size = sizeof(struct iommu_hwpt_page_response);
- struct iommufd_fault *fault = filep->private_data;
- struct iommu_hwpt_page_response response;
- struct iopf_group *group;
- size_t done = 0;
- int rc = 0;
-
- if (*ppos || count % response_size)
- return -ESPIPE;
-
- mutex_lock(&fault->mutex);
- while (count > done) {
- rc = copy_from_user(&response, buf + done, response_size);
- if (rc)
- break;
-
- static_assert((int)IOMMUFD_PAGE_RESP_SUCCESS ==
- (int)IOMMU_PAGE_RESP_SUCCESS);
- static_assert((int)IOMMUFD_PAGE_RESP_INVALID ==
- (int)IOMMU_PAGE_RESP_INVALID);
- if (response.code != IOMMUFD_PAGE_RESP_SUCCESS &&
- response.code != IOMMUFD_PAGE_RESP_INVALID) {
- rc = -EINVAL;
- break;
- }
-
- group = xa_erase(&fault->response, response.cookie);
- if (!group) {
- rc = -EINVAL;
- break;
- }
-
- iopf_group_response(group, response.code);
- iopf_free_group(group);
- done += response_size;
- }
- mutex_unlock(&fault->mutex);
-
- return done == 0 ? rc : done;
-}
-
-static __poll_t iommufd_fault_fops_poll(struct file *filep,
- struct poll_table_struct *wait)
-{
- struct iommufd_fault *fault = filep->private_data;
- __poll_t pollflags = EPOLLOUT;
-
- poll_wait(filep, &fault->wait_queue, wait);
- spin_lock(&fault->lock);
- if (!list_empty(&fault->deliver))
- pollflags |= EPOLLIN | EPOLLRDNORM;
- spin_unlock(&fault->lock);
-
- return pollflags;
-}
-
-static int iommufd_fault_fops_release(struct inode *inode, struct file *filep)
-{
- struct iommufd_fault *fault = filep->private_data;
-
- refcount_dec(&fault->obj.users);
- iommufd_ctx_put(fault->ictx);
- return 0;
-}
-
-static const struct file_operations iommufd_fault_fops = {
- .owner = THIS_MODULE,
- .open = nonseekable_open,
- .read = iommufd_fault_fops_read,
- .write = iommufd_fault_fops_write,
- .poll = iommufd_fault_fops_poll,
- .release = iommufd_fault_fops_release,
-};
-
-int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
-{
- struct iommu_fault_alloc *cmd = ucmd->cmd;
- struct iommufd_fault *fault;
- struct file *filep;
- int fdno;
- int rc;
-
- if (cmd->flags)
- return -EOPNOTSUPP;
-
- fault = iommufd_object_alloc(ucmd->ictx, fault, IOMMUFD_OBJ_FAULT);
- if (IS_ERR(fault))
- return PTR_ERR(fault);
-
- fault->ictx = ucmd->ictx;
- INIT_LIST_HEAD(&fault->deliver);
- xa_init_flags(&fault->response, XA_FLAGS_ALLOC1);
- mutex_init(&fault->mutex);
- spin_lock_init(&fault->lock);
- init_waitqueue_head(&fault->wait_queue);
-
- filep = anon_inode_getfile("[iommufd-pgfault]", &iommufd_fault_fops,
- fault, O_RDWR);
- if (IS_ERR(filep)) {
- rc = PTR_ERR(filep);
- goto out_abort;
- }
-
- refcount_inc(&fault->obj.users);
- iommufd_ctx_get(fault->ictx);
- fault->filep = filep;
-
- fdno = get_unused_fd_flags(O_CLOEXEC);
- if (fdno < 0) {
- rc = fdno;
- goto out_fput;
- }
-
- cmd->out_fault_id = fault->obj.id;
- cmd->out_fault_fd = fdno;
-
- rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
- if (rc)
- goto out_put_fdno;
- iommufd_object_finalize(ucmd->ictx, &fault->obj);
-
- fd_install(fdno, fault->filep);
-
- return 0;
-out_put_fdno:
- put_unused_fd(fdno);
-out_fput:
- fput(filep);
-out_abort:
- iommufd_object_abort_and_destroy(ucmd->ictx, &fault->obj);
-
- return rc;
-}
-
-int iommufd_fault_iopf_handler(struct iopf_group *group)
-{
- struct iommufd_hw_pagetable *hwpt;
- struct iommufd_fault *fault;
-
- hwpt = group->attach_handle->domain->iommufd_hwpt;
- fault = hwpt->fault;
-
- spin_lock(&fault->lock);
- list_add_tail(&group->node, &fault->deliver);
- spin_unlock(&fault->lock);
-
- wake_up_interruptible(&fault->wait_queue);
-
- return 0;
-}
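
The deleted read/write handlers above define the fault fd protocol that the relocated eventq code preserves: reads must be a multiple of sizeof(struct iommu_hwpt_pgfault) and return whole records, and responses echo the record's cookie. A hedged userspace sketch of one service iteration, with struct layouts taken from the uapi header:

#include <unistd.h>
#include <linux/iommufd.h>

static void service_one_fault(int fault_fd)
{
	struct iommu_hwpt_pgfault pgfault;
	struct iommu_hwpt_page_response resp = {};

	/* Unaligned counts are refused (-ESPIPE). Real consumers read a
	 * larger buffer, since a fault group is only delivered when all
	 * of its records fit in the remaining space. */
	if (read(fault_fd, &pgfault, sizeof(pgfault)) != (ssize_t)sizeof(pgfault))
		return;

	resp.cookie = pgfault.cookie;	/* pairs the response with the fault */
	resp.code = IOMMUFD_PAGE_RESP_SUCCESS;
	write(fault_fd, &resp, sizeof(resp));
}
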
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index 7de6e914232e..487779470261 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -14,7 +14,7 @@ static void __iommufd_hwpt_destroy(struct iommufd_hw_pagetable *hwpt)
iommu_domain_free(hwpt->domain);
if (hwpt->fault)
- refcount_dec(&hwpt->fault->obj.users);
+ refcount_dec(&hwpt->fault->common.obj.users);
}
void iommufd_hwpt_paging_destroy(struct iommufd_object *obj)
@@ -90,6 +90,7 @@ iommufd_hwpt_paging_enforce_cc(struct iommufd_hwpt_paging *hwpt_paging)
* @ictx: iommufd context
* @ioas: IOAS to associate the domain with
* @idev: Device to get an iommu_domain for
+ * @pasid: PASID to get an iommu_domain for
* @flags: Flags from userspace
* @immediate_attach: True if idev should be attached to the hwpt
* @user_data: The user provided driver specific data describing the domain to
@@ -105,13 +106,14 @@ iommufd_hwpt_paging_enforce_cc(struct iommufd_hwpt_paging *hwpt_paging)
*/
struct iommufd_hwpt_paging *
iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
- struct iommufd_device *idev, u32 flags,
- bool immediate_attach,
+ struct iommufd_device *idev, ioasid_t pasid,
+ u32 flags, bool immediate_attach,
const struct iommu_user_data *user_data)
{
const u32 valid_flags = IOMMU_HWPT_ALLOC_NEST_PARENT |
IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
- IOMMU_HWPT_FAULT_ID_VALID;
+ IOMMU_HWPT_FAULT_ID_VALID |
+ IOMMU_HWPT_ALLOC_PASID;
const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
struct iommufd_hwpt_paging *hwpt_paging;
struct iommufd_hw_pagetable *hwpt;
@@ -126,12 +128,16 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
if ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
!device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING))
return ERR_PTR(-EOPNOTSUPP);
+ if ((flags & IOMMU_HWPT_FAULT_ID_VALID) &&
+ (flags & IOMMU_HWPT_ALLOC_NEST_PARENT))
+ return ERR_PTR(-EOPNOTSUPP);
hwpt_paging = __iommufd_object_alloc(
ictx, hwpt_paging, IOMMUFD_OBJ_HWPT_PAGING, common.obj);
if (IS_ERR(hwpt_paging))
return ERR_CAST(hwpt_paging);
hwpt = &hwpt_paging->common;
+ hwpt->pasid_compat = flags & IOMMU_HWPT_ALLOC_PASID;
INIT_LIST_HEAD(&hwpt_paging->hwpt_item);
/* Pairs with iommufd_hw_pagetable_destroy() */
@@ -156,7 +162,8 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
goto out_abort;
}
}
- iommu_domain_set_sw_msi(hwpt->domain, iommufd_sw_msi);
+ hwpt->domain->iommufd_hwpt = hwpt;
+ hwpt->domain->cookie_type = IOMMU_COOKIE_IOMMUFD;
/*
* Set the coherency mode before we do iopt_table_add_domain() as some
@@ -185,7 +192,7 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
* sequence. Once those drivers are fixed this should be removed.
*/
if (immediate_attach) {
- rc = iommufd_hw_pagetable_attach(hwpt, idev);
+ rc = iommufd_hw_pagetable_attach(hwpt, idev, pasid);
if (rc)
goto out_abort;
}
@@ -198,7 +205,7 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
out_detach:
if (immediate_attach)
- iommufd_hw_pagetable_detach(idev);
+ iommufd_hw_pagetable_detach(idev, pasid);
out_abort:
iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
return ERR_PTR(rc);
@@ -227,7 +234,7 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
struct iommufd_hw_pagetable *hwpt;
int rc;
- if ((flags & ~IOMMU_HWPT_FAULT_ID_VALID) ||
+ if ((flags & ~(IOMMU_HWPT_FAULT_ID_VALID | IOMMU_HWPT_ALLOC_PASID)) ||
!user_data->len || !ops->domain_alloc_nested)
return ERR_PTR(-EOPNOTSUPP);
if (parent->auto_domain || !parent->nest_parent ||
@@ -239,6 +246,7 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
if (IS_ERR(hwpt_nested))
return ERR_CAST(hwpt_nested);
hwpt = &hwpt_nested->common;
+ hwpt->pasid_compat = flags & IOMMU_HWPT_ALLOC_PASID;
refcount_inc(&parent->common.obj.users);
hwpt_nested->parent = parent;
@@ -252,7 +260,8 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
goto out_abort;
}
hwpt->domain->owner = ops;
- iommu_domain_set_sw_msi(hwpt->domain, iommufd_sw_msi);
+ hwpt->domain->iommufd_hwpt = hwpt;
+ hwpt->domain->cookie_type = IOMMU_COOKIE_IOMMUFD;
if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
rc = -EINVAL;
@@ -282,7 +291,7 @@ iommufd_viommu_alloc_hwpt_nested(struct iommufd_viommu *viommu, u32 flags,
struct iommufd_hw_pagetable *hwpt;
int rc;
- if (flags & ~IOMMU_HWPT_FAULT_ID_VALID)
+ if (flags & ~(IOMMU_HWPT_FAULT_ID_VALID | IOMMU_HWPT_ALLOC_PASID))
return ERR_PTR(-EOPNOTSUPP);
if (!user_data->len)
return ERR_PTR(-EOPNOTSUPP);
@@ -294,6 +303,7 @@ iommufd_viommu_alloc_hwpt_nested(struct iommufd_viommu *viommu, u32 flags,
if (IS_ERR(hwpt_nested))
return ERR_CAST(hwpt_nested);
hwpt = &hwpt_nested->common;
+ hwpt->pasid_compat = flags & IOMMU_HWPT_ALLOC_PASID;
hwpt_nested->viommu = viommu;
refcount_inc(&viommu->obj.users);
@@ -308,8 +318,9 @@ iommufd_viommu_alloc_hwpt_nested(struct iommufd_viommu *viommu, u32 flags,
hwpt->domain = NULL;
goto out_abort;
}
+ hwpt->domain->iommufd_hwpt = hwpt;
hwpt->domain->owner = viommu->iommu_dev->ops;
- iommu_domain_set_sw_msi(hwpt->domain, iommufd_sw_msi);
+ hwpt->domain->cookie_type = IOMMU_COOKIE_IOMMUFD;
if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
rc = -EINVAL;
@@ -358,8 +369,8 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
ioas = container_of(pt_obj, struct iommufd_ioas, obj);
mutex_lock(&ioas->mutex);
hwpt_paging = iommufd_hwpt_paging_alloc(
- ucmd->ictx, ioas, idev, cmd->flags, false,
- user_data.len ? &user_data : NULL);
+ ucmd->ictx, ioas, idev, IOMMU_NO_PASID, cmd->flags,
+ false, user_data.len ? &user_data : NULL);
if (IS_ERR(hwpt_paging)) {
rc = PTR_ERR(hwpt_paging);
goto out_unlock;
@@ -409,10 +420,9 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
}
hwpt->fault = fault;
hwpt->domain->iopf_handler = iommufd_fault_iopf_handler;
- refcount_inc(&fault->obj.users);
- iommufd_put_object(ucmd->ictx, &fault->obj);
+ refcount_inc(&fault->common.obj.users);
+ iommufd_put_object(ucmd->ictx, &fault->common.obj);
}
- hwpt->domain->iommufd_hwpt = hwpt;
cmd->out_hwpt_id = hwpt->obj.id;
rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 246297452a44..80e8c76d25f2 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -32,8 +32,11 @@ struct iommufd_sw_msi_maps {
DECLARE_BITMAP(bitmap, 64);
};
-int iommufd_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
- phys_addr_t msi_addr);
+#ifdef CONFIG_IRQ_MSI_IOMMU
+int iommufd_sw_msi_install(struct iommufd_ctx *ictx,
+ struct iommufd_hwpt_paging *hwpt_paging,
+ struct iommufd_sw_msi_map *msi_map);
+#endif
struct iommufd_ctx {
struct file *file;
@@ -296,6 +299,7 @@ struct iommufd_hw_pagetable {
struct iommufd_object obj;
struct iommu_domain *domain;
struct iommufd_fault *fault;
+ bool pasid_compat : 1;
};
struct iommufd_hwpt_paging {
@@ -366,13 +370,13 @@ int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd);
struct iommufd_hwpt_paging *
iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
- struct iommufd_device *idev, u32 flags,
- bool immediate_attach,
+ struct iommufd_device *idev, ioasid_t pasid,
+ u32 flags, bool immediate_attach,
const struct iommu_user_data *user_data);
int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev);
+ struct iommufd_device *idev, ioasid_t pasid);
struct iommufd_hw_pagetable *
-iommufd_hw_pagetable_detach(struct iommufd_device *idev);
+iommufd_hw_pagetable_detach(struct iommufd_device *idev, ioasid_t pasid);
void iommufd_hwpt_paging_destroy(struct iommufd_object *obj);
void iommufd_hwpt_paging_abort(struct iommufd_object *obj);
void iommufd_hwpt_nested_destroy(struct iommufd_object *obj);
@@ -396,13 +400,14 @@ static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx,
refcount_dec(&hwpt->obj.users);
}
+struct iommufd_attach;
+
struct iommufd_group {
struct kref ref;
struct mutex lock;
struct iommufd_ctx *ictx;
struct iommu_group *group;
- struct iommufd_hw_pagetable *hwpt;
- struct list_head device_list;
+ struct xarray pasid_attach;
struct iommufd_sw_msi_maps required_sw_msi;
phys_addr_t sw_msi_start;
};
@@ -454,49 +459,17 @@ void iopt_remove_access(struct io_pagetable *iopt,
u32 iopt_access_list_id);
void iommufd_access_destroy_object(struct iommufd_object *obj);
-/*
- * An iommufd_fault object represents an interface to deliver I/O page faults
- * to the user space. These objects are created/destroyed by the user space and
- * associated with hardware page table objects during page-table allocation.
- */
-struct iommufd_fault {
+struct iommufd_eventq {
struct iommufd_object obj;
struct iommufd_ctx *ictx;
struct file *filep;
spinlock_t lock; /* protects the deliver list */
struct list_head deliver;
- struct mutex mutex; /* serializes response flows */
- struct xarray response;
struct wait_queue_head wait_queue;
};
-/* Fetch the first node out of the fault->deliver list */
-static inline struct iopf_group *
-iommufd_fault_deliver_fetch(struct iommufd_fault *fault)
-{
- struct list_head *list = &fault->deliver;
- struct iopf_group *group = NULL;
-
- spin_lock(&fault->lock);
- if (!list_empty(list)) {
- group = list_first_entry(list, struct iopf_group, node);
- list_del(&group->node);
- }
- spin_unlock(&fault->lock);
- return group;
-}
-
-/* Restore a node back to the head of the fault->deliver list */
-static inline void iommufd_fault_deliver_restore(struct iommufd_fault *fault,
- struct iopf_group *group)
-{
- spin_lock(&fault->lock);
- list_add(&group->node, &fault->deliver);
- spin_unlock(&fault->lock);
-}
-
struct iommufd_attach_handle {
struct iommu_attach_handle handle;
struct iommufd_device *idev;
@@ -505,12 +478,29 @@ struct iommufd_attach_handle {
/* Convert an iommu attach handle to iommufd handle. */
#define to_iommufd_handle(hdl) container_of(hdl, struct iommufd_attach_handle, handle)
+/*
+ * An iommufd_fault object represents an interface to deliver I/O page faults
+ * to the user space. These objects are created/destroyed by the user space and
+ * associated with hardware page table objects during page-table allocation.
+ */
+struct iommufd_fault {
+ struct iommufd_eventq common;
+ struct mutex mutex; /* serializes response flows */
+ struct xarray response;
+};
+
+static inline struct iommufd_fault *
+eventq_to_fault(struct iommufd_eventq *eventq)
+{
+ return container_of(eventq, struct iommufd_fault, common);
+}
+
static inline struct iommufd_fault *
iommufd_get_fault(struct iommufd_ucmd *ucmd, u32 id)
{
return container_of(iommufd_get_object(ucmd->ictx, id,
IOMMUFD_OBJ_FAULT),
- struct iommufd_fault, obj);
+ struct iommufd_fault, common.obj);
}
int iommufd_fault_alloc(struct iommufd_ucmd *ucmd);
@@ -522,6 +512,74 @@ void iommufd_fault_iopf_disable(struct iommufd_device *idev);
void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
struct iommufd_attach_handle *handle);
+/* An iommufd_vevent represents a vIOMMU event in an iommufd_veventq */
+struct iommufd_vevent {
+ struct iommufd_vevent_header header;
+ struct list_head node; /* for iommufd_eventq::deliver */
+ ssize_t data_len;
+ u64 event_data[] __counted_by(data_len);
+};
+
+#define vevent_for_lost_events_header(vevent) \
+ (vevent->header.flags & IOMMU_VEVENTQ_FLAG_LOST_EVENTS)
+
+/*
+ * An iommufd_veventq object represents an interface to deliver vIOMMU events to
+ * the user space. It is created/destroyed by the user space and associated with
+ * a vIOMMU object during the allocations.
+ */
+struct iommufd_veventq {
+ struct iommufd_eventq common;
+ struct iommufd_viommu *viommu;
+ struct list_head node; /* for iommufd_viommu::veventqs */
+ struct iommufd_vevent lost_events_header;
+
+ unsigned int type;
+ unsigned int depth;
+
+ /* Use common.lock for protection */
+ u32 num_events;
+ u32 sequence;
+};
+
+static inline struct iommufd_veventq *
+eventq_to_veventq(struct iommufd_eventq *eventq)
+{
+ return container_of(eventq, struct iommufd_veventq, common);
+}
+
+static inline struct iommufd_veventq *
+iommufd_get_veventq(struct iommufd_ucmd *ucmd, u32 id)
+{
+ return container_of(iommufd_get_object(ucmd->ictx, id,
+ IOMMUFD_OBJ_VEVENTQ),
+ struct iommufd_veventq, common.obj);
+}
+
+int iommufd_veventq_alloc(struct iommufd_ucmd *ucmd);
+void iommufd_veventq_destroy(struct iommufd_object *obj);
+void iommufd_veventq_abort(struct iommufd_object *obj);
+
+static inline void iommufd_vevent_handler(struct iommufd_veventq *veventq,
+ struct iommufd_vevent *vevent)
+{
+ struct iommufd_eventq *eventq = &veventq->common;
+
+ lockdep_assert_held(&eventq->lock);
+
+ /*
+ * Remove the lost_events_header and add the new node at the same time.
+ * Note that the new node can itself be the lost_events_header, in which
+ * case only its sequence number is updated.
+ */
+ if (list_is_last(&veventq->lost_events_header.node, &eventq->deliver))
+ list_del(&veventq->lost_events_header.node);
+ list_add_tail(&vevent->node, &eventq->deliver);
+ vevent->header.sequence = veventq->sequence;
+ veventq->sequence = (veventq->sequence + 1) & INT_MAX;
+
+ wake_up_interruptible(&eventq->wait_queue);
+}
+
static inline struct iommufd_viommu *
iommufd_get_viommu(struct iommufd_ucmd *ucmd, u32 id)
{
@@ -530,6 +588,20 @@ iommufd_get_viommu(struct iommufd_ucmd *ucmd, u32 id)
struct iommufd_viommu, obj);
}
+static inline struct iommufd_veventq *
+iommufd_viommu_find_veventq(struct iommufd_viommu *viommu, u32 type)
+{
+ struct iommufd_veventq *veventq, *next;
+
+ lockdep_assert_held(&viommu->veventqs_rwsem);
+
+ list_for_each_entry_safe(veventq, next, &viommu->veventqs, node) {
+ if (veventq->type == type)
+ return veventq;
+ }
+ return NULL;
+}
+
int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_viommu_destroy(struct iommufd_object *obj);
int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd);
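
Given the header semantics above (the sequence number increments per event and wraps at INT_MAX, and an overflow is reported through the lost_events_header record flagged IOMMU_VEVENTQ_FLAG_LOST_EVENTS), a reader can detect dropped events roughly as in this sketch (illustrative only):

#include <limits.h>
#include <linux/iommufd.h>

static int vevent_lost(const struct iommufd_vevent_header *hdr, __u32 *expected)
{
	if (hdr->flags & IOMMU_VEVENTQ_FLAG_LOST_EVENTS)
		return 1;			/* queue overflowed */
	if (hdr->sequence != *expected)
		return 1;			/* gap in sequence numbers */
	*expected = (*expected + 1) & INT_MAX;	/* same wrap as the producer */
	return 0;
}
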
diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h
index a6b7a163f636..1cd7e8394129 100644
--- a/drivers/iommu/iommufd/iommufd_test.h
+++ b/drivers/iommu/iommufd/iommufd_test.h
@@ -24,6 +24,11 @@ enum {
IOMMU_TEST_OP_MD_CHECK_IOTLB,
IOMMU_TEST_OP_TRIGGER_IOPF,
IOMMU_TEST_OP_DEV_CHECK_CACHE,
+ IOMMU_TEST_OP_TRIGGER_VEVENT,
+ IOMMU_TEST_OP_PASID_ATTACH,
+ IOMMU_TEST_OP_PASID_REPLACE,
+ IOMMU_TEST_OP_PASID_DETACH,
+ IOMMU_TEST_OP_PASID_CHECK_HWPT,
};
enum {
@@ -48,6 +53,7 @@ enum {
enum {
MOCK_FLAGS_DEVICE_NO_DIRTY = 1 << 0,
MOCK_FLAGS_DEVICE_HUGE_IOVA = 1 << 1,
+ MOCK_FLAGS_DEVICE_PASID = 1 << 2,
};
enum {
@@ -60,6 +66,9 @@ enum {
MOCK_DEV_CACHE_NUM = 4,
};
+/* Reserved for special pasid replace test */
+#define IOMMU_TEST_PASID_RESERVED 1024
+
struct iommu_test_cmd {
__u32 size;
__u32 op;
@@ -145,11 +154,36 @@ struct iommu_test_cmd {
__u32 id;
__u32 cache;
} check_dev_cache;
+ struct {
+ __u32 dev_id;
+ } trigger_vevent;
+ struct {
+ __u32 pasid;
+ __u32 pt_id;
+ /* @id is stdev_id */
+ } pasid_attach;
+ struct {
+ __u32 pasid;
+ __u32 pt_id;
+ /* @id is stdev_id */
+ } pasid_replace;
+ struct {
+ __u32 pasid;
+ /* @id is stdev_id */
+ } pasid_detach;
+ struct {
+ __u32 pasid;
+ __u32 hwpt_id;
+ /* @id is stdev_id */
+ } pasid_check;
};
__u32 last;
};
#define IOMMU_TEST_CMD _IO(IOMMUFD_TYPE, IOMMUFD_CMD_BASE + 32)
+/* Mock device/iommu PASID width */
+#define MOCK_PASID_WIDTH 20
+
/* Mock structs for IOMMU_DEVICE_GET_HW_INFO ioctl */
#define IOMMU_HW_INFO_TYPE_SELFTEST 0xfeedbeef
#define IOMMU_HW_INFO_SELFTEST_REGVAL 0xdeadbeef
@@ -212,4 +246,10 @@ struct iommu_viommu_invalidate_selftest {
__u32 cache_id;
};
+#define IOMMU_VEVENTQ_TYPE_SELFTEST 0xbeefbeef
+
+struct iommu_viommu_event_selftest {
+ __u32 virt_id;
+};
+
#endif
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index b6fa9fd11bc1..3df468f64e7d 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -317,6 +317,7 @@ union ucmd_buffer {
struct iommu_ioas_unmap unmap;
struct iommu_option option;
struct iommu_vdevice_alloc vdev;
+ struct iommu_veventq_alloc veventq;
struct iommu_vfio_ioas vfio_ioas;
struct iommu_viommu_alloc viommu;
#ifdef CONFIG_IOMMUFD_TEST
@@ -372,6 +373,8 @@ static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
IOCTL_OP(IOMMU_OPTION, iommufd_option, struct iommu_option, val64),
IOCTL_OP(IOMMU_VDEVICE_ALLOC, iommufd_vdevice_alloc_ioctl,
struct iommu_vdevice_alloc, virt_id),
+ IOCTL_OP(IOMMU_VEVENTQ_ALLOC, iommufd_veventq_alloc,
+ struct iommu_veventq_alloc, out_veventq_fd),
IOCTL_OP(IOMMU_VFIO_IOAS, iommufd_vfio_ioas, struct iommu_vfio_ioas,
__reserved),
IOCTL_OP(IOMMU_VIOMMU_ALLOC, iommufd_viommu_alloc_ioctl,
@@ -514,6 +517,10 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
[IOMMUFD_OBJ_VDEVICE] = {
.destroy = iommufd_vdevice_destroy,
},
+ [IOMMUFD_OBJ_VEVENTQ] = {
+ .destroy = iommufd_veventq_destroy,
+ .abort = iommufd_veventq_abort,
+ },
[IOMMUFD_OBJ_VIOMMU] = {
.destroy = iommufd_viommu_destroy,
},
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index d40deb0a4f06..18d9a216eb30 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -161,9 +161,13 @@ enum selftest_obj_type {
struct mock_dev {
struct device dev;
+ struct mock_viommu *viommu;
+ struct rw_semaphore viommu_rwsem;
unsigned long flags;
+ unsigned long vdev_id;
int id;
u32 cache[MOCK_DEV_CACHE_NUM];
+ atomic_t pasid_1024_fake_error;
};
static inline struct mock_dev *to_mock_dev(struct device *dev)
@@ -193,15 +197,71 @@ static int mock_domain_nop_attach(struct iommu_domain *domain,
struct device *dev)
{
struct mock_dev *mdev = to_mock_dev(dev);
+ struct mock_viommu *new_viommu = NULL;
+ unsigned long vdev_id = 0;
+ int rc;
if (domain->dirty_ops && (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY))
return -EINVAL;
+ iommu_group_mutex_assert(dev);
+ if (domain->type == IOMMU_DOMAIN_NESTED) {
+ new_viommu = to_mock_nested(domain)->mock_viommu;
+ if (new_viommu) {
+ rc = iommufd_viommu_get_vdev_id(&new_viommu->core, dev,
+ &vdev_id);
+ if (rc)
+ return rc;
+ }
+ }
+ if (new_viommu != mdev->viommu) {
+ down_write(&mdev->viommu_rwsem);
+ mdev->viommu = new_viommu;
+ mdev->vdev_id = vdev_id;
+ up_write(&mdev->viommu_rwsem);
+ }
+
+ return 0;
+}
+
+static int mock_domain_set_dev_pasid_nop(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid,
+ struct iommu_domain *old)
+{
+ struct mock_dev *mdev = to_mock_dev(dev);
+
+ /*
+ * On the first attach with pasid 1024, set
+ * mdev->pasid_1024_fake_error so that the second call of this op
+ * fakes an error, validating the error path of the core. This is
+ * helpful to test the case in which the iommu core needs to roll
+ * back to the old domain due to a driver failure, e.g. on replace.
+ * Note that a third call of this op shall succeed, since
+ * mdev->pasid_1024_fake_error is cleared by the second call.
+ */
+ if (pasid == 1024) {
+ if (domain->type == IOMMU_DOMAIN_BLOCKED) {
+ atomic_set(&mdev->pasid_1024_fake_error, 0);
+ } else if (atomic_read(&mdev->pasid_1024_fake_error)) {
+ /*
+ * Clear the flag, and fake an error to fail the
+ * replacement.
+ */
+ atomic_set(&mdev->pasid_1024_fake_error, 0);
+ return -ENOMEM;
+ } else {
+ /* Set the flag to fake an error in next call */
+ atomic_set(&mdev->pasid_1024_fake_error, 1);
+ }
+ }
+
return 0;
}
static const struct iommu_domain_ops mock_blocking_ops = {
.attach_dev = mock_domain_nop_attach,
+ .set_dev_pasid = mock_domain_set_dev_pasid_nop
};
static struct iommu_domain mock_blocking_domain = {
@@ -343,7 +403,7 @@ mock_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
struct mock_iommu_domain_nested *mock_nested;
struct mock_iommu_domain *mock_parent;
- if (flags)
+ if (flags & ~IOMMU_HWPT_ALLOC_PASID)
return ERR_PTR(-EOPNOTSUPP);
if (!parent || parent->ops != mock_ops.default_domain_ops)
return ERR_PTR(-EINVAL);
@@ -365,7 +425,8 @@ mock_domain_alloc_paging_flags(struct device *dev, u32 flags,
{
bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
- IOMMU_HWPT_ALLOC_NEST_PARENT;
+ IOMMU_HWPT_ALLOC_NEST_PARENT |
+ IOMMU_HWPT_ALLOC_PASID;
struct mock_dev *mdev = to_mock_dev(dev);
bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY;
struct mock_iommu_domain *mock;
@@ -585,7 +646,7 @@ mock_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
struct mock_iommu_domain_nested *mock_nested;
- if (flags)
+ if (flags & ~IOMMU_HWPT_ALLOC_PASID)
return ERR_PTR(-EOPNOTSUPP);
mock_nested = __mock_domain_alloc_nested(user_data);
@@ -720,6 +781,7 @@ static const struct iommu_ops mock_ops = {
.map_pages = mock_domain_map_pages,
.unmap_pages = mock_domain_unmap_pages,
.iova_to_phys = mock_domain_iova_to_phys,
+ .set_dev_pasid = mock_domain_set_dev_pasid_nop,
},
};
@@ -780,6 +842,7 @@ static struct iommu_domain_ops domain_nested_ops = {
.free = mock_domain_free_nested,
.attach_dev = mock_domain_nop_attach,
.cache_invalidate_user = mock_domain_cache_invalidate_user,
+ .set_dev_pasid = mock_domain_set_dev_pasid_nop,
};
static inline struct iommufd_hw_pagetable *
@@ -839,17 +902,24 @@ static void mock_dev_release(struct device *dev)
static struct mock_dev *mock_dev_create(unsigned long dev_flags)
{
+ struct property_entry prop[] = {
+ PROPERTY_ENTRY_U32("pasid-num-bits", 0),
+ {},
+ };
+ const u32 valid_flags = MOCK_FLAGS_DEVICE_NO_DIRTY |
+ MOCK_FLAGS_DEVICE_HUGE_IOVA |
+ MOCK_FLAGS_DEVICE_PASID;
struct mock_dev *mdev;
int rc, i;
- if (dev_flags &
- ~(MOCK_FLAGS_DEVICE_NO_DIRTY | MOCK_FLAGS_DEVICE_HUGE_IOVA))
+ if (dev_flags & ~valid_flags)
return ERR_PTR(-EINVAL);
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return ERR_PTR(-ENOMEM);
+ init_rwsem(&mdev->viommu_rwsem);
device_initialize(&mdev->dev);
mdev->flags = dev_flags;
mdev->dev.release = mock_dev_release;
@@ -866,6 +936,15 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags)
if (rc)
goto err_put;
+ if (dev_flags & MOCK_FLAGS_DEVICE_PASID)
+ prop[0] = PROPERTY_ENTRY_U32("pasid-num-bits", MOCK_PASID_WIDTH);
+
+ rc = device_create_managed_software_node(&mdev->dev, prop, NULL);
+ if (rc) {
+ dev_err(&mdev->dev, "add pasid-num-bits property failed, rc: %d", rc);
+ goto err_put;
+ }
+
rc = device_add(&mdev->dev);
if (rc)
goto err_put;
@@ -921,7 +1000,7 @@ static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd,
}
sobj->idev.idev = idev;
- rc = iommufd_device_attach(idev, &pt_id);
+ rc = iommufd_device_attach(idev, IOMMU_NO_PASID, &pt_id);
if (rc)
goto out_unbind;
@@ -936,7 +1015,7 @@ static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd,
return 0;
out_detach:
- iommufd_device_detach(idev);
+ iommufd_device_detach(idev, IOMMU_NO_PASID);
out_unbind:
iommufd_device_unbind(idev);
out_mdev:
@@ -946,39 +1025,49 @@ out_sobj:
return rc;
}
-/* Replace the mock domain with a manually allocated hw_pagetable */
-static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd,
- unsigned int device_id, u32 pt_id,
- struct iommu_test_cmd *cmd)
+static struct selftest_obj *
+iommufd_test_get_selftest_obj(struct iommufd_ctx *ictx, u32 id)
{
struct iommufd_object *dev_obj;
struct selftest_obj *sobj;
- int rc;
/*
* Prefer to use the OBJ_SELFTEST because the destroy_rwsem will ensure
* it doesn't race with detach, which is not allowed.
*/
- dev_obj =
- iommufd_get_object(ucmd->ictx, device_id, IOMMUFD_OBJ_SELFTEST);
+ dev_obj = iommufd_get_object(ictx, id, IOMMUFD_OBJ_SELFTEST);
if (IS_ERR(dev_obj))
- return PTR_ERR(dev_obj);
+ return ERR_CAST(dev_obj);
sobj = to_selftest_obj(dev_obj);
if (sobj->type != TYPE_IDEV) {
- rc = -EINVAL;
- goto out_dev_obj;
+ iommufd_put_object(ictx, dev_obj);
+ return ERR_PTR(-EINVAL);
}
+ return sobj;
+}
- rc = iommufd_device_replace(sobj->idev.idev, &pt_id);
+/* Replace the mock domain with a manually allocated hw_pagetable */
+static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd,
+ unsigned int device_id, u32 pt_id,
+ struct iommu_test_cmd *cmd)
+{
+ struct selftest_obj *sobj;
+ int rc;
+
+ sobj = iommufd_test_get_selftest_obj(ucmd->ictx, device_id);
+ if (IS_ERR(sobj))
+ return PTR_ERR(sobj);
+
+ rc = iommufd_device_replace(sobj->idev.idev, IOMMU_NO_PASID, &pt_id);
if (rc)
- goto out_dev_obj;
+ goto out_sobj;
cmd->mock_domain_replace.pt_id = pt_id;
rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
-out_dev_obj:
- iommufd_put_object(ucmd->ictx, dev_obj);
+out_sobj:
+ iommufd_put_object(ucmd->ictx, &sobj->obj);
return rc;
}
@@ -1597,13 +1686,166 @@ static int iommufd_test_trigger_iopf(struct iommufd_ucmd *ucmd,
return 0;
}
+static int iommufd_test_trigger_vevent(struct iommufd_ucmd *ucmd,
+ struct iommu_test_cmd *cmd)
+{
+ struct iommu_viommu_event_selftest test = {};
+ struct iommufd_device *idev;
+ struct mock_dev *mdev;
+ int rc = -ENOENT;
+
+ idev = iommufd_get_device(ucmd, cmd->trigger_vevent.dev_id);
+ if (IS_ERR(idev))
+ return PTR_ERR(idev);
+ mdev = to_mock_dev(idev->dev);
+
+ down_read(&mdev->viommu_rwsem);
+ if (!mdev->viommu || !mdev->vdev_id)
+ goto out_unlock;
+
+ test.virt_id = mdev->vdev_id;
+ rc = iommufd_viommu_report_event(&mdev->viommu->core,
+ IOMMU_VEVENTQ_TYPE_SELFTEST, &test,
+ sizeof(test));
+out_unlock:
+ up_read(&mdev->viommu_rwsem);
+ iommufd_put_object(ucmd->ictx, &idev->obj);
+
+ return rc;
+}
+
+static inline struct iommufd_hw_pagetable *
+iommufd_get_hwpt(struct iommufd_ucmd *ucmd, u32 id)
+{
+ struct iommufd_object *pt_obj;
+
+ pt_obj = iommufd_get_object(ucmd->ictx, id, IOMMUFD_OBJ_ANY);
+ if (IS_ERR(pt_obj))
+ return ERR_CAST(pt_obj);
+
+ if (pt_obj->type != IOMMUFD_OBJ_HWPT_NESTED &&
+ pt_obj->type != IOMMUFD_OBJ_HWPT_PAGING) {
+ iommufd_put_object(ucmd->ictx, pt_obj);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return container_of(pt_obj, struct iommufd_hw_pagetable, obj);
+}
+
+static int iommufd_test_pasid_check_hwpt(struct iommufd_ucmd *ucmd,
+ struct iommu_test_cmd *cmd)
+{
+ u32 hwpt_id = cmd->pasid_check.hwpt_id;
+ struct iommu_domain *attached_domain;
+ struct iommu_attach_handle *handle;
+ struct iommufd_hw_pagetable *hwpt;
+ struct selftest_obj *sobj;
+ struct mock_dev *mdev;
+ int rc = 0;
+
+ sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
+ if (IS_ERR(sobj))
+ return PTR_ERR(sobj);
+
+ mdev = sobj->idev.mock_dev;
+
+ handle = iommu_attach_handle_get(mdev->dev.iommu_group,
+ cmd->pasid_check.pasid, 0);
+ if (IS_ERR(handle))
+ attached_domain = NULL;
+ else
+ attached_domain = handle->domain;
+
+ /* hwpt_id == 0 means to check if pasid is detached */
+ if (!hwpt_id) {
+ if (attached_domain)
+ rc = -EINVAL;
+ goto out_sobj;
+ }
+
+ hwpt = iommufd_get_hwpt(ucmd, hwpt_id);
+ if (IS_ERR(hwpt)) {
+ rc = PTR_ERR(hwpt);
+ goto out_sobj;
+ }
+
+ if (attached_domain != hwpt->domain)
+ rc = -EINVAL;
+
+ iommufd_put_object(ucmd->ictx, &hwpt->obj);
+out_sobj:
+ iommufd_put_object(ucmd->ictx, &sobj->obj);
+ return rc;
+}
+
+static int iommufd_test_pasid_attach(struct iommufd_ucmd *ucmd,
+ struct iommu_test_cmd *cmd)
+{
+ struct selftest_obj *sobj;
+ int rc;
+
+ sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
+ if (IS_ERR(sobj))
+ return PTR_ERR(sobj);
+
+ rc = iommufd_device_attach(sobj->idev.idev, cmd->pasid_attach.pasid,
+ &cmd->pasid_attach.pt_id);
+ if (rc)
+ goto out_sobj;
+
+ rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+ if (rc)
+ iommufd_device_detach(sobj->idev.idev,
+ cmd->pasid_attach.pasid);
+
+out_sobj:
+ iommufd_put_object(ucmd->ictx, &sobj->obj);
+ return rc;
+}
+
+static int iommufd_test_pasid_replace(struct iommufd_ucmd *ucmd,
+ struct iommu_test_cmd *cmd)
+{
+ struct selftest_obj *sobj;
+ int rc;
+
+ sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
+ if (IS_ERR(sobj))
+ return PTR_ERR(sobj);
+
+ rc = iommufd_device_replace(sobj->idev.idev, cmd->pasid_attach.pasid,
+ &cmd->pasid_attach.pt_id);
+ if (rc)
+ goto out_sobj;
+
+ rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+
+out_sobj:
+ iommufd_put_object(ucmd->ictx, &sobj->obj);
+ return rc;
+}
+
+static int iommufd_test_pasid_detach(struct iommufd_ucmd *ucmd,
+ struct iommu_test_cmd *cmd)
+{
+ struct selftest_obj *sobj;
+
+ sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
+ if (IS_ERR(sobj))
+ return PTR_ERR(sobj);
+
+ iommufd_device_detach(sobj->idev.idev, cmd->pasid_detach.pasid);
+ iommufd_put_object(ucmd->ictx, &sobj->obj);
+ return 0;
+}
+
void iommufd_selftest_destroy(struct iommufd_object *obj)
{
struct selftest_obj *sobj = to_selftest_obj(obj);
switch (sobj->type) {
case TYPE_IDEV:
- iommufd_device_detach(sobj->idev.idev);
+ iommufd_device_detach(sobj->idev.idev, IOMMU_NO_PASID);
iommufd_device_unbind(sobj->idev.idev);
mock_dev_destroy(sobj->idev.mock_dev);
break;
@@ -1678,6 +1920,16 @@ int iommufd_test(struct iommufd_ucmd *ucmd)
cmd->dirty.flags);
case IOMMU_TEST_OP_TRIGGER_IOPF:
return iommufd_test_trigger_iopf(ucmd, cmd);
+ case IOMMU_TEST_OP_TRIGGER_VEVENT:
+ return iommufd_test_trigger_vevent(ucmd, cmd);
+ case IOMMU_TEST_OP_PASID_ATTACH:
+ return iommufd_test_pasid_attach(ucmd, cmd);
+ case IOMMU_TEST_OP_PASID_REPLACE:
+ return iommufd_test_pasid_replace(ucmd, cmd);
+ case IOMMU_TEST_OP_PASID_DETACH:
+ return iommufd_test_pasid_detach(ucmd, cmd);
+ case IOMMU_TEST_OP_PASID_CHECK_HWPT:
+ return iommufd_test_pasid_check_hwpt(ucmd, cmd);
default:
return -EOPNOTSUPP;
}
@@ -1724,6 +1976,7 @@ int __init iommufd_test_init(void)
init_completion(&mock_iommu.complete);
mock_iommu_iopf_queue = iopf_queue_alloc("mock-iopfq");
+ mock_iommu.iommu_dev.max_pasids = (1 << MOCK_PASID_WIDTH);
return 0;
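
The pasid-1024 behavior of mock_domain_set_dev_pasid_nop() above reduces to a small state machine; here is a standalone model of it (illustrative, not kernel code): the first non-blocking call arms the fake error, the second consumes it and fails, the third succeeds, and attaching a BLOCKED domain clears the state.

#include <errno.h>

static int fake_error_model(int *armed, int blocked_domain)
{
	if (blocked_domain) {	/* IOMMU_DOMAIN_BLOCKED resets the state */
		*armed = 0;
		return 0;
	}
	if (*armed) {		/* second call: clear the flag and fail once */
		*armed = 0;
		return -ENOMEM;
	}
	*armed = 1;		/* first call: arm the fake error */
	return 0;
}
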
diff --git a/drivers/iommu/iommufd/viommu.c b/drivers/iommu/iommufd/viommu.c
index 69b88e8c7c26..01df2b985f02 100644
--- a/drivers/iommu/iommufd/viommu.c
+++ b/drivers/iommu/iommufd/viommu.c
@@ -59,6 +59,8 @@ int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)
viommu->ictx = ucmd->ictx;
viommu->hwpt = hwpt_paging;
refcount_inc(&viommu->hwpt->common.obj.users);
+ INIT_LIST_HEAD(&viommu->veventqs);
+ init_rwsem(&viommu->veventqs_rwsem);
/*
* It is the most likely case that a physical IOMMU is unpluggable. A
* pluggable IOMMU instance (if exists) is responsible for refcounting
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 0b1870a09e1f..06f809e70f15 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -267,6 +267,7 @@ config DM_CRYPT
depends on BLK_DEV_DM
depends on (ENCRYPTED_KEYS || ENCRYPTED_KEYS=n)
depends on (TRUSTED_KEYS || TRUSTED_KEYS=n)
+ select CRC32
select CRYPTO
select CRYPTO_CBC
select CRYPTO_ESSIV
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index aab8240429b0..9c8ed65cd87e 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -2234,7 +2234,7 @@ int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t c
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
-static bool forget_buffer(struct dm_bufio_client *c, sector_t block)
+static void forget_buffer(struct dm_bufio_client *c, sector_t block)
{
struct dm_buffer *b;
@@ -2249,8 +2249,6 @@ static bool forget_buffer(struct dm_bufio_client *c, sector_t block)
cache_put_and_wake(c, b);
}
}
-
- return b ? true : false;
}
/*
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 9cb797a561d6..a10d75a562db 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -406,6 +406,12 @@ struct cache {
mempool_t migration_pool;
struct bio_set bs;
+
+ /*
+ * A bitset with cache_size entries. Set bits indicate cache blocks
+ * mapped beyond the target length, which are marked for
+ * invalidation.
+ */
+ unsigned long *invalid_bitset;
};
struct per_bio_data {
@@ -1922,6 +1928,9 @@ static void __destroy(struct cache *cache)
if (cache->discard_bitset)
free_bitset(cache->discard_bitset);
+ if (cache->invalid_bitset)
+ free_bitset(cache->invalid_bitset);
+
if (cache->copier)
dm_kcopyd_client_destroy(cache->copier);
@@ -2510,6 +2519,13 @@ static int cache_create(struct cache_args *ca, struct cache **result)
}
clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
+ cache->invalid_bitset = alloc_bitset(from_cblock(cache->cache_size));
+ if (!cache->invalid_bitset) {
+ *error = "could not allocate bitset for invalid blocks";
+ goto bad;
+ }
+ clear_bitset(cache->invalid_bitset, from_cblock(cache->cache_size));
+
cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(cache->copier)) {
*error = "could not create kcopyd client";
@@ -2808,6 +2824,24 @@ static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
return policy_load_mapping(cache->policy, oblock, cblock, dirty, hint, hint_valid);
}
+static int load_filtered_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
+ bool dirty, uint32_t hint, bool hint_valid)
+{
+ struct cache *cache = context;
+
+ if (from_oblock(oblock) >= from_oblock(cache->origin_blocks)) {
+ if (dirty) {
+ DMERR("%s: unable to shrink origin; cache block %u is dirty",
+ cache_device_name(cache), from_cblock(cblock));
+ return -EFBIG;
+ }
+ set_bit(from_cblock(cblock), cache->invalid_bitset);
+ return 0;
+ }
+
+ return load_mapping(context, oblock, cblock, dirty, hint, hint_valid);
+}
+
/*
* The discard block size in the on disk metadata is not
* necessarily the same as we're currently using. So we have to
@@ -2899,6 +2933,27 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
return to_cblock(size);
}
+static bool can_resume(struct cache *cache)
+{
+ /*
+ * Disallow retrying the resume operation for devices that failed the
+ * first resume attempt, as the failure leaves the policy object partially
+ * initialized. Retrying could trigger a BUG_ON when loading cache mappings
+ * into the incomplete policy object.
+ */
+ if (cache->sized && !cache->loaded_mappings) {
+ if (get_cache_mode(cache) != CM_WRITE)
+ DMERR("%s: unable to resume a failed-loaded cache, please check metadata.",
+ cache_device_name(cache));
+ else
+ DMERR("%s: unable to resume cache due to missing proper cache table reload",
+ cache_device_name(cache));
+ return false;
+ }
+
+ return true;
+}
+
static bool can_resize(struct cache *cache, dm_cblock_t new_size)
{
if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
@@ -2941,12 +2996,33 @@ static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
return 0;
}
+static int truncate_oblocks(struct cache *cache)
+{
+ uint32_t nr_blocks = from_cblock(cache->cache_size);
+ uint32_t i;
+ int r;
+
+ for_each_set_bit(i, cache->invalid_bitset, nr_blocks) {
+ r = dm_cache_remove_mapping(cache->cmd, to_cblock(i));
+ if (r) {
+ DMERR_LIMIT("%s: invalidation failed; couldn't update on disk metadata",
+ cache_device_name(cache));
+ return r;
+ }
+ }
+
+ return 0;
+}
+
static int cache_preresume(struct dm_target *ti)
{
int r = 0;
struct cache *cache = ti->private;
dm_cblock_t csize = get_cache_dev_size(cache);
+ if (!can_resume(cache))
+ return -EINVAL;
+
/*
* Check to see if the cache has resized.
*/
@@ -2962,11 +3038,25 @@ static int cache_preresume(struct dm_target *ti)
}
if (!cache->loaded_mappings) {
+ /*
+ * The fast device could have been resized since the last
+ * failed preresume attempt. To be safe, we start with a blank
+ * bitset for cache blocks.
+ */
+ clear_bitset(cache->invalid_bitset, from_cblock(cache->cache_size));
+
r = dm_cache_load_mappings(cache->cmd, cache->policy,
- load_mapping, cache);
+ load_filtered_mapping, cache);
if (r) {
DMERR("%s: could not load cache mappings", cache_device_name(cache));
- metadata_operation_failed(cache, "dm_cache_load_mappings", r);
+ if (r != -EFBIG)
+ metadata_operation_failed(cache, "dm_cache_load_mappings", r);
+ return r;
+ }
+
+ r = truncate_oblocks(cache);
+ if (r) {
+ metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
return r;
}
@@ -3426,7 +3516,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type cache_target = {
.name = "cache",
- .version = {2, 2, 0},
+ .version = {2, 3, 0},
.module = THIS_MODULE,
.ctr = cache_ctr,
.dtr = cache_dtr,
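
The shrink-origin logic added above is two-pass: load_filtered_mapping() refuses to shrink past dirty blocks and records out-of-range clean mappings in invalid_bitset, then truncate_oblocks() removes each recorded mapping from the on-disk metadata. A standalone model of the filtering decision (plain C, using a byte array where the kernel uses a bitset):

#include <errno.h>

static int filter_mapping(unsigned char *invalid, unsigned long origin_blocks,
			  unsigned long oblock, unsigned long cblock, int dirty)
{
	if (oblock < origin_blocks)
		return 0;	/* in range: load the mapping normally */
	if (dirty)
		return -EFBIG;	/* cannot shrink past dirty data */
	invalid[cblock] = 1;	/* drop from metadata in the second pass */
	return 0;
}
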
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 02a2919f4e5a..9dfdb63220d7 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -17,6 +17,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
+#include <linux/crc32.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
@@ -125,7 +126,6 @@ struct iv_lmk_private {
#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
- struct crypto_shash *crc32_tfm;
u8 *iv_seed;
u8 *whitening;
};
@@ -607,10 +607,6 @@ static void crypt_iv_tcw_dtr(struct crypt_config *cc)
tcw->iv_seed = NULL;
kfree_sensitive(tcw->whitening);
tcw->whitening = NULL;
-
- if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
- crypto_free_shash(tcw->crc32_tfm);
- tcw->crc32_tfm = NULL;
}
static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
@@ -628,13 +624,6 @@ static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
return -EINVAL;
}
- tcw->crc32_tfm = crypto_alloc_shash("crc32", 0,
- CRYPTO_ALG_ALLOCATES_MEMORY);
- if (IS_ERR(tcw->crc32_tfm)) {
- ti->error = "Error initializing CRC32 in TCW";
- return PTR_ERR(tcw->crc32_tfm);
- }
-
tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
if (!tcw->iv_seed || !tcw->whitening) {
@@ -668,36 +657,28 @@ static int crypt_iv_tcw_wipe(struct crypt_config *cc)
return 0;
}
-static int crypt_iv_tcw_whitening(struct crypt_config *cc,
- struct dm_crypt_request *dmreq,
- u8 *data)
+static void crypt_iv_tcw_whitening(struct crypt_config *cc,
+ struct dm_crypt_request *dmreq, u8 *data)
{
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
__le64 sector = cpu_to_le64(dmreq->iv_sector);
u8 buf[TCW_WHITENING_SIZE];
- SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
- int i, r;
+ int i;
/* xor whitening with sector number */
crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);
/* calculate crc32 for every 32bit part and xor it */
- desc->tfm = tcw->crc32_tfm;
- for (i = 0; i < 4; i++) {
- r = crypto_shash_digest(desc, &buf[i * 4], 4, &buf[i * 4]);
- if (r)
- goto out;
- }
+ for (i = 0; i < 4; i++)
+ put_unaligned_le32(crc32(0, &buf[i * 4], 4), &buf[i * 4]);
crypto_xor(&buf[0], &buf[12], 4);
crypto_xor(&buf[4], &buf[8], 4);
/* apply whitening (8 bytes) to whole sector */
for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
crypto_xor(data + i * 8, buf, 8);
-out:
memzero_explicit(buf, sizeof(buf));
- return r;
}
static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
@@ -707,13 +688,12 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
__le64 sector = cpu_to_le64(dmreq->iv_sector);
u8 *src;
- int r = 0;
/* Remove whitening from ciphertext */
if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
sg = crypt_get_sg_data(cc, dmreq->sg_in);
src = kmap_local_page(sg_page(sg));
- r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
+ crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
kunmap_local(src);
}
@@ -723,7 +703,7 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector,
cc->iv_size - 8);
- return r;
+ return 0;
}
static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
@@ -731,7 +711,6 @@ static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
{
struct scatterlist *sg;
u8 *dst;
- int r;
if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
return 0;
@@ -739,10 +718,10 @@ static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
/* Apply whitening on ciphertext */
sg = crypt_get_sg_data(cc, dmreq->sg_out);
dst = kmap_local_page(sg_page(sg));
- r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
+ crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
kunmap_local(dst);
- return r;
+ return 0;
}
static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
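
The dm-crypt change above replaces the crypto_shash "crc32" transform with the library crc32() call. A self-contained model of the reworked whitening fold follows; the bitwise CRC matches the kernel's crc32() convention of taking the seed as-is with no final inversion (which is why, e.g., zlib's crc32() would give different values):

#include <stddef.h>
#include <stdint.h>

/* CRC-32, polynomial 0xedb88320, seed used as-is (kernel crc32() semantics) */
static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		int i;

		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1));
	}
	return crc;
}

/* Mirrors the loop in crypt_iv_tcw_whitening(): crc32 each 32-bit word
 * in place (stored little-endian), then fold 16 bytes down to 8. The
 * caller xors the resulting 8 bytes across the whole sector. */
static void tcw_crc_fold(uint8_t buf[16])
{
	int i;

	for (i = 0; i < 4; i++) {
		uint32_t crc = crc32_le(0, &buf[i * 4], 4);

		buf[i * 4 + 0] = crc & 0xff;
		buf[i * 4 + 1] = (crc >> 8) & 0xff;
		buf[i * 4 + 2] = (crc >> 16) & 0xff;
		buf[i * 4 + 3] = (crc >> 24) & 0xff;
	}
	for (i = 0; i < 4; i++)
		buf[i] ^= buf[12 + i];
	for (i = 0; i < 4; i++)
		buf[4 + i] ^= buf[8 + i];
}
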
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 08f6387620c1..d4cf0ac2a7aa 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -369,6 +369,21 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
return delay_bio(dc, c, bio);
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static int delay_report_zones(struct dm_target *ti,
+ struct dm_report_zones_args *args, unsigned int nr_zones)
+{
+ struct delay_c *dc = ti->private;
+ struct delay_class *c = &dc->read;
+
+ return dm_report_zones(c->dev->bdev, c->start,
+ c->start + dm_target_offset(ti, args->next_sector),
+ args, nr_zones);
+}
+#else
+#define delay_report_zones NULL
+#endif
+
#define DMEMIT_DELAY_CLASS(c) \
DMEMIT("%s %llu %u", (c)->dev->name, (unsigned long long)(c)->start, (c)->delay)
@@ -424,11 +439,12 @@ out:
static struct target_type delay_target = {
.name = "delay",
.version = {1, 4, 0},
- .features = DM_TARGET_PASSES_INTEGRITY,
+ .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
.module = THIS_MODULE,
.ctr = delay_ctr,
.dtr = delay_dtr,
.map = delay_map,
+ .report_zones = delay_report_zones,
.presuspend = delay_presuspend,
.resume = delay_resume,
.status = delay_status,
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index 18ae45dcbfb2..b19b0142a690 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -390,6 +390,12 @@ static int ebs_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
+static void ebs_postsuspend(struct dm_target *ti)
+{
+ struct ebs_c *ec = ti->private;
+ dm_bufio_client_reset(ec->bufio);
+}
+
static void ebs_status(struct dm_target *ti, status_type_t type,
unsigned int status_flags, char *result, unsigned int maxlen)
{
@@ -447,6 +453,7 @@ static struct target_type ebs_target = {
.ctr = ebs_ctr,
.dtr = ebs_dtr,
.map = ebs_map,
+ .postsuspend = ebs_postsuspend,
.status = ebs_status,
.io_hints = ebs_io_hints,
.prepare_ioctl = ebs_prepare_ioctl,
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index c8c1a00e7d80..8b219b1199b4 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -21,6 +21,7 @@
#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
+#include <crypto/utils.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>
@@ -516,7 +517,7 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr)
dm_integrity_io_error(ic, "crypto_shash_digest", r);
return r;
}
- if (memcmp(mac, actual_mac, mac_size)) {
+ if (crypto_memneq(mac, actual_mac, mac_size)) {
dm_integrity_io_error(ic, "superblock mac", -EILSEQ);
dm_audit_log_target(DM_MSG_PREFIX, "mac-superblock", ic->ti, 0);
return -EILSEQ;
@@ -859,7 +860,7 @@ static void rw_section_mac(struct dm_integrity_c *ic, unsigned int section, bool
if (likely(wr))
memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
else {
- if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) {
+ if (crypto_memneq(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) {
dm_integrity_io_error(ic, "journal mac", -EILSEQ);
dm_audit_log_target(DM_MSG_PREFIX, "mac-journal", ic->ti, 0);
}
@@ -1401,10 +1402,9 @@ static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_
static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
unsigned int *metadata_offset, unsigned int total_size, int op)
{
-#define MAY_BE_FILLER 1
-#define MAY_BE_HASH 2
unsigned int hash_offset = 0;
- unsigned int may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
+ unsigned char mismatch_hash = 0;
+ unsigned char mismatch_filler = !ic->discard;
do {
unsigned char *data, *dp;
@@ -1425,7 +1425,7 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
if (op == TAG_READ) {
memcpy(tag, dp, to_copy);
} else if (op == TAG_WRITE) {
- if (memcmp(dp, tag, to_copy)) {
+ if (crypto_memneq(dp, tag, to_copy)) {
memcpy(dp, tag, to_copy);
dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
}
@@ -1433,29 +1433,30 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
/* e.g.: op == TAG_CMP */
if (likely(is_power_of_2(ic->tag_size))) {
- if (unlikely(memcmp(dp, tag, to_copy)))
- if (unlikely(!ic->discard) ||
- unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) {
- goto thorough_test;
- }
+ if (unlikely(crypto_memneq(dp, tag, to_copy)))
+ goto thorough_test;
} else {
unsigned int i, ts;
thorough_test:
ts = total_size;
for (i = 0; i < to_copy; i++, ts--) {
- if (unlikely(dp[i] != tag[i]))
- may_be &= ~MAY_BE_HASH;
- if (likely(dp[i] != DISCARD_FILLER))
- may_be &= ~MAY_BE_FILLER;
+ /*
+ * Warning: the control flow must not be
+ * dependent on match/mismatch of
+ * individual bytes.
+ */
+ mismatch_hash |= dp[i] ^ tag[i];
+ mismatch_filler |= dp[i] ^ DISCARD_FILLER;
hash_offset++;
if (unlikely(hash_offset == ic->tag_size)) {
- if (unlikely(!may_be)) {
+ if (unlikely(mismatch_hash) && unlikely(mismatch_filler)) {
dm_bufio_release(b);
return ts;
}
hash_offset = 0;
- may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
+ mismatch_hash = 0;
+ mismatch_filler = !ic->discard;
}
}
}
@@ -1476,8 +1477,6 @@ thorough_test:
} while (unlikely(total_size));
return 0;
-#undef MAY_BE_FILLER
-#undef MAY_BE_HASH
}
struct flush_request {
@@ -2076,7 +2075,7 @@ retry_kmap:
char checksums_onstack[MAX_T(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
- if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
+ if (unlikely(crypto_memneq(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
logical_sector);
dm_audit_log_bio(DM_MSG_PREFIX, "journal-checksum",
@@ -2595,7 +2594,7 @@ static void dm_integrity_inline_recheck(struct work_struct *w)
bio_put(outgoing_bio);
integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, outgoing_data, digest);
- if (unlikely(memcmp(digest, dio->integrity_payload, min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) {
+ if (unlikely(crypto_memneq(digest, dio->integrity_payload, min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) {
DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
ic->dev->bdev, dio->bio_details.bi_iter.bi_sector);
atomic64_inc(&ic->number_of_mismatches);
@@ -2634,7 +2633,7 @@ static int dm_integrity_end_io(struct dm_target *ti, struct bio *bio, blk_status
char *mem = bvec_kmap_local(&bv);
//memset(mem, 0xff, ic->sectors_per_block << SECTOR_SHIFT);
integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, mem, digest);
- if (unlikely(memcmp(digest, dio->integrity_payload + pos,
+ if (unlikely(crypto_memneq(digest, dio->integrity_payload + pos,
min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) {
kunmap_local(mem);
dm_integrity_free_payload(dio);
@@ -2911,7 +2910,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start
integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
(char *)access_journal_data(ic, i, l), test_tag);
- if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) {
+ if (unlikely(crypto_memneq(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) {
dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
dm_audit_log_target(DM_MSG_PREFIX, "integrity-replay-journal", ic->ti, 0);
}
@@ -5072,16 +5071,19 @@ try_smaller_buffer:
ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
if (!ic->recalc_bitmap) {
+ ti->error = "Could not allocate memory for bitmap";
r = -ENOMEM;
goto bad;
}
ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
if (!ic->may_write_bitmap) {
+ ti->error = "Could not allocate memory for bitmap";
r = -ENOMEM;
goto bad;
}
ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
if (!ic->bbs) {
+ ti->error = "Could not allocate memory for bitmap";
r = -ENOMEM;
goto bad;
}
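
The dm-integrity hunks above systematically replace memcmp() with crypto_memneq() and rewrite the tag scan with OR-accumulated mismatch flags, so no branch depends on which byte differed, closing a timing side channel. The core pattern in miniature (a sketch in the spirit of crypto_memneq(), which does the same thing word-wise):

#include <stddef.h>

/* Branch-free inequality test: differences are OR-accumulated, so the
 * loop's control flow and running time do not depend on where (or
 * whether) a mismatch occurs. */
static int memneq(const void *a, const void *b, size_t n)
{
	const unsigned char *pa = a, *pb = b;
	unsigned char diff = 0;

	while (n--)
		diff |= *pa++ ^ *pb++;
	return diff != 0;
}
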
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 3786ac67cefe..a1b7535c508a 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -467,7 +467,7 @@ static struct target_type stripe_target = {
.name = "striped",
.version = {1, 7, 0},
.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT |
- DM_TARGET_ATOMIC_WRITES,
+ DM_TARGET_ATOMIC_WRITES | DM_TARGET_PASSES_CRYPTO,
.module = THIS_MODULE,
.ctr = stripe_ctr,
.dtr = stripe_dtr,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 453803f1edf5..35100a435c88 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -697,6 +697,10 @@ int dm_table_add_target(struct dm_table *t, const char *type,
DMERR("%s: zero-length target", dm_device_name(t->md));
return -EINVAL;
}
+ if (start + len < start || start + len > LLONG_MAX >> SECTOR_SHIFT) {
+ DMERR("%s: too large device", dm_device_name(t->md));
+ return -EINVAL;
+ }
ti->type = dm_get_target_type(type);
if (!ti->type) {
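
The new dm-table check above rejects both wrap-around of start + len and any target whose end, converted from 512-byte sectors to bytes (SECTOR_SHIFT is 9), would overflow a signed 64-bit byte offset. An equivalent standalone form:

#include <stdint.h>

static int dm_range_ok(uint64_t start, uint64_t len)
{
	uint64_t end = start + len;	/* in 512-byte sectors */

	if (end < start)
		return 0;	/* 64-bit wrap-around */
	if (end > (uint64_t)INT64_MAX >> 9)
		return 0;	/* end in bytes would exceed LLONG_MAX */
	return 1;
}
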
diff --git a/drivers/md/dm-vdo/block-map.c b/drivers/md/dm-vdo/block-map.c
index 89cb7942ec5c..baf683cabb1b 100644
--- a/drivers/md/dm-vdo/block-map.c
+++ b/drivers/md/dm-vdo/block-map.c
@@ -451,7 +451,7 @@ static struct page_info * __must_check find_page(struct vdo_page_cache *cache,
* select_lru_page() - Determine which page is least recently used.
*
* Picks the least recently used from among the non-busy entries at the front of each of the lru
- * ring. Since whenever we mark a page busy we also put it to the end of the ring it is unlikely
+ * list. Since whenever we mark a page busy we also put it to the end of the list, it is unlikely
* that the entries at the front are busy unless the queue is very short, but not impossible.
*
* Return: A pointer to the info structure for a relevant page, or NULL if no such page can be
@@ -1544,7 +1544,7 @@ static void write_page_if_not_dirtied(struct vdo_waiter *waiter, void *context)
static void return_to_pool(struct block_map_zone *zone, struct pooled_vio *vio)
{
- return_vio_to_pool(zone->vio_pool, vio);
+ return_vio_to_pool(vio);
check_for_drain_complete(zone);
}
@@ -1837,7 +1837,7 @@ static void finish_block_map_page_load(struct vdo_completion *completion)
if (!vdo_copy_valid_page(vio->data, nonce, pbn, page))
vdo_format_block_map_page(page, nonce, pbn, false);
- return_vio_to_pool(zone->vio_pool, pooled);
+ return_vio_to_pool(pooled);
/* Release our claim to the load and wake any waiters */
release_page_lock(data_vio, "load");
@@ -1851,10 +1851,9 @@ static void handle_io_error(struct vdo_completion *completion)
struct vio *vio = as_vio(completion);
struct pooled_vio *pooled = container_of(vio, struct pooled_vio, vio);
struct data_vio *data_vio = completion->parent;
- struct block_map_zone *zone = pooled->context;
vio_record_metadata_io_error(vio);
- return_vio_to_pool(zone->vio_pool, pooled);
+ return_vio_to_pool(pooled);
abort_load(data_vio, result);
}
@@ -2499,7 +2498,7 @@ static void finish_cursor(struct cursor *cursor)
struct cursors *cursors = cursor->parent;
struct vdo_completion *completion = cursors->completion;
- return_vio_to_pool(cursors->pool, vdo_forget(cursor->vio));
+ return_vio_to_pool(vdo_forget(cursor->vio));
if (--cursors->active_roots > 0)
return;
@@ -2746,7 +2745,7 @@ static int __must_check initialize_block_map_zone(struct block_map *map,
if (result != VDO_SUCCESS)
return result;
- result = make_vio_pool(vdo, BLOCK_MAP_VIO_POOL_SIZE,
+ result = make_vio_pool(vdo, BLOCK_MAP_VIO_POOL_SIZE, 1,
zone->thread_id, VIO_TYPE_BLOCK_MAP_INTERIOR,
VIO_PRIORITY_METADATA, zone, &zone->vio_pool);
if (result != VDO_SUCCESS)
diff --git a/drivers/md/dm-vdo/constants.h b/drivers/md/dm-vdo/constants.h
index a8c4d6e24b38..2a8b03779f87 100644
--- a/drivers/md/dm-vdo/constants.h
+++ b/drivers/md/dm-vdo/constants.h
@@ -44,9 +44,6 @@ enum {
/* The default size of each slab journal, in blocks */
DEFAULT_VDO_SLAB_JOURNAL_SIZE = 224,
- /* Unit test minimum */
- MINIMUM_VDO_SLAB_JOURNAL_BLOCKS = 2,
-
/*
* The initial size of lbn_operations and pbn_operations, which is based upon the expected
* maximum number of outstanding VIOs. This value was chosen to make it highly unlikely
diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c
index 3f3d29af1be4..5c49d49e023c 100644
--- a/drivers/md/dm-vdo/dedupe.c
+++ b/drivers/md/dm-vdo/dedupe.c
@@ -226,7 +226,7 @@ struct hash_lock {
* A list containing the data VIOs sharing this lock, all having the same record name and
* data block contents, linked by their hash_lock_node fields.
*/
- struct list_head duplicate_ring;
+ struct list_head duplicate_vios;
/* The number of data_vios sharing this lock instance */
data_vio_count_t reference_count;
@@ -343,7 +343,7 @@ static void return_hash_lock_to_pool(struct hash_zone *zone, struct hash_lock *l
{
memset(lock, 0, sizeof(*lock));
INIT_LIST_HEAD(&lock->pool_node);
- INIT_LIST_HEAD(&lock->duplicate_ring);
+ INIT_LIST_HEAD(&lock->duplicate_vios);
vdo_waitq_init(&lock->waiters);
list_add_tail(&lock->pool_node, &zone->lock_pool);
}
@@ -441,7 +441,7 @@ static void set_hash_lock(struct data_vio *data_vio, struct hash_lock *new_lock)
VDO_ASSERT_LOG_ONLY(data_vio->hash_zone != NULL,
"must have a hash zone when holding a hash lock");
VDO_ASSERT_LOG_ONLY(!list_empty(&data_vio->hash_lock_entry),
- "must be on a hash lock ring when holding a hash lock");
+ "must be on a hash lock list when holding a hash lock");
VDO_ASSERT_LOG_ONLY(old_lock->reference_count > 0,
"hash lock reference must be counted");
@@ -464,10 +464,10 @@ static void set_hash_lock(struct data_vio *data_vio, struct hash_lock *new_lock)
if (new_lock != NULL) {
/*
- * Keep all data_vios sharing the lock on a ring since they can complete in any
+ * Keep all data_vios sharing the lock on a list since they can complete in any
* order and we'll always need a pointer to one to compare data.
*/
- list_move_tail(&data_vio->hash_lock_entry, &new_lock->duplicate_ring);
+ list_move_tail(&data_vio->hash_lock_entry, &new_lock->duplicate_vios);
new_lock->reference_count += 1;
if (new_lock->max_references < new_lock->reference_count)
new_lock->max_references = new_lock->reference_count;
@@ -1789,10 +1789,10 @@ static bool is_hash_collision(struct hash_lock *lock, struct data_vio *candidate
struct hash_zone *zone;
bool collides;
- if (list_empty(&lock->duplicate_ring))
+ if (list_empty(&lock->duplicate_vios))
return false;
- lock_holder = list_first_entry(&lock->duplicate_ring, struct data_vio,
+ lock_holder = list_first_entry(&lock->duplicate_vios, struct data_vio,
hash_lock_entry);
zone = candidate->hash_zone;
collides = !blocks_equal(lock_holder->vio.data, candidate->vio.data);
@@ -1815,7 +1815,7 @@ static inline int assert_hash_lock_preconditions(const struct data_vio *data_vio
return result;
result = VDO_ASSERT(list_empty(&data_vio->hash_lock_entry),
- "must not already be a member of a hash lock ring");
+ "must not already be a member of a hash lock list");
if (result != VDO_SUCCESS)
return result;
@@ -1942,8 +1942,8 @@ void vdo_release_hash_lock(struct data_vio *data_vio)
"returned hash lock must not be in use with state %s",
get_hash_lock_state_name(lock->state));
VDO_ASSERT_LOG_ONLY(list_empty(&lock->pool_node),
- "hash lock returned to zone must not be in a pool ring");
- VDO_ASSERT_LOG_ONLY(list_empty(&lock->duplicate_ring),
+ "hash lock returned to zone must not be in a pool list");
+ VDO_ASSERT_LOG_ONLY(list_empty(&lock->duplicate_vios),
"hash lock returned to zone must not reference DataVIOs");
return_hash_lock_to_pool(zone, lock);
diff --git a/drivers/md/dm-vdo/encodings.c b/drivers/md/dm-vdo/encodings.c
index 100e92f8f866..b7cc0f41caca 100644
--- a/drivers/md/dm-vdo/encodings.c
+++ b/drivers/md/dm-vdo/encodings.c
@@ -711,24 +711,11 @@ int vdo_configure_slab(block_count_t slab_size, block_count_t slab_journal_block
ref_blocks = vdo_get_saved_reference_count_size(slab_size - slab_journal_blocks);
meta_blocks = (ref_blocks + slab_journal_blocks);
- /* Make sure test code hasn't configured slabs to be too small. */
+ /* Make sure configured slabs are not too small. */
if (meta_blocks >= slab_size)
return VDO_BAD_CONFIGURATION;
- /*
- * If the slab size is very small, assume this must be a unit test and override the number
- * of data blocks to be a power of two (wasting blocks in the slab). Many tests need their
- * data_blocks fields to be the exact capacity of the configured volume, and that used to
- * fall out since they use a power of two for the number of data blocks, the slab size was
- * a power of two, and every block in a slab was a data block.
- *
- * TODO: Try to figure out some way of structuring testParameters and unit tests so this
- * hack isn't needed without having to edit several unit tests every time the metadata size
- * changes by one block.
- */
data_blocks = slab_size - meta_blocks;
- if ((slab_size < 1024) && !is_power_of_2(data_blocks))
- data_blocks = ((block_count_t) 1 << ilog2(data_blocks));
/*
* Configure the slab journal thresholds. The flush threshold is 168 of 224 blocks in
@@ -1221,11 +1208,6 @@ int vdo_validate_config(const struct vdo_config *config,
if (result != VDO_SUCCESS)
return result;
- result = VDO_ASSERT(config->slab_journal_blocks >= MINIMUM_VDO_SLAB_JOURNAL_BLOCKS,
- "slab journal size meets minimum size");
- if (result != VDO_SUCCESS)
- return result;
-
result = VDO_ASSERT(config->slab_journal_blocks <= config->slab_size,
"slab journal size is within expected bound");
if (result != VDO_SUCCESS)
diff --git a/drivers/md/dm-vdo/indexer/index-layout.c b/drivers/md/dm-vdo/indexer/index-layout.c
index af8fab83b0f3..61edf2b72427 100644
--- a/drivers/md/dm-vdo/indexer/index-layout.c
+++ b/drivers/md/dm-vdo/indexer/index-layout.c
@@ -54,7 +54,6 @@
* Each save also has a unique nonce.
*/
-#define MAGIC_SIZE 32
#define NONCE_INFO_SIZE 32
#define MAX_SAVES 2
@@ -98,9 +97,11 @@ enum region_type {
#define SUPER_VERSION_CURRENT 3
#define SUPER_VERSION_MAXIMUM 7
-static const u8 LAYOUT_MAGIC[MAGIC_SIZE] = "*ALBIREO*SINGLE*FILE*LAYOUT*001*";
+static const u8 LAYOUT_MAGIC[] = "*ALBIREO*SINGLE*FILE*LAYOUT*001*";
static const u64 REGION_MAGIC = 0x416c6252676e3031; /* 'AlbRgn01' */
+#define MAGIC_SIZE (sizeof(LAYOUT_MAGIC) - 1)
+
struct region_header {
u64 magic;
u64 region_blocks;
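
Deriving MAGIC_SIZE from the literal removes a silent-mismatch hazard: sizeof on a string literal counts the trailing NUL, so subtracting one yields the on-disk magic length, and the constant can no longer drift from the string it describes. A small illustration (static_assert per <linux/build_bug.h>):

	#include <linux/build_bug.h>
	#include <linux/types.h>

	static const u8 LAYOUT_MAGIC[] = "*ALBIREO*SINGLE*FILE*LAYOUT*001*";
	#define MAGIC_SIZE (sizeof(LAYOUT_MAGIC) - 1)

	/* 32 visible characters plus the NUL: sizeof() is 33, MAGIC_SIZE is 32 */
	static_assert(MAGIC_SIZE == 32);
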
diff --git a/drivers/md/dm-vdo/indexer/index-session.c b/drivers/md/dm-vdo/indexer/index-session.c
index aee0914d604a..aa575a24e0b2 100644
--- a/drivers/md/dm-vdo/indexer/index-session.c
+++ b/drivers/md/dm-vdo/indexer/index-session.c
@@ -100,7 +100,6 @@ static int get_index_session(struct uds_index_session *index_session)
int uds_launch_request(struct uds_request *request)
{
- size_t internal_size;
int result;
if (request->callback == NULL) {
@@ -121,10 +120,7 @@ int uds_launch_request(struct uds_request *request)
}
/* Reset all internal fields before processing. */
- internal_size =
- sizeof(struct uds_request) - offsetof(struct uds_request, zone_number);
- // FIXME should be using struct_group for this instead
- memset((char *) request + sizeof(*request) - internal_size, 0, internal_size);
+ memset(&request->internal, 0, sizeof(request->internal));
result = get_index_session(request->session);
if (result != UDS_SUCCESS)
diff --git a/drivers/md/dm-vdo/indexer/indexer.h b/drivers/md/dm-vdo/indexer/indexer.h
index 183a94eb7e92..7c1fc4577f5b 100644
--- a/drivers/md/dm-vdo/indexer/indexer.h
+++ b/drivers/md/dm-vdo/indexer/indexer.h
@@ -8,6 +8,7 @@
#include <linux/mutex.h>
#include <linux/sched.h>
+#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/wait.h>
@@ -73,7 +74,7 @@ enum uds_request_type {
/* Remove any mapping for a name. */
UDS_DELETE,
-};
+} __packed;
enum uds_open_index_type {
/* Create a new index. */
@@ -226,7 +227,7 @@ struct uds_zone_message {
enum uds_zone_message_type type;
/* The virtual chapter number to which the message applies */
u64 virtual_chapter;
-};
+} __packed;
struct uds_index_session;
struct uds_index;
@@ -253,34 +254,32 @@ struct uds_request {
/* The existing data associated with the request name, if any */
struct uds_record_data old_metadata;
- /* Either UDS_SUCCESS or an error code for the request */
- int status;
/* True if the record name had an existing entry in the index */
bool found;
+ /* Either UDS_SUCCESS or an error code for the request */
+ int status;
- /*
- * The remaining fields are used internally and should not be altered by clients. The index
- * relies on zone_number being the first field in this section.
- */
-
- /* The number of the zone which will process this request*/
- unsigned int zone_number;
- /* A link for adding a request to a lock-free queue */
- struct funnel_queue_entry queue_link;
- /* A link for adding a request to a standard linked list */
- struct uds_request *next_request;
- /* A pointer to the index processing this request */
- struct uds_index *index;
- /* Control message for coordinating between zones */
- struct uds_zone_message zone_message;
- /* If true, process request immediately by waking the worker thread */
- bool unbatched;
- /* If true, continue this request before processing newer requests */
- bool requeued;
- /* The virtual chapter containing the record name, if known */
- u64 virtual_chapter;
- /* The region of the index containing the record name */
- enum uds_index_region location;
+ /* The remaining fields are used internally and should not be altered by clients. */
+ struct_group(internal,
+ /* The virtual chapter containing the record name, if known */
+ u64 virtual_chapter;
+ /* The region of the index containing the record name */
+ enum uds_index_region location;
+ /* If true, process request immediately by waking the worker thread */
+ bool unbatched;
+ /* If true, continue this request before processing newer requests */
+ bool requeued;
+ /* Control message for coordinating between zones */
+ struct uds_zone_message zone_message;
+ /* The number of the zone which will process this request */
+ unsigned int zone_number;
+ /* A link for adding a request to a lock-free queue */
+ struct funnel_queue_entry queue_link;
+ /* A link for adding a request to a standard linked list */
+ struct uds_request *next_request;
+ /* A pointer to the index processing this request */
+ struct uds_index *index;
+ );
};
/* A session is required for most index operations. */
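
The uds_request rework above replaces offsetof() arithmetic with struct_group() from <linux/stddef.h>, which wraps the listed members in a union of an anonymous struct and a named one, so the group can be cleared with a single bounded memset(); it also drops the fragile requirement that zone_number be the first internal field. A minimal sketch with an illustrative struct:

	#include <linux/stddef.h>
	#include <linux/string.h>

	struct sample_request {
		int status;			/* caller-visible, preserved */
		struct_group(internal,		/* reset between reuses */
			unsigned int zone_number;
			void *queue_link;
		);
	};

	static void reset_internal(struct sample_request *req)
	{
		/* touches exactly the grouped members, nothing before them */
		memset(&req->internal, 0, sizeof(req->internal));
	}
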
diff --git a/drivers/md/dm-vdo/io-submitter.c b/drivers/md/dm-vdo/io-submitter.c
index 421e5436c32c..11d47770b54d 100644
--- a/drivers/md/dm-vdo/io-submitter.c
+++ b/drivers/md/dm-vdo/io-submitter.c
@@ -327,6 +327,7 @@ void vdo_submit_data_vio(struct data_vio *data_vio)
* @error_handler: the handler for submission or I/O errors (may be NULL)
* @operation: the type of I/O to perform
* @data: the buffer to read or write (may be NULL)
+ * @size: the amount of data to read or write, in bytes
*
* The vio is enqueued on a vdo bio queue so that bio submission (which may block) does not block
* other vdo threads.
@@ -338,7 +339,7 @@ void vdo_submit_data_vio(struct data_vio *data_vio)
*/
void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
bio_end_io_t callback, vdo_action_fn error_handler,
- blk_opf_t operation, char *data)
+ blk_opf_t operation, char *data, int size)
{
int result;
struct vdo_completion *completion = &vio->completion;
@@ -349,7 +350,8 @@ void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
vdo_reset_completion(completion);
completion->error_handler = error_handler;
- result = vio_reset_bio(vio, data, callback, operation | REQ_META, physical);
+ result = vio_reset_bio_with_size(vio, data, size, callback, operation | REQ_META,
+ physical);
if (result != VDO_SUCCESS) {
continue_vio(vio, result);
return;
diff --git a/drivers/md/dm-vdo/io-submitter.h b/drivers/md/dm-vdo/io-submitter.h
index 80748699496f..3088f11055fd 100644
--- a/drivers/md/dm-vdo/io-submitter.h
+++ b/drivers/md/dm-vdo/io-submitter.h
@@ -8,6 +8,7 @@
#include <linux/bio.h>
+#include "constants.h"
#include "types.h"
struct io_submitter;
@@ -26,14 +27,25 @@ void vdo_submit_data_vio(struct data_vio *data_vio);
void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
bio_end_io_t callback, vdo_action_fn error_handler,
- blk_opf_t operation, char *data);
+ blk_opf_t operation, char *data, int size);
static inline void vdo_submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
bio_end_io_t callback, vdo_action_fn error_handler,
blk_opf_t operation)
{
__submit_metadata_vio(vio, physical, callback, error_handler,
- operation, vio->data);
+ operation, vio->data, vio->block_count * VDO_BLOCK_SIZE);
+}
+
+static inline void vdo_submit_metadata_vio_with_size(struct vio *vio,
+ physical_block_number_t physical,
+ bio_end_io_t callback,
+ vdo_action_fn error_handler,
+ blk_opf_t operation,
+ int size)
+{
+ __submit_metadata_vio(vio, physical, callback, error_handler,
+ operation, vio->data, size);
}
static inline void vdo_submit_flush_vio(struct vio *vio, bio_end_io_t callback,
@@ -41,7 +53,7 @@ static inline void vdo_submit_flush_vio(struct vio *vio, bio_end_io_t callback,
{
/* FIXME: Can we just use REQ_OP_FLUSH? */
__submit_metadata_vio(vio, 0, callback, error_handler,
- REQ_OP_WRITE | REQ_PREFLUSH, NULL);
+ REQ_OP_WRITE | REQ_PREFLUSH, NULL, 0);
}
#endif /* VDO_IO_SUBMITTER_H */
diff --git a/drivers/md/dm-vdo/packer.h b/drivers/md/dm-vdo/packer.h
index 0f3be44710b5..8c8d6892582d 100644
--- a/drivers/md/dm-vdo/packer.h
+++ b/drivers/md/dm-vdo/packer.h
@@ -46,7 +46,7 @@ struct compressed_block {
/*
* Each packer_bin holds an incomplete batch of data_vios that only partially fill a compressed
- * block. The bins are kept in a ring sorted by the amount of unused space so the first bin with
+ * block. The bins are kept in a list sorted by the amount of unused space so the first bin with
* enough space to hold a newly-compressed data_vio can easily be found. When the bin fills up or
* is flushed, the first uncanceled data_vio in the bin is selected to be the agent for that bin.
* Upon entering the packer, each data_vio already has its compressed data in the first slot of the
diff --git a/drivers/md/dm-vdo/priority-table.c b/drivers/md/dm-vdo/priority-table.c
index 42d3d8d0e4b5..9bae8256ba4e 100644
--- a/drivers/md/dm-vdo/priority-table.c
+++ b/drivers/md/dm-vdo/priority-table.c
@@ -199,7 +199,7 @@ void vdo_priority_table_remove(struct priority_table *table, struct list_head *e
/*
* Remove the entry from the bucket list, remembering a pointer to another entry in the
- * ring.
+ * list.
*/
next_entry = entry->next;
list_del_init(entry);
diff --git a/drivers/md/dm-vdo/recovery-journal.h b/drivers/md/dm-vdo/recovery-journal.h
index 899071173015..25e7ec6d19f6 100644
--- a/drivers/md/dm-vdo/recovery-journal.h
+++ b/drivers/md/dm-vdo/recovery-journal.h
@@ -43,9 +43,9 @@
* has a vio which is used to commit that block to disk. The vio's data is the on-disk
* representation of the journal block. In addition each in-memory block has a buffer which is used
* to accumulate entries while a partial commit of the block is in progress. In-memory blocks are
- * kept on two rings. Free blocks live on the 'free_tail_blocks' ring. When a block becomes active
- * (see below) it is moved to the 'active_tail_blocks' ring. When a block is fully committed, it is
- * moved back to the 'free_tail_blocks' ring.
+ * kept on two lists. Free blocks live on the 'free_tail_blocks' list. When a block becomes active
+ * (see below) it is moved to the 'active_tail_blocks' list. When a block is fully committed, it is
+ * moved back to the 'free_tail_blocks' list.
*
* When entries are added to the journal, they are added to the active in-memory block, as
* indicated by the 'active_block' field. If the caller wishes to wait for the entry to be
diff --git a/drivers/md/dm-vdo/slab-depot.c b/drivers/md/dm-vdo/slab-depot.c
index 8f0a35c63af6..f3d80ff7bef5 100644
--- a/drivers/md/dm-vdo/slab-depot.c
+++ b/drivers/md/dm-vdo/slab-depot.c
@@ -139,7 +139,7 @@ static bool is_slab_journal_blank(const struct vdo_slab *slab)
}
/**
- * mark_slab_journal_dirty() - Put a slab journal on the dirty ring of its allocator in the correct
+ * mark_slab_journal_dirty() - Put a slab journal on the dirty list of its allocator in the correct
* order.
* @journal: The journal to be marked dirty.
* @lock: The recovery journal lock held by the slab journal.
@@ -414,8 +414,7 @@ static void complete_reaping(struct vdo_completion *completion)
{
struct slab_journal *journal = completion->parent;
- return_vio_to_pool(journal->slab->allocator->vio_pool,
- vio_as_pooled_vio(as_vio(vdo_forget(completion))));
+ return_vio_to_pool(vio_as_pooled_vio(as_vio(completion)));
finish_reaping(journal);
reap_slab_journal(journal);
}
@@ -698,7 +697,7 @@ static void complete_write(struct vdo_completion *completion)
sequence_number_t committed = get_committing_sequence_number(pooled);
list_del_init(&pooled->list_entry);
- return_vio_to_pool(journal->slab->allocator->vio_pool, vdo_forget(pooled));
+ return_vio_to_pool(pooled);
if (result != VDO_SUCCESS) {
vio_record_metadata_io_error(as_vio(completion));
@@ -822,7 +821,7 @@ static void commit_tail(struct slab_journal *journal)
/*
* Since we are about to commit the tail block, this journal no longer needs to be on the
- * ring of journals which the recovery journal might ask to commit.
+ * list of journals which the recovery journal might ask to commit.
*/
mark_slab_journal_clean(journal);
@@ -1076,7 +1075,7 @@ static void finish_reference_block_write(struct vdo_completion *completion)
/* Release the slab journal lock. */
adjust_slab_journal_block_reference(&slab->journal,
block->slab_journal_lock_to_release, -1);
- return_vio_to_pool(slab->allocator->vio_pool, pooled);
+ return_vio_to_pool(pooled);
/*
* We can't clear the is_writing flag earlier as releasing the slab journal lock may cause
@@ -1170,8 +1169,8 @@ static void handle_io_error(struct vdo_completion *completion)
struct vdo_slab *slab = ((struct reference_block *) completion->parent)->slab;
vio_record_metadata_io_error(vio);
- return_vio_to_pool(slab->allocator->vio_pool, vio_as_pooled_vio(vio));
- slab->active_count--;
+ return_vio_to_pool(vio_as_pooled_vio(vio));
+ slab->active_count -= vio->io_size / VDO_BLOCK_SIZE;
vdo_enter_read_only_mode(slab->allocator->depot->vdo, result);
check_if_slab_drained(slab);
}
@@ -1372,7 +1371,7 @@ static unsigned int calculate_slab_priority(struct vdo_slab *slab)
static void prioritize_slab(struct vdo_slab *slab)
{
VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry),
- "a slab must not already be on a ring when prioritizing");
+ "a slab must not already be on a list when prioritizing");
slab->priority = calculate_slab_priority(slab);
vdo_priority_table_enqueue(slab->allocator->prioritized_slabs,
slab->priority, &slab->allocq_entry);
@@ -2165,28 +2164,95 @@ static void dirty_all_reference_blocks(struct vdo_slab *slab)
dirty_block(&slab->reference_blocks[i]);
}
+static inline bool journal_points_equal(struct journal_point first,
+ struct journal_point second)
+{
+ return ((first.sequence_number == second.sequence_number) &&
+ (first.entry_count == second.entry_count));
+}
+
/**
- * clear_provisional_references() - Clear the provisional reference counts from a reference block.
- * @block: The block to clear.
+ * match_bytes() - Check an 8-byte word for bytes matching the value specified
+ * @input: A word to examine the bytes of
+ * @match: The byte value sought
+ *
+ * Return: 1 in each byte when the corresponding input byte matched, 0 otherwise
*/
-static void clear_provisional_references(struct reference_block *block)
+static inline u64 match_bytes(u64 input, u8 match)
{
- vdo_refcount_t *counters = get_reference_counters_for_block(block);
- block_count_t j;
+ u64 temp = input ^ (match * 0x0101010101010101ULL);
+ /* top bit of each byte is set iff top bit of temp byte is clear; rest are 0 */
+ u64 test_top_bits = ~temp & 0x8080808080808080ULL;
+ /* top bit of each byte is set iff low 7 bits of temp byte are clear; rest are useless */
+ u64 test_low_bits = 0x8080808080808080ULL - (temp & 0x7f7f7f7f7f7f7f7fULL);
+ /* return 1 when both tests indicate temp byte is 0 */
+ return (test_top_bits & test_low_bits) >> 7;
+}
+
+/**
+ * count_valid_references() - Process a newly loaded refcount array
+ * @counters: the array of counters from a metadata block
+ *
+ * Scan an 8-byte-aligned array of counters, fixing up any "provisional" values that weren't
+ * cleaned up at shutdown, changing them internally to "empty".
+ *
+ * Return: the number of blocks that are referenced (counters not "empty")
+ */
+static unsigned int count_valid_references(vdo_refcount_t *counters)
+{
+ u64 *words = (u64 *)counters;
+ /* It's easier to count occurrences of a specific byte than its absences. */
+ unsigned int empty_count = 0;
+ /* For speed, we process 8 bytes at once. */
+ unsigned int words_left = COUNTS_PER_BLOCK / sizeof(u64);
+
+ /*
+ * Sanity check assumptions used for optimizing this code: Counters are bytes. The counter
+ * array is a multiple of the word size.
+ */
+ BUILD_BUG_ON(sizeof(vdo_refcount_t) != 1);
+ BUILD_BUG_ON((COUNTS_PER_BLOCK % sizeof(u64)) != 0);
+
+ while (words_left > 0) {
+ /*
+ * This is effectively used as eight byte-sized counters. Byte 0 counts how many words
+ * had the target value found in byte 0, etc. We just have to avoid overflow.
+ */
+ u64 split_count = 0;
+ /*
+ * The counter "% 255" trick used below to fold split_count into empty_count
+ * imposes a limit of 254 bytes examined each iteration of the outer loop. We
+ * process a word at a time, so that limit gets rounded down to 31 u64 words.
+ */
+ const unsigned int max_words_per_iteration = 254 / sizeof(u64);
+ unsigned int iter_words_left = min_t(unsigned int, words_left,
+ max_words_per_iteration);
+
+ words_left -= iter_words_left;
+
+ while (iter_words_left--) {
+ u64 word = *words;
+ u64 temp;
+
+ /* First, if we have any provisional refcount values, clear them. */
+ temp = match_bytes(word, PROVISIONAL_REFERENCE_COUNT);
+ if (temp) {
+ /*
+ * 'temp' has 0x01 bytes where 'word' has PROVISIONAL; this xor
+ * will alter just those bytes, changing PROVISIONAL to EMPTY.
+ */
+ word ^= temp * (PROVISIONAL_REFERENCE_COUNT ^ EMPTY_REFERENCE_COUNT);
+ *words = word;
+ }
- for (j = 0; j < COUNTS_PER_BLOCK; j++) {
- if (counters[j] == PROVISIONAL_REFERENCE_COUNT) {
- counters[j] = EMPTY_REFERENCE_COUNT;
- block->allocated_count--;
+ /* Now count the EMPTY_REFERENCE_COUNT bytes, updating the 8 counters. */
+ split_count += match_bytes(word, EMPTY_REFERENCE_COUNT);
+ words++;
}
+ empty_count += split_count % 255;
}
-}
-static inline bool journal_points_equal(struct journal_point first,
- struct journal_point second)
-{
- return ((first.sequence_number == second.sequence_number) &&
- (first.entry_count == second.entry_count));
+ return COUNTS_PER_BLOCK - empty_count;
}
/**
@@ -2197,7 +2263,6 @@ static inline bool journal_points_equal(struct journal_point first,
static void unpack_reference_block(struct packed_reference_block *packed,
struct reference_block *block)
{
- block_count_t index;
sector_count_t i;
struct vdo_slab *slab = block->slab;
vdo_refcount_t *counters = get_reference_counters_for_block(block);
@@ -2223,11 +2288,7 @@ static void unpack_reference_block(struct packed_reference_block *packed,
}
}
- block->allocated_count = 0;
- for (index = 0; index < COUNTS_PER_BLOCK; index++) {
- if (counters[index] != EMPTY_REFERENCE_COUNT)
- block->allocated_count++;
- }
+ block->allocated_count = count_valid_references(counters);
}
/**
@@ -2240,13 +2301,19 @@ static void finish_reference_block_load(struct vdo_completion *completion)
struct pooled_vio *pooled = vio_as_pooled_vio(vio);
struct reference_block *block = completion->parent;
struct vdo_slab *slab = block->slab;
+ unsigned int block_count = vio->io_size / VDO_BLOCK_SIZE;
+ unsigned int i;
+ char *data = vio->data;
- unpack_reference_block((struct packed_reference_block *) vio->data, block);
- return_vio_to_pool(slab->allocator->vio_pool, pooled);
- slab->active_count--;
- clear_provisional_references(block);
+ for (i = 0; i < block_count; i++, block++, data += VDO_BLOCK_SIZE) {
+ struct packed_reference_block *packed = (struct packed_reference_block *) data;
+
+ unpack_reference_block(packed, block);
+ slab->free_blocks -= block->allocated_count;
+ }
+ return_vio_to_pool(pooled);
+ slab->active_count -= block_count;
- slab->free_blocks -= block->allocated_count;
check_if_slab_drained(slab);
}
@@ -2260,23 +2327,25 @@ static void load_reference_block_endio(struct bio *bio)
}
/**
- * load_reference_block() - After a block waiter has gotten a VIO from the VIO pool, load the
- * block.
- * @waiter: The waiter of the block to load.
+ * load_reference_block_group() - After a block waiter has gotten a VIO from the VIO pool, load
+ * a set of blocks.
+ * @waiter: The waiter of the first block to load.
* @context: The VIO returned by the pool.
*/
-static void load_reference_block(struct vdo_waiter *waiter, void *context)
+static void load_reference_block_group(struct vdo_waiter *waiter, void *context)
{
struct pooled_vio *pooled = context;
struct vio *vio = &pooled->vio;
struct reference_block *block =
container_of(waiter, struct reference_block, waiter);
- size_t block_offset = (block - block->slab->reference_blocks);
+ u32 block_offset = block - block->slab->reference_blocks;
+ u32 max_block_count = block->slab->reference_block_count - block_offset;
+ u32 block_count = min_t(int, vio->block_count, max_block_count);
vio->completion.parent = block;
- vdo_submit_metadata_vio(vio, block->slab->ref_counts_origin + block_offset,
- load_reference_block_endio, handle_io_error,
- REQ_OP_READ);
+ vdo_submit_metadata_vio_with_size(vio, block->slab->ref_counts_origin + block_offset,
+ load_reference_block_endio, handle_io_error,
+ REQ_OP_READ, block_count * VDO_BLOCK_SIZE);
}
/**
@@ -2286,14 +2355,21 @@ static void load_reference_block(struct vdo_waiter *waiter, void *context)
static void load_reference_blocks(struct vdo_slab *slab)
{
block_count_t i;
+ u64 blocks_per_vio = slab->allocator->refcount_blocks_per_big_vio;
+ struct vio_pool *pool = slab->allocator->refcount_big_vio_pool;
+
+ if (!pool) {
+ pool = slab->allocator->vio_pool;
+ blocks_per_vio = 1;
+ }
slab->free_blocks = slab->block_count;
slab->active_count = slab->reference_block_count;
- for (i = 0; i < slab->reference_block_count; i++) {
+ for (i = 0; i < slab->reference_block_count; i += blocks_per_vio) {
struct vdo_waiter *waiter = &slab->reference_blocks[i].waiter;
- waiter->callback = load_reference_block;
- acquire_vio_from_pool(slab->allocator->vio_pool, waiter);
+ waiter->callback = load_reference_block_group;
+ acquire_vio_from_pool(pool, waiter);
}
}
@@ -2429,7 +2505,7 @@ static void finish_loading_journal(struct vdo_completion *completion)
initialize_journal_state(journal);
}
- return_vio_to_pool(slab->allocator->vio_pool, vio_as_pooled_vio(vio));
+ return_vio_to_pool(vio_as_pooled_vio(vio));
vdo_finish_loading_with_result(&slab->state, allocate_counters_if_clean(slab));
}
@@ -2449,7 +2525,7 @@ static void handle_load_error(struct vdo_completion *completion)
struct vio *vio = as_vio(completion);
vio_record_metadata_io_error(vio);
- return_vio_to_pool(journal->slab->allocator->vio_pool, vio_as_pooled_vio(vio));
+ return_vio_to_pool(vio_as_pooled_vio(vio));
vdo_finish_loading_with_result(&journal->slab->state, result);
}
@@ -2547,7 +2623,7 @@ static void queue_slab(struct vdo_slab *slab)
int result;
VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry),
- "a requeued slab must not already be on a ring");
+ "a requeued slab must not already be on a list");
if (vdo_is_read_only(allocator->depot->vdo))
return;
@@ -2700,6 +2776,7 @@ static void finish_scrubbing(struct slab_scrubber *scrubber, int result)
vdo_log_info("VDO commencing normal operation");
else if (prior_state == VDO_RECOVERING)
vdo_log_info("Exiting recovery mode");
+ free_vio_pool(vdo_forget(allocator->refcount_big_vio_pool));
}
/*
@@ -3281,7 +3358,7 @@ int vdo_release_block_reference(struct block_allocator *allocator,
* This is a min_heap callback function orders slab_status structures using the 'is_clean' field as
* the primary key and the 'emptiness' field as the secondary key.
*
- * Slabs need to be pushed onto the rings in the same order they are to be popped off. Popping
+ * Slabs need to be pushed onto the lists in the same order they are to be popped off. Popping
* should always get the most empty first, so pushing should be from most empty to least empty.
* Thus, the ordering is reversed from the usual sense since min_heap returns smaller elements
* before larger ones.
@@ -3983,6 +4060,7 @@ static int __must_check initialize_block_allocator(struct slab_depot *depot,
struct vdo *vdo = depot->vdo;
block_count_t max_free_blocks = depot->slab_config.data_blocks;
unsigned int max_priority = (2 + ilog2(max_free_blocks));
+ u32 reference_block_count, refcount_reads_needed, refcount_blocks_per_vio;
*allocator = (struct block_allocator) {
.depot = depot,
@@ -4000,12 +4078,24 @@ static int __must_check initialize_block_allocator(struct slab_depot *depot,
return result;
vdo_initialize_completion(&allocator->completion, vdo, VDO_BLOCK_ALLOCATOR_COMPLETION);
- result = make_vio_pool(vdo, BLOCK_ALLOCATOR_VIO_POOL_SIZE, allocator->thread_id,
+ result = make_vio_pool(vdo, BLOCK_ALLOCATOR_VIO_POOL_SIZE, 1, allocator->thread_id,
VIO_TYPE_SLAB_JOURNAL, VIO_PRIORITY_METADATA,
allocator, &allocator->vio_pool);
if (result != VDO_SUCCESS)
return result;
+ /* Initialize the refcount-reading vio pool. */
+ reference_block_count = vdo_get_saved_reference_count_size(depot->slab_config.slab_blocks);
+ refcount_reads_needed = DIV_ROUND_UP(reference_block_count, MAX_BLOCKS_PER_VIO);
+ refcount_blocks_per_vio = DIV_ROUND_UP(reference_block_count, refcount_reads_needed);
+ allocator->refcount_blocks_per_big_vio = refcount_blocks_per_vio;
+ result = make_vio_pool(vdo, BLOCK_ALLOCATOR_REFCOUNT_VIO_POOL_SIZE,
+ allocator->refcount_blocks_per_big_vio, allocator->thread_id,
+ VIO_TYPE_SLAB_JOURNAL, VIO_PRIORITY_METADATA,
+ NULL, &allocator->refcount_big_vio_pool);
+ if (result != VDO_SUCCESS)
+ return result;
+
result = initialize_slab_scrubber(allocator);
if (result != VDO_SUCCESS)
return result;
@@ -4223,6 +4313,7 @@ void vdo_free_slab_depot(struct slab_depot *depot)
uninitialize_allocator_summary(allocator);
uninitialize_scrubber_vio(&allocator->scrubber);
free_vio_pool(vdo_forget(allocator->vio_pool));
+ free_vio_pool(vdo_forget(allocator->refcount_big_vio_pool));
vdo_free_priority_table(vdo_forget(allocator->prioritized_slabs));
}
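
match_bytes() above is the classic SWAR "has-byte" trick: the XOR maps matching bytes to zero, two bit tests then flag the zero bytes without a per-byte loop, and the later "% 255" fold works because 256 is congruent to 1 (mod 255), so summing eight lane counters modulo 255 recovers the true count. A standalone userspace check of the same arithmetic (test values are hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t match_bytes(uint64_t input, uint8_t match)
	{
		uint64_t temp = input ^ (match * 0x0101010101010101ULL);
		uint64_t top = ~temp & 0x8080808080808080ULL;
		uint64_t low = 0x8080808080808080ULL - (temp & 0x7f7f7f7f7f7f7f7fULL);
		return (top & low) >> 7;	/* 0x01 in each matching lane */
	}

	int main(void)
	{
		/* bytes 0 and 5 hold 0xff */
		printf("%016llx\n",
		       (unsigned long long)match_bytes(0x0000ff00000000ffULL, 0xff));
		/* prints 0000010000000001 */
		return 0;
	}
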
diff --git a/drivers/md/dm-vdo/slab-depot.h b/drivers/md/dm-vdo/slab-depot.h
index f234853501ca..fadc0c9d4dc4 100644
--- a/drivers/md/dm-vdo/slab-depot.h
+++ b/drivers/md/dm-vdo/slab-depot.h
@@ -45,6 +45,13 @@
enum {
/* The number of vios in the vio pool is proportional to the throughput of the VDO. */
BLOCK_ALLOCATOR_VIO_POOL_SIZE = 128,
+
+ /*
+ * The number of vios in the vio pool used for loading reference count data. A slab's
+ * refcount data is capped at ~8MB, and we process one slab at a time in a zone, so 9 should be
+ * plenty.
+ */
+ BLOCK_ALLOCATOR_REFCOUNT_VIO_POOL_SIZE = 9,
};
/*
@@ -248,7 +255,7 @@ struct vdo_slab {
/* A list of the dirty blocks waiting to be written out */
struct vdo_wait_queue dirty_blocks;
- /* The number of blocks which are currently writing */
+ /* The number of blocks currently being read or written */
size_t active_count;
/* A waiter object for updating the slab summary */
@@ -425,6 +432,10 @@ struct block_allocator {
/* The vio pool for reading and writing block allocator metadata */
struct vio_pool *vio_pool;
+ /* The vio pool for large initial reads of ref count areas */
+ struct vio_pool *refcount_big_vio_pool;
+ /* How many ref count blocks are read per vio at initial load */
+ u32 refcount_blocks_per_big_vio;
/* The dm_kcopyd client for erasing slab journals */
struct dm_kcopyd_client *eraser;
/* Iterator over the slabs to be erased */
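
The sizing math in initialize_block_allocator() splits a slab's reference-count area into near-equal reads rather than maximal ones, which the two DIV_ROUND_UP() steps guarantee. A worked example with illustrative numbers:

	/*
	 * Worked example (illustrative; assumes MAX_BLOCKS_PER_VIO == 256):
	 *   reference_block_count   = 2000
	 *   refcount_reads_needed   = DIV_ROUND_UP(2000, 256) = 8
	 *   refcount_blocks_per_vio = DIV_ROUND_UP(2000, 8)   = 250
	 * Eight balanced 250-block reads instead of 7 x 256 + 1 x 208.
	 */
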
diff --git a/drivers/md/dm-vdo/types.h b/drivers/md/dm-vdo/types.h
index dbe892b10f26..cdf36e7d7702 100644
--- a/drivers/md/dm-vdo/types.h
+++ b/drivers/md/dm-vdo/types.h
@@ -376,6 +376,9 @@ struct vio {
/* The size of this vio in blocks */
unsigned int block_count;
+ /* The amount of data to be read or written, in bytes */
+ unsigned int io_size;
+
/* The data being read or written. */
char *data;
diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c
index a7e32baab4af..80b608674022 100644
--- a/drivers/md/dm-vdo/vdo.c
+++ b/drivers/md/dm-vdo/vdo.c
@@ -31,9 +31,7 @@
#include <linux/completion.h>
#include <linux/device-mapper.h>
-#include <linux/kernel.h>
#include <linux/lz4.h>
-#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/types.h>
@@ -142,12 +140,6 @@ static void finish_vdo_request_queue(void *ptr)
vdo_unregister_allocating_thread();
}
-#ifdef MODULE
-#define MODULE_NAME THIS_MODULE->name
-#else
-#define MODULE_NAME "dm-vdo"
-#endif /* MODULE */
-
static const struct vdo_work_queue_type default_queue_type = {
.start = start_vdo_request_queue,
.finish = finish_vdo_request_queue,
@@ -559,8 +551,7 @@ int vdo_make(unsigned int instance, struct device_config *config, char **reason,
*vdo_ptr = vdo;
snprintf(vdo->thread_name_prefix, sizeof(vdo->thread_name_prefix),
- "%s%u", MODULE_NAME, instance);
- BUG_ON(vdo->thread_name_prefix[0] == '\0');
+ "vdo%u", instance);
result = vdo_allocate(vdo->thread_config.thread_count,
struct vdo_thread, __func__, &vdo->threads);
if (result != VDO_SUCCESS) {
diff --git a/drivers/md/dm-vdo/vio.c b/drivers/md/dm-vdo/vio.c
index e710f3c5a972..e7f4153e55e3 100644
--- a/drivers/md/dm-vdo/vio.c
+++ b/drivers/md/dm-vdo/vio.c
@@ -188,14 +188,23 @@ void vdo_set_bio_properties(struct bio *bio, struct vio *vio, bio_end_io_t callb
/*
* Prepares the bio to perform IO with the specified buffer. May only be used on a VDO-allocated
- * bio, as it assumes the bio wraps a 4k buffer that is 4k aligned, but there does not have to be a
- * vio associated with the bio.
+ * bio, as it assumes the bio wraps a 4k-multiple buffer that is 4k aligned, but there does not
+ * have to be a vio associated with the bio.
*/
int vio_reset_bio(struct vio *vio, char *data, bio_end_io_t callback,
blk_opf_t bi_opf, physical_block_number_t pbn)
{
- int bvec_count, offset, len, i;
+ return vio_reset_bio_with_size(vio, data, vio->block_count * VDO_BLOCK_SIZE,
+ callback, bi_opf, pbn);
+}
+
+int vio_reset_bio_with_size(struct vio *vio, char *data, int size, bio_end_io_t callback,
+ blk_opf_t bi_opf, physical_block_number_t pbn)
+{
+ int bvec_count, offset, i;
struct bio *bio = vio->bio;
+ int vio_size = vio->block_count * VDO_BLOCK_SIZE;
+ int remaining;
bio_reset(bio, bio->bi_bdev, bi_opf);
vdo_set_bio_properties(bio, vio, callback, bi_opf, pbn);
@@ -205,22 +214,21 @@ int vio_reset_bio(struct vio *vio, char *data, bio_end_io_t callback,
bio->bi_ioprio = 0;
bio->bi_io_vec = bio->bi_inline_vecs;
bio->bi_max_vecs = vio->block_count + 1;
- len = VDO_BLOCK_SIZE * vio->block_count;
+ if (VDO_ASSERT(size <= vio_size, "specified size %d is not greater than allocated %d",
+ size, vio_size) != VDO_SUCCESS)
+ size = vio_size;
+ vio->io_size = size;
offset = offset_in_page(data);
- bvec_count = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+ bvec_count = DIV_ROUND_UP(offset + size, PAGE_SIZE);
+ remaining = size;
- /*
- * If we knew that data was always on one page, or contiguous pages, we wouldn't need the
- * loop. But if we're using vmalloc, it's not impossible that the data is in different
- * pages that can't be merged in bio_add_page...
- */
- for (i = 0; (i < bvec_count) && (len > 0); i++) {
+ for (i = 0; (i < bvec_count) && (remaining > 0); i++) {
struct page *page;
int bytes_added;
int bytes = PAGE_SIZE - offset;
- if (bytes > len)
- bytes = len;
+ if (bytes > remaining)
+ bytes = remaining;
page = is_vmalloc_addr(data) ? vmalloc_to_page(data) : virt_to_page(data);
bytes_added = bio_add_page(bio, page, bytes, offset);
@@ -232,7 +240,7 @@ int vio_reset_bio(struct vio *vio, char *data, bio_end_io_t callback,
}
data += bytes;
- len -= bytes;
+ remaining -= bytes;
offset = 0;
}
@@ -301,6 +309,7 @@ void vio_record_metadata_io_error(struct vio *vio)
* make_vio_pool() - Create a new vio pool.
* @vdo: The vdo.
* @pool_size: The number of vios in the pool.
+ * @block_count: The number of 4k blocks per vio.
* @thread_id: The ID of the thread using this pool.
* @vio_type: The type of vios in the pool.
* @priority: The priority with which vios from the pool should be enqueued.
@@ -309,13 +318,14 @@ void vio_record_metadata_io_error(struct vio *vio)
*
* Return: A success or error code.
*/
-int make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id,
+int make_vio_pool(struct vdo *vdo, size_t pool_size, size_t block_count, thread_id_t thread_id,
enum vio_type vio_type, enum vio_priority priority, void *context,
struct vio_pool **pool_ptr)
{
struct vio_pool *pool;
char *ptr;
int result;
+ size_t per_vio_size = VDO_BLOCK_SIZE * block_count;
result = vdo_allocate_extended(struct vio_pool, pool_size, struct pooled_vio,
__func__, &pool);
@@ -326,7 +336,7 @@ int make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id,
INIT_LIST_HEAD(&pool->available);
INIT_LIST_HEAD(&pool->busy);
- result = vdo_allocate(pool_size * VDO_BLOCK_SIZE, char,
+ result = vdo_allocate(pool_size * per_vio_size, char,
"VIO pool buffer", &pool->buffer);
if (result != VDO_SUCCESS) {
free_vio_pool(pool);
@@ -334,10 +344,10 @@ int make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id,
}
ptr = pool->buffer;
- for (pool->size = 0; pool->size < pool_size; pool->size++, ptr += VDO_BLOCK_SIZE) {
+ for (pool->size = 0; pool->size < pool_size; pool->size++, ptr += per_vio_size) {
struct pooled_vio *pooled = &pool->vios[pool->size];
- result = allocate_vio_components(vdo, vio_type, priority, NULL, 1, ptr,
+ result = allocate_vio_components(vdo, vio_type, priority, NULL, block_count, ptr,
&pooled->vio);
if (result != VDO_SUCCESS) {
free_vio_pool(pool);
@@ -345,6 +355,7 @@ int make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id,
}
pooled->context = context;
+ pooled->pool = pool;
list_add_tail(&pooled->pool_entry, &pool->available);
}
@@ -419,12 +430,13 @@ void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter)
}
/**
- * return_vio_to_pool() - Return a vio to the pool
- * @pool: The vio pool.
+ * return_vio_to_pool() - Return a vio to its pool
* @vio: The pooled vio to return.
*/
-void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio)
+void return_vio_to_pool(struct pooled_vio *vio)
{
+ struct vio_pool *pool = vio->pool;
+
VDO_ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()),
"vio pool entry returned on same thread as it was acquired");
diff --git a/drivers/md/dm-vdo/vio.h b/drivers/md/dm-vdo/vio.h
index 3490e9f59b04..4bfcb21901f1 100644
--- a/drivers/md/dm-vdo/vio.h
+++ b/drivers/md/dm-vdo/vio.h
@@ -30,6 +30,8 @@ struct pooled_vio {
void *context;
/* The list entry used by the pool */
struct list_head pool_entry;
+ /* The pool this vio is allocated from */
+ struct vio_pool *pool;
};
/**
@@ -123,6 +125,8 @@ void vdo_set_bio_properties(struct bio *bio, struct vio *vio, bio_end_io_t callb
int vio_reset_bio(struct vio *vio, char *data, bio_end_io_t callback,
blk_opf_t bi_opf, physical_block_number_t pbn);
+int vio_reset_bio_with_size(struct vio *vio, char *data, int size, bio_end_io_t callback,
+ blk_opf_t bi_opf, physical_block_number_t pbn);
void update_vio_error_stats(struct vio *vio, const char *format, ...)
__printf(2, 3);
@@ -188,12 +192,13 @@ static inline struct pooled_vio *vio_as_pooled_vio(struct vio *vio)
struct vio_pool;
-int __must_check make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id,
- enum vio_type vio_type, enum vio_priority priority,
- void *context, struct vio_pool **pool_ptr);
+int __must_check make_vio_pool(struct vdo *vdo, size_t pool_size, size_t block_count,
+ thread_id_t thread_id, enum vio_type vio_type,
+ enum vio_priority priority, void *context,
+ struct vio_pool **pool_ptr);
void free_vio_pool(struct vio_pool *pool);
bool __must_check is_vio_pool_busy(struct vio_pool *pool);
void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter);
-void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio);
+void return_vio_to_pool(struct pooled_vio *vio);
#endif /* VIO_H */
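
Dropping the pool argument from return_vio_to_pool() works because make_vio_pool() now stamps each pooled vio with its owner, so the object carries enough context to find its way home. The shape of the pattern, reduced to illustrative types:

	#include <linux/list.h>

	struct item_pool {
		struct list_head available;
	};

	struct pooled_item {
		struct list_head entry;
		struct item_pool *pool;	/* set once at pool construction */
	};

	static void return_item(struct pooled_item *item)
	{
		/* the item knows its owner, so callers pass one argument */
		list_add_tail(&item->entry, &item->pool->available);
	}
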
diff --git a/drivers/md/dm-vdo/wait-queue.c b/drivers/md/dm-vdo/wait-queue.c
index 6e1e739277ef..f81ed0cee2bf 100644
--- a/drivers/md/dm-vdo/wait-queue.c
+++ b/drivers/md/dm-vdo/wait-queue.c
@@ -34,7 +34,7 @@ void vdo_waitq_enqueue_waiter(struct vdo_wait_queue *waitq, struct vdo_waiter *w
waitq->last_waiter->next_waiter = waiter;
}
- /* In both cases, the waiter we added to the ring becomes the last waiter. */
+ /* In both cases, the waiter we added to the list becomes the last waiter. */
waitq->last_waiter = waiter;
waitq->length += 1;
}
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index e86c1431b108..3c427f18a04b 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -30,6 +30,7 @@
#define DM_VERITY_ENV_VAR_NAME "DM_VERITY_ERR_BLOCK_NR"
#define DM_VERITY_DEFAULT_PREFETCH_SIZE 262144
+#define DM_VERITY_USE_BH_DEFAULT_BYTES 8192
#define DM_VERITY_MAX_CORRUPTED_ERRS 100
@@ -49,6 +50,15 @@ static unsigned int dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE
module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, 0644);
+static unsigned int dm_verity_use_bh_bytes[4] = {
+ DM_VERITY_USE_BH_DEFAULT_BYTES, // IOPRIO_CLASS_NONE
+ DM_VERITY_USE_BH_DEFAULT_BYTES, // IOPRIO_CLASS_RT
+ DM_VERITY_USE_BH_DEFAULT_BYTES, // IOPRIO_CLASS_BE
+ 0 // IOPRIO_CLASS_IDLE
+};
+
+module_param_array_named(use_bh_bytes, dm_verity_use_bh_bytes, uint, NULL, 0644);
+
static DEFINE_STATIC_KEY_FALSE(use_bh_wq_enabled);
/* Is at least one dm-verity instance using ahash_tfm instead of shash_tfm? */
@@ -311,7 +321,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
data = dm_bufio_get(v->bufio, hash_block, &buf);
- if (data == NULL) {
+ if (IS_ERR_OR_NULL(data)) {
/*
* Running in a tasklet and the hash was not in the bufio cache.
* Return early and resume execution from a work-queue
@@ -324,8 +334,24 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
&buf, bio->bi_ioprio);
}
- if (IS_ERR(data))
- return PTR_ERR(data);
+ if (IS_ERR(data)) {
+ if (skip_unverified)
+ return 1;
+ r = PTR_ERR(data);
+ data = dm_bufio_new(v->bufio, hash_block, &buf);
+ if (IS_ERR(data))
+ return r;
+ if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_METADATA,
+ hash_block, data) == 0) {
+ aux = dm_bufio_get_aux_data(buf);
+ aux->hash_verified = 1;
+ goto release_ok;
+ } else {
+ dm_bufio_release(buf);
+ dm_bufio_forget(v->bufio, hash_block);
+ return r;
+ }
+ }
aux = dm_bufio_get_aux_data(buf);
@@ -366,6 +392,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
}
}
+release_ok:
data += offset;
memcpy(want_digest, data, v->digest_size);
r = 0;
@@ -652,9 +679,17 @@ static void verity_bh_work(struct work_struct *w)
verity_finish_io(io, errno_to_blk_status(err));
}
+static inline bool verity_use_bh(unsigned int bytes, unsigned short ioprio)
+{
+ return ioprio <= IOPRIO_CLASS_IDLE &&
+ bytes <= READ_ONCE(dm_verity_use_bh_bytes[ioprio]);
+}
+
static void verity_end_io(struct bio *bio)
{
struct dm_verity_io *io = bio->bi_private;
+ unsigned short ioprio = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
+ unsigned int bytes = io->n_blocks << io->v->data_dev_block_bits;
if (bio->bi_status &&
(!verity_fec_is_enabled(io->v) ||
@@ -664,9 +699,14 @@ static void verity_end_io(struct bio *bio)
return;
}
- if (static_branch_unlikely(&use_bh_wq_enabled) && io->v->use_bh_wq) {
- INIT_WORK(&io->bh_work, verity_bh_work);
- queue_work(system_bh_wq, &io->bh_work);
+ if (static_branch_unlikely(&use_bh_wq_enabled) && io->v->use_bh_wq &&
+ verity_use_bh(bytes, ioprio)) {
+ if (in_hardirq() || irqs_disabled()) {
+ INIT_WORK(&io->bh_work, verity_bh_work);
+ queue_work(system_bh_wq, &io->bh_work);
+ } else {
+ verity_bh_work(&io->bh_work);
+ }
} else {
INIT_WORK(&io->work, verity_work);
queue_work(io->v->verify_wq, &io->work);
@@ -796,6 +836,13 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
+static void verity_postsuspend(struct dm_target *ti)
+{
+ struct dm_verity *v = ti->private;
+ flush_workqueue(v->verify_wq);
+ dm_bufio_client_reset(v->bufio);
+}
+
/*
* Status: V (valid) or C (corruption found)
*/
@@ -1761,11 +1808,12 @@ static struct target_type verity_target = {
.name = "verity",
/* Note: the LSMs depend on the singleton and immutable features */
.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
.map = verity_map,
+ .postsuspend = verity_postsuspend,
.status = verity_status,
.prepare_ioctl = verity_prepare_ioctl,
.iterate_devices = verity_iterate_devices,
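
The gate added above keys off both I/O size and priority class: IOPRIO_CLASS_NONE, _RT and _BE default to an 8 KiB ceiling while _IDLE gets 0, so small latency-sensitive reads skip the trip through the verify workqueue while large or idle-priority I/O never does; the thresholds stay tunable at runtime through the writable use_bh_bytes module parameter. A self-contained restatement of the decision, mirroring verity_use_bh() above (the array here just repeats the defaults for illustration):

	#include <linux/compiler.h>
	#include <linux/ioprio.h>
	#include <linux/types.h>

	/* mirrors the defaults above: NONE, RT, BE at 8 KiB; IDLE disabled */
	static unsigned int use_bh_bytes[4] = { 8192, 8192, 8192, 0 };

	static bool small_enough_for_bh(unsigned int bytes, unsigned short class)
	{
		/* priority classes run NONE(0), RT(1), BE(2), IDLE(3) */
		if (class > IOPRIO_CLASS_IDLE)
			return false;
		return bytes <= READ_ONCE(use_bh_bytes[class]);
	}
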
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4d1e42891d24..5ab7574c0c76 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1540,14 +1540,18 @@ static void __send_empty_flush(struct clone_info *ci)
{
struct dm_table *t = ci->map;
struct bio flush_bio;
+ blk_opf_t opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
+
+ if ((ci->io->orig_bio->bi_opf & (REQ_IDLE | REQ_SYNC)) ==
+ (REQ_IDLE | REQ_SYNC))
+ opf |= REQ_IDLE;
/*
* Use an on-stack bio for this, it's safe since we don't
* need to reference it after submit. It's just used as
* the basis for the clone(s).
*/
- bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0,
- REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
+ bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, opf);
ci->bio = &flush_bio;
ci->sector_count = 0;
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index 2f5165918163..cfe59c3255f7 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -2701,8 +2701,11 @@ static void dib8000_set_dds(struct dib8000_state *state, s32 offset_khz)
u8 ratio;
if (state->revision == 0x8090) {
+ u32 internal = dib8000_read32(state, 23) / 1000;
+
ratio = 4;
- unit_khz_dds_val = (1<<26) / (dib8000_read32(state, 23) / 1000);
+
+ unit_khz_dds_val = (1<<26) / (internal ?: 1);
if (offset_khz < 0)
dds = (1 << 26) - (abs_offset_khz * unit_khz_dds_val);
else
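
The dib8000 fix reads the PLL-derived internal frequency once and guards the division with GCC's "x ?: y" conditional, which reuses the first operand unless it evaluates to zero, so a zeroed register readback can no longer trigger a divide-by-zero. In isolation (illustrative helper):

	#include <linux/types.h>

	static u32 unit_khz_dds(u32 internal_khz)
	{
		/* fall back to 1 if the readback was 0, avoiding a div-by-zero */
		return (1 << 26) / (internal_khz ?: 1);
	}
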
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 56bc72c7ce4a..6b37d61150ee 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -641,7 +641,6 @@ source "drivers/misc/mei/Kconfig"
source "drivers/misc/vmw_vmci/Kconfig"
source "drivers/misc/genwqe/Kconfig"
source "drivers/misc/echo/Kconfig"
-source "drivers/misc/cxl/Kconfig"
source "drivers/misc/ocxl/Kconfig"
source "drivers/misc/bcm-vk/Kconfig"
source "drivers/misc/cardreader/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 545aad06d088..d6c917229c45 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -50,7 +50,6 @@ obj-$(CONFIG_SRAM) += sram.o
obj-$(CONFIG_SRAM_EXEC) += sram-exec.o
obj-$(CONFIG_GENWQE) += genwqe/
obj-$(CONFIG_ECHO) += echo/
-obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_DW_XDATA_PCIE) += dw-xdata-pcie.o
obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
obj-$(CONFIG_OCXL) += ocxl/
diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig
deleted file mode 100644
index 15307f5e4307..000000000000
--- a/drivers/misc/cxl/Kconfig
+++ /dev/null
@@ -1,28 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# IBM Coherent Accelerator (CXL) compatible devices
-#
-
-config CXL_BASE
- bool
- select PPC_COPRO_BASE
- select PPC_64S_HASH_MMU
-
-config CXL
- tristate "Support for IBM Coherent Accelerators (CXL) (DEPRECATED)"
- depends on PPC_POWERNV && PCI_MSI && EEH
- select CXL_BASE
- help
- The cxl driver is deprecated and will be removed in a future
- kernel release.
-
- Select this option to enable driver support for IBM Coherent
- Accelerators (CXL). CXL is otherwise known as Coherent Accelerator
- Processor Interface (CAPI). CAPI allows accelerators in FPGAs to be
- coherently attached to a CPU via an MMU. This driver enables
- userspace programs to access these accelerators via /dev/cxl/afuM.N
- devices.
-
- CAPI adapters are found in POWER8 based systems.
-
- If unsure, say N.
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
deleted file mode 100644
index 5eea61b9584f..000000000000
--- a/drivers/misc/cxl/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-ccflags-y := $(call cc-disable-warning, unused-const-variable)
-ccflags-$(CONFIG_PPC_WERROR) += -Werror
-
-cxl-y += main.o file.o irq.o fault.o native.o
-cxl-y += context.o sysfs.o pci.o trace.o
-cxl-y += vphb.o api.o cxllib.o
-cxl-$(CONFIG_PPC_PSERIES) += flash.o guest.o of.o hcalls.o
-cxl-$(CONFIG_DEBUG_FS) += debugfs.o
-obj-$(CONFIG_CXL) += cxl.o
-obj-$(CONFIG_CXL_BASE) += base.o
-
-# For tracepoints to include our trace.h from tracepoint infrastructure:
-CFLAGS_trace.o := -I$(src)
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
deleted file mode 100644
index d85c56530863..000000000000
--- a/drivers/misc/cxl/api.c
+++ /dev/null
@@ -1,532 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/file.h>
-#include <misc/cxl.h>
-#include <linux/module.h>
-#include <linux/mount.h>
-#include <linux/pseudo_fs.h>
-#include <linux/sched/mm.h>
-#include <linux/mmu_context.h>
-#include <linux/irqdomain.h>
-
-#include "cxl.h"
-
-/*
- * Since we want to track memory mappings to be able to force-unmap
- * when the AFU is no longer reachable, we need an inode. For devices
- * opened through the cxl user API, this is not a problem, but a
- * userland process can also get a cxl fd through the cxl_get_fd()
- * API, which is used by the cxlflash driver.
- *
- * Therefore we implement our own simple pseudo-filesystem and inode
- * allocator. We don't use the anonymous inode, as we need the
- * meta-data associated with it (address_space) and it is shared by
- * other drivers/processes, so it could lead to cxl unmapping VMAs
- * from random processes.
- */
-
-#define CXL_PSEUDO_FS_MAGIC 0x1697697f
-
-static int cxl_fs_cnt;
-static struct vfsmount *cxl_vfs_mount;
-
-static int cxl_fs_init_fs_context(struct fs_context *fc)
-{
- return init_pseudo(fc, CXL_PSEUDO_FS_MAGIC) ? 0 : -ENOMEM;
-}
-
-static struct file_system_type cxl_fs_type = {
- .name = "cxl",
- .owner = THIS_MODULE,
- .init_fs_context = cxl_fs_init_fs_context,
- .kill_sb = kill_anon_super,
-};
-
-
-void cxl_release_mapping(struct cxl_context *ctx)
-{
- if (ctx->kernelapi && ctx->mapping)
- simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt);
-}
-
-static struct file *cxl_getfile(const char *name,
- const struct file_operations *fops,
- void *priv, int flags)
-{
- struct file *file;
- struct inode *inode;
- int rc;
-
- /* strongly inspired by anon_inode_getfile() */
-
- if (fops->owner && !try_module_get(fops->owner))
- return ERR_PTR(-ENOENT);
-
- rc = simple_pin_fs(&cxl_fs_type, &cxl_vfs_mount, &cxl_fs_cnt);
- if (rc < 0) {
- pr_err("Cannot mount cxl pseudo filesystem: %d\n", rc);
- file = ERR_PTR(rc);
- goto err_module;
- }
-
- inode = alloc_anon_inode(cxl_vfs_mount->mnt_sb);
- if (IS_ERR(inode)) {
- file = ERR_CAST(inode);
- goto err_fs;
- }
-
- file = alloc_file_pseudo(inode, cxl_vfs_mount, name,
- flags & (O_ACCMODE | O_NONBLOCK), fops);
- if (IS_ERR(file))
- goto err_inode;
-
- file->private_data = priv;
-
- return file;
-
-err_inode:
- iput(inode);
-err_fs:
- simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt);
-err_module:
- module_put(fops->owner);
- return file;
-}
-
-struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
-{
- struct cxl_afu *afu;
- struct cxl_context *ctx;
- int rc;
-
- afu = cxl_pci_to_afu(dev);
- if (IS_ERR(afu))
- return ERR_CAST(afu);
-
- ctx = cxl_context_alloc();
- if (!ctx)
- return ERR_PTR(-ENOMEM);
-
- ctx->kernelapi = true;
-
- /* Make it a slave context. We can promote it later? */
- rc = cxl_context_init(ctx, afu, false);
- if (rc)
- goto err_ctx;
-
- return ctx;
-
-err_ctx:
- kfree(ctx);
- return ERR_PTR(rc);
-}
-EXPORT_SYMBOL_GPL(cxl_dev_context_init);
-
-struct cxl_context *cxl_get_context(struct pci_dev *dev)
-{
- return dev->dev.archdata.cxl_ctx;
-}
-EXPORT_SYMBOL_GPL(cxl_get_context);
-
-int cxl_release_context(struct cxl_context *ctx)
-{
- if (ctx->status >= STARTED)
- return -EBUSY;
-
- cxl_context_free(ctx);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(cxl_release_context);
-
-static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
-{
- __u16 range;
- int r;
-
- for (r = 0; r < CXL_IRQ_RANGES; r++) {
- range = ctx->irqs.range[r];
- if (num < range) {
- return ctx->irqs.offset[r] + num;
- }
- num -= range;
- }
- return 0;
-}
-
-
-int cxl_set_priv(struct cxl_context *ctx, void *priv)
-{
- if (!ctx)
- return -EINVAL;
-
- ctx->priv = priv;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(cxl_set_priv);
-
-void *cxl_get_priv(struct cxl_context *ctx)
-{
- if (!ctx)
- return ERR_PTR(-EINVAL);
-
- return ctx->priv;
-}
-EXPORT_SYMBOL_GPL(cxl_get_priv);
-
-int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
-{
- int res;
- irq_hw_number_t hwirq;
-
- if (num == 0)
- num = ctx->afu->pp_irqs;
- res = afu_allocate_irqs(ctx, num);
- if (res)
- return res;
-
- if (!cpu_has_feature(CPU_FTR_HVMODE)) {
- /* In a guest, the PSL interrupt is not multiplexed. It was
- * allocated above, and we need to set its handler
- */
- hwirq = cxl_find_afu_irq(ctx, 0);
- if (hwirq)
- cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl");
- }
-
- if (ctx->status == STARTED) {
- if (cxl_ops->update_ivtes)
- cxl_ops->update_ivtes(ctx);
- else WARN(1, "BUG: cxl_allocate_afu_irqs must be called prior to starting the context on this platform\n");
- }
-
- return res;
-}
-EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);
-
-void cxl_free_afu_irqs(struct cxl_context *ctx)
-{
- irq_hw_number_t hwirq;
- unsigned int virq;
-
- if (!cpu_has_feature(CPU_FTR_HVMODE)) {
- hwirq = cxl_find_afu_irq(ctx, 0);
- if (hwirq) {
- virq = irq_find_mapping(NULL, hwirq);
- if (virq)
- cxl_unmap_irq(virq, ctx);
- }
- }
- afu_irq_name_free(ctx);
- cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
-}
-EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);
-
-int cxl_map_afu_irq(struct cxl_context *ctx, int num,
- irq_handler_t handler, void *cookie, char *name)
-{
- irq_hw_number_t hwirq;
-
- /*
- * Find the interrupt we are to register.
- */
- hwirq = cxl_find_afu_irq(ctx, num);
- if (!hwirq)
- return -ENOENT;
-
- return cxl_map_irq(ctx->afu->adapter, hwirq, handler, cookie, name);
-}
-EXPORT_SYMBOL_GPL(cxl_map_afu_irq);
-
-void cxl_unmap_afu_irq(struct cxl_context *ctx, int num, void *cookie)
-{
- irq_hw_number_t hwirq;
- unsigned int virq;
-
- hwirq = cxl_find_afu_irq(ctx, num);
- if (!hwirq)
- return;
-
- virq = irq_find_mapping(NULL, hwirq);
- if (virq)
- cxl_unmap_irq(virq, cookie);
-}
-EXPORT_SYMBOL_GPL(cxl_unmap_afu_irq);
-
-/*
- * Start a context
- * Code here similar to afu_ioctl_start_work().
- */
-int cxl_start_context(struct cxl_context *ctx, u64 wed,
- struct task_struct *task)
-{
- int rc = 0;
- bool kernel = true;
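- /* A NULL task attaches the context as a kernel context */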
-
- pr_devel("%s: pe: %i\n", __func__, ctx->pe);
-
- mutex_lock(&ctx->status_mutex);
- if (ctx->status == STARTED)
- goto out; /* already started */
-
- /*
- * Increment the mapped context count for adapter. This also checks
- * if adapter_context_lock is taken.
- */
- rc = cxl_adapter_context_get(ctx->afu->adapter);
- if (rc)
- goto out;
-
- if (task) {
- ctx->pid = get_task_pid(task, PIDTYPE_PID);
- kernel = false;
-
- /* acquire a reference to the task's mm */
- ctx->mm = get_task_mm(current);
-
- /* ensure this mm_struct can't be freed */
- cxl_context_mm_count_get(ctx);
-
- if (ctx->mm) {
- /* decrement the use count from above */
- mmput(ctx->mm);
- /* make TLBIs for this context global */
- mm_context_add_copro(ctx->mm);
- }
- }
-
- /*
- * Increment driver use count. Enables global TLBIs for hash
- * and callbacks to handle the segment table
- */
- cxl_ctx_get();
-
- /* See the comment in afu_ioctl_start_work() */
- smp_mb();
-
- if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
- put_pid(ctx->pid);
- ctx->pid = NULL;
- cxl_adapter_context_put(ctx->afu->adapter);
- cxl_ctx_put();
- if (task) {
- cxl_context_mm_count_put(ctx);
- if (ctx->mm)
- mm_context_remove_copro(ctx->mm);
- }
- goto out;
- }
-
- ctx->status = STARTED;
-out:
- mutex_unlock(&ctx->status_mutex);
- return rc;
-}
-EXPORT_SYMBOL_GPL(cxl_start_context);
-
-int cxl_process_element(struct cxl_context *ctx)
-{
- return ctx->external_pe;
-}
-EXPORT_SYMBOL_GPL(cxl_process_element);
-
-/* Stop a context. Returns 0 on success, otherwise -Errno */
-int cxl_stop_context(struct cxl_context *ctx)
-{
- return __detach_context(ctx);
-}
-EXPORT_SYMBOL_GPL(cxl_stop_context);
-
-void cxl_set_master(struct cxl_context *ctx)
-{
- ctx->master = true;
-}
-EXPORT_SYMBOL_GPL(cxl_set_master);
-
-/* wrappers around afu_* file ops which are EXPORTED */
-int cxl_fd_open(struct inode *inode, struct file *file)
-{
- return afu_open(inode, file);
-}
-EXPORT_SYMBOL_GPL(cxl_fd_open);
-int cxl_fd_release(struct inode *inode, struct file *file)
-{
- return afu_release(inode, file);
-}
-EXPORT_SYMBOL_GPL(cxl_fd_release);
-long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- return afu_ioctl(file, cmd, arg);
-}
-EXPORT_SYMBOL_GPL(cxl_fd_ioctl);
-int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm)
-{
- return afu_mmap(file, vm);
-}
-EXPORT_SYMBOL_GPL(cxl_fd_mmap);
-__poll_t cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
-{
- return afu_poll(file, poll);
-}
-EXPORT_SYMBOL_GPL(cxl_fd_poll);
-ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
- loff_t *off)
-{
- return afu_read(file, buf, count, off);
-}
-EXPORT_SYMBOL_GPL(cxl_fd_read);
-
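-/* Fill in only the file ops the caller left NULL, preserving any overrides */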
-#define PATCH_FOPS(NAME) if (!fops->NAME) fops->NAME = afu_fops.NAME
-
-/* Get a struct file and fd for a context and attach the ops */
-struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
- int *fd)
-{
- struct file *file;
- int rc, flags, fdtmp;
- char *name = NULL;
-
- /* only allow one per context */
- if (ctx->mapping)
- return ERR_PTR(-EEXIST);
-
- flags = O_RDWR | O_CLOEXEC;
-
- /* This code is similar to anon_inode_getfd() */
- rc = get_unused_fd_flags(flags);
- if (rc < 0)
- return ERR_PTR(rc);
- fdtmp = rc;
-
- /*
- * Patch the file ops. We need to be careful that this is reentrant safe.
- */
- if (fops) {
- PATCH_FOPS(open);
- PATCH_FOPS(poll);
- PATCH_FOPS(read);
- PATCH_FOPS(release);
- PATCH_FOPS(unlocked_ioctl);
- PATCH_FOPS(compat_ioctl);
- PATCH_FOPS(mmap);
- } else /* use default ops */
- fops = (struct file_operations *)&afu_fops;
-
- name = kasprintf(GFP_KERNEL, "cxl:%d", ctx->pe);
- file = cxl_getfile(name, fops, ctx, flags);
- kfree(name);
- if (IS_ERR(file))
- goto err_fd;
-
- cxl_context_set_mapping(ctx, file->f_mapping);
- *fd = fdtmp;
- return file;
-
-err_fd:
- put_unused_fd(fdtmp);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(cxl_get_fd);
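-/*
- * Typical use by an AFU driver might look like this (illustrative only,
- * my_fops/my_read are placeholders): override the ops you care about and
- * let cxl_get_fd() patch in the rest:
- *
- *	struct file_operations my_fops = { .read = my_read };
- *	struct file *file = cxl_get_fd(ctx, &my_fops, &fd);
- *	if (!IS_ERR_OR_NULL(file))
- *		fd_install(fd, file);
- */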
-
-struct cxl_context *cxl_fops_get_context(struct file *file)
-{
- return file->private_data;
-}
-EXPORT_SYMBOL_GPL(cxl_fops_get_context);
-
-void cxl_set_driver_ops(struct cxl_context *ctx,
- struct cxl_afu_driver_ops *ops)
-{
- WARN_ON(!ops->fetch_event || !ops->event_delivered);
- atomic_set(&ctx->afu_driver_events, 0);
- ctx->afu_driver_ops = ops;
-}
-EXPORT_SYMBOL_GPL(cxl_set_driver_ops);
-
-void cxl_context_events_pending(struct cxl_context *ctx,
- unsigned int new_events)
-{
- atomic_add(new_events, &ctx->afu_driver_events);
- wake_up_all(&ctx->wq);
-}
-EXPORT_SYMBOL_GPL(cxl_context_events_pending);
-
-int cxl_start_work(struct cxl_context *ctx,
- struct cxl_ioctl_start_work *work)
-{
- int rc;
-
- /* code taken from afu_ioctl_start_work */
- if (!(work->flags & CXL_START_WORK_NUM_IRQS))
- work->num_interrupts = ctx->afu->pp_irqs;
- else if ((work->num_interrupts < ctx->afu->pp_irqs) ||
- (work->num_interrupts > ctx->afu->irqs_max)) {
- return -EINVAL;
- }
-
- rc = afu_register_irqs(ctx, work->num_interrupts);
- if (rc)
- return rc;
-
- rc = cxl_start_context(ctx, work->work_element_descriptor, current);
- if (rc < 0) {
- afu_release_irqs(ctx, ctx);
- return rc;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(cxl_start_work);
-
-void __iomem *cxl_psa_map(struct cxl_context *ctx)
-{
- if (ctx->status != STARTED)
- return NULL;
-
- pr_devel("%s: psn_phys%llx size:%llx\n",
- __func__, ctx->psn_phys, ctx->psn_size);
- return ioremap(ctx->psn_phys, ctx->psn_size);
-}
-EXPORT_SYMBOL_GPL(cxl_psa_map);
-
-void cxl_psa_unmap(void __iomem *addr)
-{
- iounmap(addr);
-}
-EXPORT_SYMBOL_GPL(cxl_psa_unmap);
-
-int cxl_afu_reset(struct cxl_context *ctx)
-{
- struct cxl_afu *afu = ctx->afu;
- int rc;
-
- rc = cxl_ops->afu_reset(afu);
- if (rc)
- return rc;
-
- return cxl_ops->afu_check_and_enable(afu);
-}
-EXPORT_SYMBOL_GPL(cxl_afu_reset);
-
-void cxl_perst_reloads_same_image(struct cxl_afu *afu,
- bool perst_reloads_same_image)
-{
- afu->adapter->perst_same_image = perst_reloads_same_image;
-}
-EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);
-
-ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
-{
- struct cxl_afu *afu = cxl_pci_to_afu(dev);
- if (IS_ERR(afu))
- return -ENODEV;
-
- return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
-}
-EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);
diff --git a/drivers/misc/cxl/base.c b/drivers/misc/cxl/base.c
deleted file mode 100644
index b054562c046e..000000000000
--- a/drivers/misc/cxl/base.c
+++ /dev/null
@@ -1,126 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/module.h>
-#include <linux/rcupdate.h>
-#include <asm/errno.h>
-#include <misc/cxl-base.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include "cxl.h"
-
-/* protected by rcu */
-static struct cxl_calls *cxl_calls;
-
-atomic_t cxl_use_count = ATOMIC_INIT(0);
-EXPORT_SYMBOL(cxl_use_count);
-
-#ifdef CONFIG_CXL_MODULE
-
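-/*
- * With cxl built as a module, take a module reference under rcu_read_lock()
- * so the callbacks cannot be unloaded while in use; the built-in variant
- * below needs no such protection.
- */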
-static inline struct cxl_calls *cxl_calls_get(void)
-{
- struct cxl_calls *calls = NULL;
-
- rcu_read_lock();
- calls = rcu_dereference(cxl_calls);
- if (calls && !try_module_get(calls->owner))
- calls = NULL;
- rcu_read_unlock();
-
- return calls;
-}
-
-static inline void cxl_calls_put(struct cxl_calls *calls)
-{
- BUG_ON(calls != cxl_calls);
-
- /* we don't need to rcu this, as we hold a reference to the module */
- module_put(cxl_calls->owner);
-}
-
-#else /* !defined CONFIG_CXL_MODULE */
-
-static inline struct cxl_calls *cxl_calls_get(void)
-{
- return cxl_calls;
-}
-
-static inline void cxl_calls_put(struct cxl_calls *calls) { }
-
-#endif /* CONFIG_CXL_MODULE */
-
-/* AFU refcount management */
-struct cxl_afu *cxl_afu_get(struct cxl_afu *afu)
-{
- return (get_device(&afu->dev) == NULL) ? NULL : afu;
-}
-EXPORT_SYMBOL_GPL(cxl_afu_get);
-
-void cxl_afu_put(struct cxl_afu *afu)
-{
- put_device(&afu->dev);
-}
-EXPORT_SYMBOL_GPL(cxl_afu_put);
-
-void cxl_slbia(struct mm_struct *mm)
-{
- struct cxl_calls *calls;
-
- calls = cxl_calls_get();
- if (!calls)
- return;
-
- if (cxl_ctx_in_use())
- calls->cxl_slbia(mm);
-
- cxl_calls_put(calls);
-}
-
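-/* Publish the main cxl module's callbacks; cxl_slbia() reads them via RCU */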
-int register_cxl_calls(struct cxl_calls *calls)
-{
- if (cxl_calls)
- return -EBUSY;
-
- rcu_assign_pointer(cxl_calls, calls);
- return 0;
-}
-EXPORT_SYMBOL_GPL(register_cxl_calls);
-
-void unregister_cxl_calls(struct cxl_calls *calls)
-{
- BUG_ON(cxl_calls->owner != calls->owner);
- RCU_INIT_POINTER(cxl_calls, NULL);
- synchronize_rcu();
-}
-EXPORT_SYMBOL_GPL(unregister_cxl_calls);
-
-int cxl_update_properties(struct device_node *dn,
- struct property *new_prop)
-{
- return of_update_property(dn, new_prop);
-}
-EXPORT_SYMBOL_GPL(cxl_update_properties);
-
-static int __init cxl_base_init(void)
-{
- struct device_node *np;
- struct platform_device *dev;
- int count = 0;
-
- /*
- * Scan for compatible devices in guest only
- */
- if (cpu_has_feature(CPU_FTR_HVMODE))
- return 0;
-
- for_each_compatible_node(np, NULL, "ibm,coherent-platform-facility") {
- dev = of_platform_device_create(np, NULL, NULL);
- if (dev)
- count++;
- }
- pr_devel("Found %d cxl device(s)\n", count);
- return 0;
-}
-device_initcall(cxl_base_init);
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
deleted file mode 100644
index 76b5ea66dfa1..000000000000
--- a/drivers/misc/cxl/context.c
+++ /dev/null
@@ -1,362 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/bitmap.h>
-#include <linux/sched.h>
-#include <linux/pid.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <linux/idr.h>
-#include <linux/sched/mm.h>
-#include <linux/mmu_context.h>
-#include <asm/cputable.h>
-#include <asm/current.h>
-#include <asm/copro.h>
-
-#include "cxl.h"
-
-/*
- * Allocates space for a CXL context.
- */
-struct cxl_context *cxl_context_alloc(void)
-{
- return kzalloc(sizeof(struct cxl_context), GFP_KERNEL);
-}
-
-/*
- * Initialises a CXL context.
- */
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
-{
- int i;
-
- ctx->afu = afu;
- ctx->master = master;
- ctx->pid = NULL; /* Set in start work ioctl */
- mutex_init(&ctx->mapping_lock);
- ctx->mapping = NULL;
- ctx->tidr = 0;
- ctx->assign_tidr = false;
-
- if (cxl_is_power8()) {
- spin_lock_init(&ctx->sste_lock);
-
- /*
- * Allocate the segment table before we put it in the IDR so that we
- * can always access it when dereferenced from IDR. For the same
- * reason, the segment table is only destroyed after the context is
- * removed from the IDR. Access to this in the IOCTL is protected by
- * Linux filesystem semantics (can't IOCTL until open is complete).
- */
- i = cxl_alloc_sst(ctx);
- if (i)
- return i;
- }
-
- INIT_WORK(&ctx->fault_work, cxl_handle_fault);
-
- init_waitqueue_head(&ctx->wq);
- spin_lock_init(&ctx->lock);
-
- ctx->irq_bitmap = NULL;
- ctx->pending_irq = false;
- ctx->pending_fault = false;
- ctx->pending_afu_err = false;
-
- INIT_LIST_HEAD(&ctx->irq_names);
-
- /*
- * When we have to destroy all contexts in cxl_context_detach_all() we
- * end up with afu_release_irqs() called from inside a
- * idr_for_each_entry(). Hence we need to make sure that anything
- * dereferenced from this IDR is ok before we allocate the IDR here.
- * This clears out the IRQ ranges to ensure this.
- */
- for (i = 0; i < CXL_IRQ_RANGES; i++)
- ctx->irqs.range[i] = 0;
-
- mutex_init(&ctx->status_mutex);
-
- ctx->status = OPENED;
-
- /*
- * Allocating the IDR entry! We had better make sure everything that is
- * dereferenced from it is already set up.
- */
- mutex_lock(&afu->contexts_lock);
- idr_preload(GFP_KERNEL);
- i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
- ctx->afu->num_procs, GFP_NOWAIT);
- idr_preload_end();
- mutex_unlock(&afu->contexts_lock);
- if (i < 0)
- return i;
-
- ctx->pe = i;
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- ctx->elem = &ctx->afu->native->spa[i];
- ctx->external_pe = ctx->pe;
- } else {
- ctx->external_pe = -1; /* assigned when attaching */
- }
- ctx->pe_inserted = false;
-
- /*
- * take a ref on the afu so that it stays alive at least until
- * this context is reclaimed in reclaim_ctx.
- */
- cxl_afu_get(afu);
- return 0;
-}
-
-void cxl_context_set_mapping(struct cxl_context *ctx,
- struct address_space *mapping)
-{
- mutex_lock(&ctx->mapping_lock);
- ctx->mapping = mapping;
- mutex_unlock(&ctx->mapping_lock);
-}
-
-static vm_fault_t cxl_mmap_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct cxl_context *ctx = vma->vm_file->private_data;
- u64 area, offset;
- vm_fault_t ret;
-
- offset = vmf->pgoff << PAGE_SHIFT;
-
- pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
- __func__, ctx->pe, vmf->address, offset);
-
- if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
- area = ctx->afu->psn_phys;
- if (offset >= ctx->afu->adapter->ps_size)
- return VM_FAULT_SIGBUS;
- } else {
- area = ctx->psn_phys;
- if (offset >= ctx->psn_size)
- return VM_FAULT_SIGBUS;
- }
-
- mutex_lock(&ctx->status_mutex);
-
- if (ctx->status != STARTED) {
- mutex_unlock(&ctx->status_mutex);
- pr_devel("%s: Context not started, failing problem state access\n", __func__);
- if (ctx->mmio_err_ff) {
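- /*
- * Rather than SIGBUS, back the mapping with a page of 0xff
- * bytes so reads behave like MMIO from a dead link.
- */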
- if (!ctx->ff_page) {
- ctx->ff_page = alloc_page(GFP_USER);
- if (!ctx->ff_page)
- return VM_FAULT_OOM;
- memset(page_address(ctx->ff_page), 0xff, PAGE_SIZE);
- }
- get_page(ctx->ff_page);
- vmf->page = ctx->ff_page;
- vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
- return 0;
- }
- return VM_FAULT_SIGBUS;
- }
-
- ret = vmf_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
-
- mutex_unlock(&ctx->status_mutex);
-
- return ret;
-}
-
-static const struct vm_operations_struct cxl_mmap_vmops = {
- .fault = cxl_mmap_fault,
-};
-
-/*
- * Map a per-context mmio space into the given vma.
- */
-int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
-{
- u64 start = vma->vm_pgoff << PAGE_SHIFT;
- u64 len = vma->vm_end - vma->vm_start;
-
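- /*
- * Dedicated mode maps the AFU-wide problem state area; directed
- * mode maps only this context's slice of it.
- */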
- if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
- if (start + len > ctx->afu->adapter->ps_size)
- return -EINVAL;
-
- if (cxl_is_power9()) {
- /*
- * Make sure there is a valid problem state
- * area space for this AFU.
- */
- if (ctx->master && !ctx->afu->psa) {
- pr_devel("AFU doesn't support mmio space\n");
- return -EINVAL;
- }
-
- /* Can't mmap until the AFU is enabled */
- if (!ctx->afu->enabled)
- return -EBUSY;
- }
- } else {
- if (start + len > ctx->psn_size)
- return -EINVAL;
-
- /* Make sure there is a valid per process space for this AFU */
- if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
- pr_devel("AFU doesn't support mmio space\n");
- return -EINVAL;
- }
-
- /* Can't mmap until the AFU is enabled */
- if (!ctx->afu->enabled)
- return -EBUSY;
- }
-
- pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
- ctx->psn_phys, ctx->pe, ctx->master);
-
- vm_flags_set(vma, VM_IO | VM_PFNMAP);
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_ops = &cxl_mmap_vmops;
- return 0;
-}
-
-/*
- * Detach a context from the hardware. This disables interrupts and doesn't
- * return until all outstanding interrupts for this context have completed. The
- * hardware should no longer access *ctx after this has returned.
- */
-int __detach_context(struct cxl_context *ctx)
-{
- enum cxl_context_status status;
-
- mutex_lock(&ctx->status_mutex);
- status = ctx->status;
- ctx->status = CLOSED;
- mutex_unlock(&ctx->status_mutex);
- if (status != STARTED)
- return -EBUSY;
-
- /* Only warn if we detached while the link was OK.
- * If detach fails when hw is down, we don't care.
- */
- WARN_ON(cxl_ops->detach_process(ctx) &&
- cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
- flush_work(&ctx->fault_work); /* Only needed for dedicated process */
-
- /*
- * Wait until no further interrupts are presented by the PSL
- * for this context.
- */
- if (cxl_ops->irq_wait)
- cxl_ops->irq_wait(ctx);
-
- /* release the reference to the group leader and mm handling pid */
- put_pid(ctx->pid);
-
- cxl_ctx_put();
-
- /* Decrease the attached context count on the adapter */
- cxl_adapter_context_put(ctx->afu->adapter);
-
- /* Decrease the mm count on the context */
- cxl_context_mm_count_put(ctx);
- if (ctx->mm)
- mm_context_remove_copro(ctx->mm);
- ctx->mm = NULL;
-
- return 0;
-}
-
-/*
- * Detach the given context from the AFU. This doesn't actually
- * free the context but it should stop the context running in hardware
- * (ie. prevent this context from generating any further interrupts
- * so that it can be freed).
- */
-void cxl_context_detach(struct cxl_context *ctx)
-{
- int rc;
-
- rc = __detach_context(ctx);
- if (rc)
- return;
-
- afu_release_irqs(ctx, ctx);
- wake_up_all(&ctx->wq);
-}
-
-/*
- * Detach all contexts on the given AFU.
- */
-void cxl_context_detach_all(struct cxl_afu *afu)
-{
- struct cxl_context *ctx;
- int tmp;
-
- mutex_lock(&afu->contexts_lock);
- idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
- /*
- * Anything done in here needs to be set up before the IDR is
- * created and torn down after the IDR is removed.
- */
- cxl_context_detach(ctx);
-
- /*
- * We are force detaching - remove any active PSA mappings so
- * userspace cannot interfere with the card if it comes back.
- * Easiest way to exercise this is to unbind and rebind the
- * driver via sysfs while it is in use.
- */
- mutex_lock(&ctx->mapping_lock);
- if (ctx->mapping)
- unmap_mapping_range(ctx->mapping, 0, 0, 1);
- mutex_unlock(&ctx->mapping_lock);
- }
- mutex_unlock(&afu->contexts_lock);
-}
-
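-/*
- * Final teardown, deferred through call_rcu() from cxl_context_free() so
- * that concurrent lookups of the context can drain first.
- */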
-static void reclaim_ctx(struct rcu_head *rcu)
-{
- struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);
-
- if (cxl_is_power8())
- free_page((u64)ctx->sstp);
- if (ctx->ff_page)
- __free_page(ctx->ff_page);
- ctx->sstp = NULL;
-
- bitmap_free(ctx->irq_bitmap);
-
- /* Drop ref to the afu device taken during cxl_context_init */
- cxl_afu_put(ctx->afu);
-
- kfree(ctx);
-}
-
-void cxl_context_free(struct cxl_context *ctx)
-{
- if (ctx->kernelapi && ctx->mapping)
- cxl_release_mapping(ctx);
- mutex_lock(&ctx->afu->contexts_lock);
- idr_remove(&ctx->afu->contexts_idr, ctx->pe);
- mutex_unlock(&ctx->afu->contexts_lock);
- call_rcu(&ctx->rcu, reclaim_ctx);
-}
-
-void cxl_context_mm_count_get(struct cxl_context *ctx)
-{
- if (ctx->mm)
- mmgrab(ctx->mm);
-}
-
-void cxl_context_mm_count_put(struct cxl_context *ctx)
-{
- if (ctx->mm)
- mmdrop(ctx->mm);
-}
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
deleted file mode 100644
index 6ad0ab892675..000000000000
--- a/drivers/misc/cxl/cxl.h
+++ /dev/null
@@ -1,1135 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#ifndef _CXL_H_
-#define _CXL_H_
-
-#include <linux/interrupt.h>
-#include <linux/semaphore.h>
-#include <linux/device.h>
-#include <linux/types.h>
-#include <linux/cdev.h>
-#include <linux/pid.h>
-#include <linux/io.h>
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <asm/cputable.h>
-#include <asm/mmu.h>
-#include <asm/reg.h>
-#include <misc/cxl-base.h>
-
-#include <misc/cxl.h>
-#include <uapi/misc/cxl.h>
-
-extern uint cxl_verbose;
-
-struct property;
-
-#define CXL_TIMEOUT 5
-
-/*
- * Bump version each time a user API change is made, whether it is
- * backwards compatible or not.
- */
-#define CXL_API_VERSION 3
-#define CXL_API_VERSION_COMPATIBLE 1
-
-/*
- * Opaque types to avoid accidentally passing registers for the wrong MMIO
- *
- * At the end of the day, I'm not married to using typedef here, but it might
- * (and has!) help avoid bugs like mixing up CXL_PSL_CtxTime and
- * CXL_PSL_CtxTime_An, or calling cxl_p1n_write instead of cxl_p1_write.
- *
- * I'm quite happy if these are changed back to #defines before upstreaming;
- * it should be little more than a regexp search+replace operation in this file.
- */
-typedef struct {
- const int x;
-} cxl_p1_reg_t;
-typedef struct {
- const int x;
-} cxl_p1n_reg_t;
-typedef struct {
- const int x;
-} cxl_p2n_reg_t;
-#define cxl_reg_off(reg) \
- (reg.x)
-
-/* Memory maps. Ref CXL Appendix A */
-
-/* PSL Privilege 1 Memory Map */
-/* Configuration and Control area - CAIA 1&2 */
-static const cxl_p1_reg_t CXL_PSL_CtxTime = {0x0000};
-static const cxl_p1_reg_t CXL_PSL_ErrIVTE = {0x0008};
-static const cxl_p1_reg_t CXL_PSL_KEY1 = {0x0010};
-static const cxl_p1_reg_t CXL_PSL_KEY2 = {0x0018};
-static const cxl_p1_reg_t CXL_PSL_Control = {0x0020};
-/* Downloading */
-static const cxl_p1_reg_t CXL_PSL_DLCNTL = {0x0060};
-static const cxl_p1_reg_t CXL_PSL_DLADDR = {0x0068};
-
-/* PSL Lookaside Buffer Management Area - CAIA 1 */
-static const cxl_p1_reg_t CXL_PSL_LBISEL = {0x0080};
-static const cxl_p1_reg_t CXL_PSL_SLBIE = {0x0088};
-static const cxl_p1_reg_t CXL_PSL_SLBIA = {0x0090};
-static const cxl_p1_reg_t CXL_PSL_TLBIE = {0x00A0};
-static const cxl_p1_reg_t CXL_PSL_TLBIA = {0x00A8};
-static const cxl_p1_reg_t CXL_PSL_AFUSEL = {0x00B0};
-
-/* 0x00C0:7EFF Implementation dependent area */
-/* PSL registers - CAIA 1 */
-static const cxl_p1_reg_t CXL_PSL_FIR1 = {0x0100};
-static const cxl_p1_reg_t CXL_PSL_FIR2 = {0x0108};
-static const cxl_p1_reg_t CXL_PSL_Timebase = {0x0110};
-static const cxl_p1_reg_t CXL_PSL_VERSION = {0x0118};
-static const cxl_p1_reg_t CXL_PSL_RESLCKTO = {0x0128};
-static const cxl_p1_reg_t CXL_PSL_TB_CTLSTAT = {0x0140};
-static const cxl_p1_reg_t CXL_PSL_FIR_CNTL = {0x0148};
-static const cxl_p1_reg_t CXL_PSL_DSNDCTL = {0x0150};
-static const cxl_p1_reg_t CXL_PSL_SNWRALLOC = {0x0158};
-static const cxl_p1_reg_t CXL_PSL_TRACE = {0x0170};
-/* PSL registers - CAIA 2 */
-static const cxl_p1_reg_t CXL_PSL9_CONTROL = {0x0020};
-static const cxl_p1_reg_t CXL_XSL9_INV = {0x0110};
-static const cxl_p1_reg_t CXL_XSL9_DBG = {0x0130};
-static const cxl_p1_reg_t CXL_XSL9_DEF = {0x0140};
-static const cxl_p1_reg_t CXL_XSL9_DSNCTL = {0x0168};
-static const cxl_p1_reg_t CXL_PSL9_FIR1 = {0x0300};
-static const cxl_p1_reg_t CXL_PSL9_FIR_MASK = {0x0308};
-static const cxl_p1_reg_t CXL_PSL9_Timebase = {0x0310};
-static const cxl_p1_reg_t CXL_PSL9_DEBUG = {0x0320};
-static const cxl_p1_reg_t CXL_PSL9_FIR_CNTL = {0x0348};
-static const cxl_p1_reg_t CXL_PSL9_DSNDCTL = {0x0350};
-static const cxl_p1_reg_t CXL_PSL9_TB_CTLSTAT = {0x0340};
-static const cxl_p1_reg_t CXL_PSL9_TRACECFG = {0x0368};
-static const cxl_p1_reg_t CXL_PSL9_APCDEDALLOC = {0x0378};
-static const cxl_p1_reg_t CXL_PSL9_APCDEDTYPE = {0x0380};
-static const cxl_p1_reg_t CXL_PSL9_TNR_ADDR = {0x0388};
-static const cxl_p1_reg_t CXL_PSL9_CTCCFG = {0x0390};
-static const cxl_p1_reg_t CXL_PSL9_GP_CT = {0x0398};
-static const cxl_p1_reg_t CXL_XSL9_IERAT = {0x0588};
-static const cxl_p1_reg_t CXL_XSL9_ILPP = {0x0590};
-
-/* 0x7F00:7FFF Reserved PCIe MSI-X Pending Bit Array area */
-/* 0x8000:FFFF Reserved PCIe MSI-X Table Area */
-
-/* PSL Slice Privilege 1 Memory Map */
-/* Configuration Area - CAIA 1&2 */
-static const cxl_p1n_reg_t CXL_PSL_SR_An = {0x00};
-static const cxl_p1n_reg_t CXL_PSL_LPID_An = {0x08};
-static const cxl_p1n_reg_t CXL_PSL_AMBAR_An = {0x10};
-static const cxl_p1n_reg_t CXL_PSL_SPOffset_An = {0x18};
-static const cxl_p1n_reg_t CXL_PSL_ID_An = {0x20};
-static const cxl_p1n_reg_t CXL_PSL_SERR_An = {0x28};
-/* Memory Management and Lookaside Buffer Management - CAIA 1 */
-static const cxl_p1n_reg_t CXL_PSL_SDR_An = {0x30};
-/* Memory Management and Lookaside Buffer Management - CAIA 1&2 */
-static const cxl_p1n_reg_t CXL_PSL_AMOR_An = {0x38};
-/* Pointer Area - CAIA 1&2 */
-static const cxl_p1n_reg_t CXL_HAURP_An = {0x80};
-static const cxl_p1n_reg_t CXL_PSL_SPAP_An = {0x88};
-static const cxl_p1n_reg_t CXL_PSL_LLCMD_An = {0x90};
-/* Control Area - CAIA 1&2 */
-static const cxl_p1n_reg_t CXL_PSL_SCNTL_An = {0xA0};
-static const cxl_p1n_reg_t CXL_PSL_CtxTime_An = {0xA8};
-static const cxl_p1n_reg_t CXL_PSL_IVTE_Offset_An = {0xB0};
-static const cxl_p1n_reg_t CXL_PSL_IVTE_Limit_An = {0xB8};
-/* 0xC0:FF Implementation Dependent Area - CAIA 1&2 */
-static const cxl_p1n_reg_t CXL_PSL_FIR_SLICE_An = {0xC0};
-static const cxl_p1n_reg_t CXL_AFU_DEBUG_An = {0xC8};
-/* 0xC0:FF Implementation Dependent Area - CAIA 1 */
-static const cxl_p1n_reg_t CXL_PSL_APCALLOC_A = {0xD0};
-static const cxl_p1n_reg_t CXL_PSL_COALLOC_A = {0xD8};
-static const cxl_p1n_reg_t CXL_PSL_RXCTL_A = {0xE0};
-static const cxl_p1n_reg_t CXL_PSL_SLICE_TRACE = {0xE8};
-
-/* PSL Slice Privilege 2 Memory Map */
-/* Configuration and Control Area - CAIA 1&2 */
-static const cxl_p2n_reg_t CXL_PSL_PID_TID_An = {0x000};
-static const cxl_p2n_reg_t CXL_CSRP_An = {0x008};
-/* Configuration and Control Area - CAIA 1 */
-static const cxl_p2n_reg_t CXL_AURP0_An = {0x010};
-static const cxl_p2n_reg_t CXL_AURP1_An = {0x018};
-static const cxl_p2n_reg_t CXL_SSTP0_An = {0x020};
-static const cxl_p2n_reg_t CXL_SSTP1_An = {0x028};
-/* Configuration and Control Area - CAIA 1 */
-static const cxl_p2n_reg_t CXL_PSL_AMR_An = {0x030};
-/* Segment Lookaside Buffer Management - CAIA 1 */
-static const cxl_p2n_reg_t CXL_SLBIE_An = {0x040};
-static const cxl_p2n_reg_t CXL_SLBIA_An = {0x048};
-static const cxl_p2n_reg_t CXL_SLBI_Select_An = {0x050};
-/* Interrupt Registers - CAIA 1&2 */
-static const cxl_p2n_reg_t CXL_PSL_DSISR_An = {0x060};
-static const cxl_p2n_reg_t CXL_PSL_DAR_An = {0x068};
-static const cxl_p2n_reg_t CXL_PSL_DSR_An = {0x070};
-static const cxl_p2n_reg_t CXL_PSL_TFC_An = {0x078};
-static const cxl_p2n_reg_t CXL_PSL_PEHandle_An = {0x080};
-static const cxl_p2n_reg_t CXL_PSL_ErrStat_An = {0x088};
-/* AFU Registers - CAIA 1&2 */
-static const cxl_p2n_reg_t CXL_AFU_Cntl_An = {0x090};
-static const cxl_p2n_reg_t CXL_AFU_ERR_An = {0x098};
-/* Work Element Descriptor - CAIA 1&2 */
-static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
-/* 0x0C0:FFF Implementation Dependent Area */
-
-#define CXL_PSL_SPAP_Addr 0x0ffffffffffff000ULL
-#define CXL_PSL_SPAP_Size 0x0000000000000ff0ULL
-#define CXL_PSL_SPAP_Size_Shift 4
-#define CXL_PSL_SPAP_V 0x0000000000000001ULL
-
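-/*
- * Bit definitions below use IBM (big-endian) bit numbering, where bit 0 is
- * the most significant bit; hence the (63-n) style shifts.
- */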
-/****** CXL_PSL_Control ****************************************************/
-#define CXL_PSL_Control_tb (0x1ull << (63-63))
-#define CXL_PSL_Control_Fr (0x1ull << (63-31))
-#define CXL_PSL_Control_Fs_MASK (0x3ull << (63-29))
-#define CXL_PSL_Control_Fs_Complete (0x3ull << (63-29))
-
-/****** CXL_PSL_DLCNTL *****************************************************/
-#define CXL_PSL_DLCNTL_D (0x1ull << (63-28))
-#define CXL_PSL_DLCNTL_C (0x1ull << (63-29))
-#define CXL_PSL_DLCNTL_E (0x1ull << (63-30))
-#define CXL_PSL_DLCNTL_S (0x1ull << (63-31))
-#define CXL_PSL_DLCNTL_CE (CXL_PSL_DLCNTL_C | CXL_PSL_DLCNTL_E)
-#define CXL_PSL_DLCNTL_DCES (CXL_PSL_DLCNTL_D | CXL_PSL_DLCNTL_CE | CXL_PSL_DLCNTL_S)
-
-/****** CXL_PSL_SR_An ******************************************************/
-#define CXL_PSL_SR_An_SF MSR_SF /* 64bit */
-#define CXL_PSL_SR_An_TA (1ull << (63-1)) /* Tags active, GA1: 0 */
-#define CXL_PSL_SR_An_HV MSR_HV /* Hypervisor, GA1: 0 */
-#define CXL_PSL_SR_An_XLAT_hpt (0ull << (63-6))/* Hashed page table (HPT) mode */
-#define CXL_PSL_SR_An_XLAT_roh (2ull << (63-6))/* Radix on HPT mode */
-#define CXL_PSL_SR_An_XLAT_ror (3ull << (63-6))/* Radix on Radix mode */
-#define CXL_PSL_SR_An_BOT (1ull << (63-10)) /* Use the in-memory segment table */
-#define CXL_PSL_SR_An_PR MSR_PR /* Problem state, GA1: 1 */
-#define CXL_PSL_SR_An_ISL (1ull << (63-53)) /* Ignore Segment Large Page */
-#define CXL_PSL_SR_An_TC (1ull << (63-54)) /* Page Table secondary hash */
-#define CXL_PSL_SR_An_US (1ull << (63-56)) /* User state, GA1: X */
-#define CXL_PSL_SR_An_SC (1ull << (63-58)) /* Segment Table secondary hash */
-#define CXL_PSL_SR_An_R MSR_DR /* Relocate, GA1: 1 */
-#define CXL_PSL_SR_An_MP (1ull << (63-62)) /* Master Process */
-#define CXL_PSL_SR_An_LE (1ull << (63-63)) /* Little Endian */
-
-/****** CXL_PSL_ID_An ****************************************************/
-#define CXL_PSL_ID_An_F (1ull << (63-31))
-#define CXL_PSL_ID_An_L (1ull << (63-30))
-
-/****** CXL_PSL_SERR_An ****************************************************/
-#define CXL_PSL_SERR_An_afuto (1ull << (63-0))
-#define CXL_PSL_SERR_An_afudis (1ull << (63-1))
-#define CXL_PSL_SERR_An_afuov (1ull << (63-2))
-#define CXL_PSL_SERR_An_badsrc (1ull << (63-3))
-#define CXL_PSL_SERR_An_badctx (1ull << (63-4))
-#define CXL_PSL_SERR_An_llcmdis (1ull << (63-5))
-#define CXL_PSL_SERR_An_llcmdto (1ull << (63-6))
-#define CXL_PSL_SERR_An_afupar (1ull << (63-7))
-#define CXL_PSL_SERR_An_afudup (1ull << (63-8))
-#define CXL_PSL_SERR_An_IRQS ( \
- CXL_PSL_SERR_An_afuto | CXL_PSL_SERR_An_afudis | CXL_PSL_SERR_An_afuov | \
- CXL_PSL_SERR_An_badsrc | CXL_PSL_SERR_An_badctx | CXL_PSL_SERR_An_llcmdis | \
- CXL_PSL_SERR_An_llcmdto | CXL_PSL_SERR_An_afupar | CXL_PSL_SERR_An_afudup)
-#define CXL_PSL_SERR_An_afuto_mask (1ull << (63-32))
-#define CXL_PSL_SERR_An_afudis_mask (1ull << (63-33))
-#define CXL_PSL_SERR_An_afuov_mask (1ull << (63-34))
-#define CXL_PSL_SERR_An_badsrc_mask (1ull << (63-35))
-#define CXL_PSL_SERR_An_badctx_mask (1ull << (63-36))
-#define CXL_PSL_SERR_An_llcmdis_mask (1ull << (63-37))
-#define CXL_PSL_SERR_An_llcmdto_mask (1ull << (63-38))
-#define CXL_PSL_SERR_An_afupar_mask (1ull << (63-39))
-#define CXL_PSL_SERR_An_afudup_mask (1ull << (63-40))
-#define CXL_PSL_SERR_An_IRQ_MASKS ( \
- CXL_PSL_SERR_An_afuto_mask | CXL_PSL_SERR_An_afudis_mask | CXL_PSL_SERR_An_afuov_mask | \
- CXL_PSL_SERR_An_badsrc_mask | CXL_PSL_SERR_An_badctx_mask | CXL_PSL_SERR_An_llcmdis_mask | \
- CXL_PSL_SERR_An_llcmdto_mask | CXL_PSL_SERR_An_afupar_mask | CXL_PSL_SERR_An_afudup_mask)
-
-#define CXL_PSL_SERR_An_AE (1ull << (63-30))
-
-/****** CXL_PSL_SCNTL_An ****************************************************/
-#define CXL_PSL_SCNTL_An_CR (0x1ull << (63-15))
-/* Programming Modes: */
-#define CXL_PSL_SCNTL_An_PM_MASK (0xffffull << (63-31))
-#define CXL_PSL_SCNTL_An_PM_Shared (0x0000ull << (63-31))
-#define CXL_PSL_SCNTL_An_PM_OS (0x0001ull << (63-31))
-#define CXL_PSL_SCNTL_An_PM_Process (0x0002ull << (63-31))
-#define CXL_PSL_SCNTL_An_PM_AFU (0x0004ull << (63-31))
-#define CXL_PSL_SCNTL_An_PM_AFU_PBT (0x0104ull << (63-31))
-/* Purge Status (ro) */
-#define CXL_PSL_SCNTL_An_Ps_MASK (0x3ull << (63-39))
-#define CXL_PSL_SCNTL_An_Ps_Pending (0x1ull << (63-39))
-#define CXL_PSL_SCNTL_An_Ps_Complete (0x3ull << (63-39))
-/* Purge */
-#define CXL_PSL_SCNTL_An_Pc (0x1ull << (63-48))
-/* Suspend Status (ro) */
-#define CXL_PSL_SCNTL_An_Ss_MASK (0x3ull << (63-55))
-#define CXL_PSL_SCNTL_An_Ss_Pending (0x1ull << (63-55))
-#define CXL_PSL_SCNTL_An_Ss_Complete (0x3ull << (63-55))
-/* Suspend Control */
-#define CXL_PSL_SCNTL_An_Sc (0x1ull << (63-63))
-
-/* AFU Slice Enable Status (ro) */
-#define CXL_AFU_Cntl_An_ES_MASK (0x7ull << (63-2))
-#define CXL_AFU_Cntl_An_ES_Disabled (0x0ull << (63-2))
-#define CXL_AFU_Cntl_An_ES_Enabled (0x4ull << (63-2))
-/* AFU Slice Enable */
-#define CXL_AFU_Cntl_An_E (0x1ull << (63-3))
-/* AFU Slice Reset status (ro) */
-#define CXL_AFU_Cntl_An_RS_MASK (0x3ull << (63-5))
-#define CXL_AFU_Cntl_An_RS_Pending (0x1ull << (63-5))
-#define CXL_AFU_Cntl_An_RS_Complete (0x2ull << (63-5))
-/* AFU Slice Reset */
-#define CXL_AFU_Cntl_An_RA (0x1ull << (63-7))
-
-/****** CXL_SSTP0/1_An ******************************************************/
-/* These top bits are for the segment that CONTAINS the segment table */
-#define CXL_SSTP0_An_B_SHIFT SLB_VSID_SSIZE_SHIFT
-#define CXL_SSTP0_An_KS (1ull << (63-2))
-#define CXL_SSTP0_An_KP (1ull << (63-3))
-#define CXL_SSTP0_An_N (1ull << (63-4))
-#define CXL_SSTP0_An_L (1ull << (63-5))
-#define CXL_SSTP0_An_C (1ull << (63-6))
-#define CXL_SSTP0_An_TA (1ull << (63-7))
-#define CXL_SSTP0_An_LP_SHIFT (63-9) /* 2 Bits */
-/* And finally, the virtual address & size of the segment table: */
-#define CXL_SSTP0_An_SegTableSize_SHIFT (63-31) /* 12 Bits */
-#define CXL_SSTP0_An_SegTableSize_MASK \
- (((1ull << 12) - 1) << CXL_SSTP0_An_SegTableSize_SHIFT)
-#define CXL_SSTP0_An_STVA_U_MASK ((1ull << (63-49))-1)
-#define CXL_SSTP1_An_STVA_L_MASK (~((1ull << (63-55))-1))
-#define CXL_SSTP1_An_V (1ull << (63-63))
-
-/****** CXL_PSL_SLBIE_[An] - CAIA 1 **************************************************/
-/* write: */
-#define CXL_SLBIE_C PPC_BIT(36) /* Class */
-#define CXL_SLBIE_SS PPC_BITMASK(37, 38) /* Segment Size */
-#define CXL_SLBIE_SS_SHIFT PPC_BITLSHIFT(38)
-#define CXL_SLBIE_TA PPC_BIT(38) /* Tags Active */
-/* read: */
-#define CXL_SLBIE_MAX PPC_BITMASK(24, 31)
-#define CXL_SLBIE_PENDING PPC_BITMASK(56, 63)
-
-/****** Common to all CXL_TLBIA/SLBIA_[An] - CAIA 1 **********************************/
-#define CXL_TLB_SLB_P (1ull) /* Pending (read) */
-
-/****** Common to all CXL_TLB/SLB_IA/IE_[An] registers - CAIA 1 **********************/
-#define CXL_TLB_SLB_IQ_ALL (0ull) /* Inv qualifier */
-#define CXL_TLB_SLB_IQ_LPID (1ull) /* Inv qualifier */
-#define CXL_TLB_SLB_IQ_LPIDPID (3ull) /* Inv qualifier */
-
-/****** CXL_PSL_AFUSEL ******************************************************/
-#define CXL_PSL_AFUSEL_A (1ull << (63-55)) /* Adapter wide invalidates affect all AFUs */
-
-/****** CXL_PSL_DSISR_An - CAIA 1 ****************************************************/
-#define CXL_PSL_DSISR_An_DS (1ull << (63-0)) /* Segment not found */
-#define CXL_PSL_DSISR_An_DM (1ull << (63-1)) /* PTE not found (See also: M) or protection fault */
-#define CXL_PSL_DSISR_An_ST (1ull << (63-2)) /* Segment Table PTE not found */
-#define CXL_PSL_DSISR_An_UR (1ull << (63-3)) /* AURP PTE not found */
-#define CXL_PSL_DSISR_TRANS (CXL_PSL_DSISR_An_DS | CXL_PSL_DSISR_An_DM | CXL_PSL_DSISR_An_ST | CXL_PSL_DSISR_An_UR)
-#define CXL_PSL_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */
-#define CXL_PSL_DSISR_An_AE (1ull << (63-5)) /* AFU Error */
-#define CXL_PSL_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */
-#define CXL_PSL_DSISR_PENDING (CXL_PSL_DSISR_TRANS | CXL_PSL_DSISR_An_PE | CXL_PSL_DSISR_An_AE | CXL_PSL_DSISR_An_OC)
-/* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */
-#define CXL_PSL_DSISR_An_M DSISR_NOHPTE /* PTE not found */
-#define CXL_PSL_DSISR_An_P DSISR_PROTFAULT /* Storage protection violation */
-#define CXL_PSL_DSISR_An_A (1ull << (63-37)) /* AFU lock access to write through or cache inhibited storage */
-#define CXL_PSL_DSISR_An_S DSISR_ISSTORE /* Access was afu_wr or afu_zero */
-#define CXL_PSL_DSISR_An_K DSISR_KEYFAULT /* Access not permitted by virtual page class key protection */
-
-/****** CXL_PSL_DSISR_An - CAIA 2 ****************************************************/
-#define CXL_PSL9_DSISR_An_TF (1ull << (63-3)) /* Translation fault */
-#define CXL_PSL9_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */
-#define CXL_PSL9_DSISR_An_AE (1ull << (63-5)) /* AFU Error */
-#define CXL_PSL9_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */
-#define CXL_PSL9_DSISR_An_S (1ull << (63-38)) /* TF for a write operation */
-#define CXL_PSL9_DSISR_PENDING (CXL_PSL9_DSISR_An_TF | CXL_PSL9_DSISR_An_PE | CXL_PSL9_DSISR_An_AE | CXL_PSL9_DSISR_An_OC)
-/*
- * NOTE: Bits 56:63 (Checkout Response Status) are valid when DSISR_An[TF] = 1
- * Status (0:7) Encoding
- */
-#define CXL_PSL9_DSISR_An_CO_MASK 0x00000000000000ffULL
-#define CXL_PSL9_DSISR_An_SF 0x0000000000000080ULL /* Segment Fault 0b10000000 */
-#define CXL_PSL9_DSISR_An_PF_SLR 0x0000000000000088ULL /* PTE not found (Single Level Radix) 0b10001000 */
-#define CXL_PSL9_DSISR_An_PF_RGC 0x000000000000008CULL /* PTE not found (Radix Guest (child)) 0b10001100 */
-#define CXL_PSL9_DSISR_An_PF_RGP 0x0000000000000090ULL /* PTE not found (Radix Guest (parent)) 0b10010000 */
-#define CXL_PSL9_DSISR_An_PF_HRH 0x0000000000000094ULL /* PTE not found (HPT/Radix Host) 0b10010100 */
-#define CXL_PSL9_DSISR_An_PF_STEG 0x000000000000009CULL /* PTE not found (STEG VA) 0b10011100 */
-#define CXL_PSL9_DSISR_An_URTCH 0x00000000000000B4ULL /* Unsupported Radix Tree Configuration 0b10110100 */
-
-/****** CXL_PSL_TFC_An ******************************************************/
-#define CXL_PSL_TFC_An_A (1ull << (63-28)) /* Acknowledge non-translation fault */
-#define CXL_PSL_TFC_An_C (1ull << (63-29)) /* Continue (abort transaction) */
-#define CXL_PSL_TFC_An_AE (1ull << (63-30)) /* Restart PSL with address error */
-#define CXL_PSL_TFC_An_R (1ull << (63-31)) /* Restart PSL transaction */
-
-/****** CXL_PSL_DEBUG *****************************************************/
-#define CXL_PSL_DEBUG_CDC (1ull << (63-27)) /* Coherent Data cache support */
-
-/****** CXL_XSL9_IERAT_ERAT - CAIA 2 **********************************/
-#define CXL_XSL9_IERAT_MLPID (1ull << (63-0)) /* Match LPID */
-#define CXL_XSL9_IERAT_MPID (1ull << (63-1)) /* Match PID */
-#define CXL_XSL9_IERAT_PRS (1ull << (63-4)) /* PRS bit for Radix invalidations */
-#define CXL_XSL9_IERAT_INVR (1ull << (63-3)) /* Invalidate Radix */
-#define CXL_XSL9_IERAT_IALL (1ull << (63-8)) /* Invalidate All */
-#define CXL_XSL9_IERAT_IINPROG (1ull << (63-63)) /* Invalidate in progress */
-
-/* cxl_process_element->software_status */
-#define CXL_PE_SOFTWARE_STATE_V (1ul << (31 - 0)) /* Valid */
-#define CXL_PE_SOFTWARE_STATE_C (1ul << (31 - 29)) /* Complete */
-#define CXL_PE_SOFTWARE_STATE_S (1ul << (31 - 30)) /* Suspend */
-#define CXL_PE_SOFTWARE_STATE_T (1ul << (31 - 31)) /* Terminate */
-
-/****** CXL_PSL_RXCTL_An (Implementation Specific) **************************
- * Controls AFU Hang Pulse, which sets the timeout for the AFU to respond to
- * the PSL for any response (except MMIO). Timeouts will occur between 1x to 2x
- * of the hang pulse frequency.
- */
-#define CXL_PSL_RXCTL_AFUHP_4S 0x7000000000000000ULL
-
-/* SPA->sw_command_status */
-#define CXL_SPA_SW_CMD_MASK 0xffff000000000000ULL
-#define CXL_SPA_SW_CMD_TERMINATE 0x0001000000000000ULL
-#define CXL_SPA_SW_CMD_REMOVE 0x0002000000000000ULL
-#define CXL_SPA_SW_CMD_SUSPEND 0x0003000000000000ULL
-#define CXL_SPA_SW_CMD_RESUME 0x0004000000000000ULL
-#define CXL_SPA_SW_CMD_ADD 0x0005000000000000ULL
-#define CXL_SPA_SW_CMD_UPDATE 0x0006000000000000ULL
-#define CXL_SPA_SW_STATE_MASK 0x0000ffff00000000ULL
-#define CXL_SPA_SW_STATE_TERMINATED 0x0000000100000000ULL
-#define CXL_SPA_SW_STATE_REMOVED 0x0000000200000000ULL
-#define CXL_SPA_SW_STATE_SUSPENDED 0x0000000300000000ULL
-#define CXL_SPA_SW_STATE_RESUMED 0x0000000400000000ULL
-#define CXL_SPA_SW_STATE_ADDED 0x0000000500000000ULL
-#define CXL_SPA_SW_STATE_UPDATED 0x0000000600000000ULL
-#define CXL_SPA_SW_PSL_ID_MASK 0x00000000ffff0000ULL
-#define CXL_SPA_SW_LINK_MASK 0x000000000000ffffULL
-
-#define CXL_MAX_SLICES 4
-#define MAX_AFU_MMIO_REGS 3
-
-#define CXL_MODE_TIME_SLICED 0x4
-#define CXL_SUPPORTED_MODES (CXL_MODE_DEDICATED | CXL_MODE_DIRECTED)
-
-#define CXL_DEV_MINORS 13 /* 1 control + 4 AFUs * 3 (dedicated/master/shared) */
-#define CXL_CARD_MINOR(adapter) (adapter->adapter_num * CXL_DEV_MINORS)
-#define CXL_DEVT_ADAPTER(dev) (MINOR(dev) / CXL_DEV_MINORS)
-
-#define CXL_PSL9_TRACEID_MAX 0xAU
-#define CXL_PSL9_TRACESTATE_FIN 0x3U
-
-enum cxl_context_status {
- CLOSED,
- OPENED,
- STARTED
-};
-
-enum prefault_modes {
- CXL_PREFAULT_NONE,
- CXL_PREFAULT_WED,
- CXL_PREFAULT_ALL,
-};
-
-enum cxl_attrs {
- CXL_ADAPTER_ATTRS,
- CXL_AFU_MASTER_ATTRS,
- CXL_AFU_ATTRS,
-};
-
-struct cxl_sste {
- __be64 esid_data;
- __be64 vsid_data;
-};
-
-#define to_cxl_adapter(d) container_of(d, struct cxl, dev)
-#define to_cxl_afu(d) container_of(d, struct cxl_afu, dev)
-
-struct cxl_afu_native {
- void __iomem *p1n_mmio;
- void __iomem *afu_desc_mmio;
- irq_hw_number_t psl_hwirq;
- unsigned int psl_virq;
- struct mutex spa_mutex;
- /*
- * Only the first part of the SPA is used for the process element
- * linked list. The only other part that software needs to worry about
- * is sw_command_status, which we store a separate pointer to.
- * Everything else in the SPA is only used by hardware
- */
- struct cxl_process_element *spa;
- __be64 *sw_command_status;
- unsigned int spa_size;
- int spa_order;
- int spa_max_procs;
- u64 pp_offset;
-};
-
-struct cxl_afu_guest {
- struct cxl_afu *parent;
- u64 handle;
- phys_addr_t p2n_phys;
- u64 p2n_size;
- int max_ints;
- bool handle_err;
- struct delayed_work work_err;
- int previous_state;
-};
-
-struct cxl_afu {
- struct cxl_afu_native *native;
- struct cxl_afu_guest *guest;
- irq_hw_number_t serr_hwirq;
- unsigned int serr_virq;
- char *psl_irq_name;
- char *err_irq_name;
- void __iomem *p2n_mmio;
- phys_addr_t psn_phys;
- u64 pp_size;
-
- struct cxl *adapter;
- struct device dev;
- struct cdev afu_cdev_s, afu_cdev_m, afu_cdev_d;
- struct device *chardev_s, *chardev_m, *chardev_d;
- struct idr contexts_idr;
- struct dentry *debugfs;
- struct mutex contexts_lock;
- spinlock_t afu_cntl_lock;
-
- /* -1: AFU deconfigured/locked, >= 0: number of readers */
- atomic_t configured_state;
-
- /* AFU error buffer fields and bin attribute for sysfs */
- u64 eb_len, eb_offset;
- struct bin_attribute attr_eb;
-
- /* pointer to the vphb */
- struct pci_controller *phb;
-
- int pp_irqs;
- int irqs_max;
- int num_procs;
- int max_procs_virtualised;
- int slice;
- int modes_supported;
- int current_mode;
- int crs_num;
- u64 crs_len;
- u64 crs_offset;
- struct list_head crs;
- enum prefault_modes prefault_mode;
- bool psa;
- bool pp_psa;
- bool enabled;
-};
-
-
-struct cxl_irq_name {
- struct list_head list;
- char *name;
-};
-
-struct irq_avail {
- irq_hw_number_t offset;
- irq_hw_number_t range;
- unsigned long *bitmap;
-};
-
-/*
- * This is a cxl context. If the PSL is in dedicated mode, there will be one
- * of these per AFU. If in AFU directed there can be lots of these.
- */
-struct cxl_context {
- struct cxl_afu *afu;
-
- /* Problem state MMIO */
- phys_addr_t psn_phys;
- u64 psn_size;
-
- /* Used to unmap any mmaps when force detaching */
- struct address_space *mapping;
- struct mutex mapping_lock;
- struct page *ff_page;
- bool mmio_err_ff;
- bool kernelapi;
-
- spinlock_t sste_lock; /* Protects segment table entries */
- struct cxl_sste *sstp;
- u64 sstp0, sstp1;
- unsigned int sst_size, sst_lru;
-
- wait_queue_head_t wq;
- /* use mm context associated with this pid for ds faults */
- struct pid *pid;
- spinlock_t lock; /* Protects pending_irq_mask, pending_fault and fault_addr */
- /* Only used in PR mode */
- u64 process_token;
-
- /* driver private data */
- void *priv;
-
- unsigned long *irq_bitmap; /* Accessed from IRQ context */
- struct cxl_irq_ranges irqs;
- struct list_head irq_names;
- u64 fault_addr;
- u64 fault_dsisr;
- u64 afu_err;
-
- /*
- * This status and its lock protect start and detach context
- * from racing. They also prevent detach from racing with
- * itself.
- */
- enum cxl_context_status status;
- struct mutex status_mutex;
-
-
- /* XXX: Is it possible to need multiple work items at once? */
- struct work_struct fault_work;
- u64 dsisr;
- u64 dar;
-
- struct cxl_process_element *elem;
-
- /*
- * pe is the process element handle, assigned by this driver when the
- * context is initialized.
- *
- * external_pe is the PE shown outside of cxl.
- * On bare-metal, pe=external_pe, because we decide what the handle is.
- * In a guest, we only find out about the pe used by pHyp when the
- * context is attached, and that's the value we want to report outside
- * of cxl.
- */
- int pe;
- int external_pe;
-
- u32 irq_count;
- bool pe_inserted;
- bool master;
- bool kernel;
- bool pending_irq;
- bool pending_fault;
- bool pending_afu_err;
-
- /* Used by AFU drivers for driver specific event delivery */
- struct cxl_afu_driver_ops *afu_driver_ops;
- atomic_t afu_driver_events;
-
- struct rcu_head rcu;
-
- struct mm_struct *mm;
-
- u16 tidr;
- bool assign_tidr;
-};
-
-struct cxl_irq_info;
-
-struct cxl_service_layer_ops {
- int (*adapter_regs_init)(struct cxl *adapter, struct pci_dev *dev);
- int (*invalidate_all)(struct cxl *adapter);
- int (*afu_regs_init)(struct cxl_afu *afu);
- int (*sanitise_afu_regs)(struct cxl_afu *afu);
- int (*register_serr_irq)(struct cxl_afu *afu);
- void (*release_serr_irq)(struct cxl_afu *afu);
- irqreturn_t (*handle_interrupt)(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info);
- irqreturn_t (*fail_irq)(struct cxl_afu *afu, struct cxl_irq_info *irq_info);
- int (*activate_dedicated_process)(struct cxl_afu *afu);
- int (*attach_afu_directed)(struct cxl_context *ctx, u64 wed, u64 amr);
- int (*attach_dedicated_process)(struct cxl_context *ctx, u64 wed, u64 amr);
- void (*update_dedicated_ivtes)(struct cxl_context *ctx);
- void (*debugfs_add_adapter_regs)(struct cxl *adapter, struct dentry *dir);
- void (*debugfs_add_afu_regs)(struct cxl_afu *afu, struct dentry *dir);
- void (*psl_irq_dump_registers)(struct cxl_context *ctx);
- void (*err_irq_dump_registers)(struct cxl *adapter);
- void (*debugfs_stop_trace)(struct cxl *adapter);
- void (*write_timebase_ctrl)(struct cxl *adapter);
- u64 (*timebase_read)(struct cxl *adapter);
- int capi_mode;
- bool needs_reset_before_disable;
-};
-
-struct cxl_native {
- u64 afu_desc_off;
- u64 afu_desc_size;
- void __iomem *p1_mmio;
- void __iomem *p2_mmio;
- irq_hw_number_t err_hwirq;
- unsigned int err_virq;
- u64 ps_off;
- bool no_data_cache; /* set if no data cache on the card */
- const struct cxl_service_layer_ops *sl_ops;
-};
-
-struct cxl_guest {
- struct platform_device *pdev;
- int irq_nranges;
- struct cdev cdev;
- irq_hw_number_t irq_base_offset;
- struct irq_avail *irq_avail;
- spinlock_t irq_alloc_lock;
- u64 handle;
- char *status;
- u16 vendor;
- u16 device;
- u16 subsystem_vendor;
- u16 subsystem;
-};
-
-struct cxl {
- struct cxl_native *native;
- struct cxl_guest *guest;
- spinlock_t afu_list_lock;
- struct cxl_afu *afu[CXL_MAX_SLICES];
- struct device dev;
- struct dentry *trace;
- struct dentry *psl_err_chk;
- struct dentry *debugfs;
- char *irq_name;
- struct bin_attribute cxl_attr;
- int adapter_num;
- int user_irqs;
- u64 ps_size;
- u16 psl_rev;
- u16 base_image;
- u8 vsec_status;
- u8 caia_major;
- u8 caia_minor;
- u8 slices;
- bool user_image_loaded;
- bool perst_loads_image;
- bool perst_select_user;
- bool perst_same_image;
- bool psl_timebase_synced;
- bool tunneled_ops_supported;
-
- /*
- * number of contexts mapped on to this card. Possible values are:
- * >0: Number of contexts mapped and new ones can be mapped.
- * 0: No active contexts and new ones can be mapped.
- * -1: No contexts mapped and new ones cannot be mapped.
- */
- atomic_t contexts_num;
-};
-
-int cxl_pci_alloc_one_irq(struct cxl *adapter);
-void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq);
-int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num);
-void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter);
-int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq, unsigned int virq);
-int cxl_update_image_control(struct cxl *adapter);
-int cxl_pci_reset(struct cxl *adapter);
-void cxl_pci_release_afu(struct device *dev);
-ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len);
-
-/* common == phyp + powernv - CAIA 1&2 */
-struct cxl_process_element_common {
- __be32 tid;
- __be32 pid;
- __be64 csrp;
- union {
- struct {
- __be64 aurp0;
- __be64 aurp1;
- __be64 sstp0;
- __be64 sstp1;
- } psl8; /* CAIA 1 */
- struct {
- u8 reserved2[8];
- u8 reserved3[8];
- u8 reserved4[8];
- u8 reserved5[8];
- } psl9; /* CAIA 2 */
- } u;
- __be64 amr;
- u8 reserved6[4];
- __be64 wed;
-} __packed;
-
-/* just powernv - CAIA 1&2 */
-struct cxl_process_element {
- __be64 sr;
- __be64 SPOffset;
- union {
- __be64 sdr; /* CAIA 1 */
- u8 reserved1[8]; /* CAIA 2 */
- } u;
- __be64 haurp;
- __be32 ctxtime;
- __be16 ivte_offsets[4];
- __be16 ivte_ranges[4];
- __be32 lpid;
- struct cxl_process_element_common common;
- __be32 software_state;
-} __packed;
-
-static inline bool cxl_adapter_link_ok(struct cxl *cxl, struct cxl_afu *afu)
-{
- struct pci_dev *pdev;
-
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- pdev = to_pci_dev(cxl->dev.parent);
- return !pci_channel_offline(pdev);
- }
- return true;
-}
-
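-/*
- * MMIO accessors: writes are dropped and reads return all 1s (~0ULL) when
- * the adapter link is down, mirroring PCI behaviour for a dead device.
- */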
-static inline void __iomem *_cxl_p1_addr(struct cxl *cxl, cxl_p1_reg_t reg)
-{
- WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE));
- return cxl->native->p1_mmio + cxl_reg_off(reg);
-}
-
-static inline void cxl_p1_write(struct cxl *cxl, cxl_p1_reg_t reg, u64 val)
-{
- if (likely(cxl_adapter_link_ok(cxl, NULL)))
- out_be64(_cxl_p1_addr(cxl, reg), val);
-}
-
-static inline u64 cxl_p1_read(struct cxl *cxl, cxl_p1_reg_t reg)
-{
- if (likely(cxl_adapter_link_ok(cxl, NULL)))
- return in_be64(_cxl_p1_addr(cxl, reg));
- else
- return ~0ULL;
-}
-
-static inline void __iomem *_cxl_p1n_addr(struct cxl_afu *afu, cxl_p1n_reg_t reg)
-{
- WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE));
- return afu->native->p1n_mmio + cxl_reg_off(reg);
-}
-
-static inline void cxl_p1n_write(struct cxl_afu *afu, cxl_p1n_reg_t reg, u64 val)
-{
- if (likely(cxl_adapter_link_ok(afu->adapter, afu)))
- out_be64(_cxl_p1n_addr(afu, reg), val);
-}
-
-static inline u64 cxl_p1n_read(struct cxl_afu *afu, cxl_p1n_reg_t reg)
-{
- if (likely(cxl_adapter_link_ok(afu->adapter, afu)))
- return in_be64(_cxl_p1n_addr(afu, reg));
- else
- return ~0ULL;
-}
-
-static inline void __iomem *_cxl_p2n_addr(struct cxl_afu *afu, cxl_p2n_reg_t reg)
-{
- return afu->p2n_mmio + cxl_reg_off(reg);
-}
-
-static inline void cxl_p2n_write(struct cxl_afu *afu, cxl_p2n_reg_t reg, u64 val)
-{
- if (likely(cxl_adapter_link_ok(afu->adapter, afu)))
- out_be64(_cxl_p2n_addr(afu, reg), val);
-}
-
-static inline u64 cxl_p2n_read(struct cxl_afu *afu, cxl_p2n_reg_t reg)
-{
- if (likely(cxl_adapter_link_ok(afu->adapter, afu)))
- return in_be64(_cxl_p2n_addr(afu, reg));
- else
- return ~0ULL;
-}
-
-static inline bool cxl_is_power8(void)
-{
- if ((pvr_version_is(PVR_POWER8E)) ||
- (pvr_version_is(PVR_POWER8NVL)) ||
- (pvr_version_is(PVR_POWER8)) ||
- (pvr_version_is(PVR_HX_C2000)))
- return true;
- return false;
-}
-
-static inline bool cxl_is_power9(void)
-{
- if (pvr_version_is(PVR_POWER9))
- return true;
- return false;
-}
-
-ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
- loff_t off, size_t count);
-
-
-struct cxl_calls {
- void (*cxl_slbia)(struct mm_struct *mm);
- struct module *owner;
-};
-int register_cxl_calls(struct cxl_calls *calls);
-void unregister_cxl_calls(struct cxl_calls *calls);
-int cxl_update_properties(struct device_node *dn, struct property *new_prop);
-
-void cxl_remove_adapter_nr(struct cxl *adapter);
-
-void cxl_release_spa(struct cxl_afu *afu);
-
-dev_t cxl_get_dev(void);
-int cxl_file_init(void);
-void cxl_file_exit(void);
-int cxl_register_adapter(struct cxl *adapter);
-int cxl_register_afu(struct cxl_afu *afu);
-int cxl_chardev_d_afu_add(struct cxl_afu *afu);
-int cxl_chardev_m_afu_add(struct cxl_afu *afu);
-int cxl_chardev_s_afu_add(struct cxl_afu *afu);
-void cxl_chardev_afu_remove(struct cxl_afu *afu);
-
-void cxl_context_detach_all(struct cxl_afu *afu);
-void cxl_context_free(struct cxl_context *ctx);
-void cxl_context_detach(struct cxl_context *ctx);
-
-int cxl_sysfs_adapter_add(struct cxl *adapter);
-void cxl_sysfs_adapter_remove(struct cxl *adapter);
-int cxl_sysfs_afu_add(struct cxl_afu *afu);
-void cxl_sysfs_afu_remove(struct cxl_afu *afu);
-int cxl_sysfs_afu_m_add(struct cxl_afu *afu);
-void cxl_sysfs_afu_m_remove(struct cxl_afu *afu);
-
-struct cxl *cxl_alloc_adapter(void);
-struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice);
-int cxl_afu_select_best_mode(struct cxl_afu *afu);
-
-int cxl_native_register_psl_irq(struct cxl_afu *afu);
-void cxl_native_release_psl_irq(struct cxl_afu *afu);
-int cxl_native_register_psl_err_irq(struct cxl *adapter);
-void cxl_native_release_psl_err_irq(struct cxl *adapter);
-int cxl_native_register_serr_irq(struct cxl_afu *afu);
-void cxl_native_release_serr_irq(struct cxl_afu *afu);
-int afu_register_irqs(struct cxl_context *ctx, u32 count);
-void afu_release_irqs(struct cxl_context *ctx, void *cookie);
-void afu_irq_name_free(struct cxl_context *ctx);
-
-int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr);
-int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr);
-int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu);
-int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu);
-int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr);
-int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr);
-void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx);
-void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx);
-
-#ifdef CONFIG_DEBUG_FS
-
-void cxl_debugfs_init(void);
-void cxl_debugfs_exit(void);
-void cxl_debugfs_adapter_add(struct cxl *adapter);
-void cxl_debugfs_adapter_remove(struct cxl *adapter);
-void cxl_debugfs_afu_add(struct cxl_afu *afu);
-void cxl_debugfs_afu_remove(struct cxl_afu *afu);
-void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter, struct dentry *dir);
-void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir);
-void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir);
-void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir);
-
-#else /* CONFIG_DEBUG_FS */
-
-static inline void __init cxl_debugfs_init(void)
-{
-}
-
-static inline void cxl_debugfs_exit(void)
-{
-}
-
-static inline void cxl_debugfs_adapter_add(struct cxl *adapter)
-{
-}
-
-static inline void cxl_debugfs_adapter_remove(struct cxl *adapter)
-{
-}
-
-static inline void cxl_debugfs_afu_add(struct cxl_afu *afu)
-{
-}
-
-static inline void cxl_debugfs_afu_remove(struct cxl_afu *afu)
-{
-}
-
-static inline void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter,
- struct dentry *dir)
-{
-}
-
-static inline void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter,
- struct dentry *dir)
-{
-}
-
-static inline void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir)
-{
-}
-
-static inline void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir)
-{
-}
-
-#endif /* CONFIG_DEBUG_FS */
-
-void cxl_handle_fault(struct work_struct *work);
-void cxl_prefault(struct cxl_context *ctx, u64 wed);
-int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar);
-
-struct cxl *get_cxl_adapter(int num);
-int cxl_alloc_sst(struct cxl_context *ctx);
-void cxl_dump_debug_buffer(void *addr, size_t size);
-
-void init_cxl_native(void);
-
-struct cxl_context *cxl_context_alloc(void);
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master);
-void cxl_context_set_mapping(struct cxl_context *ctx,
- struct address_space *mapping);
-void cxl_context_free(struct cxl_context *ctx);
-int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma);
-unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
- irq_handler_t handler, void *cookie, const char *name);
-void cxl_unmap_irq(unsigned int virq, void *cookie);
-int __detach_context(struct cxl_context *ctx);
-
-/*
- * This must match the layout of the H_COLLECT_CA_INT_INFO retbuf defined
- * in PAPR.
- * Field pid_tid is now 'reserved' because it is no longer used on bare-metal.
- * In a guest environment, PSL_PID_An is located in the upper 32 bits and
- * the PSL_TID_An register in the lower 32 bits.
- */
-struct cxl_irq_info {
- u64 dsisr;
- u64 dar;
- u64 dsr;
- u64 reserved;
- u64 afu_err;
- u64 errstat;
- u64 proc_handle;
- u64 padding[2]; /* to match the expected retbuf size for plpar_hcall9 */
-};
-
-void cxl_assign_psn_space(struct cxl_context *ctx);
-int cxl_invalidate_all_psl9(struct cxl *adapter);
-int cxl_invalidate_all_psl8(struct cxl *adapter);
-irqreturn_t cxl_irq_psl9(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info);
-irqreturn_t cxl_irq_psl8(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info);
-irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info);
-int cxl_register_one_irq(struct cxl *adapter, irq_handler_t handler,
- void *cookie, irq_hw_number_t *dest_hwirq,
- unsigned int *dest_virq, const char *name);
-
-int cxl_check_error(struct cxl_afu *afu);
-int cxl_afu_slbia(struct cxl_afu *afu);
-int cxl_data_cache_flush(struct cxl *adapter);
-int cxl_afu_disable(struct cxl_afu *afu);
-int cxl_psl_purge(struct cxl_afu *afu);
-int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
- u32 *phb_index, u64 *capp_unit_id);
-int cxl_slot_is_switched(struct pci_dev *dev);
-int cxl_get_xsl9_dsnctl(struct pci_dev *dev, u64 capp_unit_id, u64 *reg);
-u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9);
-
-void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx);
-void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx);
-void cxl_native_err_irq_dump_regs_psl8(struct cxl *adapter);
-void cxl_native_err_irq_dump_regs_psl9(struct cxl *adapter);
-int cxl_pci_vphb_add(struct cxl_afu *afu);
-void cxl_pci_vphb_remove(struct cxl_afu *afu);
-void cxl_release_mapping(struct cxl_context *ctx);
-
-extern struct pci_driver cxl_pci_driver;
-extern struct platform_driver cxl_of_driver;
-int afu_allocate_irqs(struct cxl_context *ctx, u32 count);
-
-int afu_open(struct inode *inode, struct file *file);
-int afu_release(struct inode *inode, struct file *file);
-long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-int afu_mmap(struct file *file, struct vm_area_struct *vm);
-__poll_t afu_poll(struct file *file, struct poll_table_struct *poll);
-ssize_t afu_read(struct file *file, char __user *buf, size_t count, loff_t *off);
-extern const struct file_operations afu_fops;
-
-struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *dev);
-void cxl_guest_remove_adapter(struct cxl *adapter);
-int cxl_of_read_adapter_handle(struct cxl *adapter, struct device_node *np);
-int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np);
-ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len);
-ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len);
-int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np);
-void cxl_guest_remove_afu(struct cxl_afu *afu);
-int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np);
-int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *afu_np);
-int cxl_guest_add_chardev(struct cxl *adapter);
-void cxl_guest_remove_chardev(struct cxl *adapter);
-void cxl_guest_reload_module(struct cxl *adapter);
-int cxl_of_probe(struct platform_device *pdev);
-
-struct cxl_backend_ops {
- struct module *module;
- int (*adapter_reset)(struct cxl *adapter);
- int (*alloc_one_irq)(struct cxl *adapter);
- void (*release_one_irq)(struct cxl *adapter, int hwirq);
- int (*alloc_irq_ranges)(struct cxl_irq_ranges *irqs,
- struct cxl *adapter, unsigned int num);
- void (*release_irq_ranges)(struct cxl_irq_ranges *irqs,
- struct cxl *adapter);
- int (*setup_irq)(struct cxl *adapter, unsigned int hwirq,
- unsigned int virq);
- irqreturn_t (*handle_psl_slice_error)(struct cxl_context *ctx,
- u64 dsisr, u64 errstat);
- irqreturn_t (*psl_interrupt)(int irq, void *data);
- int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
- void (*irq_wait)(struct cxl_context *ctx);
- int (*attach_process)(struct cxl_context *ctx, bool kernel,
- u64 wed, u64 amr);
- int (*detach_process)(struct cxl_context *ctx);
- void (*update_ivtes)(struct cxl_context *ctx);
- bool (*support_attributes)(const char *attr_name, enum cxl_attrs type);
- bool (*link_ok)(struct cxl *cxl, struct cxl_afu *afu);
- void (*release_afu)(struct device *dev);
- ssize_t (*afu_read_err_buffer)(struct cxl_afu *afu, char *buf,
- loff_t off, size_t count);
- int (*afu_check_and_enable)(struct cxl_afu *afu);
- int (*afu_activate_mode)(struct cxl_afu *afu, int mode);
- int (*afu_deactivate_mode)(struct cxl_afu *afu, int mode);
- int (*afu_reset)(struct cxl_afu *afu);
- int (*afu_cr_read8)(struct cxl_afu *afu, int cr_idx, u64 offset, u8 *val);
- int (*afu_cr_read16)(struct cxl_afu *afu, int cr_idx, u64 offset, u16 *val);
- int (*afu_cr_read32)(struct cxl_afu *afu, int cr_idx, u64 offset, u32 *val);
- int (*afu_cr_read64)(struct cxl_afu *afu, int cr_idx, u64 offset, u64 *val);
- int (*afu_cr_write8)(struct cxl_afu *afu, int cr_idx, u64 offset, u8 val);
- int (*afu_cr_write16)(struct cxl_afu *afu, int cr_idx, u64 offset, u16 val);
- int (*afu_cr_write32)(struct cxl_afu *afu, int cr_idx, u64 offset, u32 val);
- ssize_t (*read_adapter_vpd)(struct cxl *adapter, void *buf, size_t count);
-};
-extern const struct cxl_backend_ops cxl_native_ops;
-extern const struct cxl_backend_ops cxl_guest_ops;
-extern const struct cxl_backend_ops *cxl_ops;
-
-/* check if the given pci_dev is on the cxl vphb bus */
-bool cxl_pci_is_vphb_device(struct pci_dev *dev);
-
-/* decode AFU error bits in the PSL register PSL_SERR_An */
-void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr);
-
-/*
- * Increments the number of attached contexts on an adapter.
- * If the adapter_context_lock is taken, this returns -EBUSY.
- */
-int cxl_adapter_context_get(struct cxl *adapter);
-
-/* Decrements the number of attached contexts on an adapter */
-void cxl_adapter_context_put(struct cxl *adapter);
-
-/* If no active contexts then prevents contexts from being attached */
-int cxl_adapter_context_lock(struct cxl *adapter);
-
-/* Unlock the contexts-lock if taken. Warn and force unlock otherwise */
-void cxl_adapter_context_unlock(struct cxl *adapter);
-
-/* Increases the reference count to "struct mm_struct" */
-void cxl_context_mm_count_get(struct cxl_context *ctx);
-
-/* Decrements the reference count to "struct mm_struct" */
-void cxl_context_mm_count_put(struct cxl_context *ctx);
-
-#endif
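
The context-counting helpers declared above implement a small protocol: cxl_adapter_context_get() bumps a per-adapter count and fails with -EBUSY while the lock is held, and cxl_adapter_context_lock() only succeeds when no contexts are attached. A minimal userspace C sketch of that counted-lock pattern (all names here are illustrative, none of them exist in the driver):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    struct adapter {
            pthread_mutex_t mutex;
            int context_count;      /* attached contexts */
            int context_locked;     /* attaches barred while set */
    };

    static int adapter_context_get(struct adapter *a)
    {
            int rc = 0;

            pthread_mutex_lock(&a->mutex);
            if (a->context_locked)
                    rc = -EBUSY;    /* lock held: refuse the attach */
            else
                    a->context_count++;
            pthread_mutex_unlock(&a->mutex);
            return rc;
    }

    static void adapter_context_put(struct adapter *a)
    {
            pthread_mutex_lock(&a->mutex);
            a->context_count--;
            pthread_mutex_unlock(&a->mutex);
    }

    static int adapter_context_lock(struct adapter *a)
    {
            int rc = -EBUSY;

            pthread_mutex_lock(&a->mutex);
            if (a->context_count == 0) {    /* only lockable when idle */
                    a->context_locked = 1;
                    rc = 0;
            }
            pthread_mutex_unlock(&a->mutex);
            return rc;
    }

    int main(void)
    {
            struct adapter a = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

            adapter_context_get(&a);                        /* count = 1 */
            printf("lock while busy:  %d\n", adapter_context_lock(&a)); /* -EBUSY */
            adapter_context_put(&a);                        /* count = 0 */
            printf("lock while idle:  %d\n", adapter_context_lock(&a)); /* 0 */
            printf("get while locked: %d\n", adapter_context_get(&a)); /* -EBUSY */
            return 0;
    }
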
diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c
deleted file mode 100644
index e5fe0a171472..000000000000
--- a/drivers/misc/cxl/cxllib.c
+++ /dev/null
@@ -1,271 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2017 IBM Corp.
- */
-
-#include <linux/hugetlb.h>
-#include <linux/sched/mm.h>
-#include <asm/opal-api.h>
-#include <asm/pnv-pci.h>
-#include <misc/cxllib.h>
-
-#include "cxl.h"
-
-#define CXL_INVALID_DRA ~0ull
-#define CXL_DUMMY_READ_SIZE 128
-#define CXL_DUMMY_READ_ALIGN 8
-#define CXL_CAPI_WINDOW_START 0x2000000000000ull
-#define CXL_CAPI_WINDOW_LOG_SIZE 48
-#define CXL_XSL_CONFIG_CURRENT_VERSION CXL_XSL_CONFIG_VERSION1
-
-
-bool cxllib_slot_is_supported(struct pci_dev *dev, unsigned long flags)
-{
- int rc;
- u32 phb_index;
- u64 chip_id, capp_unit_id;
-
- /* No flags currently supported */
- if (flags)
- return false;
-
- if (!cpu_has_feature(CPU_FTR_HVMODE))
- return false;
-
- if (!cxl_is_power9())
- return false;
-
- if (cxl_slot_is_switched(dev))
- return false;
-
- /* on p9, some pci slots are not connected to a CAPP unit */
- rc = cxl_calc_capp_routing(dev, &chip_id, &phb_index, &capp_unit_id);
- if (rc)
- return false;
-
- return true;
-}
-EXPORT_SYMBOL_GPL(cxllib_slot_is_supported);
-
-static DEFINE_MUTEX(dra_mutex);
-static u64 dummy_read_addr = CXL_INVALID_DRA;
-
-static int allocate_dummy_read_buf(void)
-{
- u64 buf, vaddr;
- size_t buf_size;
-
- /*
-	 * The dummy read buffer is 128 bytes long, aligned on a
-	 * 256-byte boundary, and we need its physical address.
- */
- buf_size = CXL_DUMMY_READ_SIZE + (1ull << CXL_DUMMY_READ_ALIGN);
- buf = (u64) kzalloc(buf_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- vaddr = (buf + (1ull << CXL_DUMMY_READ_ALIGN) - 1) &
- (~0ull << CXL_DUMMY_READ_ALIGN);
-
- WARN((vaddr + CXL_DUMMY_READ_SIZE) > (buf + buf_size),
- "Dummy read buffer alignment issue");
- dummy_read_addr = virt_to_phys((void *) vaddr);
- return 0;
-}
-
-int cxllib_get_xsl_config(struct pci_dev *dev, struct cxllib_xsl_config *cfg)
-{
- int rc;
- u32 phb_index;
- u64 chip_id, capp_unit_id;
-
- if (!cpu_has_feature(CPU_FTR_HVMODE))
- return -EINVAL;
-
- mutex_lock(&dra_mutex);
- if (dummy_read_addr == CXL_INVALID_DRA) {
- rc = allocate_dummy_read_buf();
- if (rc) {
- mutex_unlock(&dra_mutex);
- return rc;
- }
- }
- mutex_unlock(&dra_mutex);
-
- rc = cxl_calc_capp_routing(dev, &chip_id, &phb_index, &capp_unit_id);
- if (rc)
- return rc;
-
- rc = cxl_get_xsl9_dsnctl(dev, capp_unit_id, &cfg->dsnctl);
- if (rc)
- return rc;
-
- cfg->version = CXL_XSL_CONFIG_CURRENT_VERSION;
- cfg->log_bar_size = CXL_CAPI_WINDOW_LOG_SIZE;
- cfg->bar_addr = CXL_CAPI_WINDOW_START;
- cfg->dra = dummy_read_addr;
- return 0;
-}
-EXPORT_SYMBOL_GPL(cxllib_get_xsl_config);
-
-int cxllib_switch_phb_mode(struct pci_dev *dev, enum cxllib_mode mode,
- unsigned long flags)
-{
- int rc = 0;
-
- if (!cpu_has_feature(CPU_FTR_HVMODE))
- return -EINVAL;
-
- switch (mode) {
- case CXL_MODE_PCI:
- /*
-		 * We currently don't support going back to PCI mode.
-		 * However, we'll turn the invalidations off, so that
-		 * the firmware doesn't have to ack them and can do
-		 * things like reset, etc. with no worries.
-		 * So always return EPERM (can't go back to PCI) or
-		 * EBUSY if we couldn't even turn off snooping.
- */
- rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_OFF);
- if (rc)
- rc = -EBUSY;
- else
- rc = -EPERM;
- break;
- case CXL_MODE_CXL:
- /* DMA only supported on TVT1 for the time being */
- if (flags != CXL_MODE_DMA_TVT1)
- return -EINVAL;
- rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_DMA_TVT1);
- if (rc)
- return rc;
- rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON);
- break;
- default:
- rc = -EINVAL;
- }
- return rc;
-}
-EXPORT_SYMBOL_GPL(cxllib_switch_phb_mode);
-
-/*
- * When switching the PHB to capi mode, the TVT#1 entry for
- * the Partitionable Endpoint is set in bypass mode, like
- * in PCI mode.
- * Configure the device dma to use TVT#1, which is done
- * by calling dma_set_mask() with a mask large enough.
- */
-int cxllib_set_device_dma(struct pci_dev *dev, unsigned long flags)
-{
- int rc;
-
- if (flags)
- return -EINVAL;
-
- rc = dma_set_mask(&dev->dev, DMA_BIT_MASK(64));
- return rc;
-}
-EXPORT_SYMBOL_GPL(cxllib_set_device_dma);
-
-int cxllib_get_PE_attributes(struct task_struct *task,
- unsigned long translation_mode,
- struct cxllib_pe_attributes *attr)
-{
- if (translation_mode != CXL_TRANSLATED_MODE &&
- translation_mode != CXL_REAL_MODE)
- return -EINVAL;
-
- attr->sr = cxl_calculate_sr(false,
- task == NULL,
- translation_mode == CXL_REAL_MODE,
- true);
- attr->lpid = mfspr(SPRN_LPID);
- if (task) {
- struct mm_struct *mm = get_task_mm(task);
- if (mm == NULL)
- return -EINVAL;
- /*
- * Caller is keeping a reference on mm_users for as long
- * as XSL uses the memory context
- */
- attr->pid = mm->context.id;
- mmput(mm);
- attr->tid = task->thread.tidr;
- } else {
- attr->pid = 0;
- attr->tid = 0;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(cxllib_get_PE_attributes);
-
-static int get_vma_info(struct mm_struct *mm, u64 addr,
- u64 *vma_start, u64 *vma_end,
- unsigned long *page_size)
-{
- struct vm_area_struct *vma = NULL;
- int rc = 0;
-
- mmap_read_lock(mm);
-
- vma = find_vma(mm, addr);
- if (!vma) {
- rc = -EFAULT;
- goto out;
- }
- *page_size = vma_kernel_pagesize(vma);
- *vma_start = vma->vm_start;
- *vma_end = vma->vm_end;
-out:
- mmap_read_unlock(mm);
- return rc;
-}
-
-int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
-{
- int rc;
- u64 dar, vma_start, vma_end;
- unsigned long page_size;
-
- if (mm == NULL)
- return -EFAULT;
-
- /*
- * The buffer we have to process can extend over several pages
- * and may also cover several VMAs.
- * We iterate over all the pages. The page size could vary
- * between VMAs.
- */
- rc = get_vma_info(mm, addr, &vma_start, &vma_end, &page_size);
- if (rc)
- return rc;
-
- for (dar = (addr & ~(page_size - 1)); dar < (addr + size);
- dar += page_size) {
- if (dar < vma_start || dar >= vma_end) {
- /*
- * We don't hold mm->mmap_lock while iterating, since
- * the lock is required by one of the lower-level page
- * fault processing functions and it could
- * create a deadlock.
- *
-			 * It means the VMAs can be altered between two
- * loop iterations and we could theoretically
- * miss a page (however unlikely). But that's
- * not really a problem, as the driver will
- * retry access, get another page fault on the
- * missing page and call us again.
- */
- rc = get_vma_info(mm, dar, &vma_start, &vma_end,
- &page_size);
- if (rc)
- return rc;
- }
-
- rc = cxl_handle_mm_fault(mm, flags, dar);
- if (rc)
- return -EFAULT;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(cxllib_handle_fault);
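
The loop in cxllib_handle_fault() above rounds the start address down to a page boundary, then steps page by page across [addr, addr + size), refreshing the VMA bounds and page size whenever it walks out of the current VMA. A standalone sketch of just the alignment and stepping arithmetic, with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* illustrative values: unaligned start, 4 KB pages */
            uint64_t addr = 0x12345, size = 0x3000, page_size = 0x1000;
            uint64_t dar;

            /* round down to a page boundary, then cover [addr, addr + size) */
            for (dar = addr & ~(page_size - 1); dar < addr + size;
                 dar += page_size)
                    printf("fault page at %#llx\n", (unsigned long long)dar);
            return 0;
    }
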
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c
deleted file mode 100644
index 7b987bf498b5..000000000000
--- a/drivers/misc/cxl/debugfs.c
+++ /dev/null
@@ -1,134 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/debugfs.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-
-#include "cxl.h"
-
-static struct dentry *cxl_debugfs;
-
-/* Helpers to export CXL mmaped IO registers via debugfs */
-static int debugfs_io_u64_get(void *data, u64 *val)
-{
- *val = in_be64((u64 __iomem *)data);
- return 0;
-}
-
-static int debugfs_io_u64_set(void *data, u64 val)
-{
- out_be64((u64 __iomem *)data, val);
- return 0;
-}
-DEFINE_DEBUGFS_ATTRIBUTE(fops_io_x64, debugfs_io_u64_get, debugfs_io_u64_set,
- "0x%016llx\n");
-
-static void debugfs_create_io_x64(const char *name, umode_t mode,
- struct dentry *parent, u64 __iomem *value)
-{
- debugfs_create_file_unsafe(name, mode, parent, (void __force *)value,
- &fops_io_x64);
-}
-
-void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter, struct dentry *dir)
-{
- debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR1));
- debugfs_create_io_x64("fir_mask", 0400, dir,
- _cxl_p1_addr(adapter, CXL_PSL9_FIR_MASK));
- debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR_CNTL));
- debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_TRACECFG));
- debugfs_create_io_x64("debug", 0600, dir,
- _cxl_p1_addr(adapter, CXL_PSL9_DEBUG));
- debugfs_create_io_x64("xsl-debug", 0600, dir,
- _cxl_p1_addr(adapter, CXL_XSL9_DBG));
-}
-
-void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir)
-{
- debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR1));
- debugfs_create_io_x64("fir2", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR2));
- debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR_CNTL));
- debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_TRACE));
-}
-
-void cxl_debugfs_adapter_add(struct cxl *adapter)
-{
- struct dentry *dir;
- char buf[32];
-
- if (!cxl_debugfs)
- return;
-
- snprintf(buf, 32, "card%i", adapter->adapter_num);
- dir = debugfs_create_dir(buf, cxl_debugfs);
- adapter->debugfs = dir;
-
- debugfs_create_io_x64("err_ivte", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_ErrIVTE));
-
- if (adapter->native->sl_ops->debugfs_add_adapter_regs)
- adapter->native->sl_ops->debugfs_add_adapter_regs(adapter, dir);
-}
-
-void cxl_debugfs_adapter_remove(struct cxl *adapter)
-{
- debugfs_remove_recursive(adapter->debugfs);
-}
-
-void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir)
-{
- debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An));
-}
-
-void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir)
-{
- debugfs_create_io_x64("sstp0", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP0_An));
- debugfs_create_io_x64("sstp1", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP1_An));
-
- debugfs_create_io_x64("fir", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_FIR_SLICE_An));
- debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An));
- debugfs_create_io_x64("afu_debug", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_AFU_DEBUG_An));
- debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SLICE_TRACE));
-}
-
-void cxl_debugfs_afu_add(struct cxl_afu *afu)
-{
- struct dentry *dir;
- char buf[32];
-
- if (!afu->adapter->debugfs)
- return;
-
- snprintf(buf, 32, "psl%i.%i", afu->adapter->adapter_num, afu->slice);
- dir = debugfs_create_dir(buf, afu->adapter->debugfs);
- afu->debugfs = dir;
-
- debugfs_create_io_x64("sr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SR_An));
- debugfs_create_io_x64("dsisr", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DSISR_An));
- debugfs_create_io_x64("dar", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DAR_An));
-
- debugfs_create_io_x64("err_status", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_ErrStat_An));
-
- if (afu->adapter->native->sl_ops->debugfs_add_afu_regs)
- afu->adapter->native->sl_ops->debugfs_add_afu_regs(afu, dir);
-}
-
-void cxl_debugfs_afu_remove(struct cxl_afu *afu)
-{
- debugfs_remove_recursive(afu->debugfs);
-}
-
-void __init cxl_debugfs_init(void)
-{
- if (!cpu_has_feature(CPU_FTR_HVMODE))
- return;
-
- cxl_debugfs = debugfs_create_dir("cxl", NULL);
-}
-
-void cxl_debugfs_exit(void)
-{
- debugfs_remove_recursive(cxl_debugfs);
-}
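
Each register exported above follows the same pattern: a get/set pair around the MMIO accessor, DEFINE_DEBUGFS_ATTRIBUTE() to build the fops, and debugfs_create_file_unsafe() to publish the file. A minimal sketch of the same pattern over a plain u64 instead of an ioremapped register (a hypothetical demo module, not part of the driver):

    #include <linux/debugfs.h>
    #include <linux/module.h>

    static u64 demo_value;
    static struct dentry *demo_dir;

    static int demo_get(void *data, u64 *val)
    {
            *val = *(u64 *)data;            /* in_be64() for a real MMIO reg */
            return 0;
    }

    static int demo_set(void *data, u64 val)
    {
            *(u64 *)data = val;             /* out_be64() for a real MMIO reg */
            return 0;
    }
    DEFINE_DEBUGFS_ATTRIBUTE(demo_fops, demo_get, demo_set, "0x%016llx\n");

    static int __init demo_init(void)
    {
            demo_dir = debugfs_create_dir("demo", NULL);
            debugfs_create_file_unsafe("value", 0600, demo_dir, &demo_value,
                                       &demo_fops);
            return 0;
    }

    static void __exit demo_exit(void)
    {
            debugfs_remove_recursive(demo_dir);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
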
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
deleted file mode 100644
index 2c64f55cf01f..000000000000
--- a/drivers/misc/cxl/fault.c
+++ /dev/null
@@ -1,341 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/workqueue.h>
-#include <linux/sched/signal.h>
-#include <linux/sched/mm.h>
-#include <linux/pid.h>
-#include <linux/mm.h>
-#include <linux/moduleparam.h>
-
-#undef MODULE_PARAM_PREFIX
-#define MODULE_PARAM_PREFIX "cxl" "."
-#include <asm/current.h>
-#include <asm/copro.h>
-#include <asm/mmu.h>
-
-#include "cxl.h"
-#include "trace.h"
-
-static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
-{
- return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
- (sste->esid_data == cpu_to_be64(slb->esid)));
-}
-
-/*
- * This finds a free SSTE for the given SLB, or returns NULL if it's already in
- * the segment table.
- */
-static struct cxl_sste *find_free_sste(struct cxl_context *ctx,
- struct copro_slb *slb)
-{
- struct cxl_sste *primary, *sste, *ret = NULL;
- unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
- unsigned int entry;
- unsigned int hash;
-
- if (slb->vsid & SLB_VSID_B_1T)
- hash = (slb->esid >> SID_SHIFT_1T) & mask;
- else /* 256M */
- hash = (slb->esid >> SID_SHIFT) & mask;
-
- primary = ctx->sstp + (hash << 3);
-
- for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
- if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
- ret = sste;
- if (sste_matches(sste, slb))
- return NULL;
- }
- if (ret)
- return ret;
-
- /* Nothing free, select an entry to cast out */
- ret = primary + ctx->sst_lru;
- ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;
-
- return ret;
-}
-
-static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
-{
-	/* mask is the group index; we search primary and secondary here. */
- struct cxl_sste *sste;
- unsigned long flags;
-
- spin_lock_irqsave(&ctx->sste_lock, flags);
- sste = find_free_sste(ctx, slb);
- if (!sste)
- goto out_unlock;
-
- pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
- sste - ctx->sstp, slb->vsid, slb->esid);
- trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid);
-
- sste->vsid_data = cpu_to_be64(slb->vsid);
- sste->esid_data = cpu_to_be64(slb->esid);
-out_unlock:
- spin_unlock_irqrestore(&ctx->sste_lock, flags);
-}
-
-static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,
- u64 ea)
-{
- struct copro_slb slb = {0,0};
- int rc;
-
- if (!(rc = copro_calculate_slb(mm, ea, &slb))) {
- cxl_load_segment(ctx, &slb);
- }
-
- return rc;
-}
-
-static void cxl_ack_ae(struct cxl_context *ctx)
-{
- unsigned long flags;
-
- cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);
-
- spin_lock_irqsave(&ctx->lock, flags);
- ctx->pending_fault = true;
- ctx->fault_addr = ctx->dar;
- ctx->fault_dsisr = ctx->dsisr;
- spin_unlock_irqrestore(&ctx->lock, flags);
-
- wake_up_all(&ctx->wq);
-}
-
-static int cxl_handle_segment_miss(struct cxl_context *ctx,
- struct mm_struct *mm, u64 ea)
-{
- int rc;
-
- pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);
- trace_cxl_ste_miss(ctx, ea);
-
- if ((rc = cxl_fault_segment(ctx, mm, ea)))
- cxl_ack_ae(ctx);
- else {
-
- mb(); /* Order seg table write to TFC MMIO write */
- cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
- }
-
- return IRQ_HANDLED;
-}
-
-int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar)
-{
- vm_fault_t flt = 0;
- int result;
- unsigned long access, flags, inv_flags = 0;
-
- /*
-	 * Add the fault handling cpu to the task's mm cpumask so that we
-	 * can do a safe lockless page table walk when inserting the
-	 * hash page table entry. This function gets called with a
-	 * valid mm for user space addresses, hence the if (mm)
-	 * check is sufficient here.
- */
- if (mm && !cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
- cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
- /*
- * We need to make sure we walk the table only after
- * we update the cpumask. The other side of the barrier
- * is explained in serialize_against_pte_lookup()
- */
- smp_mb();
- }
- if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
- pr_devel("copro_handle_mm_fault failed: %#x\n", result);
- return result;
- }
-
- if (!radix_enabled()) {
- /*
- * update_mmu_cache() will not have loaded the hash since current->trap
- * is not a 0x400 or 0x300, so just call hash_page_mm() here.
- */
- access = _PAGE_PRESENT | _PAGE_READ;
- if (dsisr & CXL_PSL_DSISR_An_S)
- access |= _PAGE_WRITE;
-
- if (!mm && (get_region_id(dar) != USER_REGION_ID))
- access |= _PAGE_PRIVILEGED;
-
- if (dsisr & DSISR_NOHPTE)
- inv_flags |= HPTE_NOHPTE_UPDATE;
-
- local_irq_save(flags);
- hash_page_mm(mm, dar, access, 0x300, inv_flags);
- local_irq_restore(flags);
- }
- return 0;
-}
-
-static void cxl_handle_page_fault(struct cxl_context *ctx,
- struct mm_struct *mm,
- u64 dsisr, u64 dar)
-{
- trace_cxl_pte_miss(ctx, dsisr, dar);
-
- if (cxl_handle_mm_fault(mm, dsisr, dar)) {
- cxl_ack_ae(ctx);
- } else {
- pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
- cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
- }
-}
-
-/*
- * Returns the mm_struct corresponding to the context ctx.
- * If mm_users == 0, the context may be in the process of being closed.
- */
-static struct mm_struct *get_mem_context(struct cxl_context *ctx)
-{
- if (ctx->mm == NULL)
- return NULL;
-
- if (!mmget_not_zero(ctx->mm))
- return NULL;
-
- return ctx->mm;
-}
-
-static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
-{
- if ((cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DS)))
- return true;
-
- return false;
-}
-
-static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr)
-{
- if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_An_DM))
- return true;
-
- if (cxl_is_power9())
- return true;
-
- return false;
-}
-
-void cxl_handle_fault(struct work_struct *fault_work)
-{
- struct cxl_context *ctx =
- container_of(fault_work, struct cxl_context, fault_work);
- u64 dsisr = ctx->dsisr;
- u64 dar = ctx->dar;
- struct mm_struct *mm = NULL;
-
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
- cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
- cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
- /* Most likely explanation is harmless - a dedicated
- * process has detached and these were cleared by the
- * PSL purge, but warn about it just in case
- */
- dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
- return;
- }
- }
-
- /* Early return if the context is being / has been detached */
- if (ctx->status == CLOSED) {
- cxl_ack_ae(ctx);
- return;
- }
-
- pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
- "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);
-
- if (!ctx->kernel) {
-
- mm = get_mem_context(ctx);
- if (mm == NULL) {
- pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
- __func__, ctx->pe, pid_nr(ctx->pid));
- cxl_ack_ae(ctx);
- return;
- } else {
- pr_devel("Handling page fault for pe=%d pid=%i\n",
- ctx->pe, pid_nr(ctx->pid));
- }
- }
-
- if (cxl_is_segment_miss(ctx, dsisr))
- cxl_handle_segment_miss(ctx, mm, dar);
- else if (cxl_is_page_fault(ctx, dsisr))
- cxl_handle_page_fault(ctx, mm, dsisr, dar);
- else
- WARN(1, "cxl_handle_fault has nothing to handle\n");
-
- if (mm)
- mmput(mm);
-}
-
-static u64 next_segment(u64 ea, u64 vsid)
-{
- if (vsid & SLB_VSID_B_1T)
- ea |= (1ULL << 40) - 1;
- else
- ea |= (1ULL << 28) - 1;
-
- return ea + 1;
-}
-
-static void cxl_prefault_vma(struct cxl_context *ctx, struct mm_struct *mm)
-{
- u64 ea, last_esid = 0;
- struct copro_slb slb;
- VMA_ITERATOR(vmi, mm, 0);
- struct vm_area_struct *vma;
- int rc;
-
- mmap_read_lock(mm);
- for_each_vma(vmi, vma) {
- for (ea = vma->vm_start; ea < vma->vm_end;
- ea = next_segment(ea, slb.vsid)) {
- rc = copro_calculate_slb(mm, ea, &slb);
- if (rc)
- continue;
-
- if (last_esid == slb.esid)
- continue;
-
- cxl_load_segment(ctx, &slb);
- last_esid = slb.esid;
- }
- }
- mmap_read_unlock(mm);
-}
-
-void cxl_prefault(struct cxl_context *ctx, u64 wed)
-{
- struct mm_struct *mm = get_mem_context(ctx);
-
- if (mm == NULL) {
- pr_devel("cxl_prefault unable to get mm %i\n",
- pid_nr(ctx->pid));
- return;
- }
-
- switch (ctx->afu->prefault_mode) {
- case CXL_PREFAULT_WED:
- cxl_fault_segment(ctx, mm, wed);
- break;
- case CXL_PREFAULT_ALL:
- cxl_prefault_vma(ctx, mm);
- break;
- default:
- break;
- }
-
- mmput(mm);
-}
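
find_free_sste() above hashes the ESID to a group of eight segment-table entries, reuses an invalid slot when one exists, and otherwise casts out a victim round-robin via ctx->sst_lru. A toy userspace model of that 8-way lookup with round-robin castout (table size and names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define GROUP_SIZE 8                    /* entries per hash group */
    #define NGROUPS    16                   /* toy table: 16 groups */

    struct sste { uint64_t esid; int valid; };

    static struct sste table[NGROUPS * GROUP_SIZE];
    static unsigned int lru;                /* next castout victim */

    static struct sste *find_slot(uint64_t esid)
    {
            unsigned int hash = esid & (NGROUPS - 1);
            struct sste *group = &table[hash * GROUP_SIZE];
            unsigned int i;

            for (i = 0; i < GROUP_SIZE; i++)
                    if (!group[i].valid)    /* free entry: reuse it */
                            return &group[i];

            i = lru;                        /* group full: cast out */
            lru = (lru + 1) % GROUP_SIZE;
            return &group[i];
    }

    int main(void)
    {
            uint64_t n;

            /* ten inserts that all hash to group 0: the last two cast out */
            for (n = 0; n < 10; n++) {
                    struct sste *s = find_slot(n * NGROUPS);
                    s->esid = n * NGROUPS;
                    s->valid = 1;
                    printf("esid %#llx -> slot %td\n",
                           (unsigned long long)(n * NGROUPS), s - table);
            }
            return 0;
    }
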
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
deleted file mode 100644
index 012e11b959bc..000000000000
--- a/drivers/misc/cxl/file.c
+++ /dev/null
@@ -1,699 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/bitmap.h>
-#include <linux/sched/signal.h>
-#include <linux/poll.h>
-#include <linux/pid.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/sched/mm.h>
-#include <linux/mmu_context.h>
-#include <asm/cputable.h>
-#include <asm/current.h>
-#include <asm/copro.h>
-
-#include "cxl.h"
-#include "trace.h"
-
-#define CXL_NUM_MINORS 256 /* Total to reserve */
-
-#define CXL_AFU_MINOR_D(afu) (CXL_CARD_MINOR(afu->adapter) + 1 + (3 * afu->slice))
-#define CXL_AFU_MINOR_M(afu) (CXL_AFU_MINOR_D(afu) + 1)
-#define CXL_AFU_MINOR_S(afu) (CXL_AFU_MINOR_D(afu) + 2)
-#define CXL_AFU_MKDEV_D(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_D(afu))
-#define CXL_AFU_MKDEV_M(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_M(afu))
-#define CXL_AFU_MKDEV_S(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_S(afu))
-
-#define CXL_DEVT_AFU(dev) ((MINOR(dev) % CXL_DEV_MINORS - 1) / 3)
-
-#define CXL_DEVT_IS_CARD(dev) (MINOR(dev) % CXL_DEV_MINORS == 0)
-
-static dev_t cxl_dev;
-
-static int __afu_open(struct inode *inode, struct file *file, bool master)
-{
- struct cxl *adapter;
- struct cxl_afu *afu;
- struct cxl_context *ctx;
- int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev);
- int slice = CXL_DEVT_AFU(inode->i_rdev);
- int rc = -ENODEV;
-
- pr_devel("afu_open afu%i.%i\n", slice, adapter_num);
-
- if (!(adapter = get_cxl_adapter(adapter_num)))
- return -ENODEV;
-
- if (slice > adapter->slices)
- goto err_put_adapter;
-
- spin_lock(&adapter->afu_list_lock);
- if (!(afu = adapter->afu[slice])) {
- spin_unlock(&adapter->afu_list_lock);
- goto err_put_adapter;
- }
-
- /*
-	 * Take a ref on the afu so that it doesn't go away
-	 * for the rest of the function. This ref is released before
- * we return.
- */
- cxl_afu_get(afu);
- spin_unlock(&adapter->afu_list_lock);
-
- if (!afu->current_mode)
- goto err_put_afu;
-
- if (!cxl_ops->link_ok(adapter, afu)) {
- rc = -EIO;
- goto err_put_afu;
- }
-
- if (!(ctx = cxl_context_alloc())) {
- rc = -ENOMEM;
- goto err_put_afu;
- }
-
- rc = cxl_context_init(ctx, afu, master);
- if (rc)
- goto err_put_afu;
-
- cxl_context_set_mapping(ctx, inode->i_mapping);
-
- pr_devel("afu_open pe: %i\n", ctx->pe);
- file->private_data = ctx;
-
- /* indicate success */
- rc = 0;
-
-err_put_afu:
- /* release the ref taken earlier */
- cxl_afu_put(afu);
-err_put_adapter:
- put_device(&adapter->dev);
- return rc;
-}
-
-int afu_open(struct inode *inode, struct file *file)
-{
- return __afu_open(inode, file, false);
-}
-
-static int afu_master_open(struct inode *inode, struct file *file)
-{
- return __afu_open(inode, file, true);
-}
-
-int afu_release(struct inode *inode, struct file *file)
-{
- struct cxl_context *ctx = file->private_data;
-
- pr_devel("%s: closing cxl file descriptor. pe: %i\n",
- __func__, ctx->pe);
- cxl_context_detach(ctx);
-
-
- /*
-	 * Delete the context's mapping pointer, unless it was created by the
-	 * kernel API, in which case leave it so it can be freed by reclaim_ctx().
- */
- if (!ctx->kernelapi) {
- mutex_lock(&ctx->mapping_lock);
- ctx->mapping = NULL;
- mutex_unlock(&ctx->mapping_lock);
- }
-
- /*
-	 * At this point all bottom halves have finished and we should be
- * getting no more IRQs from the hardware for this context. Once it's
- * removed from the IDR (and RCU synchronised) it's safe to free the
- * sstp and context.
- */
- cxl_context_free(ctx);
-
- return 0;
-}
-
-static long afu_ioctl_start_work(struct cxl_context *ctx,
- struct cxl_ioctl_start_work __user *uwork)
-{
- struct cxl_ioctl_start_work work;
- u64 amr = 0;
- int rc;
-
- pr_devel("%s: pe: %i\n", __func__, ctx->pe);
-
- /* Do this outside the status_mutex to avoid a circular dependency with
- * the locking in cxl_mmap_fault() */
- if (copy_from_user(&work, uwork, sizeof(work)))
- return -EFAULT;
-
- mutex_lock(&ctx->status_mutex);
- if (ctx->status != OPENED) {
- rc = -EIO;
- goto out;
- }
-
- /*
- * if any of the reserved fields are set or any of the unused
- * flags are set it's invalid
- */
- if (work.reserved1 || work.reserved2 || work.reserved3 ||
- work.reserved4 || work.reserved5 ||
- (work.flags & ~CXL_START_WORK_ALL)) {
- rc = -EINVAL;
- goto out;
- }
-
- if (!(work.flags & CXL_START_WORK_NUM_IRQS))
- work.num_interrupts = ctx->afu->pp_irqs;
- else if ((work.num_interrupts < ctx->afu->pp_irqs) ||
- (work.num_interrupts > ctx->afu->irqs_max)) {
- rc = -EINVAL;
- goto out;
- }
-
- if ((rc = afu_register_irqs(ctx, work.num_interrupts)))
- goto out;
-
- if (work.flags & CXL_START_WORK_AMR)
- amr = work.amr & mfspr(SPRN_UAMOR);
-
- if (work.flags & CXL_START_WORK_TID)
- ctx->assign_tidr = true;
-
- ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);
-
- /*
-	 * Increment the mapped context count for the adapter. This also
-	 * checks whether the adapter_context_lock is taken.
- */
- rc = cxl_adapter_context_get(ctx->afu->adapter);
- if (rc) {
- afu_release_irqs(ctx, ctx);
- goto out;
- }
-
- /*
- * We grab the PID here and not in the file open to allow for the case
-	 * where a process (master, some daemon, etc.) has opened the chardev on
-	 * behalf of another process, so the AFU's mm gets bound to the process
-	 * that performs this ioctl and not the process that opened the file.
-	 * Also we grab the PID of the group leader so that, if the task that
-	 * performed the attach operation exits, the mm context of the
-	 * process is still accessible.
- */
- ctx->pid = get_task_pid(current, PIDTYPE_PID);
-
- /* acquire a reference to the task's mm */
- ctx->mm = get_task_mm(current);
-
- /* ensure this mm_struct can't be freed */
- cxl_context_mm_count_get(ctx);
-
- if (ctx->mm) {
- /* decrement the use count from above */
- mmput(ctx->mm);
- /* make TLBIs for this context global */
- mm_context_add_copro(ctx->mm);
- }
-
- /*
- * Increment driver use count. Enables global TLBIs for hash
- * and callbacks to handle the segment table
- */
- cxl_ctx_get();
-
- /*
- * A barrier is needed to make sure all TLBIs are global
- * before we attach and the context starts being used by the
- * adapter.
- *
- * Needed after mm_context_add_copro() for radix and
- * cxl_ctx_get() for hash/p8.
- *
- * The barrier should really be mb(), since it involves a
- * device. However, it's only useful when we have local
-	 * vs. global TLBIs, i.e. SMP=y. So keep smp_mb().
- */
- smp_mb();
-
- trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
-
- if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
- amr))) {
- afu_release_irqs(ctx, ctx);
- cxl_adapter_context_put(ctx->afu->adapter);
- put_pid(ctx->pid);
- ctx->pid = NULL;
- cxl_ctx_put();
- cxl_context_mm_count_put(ctx);
- if (ctx->mm)
- mm_context_remove_copro(ctx->mm);
- goto out;
- }
-
- rc = 0;
- if (work.flags & CXL_START_WORK_TID) {
- work.tid = ctx->tidr;
- if (copy_to_user(uwork, &work, sizeof(work)))
- rc = -EFAULT;
- }
-
- ctx->status = STARTED;
-
-out:
- mutex_unlock(&ctx->status_mutex);
- return rc;
-}
-
-static long afu_ioctl_process_element(struct cxl_context *ctx,
- int __user *upe)
-{
- pr_devel("%s: pe: %i\n", __func__, ctx->pe);
-
- if (copy_to_user(upe, &ctx->external_pe, sizeof(__u32)))
- return -EFAULT;
-
- return 0;
-}
-
-static long afu_ioctl_get_afu_id(struct cxl_context *ctx,
- struct cxl_afu_id __user *upafuid)
-{
- struct cxl_afu_id afuid = { 0 };
-
- afuid.card_id = ctx->afu->adapter->adapter_num;
- afuid.afu_offset = ctx->afu->slice;
- afuid.afu_mode = ctx->afu->current_mode;
-
-	/* set the flag bit if the afu is a slave */
- if (ctx->afu->current_mode == CXL_MODE_DIRECTED && !ctx->master)
- afuid.flags |= CXL_AFUID_FLAG_SLAVE;
-
- if (copy_to_user(upafuid, &afuid, sizeof(afuid)))
- return -EFAULT;
-
- return 0;
-}
-
-long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct cxl_context *ctx = file->private_data;
-
- if (ctx->status == CLOSED)
- return -EIO;
-
- if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
- return -EIO;
-
- pr_devel("afu_ioctl\n");
- switch (cmd) {
- case CXL_IOCTL_START_WORK:
- return afu_ioctl_start_work(ctx, (struct cxl_ioctl_start_work __user *)arg);
- case CXL_IOCTL_GET_PROCESS_ELEMENT:
- return afu_ioctl_process_element(ctx, (__u32 __user *)arg);
- case CXL_IOCTL_GET_AFU_ID:
- return afu_ioctl_get_afu_id(ctx, (struct cxl_afu_id __user *)
- arg);
- }
- return -EINVAL;
-}
-
-static long afu_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- return afu_ioctl(file, cmd, arg);
-}
-
-int afu_mmap(struct file *file, struct vm_area_struct *vm)
-{
- struct cxl_context *ctx = file->private_data;
-
- /* AFU must be started before we can MMIO */
- if (ctx->status != STARTED)
- return -EIO;
-
- if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
- return -EIO;
-
- return cxl_context_iomap(ctx, vm);
-}
-
-static inline bool ctx_event_pending(struct cxl_context *ctx)
-{
- if (ctx->pending_irq || ctx->pending_fault || ctx->pending_afu_err)
- return true;
-
- if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events))
- return true;
-
- return false;
-}
-
-__poll_t afu_poll(struct file *file, struct poll_table_struct *poll)
-{
- struct cxl_context *ctx = file->private_data;
- __poll_t mask = 0;
- unsigned long flags;
-
-
- poll_wait(file, &ctx->wq, poll);
-
- pr_devel("afu_poll wait done pe: %i\n", ctx->pe);
-
- spin_lock_irqsave(&ctx->lock, flags);
- if (ctx_event_pending(ctx))
- mask |= EPOLLIN | EPOLLRDNORM;
- else if (ctx->status == CLOSED)
-		/* Only error on closed when there are no further events pending
- */
- mask |= EPOLLERR;
- spin_unlock_irqrestore(&ctx->lock, flags);
-
- pr_devel("afu_poll pe: %i returning %#x\n", ctx->pe, mask);
-
- return mask;
-}
-
-static ssize_t afu_driver_event_copy(struct cxl_context *ctx,
- char __user *buf,
- struct cxl_event *event,
- struct cxl_event_afu_driver_reserved *pl)
-{
- /* Check event */
- if (!pl) {
- ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
- return -EFAULT;
- }
-
- /* Check event size */
- event->header.size += pl->data_size;
- if (event->header.size > CXL_READ_MIN_SIZE) {
- ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
- return -EFAULT;
- }
-
- /* Copy event header */
- if (copy_to_user(buf, event, sizeof(struct cxl_event_header))) {
- ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
- return -EFAULT;
- }
-
- /* Copy event data */
- buf += sizeof(struct cxl_event_header);
- if (copy_to_user(buf, &pl->data, pl->data_size)) {
- ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
- return -EFAULT;
- }
-
- ctx->afu_driver_ops->event_delivered(ctx, pl, 0); /* Success */
- return event->header.size;
-}
-
-ssize_t afu_read(struct file *file, char __user *buf, size_t count,
- loff_t *off)
-{
- struct cxl_context *ctx = file->private_data;
- struct cxl_event_afu_driver_reserved *pl = NULL;
- struct cxl_event event;
- unsigned long flags;
- int rc;
- DEFINE_WAIT(wait);
-
- if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
- return -EIO;
-
- if (count < CXL_READ_MIN_SIZE)
- return -EINVAL;
-
- spin_lock_irqsave(&ctx->lock, flags);
-
- for (;;) {
- prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE);
- if (ctx_event_pending(ctx) || (ctx->status == CLOSED))
- break;
-
- if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
- rc = -EIO;
- goto out;
- }
-
- if (file->f_flags & O_NONBLOCK) {
- rc = -EAGAIN;
- goto out;
- }
-
- if (signal_pending(current)) {
- rc = -ERESTARTSYS;
- goto out;
- }
-
- spin_unlock_irqrestore(&ctx->lock, flags);
- pr_devel("afu_read going to sleep...\n");
- schedule();
- pr_devel("afu_read woken up\n");
- spin_lock_irqsave(&ctx->lock, flags);
- }
-
- finish_wait(&ctx->wq, &wait);
-
- memset(&event, 0, sizeof(event));
- event.header.process_element = ctx->pe;
- event.header.size = sizeof(struct cxl_event_header);
- if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events)) {
- pr_devel("afu_read delivering AFU driver specific event\n");
- pl = ctx->afu_driver_ops->fetch_event(ctx);
- atomic_dec(&ctx->afu_driver_events);
- event.header.type = CXL_EVENT_AFU_DRIVER;
- } else if (ctx->pending_irq) {
- pr_devel("afu_read delivering AFU interrupt\n");
- event.header.size += sizeof(struct cxl_event_afu_interrupt);
- event.header.type = CXL_EVENT_AFU_INTERRUPT;
- event.irq.irq = find_first_bit(ctx->irq_bitmap, ctx->irq_count) + 1;
- clear_bit(event.irq.irq - 1, ctx->irq_bitmap);
- if (bitmap_empty(ctx->irq_bitmap, ctx->irq_count))
- ctx->pending_irq = false;
- } else if (ctx->pending_fault) {
- pr_devel("afu_read delivering data storage fault\n");
- event.header.size += sizeof(struct cxl_event_data_storage);
- event.header.type = CXL_EVENT_DATA_STORAGE;
- event.fault.addr = ctx->fault_addr;
- event.fault.dsisr = ctx->fault_dsisr;
- ctx->pending_fault = false;
- } else if (ctx->pending_afu_err) {
- pr_devel("afu_read delivering afu error\n");
- event.header.size += sizeof(struct cxl_event_afu_error);
- event.header.type = CXL_EVENT_AFU_ERROR;
- event.afu_error.error = ctx->afu_err;
- ctx->pending_afu_err = false;
- } else if (ctx->status == CLOSED) {
- pr_devel("afu_read fatal error\n");
- spin_unlock_irqrestore(&ctx->lock, flags);
- return -EIO;
- } else
- WARN(1, "afu_read must be buggy\n");
-
- spin_unlock_irqrestore(&ctx->lock, flags);
-
- if (event.header.type == CXL_EVENT_AFU_DRIVER)
- return afu_driver_event_copy(ctx, buf, &event, pl);
-
- if (copy_to_user(buf, &event, event.header.size))
- return -EFAULT;
- return event.header.size;
-
-out:
- finish_wait(&ctx->wq, &wait);
- spin_unlock_irqrestore(&ctx->lock, flags);
- return rc;
-}
-
-/*
- * Note: if this is updated, we need to update api.c to patch the new ones in
- * too
- */
-const struct file_operations afu_fops = {
- .owner = THIS_MODULE,
- .open = afu_open,
- .poll = afu_poll,
- .read = afu_read,
- .release = afu_release,
- .unlocked_ioctl = afu_ioctl,
- .compat_ioctl = afu_compat_ioctl,
- .mmap = afu_mmap,
-};
-
-static const struct file_operations afu_master_fops = {
- .owner = THIS_MODULE,
- .open = afu_master_open,
- .poll = afu_poll,
- .read = afu_read,
- .release = afu_release,
- .unlocked_ioctl = afu_ioctl,
- .compat_ioctl = afu_compat_ioctl,
- .mmap = afu_mmap,
-};
-
-
-static char *cxl_devnode(const struct device *dev, umode_t *mode)
-{
- if (cpu_has_feature(CPU_FTR_HVMODE) &&
- CXL_DEVT_IS_CARD(dev->devt)) {
- /*
- * These minor numbers will eventually be used to program the
- * PSL and AFUs once we have dynamic reprogramming support
- */
- return NULL;
- }
- return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
-}
-
-static const struct class cxl_class = {
- .name = "cxl",
- .devnode = cxl_devnode,
-};
-
-static int cxl_add_chardev(struct cxl_afu *afu, dev_t devt, struct cdev *cdev,
- struct device **chardev, char *postfix, char *desc,
- const struct file_operations *fops)
-{
- struct device *dev;
- int rc;
-
- cdev_init(cdev, fops);
- rc = cdev_add(cdev, devt, 1);
- if (rc) {
- dev_err(&afu->dev, "Unable to add %s chardev: %i\n", desc, rc);
- return rc;
- }
-
- dev = device_create(&cxl_class, &afu->dev, devt, afu,
- "afu%i.%i%s", afu->adapter->adapter_num, afu->slice, postfix);
- if (IS_ERR(dev)) {
- rc = PTR_ERR(dev);
- dev_err(&afu->dev, "Unable to create %s chardev in sysfs: %i\n", desc, rc);
- goto err;
- }
-
- *chardev = dev;
-
- return 0;
-err:
- cdev_del(cdev);
- return rc;
-}
-
-int cxl_chardev_d_afu_add(struct cxl_afu *afu)
-{
- return cxl_add_chardev(afu, CXL_AFU_MKDEV_D(afu), &afu->afu_cdev_d,
- &afu->chardev_d, "d", "dedicated",
- &afu_master_fops); /* Uses master fops */
-}
-
-int cxl_chardev_m_afu_add(struct cxl_afu *afu)
-{
- return cxl_add_chardev(afu, CXL_AFU_MKDEV_M(afu), &afu->afu_cdev_m,
- &afu->chardev_m, "m", "master",
- &afu_master_fops);
-}
-
-int cxl_chardev_s_afu_add(struct cxl_afu *afu)
-{
- return cxl_add_chardev(afu, CXL_AFU_MKDEV_S(afu), &afu->afu_cdev_s,
- &afu->chardev_s, "s", "shared",
- &afu_fops);
-}
-
-void cxl_chardev_afu_remove(struct cxl_afu *afu)
-{
- if (afu->chardev_d) {
- cdev_del(&afu->afu_cdev_d);
- device_unregister(afu->chardev_d);
- afu->chardev_d = NULL;
- }
- if (afu->chardev_m) {
- cdev_del(&afu->afu_cdev_m);
- device_unregister(afu->chardev_m);
- afu->chardev_m = NULL;
- }
- if (afu->chardev_s) {
- cdev_del(&afu->afu_cdev_s);
- device_unregister(afu->chardev_s);
- afu->chardev_s = NULL;
- }
-}
-
-int cxl_register_afu(struct cxl_afu *afu)
-{
- afu->dev.class = &cxl_class;
-
- return device_register(&afu->dev);
-}
-
-int cxl_register_adapter(struct cxl *adapter)
-{
- adapter->dev.class = &cxl_class;
-
- /*
- * Future: When we support dynamically reprogramming the PSL & AFU we
- * will expose the interface to do that via a chardev:
- * adapter->dev.devt = CXL_CARD_MKDEV(adapter);
- */
-
- return device_register(&adapter->dev);
-}
-
-dev_t cxl_get_dev(void)
-{
- return cxl_dev;
-}
-
-int __init cxl_file_init(void)
-{
- int rc;
-
- /*
-	 * If these change we really need to update the API. Either change some
-	 * flags or update the API version number, CXL_API_VERSION.
- */
- BUILD_BUG_ON(CXL_API_VERSION != 3);
- BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64);
- BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8);
- BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8);
- BUILD_BUG_ON(sizeof(struct cxl_event_data_storage) != 32);
- BUILD_BUG_ON(sizeof(struct cxl_event_afu_error) != 16);
-
- if ((rc = alloc_chrdev_region(&cxl_dev, 0, CXL_NUM_MINORS, "cxl"))) {
- pr_err("Unable to allocate CXL major number: %i\n", rc);
- return rc;
- }
-
- pr_devel("CXL device allocated, MAJOR %i\n", MAJOR(cxl_dev));
-
- rc = class_register(&cxl_class);
- if (rc) {
- pr_err("Unable to create CXL class\n");
- goto err;
- }
-
- return 0;
-
-err:
- unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
- return rc;
-}
-
-void cxl_file_exit(void)
-{
- unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
- class_unregister(&cxl_class);
-}
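
The minor-number macros above allocate one control minor per card followed by three chardev minors (dedicated/master/shared) per AFU slice, and CXL_DEVT_AFU() inverts the mapping. Assuming CXL_DEV_MINORS is 13 (one card node plus 4 slices times 3 devices; an assumption, since its definition is not shown here), the round trip looks like this standalone demo:

    #include <stdio.h>

    #define DEV_MINORS 13   /* assumed: 1 card node + 4 slices * 3 chardevs */

    static int card_minor(int card)
    {
            return card * DEV_MINORS;
    }

    static int afu_minor_d(int card, int slice)
    {
            return card_minor(card) + 1 + 3 * slice;        /* "dedicated" */
    }

    int main(void)
    {
            int card, slice, minor;

            for (card = 0; card < 2; card++)
                    for (slice = 0; slice < 2; slice++) {
                            minor = afu_minor_d(card, slice);
                            /* decode the slice back, as CXL_DEVT_AFU() does */
                            printf("card%d afu%d: minor %d -> slice %d\n",
                                   card, slice, minor,
                                   (minor % DEV_MINORS - 1) / 3);
                    }
            return 0;
    }
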
diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c
deleted file mode 100644
index eee9decc121e..000000000000
--- a/drivers/misc/cxl/flash.c
+++ /dev/null
@@ -1,538 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/semaphore.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/of.h>
-#include <asm/rtas.h>
-
-#include "cxl.h"
-#include "hcalls.h"
-
-#define DOWNLOAD_IMAGE 1
-#define VALIDATE_IMAGE 2
-
-struct ai_header {
- u16 version;
- u8 reserved0[6];
- u16 vendor;
- u16 device;
- u16 subsystem_vendor;
- u16 subsystem;
- u64 image_offset;
- u64 image_length;
- u8 reserved1[96];
-};
-
-static struct semaphore sem;
-static unsigned long *buffer[CXL_AI_MAX_ENTRIES];
-static struct sg_list *le;
-static u64 continue_token;
-static unsigned int transfer;
-
-struct update_props_workarea {
- __be32 phandle;
- __be32 state;
- __be64 reserved;
- __be32 nprops;
-} __packed;
-
-struct update_nodes_workarea {
- __be32 state;
- __be64 unit_address;
- __be32 reserved;
-} __packed;
-
-#define DEVICE_SCOPE 3
-#define NODE_ACTION_MASK 0xff000000
-#define NODE_COUNT_MASK 0x00ffffff
-#define OPCODE_DELETE 0x01000000
-#define OPCODE_UPDATE 0x02000000
-#define OPCODE_ADD 0x03000000
-
-static int rcall(int token, char *buf, s32 scope)
-{
- int rc;
-
- spin_lock(&rtas_data_buf_lock);
-
- memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE);
- rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope);
- memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);
-
- spin_unlock(&rtas_data_buf_lock);
- return rc;
-}
-
-static int update_property(struct device_node *dn, const char *name,
- u32 vd, char *value)
-{
- struct property *new_prop;
- u32 *val;
- int rc;
-
- new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
- if (!new_prop)
- return -ENOMEM;
-
- new_prop->name = kstrdup(name, GFP_KERNEL);
- if (!new_prop->name) {
- kfree(new_prop);
- return -ENOMEM;
- }
-
- new_prop->length = vd;
- new_prop->value = kzalloc(new_prop->length, GFP_KERNEL);
- if (!new_prop->value) {
- kfree(new_prop->name);
- kfree(new_prop);
- return -ENOMEM;
- }
- memcpy(new_prop->value, value, vd);
-
- val = (u32 *)new_prop->value;
- rc = cxl_update_properties(dn, new_prop);
- pr_devel("%pOFn: update property (%s, length: %i, value: %#x)\n",
- dn, name, vd, be32_to_cpu(*val));
-
- if (rc) {
- kfree(new_prop->name);
- kfree(new_prop->value);
- kfree(new_prop);
- }
- return rc;
-}
-
-static int update_node(__be32 phandle, s32 scope)
-{
- struct update_props_workarea *upwa;
- struct device_node *dn;
- int i, rc, ret;
- char *prop_data;
- char *buf;
- int token;
- u32 nprops;
- u32 vd;
-
- token = rtas_token("ibm,update-properties");
- if (token == RTAS_UNKNOWN_SERVICE)
- return -EINVAL;
-
- buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- dn = of_find_node_by_phandle(be32_to_cpu(phandle));
- if (!dn) {
- kfree(buf);
- return -ENOENT;
- }
-
- upwa = (struct update_props_workarea *)&buf[0];
- upwa->phandle = phandle;
- do {
- rc = rcall(token, buf, scope);
- if (rc < 0)
- break;
-
- prop_data = buf + sizeof(*upwa);
- nprops = be32_to_cpu(upwa->nprops);
-
- if (*prop_data == 0) {
- prop_data++;
- vd = be32_to_cpu(*(__be32 *)prop_data);
- prop_data += vd + sizeof(vd);
- nprops--;
- }
-
- for (i = 0; i < nprops; i++) {
- char *prop_name;
-
- prop_name = prop_data;
- prop_data += strlen(prop_name) + 1;
- vd = be32_to_cpu(*(__be32 *)prop_data);
- prop_data += sizeof(vd);
-
- if ((vd != 0x00000000) && (vd != 0x80000000)) {
- ret = update_property(dn, prop_name, vd,
- prop_data);
- if (ret)
- pr_err("cxl: Could not update property %s - %i\n",
- prop_name, ret);
-
- prop_data += vd;
- }
- }
- } while (rc == 1);
-
- of_node_put(dn);
- kfree(buf);
- return rc;
-}
-
-static int update_devicetree(struct cxl *adapter, s32 scope)
-{
- struct update_nodes_workarea *unwa;
- u32 action, node_count;
- int token, rc, i;
- __be32 *data, phandle;
- char *buf;
-
- token = rtas_token("ibm,update-nodes");
- if (token == RTAS_UNKNOWN_SERVICE)
- return -EINVAL;
-
- buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- unwa = (struct update_nodes_workarea *)&buf[0];
- unwa->unit_address = cpu_to_be64(adapter->guest->handle);
- do {
- rc = rcall(token, buf, scope);
- if (rc && rc != 1)
- break;
-
- data = (__be32 *)buf + 4;
- while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
- action = be32_to_cpu(*data) & NODE_ACTION_MASK;
- node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
- pr_devel("device reconfiguration - action: %#x, nodes: %#x\n",
- action, node_count);
- data++;
-
- for (i = 0; i < node_count; i++) {
- phandle = *data++;
-
- switch (action) {
- case OPCODE_DELETE:
- /* nothing to do */
- break;
- case OPCODE_UPDATE:
- update_node(phandle, scope);
- break;
- case OPCODE_ADD:
- /* nothing to do, just move pointer */
- data++;
- break;
- }
- }
- }
- } while (rc == 1);
-
- kfree(buf);
- return 0;
-}
-
-static int handle_image(struct cxl *adapter, int operation,
- long (*fct)(u64, u64, u64, u64 *),
- struct cxl_adapter_image *ai)
-{
- size_t mod, s_copy, len_chunk = 0;
- struct ai_header *header = NULL;
- unsigned int entries = 0, i;
- void *dest, *from;
- int rc = 0, need_header;
-
- /* base adapter image header */
- need_header = (ai->flags & CXL_AI_NEED_HEADER);
- if (need_header) {
- header = kzalloc(sizeof(struct ai_header), GFP_KERNEL);
- if (!header)
- return -ENOMEM;
- header->version = cpu_to_be16(1);
- header->vendor = cpu_to_be16(adapter->guest->vendor);
- header->device = cpu_to_be16(adapter->guest->device);
- header->subsystem_vendor = cpu_to_be16(adapter->guest->subsystem_vendor);
- header->subsystem = cpu_to_be16(adapter->guest->subsystem);
- header->image_offset = cpu_to_be64(CXL_AI_HEADER_SIZE);
- header->image_length = cpu_to_be64(ai->len_image);
- }
-
- /* number of entries in the list */
- len_chunk = ai->len_data;
- if (need_header)
- len_chunk += CXL_AI_HEADER_SIZE;
-
- entries = len_chunk / CXL_AI_BUFFER_SIZE;
- mod = len_chunk % CXL_AI_BUFFER_SIZE;
- if (mod)
- entries++;
-
- if (entries > CXL_AI_MAX_ENTRIES) {
- rc = -EINVAL;
- goto err;
- }
-
- /* < -- MAX_CHUNK_SIZE = 4096 * 256 = 1048576 bytes -->
- * chunk 0 ----------------------------------------------------
- * | header | data |
- * ----------------------------------------------------
- * chunk 1 ----------------------------------------------------
- * | data |
- * ----------------------------------------------------
- * ....
- * chunk n ----------------------------------------------------
- * | data |
- * ----------------------------------------------------
- */
- from = (void *) ai->data;
- for (i = 0; i < entries; i++) {
- dest = buffer[i];
- s_copy = CXL_AI_BUFFER_SIZE;
-
- if ((need_header) && (i == 0)) {
- /* add adapter image header */
- memcpy(buffer[i], header, sizeof(struct ai_header));
- s_copy = CXL_AI_BUFFER_SIZE - CXL_AI_HEADER_SIZE;
- dest += CXL_AI_HEADER_SIZE; /* image offset */
- }
- if ((i == (entries - 1)) && mod)
- s_copy = mod;
-
- /* copy data */
- if (copy_from_user(dest, from, s_copy))
- goto err;
-
- /* fill in the list */
- le[i].phys_addr = cpu_to_be64(virt_to_phys(buffer[i]));
- le[i].len = cpu_to_be64(CXL_AI_BUFFER_SIZE);
- if ((i == (entries - 1)) && mod)
- le[i].len = cpu_to_be64(mod);
- from += s_copy;
- }
- pr_devel("%s (op: %i, need header: %i, entries: %i, token: %#llx)\n",
- __func__, operation, need_header, entries, continue_token);
-
- /*
- * download/validate the adapter image to the coherent
- * platform facility
- */
- rc = fct(adapter->guest->handle, virt_to_phys(le), entries,
- &continue_token);
- if (rc == 0) /* success of download/validation operation */
- continue_token = 0;
-
-err:
- kfree(header);
-
- return rc;
-}
-
-static int transfer_image(struct cxl *adapter, int operation,
- struct cxl_adapter_image *ai)
-{
- int rc = 0;
- int afu;
-
- switch (operation) {
- case DOWNLOAD_IMAGE:
- rc = handle_image(adapter, operation,
- &cxl_h_download_adapter_image, ai);
- if (rc < 0) {
- pr_devel("resetting adapter\n");
- cxl_h_reset_adapter(adapter->guest->handle);
- }
- return rc;
-
- case VALIDATE_IMAGE:
- rc = handle_image(adapter, operation,
- &cxl_h_validate_adapter_image, ai);
- if (rc < 0) {
- pr_devel("resetting adapter\n");
- cxl_h_reset_adapter(adapter->guest->handle);
- return rc;
- }
- if (rc == 0) {
- pr_devel("remove current afu\n");
- for (afu = 0; afu < adapter->slices; afu++)
- cxl_guest_remove_afu(adapter->afu[afu]);
-
- pr_devel("resetting adapter\n");
- cxl_h_reset_adapter(adapter->guest->handle);
-
- /* The entire image has now been
- * downloaded and the validation has
- * been successfully performed.
- * After that, the partition should call
- * ibm,update-nodes and
- * ibm,update-properties to receive the
-			 * current configuration.
- */
- rc = update_devicetree(adapter, DEVICE_SCOPE);
- transfer = 1;
- }
- return rc;
- }
-
- return -EINVAL;
-}
-
-static long ioctl_transfer_image(struct cxl *adapter, int operation,
- struct cxl_adapter_image __user *uai)
-{
- struct cxl_adapter_image ai;
-
- pr_devel("%s\n", __func__);
-
- if (copy_from_user(&ai, uai, sizeof(struct cxl_adapter_image)))
- return -EFAULT;
-
- /*
- * Make sure reserved fields and bits are set to 0
- */
- if (ai.reserved1 || ai.reserved2 || ai.reserved3 || ai.reserved4 ||
- (ai.flags & ~CXL_AI_ALL))
- return -EINVAL;
-
- return transfer_image(adapter, operation, &ai);
-}
-
-static int device_open(struct inode *inode, struct file *file)
-{
- int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev);
- struct cxl *adapter;
- int rc = 0, i;
-
- pr_devel("in %s\n", __func__);
-
- BUG_ON(sizeof(struct ai_header) != CXL_AI_HEADER_SIZE);
-
-	/* Allow only one process at a time to open the device, using a semaphore */
- if (down_interruptible(&sem) != 0)
- return -EPERM;
-
- if (!(adapter = get_cxl_adapter(adapter_num))) {
- rc = -ENODEV;
- goto err_unlock;
- }
-
- file->private_data = adapter;
- continue_token = 0;
- transfer = 0;
-
- for (i = 0; i < CXL_AI_MAX_ENTRIES; i++)
- buffer[i] = NULL;
-
-	/* aligned buffer containing list entries which describe up to
- * 1 megabyte of data (256 entries of 4096 bytes each)
- * Logical real address of buffer 0 - Buffer 0 length in bytes
- * Logical real address of buffer 1 - Buffer 1 length in bytes
- * Logical real address of buffer 2 - Buffer 2 length in bytes
- * ....
- * ....
- * Logical real address of buffer N - Buffer N length in bytes
- */
- le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
- if (!le) {
- rc = -ENOMEM;
- goto err;
- }
-
- for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
- buffer[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
- if (!buffer[i]) {
- rc = -ENOMEM;
- goto err1;
- }
- }
-
- return 0;
-
-err1:
- for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
- if (buffer[i])
- free_page((unsigned long) buffer[i]);
- }
-
- if (le)
- free_page((unsigned long) le);
-err:
- put_device(&adapter->dev);
-err_unlock:
- up(&sem);
-
- return rc;
-}
-
-static long device_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct cxl *adapter = file->private_data;
-
- pr_devel("in %s\n", __func__);
-
- if (cmd == CXL_IOCTL_DOWNLOAD_IMAGE)
- return ioctl_transfer_image(adapter,
- DOWNLOAD_IMAGE,
- (struct cxl_adapter_image __user *)arg);
- else if (cmd == CXL_IOCTL_VALIDATE_IMAGE)
- return ioctl_transfer_image(adapter,
- VALIDATE_IMAGE,
- (struct cxl_adapter_image __user *)arg);
- else
- return -EINVAL;
-}
-
-static int device_close(struct inode *inode, struct file *file)
-{
- struct cxl *adapter = file->private_data;
- int i;
-
- pr_devel("in %s\n", __func__);
-
- for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
- if (buffer[i])
- free_page((unsigned long) buffer[i]);
- }
-
- if (le)
- free_page((unsigned long) le);
-
- up(&sem);
- put_device(&adapter->dev);
- continue_token = 0;
-
- /* reload the module */
- if (transfer)
- cxl_guest_reload_module(adapter);
- else {
- pr_devel("resetting adapter\n");
- cxl_h_reset_adapter(adapter->guest->handle);
- }
-
- transfer = 0;
- return 0;
-}
-
-static const struct file_operations fops = {
- .owner = THIS_MODULE,
- .open = device_open,
- .unlocked_ioctl = device_ioctl,
- .compat_ioctl = compat_ptr_ioctl,
- .release = device_close,
-};
-
-void cxl_guest_remove_chardev(struct cxl *adapter)
-{
- cdev_del(&adapter->guest->cdev);
-}
-
-int cxl_guest_add_chardev(struct cxl *adapter)
-{
- dev_t devt;
- int rc;
-
- devt = MKDEV(MAJOR(cxl_get_dev()), CXL_CARD_MINOR(adapter));
- cdev_init(&adapter->guest->cdev, &fops);
- if ((rc = cdev_add(&adapter->guest->cdev, devt, 1))) {
- dev_err(&adapter->dev,
- "Unable to add chardev on adapter (card%i): %i\n",
- adapter->adapter_num, rc);
- goto err;
- }
- adapter->dev.devt = devt;
- sema_init(&sem, 1);
-err:
- return rc;
-}
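
handle_image() above (and guest_collect_vpd() in guest.c below) split a transfer into fixed-size buffers with a ceil-division: a non-zero remainder adds one entry and becomes the length of the last one. A minimal sketch of that chunking arithmetic with an illustrative length:

    #include <stdio.h>

    #define BUF_SIZE 4096                   /* like CXL_AI_BUFFER_SIZE */

    int main(void)
    {
            size_t len = 10000, mod, entries, i, this_len;

            entries = len / BUF_SIZE;
            mod = len % BUF_SIZE;
            if (mod)                        /* partial last chunk */
                    entries++;

            for (i = 0; i < entries; i++) {
                    this_len = (i == entries - 1 && mod) ? mod : BUF_SIZE;
                    printf("chunk %zu: %zu bytes\n", i, this_len);
            }
            return 0;
    }
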
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
deleted file mode 100644
index fb95a2d5cef4..000000000000
--- a/drivers/misc/cxl/guest.c
+++ /dev/null
@@ -1,1208 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2015 IBM Corp.
- */
-
-#include <linux/spinlock.h>
-#include <linux/uaccess.h>
-#include <linux/delay.h>
-#include <linux/irqdomain.h>
-#include <linux/platform_device.h>
-
-#include "cxl.h"
-#include "hcalls.h"
-#include "trace.h"
-
-#define CXL_ERROR_DETECTED_EVENT 1
-#define CXL_SLOT_RESET_EVENT 2
-#define CXL_RESUME_EVENT 3
-
-static void pci_error_handlers(struct cxl_afu *afu,
- int bus_error_event,
- pci_channel_state_t state)
-{
- struct pci_dev *afu_dev;
- struct pci_driver *afu_drv;
- const struct pci_error_handlers *err_handler;
-
- if (afu->phb == NULL)
- return;
-
- list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
- afu_drv = to_pci_driver(afu_dev->dev.driver);
- if (!afu_drv)
- continue;
-
- err_handler = afu_drv->err_handler;
- switch (bus_error_event) {
- case CXL_ERROR_DETECTED_EVENT:
- afu_dev->error_state = state;
-
- if (err_handler &&
- err_handler->error_detected)
- err_handler->error_detected(afu_dev, state);
- break;
- case CXL_SLOT_RESET_EVENT:
- afu_dev->error_state = state;
-
- if (err_handler &&
- err_handler->slot_reset)
- err_handler->slot_reset(afu_dev);
- break;
- case CXL_RESUME_EVENT:
- if (err_handler &&
- err_handler->resume)
- err_handler->resume(afu_dev);
- break;
- }
- }
-}
-
-static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr,
- u64 errstat)
-{
- pr_devel("in %s\n", __func__);
- dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);
-
- return cxl_ops->ack_irq(ctx, 0, errstat);
-}
-
-static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu,
- void *buf, size_t len)
-{
- unsigned int entries, mod;
- unsigned long **vpd_buf = NULL;
- struct sg_list *le;
- int rc = 0, i, tocopy;
- u64 out = 0;
-
- if (buf == NULL)
- return -EINVAL;
-
- /* number of entries in the list */
- entries = len / SG_BUFFER_SIZE;
- mod = len % SG_BUFFER_SIZE;
- if (mod)
- entries++;
-
- if (entries > SG_MAX_ENTRIES) {
- entries = SG_MAX_ENTRIES;
- len = SG_MAX_ENTRIES * SG_BUFFER_SIZE;
- mod = 0;
- }
-
- vpd_buf = kcalloc(entries, sizeof(unsigned long *), GFP_KERNEL);
- if (!vpd_buf)
- return -ENOMEM;
-
- le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
- if (!le) {
- rc = -ENOMEM;
- goto err1;
- }
-
- for (i = 0; i < entries; i++) {
- vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
- if (!vpd_buf[i]) {
- rc = -ENOMEM;
- goto err2;
- }
- le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i]));
- le[i].len = cpu_to_be64(SG_BUFFER_SIZE);
- if ((i == (entries - 1)) && mod)
- le[i].len = cpu_to_be64(mod);
- }
-
- if (adapter)
- rc = cxl_h_collect_vpd_adapter(adapter->guest->handle,
- virt_to_phys(le), entries, &out);
- else
- rc = cxl_h_collect_vpd(afu->guest->handle, 0,
- virt_to_phys(le), entries, &out);
- pr_devel("length of available (entries: %i), vpd: %#llx\n",
- entries, out);
-
- if (!rc) {
- /*
- * hcall returns in 'out' the size of available VPDs.
- * It fills the buffer with as much data as possible.
- */
- if (out < len)
- len = out;
- rc = len;
- if (out) {
- for (i = 0; i < entries; i++) {
- if (len < SG_BUFFER_SIZE)
- tocopy = len;
- else
- tocopy = SG_BUFFER_SIZE;
- memcpy(buf, vpd_buf[i], tocopy);
- buf += tocopy;
- len -= tocopy;
- }
- }
- }
-err2:
- for (i = 0; i < entries; i++) {
- if (vpd_buf[i])
- free_page((unsigned long) vpd_buf[i]);
- }
- free_page((unsigned long) le);
-err1:
- kfree(vpd_buf);
- return rc;
-}
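-
-/*
- * Worked example for the entry/modulo math above (illustrative only):
- * with len = 9000 and SG_BUFFER_SIZE = 4096,
- *	entries = 9000 / 4096 = 2, mod = 9000 % 4096 = 808 -> entries = 3
- * so the list describes two full 4096-byte pages plus a final 808-byte
- * tail, which is exactly what the le[i].len assignments encode.
- */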
-
-static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info)
-{
- return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info);
-}
-
-static irqreturn_t guest_psl_irq(int irq, void *data)
-{
- struct cxl_context *ctx = data;
- struct cxl_irq_info irq_info;
- int rc;
-
- pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq);
- rc = guest_get_irq_info(ctx, &irq_info);
- if (rc) {
- WARN(1, "Unable to get IRQ info: %i\n", rc);
- return IRQ_HANDLED;
- }
-
- rc = cxl_irq_psl8(irq, ctx, &irq_info);
- return rc;
-}
-
-static int afu_read_error_state(struct cxl_afu *afu, int *state_out)
-{
- u64 state;
- int rc = 0;
-
- if (!afu)
- return -EIO;
-
- rc = cxl_h_read_error_state(afu->guest->handle, &state);
- if (!rc) {
- WARN_ON(state != H_STATE_NORMAL &&
- state != H_STATE_DISABLE &&
- state != H_STATE_TEMP_UNAVAILABLE &&
- state != H_STATE_PERM_UNAVAILABLE);
- *state_out = state & 0xffffffff;
- }
- return rc;
-}
-
-static irqreturn_t guest_slice_irq_err(int irq, void *data)
-{
- struct cxl_afu *afu = data;
- int rc;
- u64 serr, afu_error, dsisr;
-
- rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr);
- if (rc) {
- dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc);
- return IRQ_HANDLED;
- }
- afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
- dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
- cxl_afu_decode_psl_serr(afu, serr);
- dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
- dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);
-
- rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr);
- if (rc)
- dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n",
- rc);
-
- return IRQ_HANDLED;
-}
-
-
-static int irq_alloc_range(struct cxl *adapter, int len, int *irq)
-{
- int i, n;
- struct irq_avail *cur;
-
- for (i = 0; i < adapter->guest->irq_nranges; i++) {
- cur = &adapter->guest->irq_avail[i];
- n = bitmap_find_next_zero_area(cur->bitmap, cur->range,
- 0, len, 0);
- if (n < cur->range) {
- bitmap_set(cur->bitmap, n, len);
- *irq = cur->offset + n;
- pr_devel("guest: allocate IRQs %#x->%#x\n",
- *irq, *irq + len - 1);
-
- return 0;
- }
- }
- return -ENOSPC;
-}
-
-static int irq_free_range(struct cxl *adapter, int irq, int len)
-{
- int i, n;
- struct irq_avail *cur;
-
- if (len == 0)
- return -ENOENT;
-
- for (i = 0; i < adapter->guest->irq_nranges; i++) {
- cur = &adapter->guest->irq_avail[i];
- if (irq >= cur->offset &&
- (irq + len) <= (cur->offset + cur->range)) {
- n = irq - cur->offset;
- bitmap_clear(cur->bitmap, n, len);
- pr_devel("guest: release IRQs %#x->%#x\n",
- irq, irq + len - 1);
- return 0;
- }
- }
- return -ENOENT;
-}
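-
-/*
- * Hedged usage sketch (mirrors guest_alloc_one_irq() below): the range
- * helpers do no locking themselves, so callers wrap them in
- * irq_alloc_lock:
- *
- *	int hwirq;
- *
- *	spin_lock(&adapter->guest->irq_alloc_lock);
- *	if (irq_alloc_range(adapter, 1, &hwirq))
- *		hwirq = -ENOSPC;
- *	spin_unlock(&adapter->guest->irq_alloc_lock);
- */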
-
-static int guest_reset(struct cxl *adapter)
-{
- struct cxl_afu *afu = NULL;
- int i, rc;
-
- pr_devel("Adapter reset request\n");
- spin_lock(&adapter->afu_list_lock);
- for (i = 0; i < adapter->slices; i++) {
- if ((afu = adapter->afu[i])) {
- pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
- pci_channel_io_frozen);
- cxl_context_detach_all(afu);
- }
- }
-
- rc = cxl_h_reset_adapter(adapter->guest->handle);
- for (i = 0; i < adapter->slices; i++) {
- if (!rc && (afu = adapter->afu[i])) {
- pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
- pci_channel_io_normal);
- pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
- }
- }
- spin_unlock(&adapter->afu_list_lock);
- return rc;
-}
-
-static int guest_alloc_one_irq(struct cxl *adapter)
-{
- int irq;
-
- spin_lock(&adapter->guest->irq_alloc_lock);
- if (irq_alloc_range(adapter, 1, &irq))
- irq = -ENOSPC;
- spin_unlock(&adapter->guest->irq_alloc_lock);
- return irq;
-}
-
-static void guest_release_one_irq(struct cxl *adapter, int irq)
-{
- spin_lock(&adapter->guest->irq_alloc_lock);
- irq_free_range(adapter, irq, 1);
- spin_unlock(&adapter->guest->irq_alloc_lock);
-}
-
-static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
- struct cxl *adapter, unsigned int num)
-{
- int i, try, irq;
-
- memset(irqs, 0, sizeof(struct cxl_irq_ranges));
-
- spin_lock(&adapter->guest->irq_alloc_lock);
- for (i = 0; i < CXL_IRQ_RANGES && num; i++) {
- try = num;
- while (try) {
- if (irq_alloc_range(adapter, try, &irq) == 0)
- break;
- try /= 2;
- }
- if (!try)
- goto error;
- irqs->offset[i] = irq;
- irqs->range[i] = try;
- num -= try;
- }
- if (num)
- goto error;
- spin_unlock(&adapter->guest->irq_alloc_lock);
- return 0;
-
-error:
- for (i = 0; i < CXL_IRQ_RANGES; i++)
- irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
- spin_unlock(&adapter->guest->irq_alloc_lock);
- return -ENOSPC;
-}
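-
-/*
- * Worked example (illustrative): for num = 8 with no contiguous run of
- * 8 free IRQs, the inner loop retries at try = 4, then 2, then 1 until
- * irq_alloc_range() succeeds, e.g. ending up with range[0] = 4 and
- * range[1] = 4. If num is still nonzero after CXL_IRQ_RANGES passes,
- * the error path rolls back every partial allocation.
- */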
-
-static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs,
- struct cxl *adapter)
-{
- int i;
-
- spin_lock(&adapter->guest->irq_alloc_lock);
- for (i = 0; i < CXL_IRQ_RANGES; i++)
- irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
- spin_unlock(&adapter->guest->irq_alloc_lock);
-}
-
-static int guest_register_serr_irq(struct cxl_afu *afu)
-{
- afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
- dev_name(&afu->dev));
- if (!afu->err_irq_name)
- return -ENOMEM;
-
- if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq,
- guest_slice_irq_err, afu, afu->err_irq_name))) {
- kfree(afu->err_irq_name);
- afu->err_irq_name = NULL;
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void guest_release_serr_irq(struct cxl_afu *afu)
-{
- cxl_unmap_irq(afu->serr_virq, afu);
- cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
- kfree(afu->err_irq_name);
-}
-
-static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
-{
- return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token,
- tfc >> 32, (psl_reset_mask != 0));
-}
-
-static void disable_afu_irqs(struct cxl_context *ctx)
-{
- irq_hw_number_t hwirq;
- unsigned int virq;
- int r, i;
-
- pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice);
- for (r = 0; r < CXL_IRQ_RANGES; r++) {
- hwirq = ctx->irqs.offset[r];
- for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
- virq = irq_find_mapping(NULL, hwirq);
- disable_irq(virq);
- }
- }
-}
-
-static void enable_afu_irqs(struct cxl_context *ctx)
-{
- irq_hw_number_t hwirq;
- unsigned int virq;
- int r, i;
-
- pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice);
- for (r = 0; r < CXL_IRQ_RANGES; r++) {
- hwirq = ctx->irqs.offset[r];
- for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
- virq = irq_find_mapping(NULL, hwirq);
- enable_irq(virq);
- }
- }
-}
-
-static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx,
- u64 offset, u64 *val)
-{
- unsigned long cr;
- char c;
- int rc = 0;
-
- if (afu->crs_len < sz)
- return -ENOENT;
-
- if (unlikely(offset >= afu->crs_len))
- return -ERANGE;
-
- cr = get_zeroed_page(GFP_KERNEL);
- if (!cr)
- return -ENOMEM;
-
- rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset,
- virt_to_phys((void *)cr), sz);
- if (rc)
- goto err;
-
- switch (sz) {
- case 1:
- c = *((char *) cr);
- *val = c;
- break;
- case 2:
- *val = in_le16((u16 *)cr);
- break;
- case 4:
- *val = in_le32((unsigned *)cr);
- break;
- case 8:
- *val = in_le64((u64 *)cr);
- break;
- default:
- WARN_ON(1);
- }
-err:
- free_page(cr);
- return rc;
-}
-
-static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset,
- u32 *out)
-{
- int rc;
- u64 val;
-
- rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val);
- if (!rc)
- *out = (u32) val;
- return rc;
-}
-
-static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset,
- u16 *out)
-{
- int rc;
- u64 val;
-
- rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val);
- if (!rc)
- *out = (u16) val;
- return rc;
-}
-
-static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset,
- u8 *out)
-{
- int rc;
- u64 val;
-
- rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val);
- if (!rc)
- *out = (u8) val;
- return rc;
-}
-
-static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset,
- u64 *out)
-{
- return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out);
-}
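-
-/*
- * Hedged usage sketch (hypothetical caller): AFU configuration records
- * follow a PCI-like layout, so the 32-bit word at offset 0 of record 0
- * would hold the vendor/device ID pair:
- *
- *	u32 id;
- *
- *	if (!guest_afu_cr_read32(afu, 0, 0, &id))
- *		pr_devel("cr0 vendor/device: %#x\n", id);
- */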
-
-static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
-{
- /* config record is not writable from guest */
- return -EPERM;
-}
-
-static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
-{
- /* config record is not writable from guest */
- return -EPERM;
-}
-
-static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
-{
- /* config record is not writable from guest */
- return -EPERM;
-}
-
-static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
-{
- struct cxl_process_element_hcall *elem;
- struct cxl *adapter = ctx->afu->adapter;
- const struct cred *cred;
- u32 pid, idx;
- int rc, r, i;
- u64 mmio_addr, mmio_size;
- __be64 flags = 0;
-
- /* Must be 8 byte aligned and cannot cross a 4096 byte boundary */
- if (!(elem = (struct cxl_process_element_hcall *)
- get_zeroed_page(GFP_KERNEL)))
- return -ENOMEM;
-
- elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION);
- if (ctx->kernel) {
- pid = 0;
- flags |= CXL_PE_TRANSLATION_ENABLED;
- flags |= CXL_PE_PRIVILEGED_PROCESS;
- if (mfmsr() & MSR_SF)
- flags |= CXL_PE_64_BIT;
- } else {
- pid = current->pid;
- flags |= CXL_PE_PROBLEM_STATE;
- flags |= CXL_PE_TRANSLATION_ENABLED;
- if (!test_tsk_thread_flag(current, TIF_32BIT))
- flags |= CXL_PE_64_BIT;
- cred = get_current_cred();
- if (uid_eq(cred->euid, GLOBAL_ROOT_UID))
- flags |= CXL_PE_PRIVILEGED_PROCESS;
- put_cred(cred);
- }
- elem->flags = cpu_to_be64(flags);
- elem->common.tid = cpu_to_be32(0); /* Unused */
- elem->common.pid = cpu_to_be32(pid);
- elem->common.csrp = cpu_to_be64(0); /* disable */
- elem->common.u.psl8.aurp0 = cpu_to_be64(0); /* disable */
- elem->common.u.psl8.aurp1 = cpu_to_be64(0); /* disable */
-
- cxl_prefault(ctx, wed);
-
- elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0);
- elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1);
-
- /*
- * Ensure we have at least one interrupt allocated to take faults for
- * kernel contexts that may not have allocated any AFU IRQs at all:
- */
- if (ctx->irqs.range[0] == 0) {
- rc = afu_register_irqs(ctx, 0);
- if (rc)
- goto out_free;
- }
-
- for (r = 0; r < CXL_IRQ_RANGES; r++) {
- for (i = 0; i < ctx->irqs.range[r]; i++) {
- if (r == 0 && i == 0) {
- elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]);
- } else {
- idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset;
- elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8);
- }
- }
- }
- elem->common.amr = cpu_to_be64(amr);
- elem->common.wed = cpu_to_be64(wed);
-
- disable_afu_irqs(ctx);
-
- rc = cxl_h_attach_process(ctx->afu->guest->handle, elem,
- &ctx->process_token, &mmio_addr, &mmio_size);
- if (rc == H_SUCCESS) {
- if (ctx->master || !ctx->afu->pp_psa) {
- ctx->psn_phys = ctx->afu->psn_phys;
- ctx->psn_size = ctx->afu->adapter->ps_size;
- } else {
- ctx->psn_phys = mmio_addr;
- ctx->psn_size = mmio_size;
- }
- if (ctx->afu->pp_psa && mmio_size &&
- ctx->afu->pp_size == 0) {
- /*
- * There's no property in the device tree to read the
-			 * pp_size. We only find out at the first attach.
-			 * Compared to bare-metal, it is too late and we
-			 * should really lock here. However, on PowerVM,
-			 * pp_size is really only used for display in /sys.
-			 * This is being discussed with pHyp for their next
-			 * release.
- */
- ctx->afu->pp_size = mmio_size;
- }
- /* from PAPR: process element is bytes 4-7 of process token */
- ctx->external_pe = ctx->process_token & 0xFFFFFFFF;
- pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx",
- ctx->pe, ctx->external_pe, ctx->psn_size);
- ctx->pe_inserted = true;
- enable_afu_irqs(ctx);
- }
-
-out_free:
- free_page((u64)elem);
- return rc;
-}
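-
-/*
- * Worked example for the ISN bitmap math above (illustrative): an
- * interrupt with idx = 10 lands in applicationVirtualIsnBitmap[1]
- * (10 / 8) under mask 0x80 >> (10 % 8) == 0x20, i.e. the bitmap is
- * MSB-first within each byte.
- */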
-
-static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
-{
- pr_devel("in %s\n", __func__);
-
- ctx->kernel = kernel;
- if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
- return attach_afu_directed(ctx, wed, amr);
-
- /* dedicated mode not supported on FW840 */
-
- return -EINVAL;
-}
-
-static int detach_afu_directed(struct cxl_context *ctx)
-{
- if (!ctx->pe_inserted)
- return 0;
- if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token))
- return -1;
- return 0;
-}
-
-static int guest_detach_process(struct cxl_context *ctx)
-{
- pr_devel("in %s\n", __func__);
- trace_cxl_detach(ctx);
-
- if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
- return -EIO;
-
- if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
- return detach_afu_directed(ctx);
-
- return -EINVAL;
-}
-
-static void guest_release_afu(struct device *dev)
-{
- struct cxl_afu *afu = to_cxl_afu(dev);
-
- pr_devel("%s\n", __func__);
-
- idr_destroy(&afu->contexts_idr);
-
- kfree(afu->guest);
- kfree(afu);
-}
-
-ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len)
-{
- return guest_collect_vpd(NULL, afu, buf, len);
-}
-
-#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
-static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
- loff_t off, size_t count)
-{
- void *tbuf = NULL;
- int rc = 0;
-
- tbuf = (void *) get_zeroed_page(GFP_KERNEL);
- if (!tbuf)
- return -ENOMEM;
-
- rc = cxl_h_get_afu_err(afu->guest->handle,
- off & 0x7,
- virt_to_phys(tbuf),
- count);
- if (rc)
- goto err;
-
- if (count > ERR_BUFF_MAX_COPY_SIZE)
- count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
- memcpy(buf, tbuf, count);
-err:
- free_page((u64)tbuf);
-
- return rc;
-}
-
-static int guest_afu_check_and_enable(struct cxl_afu *afu)
-{
- return 0;
-}
-
-static bool guest_support_attributes(const char *attr_name,
- enum cxl_attrs type)
-{
- switch (type) {
- case CXL_ADAPTER_ATTRS:
- if ((strcmp(attr_name, "base_image") == 0) ||
- (strcmp(attr_name, "load_image_on_perst") == 0) ||
- (strcmp(attr_name, "perst_reloads_same_image") == 0) ||
- (strcmp(attr_name, "image_loaded") == 0))
- return false;
- break;
- case CXL_AFU_MASTER_ATTRS:
- if ((strcmp(attr_name, "pp_mmio_off") == 0))
- return false;
- break;
- case CXL_AFU_ATTRS:
- break;
- default:
- break;
- }
-
- return true;
-}
-
-static int activate_afu_directed(struct cxl_afu *afu)
-{
- int rc;
-
- dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice);
-
- afu->current_mode = CXL_MODE_DIRECTED;
-
- afu->num_procs = afu->max_procs_virtualised;
-
- if ((rc = cxl_chardev_m_afu_add(afu)))
- return rc;
-
- if ((rc = cxl_sysfs_afu_m_add(afu)))
- goto err;
-
- if ((rc = cxl_chardev_s_afu_add(afu)))
- goto err1;
-
- return 0;
-err1:
- cxl_sysfs_afu_m_remove(afu);
-err:
- cxl_chardev_afu_remove(afu);
- return rc;
-}
-
-static int guest_afu_activate_mode(struct cxl_afu *afu, int mode)
-{
- if (!mode)
- return 0;
- if (!(mode & afu->modes_supported))
- return -EINVAL;
-
- if (mode == CXL_MODE_DIRECTED)
- return activate_afu_directed(afu);
-
- if (mode == CXL_MODE_DEDICATED)
- dev_err(&afu->dev, "Dedicated mode not supported\n");
-
- return -EINVAL;
-}
-
-static int deactivate_afu_directed(struct cxl_afu *afu)
-{
- dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice);
-
- afu->current_mode = 0;
- afu->num_procs = 0;
-
- cxl_sysfs_afu_m_remove(afu);
- cxl_chardev_afu_remove(afu);
-
- cxl_ops->afu_reset(afu);
-
- return 0;
-}
-
-static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode)
-{
- if (!mode)
- return 0;
- if (!(mode & afu->modes_supported))
- return -EINVAL;
-
- if (mode == CXL_MODE_DIRECTED)
- return deactivate_afu_directed(afu);
- return 0;
-}
-
-static int guest_afu_reset(struct cxl_afu *afu)
-{
- pr_devel("AFU(%d) reset request\n", afu->slice);
- return cxl_h_reset_afu(afu->guest->handle);
-}
-
-static int guest_map_slice_regs(struct cxl_afu *afu)
-{
- if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) {
- dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n",
- afu->slice);
- return -ENOMEM;
- }
- return 0;
-}
-
-static void guest_unmap_slice_regs(struct cxl_afu *afu)
-{
- if (afu->p2n_mmio)
- iounmap(afu->p2n_mmio);
-}
-
-static int afu_update_state(struct cxl_afu *afu)
-{
- int rc, cur_state;
-
- rc = afu_read_error_state(afu, &cur_state);
- if (rc)
- return rc;
-
- if (afu->guest->previous_state == cur_state)
- return 0;
-
- pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state);
-
- switch (cur_state) {
- case H_STATE_NORMAL:
- afu->guest->previous_state = cur_state;
- break;
-
- case H_STATE_DISABLE:
- pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
- pci_channel_io_frozen);
-
- cxl_context_detach_all(afu);
- if ((rc = cxl_ops->afu_reset(afu)))
- pr_devel("reset hcall failed %d\n", rc);
-
- rc = afu_read_error_state(afu, &cur_state);
- if (!rc && cur_state == H_STATE_NORMAL) {
- pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
- pci_channel_io_normal);
- pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
- }
- afu->guest->previous_state = 0;
- break;
-
- case H_STATE_TEMP_UNAVAILABLE:
- afu->guest->previous_state = cur_state;
- break;
-
- case H_STATE_PERM_UNAVAILABLE:
- dev_err(&afu->dev, "AFU is in permanent error state\n");
- pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
- pci_channel_io_perm_failure);
- afu->guest->previous_state = cur_state;
- break;
-
- default:
- pr_err("Unexpected AFU(%d) error state: %#x\n",
- afu->slice, cur_state);
- return -EINVAL;
- }
-
- return rc;
-}
-
-static void afu_handle_errstate(struct work_struct *work)
-{
- struct cxl_afu_guest *afu_guest =
- container_of(to_delayed_work(work), struct cxl_afu_guest, work_err);
-
- if (!afu_update_state(afu_guest->parent) &&
- afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE)
- return;
-
- if (afu_guest->handle_err)
- schedule_delayed_work(&afu_guest->work_err,
- msecs_to_jiffies(3000));
-}
-
-static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu)
-{
- int state;
-
- if (afu && (!afu_read_error_state(afu, &state))) {
- if (state == H_STATE_NORMAL)
- return true;
- }
-
- return false;
-}
-
-static int afu_properties_look_ok(struct cxl_afu *afu)
-{
- if (afu->pp_irqs < 0) {
- dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n");
- return -EINVAL;
- }
-
- if (afu->max_procs_virtualised < 1) {
- dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np)
-{
- struct cxl_afu *afu;
- bool free = true;
- int rc;
-
- pr_devel("in %s - AFU(%d)\n", __func__, slice);
- if (!(afu = cxl_alloc_afu(adapter, slice)))
- return -ENOMEM;
-
- if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) {
- kfree(afu);
- return -ENOMEM;
- }
-
- if ((rc = dev_set_name(&afu->dev, "afu%i.%i",
- adapter->adapter_num,
- slice)))
- goto err1;
-
- adapter->slices++;
-
- if ((rc = cxl_of_read_afu_handle(afu, afu_np)))
- goto err1;
-
- if ((rc = cxl_ops->afu_reset(afu)))
- goto err1;
-
- if ((rc = cxl_of_read_afu_properties(afu, afu_np)))
- goto err1;
-
- if ((rc = afu_properties_look_ok(afu)))
- goto err1;
-
- if ((rc = guest_map_slice_regs(afu)))
- goto err1;
-
- if ((rc = guest_register_serr_irq(afu)))
- goto err2;
-
- /*
- * After we call this function we must not free the afu directly, even
- * if it returns an error!
- */
- if ((rc = cxl_register_afu(afu)))
- goto err_put_dev;
-
- if ((rc = cxl_sysfs_afu_add(afu)))
- goto err_del_dev;
-
- /*
- * pHyp doesn't expose the programming models supported by the
- * AFU. pHyp currently only supports directed mode. If it adds
- * dedicated mode later, this version of cxl has no way to
- * detect it. So we'll initialize the driver, but the first
- * attach will fail.
-	 * This is being discussed with pHyp to do better (likely a new
-	 * property).
- */
- if (afu->max_procs_virtualised == 1)
- afu->modes_supported = CXL_MODE_DEDICATED;
- else
- afu->modes_supported = CXL_MODE_DIRECTED;
-
- if ((rc = cxl_afu_select_best_mode(afu)))
- goto err_remove_sysfs;
-
- adapter->afu[afu->slice] = afu;
-
- afu->enabled = true;
-
- /*
-	 * Wake up periodically to check the state of the AFU,
-	 * using the "afu" pointer stored in the guest structure.
- */
- afu->guest->parent = afu;
- afu->guest->handle_err = true;
- INIT_DELAYED_WORK(&afu->guest->work_err, afu_handle_errstate);
- schedule_delayed_work(&afu->guest->work_err, msecs_to_jiffies(1000));
-
- if ((rc = cxl_pci_vphb_add(afu)))
- dev_info(&afu->dev, "Can't register vPHB\n");
-
- return 0;
-
-err_remove_sysfs:
- cxl_sysfs_afu_remove(afu);
-err_del_dev:
- device_del(&afu->dev);
-err_put_dev:
- put_device(&afu->dev);
- free = false;
- guest_release_serr_irq(afu);
-err2:
- guest_unmap_slice_regs(afu);
-err1:
- if (free) {
- kfree(afu->guest);
- kfree(afu);
- }
- return rc;
-}
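-
-/*
- * Note on the unwind above: once cxl_register_afu() has run, the
- * device core owns the allocation and guest_release_afu() frees it
- * from the final put_device(); the "free" flag keeps the err1 path
- * from double-freeing in that case.
- */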
-
-void cxl_guest_remove_afu(struct cxl_afu *afu)
-{
- if (!afu)
- return;
-
- /* flush and stop pending job */
- afu->guest->handle_err = false;
- flush_delayed_work(&afu->guest->work_err);
-
- cxl_pci_vphb_remove(afu);
- cxl_sysfs_afu_remove(afu);
-
- spin_lock(&afu->adapter->afu_list_lock);
- afu->adapter->afu[afu->slice] = NULL;
- spin_unlock(&afu->adapter->afu_list_lock);
-
- cxl_context_detach_all(afu);
- cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
- guest_release_serr_irq(afu);
- guest_unmap_slice_regs(afu);
-
- device_unregister(&afu->dev);
-}
-
-static void free_adapter(struct cxl *adapter)
-{
- struct irq_avail *cur;
- int i;
-
- if (adapter->guest) {
- if (adapter->guest->irq_avail) {
- for (i = 0; i < adapter->guest->irq_nranges; i++) {
- cur = &adapter->guest->irq_avail[i];
- bitmap_free(cur->bitmap);
- }
- kfree(adapter->guest->irq_avail);
- }
- kfree(adapter->guest->status);
- kfree(adapter->guest);
- }
- cxl_remove_adapter_nr(adapter);
- kfree(adapter);
-}
-
-static int properties_look_ok(struct cxl *adapter)
-{
- /* The absence of this property means that the operational
- * status is unknown or okay
- */
- if (strlen(adapter->guest->status) &&
- strcmp(adapter->guest->status, "okay")) {
- pr_err("ABORTING:Bad operational status of the device\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
-{
- return guest_collect_vpd(adapter, NULL, buf, len);
-}
-
-void cxl_guest_remove_adapter(struct cxl *adapter)
-{
- pr_devel("in %s\n", __func__);
-
- cxl_sysfs_adapter_remove(adapter);
-
- cxl_guest_remove_chardev(adapter);
- device_unregister(&adapter->dev);
-}
-
-static void release_adapter(struct device *dev)
-{
- free_adapter(to_cxl_adapter(dev));
-}
-
-struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev)
-{
- struct cxl *adapter;
- bool free = true;
- int rc;
-
- if (!(adapter = cxl_alloc_adapter()))
- return ERR_PTR(-ENOMEM);
-
- if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) {
- free_adapter(adapter);
- return ERR_PTR(-ENOMEM);
- }
-
- adapter->slices = 0;
- adapter->guest->pdev = pdev;
- adapter->dev.parent = &pdev->dev;
- adapter->dev.release = release_adapter;
- dev_set_drvdata(&pdev->dev, adapter);
-
- /*
- * Hypervisor controls PSL timebase initialization (p1 register).
- * On FW840, PSL is initialized.
- */
- adapter->psl_timebase_synced = true;
-
- if ((rc = cxl_of_read_adapter_handle(adapter, np)))
- goto err1;
-
- if ((rc = cxl_of_read_adapter_properties(adapter, np)))
- goto err1;
-
- if ((rc = properties_look_ok(adapter)))
- goto err1;
-
- if ((rc = cxl_guest_add_chardev(adapter)))
- goto err1;
-
- /*
- * After we call this function we must not free the adapter directly,
- * even if it returns an error!
- */
- if ((rc = cxl_register_adapter(adapter)))
- goto err_put_dev;
-
- if ((rc = cxl_sysfs_adapter_add(adapter)))
- goto err_del_dev;
-
- /* release the context lock as the adapter is configured */
- cxl_adapter_context_unlock(adapter);
-
- return adapter;
-
-err_del_dev:
- device_del(&adapter->dev);
-err_put_dev:
- put_device(&adapter->dev);
- free = false;
- cxl_guest_remove_chardev(adapter);
-err1:
- if (free)
- free_adapter(adapter);
- return ERR_PTR(rc);
-}
-
-void cxl_guest_reload_module(struct cxl *adapter)
-{
- struct platform_device *pdev;
-
- pdev = adapter->guest->pdev;
- cxl_guest_remove_adapter(adapter);
-
- cxl_of_probe(pdev);
-}
-
-const struct cxl_backend_ops cxl_guest_ops = {
- .module = THIS_MODULE,
- .adapter_reset = guest_reset,
- .alloc_one_irq = guest_alloc_one_irq,
- .release_one_irq = guest_release_one_irq,
- .alloc_irq_ranges = guest_alloc_irq_ranges,
- .release_irq_ranges = guest_release_irq_ranges,
- .setup_irq = NULL,
- .handle_psl_slice_error = guest_handle_psl_slice_error,
- .psl_interrupt = guest_psl_irq,
- .ack_irq = guest_ack_irq,
- .attach_process = guest_attach_process,
- .detach_process = guest_detach_process,
- .update_ivtes = NULL,
- .support_attributes = guest_support_attributes,
- .link_ok = guest_link_ok,
- .release_afu = guest_release_afu,
- .afu_read_err_buffer = guest_afu_read_err_buffer,
- .afu_check_and_enable = guest_afu_check_and_enable,
- .afu_activate_mode = guest_afu_activate_mode,
- .afu_deactivate_mode = guest_afu_deactivate_mode,
- .afu_reset = guest_afu_reset,
- .afu_cr_read8 = guest_afu_cr_read8,
- .afu_cr_read16 = guest_afu_cr_read16,
- .afu_cr_read32 = guest_afu_cr_read32,
- .afu_cr_read64 = guest_afu_cr_read64,
- .afu_cr_write8 = guest_afu_cr_write8,
- .afu_cr_write16 = guest_afu_cr_write16,
- .afu_cr_write32 = guest_afu_cr_write32,
- .read_adapter_vpd = cxl_guest_read_adapter_vpd,
-};
diff --git a/drivers/misc/cxl/hcalls.c b/drivers/misc/cxl/hcalls.c
deleted file mode 100644
index aba5e20eeb1f..000000000000
--- a/drivers/misc/cxl/hcalls.c
+++ /dev/null
@@ -1,643 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2015 IBM Corp.
- */
-
-
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <asm/byteorder.h>
-#include "hcalls.h"
-#include "trace.h"
-
-#define CXL_HCALL_TIMEOUT 60000
-#define CXL_HCALL_TIMEOUT_DOWNLOAD 120000
-
-#define H_ATTACH_CA_PROCESS 0x344
-#define H_CONTROL_CA_FUNCTION 0x348
-#define H_DETACH_CA_PROCESS 0x34C
-#define H_COLLECT_CA_INT_INFO 0x350
-#define H_CONTROL_CA_FAULTS 0x354
-#define H_DOWNLOAD_CA_FUNCTION 0x35C
-#define H_DOWNLOAD_CA_FACILITY 0x364
-#define H_CONTROL_CA_FACILITY 0x368
-
-#define H_CONTROL_CA_FUNCTION_RESET 1 /* perform a reset */
-#define H_CONTROL_CA_FUNCTION_SUSPEND_PROCESS 2 /* suspend a process from being executed */
-#define H_CONTROL_CA_FUNCTION_RESUME_PROCESS 3 /* resume a process to be executed */
-#define H_CONTROL_CA_FUNCTION_READ_ERR_STATE 4 /* read the error state */
-#define H_CONTROL_CA_FUNCTION_GET_AFU_ERR 5 /* collect the AFU error buffer */
-#define H_CONTROL_CA_FUNCTION_GET_CONFIG 6 /* collect configuration record */
-#define H_CONTROL_CA_FUNCTION_GET_DOWNLOAD_STATE 7 /* query to return download status */
-#define H_CONTROL_CA_FUNCTION_TERMINATE_PROCESS 8 /* terminate the process before completion */
-#define H_CONTROL_CA_FUNCTION_COLLECT_VPD 9 /* collect VPD */
-#define H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT 11 /* read the function-wide error data based on an interrupt */
-#define H_CONTROL_CA_FUNCTION_ACK_FUNCTION_ERR_INT 12 /* acknowledge function-wide error data based on an interrupt */
-#define H_CONTROL_CA_FUNCTION_GET_ERROR_LOG 13 /* retrieve the Platform Log ID (PLID) of an error log */
-
-#define H_CONTROL_CA_FAULTS_RESPOND_PSL 1
-#define H_CONTROL_CA_FAULTS_RESPOND_AFU 2
-
-#define H_CONTROL_CA_FACILITY_RESET 1 /* perform a reset */
-#define H_CONTROL_CA_FACILITY_COLLECT_VPD 2 /* collect VPD */
-
-#define H_DOWNLOAD_CA_FACILITY_DOWNLOAD 1 /* download adapter image */
-#define H_DOWNLOAD_CA_FACILITY_VALIDATE 2 /* validate adapter image */
-
-
-#define _CXL_LOOP_HCALL(call, rc, retbuf, fn, ...) \
- { \
- unsigned int delay, total_delay = 0; \
- u64 token = 0; \
- \
- memset(retbuf, 0, sizeof(retbuf)); \
- while (1) { \
- rc = call(fn, retbuf, __VA_ARGS__, token); \
- token = retbuf[0]; \
- if (rc != H_BUSY && !H_IS_LONG_BUSY(rc)) \
- break; \
- \
- if (rc == H_BUSY) \
- delay = 10; \
- else \
- delay = get_longbusy_msecs(rc); \
- \
- total_delay += delay; \
- if (total_delay > CXL_HCALL_TIMEOUT) { \
- WARN(1, "Warning: Giving up waiting for CXL hcall " \
- "%#x after %u msec\n", fn, total_delay); \
- rc = H_BUSY; \
- break; \
- } \
- msleep(delay); \
- } \
- }
-#define CXL_H_WAIT_UNTIL_DONE(...) _CXL_LOOP_HCALL(plpar_hcall, __VA_ARGS__)
-#define CXL_H9_WAIT_UNTIL_DONE(...) _CXL_LOOP_HCALL(plpar_hcall9, __VA_ARGS__)
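-
-/*
- * Illustrative expansion (assuming the standard pseries plpar_hcall
- * calling convention): CXL_H_WAIT_UNTIL_DONE(rc, retbuf,
- * H_DETACH_CA_PROCESS, unit_address, process_token) loops on
- *
- *	rc = plpar_hcall(H_DETACH_CA_PROCESS, retbuf,
- *			 unit_address, process_token, token);
- *
- * sleeping 10 msec on H_BUSY or get_longbusy_msecs(rc) on long-busy
- * codes, and gives up once CXL_HCALL_TIMEOUT (60 s) of delay has
- * accumulated.
- */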
-
-#define _PRINT_MSG(rc, format, ...) \
- { \
- if ((rc != H_SUCCESS) && (rc != H_CONTINUE)) \
- pr_err(format, __VA_ARGS__); \
- else \
- pr_devel(format, __VA_ARGS__); \
- } \
-
-
-static char *afu_op_names[] = {
- "UNKNOWN_OP", /* 0 undefined */
- "RESET", /* 1 */
- "SUSPEND_PROCESS", /* 2 */
- "RESUME_PROCESS", /* 3 */
- "READ_ERR_STATE", /* 4 */
- "GET_AFU_ERR", /* 5 */
- "GET_CONFIG", /* 6 */
- "GET_DOWNLOAD_STATE", /* 7 */
- "TERMINATE_PROCESS", /* 8 */
- "COLLECT_VPD", /* 9 */
- "UNKNOWN_OP", /* 10 undefined */
- "GET_FUNCTION_ERR_INT", /* 11 */
- "ACK_FUNCTION_ERR_INT", /* 12 */
- "GET_ERROR_LOG", /* 13 */
-};
-
-static char *control_adapter_op_names[] = {
- "UNKNOWN_OP", /* 0 undefined */
- "RESET", /* 1 */
- "COLLECT_VPD", /* 2 */
-};
-
-static char *download_op_names[] = {
- "UNKNOWN_OP", /* 0 undefined */
- "DOWNLOAD", /* 1 */
- "VALIDATE", /* 2 */
-};
-
-static char *op_str(unsigned int op, char *name_array[], int array_len)
-{
- if (op >= array_len)
- return "UNKNOWN_OP";
- return name_array[op];
-}
-
-#define OP_STR(op, name_array) op_str(op, name_array, ARRAY_SIZE(name_array))
-
-#define OP_STR_AFU(op) OP_STR(op, afu_op_names)
-#define OP_STR_CONTROL_ADAPTER(op) OP_STR(op, control_adapter_op_names)
-#define OP_STR_DOWNLOAD_ADAPTER(op) OP_STR(op, download_op_names)
-
-
-long cxl_h_attach_process(u64 unit_address,
- struct cxl_process_element_hcall *element,
- u64 *process_token, u64 *mmio_addr, u64 *mmio_size)
-{
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
- long rc;
-
- CXL_H_WAIT_UNTIL_DONE(rc, retbuf, H_ATTACH_CA_PROCESS, unit_address, virt_to_phys(element));
- _PRINT_MSG(rc, "cxl_h_attach_process(%#.16llx, %#.16lx): %li\n",
- unit_address, virt_to_phys(element), rc);
- trace_cxl_hcall_attach(unit_address, virt_to_phys(element), retbuf[0], retbuf[1], retbuf[2], rc);
-
- pr_devel("token: 0x%.8lx mmio_addr: 0x%lx mmio_size: 0x%lx\nProcess Element Structure:\n",
- retbuf[0], retbuf[1], retbuf[2]);
- cxl_dump_debug_buffer(element, sizeof(*element));
-
- switch (rc) {
- case H_SUCCESS: /* The process info is attached to the coherent platform function */
- *process_token = retbuf[0];
- if (mmio_addr)
- *mmio_addr = retbuf[1];
- if (mmio_size)
- *mmio_size = retbuf[2];
- return 0;
- case H_PARAMETER: /* An incorrect parameter was supplied. */
- case H_FUNCTION: /* The function is not supported. */
- return -EINVAL;
- case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
- case H_RESOURCE: /* The coherent platform function does not have enough additional resource to attach the process */
- case H_HARDWARE: /* A hardware event prevented the attach operation */
- case H_STATE: /* The coherent platform function is not in a valid state */
- case H_BUSY:
- return -EBUSY;
- default:
- WARN(1, "Unexpected return code: %lx", rc);
- return -EINVAL;
- }
-}
-
-/*
- * cxl_h_detach_process - Detach a process element from a coherent
- * platform function.
- */
-long cxl_h_detach_process(u64 unit_address, u64 process_token)
-{
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
- long rc;
-
- CXL_H_WAIT_UNTIL_DONE(rc, retbuf, H_DETACH_CA_PROCESS, unit_address, process_token);
- _PRINT_MSG(rc, "cxl_h_detach_process(%#.16llx, 0x%.8llx): %li\n", unit_address, process_token, rc);
- trace_cxl_hcall_detach(unit_address, process_token, rc);
-
- switch (rc) {
- case H_SUCCESS: /* The process was detached from the coherent platform function */
- return 0;
- case H_PARAMETER: /* An incorrect parameter was supplied. */
- return -EINVAL;
- case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
- case H_RESOURCE: /* The function has page table mappings for MMIO */
- case H_HARDWARE: /* A hardware event prevented the detach operation */
- case H_STATE: /* The coherent platform function is not in a valid state */
- case H_BUSY:
- return -EBUSY;
- default:
- WARN(1, "Unexpected return code: %lx", rc);
- return -EINVAL;
- }
-}
-
-/*
- * cxl_h_control_function - This H_CONTROL_CA_FUNCTION hypervisor call allows
- * the partition to manipulate or query
- * certain coherent platform function behaviors.
- */
-static long cxl_h_control_function(u64 unit_address, u64 op,
- u64 p1, u64 p2, u64 p3, u64 p4, u64 *out)
-{
- unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
- long rc;
-
- CXL_H9_WAIT_UNTIL_DONE(rc, retbuf, H_CONTROL_CA_FUNCTION, unit_address, op, p1, p2, p3, p4);
- _PRINT_MSG(rc, "cxl_h_control_function(%#.16llx, %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li\n",
- unit_address, OP_STR_AFU(op), p1, p2, p3, p4, retbuf[0], rc);
- trace_cxl_hcall_control_function(unit_address, OP_STR_AFU(op), p1, p2, p3, p4, retbuf[0], rc);
-
- switch (rc) {
- case H_SUCCESS: /* The operation is completed for the coherent platform function */
- if ((op == H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT ||
- op == H_CONTROL_CA_FUNCTION_READ_ERR_STATE ||
- op == H_CONTROL_CA_FUNCTION_COLLECT_VPD))
- *out = retbuf[0];
- return 0;
- case H_PARAMETER: /* An incorrect parameter was supplied. */
- case H_FUNCTION: /* The function is not supported. */
- case H_NOT_FOUND: /* The operation supplied was not valid */
- case H_NOT_AVAILABLE: /* The operation cannot be performed because the AFU has not been downloaded */
-	case H_SG_LIST: /* A block list entry was invalid */
- return -EINVAL;
- case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
- case H_RESOURCE: /* The function has page table mappings for MMIO */
- case H_HARDWARE: /* A hardware event prevented the attach operation */
- case H_STATE: /* The coherent platform function is not in a valid state */
- case H_BUSY:
- return -EBUSY;
- default:
- WARN(1, "Unexpected return code: %lx", rc);
- return -EINVAL;
- }
-}
-
-/*
- * cxl_h_reset_afu - Perform a reset to the coherent platform function.
- */
-long cxl_h_reset_afu(u64 unit_address)
-{
- return cxl_h_control_function(unit_address,
- H_CONTROL_CA_FUNCTION_RESET,
- 0, 0, 0, 0,
- NULL);
-}
-
-/*
- * cxl_h_suspend_process - Suspend a process from being executed
- * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
- * process was attached.
- */
-long cxl_h_suspend_process(u64 unit_address, u64 process_token)
-{
- return cxl_h_control_function(unit_address,
- H_CONTROL_CA_FUNCTION_SUSPEND_PROCESS,
- process_token, 0, 0, 0,
- NULL);
-}
-
-/*
- * cxl_h_resume_process - Resume a process to be executed
- * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
- * process was attached.
- */
-long cxl_h_resume_process(u64 unit_address, u64 process_token)
-{
- return cxl_h_control_function(unit_address,
- H_CONTROL_CA_FUNCTION_RESUME_PROCESS,
- process_token, 0, 0, 0,
- NULL);
-}
-
-/*
- * cxl_h_read_error_state - Checks the error state of the coherent
- * platform function.
- * R4 contains the error state
- */
-long cxl_h_read_error_state(u64 unit_address, u64 *state)
-{
- return cxl_h_control_function(unit_address,
- H_CONTROL_CA_FUNCTION_READ_ERR_STATE,
- 0, 0, 0, 0,
- state);
-}
-
-/*
- * cxl_h_get_afu_err - collect the AFU error buffer
- * Parameter1 = byte offset into error buffer to retrieve, valid values
- * are between 0 and (ibm,error-buffer-size - 1)
- * Parameter2 = 4K aligned real address of error buffer, to be filled in
- * Parameter3 = length of error buffer, valid values are 4K or less
- */
-long cxl_h_get_afu_err(u64 unit_address, u64 offset,
- u64 buf_address, u64 len)
-{
- return cxl_h_control_function(unit_address,
- H_CONTROL_CA_FUNCTION_GET_AFU_ERR,
- offset, buf_address, len, 0,
- NULL);
-}
-
-/*
- * cxl_h_get_config - collect configuration record for the
- * coherent platform function
- * Parameter1 = # of configuration record to retrieve, valid values are
- * between 0 and (ibm,#config-records - 1)
- * Parameter2 = byte offset into configuration record to retrieve,
- * valid values are between 0 and (ibm,config-record-size - 1)
- * Parameter3 = 4K aligned real address of configuration record buffer,
- * to be filled in
- * Parameter4 = length of configuration buffer, valid values are 4K or less
- */
-long cxl_h_get_config(u64 unit_address, u64 cr_num, u64 offset,
- u64 buf_address, u64 len)
-{
- return cxl_h_control_function(unit_address,
- H_CONTROL_CA_FUNCTION_GET_CONFIG,
- cr_num, offset, buf_address, len,
- NULL);
-}
-
-/*
- * cxl_h_terminate_process - Terminate the process before completion
- * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
- * process was attached.
- */
-long cxl_h_terminate_process(u64 unit_address, u64 process_token)
-{
- return cxl_h_control_function(unit_address,
- H_CONTROL_CA_FUNCTION_TERMINATE_PROCESS,
- process_token, 0, 0, 0,
- NULL);
-}
-
-/*
- * cxl_h_collect_vpd - Collect VPD for the coherent platform function.
- * Parameter1 = # of VPD record to retrieve, valid values are between 0
- * and (ibm,#config-records - 1).
- * Parameter2 = 4K naturally aligned real buffer containing block
- * list entries
- * Parameter3 = number of block list entries in the block list, valid
- * values are between 0 and 256
- */
-long cxl_h_collect_vpd(u64 unit_address, u64 record, u64 list_address,
- u64 num, u64 *out)
-{
- return cxl_h_control_function(unit_address,
- H_CONTROL_CA_FUNCTION_COLLECT_VPD,
- record, list_address, num, 0,
- out);
-}
-
-/*
- * cxl_h_get_fn_error_interrupt - Read the function-wide error data based on an interrupt
- */
-long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg)
-{
- return cxl_h_control_function(unit_address,
- H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT,
- 0, 0, 0, 0, reg);
-}
-
-/*
- * cxl_h_ack_fn_error_interrupt - Acknowledge function-wide error data
- * based on an interrupt
- * Parameter1 = value to write to the function-wide error interrupt register
- */
-long cxl_h_ack_fn_error_interrupt(u64 unit_address, u64 value)
-{
- return cxl_h_control_function(unit_address,
- H_CONTROL_CA_FUNCTION_ACK_FUNCTION_ERR_INT,
- value, 0, 0, 0,
- NULL);
-}
-
-/*
- * cxl_h_get_error_log - Retrieve the Platform Log ID (PLID) of
- * an error log
- */
-long cxl_h_get_error_log(u64 unit_address, u64 value)
-{
- return cxl_h_control_function(unit_address,
- H_CONTROL_CA_FUNCTION_GET_ERROR_LOG,
- 0, 0, 0, 0,
- NULL);
-}
-
-/*
- * cxl_h_collect_int_info - Collect interrupt info about a coherent
- * platform function after an interrupt occurred.
- */
-long cxl_h_collect_int_info(u64 unit_address, u64 process_token,
- struct cxl_irq_info *info)
-{
- long rc;
-
- BUG_ON(sizeof(*info) != sizeof(unsigned long[PLPAR_HCALL9_BUFSIZE]));
-
- rc = plpar_hcall9(H_COLLECT_CA_INT_INFO, (unsigned long *) info,
- unit_address, process_token);
- _PRINT_MSG(rc, "cxl_h_collect_int_info(%#.16llx, 0x%llx): %li\n",
- unit_address, process_token, rc);
- trace_cxl_hcall_collect_int_info(unit_address, process_token, rc);
-
- switch (rc) {
- case H_SUCCESS: /* The interrupt info is returned in return registers. */
- pr_devel("dsisr:%#llx, dar:%#llx, dsr:%#llx, pid_tid:%#llx, afu_err:%#llx, errstat:%#llx\n",
- info->dsisr, info->dar, info->dsr, info->reserved,
- info->afu_err, info->errstat);
- return 0;
- case H_PARAMETER: /* An incorrect parameter was supplied. */
- return -EINVAL;
- case H_AUTHORITY: /* The partition does not have authority to perform this hcall. */
- case H_HARDWARE: /* A hardware event prevented the collection of the interrupt info.*/
- case H_STATE: /* The coherent platform function is not in a valid state to collect interrupt info. */
- return -EBUSY;
- default:
- WARN(1, "Unexpected return code: %lx", rc);
- return -EINVAL;
- }
-}
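-
-/*
- * Note: the BUG_ON above enforces that struct cxl_irq_info is exactly
- * the size of the plpar_hcall9 return buffer, because the structure is
- * passed directly as retbuf and the hypervisor's return registers land
- * in its fields in declaration order.
- */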
-
-/*
- * cxl_h_control_faults - Control the operation of a coherent platform
- * function after a fault occurs.
- *
- * Parameters
- * control-mask: value to control the faults
- * looks like PSL_TFC_An shifted >> 32
- * reset-mask: mask to control reset of function faults
- * Set reset_mask = 1 to reset PSL errors
- */
-long cxl_h_control_faults(u64 unit_address, u64 process_token,
- u64 control_mask, u64 reset_mask)
-{
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
- long rc;
-
- memset(retbuf, 0, sizeof(retbuf));
-
- rc = plpar_hcall(H_CONTROL_CA_FAULTS, retbuf, unit_address,
- H_CONTROL_CA_FAULTS_RESPOND_PSL, process_token,
- control_mask, reset_mask);
- _PRINT_MSG(rc, "cxl_h_control_faults(%#.16llx, 0x%llx, %#llx, %#llx): %li (%#lx)\n",
- unit_address, process_token, control_mask, reset_mask,
- rc, retbuf[0]);
- trace_cxl_hcall_control_faults(unit_address, process_token,
- control_mask, reset_mask, retbuf[0], rc);
-
- switch (rc) {
- case H_SUCCESS: /* Faults were successfully controlled for the function. */
- return 0;
- case H_PARAMETER: /* An incorrect parameter was supplied. */
- return -EINVAL;
- case H_HARDWARE: /* A hardware event prevented the control of faults. */
- case H_STATE: /* The function was in an invalid state. */
- case H_AUTHORITY: /* The partition does not have authority to perform this hcall; the coherent platform facilities may need to be licensed. */
- return -EBUSY;
- case H_FUNCTION: /* The function is not supported */
- case H_NOT_FOUND: /* The operation supplied was not valid */
- return -EINVAL;
- default:
- WARN(1, "Unexpected return code: %lx", rc);
- return -EINVAL;
- }
-}
-
-/*
- * cxl_h_control_facility - This H_CONTROL_CA_FACILITY hypervisor call
- * allows the partition to manipulate or query
- * certain coherent platform facility behaviors.
- */
-static long cxl_h_control_facility(u64 unit_address, u64 op,
- u64 p1, u64 p2, u64 p3, u64 p4, u64 *out)
-{
- unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
- long rc;
-
- CXL_H9_WAIT_UNTIL_DONE(rc, retbuf, H_CONTROL_CA_FACILITY, unit_address, op, p1, p2, p3, p4);
- _PRINT_MSG(rc, "cxl_h_control_facility(%#.16llx, %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li\n",
- unit_address, OP_STR_CONTROL_ADAPTER(op), p1, p2, p3, p4, retbuf[0], rc);
- trace_cxl_hcall_control_facility(unit_address, OP_STR_CONTROL_ADAPTER(op), p1, p2, p3, p4, retbuf[0], rc);
-
- switch (rc) {
- case H_SUCCESS: /* The operation is completed for the coherent platform facility */
- if (op == H_CONTROL_CA_FACILITY_COLLECT_VPD)
- *out = retbuf[0];
- return 0;
- case H_PARAMETER: /* An incorrect parameter was supplied. */
- case H_FUNCTION: /* The function is not supported. */
- case H_NOT_FOUND: /* The operation supplied was not valid */
- case H_NOT_AVAILABLE: /* The operation cannot be performed because the AFU has not been downloaded */
-	case H_SG_LIST: /* A block list entry was invalid */
- return -EINVAL;
- case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
- case H_RESOURCE: /* The function has page table mappings for MMIO */
- case H_HARDWARE: /* A hardware event prevented the attach operation */
- case H_STATE: /* The coherent platform facility is not in a valid state */
- case H_BUSY:
- return -EBUSY;
- default:
- WARN(1, "Unexpected return code: %lx", rc);
- return -EINVAL;
- }
-}
-
-/*
- * cxl_h_reset_adapter - Perform a reset to the coherent platform facility.
- */
-long cxl_h_reset_adapter(u64 unit_address)
-{
- return cxl_h_control_facility(unit_address,
- H_CONTROL_CA_FACILITY_RESET,
- 0, 0, 0, 0,
- NULL);
-}
-
-/*
- * cxl_h_collect_vpd_adapter - Collect VPD for the coherent platform facility.
- * Parameter1 = 4K naturally aligned real buffer containing block
- * list entries
- * Parameter2 = number of block list entries in the block list, valid
- * values are between 0 and 256
- */
-long cxl_h_collect_vpd_adapter(u64 unit_address, u64 list_address,
- u64 num, u64 *out)
-{
- return cxl_h_control_facility(unit_address,
- H_CONTROL_CA_FACILITY_COLLECT_VPD,
- list_address, num, 0, 0,
- out);
-}
-
-/*
- * cxl_h_download_facility - This H_DOWNLOAD_CA_FACILITY
- * hypervisor call provide platform support for
- * downloading a base adapter image to the coherent
- * platform facility, and for validating the entire
- * image after the download.
- * Parameters
- * op: operation to perform to the coherent platform function
- * Download: operation = 1, the base image in the coherent platform
- * facility is first erased, and then
- * programmed using the image supplied
- * in the scatter/gather list.
- * Validate: operation = 2, the base image in the coherent platform
- * facility is compared with the image
- * supplied in the scatter/gather list.
- * list_address: 4K naturally aligned real buffer containing
- * scatter/gather list entries.
- * num: number of block list entries in the scatter/gather list.
- */
-static long cxl_h_download_facility(u64 unit_address, u64 op,
- u64 list_address, u64 num,
- u64 *out)
-{
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
- unsigned int delay, total_delay = 0;
- u64 token = 0;
- long rc;
-
- if (*out != 0)
- token = *out;
-
- memset(retbuf, 0, sizeof(retbuf));
- while (1) {
- rc = plpar_hcall(H_DOWNLOAD_CA_FACILITY, retbuf,
- unit_address, op, list_address, num,
- token);
- token = retbuf[0];
- if (rc != H_BUSY && !H_IS_LONG_BUSY(rc))
- break;
-
- if (rc != H_BUSY) {
- delay = get_longbusy_msecs(rc);
- total_delay += delay;
- if (total_delay > CXL_HCALL_TIMEOUT_DOWNLOAD) {
- WARN(1, "Warning: Giving up waiting for CXL hcall "
- "%#x after %u msec\n",
- H_DOWNLOAD_CA_FACILITY, total_delay);
- rc = H_BUSY;
- break;
- }
- msleep(delay);
- }
- }
- _PRINT_MSG(rc, "cxl_h_download_facility(%#.16llx, %s(%#llx, %#llx), %#lx): %li\n",
- unit_address, OP_STR_DOWNLOAD_ADAPTER(op), list_address, num, retbuf[0], rc);
- trace_cxl_hcall_download_facility(unit_address, OP_STR_DOWNLOAD_ADAPTER(op), list_address, num, retbuf[0], rc);
-
- switch (rc) {
- case H_SUCCESS: /* The operation is completed for the coherent platform facility */
- return 0;
- case H_PARAMETER: /* An incorrect parameter was supplied */
- case H_FUNCTION: /* The function is not supported. */
-	case H_SG_LIST: /* A block list entry was invalid */
- case H_BAD_DATA: /* Image verification failed */
- return -EINVAL;
- case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
- case H_RESOURCE: /* The function has page table mappings for MMIO */
- case H_HARDWARE: /* A hardware event prevented the attach operation */
- case H_STATE: /* The coherent platform facility is not in a valid state */
- case H_BUSY:
- return -EBUSY;
- case H_CONTINUE:
- *out = retbuf[0];
- return 1; /* More data is needed for the complete image */
- default:
- WARN(1, "Unexpected return code: %lx", rc);
- return -EINVAL;
- }
-}
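-
-/*
- * Hedged caller sketch (hypothetical, not from the original source): a
- * multi-chunk download feeds the continuation token back in through
- * *out for as long as the hcall returns 1 (H_CONTINUE):
- *
- *	u64 token = 0;
- *	long rc;
- *
- *	do {
- *		... refill the scatter/gather list with the next chunk ...
- *		rc = cxl_h_download_adapter_image(unit_address,
- *						  list_address, num, &token);
- *	} while (rc == 1);
- */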
-
-/*
- * cxl_h_download_adapter_image - Download the base image to the coherent
- * platform facility.
- */
-long cxl_h_download_adapter_image(u64 unit_address,
- u64 list_address, u64 num,
- u64 *out)
-{
- return cxl_h_download_facility(unit_address,
- H_DOWNLOAD_CA_FACILITY_DOWNLOAD,
- list_address, num, out);
-}
-
-/*
- * cxl_h_validate_adapter_image - Validate the base image in the coherent
- * platform facility.
- */
-long cxl_h_validate_adapter_image(u64 unit_address,
- u64 list_address, u64 num,
- u64 *out)
-{
- return cxl_h_download_facility(unit_address,
- H_DOWNLOAD_CA_FACILITY_VALIDATE,
- list_address, num, out);
-}
diff --git a/drivers/misc/cxl/hcalls.h b/drivers/misc/cxl/hcalls.h
deleted file mode 100644
index d200465dc6ac..000000000000
--- a/drivers/misc/cxl/hcalls.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2015 IBM Corp.
- */
-
-#ifndef _HCALLS_H
-#define _HCALLS_H
-
-#include <linux/types.h>
-#include <asm/byteorder.h>
-#include <asm/hvcall.h>
-#include "cxl.h"
-
-#define SG_BUFFER_SIZE 4096
-#define SG_MAX_ENTRIES 256
-
-struct sg_list {
- u64 phys_addr;
- u64 len;
-};
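-
-/*
- * Hedged sketch (hypothetical buf/nbufs, not from the original source):
- * filling one 4K page of sg_list entries the way the VPD-collection and
- * image-download paths do; both fields are big-endian per PAPR:
- *
- *	struct sg_list *le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
- *	int i;
- *
- *	for (i = 0; i < nbufs; i++) {
- *		le[i].phys_addr = cpu_to_be64(virt_to_phys(buf[i]));
- *		le[i].len = cpu_to_be64(SG_BUFFER_SIZE);
- *	}
- */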
-
-/*
- * This is straight out of PAPR, but with some of the compound fields
- * replaced by a single field where they were identical to the register
- * layout.
- *
- * The 'flags' field groups the various bit-fields.
- */
-#define CXL_PE_CSRP_VALID (1ULL << 63)
-#define CXL_PE_PROBLEM_STATE (1ULL << 62)
-#define CXL_PE_SECONDARY_SEGMENT_TBL_SRCH (1ULL << 61)
-#define CXL_PE_TAGS_ACTIVE (1ULL << 60)
-#define CXL_PE_USER_STATE (1ULL << 59)
-#define CXL_PE_TRANSLATION_ENABLED (1ULL << 58)
-#define CXL_PE_64_BIT (1ULL << 57)
-#define CXL_PE_PRIVILEGED_PROCESS (1ULL << 56)
-
-#define CXL_PROCESS_ELEMENT_VERSION 1
-struct cxl_process_element_hcall {
- __be64 version;
- __be64 flags;
- u8 reserved0[12];
- __be32 pslVirtualIsn;
- u8 applicationVirtualIsnBitmap[256];
- u8 reserved1[144];
- struct cxl_process_element_common common;
- u8 reserved4[12];
-} __packed;
-
-#define H_STATE_NORMAL 1
-#define H_STATE_DISABLE 2
-#define H_STATE_TEMP_UNAVAILABLE 3
-#define H_STATE_PERM_UNAVAILABLE 4
-
-/* NOTE: element must be a logical real address, and must be pinned */
-long cxl_h_attach_process(u64 unit_address, struct cxl_process_element_hcall *element,
- u64 *process_token, u64 *mmio_addr, u64 *mmio_size);
-
-/**
- * cxl_h_detach_process - Detach a process element from a coherent
- * platform function.
- */
-long cxl_h_detach_process(u64 unit_address, u64 process_token);
-
-/**
- * cxl_h_reset_afu - Perform a reset to the coherent platform function.
- */
-long cxl_h_reset_afu(u64 unit_address);
-
-/**
- * cxl_h_suspend_process - Suspend a process from being executed
- * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
- * process was attached.
- */
-long cxl_h_suspend_process(u64 unit_address, u64 process_token);
-
-/**
- * cxl_h_resume_process - Resume a process to be executed
- * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
- * process was attached.
- */
-long cxl_h_resume_process(u64 unit_address, u64 process_token);
-
-/**
- * cxl_h_read_error_state - Reads the error state of the coherent
- * platform function.
- * R4 contains the error state
- */
-long cxl_h_read_error_state(u64 unit_address, u64 *state);
-
-/**
- * cxl_h_get_afu_err - collect the AFU error buffer
- * Parameter1 = byte offset into error buffer to retrieve, valid values
- * are between 0 and (ibm,error-buffer-size - 1)
- * Parameter2 = 4K aligned real address of error buffer, to be filled in
- * Parameter3 = length of error buffer, valid values are 4K or less
- */
-long cxl_h_get_afu_err(u64 unit_address, u64 offset, u64 buf_address, u64 len);
-
-/**
- * cxl_h_get_config - collect configuration record for the
- * coherent platform function
- * Parameter1 = # of configuration record to retrieve, valid values are
- * between 0 and (ibm,#config-records - 1)
- * Parameter2 = byte offset into configuration record to retrieve,
- * valid values are between 0 and (ibm,config-record-size - 1)
- * Parameter3 = 4K aligned real address of configuration record buffer,
- * to be filled in
- * Parameter4 = length of configuration buffer, valid values are 4K or less
- */
-long cxl_h_get_config(u64 unit_address, u64 cr_num, u64 offset,
- u64 buf_address, u64 len);
-
-/**
- * cxl_h_terminate_process - Terminate the process before completion
- * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
- * process was attached.
- */
-long cxl_h_terminate_process(u64 unit_address, u64 process_token);
-
-/**
- * cxl_h_collect_vpd - Collect VPD for the coherent platform function.
- * Parameter1 = # of VPD record to retrieve, valid values are between 0
- * and (ibm,#config-records - 1).
- * Parameter2 = 4K naturally aligned real buffer containing block
- * list entries
- * Parameter3 = number of block list entries in the block list, valid
- * values are between 0 and 256
- */
-long cxl_h_collect_vpd(u64 unit_address, u64 record, u64 list_address,
- u64 num, u64 *out);
-
-/**
- * cxl_h_get_fn_error_interrupt - Read the function-wide error data based on an interrupt
- */
-long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg);
-
-/**
- * cxl_h_ack_fn_error_interrupt - Acknowledge function-wide error data
- * based on an interrupt
- * Parameter1 = value to write to the function-wide error interrupt register
- */
-long cxl_h_ack_fn_error_interrupt(u64 unit_address, u64 value);
-
-/**
- * cxl_h_get_error_log - Retrieve the Platform Log ID (PLID) of
- * an error log
- */
-long cxl_h_get_error_log(u64 unit_address, u64 value);
-
-/**
- * cxl_h_collect_int_info - Collect interrupt info about a coherent
- * platform function after an interrupt occurred.
- */
-long cxl_h_collect_int_info(u64 unit_address, u64 process_token,
- struct cxl_irq_info *info);
-
-/**
- * cxl_h_control_faults - Control the operation of a coherent platform
- * function after a fault occurs.
- *
- * Parameters
- * control-mask: value to control the faults
- * looks like PSL_TFC_An shifted >> 32
- * reset-mask: mask to control reset of function faults
- * Set reset_mask = 1 to reset PSL errors
- */
-long cxl_h_control_faults(u64 unit_address, u64 process_token,
- u64 control_mask, u64 reset_mask);
-
-/**
- * cxl_h_reset_adapter - Perform a reset to the coherent platform facility.
- */
-long cxl_h_reset_adapter(u64 unit_address);
-
-/**
- * cxl_h_collect_vpd_adapter - Collect VPD for the coherent platform facility.
- * Parameter1 = 4K naturally aligned real buffer containing block
- * list entries
- * Parameter2 = number of block list entries in the block list, valid
- * values are between 0 and 256
- */
-long cxl_h_collect_vpd_adapter(u64 unit_address, u64 list_address,
- u64 num, u64 *out);
-
-/**
- * cxl_h_download_adapter_image - Download the base image to the coherent
- * platform facility.
- */
-long cxl_h_download_adapter_image(u64 unit_address,
- u64 list_address, u64 num,
- u64 *out);
-
-/**
- * cxl_h_validate_adapter_image - Validate the base image in the coherent
- * platform facility.
- */
-long cxl_h_validate_adapter_image(u64 unit_address,
- u64 list_address, u64 num,
- u64 *out);
-#endif /* _HCALLS_H */
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
deleted file mode 100644
index b730e022a48e..000000000000
--- a/drivers/misc/cxl/irq.c
+++ /dev/null
@@ -1,450 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/interrupt.h>
-#include <linux/irqdomain.h>
-#include <linux/workqueue.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/slab.h>
-#include <linux/pid.h>
-#include <asm/cputable.h>
-#include <misc/cxl-base.h>
-
-#include "cxl.h"
-#include "trace.h"
-
-static int afu_irq_range_start(void)
-{
- if (cpu_has_feature(CPU_FTR_HVMODE))
- return 1;
- return 0;
-}
-
-static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar)
-{
- ctx->dsisr = dsisr;
- ctx->dar = dar;
- schedule_work(&ctx->fault_work);
- return IRQ_HANDLED;
-}
-
-irqreturn_t cxl_irq_psl9(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
-{
- u64 dsisr, dar;
-
- dsisr = irq_info->dsisr;
- dar = irq_info->dar;
-
- trace_cxl_psl9_irq(ctx, irq, dsisr, dar);
-
- pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);
-
- if (dsisr & CXL_PSL9_DSISR_An_TF) {
- pr_devel("CXL interrupt: Scheduling translation fault handling for later (pe: %i)\n", ctx->pe);
- return schedule_cxl_fault(ctx, dsisr, dar);
- }
-
- if (dsisr & CXL_PSL9_DSISR_An_PE)
- return cxl_ops->handle_psl_slice_error(ctx, dsisr,
- irq_info->errstat);
- if (dsisr & CXL_PSL9_DSISR_An_AE) {
- pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);
-
- if (ctx->pending_afu_err) {
- /*
- * This shouldn't happen - the PSL treats these errors
- * as fatal and will have reset the AFU, so there's not
- * much point buffering multiple AFU errors.
- * OTOH if we DO ever see a storm of these come in it's
- * probably best that we log them somewhere:
- */
- dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error undelivered to pe %i: 0x%016llx\n",
- ctx->pe, irq_info->afu_err);
- } else {
- spin_lock(&ctx->lock);
- ctx->afu_err = irq_info->afu_err;
- ctx->pending_afu_err = 1;
- spin_unlock(&ctx->lock);
-
- wake_up_all(&ctx->wq);
- }
-
- cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
- return IRQ_HANDLED;
- }
- if (dsisr & CXL_PSL9_DSISR_An_OC)
- pr_devel("CXL interrupt: OS Context Warning\n");
-
- WARN(1, "Unhandled CXL PSL IRQ\n");
- return IRQ_HANDLED;
-}
-
-irqreturn_t cxl_irq_psl8(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
-{
- u64 dsisr, dar;
-
- dsisr = irq_info->dsisr;
- dar = irq_info->dar;
-
- trace_cxl_psl_irq(ctx, irq, dsisr, dar);
-
- pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);
-
- if (dsisr & CXL_PSL_DSISR_An_DS) {
- /*
- * We don't inherently need to sleep to handle this, but we do
- * need to get a ref to the task's mm, which we can't do from
- * irq context without the potential for a deadlock since it
- * takes the task_lock. An alternate option would be to keep a
- * reference to the task's mm the entire time it has cxl open,
- * but to do that we need to solve the issue where we hold a
- * ref to the mm, but the mm can hold a ref to the fd after an
- * mmap preventing anything from being cleaned up.
- */
- pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe);
- return schedule_cxl_fault(ctx, dsisr, dar);
- }
-
- if (dsisr & CXL_PSL_DSISR_An_M)
- pr_devel("CXL interrupt: PTE not found\n");
- if (dsisr & CXL_PSL_DSISR_An_P)
- pr_devel("CXL interrupt: Storage protection violation\n");
- if (dsisr & CXL_PSL_DSISR_An_A)
- pr_devel("CXL interrupt: AFU lock access to write through or cache inhibited storage\n");
- if (dsisr & CXL_PSL_DSISR_An_S)
- pr_devel("CXL interrupt: Access was afu_wr or afu_zero\n");
- if (dsisr & CXL_PSL_DSISR_An_K)
- pr_devel("CXL interrupt: Access not permitted by virtual page class key protection\n");
-
- if (dsisr & CXL_PSL_DSISR_An_DM) {
- /*
- * In some cases we might be able to handle the fault
- * immediately if hash_page would succeed, but we still need
- * the task's mm, which as above we can't get without a lock
- */
- pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe);
- return schedule_cxl_fault(ctx, dsisr, dar);
- }
- if (dsisr & CXL_PSL_DSISR_An_ST)
- WARN(1, "CXL interrupt: Segment Table PTE not found\n");
- if (dsisr & CXL_PSL_DSISR_An_UR)
- pr_devel("CXL interrupt: AURP PTE not found\n");
- if (dsisr & CXL_PSL_DSISR_An_PE)
- return cxl_ops->handle_psl_slice_error(ctx, dsisr,
- irq_info->errstat);
- if (dsisr & CXL_PSL_DSISR_An_AE) {
- pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);
-
- if (ctx->pending_afu_err) {
- /*
- * This shouldn't happen - the PSL treats these errors
- * as fatal and will have reset the AFU, so there's not
- * much point buffering multiple AFU errors.
- * OTOH if we DO ever see a storm of these come in it's
- * probably best that we log them somewhere:
- */
- dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error "
- "undelivered to pe %i: 0x%016llx\n",
- ctx->pe, irq_info->afu_err);
- } else {
- spin_lock(&ctx->lock);
- ctx->afu_err = irq_info->afu_err;
- ctx->pending_afu_err = true;
- spin_unlock(&ctx->lock);
-
- wake_up_all(&ctx->wq);
- }
-
- cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
- return IRQ_HANDLED;
- }
- if (dsisr & CXL_PSL_DSISR_An_OC)
- pr_devel("CXL interrupt: OS Context Warning\n");
-
- WARN(1, "Unhandled CXL PSL IRQ\n");
- return IRQ_HANDLED;
-}
-
-static irqreturn_t cxl_irq_afu(int irq, void *data)
-{
- struct cxl_context *ctx = data;
- irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
- int irq_off, afu_irq = 0;
- __u16 range;
- int r;
-
- /*
- * Look for the interrupt number.
- * On bare-metal, we know range 0 only contains the PSL
- * interrupt so we could start counting at range 1 and initialize
- * afu_irq at 1.
- * In a guest, range 0 also contains AFU interrupts, so it must
- * be accounted for. Therefore we initialize afu_irq at 0 to take into
- * account the PSL interrupt.
- *
- * For code-readability, it just seems easier to go over all
- * the ranges on bare-metal and guest. The end result is the same.
- */
- for (r = 0; r < CXL_IRQ_RANGES; r++) {
- irq_off = hwirq - ctx->irqs.offset[r];
- range = ctx->irqs.range[r];
- if (irq_off >= 0 && irq_off < range) {
- afu_irq += irq_off;
- break;
- }
- afu_irq += range;
- }
- if (unlikely(r >= CXL_IRQ_RANGES)) {
- WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
- ctx->pe, irq, hwirq);
- return IRQ_HANDLED;
- }
-
- trace_cxl_afu_irq(ctx, afu_irq, irq, hwirq);
- pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
- afu_irq, ctx->pe, irq, hwirq);
-
- if (unlikely(!ctx->irq_bitmap)) {
- WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n");
- return IRQ_HANDLED;
- }
- spin_lock(&ctx->lock);
- set_bit(afu_irq - 1, ctx->irq_bitmap);
- ctx->pending_irq = true;
- spin_unlock(&ctx->lock);
-
- wake_up_all(&ctx->wq);
-
- return IRQ_HANDLED;
-}
-
-unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
- irq_handler_t handler, void *cookie, const char *name)
-{
- unsigned int virq;
- int result;
-
- /* IRQ Domain? */
- virq = irq_create_mapping(NULL, hwirq);
- if (!virq) {
- dev_warn(&adapter->dev, "cxl_map_irq: irq_create_mapping failed\n");
- return 0;
- }
-
- if (cxl_ops->setup_irq)
- cxl_ops->setup_irq(adapter, hwirq, virq);
-
- pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);
-
- result = request_irq(virq, handler, 0, name, cookie);
- if (result) {
- dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
- return 0;
- }
-
- return virq;
-}
-
-void cxl_unmap_irq(unsigned int virq, void *cookie)
-{
- free_irq(virq, cookie);
-}
-
-int cxl_register_one_irq(struct cxl *adapter,
- irq_handler_t handler,
- void *cookie,
- irq_hw_number_t *dest_hwirq,
- unsigned int *dest_virq,
- const char *name)
-{
- int hwirq, virq;
-
- if ((hwirq = cxl_ops->alloc_one_irq(adapter)) < 0)
- return hwirq;
-
- if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name)))
- goto err;
-
- *dest_hwirq = hwirq;
- *dest_virq = virq;
-
- return 0;
-
-err:
- cxl_ops->release_one_irq(adapter, hwirq);
- return -ENOMEM;
-}
-
-void afu_irq_name_free(struct cxl_context *ctx)
-{
- struct cxl_irq_name *irq_name, *tmp;
-
- list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) {
- kfree(irq_name->name);
- list_del(&irq_name->list);
- kfree(irq_name);
- }
-}
-
-int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
-{
- int rc, r, i, j = 1;
- struct cxl_irq_name *irq_name;
- int alloc_count;
-
- /*
- * In native mode, range 0 is reserved for the multiplexed
- * PSL interrupt. It has been allocated when the AFU was initialized.
- *
- * In a guest, the PSL interrupt is not multiplexed, but per-context,
- * and is the first interrupt from range 0. It still needs to be
- * allocated, so bump the count by one.
- */
- if (cpu_has_feature(CPU_FTR_HVMODE))
- alloc_count = count;
- else
- alloc_count = count + 1;
-
- if ((rc = cxl_ops->alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter,
- alloc_count)))
- return rc;
-
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- /* Multiplexed PSL Interrupt */
- ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
- ctx->irqs.range[0] = 1;
- }
-
- ctx->irq_count = count;
- ctx->irq_bitmap = bitmap_zalloc(count, GFP_KERNEL);
- if (!ctx->irq_bitmap)
- goto out;
-
- /*
- * Allocate names first. If any fail, bail out before allocating
- * actual hardware IRQs.
- */
- for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
- for (i = 0; i < ctx->irqs.range[r]; i++) {
- irq_name = kmalloc(sizeof(struct cxl_irq_name),
- GFP_KERNEL);
- if (!irq_name)
- goto out;
- irq_name->name = kasprintf(GFP_KERNEL, "cxl-%s-pe%i-%i",
- dev_name(&ctx->afu->dev),
- ctx->pe, j);
- if (!irq_name->name) {
- kfree(irq_name);
- goto out;
- }
- /* Add to tail so the next lookup gets the correct order */
- list_add_tail(&irq_name->list, &ctx->irq_names);
- j++;
- }
- }
- return 0;
-
-out:
- cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
- bitmap_free(ctx->irq_bitmap);
- afu_irq_name_free(ctx);
- return -ENOMEM;
-}
-
-static void afu_register_hwirqs(struct cxl_context *ctx)
-{
- irq_hw_number_t hwirq;
- struct cxl_irq_name *irq_name;
- int r, i;
- irqreturn_t (*handler)(int irq, void *data);
-
- /* We've allocated all memory now, so let's do the irq allocations */
- irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
- for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
- hwirq = ctx->irqs.offset[r];
- for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
- if (r == 0 && i == 0)
- /*
- * The very first interrupt of range 0 is
- * always the PSL interrupt, but we only
- * need to connect a handler for guests,
- * because there's one PSL interrupt per
- * context.
- * On bare-metal, the PSL interrupt is
- * multiplexed and was setup when the AFU
- * was configured.
- */
- handler = cxl_ops->psl_interrupt;
- else
- handler = cxl_irq_afu;
- cxl_map_irq(ctx->afu->adapter, hwirq, handler, ctx,
- irq_name->name);
- irq_name = list_next_entry(irq_name, list);
- }
- }
-}
-
-int afu_register_irqs(struct cxl_context *ctx, u32 count)
-{
- int rc;
-
- rc = afu_allocate_irqs(ctx, count);
- if (rc)
- return rc;
-
- afu_register_hwirqs(ctx);
- return 0;
-}
-
-void afu_release_irqs(struct cxl_context *ctx, void *cookie)
-{
- irq_hw_number_t hwirq;
- unsigned int virq;
- int r, i;
-
- for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
- hwirq = ctx->irqs.offset[r];
- for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
- virq = irq_find_mapping(NULL, hwirq);
- if (virq)
- cxl_unmap_irq(virq, cookie);
- }
- }
-
- afu_irq_name_free(ctx);
- cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
-
- ctx->irq_count = 0;
-}
-
-void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr)
-{
- dev_crit(&afu->dev,
- "PSL Slice error received. Check AFU for root cause.\n");
- dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
- if (serr & CXL_PSL_SERR_An_afuto)
- dev_crit(&afu->dev, "AFU MMIO Timeout\n");
- if (serr & CXL_PSL_SERR_An_afudis)
- dev_crit(&afu->dev,
- "MMIO targeted Accelerator that was not enabled\n");
- if (serr & CXL_PSL_SERR_An_afuov)
- dev_crit(&afu->dev, "AFU CTAG Overflow\n");
- if (serr & CXL_PSL_SERR_An_badsrc)
- dev_crit(&afu->dev, "Bad Interrupt Source\n");
- if (serr & CXL_PSL_SERR_An_badctx)
- dev_crit(&afu->dev, "Bad Context Handle\n");
- if (serr & CXL_PSL_SERR_An_llcmdis)
- dev_crit(&afu->dev, "LLCMD to Disabled AFU\n");
- if (serr & CXL_PSL_SERR_An_llcmdto)
- dev_crit(&afu->dev, "LLCMD Timeout to AFU\n");
- if (serr & CXL_PSL_SERR_An_afupar)
- dev_crit(&afu->dev, "AFU MMIO Parity Error\n");
- if (serr & CXL_PSL_SERR_An_afudup)
- dev_crit(&afu->dev, "AFU MMIO Duplicate CTAG Error\n");
- if (serr & CXL_PSL_SERR_An_AE)
- dev_crit(&afu->dev,
- "AFU asserted JDONE with JERROR in AFU Directed Mode\n");
-}
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
deleted file mode 100644
index c1fbf6f588f7..000000000000
--- a/drivers/misc/cxl/main.c
+++ /dev/null
@@ -1,383 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/mutex.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/mm.h>
-#include <linux/of.h>
-#include <linux/slab.h>
-#include <linux/idr.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-#include <linux/sched/task.h>
-
-#include <asm/cputable.h>
-#include <asm/mmu.h>
-#include <misc/cxl-base.h>
-
-#include "cxl.h"
-#include "trace.h"
-
-static DEFINE_SPINLOCK(adapter_idr_lock);
-static DEFINE_IDR(cxl_adapter_idr);
-
-uint cxl_verbose;
-module_param_named(verbose, cxl_verbose, uint, 0600);
-MODULE_PARM_DESC(verbose, "Enable verbose dmesg output");
-
-const struct cxl_backend_ops *cxl_ops;
-
-int cxl_afu_slbia(struct cxl_afu *afu)
-{
- unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
-
- pr_devel("cxl_afu_slbia issuing SLBIA command\n");
- cxl_p2n_write(afu, CXL_SLBIA_An, CXL_TLB_SLB_IQ_ALL);
- while (cxl_p2n_read(afu, CXL_SLBIA_An) & CXL_TLB_SLB_P) {
- if (time_after_eq(jiffies, timeout)) {
- dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n");
- return -EBUSY;
- }
- /* If the adapter has gone down, we can assume that we
- * will PERST it and that will invalidate everything.
- */
- if (!cxl_ops->link_ok(afu->adapter, afu))
- return -EIO;
- cpu_relax();
- }
- return 0;
-}
-
-static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm)
-{
- unsigned long flags;
-
- if (ctx->mm != mm)
- return;
-
- pr_devel("%s matched mm - card: %i afu: %i pe: %i\n", __func__,
- ctx->afu->adapter->adapter_num, ctx->afu->slice, ctx->pe);
-
- spin_lock_irqsave(&ctx->sste_lock, flags);
- trace_cxl_slbia(ctx);
- memset(ctx->sstp, 0, ctx->sst_size);
- spin_unlock_irqrestore(&ctx->sste_lock, flags);
- mb();
- cxl_afu_slbia(ctx->afu);
-}
-
-static inline void cxl_slbia_core(struct mm_struct *mm)
-{
- struct cxl *adapter;
- struct cxl_afu *afu;
- struct cxl_context *ctx;
- int card, slice, id;
-
- pr_devel("%s called\n", __func__);
-
- spin_lock(&adapter_idr_lock);
- idr_for_each_entry(&cxl_adapter_idr, adapter, card) {
- /* XXX: Make this lookup faster with link from mm to ctx */
- spin_lock(&adapter->afu_list_lock);
- for (slice = 0; slice < adapter->slices; slice++) {
- afu = adapter->afu[slice];
- if (!afu || !afu->enabled)
- continue;
- rcu_read_lock();
- idr_for_each_entry(&afu->contexts_idr, ctx, id)
- _cxl_slbia(ctx, mm);
- rcu_read_unlock();
- }
- spin_unlock(&adapter->afu_list_lock);
- }
- spin_unlock(&adapter_idr_lock);
-}
-
-static struct cxl_calls cxl_calls = {
- .cxl_slbia = cxl_slbia_core,
- .owner = THIS_MODULE,
-};
-
-int cxl_alloc_sst(struct cxl_context *ctx)
-{
- unsigned long vsid;
- u64 ea_mask, size, sstp0, sstp1;
-
- sstp0 = 0;
- sstp1 = 0;
-
- ctx->sst_size = PAGE_SIZE;
- ctx->sst_lru = 0;
- ctx->sstp = (struct cxl_sste *)get_zeroed_page(GFP_KERNEL);
- if (!ctx->sstp) {
- pr_err("cxl_alloc_sst: Unable to allocate segment table\n");
- return -ENOMEM;
- }
- pr_devel("SSTP allocated at 0x%p\n", ctx->sstp);
-
- vsid = get_kernel_vsid((u64)ctx->sstp, mmu_kernel_ssize) << 12;
-
- sstp0 |= (u64)mmu_kernel_ssize << CXL_SSTP0_An_B_SHIFT;
- sstp0 |= (SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp) << 50;
-
- size = (((u64)ctx->sst_size >> 8) - 1) << CXL_SSTP0_An_SegTableSize_SHIFT;
- if (unlikely(size & ~CXL_SSTP0_An_SegTableSize_MASK)) {
- WARN(1, "Impossible segment table size\n");
- return -EINVAL;
- }
- sstp0 |= size;
-
- if (mmu_kernel_ssize == MMU_SEGSIZE_256M)
- ea_mask = 0xfffff00ULL;
- else
- ea_mask = 0xffffffff00ULL;
-
- sstp0 |= vsid >> (50-14); /* Top 14 bits of VSID */
- sstp1 |= (vsid << (64-(50-14))) & ~ea_mask;
- sstp1 |= (u64)ctx->sstp & ea_mask;
- sstp1 |= CXL_SSTP1_An_V;
-
- pr_devel("Looked up %#llx: slbfee. %#llx (ssize: %x, vsid: %#lx), copied to SSTP0: %#llx, SSTP1: %#llx\n",
- (u64)ctx->sstp, (u64)ctx->sstp & ESID_MASK, mmu_kernel_ssize, vsid, sstp0, sstp1);
-
- /* Store the calculated sstp hardware pointers for later use */
- ctx->sstp0 = sstp0;
- ctx->sstp1 = sstp1;
-
- return 0;
-}
-
-/* print buffer content as integers when debugging */
-void cxl_dump_debug_buffer(void *buf, size_t buf_len)
-{
-#ifdef DEBUG
- int i, *ptr;
-
- /*
- * We want to regroup up to 4 integers per line, which means they
- * need to be in the same pr_devel() statement
- */
- ptr = (int *) buf;
- for (i = 0; i * 4 < buf_len; i += 4) {
- if ((i + 3) * 4 < buf_len)
- pr_devel("%.8x %.8x %.8x %.8x\n", ptr[i], ptr[i + 1],
- ptr[i + 2], ptr[i + 3]);
- else if ((i + 2) * 4 < buf_len)
- pr_devel("%.8x %.8x %.8x\n", ptr[i], ptr[i + 1],
- ptr[i + 2]);
- else if ((i + 1) * 4 < buf_len)
- pr_devel("%.8x %.8x\n", ptr[i], ptr[i + 1]);
- else
- pr_devel("%.8x\n", ptr[i]);
- }
-#endif /* DEBUG */
-}
-
-/* Find a CXL adapter by its number and increase its refcount */
-struct cxl *get_cxl_adapter(int num)
-{
- struct cxl *adapter;
-
- spin_lock(&adapter_idr_lock);
- if ((adapter = idr_find(&cxl_adapter_idr, num)))
- get_device(&adapter->dev);
- spin_unlock(&adapter_idr_lock);
-
- return adapter;
-}
-
-static int cxl_alloc_adapter_nr(struct cxl *adapter)
-{
- int i;
-
- idr_preload(GFP_KERNEL);
- spin_lock(&adapter_idr_lock);
- i = idr_alloc(&cxl_adapter_idr, adapter, 0, 0, GFP_NOWAIT);
- spin_unlock(&adapter_idr_lock);
- idr_preload_end();
- if (i < 0)
- return i;
-
- adapter->adapter_num = i;
-
- return 0;
-}
-
-void cxl_remove_adapter_nr(struct cxl *adapter)
-{
- idr_remove(&cxl_adapter_idr, adapter->adapter_num);
-}
-
-struct cxl *cxl_alloc_adapter(void)
-{
- struct cxl *adapter;
-
- if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL)))
- return NULL;
-
- spin_lock_init(&adapter->afu_list_lock);
-
- if (cxl_alloc_adapter_nr(adapter))
- goto err1;
-
- if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
- goto err2;
-
- /* start with context lock taken */
- atomic_set(&adapter->contexts_num, -1);
-
- return adapter;
-err2:
- cxl_remove_adapter_nr(adapter);
-err1:
- kfree(adapter);
- return NULL;
-}
-
-struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
-{
- struct cxl_afu *afu;
-
- if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL)))
- return NULL;
-
- afu->adapter = adapter;
- afu->dev.parent = &adapter->dev;
- afu->dev.release = cxl_ops->release_afu;
- afu->slice = slice;
- idr_init(&afu->contexts_idr);
- mutex_init(&afu->contexts_lock);
- spin_lock_init(&afu->afu_cntl_lock);
- atomic_set(&afu->configured_state, -1);
- afu->prefault_mode = CXL_PREFAULT_NONE;
- afu->irqs_max = afu->adapter->user_irqs;
-
- return afu;
-}
-
-int cxl_afu_select_best_mode(struct cxl_afu *afu)
-{
- if (afu->modes_supported & CXL_MODE_DIRECTED)
- return cxl_ops->afu_activate_mode(afu, CXL_MODE_DIRECTED);
-
- if (afu->modes_supported & CXL_MODE_DEDICATED)
- return cxl_ops->afu_activate_mode(afu, CXL_MODE_DEDICATED);
-
- dev_warn(&afu->dev, "No supported programming modes available\n");
- /* We don't fail this so the user can inspect sysfs */
- return 0;
-}
-
-int cxl_adapter_context_get(struct cxl *adapter)
-{
- int rc;
-
- rc = atomic_inc_unless_negative(&adapter->contexts_num);
- return rc ? 0 : -EBUSY;
-}
-
-void cxl_adapter_context_put(struct cxl *adapter)
-{
- atomic_dec_if_positive(&adapter->contexts_num);
-}
-
-int cxl_adapter_context_lock(struct cxl *adapter)
-{
- int rc;
- /* no active contexts -> contexts_num == 0 */
- rc = atomic_cmpxchg(&adapter->contexts_num, 0, -1);
- return rc ? -EBUSY : 0;
-}
-
-void cxl_adapter_context_unlock(struct cxl *adapter)
-{
- int val = atomic_cmpxchg(&adapter->contexts_num, -1, 0);
-
- /*
- * contexts lock taken -> contexts_num == -1
- * If not true then show a warning and force reset the lock.
- * This will happen when context_unlock was requested without
- * doing a context_lock.
- */
- if (val != -1) {
- atomic_set(&adapter->contexts_num, 0);
- WARN(1, "Adapter context unlocked with %d active contexts",
- val);
- }
-}
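-/*
- * Summary of the contexts_num scheme used above (an illustrative note,
- * not part of the original source):
- *   -1 -> lock held: cxl_adapter_context_get() fails with -EBUSY
- *    0 -> unlocked, no active contexts: cxl_adapter_context_lock() can
- *         take the lock via cmpxchg(0, -1)
- *   >0 -> unlocked, that many active contexts: lock attempts fail
- */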
-
-static int __init init_cxl(void)
-{
- int rc = 0;
-
- if (!tlbie_capable)
- return -EINVAL;
-
- if ((rc = cxl_file_init()))
- return rc;
-
- cxl_debugfs_init();
-
- /*
- * We don't register the callback on P9. The SLB callback is only
- * used for the PSL8 MMU and CX4.
- */
- if (cxl_is_power8()) {
- rc = register_cxl_calls(&cxl_calls);
- if (rc)
- goto err;
- }
-
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- cxl_ops = &cxl_native_ops;
- rc = pci_register_driver(&cxl_pci_driver);
- }
-#ifdef CONFIG_PPC_PSERIES
- else {
- cxl_ops = &cxl_guest_ops;
- rc = platform_driver_register(&cxl_of_driver);
- }
-#endif
- if (rc)
- goto err1;
-
- return 0;
-err1:
- if (cxl_is_power8())
- unregister_cxl_calls(&cxl_calls);
-err:
- cxl_debugfs_exit();
- cxl_file_exit();
-
- return rc;
-}
-
-static void exit_cxl(void)
-{
- if (cpu_has_feature(CPU_FTR_HVMODE))
- pci_unregister_driver(&cxl_pci_driver);
-#ifdef CONFIG_PPC_PSERIES
- else
- platform_driver_unregister(&cxl_of_driver);
-#endif
-
- cxl_debugfs_exit();
- cxl_file_exit();
- if (cxl_is_power8())
- unregister_cxl_calls(&cxl_calls);
- idr_destroy(&cxl_adapter_idr);
-}
-
-module_init(init_cxl);
-module_exit(exit_cxl);
-
-MODULE_DESCRIPTION("IBM Coherent Accelerator");
-MODULE_AUTHOR("Ian Munsie <imunsie@au1.ibm.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
deleted file mode 100644
index fbe16a6ab7ad..000000000000
--- a/drivers/misc/cxl/native.c
+++ /dev/null
@@ -1,1592 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/sched/clock.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-#include <linux/delay.h>
-#include <linux/irqdomain.h>
-#include <asm/synch.h>
-#include <asm/switch_to.h>
-#include <misc/cxl-base.h>
-
-#include "cxl.h"
-#include "trace.h"
-
-static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
- u64 result, u64 mask, bool enabled)
-{
- u64 AFU_Cntl;
- unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
- int rc = 0;
-
- spin_lock(&afu->afu_cntl_lock);
- pr_devel("AFU command starting: %llx\n", command);
-
- trace_cxl_afu_ctrl(afu, command);
-
- AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
- cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);
-
- AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
- while ((AFU_Cntl & mask) != result) {
- if (time_after_eq(jiffies, timeout)) {
- dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
- rc = -EBUSY;
- goto out;
- }
-
- if (!cxl_ops->link_ok(afu->adapter, afu)) {
- afu->enabled = enabled;
- rc = -EIO;
- goto out;
- }
-
- pr_devel_ratelimited("AFU control... (0x%016llx)\n",
- AFU_Cntl | command);
- cpu_relax();
- AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
- }
-
- if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
- /*
- * Workaround for a bug in the XSL used in the Mellanox CX4
- * that fails to clear the RA bit after an AFU reset,
- * preventing subsequent AFU resets from working.
- */
- cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA);
- }
-
- pr_devel("AFU command complete: %llx\n", command);
- afu->enabled = enabled;
-out:
- trace_cxl_afu_ctrl_done(afu, command, rc);
- spin_unlock(&afu->afu_cntl_lock);
-
- return rc;
-}
-
-static int afu_enable(struct cxl_afu *afu)
-{
- pr_devel("AFU enable request\n");
-
- return afu_control(afu, CXL_AFU_Cntl_An_E, 0,
- CXL_AFU_Cntl_An_ES_Enabled,
- CXL_AFU_Cntl_An_ES_MASK, true);
-}
-
-int cxl_afu_disable(struct cxl_afu *afu)
-{
- pr_devel("AFU disable request\n");
-
- return afu_control(afu, 0, CXL_AFU_Cntl_An_E,
- CXL_AFU_Cntl_An_ES_Disabled,
- CXL_AFU_Cntl_An_ES_MASK, false);
-}
-
-/* This will disable as well as reset */
-static int native_afu_reset(struct cxl_afu *afu)
-{
- int rc;
- u64 serr;
-
- pr_devel("AFU reset request\n");
-
- rc = afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
- CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
- CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
- false);
-
- /*
- * Re-enable any masked interrupts when the AFU is not
- * activated to avoid side effects after attaching a process
- * in dedicated mode.
- */
- if (afu->current_mode == 0) {
- serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
- serr &= ~CXL_PSL_SERR_An_IRQ_MASKS;
- cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
- }
-
- return rc;
-}
-
-static int native_afu_check_and_enable(struct cxl_afu *afu)
-{
- if (!cxl_ops->link_ok(afu->adapter, afu)) {
- WARN(1, "Refusing to enable afu while link down!\n");
- return -EIO;
- }
- if (afu->enabled)
- return 0;
- return afu_enable(afu);
-}
-
-int cxl_psl_purge(struct cxl_afu *afu)
-{
- u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
- u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
- u64 dsisr, dar;
- u64 start, end;
- u64 trans_fault = 0x0ULL;
- unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
- int rc = 0;
-
- trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);
-
- pr_devel("PSL purge request\n");
-
- if (cxl_is_power8())
- trans_fault = CXL_PSL_DSISR_TRANS;
- if (cxl_is_power9())
- trans_fault = CXL_PSL9_DSISR_An_TF;
-
- if (!cxl_ops->link_ok(afu->adapter, afu)) {
- dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
- rc = -EIO;
- goto out;
- }
-
- if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
- WARN(1, "psl_purge request while AFU not disabled!\n");
- cxl_afu_disable(afu);
- }
-
- cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
- PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
- start = local_clock();
- PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
- while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK)
- == CXL_PSL_SCNTL_An_Ps_Pending) {
- if (time_after_eq(jiffies, timeout)) {
- dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
- rc = -EBUSY;
- goto out;
- }
- if (!cxl_ops->link_ok(afu->adapter, afu)) {
- rc = -EIO;
- goto out;
- }
-
- dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
- pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx PSL_DSISR: 0x%016llx\n",
- PSL_CNTL, dsisr);
-
- if (dsisr & trans_fault) {
- dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
- dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n",
- dsisr, dar);
- cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
- } else if (dsisr) {
- dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n",
- dsisr);
- cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
- } else {
- cpu_relax();
- }
- PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
- }
- end = local_clock();
- pr_devel("PSL purged in %lld ns\n", end - start);
-
- cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
- PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
-out:
- trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
- return rc;
-}
-
-static int spa_max_procs(int spa_size)
-{
- /*
- * From the CAIA:
- * end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
- * Most of that junk is really just an overly-complicated way of saying
- * the last 256 bytes are __aligned(128), so it's really:
- * end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
- * and
- * end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
- * so
- * sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
- * Ignore the alignment (which is safe in this case as long as we are
- * careful with our rounding) and solve for n:
- */
- return ((spa_size / 8) - 96) / 17;
-}
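-/*
- * A worked check of the formula above (illustrative): ignoring the
- * alignment, sizeof(SPA) = 128*(n+4) + 8*n + 256 = 136*n + 768, so
- * solving 136*n + 768 <= spa_size for n gives
- * n = (spa_size - 768) / 136 = ((spa_size / 8) - 96) / 17.
- * For a single 4K page this allows (4096 - 768) / 136 = 24 processes.
- */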
-
-static int cxl_alloc_spa(struct cxl_afu *afu, int mode)
-{
- unsigned spa_size;
-
- /* Work out how many pages to allocate */
- afu->native->spa_order = -1;
- do {
- afu->native->spa_order++;
- spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;
-
- if (spa_size > 0x100000) {
- dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
- afu->native->spa_max_procs, afu->native->spa_size);
- if (mode != CXL_MODE_DEDICATED)
- afu->num_procs = afu->native->spa_max_procs;
- break;
- }
-
- afu->native->spa_size = spa_size;
- afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
- } while (afu->native->spa_max_procs < afu->num_procs);
-
- if (!(afu->native->spa = (struct cxl_process_element *)
- __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
- pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
- return -ENOMEM;
- }
- pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
- 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);
-
- return 0;
-}
-
-static void attach_spa(struct cxl_afu *afu)
-{
- u64 spap;
-
- afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
- ((afu->native->spa_max_procs + 3) * 128));
-
- spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
- spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
- spap |= CXL_PSL_SPAP_V;
- pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
- afu->native->spa, afu->native->spa_max_procs,
- afu->native->sw_command_status, spap);
- cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
-}
-
-void cxl_release_spa(struct cxl_afu *afu)
-{
- if (afu->native->spa) {
- free_pages((unsigned long) afu->native->spa,
- afu->native->spa_order);
- afu->native->spa = NULL;
- }
-}
-
-/*
- * Invalidation of all ERAT entries is no longer required by CAIA2. Use
- * only for debug.
- */
-int cxl_invalidate_all_psl9(struct cxl *adapter)
-{
- unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
- u64 ierat;
-
- pr_devel("CXL adapter - invalidation of all ERAT entries\n");
-
- /* Invalidates all ERAT entries for Radix or HPT */
- ierat = CXL_XSL9_IERAT_IALL;
- if (radix_enabled())
- ierat |= CXL_XSL9_IERAT_INVR;
- cxl_p1_write(adapter, CXL_XSL9_IERAT, ierat);
-
- while (cxl_p1_read(adapter, CXL_XSL9_IERAT) & CXL_XSL9_IERAT_IINPROG) {
- if (time_after_eq(jiffies, timeout)) {
- dev_warn(&adapter->dev,
- "WARNING: CXL adapter invalidation of all ERAT entries timed out!\n");
- return -EBUSY;
- }
- if (!cxl_ops->link_ok(adapter, NULL))
- return -EIO;
- cpu_relax();
- }
- return 0;
-}
-
-int cxl_invalidate_all_psl8(struct cxl *adapter)
-{
- unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
-
- pr_devel("CXL adapter wide TLBIA & SLBIA\n");
-
- cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);
-
- cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
- while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
- if (time_after_eq(jiffies, timeout)) {
- dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
- return -EBUSY;
- }
- if (!cxl_ops->link_ok(adapter, NULL))
- return -EIO;
- cpu_relax();
- }
-
- cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
- while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
- if (time_after_eq(jiffies, timeout)) {
- dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
- return -EBUSY;
- }
- if (!cxl_ops->link_ok(adapter, NULL))
- return -EIO;
- cpu_relax();
- }
- return 0;
-}
-
-int cxl_data_cache_flush(struct cxl *adapter)
-{
- u64 reg;
- unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
-
- /*
- * Do a datacache flush only if the datacache is available.
- * In the case of PSL9D the datacache is absent, hence the flush
- * operation would time out.
- */
- if (adapter->native->no_data_cache) {
- pr_devel("No PSL data cache. Ignoring cache flush req.\n");
- return 0;
- }
-
- pr_devel("Flushing data cache\n");
- reg = cxl_p1_read(adapter, CXL_PSL_Control);
- reg |= CXL_PSL_Control_Fr;
- cxl_p1_write(adapter, CXL_PSL_Control, reg);
-
- reg = cxl_p1_read(adapter, CXL_PSL_Control);
- while ((reg & CXL_PSL_Control_Fs_MASK) != CXL_PSL_Control_Fs_Complete) {
- if (time_after_eq(jiffies, timeout)) {
- dev_warn(&adapter->dev, "WARNING: cache flush timed out!\n");
- return -EBUSY;
- }
-
- if (!cxl_ops->link_ok(adapter, NULL)) {
- dev_warn(&adapter->dev, "WARNING: link down when flushing cache\n");
- return -EIO;
- }
- cpu_relax();
- reg = cxl_p1_read(adapter, CXL_PSL_Control);
- }
-
- reg &= ~CXL_PSL_Control_Fr;
- cxl_p1_write(adapter, CXL_PSL_Control, reg);
- return 0;
-}
-
-static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
-{
- int rc;
-
- /* 1. Disable SSTP by writing 0 to SSTP1[V] */
- cxl_p2n_write(afu, CXL_SSTP1_An, 0);
-
- /* 2. Invalidate all SLB entries */
- if ((rc = cxl_afu_slbia(afu)))
- return rc;
-
- /* 3. Set SSTP0_An */
- cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);
-
- /* 4. Set SSTP1_An */
- cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);
-
- return 0;
-}
-
-/* Using per slice version may improve performance here. (ie. SLBIA_An) */
-static void slb_invalid(struct cxl_context *ctx)
-{
- struct cxl *adapter = ctx->afu->adapter;
- u64 slbia;
-
- WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));
-
- cxl_p1_write(adapter, CXL_PSL_LBISEL,
- ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
- be32_to_cpu(ctx->elem->lpid));
- cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);
-
- while (1) {
- if (!cxl_ops->link_ok(adapter, NULL))
- break;
- slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
- if (!(slbia & CXL_TLB_SLB_P))
- break;
- cpu_relax();
- }
-}
-
-static int do_process_element_cmd(struct cxl_context *ctx,
- u64 cmd, u64 pe_state)
-{
- u64 state;
- unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
- int rc = 0;
-
- trace_cxl_llcmd(ctx, cmd);
-
- WARN_ON(!ctx->afu->enabled);
-
- ctx->elem->software_state = cpu_to_be32(pe_state);
- smp_wmb();
- *(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
- smp_mb();
- cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
- while (1) {
- if (time_after_eq(jiffies, timeout)) {
- dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
- rc = -EBUSY;
- goto out;
- }
- if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
- dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
- rc = -EIO;
- goto out;
- }
- state = be64_to_cpup(ctx->afu->native->sw_command_status);
- if (state == ~0ULL) {
- pr_err("cxl: Error adding process element to AFU\n");
- rc = -1;
- goto out;
- }
- if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
- (cmd | (cmd >> 16) | ctx->pe))
- break;
- /*
- * The command won't finish in the PSL if there are
- * outstanding DSIs. Hence we need to yield here in
- * case there are outstanding DSIs that we need to
- * service. Tuning possibility: we could wait for a
- * while before scheduling.
- */
- schedule();
-
- }
-out:
- trace_cxl_llcmd_done(ctx, cmd, rc);
- return rc;
-}
-
-static int add_process_element(struct cxl_context *ctx)
-{
- int rc = 0;
-
- mutex_lock(&ctx->afu->native->spa_mutex);
- pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
- if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
- ctx->pe_inserted = true;
- pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
- mutex_unlock(&ctx->afu->native->spa_mutex);
- return rc;
-}
-
-static int terminate_process_element(struct cxl_context *ctx)
-{
- int rc = 0;
-
- /* fast path terminate if it's already invalid */
- if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
- return rc;
-
- mutex_lock(&ctx->afu->native->spa_mutex);
- pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
- /* We could be asked to terminate when the hw is down. That
- * should always succeed: it's not running if the hw has gone
- * away and is being reset.
- */
- if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
- rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
- CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
- ctx->elem->software_state = 0; /* Remove Valid bit */
- pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
- mutex_unlock(&ctx->afu->native->spa_mutex);
- return rc;
-}
-
-static int remove_process_element(struct cxl_context *ctx)
-{
- int rc = 0;
-
- mutex_lock(&ctx->afu->native->spa_mutex);
- pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);
-
- /* We could be asked to remove when the hw is down. Again, if
- * the hw is down, the PE is gone, so we succeed.
- */
- if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
- rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);
-
- if (!rc)
- ctx->pe_inserted = false;
- if (cxl_is_power8())
- slb_invalid(ctx);
- pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
- mutex_unlock(&ctx->afu->native->spa_mutex);
-
- return rc;
-}
-
-void cxl_assign_psn_space(struct cxl_context *ctx)
-{
- if (!ctx->afu->pp_size || ctx->master) {
- ctx->psn_phys = ctx->afu->psn_phys;
- ctx->psn_size = ctx->afu->adapter->ps_size;
- } else {
- ctx->psn_phys = ctx->afu->psn_phys +
- (ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
- ctx->psn_size = ctx->afu->pp_size;
- }
-}
-
-static int activate_afu_directed(struct cxl_afu *afu)
-{
- int rc;
-
- dev_info(&afu->dev, "Activating AFU directed mode\n");
-
- afu->num_procs = afu->max_procs_virtualised;
- if (afu->native->spa == NULL) {
- if (cxl_alloc_spa(afu, CXL_MODE_DIRECTED))
- return -ENOMEM;
- }
- attach_spa(afu);
-
- cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
- if (cxl_is_power8())
- cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
- cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);
-
- afu->current_mode = CXL_MODE_DIRECTED;
-
- if ((rc = cxl_chardev_m_afu_add(afu)))
- return rc;
-
- if ((rc = cxl_sysfs_afu_m_add(afu)))
- goto err;
-
- if ((rc = cxl_chardev_s_afu_add(afu)))
- goto err1;
-
- return 0;
-err1:
- cxl_sysfs_afu_m_remove(afu);
-err:
- cxl_chardev_afu_remove(afu);
- return rc;
-}
-
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
-#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
-#else
-#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
-#endif
-
-u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9)
-{
- u64 sr = 0;
-
- set_endian(sr);
- if (master)
- sr |= CXL_PSL_SR_An_MP;
- if (mfspr(SPRN_LPCR) & LPCR_TC)
- sr |= CXL_PSL_SR_An_TC;
-
- if (kernel) {
- if (!real_mode)
- sr |= CXL_PSL_SR_An_R;
- sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
- } else {
- sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
- if (radix_enabled())
- sr |= CXL_PSL_SR_An_HV;
- else
- sr &= ~(CXL_PSL_SR_An_HV);
- if (!test_tsk_thread_flag(current, TIF_32BIT))
- sr |= CXL_PSL_SR_An_SF;
- }
- if (p9) {
- if (radix_enabled())
- sr |= CXL_PSL_SR_An_XLAT_ror;
- else
- sr |= CXL_PSL_SR_An_XLAT_hpt;
- }
- return sr;
-}
-
-static u64 calculate_sr(struct cxl_context *ctx)
-{
- return cxl_calculate_sr(ctx->master, ctx->kernel, false,
- cxl_is_power9());
-}
-
-static void update_ivtes_directed(struct cxl_context *ctx)
-{
- bool need_update = (ctx->status == STARTED);
- int r;
-
- if (need_update) {
- WARN_ON(terminate_process_element(ctx));
- WARN_ON(remove_process_element(ctx));
- }
-
- for (r = 0; r < CXL_IRQ_RANGES; r++) {
- ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
- ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
- }
-
- /*
- * Theoretically we could use the update llcmd, instead of a
- * terminate/remove/add (or if an atomic update was required we could
- * do a suspend/update/resume), however it seems there might be issues
- * with the update llcmd on some cards (including those using an XSL on
- * an ASIC) so for now it's safest to go with the commands that are
- * known to work. In the future if we come across a situation where the
- * card may be performing transactions using the same PE while we are
- * doing this update we might need to revisit this.
- */
- if (need_update)
- WARN_ON(add_process_element(ctx));
-}
-
-static int process_element_entry_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
-{
- u32 pid;
- int rc;
-
- cxl_assign_psn_space(ctx);
-
- ctx->elem->ctxtime = 0; /* disable */
- ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
- ctx->elem->haurp = 0; /* disable */
-
- if (ctx->kernel)
- pid = 0;
- else {
- if (ctx->mm == NULL) {
- pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
- __func__, ctx->pe, pid_nr(ctx->pid));
- return -EINVAL;
- }
- pid = ctx->mm->context.id;
- }
-
- /* Assign a unique TIDR (thread id) for the current thread */
- if (!(ctx->tidr) && (ctx->assign_tidr)) {
- rc = set_thread_tidr(current);
- if (rc)
- return -ENODEV;
- ctx->tidr = current->thread.tidr;
- pr_devel("%s: current tidr: %d\n", __func__, ctx->tidr);
- }
-
- ctx->elem->common.tid = cpu_to_be32(ctx->tidr);
- ctx->elem->common.pid = cpu_to_be32(pid);
-
- ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));
-
- ctx->elem->common.csrp = 0; /* disable */
-
- cxl_prefault(ctx, wed);
-
- /*
- * Ensure we have the multiplexed PSL interrupt set up to take faults
- * for kernel contexts that may not have allocated any AFU IRQs at all:
- */
- if (ctx->irqs.range[0] == 0) {
- ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
- ctx->irqs.range[0] = 1;
- }
-
- ctx->elem->common.amr = cpu_to_be64(amr);
- ctx->elem->common.wed = cpu_to_be64(wed);
-
- return 0;
-}
-
-int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
-{
- int result;
-
- /* fill the process element entry */
- result = process_element_entry_psl9(ctx, wed, amr);
- if (result)
- return result;
-
- update_ivtes_directed(ctx);
-
- /* first guy needs to enable */
- result = cxl_ops->afu_check_and_enable(ctx->afu);
- if (result)
- return result;
-
- return add_process_element(ctx);
-}
-
-int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
-{
- u32 pid;
- int result;
-
- cxl_assign_psn_space(ctx);
-
- ctx->elem->ctxtime = 0; /* disable */
- ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
- ctx->elem->haurp = 0; /* disable */
- ctx->elem->u.sdr = cpu_to_be64(mfspr(SPRN_SDR1));
-
- pid = current->pid;
- if (ctx->kernel)
- pid = 0;
- ctx->elem->common.tid = 0;
- ctx->elem->common.pid = cpu_to_be32(pid);
-
- ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));
-
- ctx->elem->common.csrp = 0; /* disable */
- ctx->elem->common.u.psl8.aurp0 = 0; /* disable */
- ctx->elem->common.u.psl8.aurp1 = 0; /* disable */
-
- cxl_prefault(ctx, wed);
-
- ctx->elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0);
- ctx->elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1);
-
- /*
- * Ensure we have the multiplexed PSL interrupt set up to take faults
- * for kernel contexts that may not have allocated any AFU IRQs at all:
- */
- if (ctx->irqs.range[0] == 0) {
- ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
- ctx->irqs.range[0] = 1;
- }
-
- update_ivtes_directed(ctx);
-
- ctx->elem->common.amr = cpu_to_be64(amr);
- ctx->elem->common.wed = cpu_to_be64(wed);
-
- /* first guy needs to enable */
- if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
- return result;
-
- return add_process_element(ctx);
-}
-
-static int deactivate_afu_directed(struct cxl_afu *afu)
-{
- dev_info(&afu->dev, "Deactivating AFU directed mode\n");
-
- afu->current_mode = 0;
- afu->num_procs = 0;
-
- cxl_sysfs_afu_m_remove(afu);
- cxl_chardev_afu_remove(afu);
-
- /*
- * The CAIA section 2.2.1 indicates that the procedure for starting and
- * stopping an AFU in AFU directed mode is AFU specific, which is not
- * ideal since this code is generic and with one exception has no
- * knowledge of the AFU. This is in contrast to the procedure for
- * disabling a dedicated process AFU, which is documented to just
- * require a reset. The architecture does indicate that both an AFU
- * reset and an AFU disable should result in the AFU being disabled and
- * we do both followed by a PSL purge for safety.
- *
- * Notably we used to have some issues with the disable sequence on PSL
- * cards, which is why we ended up using this heavy weight procedure in
- * the first place, however a bug was discovered that had rendered the
- * disable operation ineffective, so it is conceivable that was the
- * sole explanation for those difficulties. Careful regression testing
- * is recommended if anyone attempts to remove or reorder these
- * operations.
- *
- * The XSL on the Mellanox CX4 behaves a little differently from the
- * PSL based cards and will time out an AFU reset if the AFU is still
- * enabled. That card is special in that we do have a means to identify
- * it from this code, so in that case we skip the reset and just use a
- * disable/purge to avoid the timeout and corresponding noise in the
- * kernel log.
- */
- if (afu->adapter->native->sl_ops->needs_reset_before_disable)
- cxl_ops->afu_reset(afu);
- cxl_afu_disable(afu);
- cxl_psl_purge(afu);
-
- return 0;
-}
-
-int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu)
-{
- dev_info(&afu->dev, "Activating dedicated process mode\n");
-
- /*
- * If XSL is set to dedicated mode (Set in PSL_SCNTL reg), the
- * XSL and AFU are programmed to work with a single context.
- * The context information should be configured in the SPA area
- * index 0 (so PSL_SPAP must be configured before enabling the
- * AFU).
- */
- afu->num_procs = 1;
- if (afu->native->spa == NULL) {
- if (cxl_alloc_spa(afu, CXL_MODE_DEDICATED))
- return -ENOMEM;
- }
- attach_spa(afu);
-
- cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);
- cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);
-
- afu->current_mode = CXL_MODE_DEDICATED;
-
- return cxl_chardev_d_afu_add(afu);
-}
-
-int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu)
-{
- dev_info(&afu->dev, "Activating dedicated process mode\n");
-
- cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);
-
- cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
- cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0); /* disable */
- cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
- cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
- cxl_p1n_write(afu, CXL_HAURP_An, 0); /* disable */
- cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));
-
- cxl_p2n_write(afu, CXL_CSRP_An, 0); /* disable */
- cxl_p2n_write(afu, CXL_AURP0_An, 0); /* disable */
- cxl_p2n_write(afu, CXL_AURP1_An, 0); /* disable */
-
- afu->current_mode = CXL_MODE_DEDICATED;
- afu->num_procs = 1;
-
- return cxl_chardev_d_afu_add(afu);
-}
-
-void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx)
-{
- int r;
-
- for (r = 0; r < CXL_IRQ_RANGES; r++) {
- ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
- ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
- }
-}
-
-void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx)
-{
- struct cxl_afu *afu = ctx->afu;
-
- cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
- (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
- (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
- (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
- ((u64)ctx->irqs.offset[3] & 0xffff));
- cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
- (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
- (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
- (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
- ((u64)ctx->irqs.range[3] & 0xffff));
-}
-
-int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
-{
- struct cxl_afu *afu = ctx->afu;
- int result;
-
- /* fill the process element entry */
- result = process_element_entry_psl9(ctx, wed, amr);
- if (result)
- return result;
-
- if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
- afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
-
- ctx->elem->software_state = cpu_to_be32(CXL_PE_SOFTWARE_STATE_V);
- /*
- * Ideally we should do a wmb() here to make sure the changes to the
- * PE are visible to the card before we call afu_enable.
- * On ppc64 though all mmios are preceded by a 'sync' instruction, hence
- * we don't need one here.
- */
-
- result = cxl_ops->afu_reset(afu);
- if (result)
- return result;
-
- return afu_enable(afu);
-}
-
-int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
-{
- struct cxl_afu *afu = ctx->afu;
- u64 pid;
- int rc;
-
- pid = (u64)current->pid << 32;
- if (ctx->kernel)
- pid = 0;
- cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid);
-
- cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx));
-
- if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
- return rc;
-
- cxl_prefault(ctx, wed);
-
- if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
- afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
-
- cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);
-
- /* master only context for dedicated */
- cxl_assign_psn_space(ctx);
-
- if ((rc = cxl_ops->afu_reset(afu)))
- return rc;
-
- cxl_p2n_write(afu, CXL_PSL_WED_An, wed);
-
- return afu_enable(afu);
-}
-
-static int deactivate_dedicated_process(struct cxl_afu *afu)
-{
- dev_info(&afu->dev, "Deactivating dedicated process mode\n");
-
- afu->current_mode = 0;
- afu->num_procs = 0;
-
- cxl_chardev_afu_remove(afu);
-
- return 0;
-}
-
-static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode)
-{
- if (mode == CXL_MODE_DIRECTED)
- return deactivate_afu_directed(afu);
- if (mode == CXL_MODE_DEDICATED)
- return deactivate_dedicated_process(afu);
- return 0;
-}
-
-static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
-{
- if (!mode)
- return 0;
- if (!(mode & afu->modes_supported))
- return -EINVAL;
-
- if (!cxl_ops->link_ok(afu->adapter, afu)) {
- WARN(1, "Device link is down, refusing to activate!\n");
- return -EIO;
- }
-
- if (mode == CXL_MODE_DIRECTED)
- return activate_afu_directed(afu);
- if ((mode == CXL_MODE_DEDICATED) &&
- (afu->adapter->native->sl_ops->activate_dedicated_process))
- return afu->adapter->native->sl_ops->activate_dedicated_process(afu);
-
- return -EINVAL;
-}
-
-static int native_attach_process(struct cxl_context *ctx, bool kernel,
- u64 wed, u64 amr)
-{
- if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
- WARN(1, "Device link is down, refusing to attach process!\n");
- return -EIO;
- }
-
- ctx->kernel = kernel;
- if ((ctx->afu->current_mode == CXL_MODE_DIRECTED) &&
- (ctx->afu->adapter->native->sl_ops->attach_afu_directed))
- return ctx->afu->adapter->native->sl_ops->attach_afu_directed(ctx, wed, amr);
-
- if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
- (ctx->afu->adapter->native->sl_ops->attach_dedicated_process))
- return ctx->afu->adapter->native->sl_ops->attach_dedicated_process(ctx, wed, amr);
-
- return -EINVAL;
-}
-
-static inline int detach_process_native_dedicated(struct cxl_context *ctx)
-{
- /*
- * The CAIA section 2.1.1 indicates that we need to do an AFU reset to
- * stop the AFU in dedicated mode (we therefore do not make that
- * optional like we do in the afu directed path). It does not indicate
- * that we need to do an explicit disable (which should occur
- * implicitly as part of the reset) or purge, but we do these as well
- * to be on the safe side.
- *
- * Notably we used to have some issues with the disable sequence
- * (before the sequence was spelled out in the architecture) which is
- * why we were so heavy weight in the first place, however a bug was
- * discovered that had rendered the disable operation ineffective, so
- * it is conceivable that was the sole explanation for those
- * difficulties. Point is, we should be careful and do some regression
- * testing if we ever attempt to remove any part of this procedure.
- */
- cxl_ops->afu_reset(ctx->afu);
- cxl_afu_disable(ctx->afu);
- cxl_psl_purge(ctx->afu);
- return 0;
-}
-
-static void native_update_ivtes(struct cxl_context *ctx)
-{
- if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
- return update_ivtes_directed(ctx);
- if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
- (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes))
- return ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
- WARN(1, "native_update_ivtes: Bad mode\n");
-}
-
-static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
-{
- if (!ctx->pe_inserted)
- return 0;
- if (terminate_process_element(ctx))
- return -1;
- if (remove_process_element(ctx))
- return -1;
-
- return 0;
-}
-
-static int native_detach_process(struct cxl_context *ctx)
-{
- trace_cxl_detach(ctx);
-
- if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
- return detach_process_native_dedicated(ctx);
-
- return detach_process_native_afu_directed(ctx);
-}
-
-static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
-{
- /* If the adapter has gone away, we can't get any meaningful
- * information.
- */
- if (!cxl_ops->link_ok(afu->adapter, afu))
- return -EIO;
-
- info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
- info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
- if (cxl_is_power8())
- info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
- info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
- info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
- info->proc_handle = 0;
-
- return 0;
-}
-
-void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx)
-{
- u64 fir1, serr;
-
- fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR1);
-
- dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
- if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
- serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
- cxl_afu_decode_psl_serr(ctx->afu, serr);
- }
-}
-
-void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx)
-{
- u64 fir1, fir2, fir_slice, serr, afu_debug;
-
- fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
- fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
- fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
- afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);
-
- dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
- dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
- if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
- serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
- cxl_afu_decode_psl_serr(ctx->afu, serr);
- }
- dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
- dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
-}
-
-static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
- u64 dsisr, u64 errstat)
-{
-
- dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
-
- if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers)
- ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx);
-
- if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) {
- dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
- ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter);
- }
-
- return cxl_ops->ack_irq(ctx, 0, errstat);
-}
-
-static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr)
-{
- if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_TRANS))
- return true;
-
- if ((cxl_is_power9()) && (dsisr & CXL_PSL9_DSISR_An_TF))
- return true;
-
- return false;
-}
-
-irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
-{
- if (cxl_is_translation_fault(afu, irq_info->dsisr))
- cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
- else
- cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
-
- return IRQ_HANDLED;
-}
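-/*
- * Illustrative note (not part of the original source): writing
- * TFC_An_AE restarts the PSL transaction with an address error
- * reported to the AFU, while TFC_An_A acknowledges a non-translation
- * fault, so either write lets the PSL make progress when the
- * interrupt cannot be handled normally.
- */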
-
-static irqreturn_t native_irq_multiplexed(int irq, void *data)
-{
- struct cxl_afu *afu = data;
- struct cxl_context *ctx;
- struct cxl_irq_info irq_info;
- u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An);
- int ph, ret = IRQ_HANDLED, res;
-
- /* check if eeh kicked in while the interrupt was in flight */
- if (unlikely(phreg == ~0ULL)) {
- dev_warn(&afu->dev,
- "Ignoring slice interrupt(%d) due to fenced card",
- irq);
- return IRQ_HANDLED;
- }
- /* Mask the pe-handle from register value */
- ph = phreg & 0xffff;
- if ((res = native_get_irq_info(afu, &irq_info))) {
- WARN(1, "Unable to get CXL IRQ Info: %i\n", res);
- if (afu->adapter->native->sl_ops->fail_irq)
- return afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
- return ret;
- }
-
- rcu_read_lock();
- ctx = idr_find(&afu->contexts_idr, ph);
- if (ctx) {
- if (afu->adapter->native->sl_ops->handle_interrupt)
- ret = afu->adapter->native->sl_ops->handle_interrupt(irq, ctx, &irq_info);
- rcu_read_unlock();
- return ret;
- }
- rcu_read_unlock();
-
- WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
- " %016llx\n(Possible AFU HW issue - was a term/remove acked"
- " with outstanding transactions?)\n", ph, irq_info.dsisr,
- irq_info.dar);
- if (afu->adapter->native->sl_ops->fail_irq)
- ret = afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
- return ret;
-}
-
-static void native_irq_wait(struct cxl_context *ctx)
-{
- u64 dsisr;
- int timeout = 1000;
- int ph;
-
- /*
- * Wait until no further interrupts are presented by the PSL
- * for this context.
- */
- while (timeout--) {
- ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
- if (ph != ctx->pe)
- return;
- dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
- if (cxl_is_power8() &&
- ((dsisr & CXL_PSL_DSISR_PENDING) == 0))
- return;
- if (cxl_is_power9() &&
- ((dsisr & CXL_PSL9_DSISR_PENDING) == 0))
- return;
- /*
- * We are waiting for the workqueue to process our
- * irq, so we need to let that run here.
- */
- msleep(1);
- }
-
- dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
- " DSISR %016llx!\n", ph, dsisr);
- return;
-}
-
-static irqreturn_t native_slice_irq_err(int irq, void *data)
-{
- struct cxl_afu *afu = data;
- u64 errstat, serr, afu_error, dsisr;
- u64 fir_slice, afu_debug, irq_mask;
-
- /*
- * slice err interrupt is only used with full PSL (no XSL)
- */
- serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
- errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
- afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
- dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
- cxl_afu_decode_psl_serr(afu, serr);
-
- if (cxl_is_power8()) {
- fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
- afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
- dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
- dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
- }
- dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
- dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
- dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);
-
- /* mask off the IRQ so it won't retrigger until the AFU is reset */
- irq_mask = (serr & CXL_PSL_SERR_An_IRQS) >> 32;
- serr |= irq_mask;
- cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
- dev_info(&afu->dev, "Further such interrupts will be masked until the AFU is reset\n");
-
- return IRQ_HANDLED;
-}
-
-void cxl_native_err_irq_dump_regs_psl9(struct cxl *adapter)
-{
- u64 fir1;
-
- fir1 = cxl_p1_read(adapter, CXL_PSL9_FIR1);
- dev_crit(&adapter->dev, "PSL_FIR: 0x%016llx\n", fir1);
-}
-
-void cxl_native_err_irq_dump_regs_psl8(struct cxl *adapter)
-{
- u64 fir1, fir2;
-
- fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
- fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
- dev_crit(&adapter->dev,
- "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n",
- fir1, fir2);
-}
-
-static irqreturn_t native_irq_err(int irq, void *data)
-{
- struct cxl *adapter = data;
- u64 err_ivte;
-
- WARN(1, "CXL ERROR interrupt %i\n", irq);
-
- err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
- dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);
-
- if (adapter->native->sl_ops->debugfs_stop_trace) {
- dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
- adapter->native->sl_ops->debugfs_stop_trace(adapter);
- }
-
- if (adapter->native->sl_ops->err_irq_dump_registers)
- adapter->native->sl_ops->err_irq_dump_registers(adapter);
-
- return IRQ_HANDLED;
-}
-
-int cxl_native_register_psl_err_irq(struct cxl *adapter)
-{
- int rc;
-
- adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
- dev_name(&adapter->dev));
- if (!adapter->irq_name)
- return -ENOMEM;
-
- if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
- &adapter->native->err_hwirq,
- &adapter->native->err_virq,
- adapter->irq_name))) {
- kfree(adapter->irq_name);
- adapter->irq_name = NULL;
- return rc;
- }
-
- cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);
-
- return 0;
-}
-
-void cxl_native_release_psl_err_irq(struct cxl *adapter)
-{
- if (adapter->native->err_virq == 0 ||
- adapter->native->err_virq !=
- irq_find_mapping(NULL, adapter->native->err_hwirq))
- return;
-
- cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
- cxl_unmap_irq(adapter->native->err_virq, adapter);
- cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
- kfree(adapter->irq_name);
- adapter->native->err_virq = 0;
-}
-
-int cxl_native_register_serr_irq(struct cxl_afu *afu)
-{
- u64 serr;
- int rc;
-
- afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
- dev_name(&afu->dev));
- if (!afu->err_irq_name)
- return -ENOMEM;
-
- if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu,
- &afu->serr_hwirq,
- &afu->serr_virq, afu->err_irq_name))) {
- kfree(afu->err_irq_name);
- afu->err_irq_name = NULL;
- return rc;
- }
-
- serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
- if (cxl_is_power8())
- serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
- if (cxl_is_power9()) {
- /*
- * By default, all errors are masked. So don't set all masks.
- * Slice errors will be transferred.
- */
- serr = (serr & ~0xff0000007fffffffULL) | (afu->serr_hwirq & 0xffff);
- }
- cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
-
- return 0;
-}
-
-void cxl_native_release_serr_irq(struct cxl_afu *afu)
-{
- if (afu->serr_virq == 0 ||
- afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
- return;
-
- cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
- cxl_unmap_irq(afu->serr_virq, afu);
- cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
- kfree(afu->err_irq_name);
- afu->serr_virq = 0;
-}
-
-int cxl_native_register_psl_irq(struct cxl_afu *afu)
-{
- int rc;
-
- afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
- dev_name(&afu->dev));
- if (!afu->psl_irq_name)
- return -ENOMEM;
-
- if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
- afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
- afu->psl_irq_name))) {
- kfree(afu->psl_irq_name);
- afu->psl_irq_name = NULL;
- }
- return rc;
-}
-
-void cxl_native_release_psl_irq(struct cxl_afu *afu)
-{
- if (afu->native->psl_virq == 0 ||
- afu->native->psl_virq !=
- irq_find_mapping(NULL, afu->native->psl_hwirq))
- return;
-
- cxl_unmap_irq(afu->native->psl_virq, afu);
- cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
- kfree(afu->psl_irq_name);
- afu->native->psl_virq = 0;
-}
-
-static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
-{
- u64 dsisr;
-
- pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);
-
- /* Clear PSL_DSISR[PE] */
- dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
- cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);
-
- /* Write 1s to clear error status bits */
- cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
-}
-
-static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
-{
- trace_cxl_psl_irq_ack(ctx, tfc);
- if (tfc)
- cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
- if (psl_reset_mask)
- recover_psl_err(ctx->afu, psl_reset_mask);
-
- return 0;
-}
-
-int cxl_check_error(struct cxl_afu *afu)
-{
- return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
-}
-
-static bool native_support_attributes(const char *attr_name,
- enum cxl_attrs type)
-{
- return true;
-}
-
-static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
-{
- if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
- return -EIO;
- if (unlikely(off >= afu->crs_len))
- return -ERANGE;
- *out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
- (cr * afu->crs_len) + off);
- return 0;
-}
-
-static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
-{
- if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
- return -EIO;
- if (unlikely(off >= afu->crs_len))
- return -ERANGE;
- *out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
- (cr * afu->crs_len) + off);
- return 0;
-}
-
-static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out)
-{
- u64 aligned_off = off & ~0x3L;
- u32 val;
- int rc;
-
- rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
- if (!rc)
- *out = (val >> ((off & 0x3) * 8)) & 0xffff;
- return rc;
-}
-
-static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
-{
- u64 aligned_off = off & ~0x3L;
- u32 val;
- int rc;
-
- rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
- if (!rc)
- *out = (val >> ((off & 0x3) * 8)) & 0xff;
- return rc;
-}
-
-static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
-{
- if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
- return -EIO;
- if (unlikely(off >= afu->crs_len))
- return -ERANGE;
- out_le32(afu->native->afu_desc_mmio + afu->crs_offset +
- (cr * afu->crs_len) + off, in);
- return 0;
-}
-
-static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
-{
- u64 aligned_off = off & ~0x3L;
- u32 val32, mask, shift;
- int rc;
-
- rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
- if (rc)
- return rc;
- shift = (off & 0x3) * 8;
- WARN_ON(shift == 24);
- mask = 0xffff << shift;
- val32 = (val32 & ~mask) | (in << shift);
-
- rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
- return rc;
-}
-
-static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
-{
- u64 aligned_off = off & ~0x3L;
- u32 val32, mask, shift;
- int rc;
-
- rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
- if (rc)
- return rc;
- shift = (off & 0x3) * 8;
- mask = 0xff << shift;
- val32 = (val32 & ~mask) | (in << shift);
-
- rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
- return rc;
-}
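
The 8- and 16-bit configuration-record writers above are read-modify-write wrappers around the aligned 32-bit accessor. A minimal standalone sketch of the same masking arithmetic, outside the driver; rmw16() and the sample values are illustrative, not driver symbols:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Insert a 16-bit value into a 32-bit word at a byte offset,
	 * mirroring the masking arithmetic of native_afu_cr_write16().
	 */
	static uint32_t rmw16(uint32_t word, uint64_t off, uint16_t in)
	{
		uint32_t shift = (off & 0x3) * 8;  /* byte position in word */
		uint32_t mask = 0xffffu << shift;  /* bits being replaced */

		return (word & ~mask) | ((uint32_t)in << shift);
	}

	int main(void)
	{
		/* 0xBEEF at byte offset 2 of 0x11223344 gives 0xbeef3344 */
		printf("%#010x\n", rmw16(0x11223344, 2, 0xBEEF));
		return 0;
	}
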
-
-const struct cxl_backend_ops cxl_native_ops = {
- .module = THIS_MODULE,
- .adapter_reset = cxl_pci_reset,
- .alloc_one_irq = cxl_pci_alloc_one_irq,
- .release_one_irq = cxl_pci_release_one_irq,
- .alloc_irq_ranges = cxl_pci_alloc_irq_ranges,
- .release_irq_ranges = cxl_pci_release_irq_ranges,
- .setup_irq = cxl_pci_setup_irq,
- .handle_psl_slice_error = native_handle_psl_slice_error,
- .psl_interrupt = NULL,
- .ack_irq = native_ack_irq,
- .irq_wait = native_irq_wait,
- .attach_process = native_attach_process,
- .detach_process = native_detach_process,
- .update_ivtes = native_update_ivtes,
- .support_attributes = native_support_attributes,
- .link_ok = cxl_adapter_link_ok,
- .release_afu = cxl_pci_release_afu,
- .afu_read_err_buffer = cxl_pci_afu_read_err_buffer,
- .afu_check_and_enable = native_afu_check_and_enable,
- .afu_activate_mode = native_afu_activate_mode,
- .afu_deactivate_mode = native_afu_deactivate_mode,
- .afu_reset = native_afu_reset,
- .afu_cr_read8 = native_afu_cr_read8,
- .afu_cr_read16 = native_afu_cr_read16,
- .afu_cr_read32 = native_afu_cr_read32,
- .afu_cr_read64 = native_afu_cr_read64,
- .afu_cr_write8 = native_afu_cr_write8,
- .afu_cr_write16 = native_afu_cr_write16,
- .afu_cr_write32 = native_afu_cr_write32,
- .read_adapter_vpd = cxl_pci_read_adapter_vpd,
-};
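
cxl_native_ops is one instance of the driver's backend indirection: common code dispatches through a pointer chosen at probe time, so the same paths can run on bare metal or under a hypervisor. A pared-down sketch of that function-pointer-table pattern, with hypothetical names:

	#include <stdio.h>

	struct backend_ops {
		int (*adapter_reset)(void *adapter);
	};

	static int native_reset(void *adapter)
	{
		(void)adapter;
		printf("native reset\n");
		return 0;
	}

	static const struct backend_ops native_ops = {
		.adapter_reset = native_reset,
	};

	int main(void)
	{
		/* backend selected once, at probe time */
		const struct backend_ops *ops = &native_ops;

		return ops->adapter_reset(NULL);
	}
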
diff --git a/drivers/misc/cxl/of.c b/drivers/misc/cxl/of.c
deleted file mode 100644
index e26ee85279fa..000000000000
--- a/drivers/misc/cxl/of.c
+++ /dev/null
@@ -1,346 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2015 IBM Corp.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-
-#include "cxl.h"
-
-static int read_phys_addr(struct device_node *np, char *prop_name,
- struct cxl_afu *afu)
-{
- int i, len, entry_size, naddr, nsize, type;
- u64 addr, size;
- const __be32 *prop;
-
- naddr = of_n_addr_cells(np);
- nsize = of_n_size_cells(np);
-
- prop = of_get_property(np, prop_name, &len);
- if (prop) {
- entry_size = naddr + nsize;
- for (i = 0; i < (len / 4); i += entry_size, prop += entry_size) {
- type = be32_to_cpu(prop[0]);
- addr = of_read_number(prop, naddr);
- size = of_read_number(&prop[naddr], nsize);
- switch (type) {
- case 0: /* unit address */
- afu->guest->handle = addr;
- break;
- case 1: /* p2 area */
- afu->guest->p2n_phys += addr;
- afu->guest->p2n_size = size;
- break;
- case 2: /* problem state area */
- afu->psn_phys += addr;
- afu->adapter->ps_size = size;
- break;
- default:
- pr_err("Invalid address type %d found in %s property of AFU\n",
- type, prop_name);
- return -EINVAL;
- }
- }
- }
- return 0;
-}
-
-static int read_vpd(struct cxl *adapter, struct cxl_afu *afu)
-{
- char vpd[256];
- int rc;
- size_t len = sizeof(vpd);
-
- memset(vpd, 0, len);
-
- if (adapter)
- rc = cxl_guest_read_adapter_vpd(adapter, vpd, len);
- else
- rc = cxl_guest_read_afu_vpd(afu, vpd, len);
-
- if (rc > 0) {
- cxl_dump_debug_buffer(vpd, rc);
- rc = 0;
- }
- return rc;
-}
-
-int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np)
-{
- return of_property_read_reg(afu_np, 0, &afu->guest->handle, NULL);
-}
-
-int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *np)
-{
- int i, rc;
- u16 device_id, vendor_id;
- u32 val = 0, class_code;
-
- /* Properties are read in the same order as listed in PAPR */
-
- rc = read_phys_addr(np, "reg", afu);
- if (rc)
- return rc;
-
- rc = read_phys_addr(np, "assigned-addresses", afu);
- if (rc)
- return rc;
-
- if (afu->psn_phys == 0)
- afu->psa = false;
- else
- afu->psa = true;
-
- of_property_read_u32(np, "ibm,#processes", &afu->max_procs_virtualised);
-
- if (cxl_verbose)
- read_vpd(NULL, afu);
-
- of_property_read_u32(np, "ibm,max-ints-per-process", &afu->guest->max_ints);
- afu->irqs_max = afu->guest->max_ints;
-
- if (!of_property_read_u32(np, "ibm,min-ints-per-process", &afu->pp_irqs)) {
- /* One extra interrupt for the PSL interrupt is already
- * included. Remove it now to keep only AFU interrupts and
- * match the native case.
- */
- afu->pp_irqs--;
- }
-
- of_property_read_u64(np, "ibm,error-buffer-size", &afu->eb_len);
- afu->eb_offset = 0;
-
- of_property_read_u64(np, "ibm,config-record-size", &afu->crs_len);
- afu->crs_offset = 0;
-
- of_property_read_u32(np, "ibm,#config-records", &afu->crs_num);
-
- if (cxl_verbose) {
- for (i = 0; i < afu->crs_num; i++) {
- rc = cxl_ops->afu_cr_read16(afu, i, PCI_DEVICE_ID,
- &device_id);
- if (!rc)
- pr_info("record %d - device-id: %#x\n",
- i, device_id);
- rc = cxl_ops->afu_cr_read16(afu, i, PCI_VENDOR_ID,
- &vendor_id);
- if (!rc)
- pr_info("record %d - vendor-id: %#x\n",
- i, vendor_id);
- rc = cxl_ops->afu_cr_read32(afu, i, PCI_CLASS_REVISION,
- &class_code);
- if (!rc) {
- class_code >>= 8;
- pr_info("record %d - class-code: %#x\n",
- i, class_code);
- }
- }
- }
- /*
- * if "ibm,process-mmio" doesn't exist then per-process mmio is
- * not supported
- */
- val = 0;
- if (!of_property_read_u32(np, "ibm,process-mmio", &val) && val == 1)
- afu->pp_psa = true;
- else
- afu->pp_psa = false;
-
- if (!of_property_read_u32(np, "ibm,function-error-interrupt", &val))
- afu->serr_hwirq = val;
-
- pr_devel("AFU handle: %#llx\n", afu->guest->handle);
- pr_devel("p2n_phys: %#llx (size %#llx)\n",
- afu->guest->p2n_phys, afu->guest->p2n_size);
- pr_devel("psn_phys: %#llx (size %#llx)\n",
- afu->psn_phys, afu->adapter->ps_size);
- pr_devel("Max number of processes virtualised=%i\n",
- afu->max_procs_virtualised);
- pr_devel("Per-process irqs min=%i, max=%i\n", afu->pp_irqs,
- afu->irqs_max);
- pr_devel("Slice error interrupt=%#lx\n", afu->serr_hwirq);
-
- return 0;
-}
-
-static int read_adapter_irq_config(struct cxl *adapter, struct device_node *np)
-{
- const __be32 *ranges;
- int len, nranges, i;
- struct irq_avail *cur;
-
- ranges = of_get_property(np, "interrupt-ranges", &len);
- if (ranges == NULL || len < (2 * sizeof(int)))
- return -EINVAL;
-
- /*
- * encoded array of two cells per entry, each cell encoded as
- * with encode-int
- */
- nranges = len / (2 * sizeof(int));
- if (nranges == 0 || (nranges * 2 * sizeof(int)) != len)
- return -EINVAL;
-
- adapter->guest->irq_avail = kcalloc(nranges, sizeof(struct irq_avail),
- GFP_KERNEL);
- if (adapter->guest->irq_avail == NULL)
- return -ENOMEM;
-
- adapter->guest->irq_base_offset = be32_to_cpu(ranges[0]);
- for (i = 0; i < nranges; i++) {
- cur = &adapter->guest->irq_avail[i];
- cur->offset = be32_to_cpu(ranges[i * 2]);
- cur->range = be32_to_cpu(ranges[i * 2 + 1]);
- cur->bitmap = bitmap_zalloc(cur->range, GFP_KERNEL);
- if (cur->bitmap == NULL)
- goto err;
- if (cur->offset < adapter->guest->irq_base_offset)
- adapter->guest->irq_base_offset = cur->offset;
- if (cxl_verbose)
- pr_info("available IRQ range: %#lx-%#lx (%lu)\n",
- cur->offset, cur->offset + cur->range - 1,
- cur->range);
- }
- adapter->guest->irq_nranges = nranges;
- spin_lock_init(&adapter->guest->irq_alloc_lock);
-
- return 0;
-err:
- for (i--; i >= 0; i--) {
- cur = &adapter->guest->irq_avail[i];
- bitmap_free(cur->bitmap);
- }
- kfree(adapter->guest->irq_avail);
- adapter->guest->irq_avail = NULL;
- return -ENOMEM;
-}
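
read_adapter_irq_config() above walks an array of two big-endian cells per entry, <offset range>. A small sketch of the same walk, assuming the cells were already byte-swapped and using hypothetical range data:

	#include <stdio.h>

	int main(void)
	{
		unsigned int ranges[] = { 0x100, 16, 0x200, 8 };
		int nranges = sizeof(ranges) / sizeof(ranges[0]) / 2;

		for (int i = 0; i < nranges; i++)
			printf("available IRQ range: %#x-%#x (%u)\n",
			       ranges[i * 2],
			       ranges[i * 2] + ranges[i * 2 + 1] - 1,
			       ranges[i * 2 + 1]);
		return 0;
	}
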
-
-int cxl_of_read_adapter_handle(struct cxl *adapter, struct device_node *np)
-{
- return of_property_read_reg(np, 0, &adapter->guest->handle, NULL);
-}
-
-int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np)
-{
- int rc;
- const char *p;
- u32 val = 0;
-
- /* Properties are read in the same order as listed in PAPR */
-
- if ((rc = read_adapter_irq_config(adapter, np)))
- return rc;
-
- if (!of_property_read_u32(np, "ibm,caia-version", &val)) {
- adapter->caia_major = (val & 0xFF00) >> 8;
- adapter->caia_minor = val & 0xFF;
- }
-
- if (!of_property_read_u32(np, "ibm,psl-revision", &val))
- adapter->psl_rev = val;
-
- if (!of_property_read_string(np, "status", &p)) {
- adapter->guest->status = kasprintf(GFP_KERNEL, "%s", p);
- if (adapter->guest->status == NULL)
- return -ENOMEM;
- }
-
- if (!of_property_read_u32(np, "vendor-id", &val))
- adapter->guest->vendor = val;
-
- if (!of_property_read_u32(np, "device-id", &val))
- adapter->guest->device = val;
-
- if (!of_property_read_u32(np, "subsystem-vendor-id", &val))
- adapter->guest->subsystem_vendor = val;
-
- if (!of_property_read_u32(np, "subsystem-id", &val))
- adapter->guest->subsystem = val;
-
- if (cxl_verbose)
- read_vpd(adapter, NULL);
-
- return 0;
-}
-
-static void cxl_of_remove(struct platform_device *pdev)
-{
- struct cxl *adapter;
- int afu;
-
- adapter = dev_get_drvdata(&pdev->dev);
- for (afu = 0; afu < adapter->slices; afu++)
- cxl_guest_remove_afu(adapter->afu[afu]);
-
- cxl_guest_remove_adapter(adapter);
-}
-
-static void cxl_of_shutdown(struct platform_device *pdev)
-{
- cxl_of_remove(pdev);
-}
-
-int cxl_of_probe(struct platform_device *pdev)
-{
- struct device_node *np = NULL;
- struct device_node *afu_np = NULL;
- struct cxl *adapter = NULL;
- int ret;
- int slice = 0, slice_ok = 0;
-
- dev_err_once(&pdev->dev, "DEPRECATION: cxl is deprecated and will be removed in a future kernel release\n");
-
- pr_devel("in %s\n", __func__);
-
- np = pdev->dev.of_node;
- if (np == NULL)
- return -ENODEV;
-
- /* init adapter */
- adapter = cxl_guest_init_adapter(np, pdev);
- if (IS_ERR(adapter)) {
- dev_err(&pdev->dev, "guest_init_adapter failed: %li\n", PTR_ERR(adapter));
- return PTR_ERR(adapter);
- }
-
- /* init afu */
- for_each_child_of_node(np, afu_np) {
- if ((ret = cxl_guest_init_afu(adapter, slice, afu_np)))
- dev_err(&pdev->dev, "AFU %i failed to initialise: %i\n",
- slice, ret);
- else
- slice_ok++;
- slice++;
- }
-
- if (slice_ok == 0) {
- dev_info(&pdev->dev, "No active AFU");
- adapter->slices = 0;
- }
-
- return 0;
-}
-
-static const struct of_device_id cxl_of_match[] = {
- { .compatible = "ibm,coherent-platform-facility",},
- {},
-};
-MODULE_DEVICE_TABLE(of, cxl_of_match);
-
-struct platform_driver cxl_of_driver = {
- .driver = {
- .name = "cxl_of",
- .of_match_table = cxl_of_match,
- .owner = THIS_MODULE
- },
- .probe = cxl_of_probe,
- .remove = cxl_of_remove,
- .shutdown = cxl_of_shutdown,
-};
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
deleted file mode 100644
index 92bf7c5c7b35..000000000000
--- a/drivers/misc/cxl/pci.c
+++ /dev/null
@@ -1,2103 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/pci_regs.h>
-#include <linux/pci_ids.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/sort.h>
-#include <linux/pci.h>
-#include <linux/of.h>
-#include <linux/delay.h>
-#include <asm/opal.h>
-#include <asm/msi_bitmap.h>
-#include <asm/pnv-pci.h>
-#include <asm/io.h>
-#include <asm/reg.h>
-
-#include "cxl.h"
-#include <misc/cxl.h>
-
-
-#define CXL_PCI_VSEC_ID 0x1280
-#define CXL_VSEC_MIN_SIZE 0x80
-
-#define CXL_READ_VSEC_LENGTH(dev, vsec, dest) \
- { \
- pci_read_config_word(dev, vsec + 0x6, dest); \
- *dest >>= 4; \
- }
-#define CXL_READ_VSEC_NAFUS(dev, vsec, dest) \
- pci_read_config_byte(dev, vsec + 0x8, dest)
-
-#define CXL_READ_VSEC_STATUS(dev, vsec, dest) \
- pci_read_config_byte(dev, vsec + 0x9, dest)
-#define CXL_STATUS_SECOND_PORT 0x80
-#define CXL_STATUS_MSI_X_FULL 0x40
-#define CXL_STATUS_MSI_X_SINGLE 0x20
-#define CXL_STATUS_FLASH_RW 0x08
-#define CXL_STATUS_FLASH_RO 0x04
-#define CXL_STATUS_LOADABLE_AFU 0x02
-#define CXL_STATUS_LOADABLE_PSL 0x01
-/* If we see these features we won't try to use the card */
-#define CXL_UNSUPPORTED_FEATURES \
- (CXL_STATUS_MSI_X_FULL | CXL_STATUS_MSI_X_SINGLE)
-
-#define CXL_READ_VSEC_MODE_CONTROL(dev, vsec, dest) \
- pci_read_config_byte(dev, vsec + 0xa, dest)
-#define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \
- pci_write_config_byte(dev, vsec + 0xa, val)
-#define CXL_VSEC_PROTOCOL_MASK 0xe0
-#define CXL_VSEC_PROTOCOL_1024TB 0x80
-#define CXL_VSEC_PROTOCOL_512TB 0x40
-#define CXL_VSEC_PROTOCOL_256TB 0x20 /* Power 8/9 uses this */
-#define CXL_VSEC_PROTOCOL_ENABLE 0x01
-
-#define CXL_READ_VSEC_PSL_REVISION(dev, vsec, dest) \
- pci_read_config_word(dev, vsec + 0xc, dest)
-#define CXL_READ_VSEC_CAIA_MINOR(dev, vsec, dest) \
- pci_read_config_byte(dev, vsec + 0xe, dest)
-#define CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, dest) \
- pci_read_config_byte(dev, vsec + 0xf, dest)
-#define CXL_READ_VSEC_BASE_IMAGE(dev, vsec, dest) \
- pci_read_config_word(dev, vsec + 0x10, dest)
-
-#define CXL_READ_VSEC_IMAGE_STATE(dev, vsec, dest) \
- pci_read_config_byte(dev, vsec + 0x13, dest)
-#define CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, val) \
- pci_write_config_byte(dev, vsec + 0x13, val)
-#define CXL_VSEC_USER_IMAGE_LOADED 0x80 /* RO */
-#define CXL_VSEC_PERST_LOADS_IMAGE 0x20 /* RW */
-#define CXL_VSEC_PERST_SELECT_USER 0x10 /* RW */
-
-#define CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, dest) \
- pci_read_config_dword(dev, vsec + 0x20, dest)
-#define CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, dest) \
- pci_read_config_dword(dev, vsec + 0x24, dest)
-#define CXL_READ_VSEC_PS_OFF(dev, vsec, dest) \
- pci_read_config_dword(dev, vsec + 0x28, dest)
-#define CXL_READ_VSEC_PS_SIZE(dev, vsec, dest) \
- pci_read_config_dword(dev, vsec + 0x2c, dest)
-
-
-/* This works a little differently from the p1/p2 register accesses to make it
- * easier to pull out individual fields */
-#define AFUD_READ(afu, off) in_be64(afu->native->afu_desc_mmio + off)
-#define AFUD_READ_LE(afu, off) in_le64(afu->native->afu_desc_mmio + off)
-#define EXTRACT_PPC_BIT(val, bit) (!!(val & PPC_BIT(bit)))
-#define EXTRACT_PPC_BITS(val, bs, be) ((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be))
-
-#define AFUD_READ_INFO(afu) AFUD_READ(afu, 0x0)
-#define AFUD_NUM_INTS_PER_PROC(val) EXTRACT_PPC_BITS(val, 0, 15)
-#define AFUD_NUM_PROCS(val) EXTRACT_PPC_BITS(val, 16, 31)
-#define AFUD_NUM_CRS(val) EXTRACT_PPC_BITS(val, 32, 47)
-#define AFUD_MULTIMODE(val) EXTRACT_PPC_BIT(val, 48)
-#define AFUD_PUSH_BLOCK_TRANSFER(val) EXTRACT_PPC_BIT(val, 55)
-#define AFUD_DEDICATED_PROCESS(val) EXTRACT_PPC_BIT(val, 59)
-#define AFUD_AFU_DIRECTED(val) EXTRACT_PPC_BIT(val, 61)
-#define AFUD_TIME_SLICED(val) EXTRACT_PPC_BIT(val, 63)
-#define AFUD_READ_CR(afu) AFUD_READ(afu, 0x20)
-#define AFUD_CR_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
-#define AFUD_READ_CR_OFF(afu) AFUD_READ(afu, 0x28)
-#define AFUD_READ_PPPSA(afu) AFUD_READ(afu, 0x30)
-#define AFUD_PPPSA_PP(val) EXTRACT_PPC_BIT(val, 6)
-#define AFUD_PPPSA_PSA(val) EXTRACT_PPC_BIT(val, 7)
-#define AFUD_PPPSA_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
-#define AFUD_READ_PPPSA_OFF(afu) AFUD_READ(afu, 0x38)
-#define AFUD_READ_EB(afu) AFUD_READ(afu, 0x40)
-#define AFUD_EB_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
-#define AFUD_READ_EB_OFF(afu) AFUD_READ(afu, 0x48)
-
-static const struct pci_device_id cxl_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
- { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
- { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
- { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0601), },
- { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0623), },
- { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0628), },
- { }
-};
-MODULE_DEVICE_TABLE(pci, cxl_pci_tbl);
-
-
-/*
- * Mostly using these wrappers to avoid confusion:
- * priv 1 is BAR2, while priv 2 is BAR0
- */
-static inline resource_size_t p1_base(struct pci_dev *dev)
-{
- return pci_resource_start(dev, 2);
-}
-
-static inline resource_size_t p1_size(struct pci_dev *dev)
-{
- return pci_resource_len(dev, 2);
-}
-
-static inline resource_size_t p2_base(struct pci_dev *dev)
-{
- return pci_resource_start(dev, 0);
-}
-
-static inline resource_size_t p2_size(struct pci_dev *dev)
-{
- return pci_resource_len(dev, 0);
-}
-
-static int find_cxl_vsec(struct pci_dev *dev)
-{
- return pci_find_vsec_capability(dev, PCI_VENDOR_ID_IBM, CXL_PCI_VSEC_ID);
-}
-
-static void dump_cxl_config_space(struct pci_dev *dev)
-{
- int vsec;
- u32 val;
-
- dev_info(&dev->dev, "dump_cxl_config_space\n");
-
- pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &val);
- dev_info(&dev->dev, "BAR0: %#.8x\n", val);
- pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val);
- dev_info(&dev->dev, "BAR1: %#.8x\n", val);
- pci_read_config_dword(dev, PCI_BASE_ADDRESS_2, &val);
- dev_info(&dev->dev, "BAR2: %#.8x\n", val);
- pci_read_config_dword(dev, PCI_BASE_ADDRESS_3, &val);
- dev_info(&dev->dev, "BAR3: %#.8x\n", val);
- pci_read_config_dword(dev, PCI_BASE_ADDRESS_4, &val);
- dev_info(&dev->dev, "BAR4: %#.8x\n", val);
- pci_read_config_dword(dev, PCI_BASE_ADDRESS_5, &val);
- dev_info(&dev->dev, "BAR5: %#.8x\n", val);
-
- dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n",
- p1_base(dev), p1_size(dev));
- dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n",
- p2_base(dev), p2_size(dev));
- dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n",
- pci_resource_start(dev, 4), pci_resource_len(dev, 4));
-
- if (!(vsec = find_cxl_vsec(dev)))
- return;
-
-#define show_reg(name, what) \
- dev_info(&dev->dev, "cxl vsec: %30s: %#x\n", name, what)
-
- pci_read_config_dword(dev, vsec + 0x0, &val);
- show_reg("Cap ID", (val >> 0) & 0xffff);
- show_reg("Cap Ver", (val >> 16) & 0xf);
- show_reg("Next Cap Ptr", (val >> 20) & 0xfff);
- pci_read_config_dword(dev, vsec + 0x4, &val);
- show_reg("VSEC ID", (val >> 0) & 0xffff);
- show_reg("VSEC Rev", (val >> 16) & 0xf);
- show_reg("VSEC Length", (val >> 20) & 0xfff);
- pci_read_config_dword(dev, vsec + 0x8, &val);
- show_reg("Num AFUs", (val >> 0) & 0xff);
- show_reg("Status", (val >> 8) & 0xff);
- show_reg("Mode Control", (val >> 16) & 0xff);
- show_reg("Reserved", (val >> 24) & 0xff);
- pci_read_config_dword(dev, vsec + 0xc, &val);
- show_reg("PSL Rev", (val >> 0) & 0xffff);
- show_reg("CAIA Ver", (val >> 16) & 0xffff);
- pci_read_config_dword(dev, vsec + 0x10, &val);
- show_reg("Base Image Rev", (val >> 0) & 0xffff);
- show_reg("Reserved", (val >> 16) & 0x0fff);
- show_reg("Image Control", (val >> 28) & 0x3);
- show_reg("Reserved", (val >> 30) & 0x1);
- show_reg("Image Loaded", (val >> 31) & 0x1);
-
- pci_read_config_dword(dev, vsec + 0x14, &val);
- show_reg("Reserved", val);
- pci_read_config_dword(dev, vsec + 0x18, &val);
- show_reg("Reserved", val);
- pci_read_config_dword(dev, vsec + 0x1c, &val);
- show_reg("Reserved", val);
-
- pci_read_config_dword(dev, vsec + 0x20, &val);
- show_reg("AFU Descriptor Offset", val);
- pci_read_config_dword(dev, vsec + 0x24, &val);
- show_reg("AFU Descriptor Size", val);
- pci_read_config_dword(dev, vsec + 0x28, &val);
- show_reg("Problem State Offset", val);
- pci_read_config_dword(dev, vsec + 0x2c, &val);
- show_reg("Problem State Size", val);
-
- pci_read_config_dword(dev, vsec + 0x30, &val);
- show_reg("Reserved", val);
- pci_read_config_dword(dev, vsec + 0x34, &val);
- show_reg("Reserved", val);
- pci_read_config_dword(dev, vsec + 0x38, &val);
- show_reg("Reserved", val);
- pci_read_config_dword(dev, vsec + 0x3c, &val);
- show_reg("Reserved", val);
-
- pci_read_config_dword(dev, vsec + 0x40, &val);
- show_reg("PSL Programming Port", val);
- pci_read_config_dword(dev, vsec + 0x44, &val);
- show_reg("PSL Programming Control", val);
-
- pci_read_config_dword(dev, vsec + 0x48, &val);
- show_reg("Reserved", val);
- pci_read_config_dword(dev, vsec + 0x4c, &val);
- show_reg("Reserved", val);
-
- pci_read_config_dword(dev, vsec + 0x50, &val);
- show_reg("Flash Address Register", val);
- pci_read_config_dword(dev, vsec + 0x54, &val);
- show_reg("Flash Size Register", val);
- pci_read_config_dword(dev, vsec + 0x58, &val);
- show_reg("Flash Status/Control Register", val);
- pci_read_config_dword(dev, vsec + 0x58, &val);
- show_reg("Flash Data Port", val);
-
-#undef show_reg
-}
-
-static void dump_afu_descriptor(struct cxl_afu *afu)
-{
- u64 val, afu_cr_num, afu_cr_off, afu_cr_len;
- int i;
-
-#define show_reg(name, what) \
- dev_info(&afu->dev, "afu desc: %30s: %#llx\n", name, what)
-
- val = AFUD_READ_INFO(afu);
- show_reg("num_ints_per_process", AFUD_NUM_INTS_PER_PROC(val));
- show_reg("num_of_processes", AFUD_NUM_PROCS(val));
- show_reg("num_of_afu_CRs", AFUD_NUM_CRS(val));
- show_reg("req_prog_mode", val & 0xffffULL);
- afu_cr_num = AFUD_NUM_CRS(val);
-
- val = AFUD_READ(afu, 0x8);
- show_reg("Reserved", val);
- val = AFUD_READ(afu, 0x10);
- show_reg("Reserved", val);
- val = AFUD_READ(afu, 0x18);
- show_reg("Reserved", val);
-
- val = AFUD_READ_CR(afu);
- show_reg("Reserved", (val >> (63-7)) & 0xff);
- show_reg("AFU_CR_len", AFUD_CR_LEN(val));
- afu_cr_len = AFUD_CR_LEN(val) * 256;
-
- val = AFUD_READ_CR_OFF(afu);
- afu_cr_off = val;
- show_reg("AFU_CR_offset", val);
-
- val = AFUD_READ_PPPSA(afu);
- show_reg("PerProcessPSA_control", (val >> (63-7)) & 0xff);
- show_reg("PerProcessPSA Length", AFUD_PPPSA_LEN(val));
-
- val = AFUD_READ_PPPSA_OFF(afu);
- show_reg("PerProcessPSA_offset", val);
-
- val = AFUD_READ_EB(afu);
- show_reg("Reserved", (val >> (63-7)) & 0xff);
- show_reg("AFU_EB_len", AFUD_EB_LEN(val));
-
- val = AFUD_READ_EB_OFF(afu);
- show_reg("AFU_EB_offset", val);
-
- for (i = 0; i < afu_cr_num; i++) {
- val = AFUD_READ_LE(afu, afu_cr_off + i * afu_cr_len);
- show_reg("CR Vendor", val & 0xffff);
- show_reg("CR Device", (val >> 16) & 0xffff);
- }
-#undef show_reg
-}
-
-#define P8_CAPP_UNIT0_ID 0xBA
-#define P8_CAPP_UNIT1_ID 0xBE
-#define P9_CAPP_UNIT0_ID 0xC0
-#define P9_CAPP_UNIT1_ID 0xE0
-
-static int get_phb_index(struct device_node *np, u32 *phb_index)
-{
- if (of_property_read_u32(np, "ibm,phb-index", phb_index))
- return -ENODEV;
- return 0;
-}
-
-static u64 get_capp_unit_id(struct device_node *np, u32 phb_index)
-{
- /*
- * POWER 8:
- * - For chips other than POWER8NVL, we only have CAPP 0,
- * irrespective of which PHB is used.
- * - For POWER8NVL, assume CAPP 0 is attached to PHB0 and
- * CAPP 1 is attached to PHB1.
- */
- if (cxl_is_power8()) {
- if (!pvr_version_is(PVR_POWER8NVL))
- return P8_CAPP_UNIT0_ID;
-
- if (phb_index == 0)
- return P8_CAPP_UNIT0_ID;
-
- if (phb_index == 1)
- return P8_CAPP_UNIT1_ID;
- }
-
- /*
- * POWER 9:
- * PEC0 (PHB0). Capp ID = CAPP0 (0b1100_0000)
- * PEC1 (PHB1 - PHB2). No capi mode
- * PEC2 (PHB3 - PHB4 - PHB5): Capi mode on PHB3 only. Capp ID = CAPP1 (0b1110_0000)
- */
- if (cxl_is_power9()) {
- if (phb_index == 0)
- return P9_CAPP_UNIT0_ID;
-
- if (phb_index == 3)
- return P9_CAPP_UNIT1_ID;
- }
-
- return 0;
-}
-
-int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
- u32 *phb_index, u64 *capp_unit_id)
-{
- int rc;
- struct device_node *np;
- u32 id;
-
- if (!(np = pnv_pci_get_phb_node(dev)))
- return -ENODEV;
-
- while (np && of_property_read_u32(np, "ibm,chip-id", &id))
- np = of_get_next_parent(np);
- if (!np)
- return -ENODEV;
-
- *chipid = id;
-
- rc = get_phb_index(np, phb_index);
- if (rc) {
- pr_err("cxl: invalid phb index\n");
- of_node_put(np);
- return rc;
- }
-
- *capp_unit_id = get_capp_unit_id(np, *phb_index);
- of_node_put(np);
- if (!*capp_unit_id) {
- pr_err("cxl: No capp unit found for PHB[%lld,%d]. Make sure the adapter is on a capi-compatible slot\n",
- *chipid, *phb_index);
- return -ENODEV;
- }
-
- return 0;
-}
-
-static DEFINE_MUTEX(indications_mutex);
-
-static int get_phb_indications(struct pci_dev *dev, u64 *capiind, u64 *asnind,
- u64 *nbwind)
-{
- static u32 val[3];
- struct device_node *np;
-
- mutex_lock(&indications_mutex);
- if (!val[0]) {
- if (!(np = pnv_pci_get_phb_node(dev))) {
- mutex_unlock(&indications_mutex);
- return -ENODEV;
- }
-
- if (of_property_read_u32_array(np, "ibm,phb-indications", val, 3)) {
- val[2] = 0x0300UL; /* legacy values */
- val[1] = 0x0400UL;
- val[0] = 0x0200UL;
- }
- of_node_put(np);
- }
- *capiind = val[0];
- *asnind = val[1];
- *nbwind = val[2];
- mutex_unlock(&indications_mutex);
- return 0;
-}
-
-int cxl_get_xsl9_dsnctl(struct pci_dev *dev, u64 capp_unit_id, u64 *reg)
-{
- u64 xsl_dsnctl;
- u64 capiind, asnind, nbwind;
-
- /*
- * CAPI Identifier bits [0:7]
- * bit 61:60 MSI bits --> 0
- * bit 59 TVT selector --> 0
- */
- if (get_phb_indications(dev, &capiind, &asnind, &nbwind))
- return -ENODEV;
-
- /*
- * Tell XSL where to route data to.
- * The field chipid should match the PHB CAPI_CMPM register
- */
- xsl_dsnctl = (capiind << (63-15)); /* Bit 57 */
- xsl_dsnctl |= (capp_unit_id << (63-15));
-
- /* nMMU_ID defaults to: b'000001001' */
- xsl_dsnctl |= ((u64)0x09 << (63-28));
-
- /*
- * Used to identify CAPI packets which should be sorted into
- * the Non-Blocking queues by the PHB. This field should match
- * the PHB PBL_NBW_CMPM register
- * nbwind=0x03, bits [57:58], must include capi indicator.
- * Not supported on P9 DD1.
- */
- xsl_dsnctl |= (nbwind << (63-55));
-
- /*
- * Upper 16b address bits of ASB_Notify messages sent to the
- * system. Need to match the PHB's ASN Compare/Mask Register.
- * Not supported on P9 DD1.
- */
- xsl_dsnctl |= asnind;
-
- *reg = xsl_dsnctl;
- return 0;
-}
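
The (63-n) shifts above follow the PowerPC MSB-0 convention: documented bit n of a 64-bit register is 1ULL << (63 - n), so a field whose rightmost documented bit is n is placed with a left shift of (63 - n). A tiny worked example for the nMMU_ID field:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* nMMU_ID: 9-bit field ending at MSB-0 bit 28 (bits 20:28) */
		uint64_t nmmu_id = (uint64_t)0x09 << (63 - 28);

		/* prints 0x0000000480000000 */
		printf("%#018llx\n", (unsigned long long)nmmu_id);
		return 0;
	}
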
-
-static int init_implementation_adapter_regs_psl9(struct cxl *adapter,
- struct pci_dev *dev)
-{
- u64 xsl_dsnctl, psl_fircntl;
- u64 chipid;
- u32 phb_index;
- u64 capp_unit_id;
- u64 psl_debug;
- int rc;
-
- rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
- if (rc)
- return rc;
-
- rc = cxl_get_xsl9_dsnctl(dev, capp_unit_id, &xsl_dsnctl);
- if (rc)
- return rc;
-
- cxl_p1_write(adapter, CXL_XSL9_DSNCTL, xsl_dsnctl);
-
- /* Set fir_cntl to recommended value for production env */
- psl_fircntl = (0x2ULL << (63-3)); /* ce_report */
- psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */
- psl_fircntl |= 0x1ULL; /* ce_thresh */
- cxl_p1_write(adapter, CXL_PSL9_FIR_CNTL, psl_fircntl);
-
- /* Set up the PSL to transmit packets on the PCIe link before the
- * CAPP is enabled. Make sure that CAPP virtual machines are disabled.
- */
- cxl_p1_write(adapter, CXL_PSL9_DSNDCTL, 0x0001001000012A10ULL);
-
- /*
- * A response to an ASB_Notify request is returned by the
- * system as an MMIO write to the address defined in
- * the PSL_TNR_ADDR register.
- * keep the Reset Value: 0x00020000E0000000
- */
-
- /* Enable XSL rty limit */
- cxl_p1_write(adapter, CXL_XSL9_DEF, 0x51F8000000000005ULL);
-
- /* Change XSL_INV dummy read threshold */
- cxl_p1_write(adapter, CXL_XSL9_INV, 0x0000040007FFC200ULL);
-
- if (phb_index == 3) {
- /* disable machines 31-47 and 20-27 for DMA */
- cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000FF3FFFF0000ULL);
- }
-
- /* Snoop machines */
- cxl_p1_write(adapter, CXL_PSL9_APCDEDALLOC, 0x800F000200000000ULL);
-
- /* Enable NORST and DD2 features */
- cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0xC000000000000000ULL);
-
- /*
- * Check if the PSL has a data cache. We need to flush the adapter
- * data cache as it's about to be removed.
- */
- psl_debug = cxl_p1_read(adapter, CXL_PSL9_DEBUG);
- if (psl_debug & CXL_PSL_DEBUG_CDC) {
- dev_dbg(&dev->dev, "No data-cache present\n");
- adapter->native->no_data_cache = true;
- }
-
- return 0;
-}
-
-static int init_implementation_adapter_regs_psl8(struct cxl *adapter, struct pci_dev *dev)
-{
- u64 psl_dsnctl, psl_fircntl;
- u64 chipid;
- u32 phb_index;
- u64 capp_unit_id;
- int rc;
-
- rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
- if (rc)
- return rc;
-
- psl_dsnctl = 0x0000900000000000ULL; /* pteupd ttype, scdone */
- psl_dsnctl |= (0x2ULL << (63-38)); /* MMIO hang pulse: 256 us */
- /* Tell PSL where to route data to */
- psl_dsnctl |= (chipid << (63-5));
- psl_dsnctl |= (capp_unit_id << (63-13));
-
- cxl_p1_write(adapter, CXL_PSL_DSNDCTL, psl_dsnctl);
- cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
- /* snoop write mask */
- cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
- /* set fir_cntl to recommended value for production env */
- psl_fircntl = (0x2ULL << (63-3)); /* ce_report */
- psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */
- psl_fircntl |= 0x1ULL; /* ce_thresh */
- cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, psl_fircntl);
- /* for debugging with trace arrays */
- cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);
-
- return 0;
-}
-
-/* PSL */
-#define TBSYNC_CAL(n) (((u64)n & 0x7) << (63-3))
-#define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
-/* For the PSL this is a multiplier, for 0 < n <= 7: */
-#define PSL_2048_250MHZ_CYCLES 1
-
-static void write_timebase_ctrl_psl8(struct cxl *adapter)
-{
- cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
- TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES));
-}
-
-static u64 timebase_read_psl9(struct cxl *adapter)
-{
- return cxl_p1_read(adapter, CXL_PSL9_Timebase);
-}
-
-static u64 timebase_read_psl8(struct cxl *adapter)
-{
- return cxl_p1_read(adapter, CXL_PSL_Timebase);
-}
-
-static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
-{
- struct device_node *np;
-
- adapter->psl_timebase_synced = false;
-
- if (!(np = pnv_pci_get_phb_node(dev)))
- return;
-
- /* Do not fail when CAPP timebase sync is not supported by OPAL */
- of_node_get(np);
- if (!of_property_present(np, "ibm,capp-timebase-sync")) {
- of_node_put(np);
- dev_info(&dev->dev, "PSL timebase inactive: OPAL support missing\n");
- return;
- }
- of_node_put(np);
-
- /*
- * Setup PSL Timebase Control and Status register
- * with the recommended Timebase Sync Count value
- */
- if (adapter->native->sl_ops->write_timebase_ctrl)
- adapter->native->sl_ops->write_timebase_ctrl(adapter);
-
- /* Enable PSL Timebase */
- cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000);
- cxl_p1_write(adapter, CXL_PSL_Control, CXL_PSL_Control_tb);
-
- return;
-}
-
-static int init_implementation_afu_regs_psl9(struct cxl_afu *afu)
-{
- return 0;
-}
-
-static int init_implementation_afu_regs_psl8(struct cxl_afu *afu)
-{
- /* read/write masks for this slice */
- cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL);
- /* APC read/write masks for this slice */
- cxl_p1n_write(afu, CXL_PSL_COALLOC_A, 0xFF000000FEFEFEFEULL);
- /* for debugging with trace arrays */
- cxl_p1n_write(afu, CXL_PSL_SLICE_TRACE, 0x0000FFFF00000000ULL);
- cxl_p1n_write(afu, CXL_PSL_RXCTL_A, CXL_PSL_RXCTL_AFUHP_4S);
-
- return 0;
-}
-
-int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq,
- unsigned int virq)
-{
- struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
-
- return pnv_cxl_ioda_msi_setup(dev, hwirq, virq);
-}
-
-int cxl_update_image_control(struct cxl *adapter)
-{
- struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
- int rc;
- int vsec;
- u8 image_state;
-
- if (!(vsec = find_cxl_vsec(dev))) {
- dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
- return -ENODEV;
- }
-
- if ((rc = CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state))) {
- dev_err(&dev->dev, "failed to read image state: %i\n", rc);
- return rc;
- }
-
- if (adapter->perst_loads_image)
- image_state |= CXL_VSEC_PERST_LOADS_IMAGE;
- else
- image_state &= ~CXL_VSEC_PERST_LOADS_IMAGE;
-
- if (adapter->perst_select_user)
- image_state |= CXL_VSEC_PERST_SELECT_USER;
- else
- image_state &= ~CXL_VSEC_PERST_SELECT_USER;
-
- if ((rc = CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, image_state))) {
- dev_err(&dev->dev, "failed to update image control: %i\n", rc);
- return rc;
- }
-
- return 0;
-}
-
-int cxl_pci_alloc_one_irq(struct cxl *adapter)
-{
- struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
-
- return pnv_cxl_alloc_hwirqs(dev, 1);
-}
-
-void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq)
-{
- struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
-
- return pnv_cxl_release_hwirqs(dev, hwirq, 1);
-}
-
-int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
- struct cxl *adapter, unsigned int num)
-{
- struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
-
- return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num);
-}
-
-void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs,
- struct cxl *adapter)
-{
- struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
-
- pnv_cxl_release_hwirq_ranges(irqs, dev);
-}
-
-static int setup_cxl_bars(struct pci_dev *dev)
-{
- /* Safety check in case we get backported to < 3.17 without M64 */
- if ((p1_base(dev) < 0x100000000ULL) ||
- (p2_base(dev) < 0x100000000ULL)) {
- dev_err(&dev->dev, "ABORTING: M32 BAR assignment incompatible with CXL\n");
- return -ENODEV;
- }
-
- /*
- * BAR 4/5 has a special meaning for CXL and must be programmed with a
- * special value corresponding to the CXL protocol address range.
- * For POWER 8/9 that means bits 48:49 must be set to 10
- */
- pci_write_config_dword(dev, PCI_BASE_ADDRESS_4, 0x00000000);
- pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, 0x00020000);
-
- return 0;
-}
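
The magic value written to BAR5 above is a single address bit: 0x00020000 is bit 17 of the upper dword, i.e. bit 49 of the combined 64-bit BAR4/5 in LSB-0 numbering. As arithmetic:

	#include <stdio.h>

	int main(void)
	{
		printf("%#010lx\n", 1UL << (49 - 32)); /* 0x00020000 */
		return 0;
	}
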
-
-/* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */
-static int switch_card_to_cxl(struct pci_dev *dev)
-{
- int vsec;
- u8 val;
- int rc;
-
- dev_info(&dev->dev, "switch card to CXL\n");
-
- if (!(vsec = find_cxl_vsec(dev))) {
- dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
- return -ENODEV;
- }
-
- if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) {
- dev_err(&dev->dev, "failed to read current mode control: %i", rc);
- return rc;
- }
- val &= ~CXL_VSEC_PROTOCOL_MASK;
- val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
- if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) {
- dev_err(&dev->dev, "failed to enable CXL protocol: %i", rc);
- return rc;
- }
- /*
- * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states
- * we must wait 100ms after this mode switch before touching
- * PCIe config space.
- */
- msleep(100);
-
- return 0;
-}
-
-static int pci_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
-{
- u64 p1n_base, p2n_base, afu_desc;
- const u64 p1n_size = 0x100;
- const u64 p2n_size = 0x1000;
-
- p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size);
- p2n_base = p2_base(dev) + (afu->slice * p2n_size);
- afu->psn_phys = p2_base(dev) + (adapter->native->ps_off + (afu->slice * adapter->ps_size));
- afu_desc = p2_base(dev) + adapter->native->afu_desc_off + (afu->slice * adapter->native->afu_desc_size);
-
- if (!(afu->native->p1n_mmio = ioremap(p1n_base, p1n_size)))
- goto err;
- if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size)))
- goto err1;
- if (afu_desc) {
- if (!(afu->native->afu_desc_mmio = ioremap(afu_desc, adapter->native->afu_desc_size)))
- goto err2;
- }
-
- return 0;
-err2:
- iounmap(afu->p2n_mmio);
-err1:
- iounmap(afu->native->p1n_mmio);
-err:
- dev_err(&afu->dev, "Error mapping AFU MMIO regions\n");
- return -ENOMEM;
-}
-
-static void pci_unmap_slice_regs(struct cxl_afu *afu)
-{
- if (afu->p2n_mmio) {
- iounmap(afu->p2n_mmio);
- afu->p2n_mmio = NULL;
- }
- if (afu->native->p1n_mmio) {
- iounmap(afu->native->p1n_mmio);
- afu->native->p1n_mmio = NULL;
- }
- if (afu->native->afu_desc_mmio) {
- iounmap(afu->native->afu_desc_mmio);
- afu->native->afu_desc_mmio = NULL;
- }
-}
-
-void cxl_pci_release_afu(struct device *dev)
-{
- struct cxl_afu *afu = to_cxl_afu(dev);
-
- pr_devel("%s\n", __func__);
-
- idr_destroy(&afu->contexts_idr);
- cxl_release_spa(afu);
-
- kfree(afu->native);
- kfree(afu);
-}
-
-/* Expects AFU struct to have recently been zeroed out */
-static int cxl_read_afu_descriptor(struct cxl_afu *afu)
-{
- u64 val;
-
- val = AFUD_READ_INFO(afu);
- afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val);
- afu->max_procs_virtualised = AFUD_NUM_PROCS(val);
- afu->crs_num = AFUD_NUM_CRS(val);
-
- if (AFUD_AFU_DIRECTED(val))
- afu->modes_supported |= CXL_MODE_DIRECTED;
- if (AFUD_DEDICATED_PROCESS(val))
- afu->modes_supported |= CXL_MODE_DEDICATED;
- if (AFUD_TIME_SLICED(val))
- afu->modes_supported |= CXL_MODE_TIME_SLICED;
-
- val = AFUD_READ_PPPSA(afu);
- afu->pp_size = AFUD_PPPSA_LEN(val) * 4096;
- afu->psa = AFUD_PPPSA_PSA(val);
- if ((afu->pp_psa = AFUD_PPPSA_PP(val)))
- afu->native->pp_offset = AFUD_READ_PPPSA_OFF(afu);
-
- val = AFUD_READ_CR(afu);
- afu->crs_len = AFUD_CR_LEN(val) * 256;
- afu->crs_offset = AFUD_READ_CR_OFF(afu);
-
-
- /* eb_len is in multiple of 4K */
- afu->eb_len = AFUD_EB_LEN(AFUD_READ_EB(afu)) * 4096;
- afu->eb_offset = AFUD_READ_EB_OFF(afu);
-
- /* eb_off is 4K aligned so lower 12 bits are always zero */
- if (EXTRACT_PPC_BITS(afu->eb_offset, 0, 11) != 0) {
- dev_warn(&afu->dev,
- "Invalid AFU error buffer offset %Lx\n",
- afu->eb_offset);
- dev_info(&afu->dev,
- "Ignoring AFU error buffer in the descriptor\n");
- /* indicate that no afu buffer exists */
- afu->eb_len = 0;
- }
-
- return 0;
-}
-
-static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
-{
- int i, rc;
- u32 val;
-
- if (afu->psa && afu->adapter->ps_size <
- (afu->native->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
- dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n");
- return -ENODEV;
- }
-
- if (afu->pp_psa && (afu->pp_size < PAGE_SIZE))
- dev_warn(&afu->dev, "AFU uses pp_size(%#016llx) < PAGE_SIZE per-process PSA!\n", afu->pp_size);
-
- for (i = 0; i < afu->crs_num; i++) {
- rc = cxl_ops->afu_cr_read32(afu, i, 0, &val);
- if (rc || val == 0) {
- dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i);
- return -EINVAL;
- }
- }
-
- if ((afu->modes_supported & ~CXL_MODE_DEDICATED) && afu->max_procs_virtualised == 0) {
- /*
- * We could also check this for the dedicated process model
- * since the architecture indicates it should be set to 1, but
- * in that case we ignore the value and I'd rather not risk
- * breaking any existing dedicated process AFUs that left it as
- * 0 (not that I'm aware of any). It is clearly an error for an
- * AFU directed AFU to set this to 0, and would have previously
- * triggered a bug resulting in the maximum not being enforced
- * at all since idr_alloc treats 0 as no maximum.
- */
- dev_err(&afu->dev, "AFU does not support any processes\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int sanitise_afu_regs_psl9(struct cxl_afu *afu)
-{
- u64 reg;
-
- /*
- * Clear out any regs that contain either an IVTE or address or may be
- * waiting on an acknowledgment to try to be a bit safer as we bring
- * it online
- */
- reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
- if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
- dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
- if (cxl_ops->afu_reset(afu))
- return -EIO;
- if (cxl_afu_disable(afu))
- return -EIO;
- if (cxl_psl_purge(afu))
- return -EIO;
- }
- cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
- cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
- reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
- if (reg) {
- dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
- if (reg & CXL_PSL9_DSISR_An_TF)
- cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
- else
- cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
- }
- if (afu->adapter->native->sl_ops->register_serr_irq) {
- reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
- if (reg) {
- if (reg & ~0x000000007fffffff)
- dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
- cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
- }
- }
- reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
- if (reg) {
- dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
- cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
- }
-
- return 0;
-}
-
-static int sanitise_afu_regs_psl8(struct cxl_afu *afu)
-{
- u64 reg;
-
- /*
- * Clear out any regs that contain either an IVTE or address or may be
- * waiting on an acknowledgement to try to be a bit safer as we bring
- * it online
- */
- reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
- if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
- dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
- if (cxl_ops->afu_reset(afu))
- return -EIO;
- if (cxl_afu_disable(afu))
- return -EIO;
- if (cxl_psl_purge(afu))
- return -EIO;
- }
- cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
- cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, 0x0000000000000000);
- cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, 0x0000000000000000);
- cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
- cxl_p1n_write(afu, CXL_PSL_SPOffset_An, 0x0000000000000000);
- cxl_p1n_write(afu, CXL_HAURP_An, 0x0000000000000000);
- cxl_p2n_write(afu, CXL_CSRP_An, 0x0000000000000000);
- cxl_p2n_write(afu, CXL_AURP1_An, 0x0000000000000000);
- cxl_p2n_write(afu, CXL_AURP0_An, 0x0000000000000000);
- cxl_p2n_write(afu, CXL_SSTP1_An, 0x0000000000000000);
- cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000);
- reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
- if (reg) {
- dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
- if (reg & CXL_PSL_DSISR_TRANS)
- cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
- else
- cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
- }
- if (afu->adapter->native->sl_ops->register_serr_irq) {
- reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
- if (reg) {
- if (reg & ~0xffff)
- dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
- cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
- }
- }
- reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
- if (reg) {
- dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
- cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
- }
-
- return 0;
-}
-
-#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
-/*
- * afu_eb_read:
- * Called from sysfs to read the AFU error info buffer. The hardware only
- * supports 4/8-byte aligned access, so if the requested offset/count aren't
- * 8-byte aligned, the function uses a bounce buffer of at most PAGE_SIZE.
- */
-ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
- loff_t off, size_t count)
-{
- loff_t aligned_start, aligned_end;
- size_t aligned_length;
- void *tbuf;
- const void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset;
-
- if (count == 0 || off < 0 || (size_t)off >= afu->eb_len)
- return 0;
-
- /* calculate aligned read window */
- count = min((size_t)(afu->eb_len - off), count);
- aligned_start = round_down(off, 8);
- aligned_end = round_up(off + count, 8);
- aligned_length = aligned_end - aligned_start;
-
- /* max we can copy in one read is PAGE_SIZE */
- if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) {
- aligned_length = ERR_BUFF_MAX_COPY_SIZE;
- count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
- }
-
- /* use bounce buffer for copy */
- tbuf = (void *)__get_free_page(GFP_KERNEL);
- if (!tbuf)
- return -ENOMEM;
-
- /* perform aligned read from the mmio region */
- memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length);
- memcpy(buf, tbuf + (off & 0x7), count);
-
- free_page((unsigned long)tbuf);
-
- return count;
-}
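
The window arithmetic above rounds the requested [off, off+count) span out to 8-byte boundaries so the MMIO copy stays aligned, then returns the caller's slice from the bounce buffer. A standalone sketch of just that arithmetic, with sample values:

	#include <stdio.h>

	int main(void)
	{
		unsigned long off = 5, count = 10;
		unsigned long start = off & ~7UL;             /* round_down(off, 8) */
		unsigned long end = (off + count + 7) & ~7UL; /* round_up(off + count, 8) */

		/* reads bytes [0, 16) from MMIO, then copies 10 bytes from +5 */
		printf("window [%lu, %lu), length %lu, caller data at +%lu\n",
		       start, end, end - start, off & 0x7);
		return 0;
	}
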
-
-static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
-{
- int rc;
-
- if ((rc = pci_map_slice_regs(afu, adapter, dev)))
- return rc;
-
- if (adapter->native->sl_ops->sanitise_afu_regs) {
- rc = adapter->native->sl_ops->sanitise_afu_regs(afu);
- if (rc)
- goto err1;
- }
-
- /* We need to reset the AFU before we can read the AFU descriptor */
- if ((rc = cxl_ops->afu_reset(afu)))
- goto err1;
-
- if (cxl_verbose)
- dump_afu_descriptor(afu);
-
- if ((rc = cxl_read_afu_descriptor(afu)))
- goto err1;
-
- if ((rc = cxl_afu_descriptor_looks_ok(afu)))
- goto err1;
-
- if (adapter->native->sl_ops->afu_regs_init)
- if ((rc = adapter->native->sl_ops->afu_regs_init(afu)))
- goto err1;
-
- if (adapter->native->sl_ops->register_serr_irq)
- if ((rc = adapter->native->sl_ops->register_serr_irq(afu)))
- goto err1;
-
- if ((rc = cxl_native_register_psl_irq(afu)))
- goto err2;
-
- atomic_set(&afu->configured_state, 0);
- return 0;
-
-err2:
- if (adapter->native->sl_ops->release_serr_irq)
- adapter->native->sl_ops->release_serr_irq(afu);
-err1:
- pci_unmap_slice_regs(afu);
- return rc;
-}
-
-static void pci_deconfigure_afu(struct cxl_afu *afu)
-{
- /*
- * It's okay to deconfigure when AFU is already locked, otherwise wait
- * until there are no readers
- */
- if (atomic_read(&afu->configured_state) != -1) {
- while (atomic_cmpxchg(&afu->configured_state, 0, -1) != -1)
- schedule();
- }
- cxl_native_release_psl_irq(afu);
- if (afu->adapter->native->sl_ops->release_serr_irq)
- afu->adapter->native->sl_ops->release_serr_irq(afu);
- pci_unmap_slice_regs(afu);
-}
-
-static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
-{
- struct cxl_afu *afu;
- int rc = -ENOMEM;
-
- afu = cxl_alloc_afu(adapter, slice);
- if (!afu)
- return -ENOMEM;
-
- afu->native = kzalloc(sizeof(struct cxl_afu_native), GFP_KERNEL);
- if (!afu->native)
- goto err_free_afu;
-
- mutex_init(&afu->native->spa_mutex);
-
- rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice);
- if (rc)
- goto err_free_native;
-
- rc = pci_configure_afu(afu, adapter, dev);
- if (rc)
- goto err_free_native;
-
- /* Don't care if this fails */
- cxl_debugfs_afu_add(afu);
-
- /*
- * After we call this function we must not free the afu directly, even
- * if it returns an error!
- */
- if ((rc = cxl_register_afu(afu)))
- goto err_put_dev;
-
- if ((rc = cxl_sysfs_afu_add(afu)))
- goto err_del_dev;
-
- adapter->afu[afu->slice] = afu;
-
- if ((rc = cxl_pci_vphb_add(afu)))
- dev_info(&afu->dev, "Can't register vPHB\n");
-
- return 0;
-
-err_del_dev:
- device_del(&afu->dev);
-err_put_dev:
- pci_deconfigure_afu(afu);
- cxl_debugfs_afu_remove(afu);
- put_device(&afu->dev);
- return rc;
-
-err_free_native:
- kfree(afu->native);
-err_free_afu:
- kfree(afu);
- return rc;
-
-}
-
-static void cxl_pci_remove_afu(struct cxl_afu *afu)
-{
- pr_devel("%s\n", __func__);
-
- if (!afu)
- return;
-
- cxl_pci_vphb_remove(afu);
- cxl_sysfs_afu_remove(afu);
- cxl_debugfs_afu_remove(afu);
-
- spin_lock(&afu->adapter->afu_list_lock);
- afu->adapter->afu[afu->slice] = NULL;
- spin_unlock(&afu->adapter->afu_list_lock);
-
- cxl_context_detach_all(afu);
- cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
-
- pci_deconfigure_afu(afu);
- device_unregister(&afu->dev);
-}
-
-int cxl_pci_reset(struct cxl *adapter)
-{
- struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
- int rc;
-
- if (adapter->perst_same_image) {
- dev_warn(&dev->dev,
- "cxl: refusing to reset/reflash when perst_reloads_same_image is set.\n");
- return -EINVAL;
- }
-
- dev_info(&dev->dev, "CXL reset\n");
-
- /*
- * The adapter is about to be reset, so ignore errors.
- */
- cxl_data_cache_flush(adapter);
-
- /* pcie_warm_reset requests a fundamental pci reset which includes a
- * PERST assert/deassert. PERST triggers loading of the image
- * if "user" or "factory" is selected in sysfs */
- if ((rc = pci_set_pcie_reset_state(dev, pcie_warm_reset))) {
- dev_err(&dev->dev, "cxl: pcie_warm_reset failed\n");
- return rc;
- }
-
- return rc;
-}
-
-static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
-{
- if (pci_request_region(dev, 2, "priv 2 regs"))
- goto err1;
- if (pci_request_region(dev, 0, "priv 1 regs"))
- goto err2;
-
- pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx",
- p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));
-
- if (!(adapter->native->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
- goto err3;
-
- if (!(adapter->native->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
- goto err4;
-
- return 0;
-
-err4:
- iounmap(adapter->native->p1_mmio);
- adapter->native->p1_mmio = NULL;
-err3:
- pci_release_region(dev, 0);
-err2:
- pci_release_region(dev, 2);
-err1:
- return -ENOMEM;
-}
-
-static void cxl_unmap_adapter_regs(struct cxl *adapter)
-{
- if (adapter->native->p1_mmio) {
- iounmap(adapter->native->p1_mmio);
- adapter->native->p1_mmio = NULL;
- pci_release_region(to_pci_dev(adapter->dev.parent), 2);
- }
- if (adapter->native->p2_mmio) {
- iounmap(adapter->native->p2_mmio);
- adapter->native->p2_mmio = NULL;
- pci_release_region(to_pci_dev(adapter->dev.parent), 0);
- }
-}
-
-static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
-{
- int vsec;
- u32 afu_desc_off, afu_desc_size;
- u32 ps_off, ps_size;
- u16 vseclen;
- u8 image_state;
-
- if (!(vsec = find_cxl_vsec(dev))) {
- dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
- return -ENODEV;
- }
-
- CXL_READ_VSEC_LENGTH(dev, vsec, &vseclen);
- if (vseclen < CXL_VSEC_MIN_SIZE) {
- dev_err(&dev->dev, "ABORTING: CXL VSEC too short\n");
- return -EINVAL;
- }
-
- CXL_READ_VSEC_STATUS(dev, vsec, &adapter->vsec_status);
- CXL_READ_VSEC_PSL_REVISION(dev, vsec, &adapter->psl_rev);
- CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, &adapter->caia_major);
- CXL_READ_VSEC_CAIA_MINOR(dev, vsec, &adapter->caia_minor);
- CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
- CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
- adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
- adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
- adapter->perst_loads_image = !!(image_state & CXL_VSEC_PERST_LOADS_IMAGE);
-
- CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
- CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off);
- CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, &afu_desc_size);
- CXL_READ_VSEC_PS_OFF(dev, vsec, &ps_off);
- CXL_READ_VSEC_PS_SIZE(dev, vsec, &ps_size);
-
- /* Convert everything to bytes, because there is NO WAY I'd look at the
- * code a month later and forget what units these are in ;-) */
- adapter->native->ps_off = ps_off * 64 * 1024;
- adapter->ps_size = ps_size * 64 * 1024;
- adapter->native->afu_desc_off = afu_desc_off * 64 * 1024;
- adapter->native->afu_desc_size = afu_desc_size * 64 * 1024;
-
- /* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */
- adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices;
-
- return 0;
-}
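The unit conversion and IRQ budget above pack some arithmetic into two comments. The following standalone sketch spells it out with invented values; only the 64 * 1024 scale factor and the "total - 1 - 2 * slices" formula are taken from the code above.

#include <stdio.h>

int main(void)
{
	/* invented example values: the VSEC fields are in 64kB units */
	unsigned int ps_size_units = 4;		/* problem state size field */
	unsigned int slices = 2;		/* number of AFUs */
	unsigned int total_irqs = 2048;		/* stands in for pnv_cxl_get_irq_count() */

	unsigned long long ps_size = (unsigned long long)ps_size_units * 64 * 1024;
	/* one PSL error IRQ, plus one slice error and one DSI IRQ per AFU */
	unsigned int user_irqs = total_irqs - 1 - 2 * slices;

	printf("ps_size = %llu bytes, user_irqs = %u\n", ps_size, user_irqs);
	return 0;	/* prints: ps_size = 262144 bytes, user_irqs = 2043 */
}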
-
-/*
- * Workaround a PCIe Host Bridge defect on some cards, that can cause
- * malformed Transaction Layer Packet (TLP) errors to be erroneously
- * reported. Mask this error in the Uncorrectable Error Mask Register.
- *
- * The upper nibble of the PSL revision is used to distinguish between
- * different cards. The affected ones have it set to 0.
- */
-static void cxl_fixup_malformed_tlp(struct cxl *adapter, struct pci_dev *dev)
-{
- int aer;
- u32 data;
-
- if (adapter->psl_rev & 0xf000)
- return;
- if (!(aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)))
- return;
- pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &data);
- if (data & PCI_ERR_UNC_MALF_TLP)
- if (data & PCI_ERR_UNC_INTN)
- return;
- data |= PCI_ERR_UNC_MALF_TLP;
- data |= PCI_ERR_UNC_INTN;
- pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, data);
-}
-
-static bool cxl_compatible_caia_version(struct cxl *adapter)
-{
- if (cxl_is_power8() && (adapter->caia_major == 1))
- return true;
-
- if (cxl_is_power9() && (adapter->caia_major == 2))
- return true;
-
- return false;
-}
-
-static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
-{
- if (adapter->vsec_status & CXL_STATUS_SECOND_PORT)
- return -EBUSY;
-
- if (adapter->vsec_status & CXL_UNSUPPORTED_FEATURES) {
- dev_err(&dev->dev, "ABORTING: CXL requires unsupported features\n");
- return -EINVAL;
- }
-
- if (!cxl_compatible_caia_version(adapter)) {
- dev_info(&dev->dev, "Ignoring card. PSL type is not supported (caia version: %d)\n",
- adapter->caia_major);
- return -ENODEV;
- }
-
- if (!adapter->slices) {
- /* Once we support dynamic reprogramming we can use the card if
- * it supports loadable AFUs */
- dev_err(&dev->dev, "ABORTING: Device has no AFUs\n");
- return -EINVAL;
- }
-
- if (!adapter->native->afu_desc_off || !adapter->native->afu_desc_size) {
- dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n");
- return -EINVAL;
- }
-
- if (adapter->ps_size > p2_size(dev) - adapter->native->ps_off) {
- dev_err(&dev->dev, "ABORTING: Problem state size larger than "
- "available in BAR2: 0x%llx > 0x%llx\n",
- adapter->ps_size, p2_size(dev) - adapter->native->ps_off);
- return -EINVAL;
- }
-
- return 0;
-}
-
-ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
-{
- return pci_read_vpd(to_pci_dev(adapter->dev.parent), 0, len, buf);
-}
-
-static void cxl_release_adapter(struct device *dev)
-{
- struct cxl *adapter = to_cxl_adapter(dev);
-
- pr_devel("cxl_release_adapter\n");
-
- cxl_remove_adapter_nr(adapter);
-
- kfree(adapter->native);
- kfree(adapter);
-}
-
-#define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31))
-
-static int sanitise_adapter_regs(struct cxl *adapter)
-{
- int rc = 0;
-
- /* Clear PSL tberror bit by writing 1 to it */
- cxl_p1_write(adapter, CXL_PSL_ErrIVTE, CXL_PSL_ErrIVTE_tberror);
-
- if (adapter->native->sl_ops->invalidate_all) {
- /* do not invalidate ERAT entries when not reloading on PERST */
- if (cxl_is_power9() && (adapter->perst_loads_image))
- return 0;
- rc = adapter->native->sl_ops->invalidate_all(adapter);
- }
-
- return rc;
-}
-
-/* This should contain *only* operations that can safely be done in
- * both creation and recovery.
- */
-static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
-{
- int rc;
-
- adapter->dev.parent = &dev->dev;
- adapter->dev.release = cxl_release_adapter;
- pci_set_drvdata(dev, adapter);
-
- rc = pci_enable_device(dev);
- if (rc) {
- dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
- return rc;
- }
-
- if ((rc = cxl_read_vsec(adapter, dev)))
- return rc;
-
- if ((rc = cxl_vsec_looks_ok(adapter, dev)))
- return rc;
-
- cxl_fixup_malformed_tlp(adapter, dev);
-
- if ((rc = setup_cxl_bars(dev)))
- return rc;
-
- if ((rc = switch_card_to_cxl(dev)))
- return rc;
-
- if ((rc = cxl_update_image_control(adapter)))
- return rc;
-
- if ((rc = cxl_map_adapter_regs(adapter, dev)))
- return rc;
-
- if ((rc = sanitise_adapter_regs(adapter)))
- goto err;
-
- if ((rc = adapter->native->sl_ops->adapter_regs_init(adapter, dev)))
- goto err;
-
- /* Required for devices using CAPP DMA mode, harmless for others */
- pci_set_master(dev);
-
- adapter->tunneled_ops_supported = false;
-
- if (cxl_is_power9()) {
- if (pnv_pci_set_tunnel_bar(dev, 0x00020000E0000000ull, 1))
- dev_info(&dev->dev, "Tunneled operations unsupported\n");
- else
- adapter->tunneled_ops_supported = true;
- }
-
- if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode)))
- goto err;
-
- /* If recovery happened, the last step is to turn on snooping.
- * In the non-recovery case this has no effect */
- if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON)))
- goto err;
-
- /* Ignore error, adapter init is not dependent on timebase sync */
- cxl_setup_psl_timebase(adapter, dev);
-
- if ((rc = cxl_native_register_psl_err_irq(adapter)))
- goto err;
-
- return 0;
-
-err:
- cxl_unmap_adapter_regs(adapter);
- return rc;
-}
-
-static void cxl_deconfigure_adapter(struct cxl *adapter)
-{
- struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);
-
- if (cxl_is_power9())
- pnv_pci_set_tunnel_bar(pdev, 0x00020000E0000000ull, 0);
-
- cxl_native_release_psl_err_irq(adapter);
- cxl_unmap_adapter_regs(adapter);
-
- pci_disable_device(pdev);
-}
-
-static void cxl_stop_trace_psl9(struct cxl *adapter)
-{
- int traceid;
- u64 trace_state, trace_mask;
- struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
-
- /* read each trace array's state and issue an mmio to stop it if needed */
- for (traceid = 0; traceid <= CXL_PSL9_TRACEID_MAX; ++traceid) {
- trace_state = cxl_p1_read(adapter, CXL_PSL9_CTCCFG);
- trace_mask = (0x3ULL << (62 - traceid * 2));
- trace_state = (trace_state & trace_mask) >> (62 - traceid * 2);
- dev_dbg(&dev->dev, "cxl: Traceid-%d trace_state=0x%0llX\n",
- traceid, trace_state);
-
- /* issue mmio if the trace array isn't in FIN state */
- if (trace_state != CXL_PSL9_TRACESTATE_FIN)
- cxl_p1_write(adapter, CXL_PSL9_TRACECFG,
- 0x8400000000000000ULL | traceid);
- }
-}
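Because the shift arithmetic in cxl_stop_trace_psl9() is easy to misread, here is a hedged standalone decode of the same two-bits-per-traceid layout. The register value is invented, and the FIN value of 0x3 and a traceid maximum of 15 are assumptions standing in for the driver's constants.

#include <stdio.h>

#define TRACESTATE_FIN	0x3ULL	/* assumed value of CXL_PSL9_TRACESTATE_FIN */
#define TRACEID_MAX	15	/* assumed value of CXL_PSL9_TRACEID_MAX */

int main(void)
{
	unsigned long long ctccfg = 0xfedcba9876543210ULL;	/* made-up CTCCFG */
	int traceid;

	for (traceid = 0; traceid <= TRACEID_MAX; traceid++) {
		/* traceid 0 owns bits 63:62, traceid 1 owns bits 61:60, ... */
		unsigned long long state = (ctccfg >> (62 - traceid * 2)) & 0x3ULL;

		if (state != TRACESTATE_FIN)
			printf("traceid %d in state 0x%llx, would issue stop mmio\n",
			       traceid, state);
	}
	return 0;
}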
-
-static void cxl_stop_trace_psl8(struct cxl *adapter)
-{
- int slice;
-
- /* Stop the trace */
- cxl_p1_write(adapter, CXL_PSL_TRACE, 0x8000000000000017LL);
-
- /* Stop the slice traces */
- spin_lock(&adapter->afu_list_lock);
- for (slice = 0; slice < adapter->slices; slice++) {
- if (adapter->afu[slice])
- cxl_p1n_write(adapter->afu[slice], CXL_PSL_SLICE_TRACE,
- 0x8000000000000000LL);
- }
- spin_unlock(&adapter->afu_list_lock);
-}
-
-static const struct cxl_service_layer_ops psl9_ops = {
- .adapter_regs_init = init_implementation_adapter_regs_psl9,
- .invalidate_all = cxl_invalidate_all_psl9,
- .afu_regs_init = init_implementation_afu_regs_psl9,
- .sanitise_afu_regs = sanitise_afu_regs_psl9,
- .register_serr_irq = cxl_native_register_serr_irq,
- .release_serr_irq = cxl_native_release_serr_irq,
- .handle_interrupt = cxl_irq_psl9,
- .fail_irq = cxl_fail_irq_psl,
- .activate_dedicated_process = cxl_activate_dedicated_process_psl9,
- .attach_afu_directed = cxl_attach_afu_directed_psl9,
- .attach_dedicated_process = cxl_attach_dedicated_process_psl9,
- .update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl9,
- .debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl9,
- .debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl9,
- .psl_irq_dump_registers = cxl_native_irq_dump_regs_psl9,
- .err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl9,
- .debugfs_stop_trace = cxl_stop_trace_psl9,
- .timebase_read = timebase_read_psl9,
- .capi_mode = OPAL_PHB_CAPI_MODE_CAPI,
- .needs_reset_before_disable = true,
-};
-
-static const struct cxl_service_layer_ops psl8_ops = {
- .adapter_regs_init = init_implementation_adapter_regs_psl8,
- .invalidate_all = cxl_invalidate_all_psl8,
- .afu_regs_init = init_implementation_afu_regs_psl8,
- .sanitise_afu_regs = sanitise_afu_regs_psl8,
- .register_serr_irq = cxl_native_register_serr_irq,
- .release_serr_irq = cxl_native_release_serr_irq,
- .handle_interrupt = cxl_irq_psl8,
- .fail_irq = cxl_fail_irq_psl,
- .activate_dedicated_process = cxl_activate_dedicated_process_psl8,
- .attach_afu_directed = cxl_attach_afu_directed_psl8,
- .attach_dedicated_process = cxl_attach_dedicated_process_psl8,
- .update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl8,
- .debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl8,
- .debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl8,
- .psl_irq_dump_registers = cxl_native_irq_dump_regs_psl8,
- .err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl8,
- .debugfs_stop_trace = cxl_stop_trace_psl8,
- .write_timebase_ctrl = write_timebase_ctrl_psl8,
- .timebase_read = timebase_read_psl8,
- .capi_mode = OPAL_PHB_CAPI_MODE_CAPI,
- .needs_reset_before_disable = true,
-};
-
-static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev)
-{
- if (cxl_is_power8()) {
- dev_info(&dev->dev, "Device uses a PSL8\n");
- adapter->native->sl_ops = &psl8_ops;
- } else {
- dev_info(&dev->dev, "Device uses a PSL9\n");
- adapter->native->sl_ops = &psl9_ops;
- }
-}
-
-static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
-{
- struct cxl *adapter;
- int rc;
-
- adapter = cxl_alloc_adapter();
- if (!adapter)
- return ERR_PTR(-ENOMEM);
-
- adapter->native = kzalloc(sizeof(struct cxl_native), GFP_KERNEL);
- if (!adapter->native) {
- rc = -ENOMEM;
- goto err_release;
- }
-
- set_sl_ops(adapter, dev);
-
- /* Set defaults for parameters which need to persist over
- * configure/reconfigure
- */
- adapter->perst_loads_image = true;
- adapter->perst_same_image = false;
-
- rc = cxl_configure_adapter(adapter, dev);
- if (rc) {
- pci_disable_device(dev);
- goto err_release;
- }
-
- /* Don't care if this one fails: */
- cxl_debugfs_adapter_add(adapter);
-
- /*
- * After we call this function we must not free the adapter directly,
- * even if it returns an error!
- */
- if ((rc = cxl_register_adapter(adapter)))
- goto err_put_dev;
-
- if ((rc = cxl_sysfs_adapter_add(adapter)))
- goto err_del_dev;
-
- /* Release the context lock as adapter is configured */
- cxl_adapter_context_unlock(adapter);
-
- return adapter;
-
-err_del_dev:
- device_del(&adapter->dev);
-err_put_dev:
- /* This should mirror cxl_remove_adapter, except without the
- * sysfs parts
- */
- cxl_debugfs_adapter_remove(adapter);
- cxl_deconfigure_adapter(adapter);
- put_device(&adapter->dev);
- return ERR_PTR(rc);
-
-err_release:
- cxl_release_adapter(&adapter->dev);
- return ERR_PTR(rc);
-}
-
-static void cxl_pci_remove_adapter(struct cxl *adapter)
-{
- pr_devel("cxl_remove_adapter\n");
-
- cxl_sysfs_adapter_remove(adapter);
- cxl_debugfs_adapter_remove(adapter);
-
- /*
- * Flush the adapter data cache as it's about to be removed.
- */
- cxl_data_cache_flush(adapter);
-
- cxl_deconfigure_adapter(adapter);
-
- device_unregister(&adapter->dev);
-}
-
-#define CXL_MAX_PCIEX_PARENT 2
-
-int cxl_slot_is_switched(struct pci_dev *dev)
-{
- struct device_node *np;
- int depth = 0;
-
- if (!(np = pci_device_to_OF_node(dev))) {
- pr_err("cxl: np = NULL\n");
- return -ENODEV;
- }
- of_node_get(np);
- while (np) {
- np = of_get_next_parent(np);
- if (!of_node_is_type(np, "pciex"))
- break;
- depth++;
- }
- of_node_put(np);
- return (depth > CXL_MAX_PCIEX_PARENT);
-}
-
-static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct cxl *adapter;
- int slice;
- int rc;
-
- dev_err_once(&dev->dev, "DEPRECATED: cxl is deprecated and will be removed in a future kernel release\n");
-
- if (cxl_pci_is_vphb_device(dev)) {
- dev_dbg(&dev->dev, "cxl_init_adapter: Ignoring cxl vphb device\n");
- return -ENODEV;
- }
-
- if (cxl_slot_is_switched(dev)) {
- dev_info(&dev->dev, "Ignoring card on incompatible PCI slot\n");
- return -ENODEV;
- }
-
- if (cxl_is_power9() && !radix_enabled()) {
- dev_info(&dev->dev, "Only Radix mode supported\n");
- return -ENODEV;
- }
-
- if (cxl_verbose)
- dump_cxl_config_space(dev);
-
- adapter = cxl_pci_init_adapter(dev);
- if (IS_ERR(adapter)) {
- dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
- return PTR_ERR(adapter);
- }
-
- for (slice = 0; slice < adapter->slices; slice++) {
- if ((rc = pci_init_afu(adapter, slice, dev))) {
- dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
- continue;
- }
-
- rc = cxl_afu_select_best_mode(adapter->afu[slice]);
- if (rc)
- dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc);
- }
-
- return 0;
-}
-
-static void cxl_remove(struct pci_dev *dev)
-{
- struct cxl *adapter = pci_get_drvdata(dev);
- struct cxl_afu *afu;
- int i;
-
- /*
- * Lock to prevent someone grabbing a ref through the adapter list as
- * we are removing it
- */
- for (i = 0; i < adapter->slices; i++) {
- afu = adapter->afu[i];
- cxl_pci_remove_afu(afu);
- }
- cxl_pci_remove_adapter(adapter);
-}
-
-static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
- pci_channel_state_t state)
-{
- struct pci_dev *afu_dev;
- struct pci_driver *afu_drv;
- const struct pci_error_handlers *err_handler;
- pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
- pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;
-
- /* There should only be one entry, but go through the list
- * anyway
- */
- if (afu == NULL || afu->phb == NULL)
- return result;
-
- list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
- afu_drv = to_pci_driver(afu_dev->dev.driver);
- if (!afu_drv)
- continue;
-
- afu_dev->error_state = state;
-
- err_handler = afu_drv->err_handler;
- if (err_handler)
- afu_result = err_handler->error_detected(afu_dev,
- state);
- /* Disconnect trumps all, NONE trumps NEED_RESET */
- if (afu_result == PCI_ERS_RESULT_DISCONNECT)
- result = PCI_ERS_RESULT_DISCONNECT;
- else if ((afu_result == PCI_ERS_RESULT_NONE) &&
- (result == PCI_ERS_RESULT_NEED_RESET))
- result = PCI_ERS_RESULT_NONE;
- }
- return result;
-}
-
-static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- struct cxl *adapter = pci_get_drvdata(pdev);
- struct cxl_afu *afu;
- pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
- pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;
- int i;
-
- /* At this point, we could still have interrupts pending.
- * Let's try to get them out of the way before they do
- * anything we don't like.
- */
- schedule();
-
- /* If we're permanently dead, give up. */
- if (state == pci_channel_io_perm_failure) {
- spin_lock(&adapter->afu_list_lock);
- for (i = 0; i < adapter->slices; i++) {
- afu = adapter->afu[i];
- /*
- * Tell the AFU drivers; but we don't care what they
- * say, we're going away.
- */
- cxl_vphb_error_detected(afu, state);
- }
- spin_unlock(&adapter->afu_list_lock);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- /* Are we reflashing?
- *
- * If we reflash, we could come back as something entirely
- * different, including a non-CAPI card. As such, by default
- * we don't participate in the process. We'll be unbound and
- * the slot re-probed. (TODO: check EEH doesn't blindly rebind
- * us!)
- *
- * However, this isn't the entire story: for reliability
- * reasons, we usually want to reflash the FPGA on PERST in
- * order to get back to a more reliable known-good state.
- *
- * This causes us a bit of a problem: if we reflash we can't
- * trust that we'll come back the same - we could have a new
- * image and been PERSTed in order to load that
- * image. However, most of the time we actually *will* come
- * back the same - for example a regular EEH event.
- *
- * Therefore, we allow the user to assert that the image is
- * indeed the same and that we should continue on into EEH
- * anyway.
- */
- if (adapter->perst_loads_image && !adapter->perst_same_image) {
- /* TODO take the PHB out of CXL mode */
- dev_info(&pdev->dev, "reflashing, so opting out of EEH!\n");
- return PCI_ERS_RESULT_NONE;
- }
-
- /*
- * At this point, we want to try to recover. We'll always
- * need a complete slot reset: we don't trust any other reset.
- *
- * Now, we go through each AFU:
- * - We send the driver, if bound, an error_detected callback.
- * We expect it to clean up, but it can also tell us to give
- * up and permanently detach the card. To simplify things, if
- * any bound AFU driver doesn't support EEH, we give up on EEH.
- *
- * - We detach all contexts associated with the AFU. This
- * does not free them, but puts them into a CLOSED state
- * which causes the associated files to return useful
- * errors to userland. It also unmaps, but does not free,
- * any IRQs.
- *
- * - We clean up our side: releasing and unmapping resources we hold
- * so we can wire them up again when the hardware comes back up.
- *
- * Driver authors should note:
- *
- * - Any contexts you create in your kernel driver (except
- * those associated with anonymous file descriptors) are
- * your responsibility to free and recreate. Likewise with
- * any attached resources.
- *
- * - We will take responsibility for re-initialising the
- * device context (the one set up for you in
- * cxl_pci_enable_device_hook and accessed through
- * cxl_get_context). If you've attached IRQs or other
- * resources to it, they remain yours to free.
- *
- * You can call the same functions to release resources as you
- * normally would: we make sure that these functions continue
- * to work when the hardware is down.
- *
- * Two examples:
- *
- * 1) If you normally free all your resources at the end of
- * each request, or if you use anonymous FDs, your
- * error_detected callback can simply set a flag to tell
- * your driver not to start any new calls. You can then
- * clear the flag in the resume callback.
- *
- * 2) If you normally allocate your resources on startup:
- * * Set a flag in error_detected as above.
- * * Let CXL detach your contexts.
- * * In slot_reset, free the old resources and allocate new ones.
- * * In resume, clear the flag to allow things to start.
- *
- * A minimal sketch of example 1 appears after this function.
- */
-
- /* Make sure no one else changes the afu list */
- spin_lock(&adapter->afu_list_lock);
-
- for (i = 0; i < adapter->slices; i++) {
- afu = adapter->afu[i];
-
- if (afu == NULL)
- continue;
-
- afu_result = cxl_vphb_error_detected(afu, state);
- cxl_context_detach_all(afu);
- cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
- pci_deconfigure_afu(afu);
-
- /* Disconnect trumps all, NONE trumps NEED_RESET */
- if (afu_result == PCI_ERS_RESULT_DISCONNECT)
- result = PCI_ERS_RESULT_DISCONNECT;
- else if ((afu_result == PCI_ERS_RESULT_NONE) &&
- (result == PCI_ERS_RESULT_NEED_RESET))
- result = PCI_ERS_RESULT_NONE;
- }
- spin_unlock(&adapter->afu_list_lock);
-
- /* should take the context lock here */
- if (cxl_adapter_context_lock(adapter) != 0)
- dev_warn(&adapter->dev,
- "Couldn't take context lock with %d active-contexts\n",
- atomic_read(&adapter->contexts_num));
-
- cxl_deconfigure_adapter(adapter);
-
- return result;
-}
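To make the driver-author guidance in the comment above concrete, here is a minimal sketch of "example 1": a hypothetical AFU driver that gates new work on a flag across the recovery cycle. The my_afu_* names and struct are invented; only the pci_error_handlers callbacks are the real kernel API.

#include <linux/pci.h>

struct my_afu_priv {
	bool frozen;	/* true while EEH recovery is in flight */
};

static pci_ers_result_t my_afu_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct my_afu_priv *priv = pci_get_drvdata(pdev);

	priv->frozen = true;	/* refuse to start new calls */
	return PCI_ERS_RESULT_NEED_RESET;
}

static void my_afu_resume(struct pci_dev *pdev)
{
	struct my_afu_priv *priv = pci_get_drvdata(pdev);

	priv->frozen = false;	/* hardware is back; new work may start */
}

static const struct pci_error_handlers my_afu_err_handlers = {
	.error_detected	= my_afu_error_detected,
	.resume		= my_afu_resume,
};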
-
-static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
-{
- struct cxl *adapter = pci_get_drvdata(pdev);
- struct cxl_afu *afu;
- struct cxl_context *ctx;
- struct pci_dev *afu_dev;
- struct pci_driver *afu_drv;
- const struct pci_error_handlers *err_handler;
- pci_ers_result_t afu_result = PCI_ERS_RESULT_RECOVERED;
- pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
- int i;
-
- if (cxl_configure_adapter(adapter, pdev))
- goto err;
-
- /*
- * Unlock context activation for the adapter. Ideally this should be
- * done in cxl_pci_resume but cxlflash module tries to activate the
- * master context as part of slot_reset callback.
- */
- cxl_adapter_context_unlock(adapter);
-
- spin_lock(&adapter->afu_list_lock);
- for (i = 0; i < adapter->slices; i++) {
- afu = adapter->afu[i];
-
- if (afu == NULL)
- continue;
-
- if (pci_configure_afu(afu, adapter, pdev))
- goto err_unlock;
-
- if (cxl_afu_select_best_mode(afu))
- goto err_unlock;
-
- if (afu->phb == NULL)
- continue;
-
- list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
- /* Reset the device context.
- * TODO: make this less disruptive
- */
- ctx = cxl_get_context(afu_dev);
-
- if (ctx && cxl_release_context(ctx))
- goto err_unlock;
-
- ctx = cxl_dev_context_init(afu_dev);
- if (IS_ERR(ctx))
- goto err_unlock;
-
- afu_dev->dev.archdata.cxl_ctx = ctx;
-
- if (cxl_ops->afu_check_and_enable(afu))
- goto err_unlock;
-
- afu_dev->error_state = pci_channel_io_normal;
-
- /* If there's a driver attached, allow it to
- * chime in on recovery. Drivers should check
- * if everything has come back OK, but
- * shouldn't start new work until we call
- * their resume function.
- */
- afu_drv = to_pci_driver(afu_dev->dev.driver);
- if (!afu_drv)
- continue;
-
- err_handler = afu_drv->err_handler;
- if (err_handler && err_handler->slot_reset)
- afu_result = err_handler->slot_reset(afu_dev);
-
- if (afu_result == PCI_ERS_RESULT_DISCONNECT)
- result = PCI_ERS_RESULT_DISCONNECT;
- }
- }
-
- spin_unlock(&adapter->afu_list_lock);
- return result;
-
-err_unlock:
- spin_unlock(&adapter->afu_list_lock);
-
-err:
- /* All the bits that happen in both error_detected and cxl_remove
- * should be idempotent, so we don't need to worry about leaving a mix
- * of unconfigured and reconfigured resources.
- */
- dev_err(&pdev->dev, "EEH recovery failed. Asking to be disconnected.\n");
- return PCI_ERS_RESULT_DISCONNECT;
-}
-
-static void cxl_pci_resume(struct pci_dev *pdev)
-{
- struct cxl *adapter = pci_get_drvdata(pdev);
- struct cxl_afu *afu;
- struct pci_dev *afu_dev;
- struct pci_driver *afu_drv;
- const struct pci_error_handlers *err_handler;
- int i;
-
- /* Everything is back now. Drivers should restart work now.
- * This is not the place to be checking if everything came back up
- * properly, because there's no return value: do that in slot_reset.
- */
- spin_lock(&adapter->afu_list_lock);
- for (i = 0; i < adapter->slices; i++) {
- afu = adapter->afu[i];
-
- if (afu == NULL || afu->phb == NULL)
- continue;
-
- list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
- afu_drv = to_pci_driver(afu_dev->dev.driver);
- if (!afu_drv)
- continue;
-
- err_handler = afu_drv->err_handler;
- if (err_handler && err_handler->resume)
- err_handler->resume(afu_dev);
- }
- }
- spin_unlock(&adapter->afu_list_lock);
-}
-
-static const struct pci_error_handlers cxl_err_handler = {
- .error_detected = cxl_pci_error_detected,
- .slot_reset = cxl_pci_slot_reset,
- .resume = cxl_pci_resume,
-};
-
-struct pci_driver cxl_pci_driver = {
- .name = "cxl-pci",
- .id_table = cxl_pci_tbl,
- .probe = cxl_probe,
- .remove = cxl_remove,
- .shutdown = cxl_remove,
- .err_handler = &cxl_err_handler,
-};
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
deleted file mode 100644
index b1fc6446bd4b..000000000000
--- a/drivers/misc/cxl/sysfs.c
+++ /dev/null
@@ -1,771 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/sysfs.h>
-#include <linux/pci_regs.h>
-
-#include "cxl.h"
-
-#define to_afu_chardev_m(d) dev_get_drvdata(d)
-
-/********* Adapter attributes **********************************************/
-
-static ssize_t caia_version_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl *adapter = to_cxl_adapter(device);
-
- return scnprintf(buf, PAGE_SIZE, "%i.%i\n", adapter->caia_major,
- adapter->caia_minor);
-}
-
-static ssize_t psl_revision_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl *adapter = to_cxl_adapter(device);
-
- return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_rev);
-}
-
-static ssize_t base_image_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl *adapter = to_cxl_adapter(device);
-
- return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->base_image);
-}
-
-static ssize_t image_loaded_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl *adapter = to_cxl_adapter(device);
-
- if (adapter->user_image_loaded)
- return scnprintf(buf, PAGE_SIZE, "user\n");
- return scnprintf(buf, PAGE_SIZE, "factory\n");
-}
-
-static ssize_t psl_timebase_synced_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl *adapter = to_cxl_adapter(device);
- u64 psl_tb, delta;
-
- /* Recompute the status only in native mode */
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- psl_tb = adapter->native->sl_ops->timebase_read(adapter);
- delta = abs(mftb() - psl_tb);
-
- /* CORE TB and PSL TB difference <= 16usecs ? */
- adapter->psl_timebase_synced = (tb_to_ns(delta) < 16000) ? true : false;
- pr_devel("PSL timebase %s - delta: 0x%016llx\n",
- (tb_to_ns(delta) < 16000) ? "synchronized" :
- "not synchronized", tb_to_ns(delta));
- }
- return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced);
-}
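For a feel of what the 16000 ns threshold in psl_timebase_synced_show() means in timebase ticks, a back-of-envelope calculation follows; the 512 MHz timebase frequency is an assumption about the platform, not something the code states.

#include <stdio.h>

int main(void)
{
	const unsigned long long tb_hz = 512000000ULL;	/* assumed timebase frequency */
	const unsigned long long limit_ns = 16000;	/* threshold from the driver */

	/* ticks of core/PSL timebase skew tolerated before "not synchronized" */
	unsigned long long max_ticks = limit_ns * tb_hz / 1000000000ULL;

	printf("max delta: %llu timebase ticks\n", max_ticks);	/* prints 8192 */
	return 0;
}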
-
-static ssize_t tunneled_ops_supported_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl *adapter = to_cxl_adapter(device);
-
- return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->tunneled_ops_supported);
-}
-
-static ssize_t reset_adapter_store(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cxl *adapter = to_cxl_adapter(device);
- int rc;
- int val;
-
- rc = sscanf(buf, "%i", &val);
- if ((rc != 1) || (val != 1 && val != -1))
- return -EINVAL;
-
- /*
- * See if we can take the context lock, which is only allowed
- * when there are no contexts attached to the adapter. Once
- * taken, this also prevents any context from being activated.
- */
- if (val == 1) {
- rc = cxl_adapter_context_lock(adapter);
- if (rc)
- goto out;
-
- rc = cxl_ops->adapter_reset(adapter);
- /* If the reset failed, release the context lock */
- if (rc)
- cxl_adapter_context_unlock(adapter);
-
- } else if (val == -1) {
- /* Perform a forced adapter reset */
- rc = cxl_ops->adapter_reset(adapter);
- }
-
-out:
- return rc ? rc : count;
-}
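A hedged userspace sketch of driving the reset attribute above. The /sys/class/cxl/card0 path is an assumption based on the adapter naming; per the store handler, writing "1" requests a reset only when no contexts are attached, while "-1" forces one.

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical sysfs path for the first cxl adapter */
	int fd = open("/sys/class/cxl/card0/reset", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "1", 1) != 1) {	/* use "-1" to force a reset */
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}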
-
-static ssize_t load_image_on_perst_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl *adapter = to_cxl_adapter(device);
-
- if (!adapter->perst_loads_image)
- return scnprintf(buf, PAGE_SIZE, "none\n");
-
- if (adapter->perst_select_user)
- return scnprintf(buf, PAGE_SIZE, "user\n");
- return scnprintf(buf, PAGE_SIZE, "factory\n");
-}
-
-static ssize_t load_image_on_perst_store(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cxl *adapter = to_cxl_adapter(device);
- int rc;
-
- if (!strncmp(buf, "none", 4))
- adapter->perst_loads_image = false;
- else if (!strncmp(buf, "user", 4)) {
- adapter->perst_select_user = true;
- adapter->perst_loads_image = true;
- } else if (!strncmp(buf, "factory", 7)) {
- adapter->perst_select_user = false;
- adapter->perst_loads_image = true;
- } else
- return -EINVAL;
-
- if ((rc = cxl_update_image_control(adapter)))
- return rc;
-
- return count;
-}
-
-static ssize_t perst_reloads_same_image_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl *adapter = to_cxl_adapter(device);
-
- return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->perst_same_image);
-}
-
-static ssize_t perst_reloads_same_image_store(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cxl *adapter = to_cxl_adapter(device);
- int rc;
- int val;
-
- rc = sscanf(buf, "%i", &val);
- if ((rc != 1) || !(val == 1 || val == 0))
- return -EINVAL;
-
- adapter->perst_same_image = (val == 1);
- return count;
-}
-
-static struct device_attribute adapter_attrs[] = {
- __ATTR_RO(caia_version),
- __ATTR_RO(psl_revision),
- __ATTR_RO(base_image),
- __ATTR_RO(image_loaded),
- __ATTR_RO(psl_timebase_synced),
- __ATTR_RO(tunneled_ops_supported),
- __ATTR_RW(load_image_on_perst),
- __ATTR_RW(perst_reloads_same_image),
- __ATTR(reset, S_IWUSR, NULL, reset_adapter_store),
-};
-
-
-/********* AFU master specific attributes **********************************/
-
-static ssize_t mmio_size_show_master(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl_afu *afu = to_afu_chardev_m(device);
-
- return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
-}
-
-static ssize_t pp_mmio_off_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl_afu *afu = to_afu_chardev_m(device);
-
- return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->native->pp_offset);
-}
-
-static ssize_t pp_mmio_len_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl_afu *afu = to_afu_chardev_m(device);
-
- return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
-}
-
-static struct device_attribute afu_master_attrs[] = {
- __ATTR(mmio_size, S_IRUGO, mmio_size_show_master, NULL),
- __ATTR_RO(pp_mmio_off),
- __ATTR_RO(pp_mmio_len),
-};
-
-
-/********* AFU attributes **************************************************/
-
-static ssize_t mmio_size_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl_afu *afu = to_cxl_afu(device);
-
- if (afu->pp_size)
- return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
- return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
-}
-
-static ssize_t reset_store_afu(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cxl_afu *afu = to_cxl_afu(device);
- int rc;
-
- /* Not safe to reset if it is currently in use */
- mutex_lock(&afu->contexts_lock);
- if (!idr_is_empty(&afu->contexts_idr)) {
- rc = -EBUSY;
- goto err;
- }
-
- if ((rc = cxl_ops->afu_reset(afu)))
- goto err;
-
- rc = count;
-err:
- mutex_unlock(&afu->contexts_lock);
- return rc;
-}
-
-static ssize_t irqs_min_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl_afu *afu = to_cxl_afu(device);
-
- return scnprintf(buf, PAGE_SIZE, "%i\n", afu->pp_irqs);
-}
-
-static ssize_t irqs_max_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl_afu *afu = to_cxl_afu(device);
-
- return scnprintf(buf, PAGE_SIZE, "%i\n", afu->irqs_max);
-}
-
-static ssize_t irqs_max_store(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cxl_afu *afu = to_cxl_afu(device);
- ssize_t ret;
- int irqs_max;
-
- ret = sscanf(buf, "%i", &irqs_max);
- if (ret != 1)
- return -EINVAL;
-
- if (irqs_max < afu->pp_irqs)
- return -EINVAL;
-
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- if (irqs_max > afu->adapter->user_irqs)
- return -EINVAL;
- } else {
- /* pHyp sets a per-AFU limit */
- if (irqs_max > afu->guest->max_ints)
- return -EINVAL;
- }
-
- afu->irqs_max = irqs_max;
- return count;
-}
-
-static ssize_t modes_supported_show(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct cxl_afu *afu = to_cxl_afu(device);
- char *p = buf, *end = buf + PAGE_SIZE;
-
- if (afu->modes_supported & CXL_MODE_DEDICATED)
- p += scnprintf(p, end - p, "dedicated_process\n");
- if (afu->modes_supported & CXL_MODE_DIRECTED)
- p += scnprintf(p, end - p, "afu_directed\n");
- return (p - buf);
-}
-
-static ssize_t prefault_mode_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl_afu *afu = to_cxl_afu(device);
-
- switch (afu->prefault_mode) {
- case CXL_PREFAULT_WED:
- return scnprintf(buf, PAGE_SIZE, "work_element_descriptor\n");
- case CXL_PREFAULT_ALL:
- return scnprintf(buf, PAGE_SIZE, "all\n");
- default:
- return scnprintf(buf, PAGE_SIZE, "none\n");
- }
-}
-
-static ssize_t prefault_mode_store(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cxl_afu *afu = to_cxl_afu(device);
- enum prefault_modes mode = -1;
-
- if (!strncmp(buf, "none", 4))
- mode = CXL_PREFAULT_NONE;
- else {
- if (!radix_enabled()) {
- /* only allowed when not in radix mode */
- if (!strncmp(buf, "work_element_descriptor", 23))
- mode = CXL_PREFAULT_WED;
- if (!strncmp(buf, "all", 3))
- mode = CXL_PREFAULT_ALL;
- } else {
- dev_err(device, "Cannot prefault with radix enabled\n");
- }
- }
-
- if (mode == -1)
- return -EINVAL;
-
- afu->prefault_mode = mode;
- return count;
-}
-
-static ssize_t mode_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl_afu *afu = to_cxl_afu(device);
-
- if (afu->current_mode == CXL_MODE_DEDICATED)
- return scnprintf(buf, PAGE_SIZE, "dedicated_process\n");
- if (afu->current_mode == CXL_MODE_DIRECTED)
- return scnprintf(buf, PAGE_SIZE, "afu_directed\n");
- return scnprintf(buf, PAGE_SIZE, "none\n");
-}
-
-static ssize_t mode_store(struct device *device, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cxl_afu *afu = to_cxl_afu(device);
- int old_mode, mode = -1;
- int rc = -EBUSY;
-
- /* can't change this if we have a user */
- mutex_lock(&afu->contexts_lock);
- if (!idr_is_empty(&afu->contexts_idr))
- goto err;
-
- if (!strncmp(buf, "dedicated_process", 17))
- mode = CXL_MODE_DEDICATED;
- if (!strncmp(buf, "afu_directed", 12))
- mode = CXL_MODE_DIRECTED;
- if (!strncmp(buf, "none", 4))
- mode = 0;
-
- if (mode == -1) {
- rc = -EINVAL;
- goto err;
- }
-
- /*
- * afu_deactivate_mode needs to be done outside the lock, so prevent
- * other contexts from coming in before we are ready:
- */
- old_mode = afu->current_mode;
- afu->current_mode = 0;
- afu->num_procs = 0;
-
- mutex_unlock(&afu->contexts_lock);
-
- if ((rc = cxl_ops->afu_deactivate_mode(afu, old_mode)))
- return rc;
- if ((rc = cxl_ops->afu_activate_mode(afu, mode)))
- return rc;
-
- return count;
-err:
- mutex_unlock(&afu->contexts_lock);
- return rc;
-}
-
-static ssize_t api_version_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION);
-}
-
-static ssize_t api_version_compatible_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION_COMPATIBLE);
-}
-
-static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj,
- const struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
-{
- struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj));
-
- return cxl_ops->afu_read_err_buffer(afu, buf, off, count);
-}
-
-static struct device_attribute afu_attrs[] = {
- __ATTR_RO(mmio_size),
- __ATTR_RO(irqs_min),
- __ATTR_RW(irqs_max),
- __ATTR_RO(modes_supported),
- __ATTR_RW(mode),
- __ATTR_RW(prefault_mode),
- __ATTR_RO(api_version),
- __ATTR_RO(api_version_compatible),
- __ATTR(reset, S_IWUSR, NULL, reset_store_afu),
-};
-
-int cxl_sysfs_adapter_add(struct cxl *adapter)
-{
- struct device_attribute *dev_attr;
- int i, rc;
-
- for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
- dev_attr = &adapter_attrs[i];
- if (cxl_ops->support_attributes(dev_attr->attr.name,
- CXL_ADAPTER_ATTRS)) {
- if ((rc = device_create_file(&adapter->dev, dev_attr)))
- goto err;
- }
- }
- return 0;
-err:
- for (i--; i >= 0; i--) {
- dev_attr = &adapter_attrs[i];
- if (cxl_ops->support_attributes(dev_attr->attr.name,
- CXL_ADAPTER_ATTRS))
- device_remove_file(&adapter->dev, dev_attr);
- }
- return rc;
-}
-
-void cxl_sysfs_adapter_remove(struct cxl *adapter)
-{
- struct device_attribute *dev_attr;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
- dev_attr = &adapter_attrs[i];
- if (cxl_ops->support_attributes(dev_attr->attr.name,
- CXL_ADAPTER_ATTRS))
- device_remove_file(&adapter->dev, dev_attr);
- }
-}
-
-struct afu_config_record {
- struct kobject kobj;
- struct bin_attribute config_attr;
- struct list_head list;
- int cr;
- u16 device;
- u16 vendor;
- u32 class;
-};
-
-#define to_cr(obj) container_of(obj, struct afu_config_record, kobj)
-
-static ssize_t vendor_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct afu_config_record *cr = to_cr(kobj);
-
- return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->vendor);
-}
-
-static ssize_t device_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct afu_config_record *cr = to_cr(kobj);
-
- return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->device);
-}
-
-static ssize_t class_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct afu_config_record *cr = to_cr(kobj);
-
- return scnprintf(buf, PAGE_SIZE, "0x%.6x\n", cr->class);
-}
-
-static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
- const struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
-{
- struct afu_config_record *cr = to_cr(kobj);
- struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent));
-
- u64 i, j, val, rc;
-
- for (i = 0; i < count;) {
- rc = cxl_ops->afu_cr_read64(afu, cr->cr, off & ~0x7, &val);
- if (rc)
- val = ~0ULL;
- for (j = off & 0x7; j < 8 && i < count; i++, j++, off++)
- buf[i] = (val >> (j * 8)) & 0xff;
- }
-
- return count;
-}
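The loop in afu_read_config() copes with offsets and counts that need not be 8-byte aligned by always issuing aligned 64-bit reads and picking out bytes. This hedged standalone sketch replays the same loop against a fake config space; read64() and the sample bytes are invented, and a little-endian host is assumed.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static const uint8_t config_space[16] = {
	0x14, 0x10, 0x77, 0x04, 0x00, 0x01, 0x00, 0x06,
	0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, 0x11, 0x22,
};

/* stand-in for cxl_ops->afu_cr_read64(): 8-byte aligned reads only */
static int read64(uint64_t off, uint64_t *val)
{
	memcpy(val, config_space + off, 8);	/* little-endian host assumed */
	return 0;
}

int main(void)
{
	uint8_t buf[5];
	uint64_t count = sizeof(buf);
	uint64_t i = 0, j, val, off = 6;	/* deliberately unaligned start */

	while (i < count) {
		if (read64(off & ~0x7ULL, &val))
			val = ~0ULL;		/* mirror the driver's fallback */
		for (j = off & 0x7; j < 8 && i < count; i++, j++, off++)
			buf[i] = (val >> (j * 8)) & 0xff;
	}

	for (i = 0; i < count; i++)
		printf("%02x ", buf[i]);	/* prints: 00 06 aa bb cc */
	printf("\n");
	return 0;
}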
-
-static struct kobj_attribute vendor_attribute =
- __ATTR_RO(vendor);
-static struct kobj_attribute device_attribute =
- __ATTR_RO(device);
-static struct kobj_attribute class_attribute =
- __ATTR_RO(class);
-
-static struct attribute *afu_cr_attrs[] = {
- &vendor_attribute.attr,
- &device_attribute.attr,
- &class_attribute.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(afu_cr);
-
-static void release_afu_config_record(struct kobject *kobj)
-{
- struct afu_config_record *cr = to_cr(kobj);
-
- kfree(cr);
-}
-
-static const struct kobj_type afu_config_record_type = {
- .sysfs_ops = &kobj_sysfs_ops,
- .release = release_afu_config_record,
- .default_groups = afu_cr_groups,
-};
-
-static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int cr_idx)
-{
- struct afu_config_record *cr;
- int rc;
-
- cr = kzalloc(sizeof(struct afu_config_record), GFP_KERNEL);
- if (!cr)
- return ERR_PTR(-ENOMEM);
-
- cr->cr = cr_idx;
-
- rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID, &cr->device);
- if (rc)
- goto err;
- rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID, &cr->vendor);
- if (rc)
- goto err;
- rc = cxl_ops->afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION, &cr->class);
- if (rc)
- goto err;
- cr->class >>= 8;
-
- /*
- * Export the raw AFU PCIe-like config record. For now this is read only by
- * root - we can expand that later to be readable by non-root and maybe
- * even writable provided we have a good use-case. Once we support
- * exposing AFUs through a virtual PHB they will get that for free from
- * Linux' PCI infrastructure, but until then it's not clear that we
- * need it for anything since the main use case is just identifying
- * AFUs, which can be done via the vendor, device and class attributes.
- */
- sysfs_bin_attr_init(&cr->config_attr);
- cr->config_attr.attr.name = "config";
- cr->config_attr.attr.mode = S_IRUSR;
- cr->config_attr.size = afu->crs_len;
- cr->config_attr.read_new = afu_read_config;
-
- rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
- &afu->dev.kobj, "cr%i", cr->cr);
- if (rc)
- goto err1;
-
- rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr);
- if (rc)
- goto err1;
-
- rc = kobject_uevent(&cr->kobj, KOBJ_ADD);
- if (rc)
- goto err2;
-
- return cr;
-err2:
- sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
-err1:
- kobject_put(&cr->kobj);
- return ERR_PTR(rc);
-err:
- kfree(cr);
- return ERR_PTR(rc);
-}
-
-void cxl_sysfs_afu_remove(struct cxl_afu *afu)
-{
- struct device_attribute *dev_attr;
- struct afu_config_record *cr, *tmp;
- int i;
-
- /* remove the err buffer bin attribute */
- if (afu->eb_len)
- device_remove_bin_file(&afu->dev, &afu->attr_eb);
-
- for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
- dev_attr = &afu_attrs[i];
- if (cxl_ops->support_attributes(dev_attr->attr.name,
- CXL_AFU_ATTRS))
- device_remove_file(&afu->dev, &afu_attrs[i]);
- }
-
- list_for_each_entry_safe(cr, tmp, &afu->crs, list) {
- sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
- kobject_put(&cr->kobj);
- }
-}
-
-int cxl_sysfs_afu_add(struct cxl_afu *afu)
-{
- struct device_attribute *dev_attr;
- struct afu_config_record *cr;
- int i, rc;
-
- INIT_LIST_HEAD(&afu->crs);
-
- for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
- dev_attr = &afu_attrs[i];
- if (cxl_ops->support_attributes(dev_attr->attr.name,
- CXL_AFU_ATTRS)) {
- if ((rc = device_create_file(&afu->dev, &afu_attrs[i])))
- goto err;
- }
- }
-
- /* conditionally create the binary file for the error info buffer */
- if (afu->eb_len) {
- sysfs_attr_init(&afu->attr_eb.attr);
-
- afu->attr_eb.attr.name = "afu_err_buff";
- afu->attr_eb.attr.mode = S_IRUGO;
- afu->attr_eb.size = afu->eb_len;
- afu->attr_eb.read_new = afu_eb_read;
-
- rc = device_create_bin_file(&afu->dev, &afu->attr_eb);
- if (rc) {
- dev_err(&afu->dev,
- "Unable to create eb attr for the afu. Err(%d)\n",
- rc);
- goto err;
- }
- }
-
- for (i = 0; i < afu->crs_num; i++) {
- cr = cxl_sysfs_afu_new_cr(afu, i);
- if (IS_ERR(cr)) {
- rc = PTR_ERR(cr);
- goto err1;
- }
- list_add(&cr->list, &afu->crs);
- }
-
- return 0;
-
-err1:
- cxl_sysfs_afu_remove(afu);
- return rc;
-err:
- /* reset eb_len as we haven't created the bin attr */
- afu->eb_len = 0;
-
- for (i--; i >= 0; i--) {
- dev_attr = &afu_attrs[i];
- if (cxl_ops->support_attributes(dev_attr->attr.name,
- CXL_AFU_ATTRS))
- device_remove_file(&afu->dev, &afu_attrs[i]);
- }
- return rc;
-}
-
-int cxl_sysfs_afu_m_add(struct cxl_afu *afu)
-{
- struct device_attribute *dev_attr;
- int i, rc;
-
- for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
- dev_attr = &afu_master_attrs[i];
- if (cxl_ops->support_attributes(dev_attr->attr.name,
- CXL_AFU_MASTER_ATTRS)) {
- if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i])))
- goto err;
- }
- }
-
- return 0;
-
-err:
- for (i--; i >= 0; i--) {
- dev_attr = &afu_master_attrs[i];
- if (cxl_ops->support_attributes(dev_attr->attr.name,
- CXL_AFU_MASTER_ATTRS))
- device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
- }
- return rc;
-}
-
-void cxl_sysfs_afu_m_remove(struct cxl_afu *afu)
-{
- struct device_attribute *dev_attr;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
- dev_attr = &afu_master_attrs[i];
- if (cxl_ops->support_attributes(dev_attr->attr.name,
- CXL_AFU_MASTER_ATTRS))
- device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
- }
-}
diff --git a/drivers/misc/cxl/trace.c b/drivers/misc/cxl/trace.c
deleted file mode 100644
index 86f654b99efb..000000000000
--- a/drivers/misc/cxl/trace.c
+++ /dev/null
@@ -1,9 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2015 IBM Corp.
- */
-
-#ifndef __CHECKER__
-#define CREATE_TRACE_POINTS
-#include "trace.h"
-#endif
diff --git a/drivers/misc/cxl/trace.h b/drivers/misc/cxl/trace.h
deleted file mode 100644
index c474157c6857..000000000000
--- a/drivers/misc/cxl/trace.h
+++ /dev/null
@@ -1,691 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2015 IBM Corp.
- */
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM cxl
-
-#if !defined(_CXL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _CXL_TRACE_H
-
-#include <linux/tracepoint.h>
-
-#include "cxl.h"
-
-#define dsisr_psl9_flags(flags) \
- __print_flags(flags, "|", \
- { CXL_PSL9_DSISR_An_CO_MASK, "FR" }, \
- { CXL_PSL9_DSISR_An_TF, "TF" }, \
- { CXL_PSL9_DSISR_An_PE, "PE" }, \
- { CXL_PSL9_DSISR_An_AE, "AE" }, \
- { CXL_PSL9_DSISR_An_OC, "OC" }, \
- { CXL_PSL9_DSISR_An_S, "S" })
-
-#define DSISR_FLAGS \
- { CXL_PSL_DSISR_An_DS, "DS" }, \
- { CXL_PSL_DSISR_An_DM, "DM" }, \
- { CXL_PSL_DSISR_An_ST, "ST" }, \
- { CXL_PSL_DSISR_An_UR, "UR" }, \
- { CXL_PSL_DSISR_An_PE, "PE" }, \
- { CXL_PSL_DSISR_An_AE, "AE" }, \
- { CXL_PSL_DSISR_An_OC, "OC" }, \
- { CXL_PSL_DSISR_An_M, "M" }, \
- { CXL_PSL_DSISR_An_P, "P" }, \
- { CXL_PSL_DSISR_An_A, "A" }, \
- { CXL_PSL_DSISR_An_S, "S" }, \
- { CXL_PSL_DSISR_An_K, "K" }
-
-#define TFC_FLAGS \
- { CXL_PSL_TFC_An_A, "A" }, \
- { CXL_PSL_TFC_An_C, "C" }, \
- { CXL_PSL_TFC_An_AE, "AE" }, \
- { CXL_PSL_TFC_An_R, "R" }
-
-#define LLCMD_NAMES \
- { CXL_SPA_SW_CMD_TERMINATE, "TERMINATE" }, \
- { CXL_SPA_SW_CMD_REMOVE, "REMOVE" }, \
- { CXL_SPA_SW_CMD_SUSPEND, "SUSPEND" }, \
- { CXL_SPA_SW_CMD_RESUME, "RESUME" }, \
- { CXL_SPA_SW_CMD_ADD, "ADD" }, \
- { CXL_SPA_SW_CMD_UPDATE, "UPDATE" }
-
-#define AFU_COMMANDS \
- { 0, "DISABLE" }, \
- { CXL_AFU_Cntl_An_E, "ENABLE" }, \
- { CXL_AFU_Cntl_An_RA, "RESET" }
-
-#define PSL_COMMANDS \
- { CXL_PSL_SCNTL_An_Pc, "PURGE" }, \
- { CXL_PSL_SCNTL_An_Sc, "SUSPEND" }
-
-
-DECLARE_EVENT_CLASS(cxl_pe_class,
- TP_PROTO(struct cxl_context *ctx),
-
- TP_ARGS(ctx),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u16, pe)
- ),
-
- TP_fast_assign(
- __entry->card = ctx->afu->adapter->adapter_num;
- __entry->afu = ctx->afu->slice;
- __entry->pe = ctx->pe;
- ),
-
- TP_printk("afu%i.%i pe=%i",
- __entry->card,
- __entry->afu,
- __entry->pe
- )
-);
-
-
-TRACE_EVENT(cxl_attach,
- TP_PROTO(struct cxl_context *ctx, u64 wed, s16 num_interrupts, u64 amr),
-
- TP_ARGS(ctx, wed, num_interrupts, amr),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u16, pe)
- __field(pid_t, pid)
- __field(u64, wed)
- __field(u64, amr)
- __field(s16, num_interrupts)
- ),
-
- TP_fast_assign(
- __entry->card = ctx->afu->adapter->adapter_num;
- __entry->afu = ctx->afu->slice;
- __entry->pe = ctx->pe;
- __entry->pid = pid_nr(ctx->pid);
- __entry->wed = wed;
- __entry->amr = amr;
- __entry->num_interrupts = num_interrupts;
- ),
-
- TP_printk("afu%i.%i pid=%i pe=%i wed=0x%016llx irqs=%i amr=0x%llx",
- __entry->card,
- __entry->afu,
- __entry->pid,
- __entry->pe,
- __entry->wed,
- __entry->num_interrupts,
- __entry->amr
- )
-);
-
-DEFINE_EVENT(cxl_pe_class, cxl_detach,
- TP_PROTO(struct cxl_context *ctx),
- TP_ARGS(ctx)
-);
-
-TRACE_EVENT(cxl_afu_irq,
- TP_PROTO(struct cxl_context *ctx, int afu_irq, int virq, irq_hw_number_t hwirq),
-
- TP_ARGS(ctx, afu_irq, virq, hwirq),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u16, pe)
- __field(u16, afu_irq)
- __field(int, virq)
- __field(irq_hw_number_t, hwirq)
- ),
-
- TP_fast_assign(
- __entry->card = ctx->afu->adapter->adapter_num;
- __entry->afu = ctx->afu->slice;
- __entry->pe = ctx->pe;
- __entry->afu_irq = afu_irq;
- __entry->virq = virq;
- __entry->hwirq = hwirq;
- ),
-
- TP_printk("afu%i.%i pe=%i afu_irq=%i virq=%i hwirq=0x%lx",
- __entry->card,
- __entry->afu,
- __entry->pe,
- __entry->afu_irq,
- __entry->virq,
- __entry->hwirq
- )
-);
-
-TRACE_EVENT(cxl_psl9_irq,
- TP_PROTO(struct cxl_context *ctx, int irq, u64 dsisr, u64 dar),
-
- TP_ARGS(ctx, irq, dsisr, dar),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u16, pe)
- __field(int, irq)
- __field(u64, dsisr)
- __field(u64, dar)
- ),
-
- TP_fast_assign(
- __entry->card = ctx->afu->adapter->adapter_num;
- __entry->afu = ctx->afu->slice;
- __entry->pe = ctx->pe;
- __entry->irq = irq;
- __entry->dsisr = dsisr;
- __entry->dar = dar;
- ),
-
- TP_printk("afu%i.%i pe=%i irq=%i dsisr=0x%016llx dsisr=%s dar=0x%016llx",
- __entry->card,
- __entry->afu,
- __entry->pe,
- __entry->irq,
- __entry->dsisr,
- dsisr_psl9_flags(__entry->dsisr),
- __entry->dar
- )
-);
-
-TRACE_EVENT(cxl_psl_irq,
- TP_PROTO(struct cxl_context *ctx, int irq, u64 dsisr, u64 dar),
-
- TP_ARGS(ctx, irq, dsisr, dar),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u16, pe)
- __field(int, irq)
- __field(u64, dsisr)
- __field(u64, dar)
- ),
-
- TP_fast_assign(
- __entry->card = ctx->afu->adapter->adapter_num;
- __entry->afu = ctx->afu->slice;
- __entry->pe = ctx->pe;
- __entry->irq = irq;
- __entry->dsisr = dsisr;
- __entry->dar = dar;
- ),
-
- TP_printk("afu%i.%i pe=%i irq=%i dsisr=%s dar=0x%016llx",
- __entry->card,
- __entry->afu,
- __entry->pe,
- __entry->irq,
- __print_flags(__entry->dsisr, "|", DSISR_FLAGS),
- __entry->dar
- )
-);
-
-TRACE_EVENT(cxl_psl_irq_ack,
- TP_PROTO(struct cxl_context *ctx, u64 tfc),
-
- TP_ARGS(ctx, tfc),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u16, pe)
- __field(u64, tfc)
- ),
-
- TP_fast_assign(
- __entry->card = ctx->afu->adapter->adapter_num;
- __entry->afu = ctx->afu->slice;
- __entry->pe = ctx->pe;
- __entry->tfc = tfc;
- ),
-
- TP_printk("afu%i.%i pe=%i tfc=%s",
- __entry->card,
- __entry->afu,
- __entry->pe,
- __print_flags(__entry->tfc, "|", TFC_FLAGS)
- )
-);
-
-TRACE_EVENT(cxl_ste_miss,
- TP_PROTO(struct cxl_context *ctx, u64 dar),
-
- TP_ARGS(ctx, dar),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u16, pe)
- __field(u64, dar)
- ),
-
- TP_fast_assign(
- __entry->card = ctx->afu->adapter->adapter_num;
- __entry->afu = ctx->afu->slice;
- __entry->pe = ctx->pe;
- __entry->dar = dar;
- ),
-
- TP_printk("afu%i.%i pe=%i dar=0x%016llx",
- __entry->card,
- __entry->afu,
- __entry->pe,
- __entry->dar
- )
-);
-
-TRACE_EVENT(cxl_ste_write,
- TP_PROTO(struct cxl_context *ctx, unsigned int idx, u64 e, u64 v),
-
- TP_ARGS(ctx, idx, e, v),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u16, pe)
- __field(unsigned int, idx)
- __field(u64, e)
- __field(u64, v)
- ),
-
- TP_fast_assign(
- __entry->card = ctx->afu->adapter->adapter_num;
- __entry->afu = ctx->afu->slice;
- __entry->pe = ctx->pe;
- __entry->idx = idx;
- __entry->e = e;
- __entry->v = v;
- ),
-
- TP_printk("afu%i.%i pe=%i SSTE[%i] E=0x%016llx V=0x%016llx",
- __entry->card,
- __entry->afu,
- __entry->pe,
- __entry->idx,
- __entry->e,
- __entry->v
- )
-);
-
-TRACE_EVENT(cxl_pte_miss,
- TP_PROTO(struct cxl_context *ctx, u64 dsisr, u64 dar),
-
- TP_ARGS(ctx, dsisr, dar),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u16, pe)
- __field(u64, dsisr)
- __field(u64, dar)
- ),
-
- TP_fast_assign(
- __entry->card = ctx->afu->adapter->adapter_num;
- __entry->afu = ctx->afu->slice;
- __entry->pe = ctx->pe;
- __entry->dsisr = dsisr;
- __entry->dar = dar;
- ),
-
- TP_printk("afu%i.%i pe=%i dsisr=%s dar=0x%016llx",
- __entry->card,
- __entry->afu,
- __entry->pe,
- __print_flags(__entry->dsisr, "|", DSISR_FLAGS),
- __entry->dar
- )
-);
-
-TRACE_EVENT(cxl_llcmd,
- TP_PROTO(struct cxl_context *ctx, u64 cmd),
-
- TP_ARGS(ctx, cmd),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u16, pe)
- __field(u64, cmd)
- ),
-
- TP_fast_assign(
- __entry->card = ctx->afu->adapter->adapter_num;
- __entry->afu = ctx->afu->slice;
- __entry->pe = ctx->pe;
- __entry->cmd = cmd;
- ),
-
- TP_printk("afu%i.%i pe=%i cmd=%s",
- __entry->card,
- __entry->afu,
- __entry->pe,
- __print_symbolic_u64(__entry->cmd, LLCMD_NAMES)
- )
-);
-
-TRACE_EVENT(cxl_llcmd_done,
- TP_PROTO(struct cxl_context *ctx, u64 cmd, int rc),
-
- TP_ARGS(ctx, cmd, rc),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u16, pe)
- __field(u64, cmd)
- __field(int, rc)
- ),
-
- TP_fast_assign(
- __entry->card = ctx->afu->adapter->adapter_num;
- __entry->afu = ctx->afu->slice;
- __entry->pe = ctx->pe;
- __entry->rc = rc;
- __entry->cmd = cmd;
- ),
-
- TP_printk("afu%i.%i pe=%i cmd=%s rc=%i",
- __entry->card,
- __entry->afu,
- __entry->pe,
- __print_symbolic_u64(__entry->cmd, LLCMD_NAMES),
- __entry->rc
- )
-);
-
-DECLARE_EVENT_CLASS(cxl_afu_psl_ctrl,
- TP_PROTO(struct cxl_afu *afu, u64 cmd),
-
- TP_ARGS(afu, cmd),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u64, cmd)
- ),
-
- TP_fast_assign(
- __entry->card = afu->adapter->adapter_num;
- __entry->afu = afu->slice;
- __entry->cmd = cmd;
- ),
-
- TP_printk("afu%i.%i cmd=%s",
- __entry->card,
- __entry->afu,
- __print_symbolic_u64(__entry->cmd, AFU_COMMANDS)
- )
-);
-
-DECLARE_EVENT_CLASS(cxl_afu_psl_ctrl_done,
- TP_PROTO(struct cxl_afu *afu, u64 cmd, int rc),
-
- TP_ARGS(afu, cmd, rc),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u64, cmd)
- __field(int, rc)
- ),
-
- TP_fast_assign(
- __entry->card = afu->adapter->adapter_num;
- __entry->afu = afu->slice;
- __entry->rc = rc;
- __entry->cmd = cmd;
- ),
-
- TP_printk("afu%i.%i cmd=%s rc=%i",
- __entry->card,
- __entry->afu,
- __print_symbolic_u64(__entry->cmd, AFU_COMMANDS),
- __entry->rc
- )
-);
-
-DEFINE_EVENT(cxl_afu_psl_ctrl, cxl_afu_ctrl,
- TP_PROTO(struct cxl_afu *afu, u64 cmd),
- TP_ARGS(afu, cmd)
-);
-
-DEFINE_EVENT(cxl_afu_psl_ctrl_done, cxl_afu_ctrl_done,
- TP_PROTO(struct cxl_afu *afu, u64 cmd, int rc),
- TP_ARGS(afu, cmd, rc)
-);
-
-DEFINE_EVENT_PRINT(cxl_afu_psl_ctrl, cxl_psl_ctrl,
- TP_PROTO(struct cxl_afu *afu, u64 cmd),
- TP_ARGS(afu, cmd),
-
- TP_printk("psl%i.%i cmd=%s",
- __entry->card,
- __entry->afu,
- __print_symbolic_u64(__entry->cmd, PSL_COMMANDS)
- )
-);
-
-DEFINE_EVENT_PRINT(cxl_afu_psl_ctrl_done, cxl_psl_ctrl_done,
- TP_PROTO(struct cxl_afu *afu, u64 cmd, int rc),
- TP_ARGS(afu, cmd, rc),
-
- TP_printk("psl%i.%i cmd=%s rc=%i",
- __entry->card,
- __entry->afu,
- __print_symbolic_u64(__entry->cmd, PSL_COMMANDS),
- __entry->rc
- )
-);
-
-DEFINE_EVENT(cxl_pe_class, cxl_slbia,
- TP_PROTO(struct cxl_context *ctx),
- TP_ARGS(ctx)
-);
-
-TRACE_EVENT(cxl_hcall,
- TP_PROTO(u64 unit_address, u64 process_token, long rc),
-
- TP_ARGS(unit_address, process_token, rc),
-
- TP_STRUCT__entry(
- __field(u64, unit_address)
- __field(u64, process_token)
- __field(long, rc)
- ),
-
- TP_fast_assign(
- __entry->unit_address = unit_address;
- __entry->process_token = process_token;
- __entry->rc = rc;
- ),
-
- TP_printk("unit_address=0x%016llx process_token=0x%016llx rc=%li",
- __entry->unit_address,
- __entry->process_token,
- __entry->rc
- )
-);
-
-TRACE_EVENT(cxl_hcall_control,
- TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3,
- u64 p4, unsigned long r4, long rc),
-
- TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc),
-
- TP_STRUCT__entry(
- __field(u64, unit_address)
- __field(char *, fct)
- __field(u64, p1)
- __field(u64, p2)
- __field(u64, p3)
- __field(u64, p4)
- __field(unsigned long, r4)
- __field(long, rc)
- ),
-
- TP_fast_assign(
- __entry->unit_address = unit_address;
- __entry->fct = fct;
- __entry->p1 = p1;
- __entry->p2 = p2;
- __entry->p3 = p3;
- __entry->p4 = p4;
- __entry->r4 = r4;
- __entry->rc = rc;
- ),
-
- TP_printk("unit_address=%#.16llx %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li",
- __entry->unit_address,
- __entry->fct,
- __entry->p1,
- __entry->p2,
- __entry->p3,
- __entry->p4,
- __entry->r4,
- __entry->rc
- )
-);
-
-TRACE_EVENT(cxl_hcall_attach,
- TP_PROTO(u64 unit_address, u64 phys_addr, unsigned long process_token,
- unsigned long mmio_addr, unsigned long mmio_size, long rc),
-
- TP_ARGS(unit_address, phys_addr, process_token,
- mmio_addr, mmio_size, rc),
-
- TP_STRUCT__entry(
- __field(u64, unit_address)
- __field(u64, phys_addr)
- __field(unsigned long, process_token)
- __field(unsigned long, mmio_addr)
- __field(unsigned long, mmio_size)
- __field(long, rc)
- ),
-
- TP_fast_assign(
- __entry->unit_address = unit_address;
- __entry->phys_addr = phys_addr;
- __entry->process_token = process_token;
- __entry->mmio_addr = mmio_addr;
- __entry->mmio_size = mmio_size;
- __entry->rc = rc;
- ),
-
- TP_printk("unit_address=0x%016llx phys_addr=0x%016llx "
- "token=0x%.8lx mmio_addr=0x%lx mmio_size=0x%lx rc=%li",
- __entry->unit_address,
- __entry->phys_addr,
- __entry->process_token,
- __entry->mmio_addr,
- __entry->mmio_size,
- __entry->rc
- )
-);
-
-DEFINE_EVENT(cxl_hcall, cxl_hcall_detach,
- TP_PROTO(u64 unit_address, u64 process_token, long rc),
- TP_ARGS(unit_address, process_token, rc)
-);
-
-DEFINE_EVENT(cxl_hcall_control, cxl_hcall_control_function,
- TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3,
- u64 p4, unsigned long r4, long rc),
- TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc)
-);
-
-DEFINE_EVENT(cxl_hcall, cxl_hcall_collect_int_info,
- TP_PROTO(u64 unit_address, u64 process_token, long rc),
- TP_ARGS(unit_address, process_token, rc)
-);
-
-TRACE_EVENT(cxl_hcall_control_faults,
- TP_PROTO(u64 unit_address, u64 process_token,
- u64 control_mask, u64 reset_mask, unsigned long r4,
- long rc),
-
- TP_ARGS(unit_address, process_token,
- control_mask, reset_mask, r4, rc),
-
- TP_STRUCT__entry(
- __field(u64, unit_address)
- __field(u64, process_token)
- __field(u64, control_mask)
- __field(u64, reset_mask)
- __field(unsigned long, r4)
- __field(long, rc)
- ),
-
- TP_fast_assign(
- __entry->unit_address = unit_address;
- __entry->process_token = process_token;
- __entry->control_mask = control_mask;
- __entry->reset_mask = reset_mask;
- __entry->r4 = r4;
- __entry->rc = rc;
- ),
-
- TP_printk("unit_address=0x%016llx process_token=0x%llx "
- "control_mask=%#llx reset_mask=%#llx r4=%#lx rc=%li",
- __entry->unit_address,
- __entry->process_token,
- __entry->control_mask,
- __entry->reset_mask,
- __entry->r4,
- __entry->rc
- )
-);
-
-DEFINE_EVENT(cxl_hcall_control, cxl_hcall_control_facility,
- TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3,
- u64 p4, unsigned long r4, long rc),
- TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc)
-);
-
-TRACE_EVENT(cxl_hcall_download_facility,
- TP_PROTO(u64 unit_address, char *fct, u64 list_address, u64 num,
- unsigned long r4, long rc),
-
- TP_ARGS(unit_address, fct, list_address, num, r4, rc),
-
- TP_STRUCT__entry(
- __field(u64, unit_address)
- __field(char *, fct)
- __field(u64, list_address)
- __field(u64, num)
- __field(unsigned long, r4)
- __field(long, rc)
- ),
-
- TP_fast_assign(
- __entry->unit_address = unit_address;
- __entry->fct = fct;
- __entry->list_address = list_address;
- __entry->num = num;
- __entry->r4 = r4;
- __entry->rc = rc;
- ),
-
- TP_printk("%#.16llx, %s(%#llx, %#llx), %#lx): %li",
- __entry->unit_address,
- __entry->fct,
- __entry->list_address,
- __entry->num,
- __entry->r4,
- __entry->rc
- )
-);
-
-#endif /* _CXL_TRACE_H */
-
-/* This part must be outside protection */
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace
-#include <trace/define_trace.h>
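
The trace header deleted above is built on the ftrace event-class pattern: a single DECLARE_EVENT_CLASS carries the TP_STRUCT__entry/TP_fast_assign/TP_printk boilerplate, DEFINE_EVENT stamps out tracepoints that reuse it, and DEFINE_EVENT_PRINT overrides only the output format. A minimal sketch of the same pattern, with hypothetical event names rather than the deleted cxl ones:

    DECLARE_EVENT_CLASS(example_cmd_class,
            TP_PROTO(int unit, u64 cmd),
            TP_ARGS(unit, cmd),

            TP_STRUCT__entry(
                    __field(int, unit)
                    __field(u64, cmd)
            ),

            TP_fast_assign(
                    __entry->unit = unit;
                    __entry->cmd = cmd;
            ),

            TP_printk("unit=%d cmd=%llu", __entry->unit, __entry->cmd)
    );

    /* Each event reuses the class; only the name differs. */
    DEFINE_EVENT(example_cmd_class, example_cmd_issued,
            TP_PROTO(int unit, u64 cmd),
            TP_ARGS(unit, cmd)
    );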
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
deleted file mode 100644
index 6332db8044bd..000000000000
--- a/drivers/misc/cxl/vphb.c
+++ /dev/null
@@ -1,309 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/pci.h>
-#include <misc/cxl.h>
-#include "cxl.h"
-
-static int cxl_pci_probe_mode(struct pci_bus *bus)
-{
- return PCI_PROBE_NORMAL;
-}
-
-static int cxl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
-{
- return -ENODEV;
-}
-
-static void cxl_teardown_msi_irqs(struct pci_dev *pdev)
-{
- /*
- * MSI should never be set but we still need to provide this
- * callback.
- */
-}
-
-static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
-{
- struct pci_controller *phb;
- struct cxl_afu *afu;
- struct cxl_context *ctx;
-
- phb = pci_bus_to_host(dev->bus);
- afu = (struct cxl_afu *)phb->private_data;
-
- if (!cxl_ops->link_ok(afu->adapter, afu)) {
- dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
- return false;
- }
-
- dev->dev.archdata.dma_offset = PAGE_OFFSET;
-
- /*
- * Allocate a context to do cxl things with. If we eventually do real
- * DMA ops, we'll need a default context to attach them to.
- */
- ctx = cxl_dev_context_init(dev);
- if (IS_ERR(ctx))
- return false;
- dev->dev.archdata.cxl_ctx = ctx;
-
- return (cxl_ops->afu_check_and_enable(afu) == 0);
-}
-
-static void cxl_pci_disable_device(struct pci_dev *dev)
-{
- struct cxl_context *ctx = cxl_get_context(dev);
-
- if (ctx) {
- if (ctx->status == STARTED) {
- dev_err(&dev->dev, "Default context started\n");
- return;
- }
- dev->dev.archdata.cxl_ctx = NULL;
- cxl_release_context(ctx);
- }
-}
-
-static void cxl_pci_reset_secondary_bus(struct pci_dev *dev)
-{
- /* Should we do an AFU reset here ? */
-}
-
-static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
-{
- return (bus << 8) + devfn;
-}
-
-static inline struct cxl_afu *pci_bus_to_afu(struct pci_bus *bus)
-{
- struct pci_controller *phb = bus ? pci_bus_to_host(bus) : NULL;
-
- return phb ? phb->private_data : NULL;
-}
-
-static void cxl_afu_configured_put(struct cxl_afu *afu)
-{
- atomic_dec_if_positive(&afu->configured_state);
-}
-
-static bool cxl_afu_configured_get(struct cxl_afu *afu)
-{
- return atomic_inc_unless_negative(&afu->configured_state);
-}
-
-static inline int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
- struct cxl_afu *afu, int *_record)
-{
- int record;
-
- record = cxl_pcie_cfg_record(bus->number, devfn);
- if (record > afu->crs_num)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- *_record = record;
- return 0;
-}
-
-static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
- int offset, int len, u32 *val)
-{
- int rc, record;
- struct cxl_afu *afu;
- u8 val8;
- u16 val16;
- u32 val32;
-
- afu = pci_bus_to_afu(bus);
- /* Grab a reader lock on afu. */
- if (afu == NULL || !cxl_afu_configured_get(afu))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- rc = cxl_pcie_config_info(bus, devfn, afu, &record);
- if (rc)
- goto out;
-
- switch (len) {
- case 1:
- rc = cxl_ops->afu_cr_read8(afu, record, offset, &val8);
- *val = val8;
- break;
- case 2:
- rc = cxl_ops->afu_cr_read16(afu, record, offset, &val16);
- *val = val16;
- break;
- case 4:
- rc = cxl_ops->afu_cr_read32(afu, record, offset, &val32);
- *val = val32;
- break;
- default:
- WARN_ON(1);
- }
-
-out:
- cxl_afu_configured_put(afu);
- return rc ? PCIBIOS_DEVICE_NOT_FOUND : 0;
-}
-
-static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
- int offset, int len, u32 val)
-{
- int rc, record;
- struct cxl_afu *afu;
-
- afu = pci_bus_to_afu(bus);
- /* Grab a reader lock on afu. */
- if (afu == NULL || !cxl_afu_configured_get(afu))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- rc = cxl_pcie_config_info(bus, devfn, afu, &record);
- if (rc)
- goto out;
-
- switch (len) {
- case 1:
- rc = cxl_ops->afu_cr_write8(afu, record, offset, val & 0xff);
- break;
- case 2:
- rc = cxl_ops->afu_cr_write16(afu, record, offset, val & 0xffff);
- break;
- case 4:
- rc = cxl_ops->afu_cr_write32(afu, record, offset, val);
- break;
- default:
- WARN_ON(1);
- }
-
-out:
- cxl_afu_configured_put(afu);
- return rc ? PCIBIOS_SET_FAILED : 0;
-}
-
-static struct pci_ops cxl_pcie_pci_ops =
-{
- .read = cxl_pcie_read_config,
- .write = cxl_pcie_write_config,
-};
-
-
-static struct pci_controller_ops cxl_pci_controller_ops =
-{
- .probe_mode = cxl_pci_probe_mode,
- .enable_device_hook = cxl_pci_enable_device_hook,
- .disable_device = cxl_pci_disable_device,
- .release_device = cxl_pci_disable_device,
- .reset_secondary_bus = cxl_pci_reset_secondary_bus,
- .setup_msi_irqs = cxl_setup_msi_irqs,
- .teardown_msi_irqs = cxl_teardown_msi_irqs,
-};
-
-int cxl_pci_vphb_add(struct cxl_afu *afu)
-{
- struct pci_controller *phb;
- struct device_node *vphb_dn;
- struct device *parent;
-
- /*
- * If there are no AFU configuration records we won't have anything to
- * expose under the vPHB, so skip creating one, returning success since
- * this is still a valid case. This will also opt us out of EEH
- * handling since we won't have anything special to do if there are no
- * kernel drivers attached to the vPHB, and EEH handling is not yet
- * supported in the peer model.
- */
- if (!afu->crs_num)
- return 0;
-
- /* The parent device is the adapter. Reuse the device node of
- * the adapter.
- * We don't seem to care what device node is used for the vPHB,
- * but tools such as lsvpd walk up the device parents looking
- * for a valid location code, so we might as well show devices
- * attached to the adapter as being located on that adapter.
- */
- parent = afu->adapter->dev.parent;
- vphb_dn = parent->of_node;
-
- /* Alloc and setup PHB data structure */
- phb = pcibios_alloc_controller(vphb_dn);
- if (!phb)
- return -ENODEV;
-
- /* Setup parent in sysfs */
- phb->parent = parent;
-
- /* Setup the PHB using arch provided callback */
- phb->ops = &cxl_pcie_pci_ops;
- phb->cfg_addr = NULL;
- phb->cfg_data = NULL;
- phb->private_data = afu;
- phb->controller_ops = cxl_pci_controller_ops;
-
- /* Scan the bus */
- pcibios_scan_phb(phb);
- if (phb->bus == NULL)
- return -ENXIO;
-
- /* Set release hook on root bus */
- pci_set_host_bridge_release(to_pci_host_bridge(phb->bus->bridge),
- pcibios_free_controller_deferred,
- (void *) phb);
-
- /* Claim resources. This might need some rework as well depending
- * on whether we are doing probe-only or not, like assigning unassigned
- * resources etc...
- */
- pcibios_claim_one_bus(phb->bus);
-
- /* Add probed PCI devices to the device model */
- pci_bus_add_devices(phb->bus);
-
- afu->phb = phb;
-
- return 0;
-}
-
-void cxl_pci_vphb_remove(struct cxl_afu *afu)
-{
- struct pci_controller *phb;
-
- /* If there is no configuration record we won't have one of these */
- if (!afu || !afu->phb)
- return;
-
- phb = afu->phb;
- afu->phb = NULL;
-
- pci_remove_root_bus(phb->bus);
- /*
- * We don't free phb here - that's handled by
- * pcibios_free_controller_deferred()
- */
-}
-
-bool cxl_pci_is_vphb_device(struct pci_dev *dev)
-{
- struct pci_controller *phb;
-
- phb = pci_bus_to_host(dev->bus);
-
- return (phb->ops == &cxl_pcie_pci_ops);
-}
-
-struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
-{
- struct pci_controller *phb;
-
- phb = pci_bus_to_host(dev->bus);
-
- return (struct cxl_afu *)phb->private_data;
-}
-EXPORT_SYMBOL_GPL(cxl_pci_to_afu);
-
-unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev)
-{
- return cxl_pcie_cfg_record(dev->bus->number, dev->devfn);
-}
-EXPORT_SYMBOL_GPL(cxl_pci_to_cfg_record);
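
The configured_state helpers being deleted implement a lightweight reader lock out of two atomics: readers enter with atomic_inc_unless_negative() and leave with atomic_dec_if_positive(), and a writer fences out new readers by driving the counter negative. The idiom in isolation, as a sketch with hypothetical names (a real teardown would also wait for in-flight readers to drain):

    static atomic_t configured = ATOMIC_INIT(0); /* >= 0: open, < 0: torn down */

    static bool cfg_get(void)
    {
            /* Fails once a writer has made the counter negative. */
            return atomic_inc_unless_negative(&configured);
    }

    static void cfg_put(void)
    {
            /* Never takes the counter below zero. */
            atomic_dec_if_positive(&configured);
    }

    static void cfg_teardown(void)
    {
            /* Writer side: refuse all future cfg_get() calls. */
            atomic_set(&configured, -1);
    }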
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 0a7c7f29406c..f721825199ce 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -18,8 +18,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nvmem-provider.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/regmap.h>
@@ -252,7 +250,7 @@ static const struct i2c_device_id at24_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, at24_ids);
-static const struct of_device_id __maybe_unused at24_of_match[] = {
+static const struct of_device_id at24_of_match[] = {
{ .compatible = "atmel,24c00", .data = &at24_data_24c00 },
{ .compatible = "atmel,24c01", .data = &at24_data_24c01 },
{ .compatible = "atmel,24cs01", .data = &at24_data_24cs01 },
@@ -286,7 +284,7 @@ static const struct of_device_id __maybe_unused at24_of_match[] = {
};
MODULE_DEVICE_TABLE(of, at24_of_match);
-static const struct acpi_device_id __maybe_unused at24_acpi_ids[] = {
+static const struct acpi_device_id at24_acpi_ids[] = {
{ "INT3499", (kernel_ulong_t)&at24_data_INT3499 },
{ "TPF0001", (kernel_ulong_t)&at24_data_24c1024 },
{ /* END OF LIST */ }
@@ -848,8 +846,8 @@ static struct i2c_driver at24_driver = {
.driver = {
.name = "at24",
.pm = &at24_pm_ops,
- .of_match_table = of_match_ptr(at24_of_match),
- .acpi_match_table = ACPI_PTR(at24_acpi_ids),
+ .of_match_table = at24_of_match,
+ .acpi_match_table = at24_acpi_ids,
},
.probe = at24_probe,
.remove = at24_remove,
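
The at24 hunks drop of_match_ptr()/ACPI_PTR() together with the __maybe_unused annotations: those wrappers compile the match tables away when CONFIG_OF or CONFIG_ACPI is disabled, which is exactly what made the annotations necessary. Referencing the tables unconditionally is the simpler shape, sketched here with hypothetical names:

    static struct i2c_driver example_driver = {
            .driver = {
                    .name             = "example",
                    /* Tables are always referenced, so they need no
                     * __maybe_unused and no of_match_ptr()/ACPI_PTR(). */
                    .of_match_table   = example_of_match,
                    .acpi_match_table = example_acpi_ids,
            },
            .probe  = example_probe,
            .remove = example_remove,
    };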
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
index 89224d4af4a2..e13f9fdd9d7b 100644
--- a/drivers/misc/eeprom/ee1004.c
+++ b/drivers/misc/eeprom/ee1004.c
@@ -304,6 +304,10 @@ static int ee1004_probe(struct i2c_client *client)
I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_READ_BYTE_DATA))
return -EPFNOSUPPORT;
+ err = i2c_smbus_read_byte(client);
+ if (err < 0)
+ return -ENODEV;
+
mutex_lock(&ee1004_bus_lock);
err = ee1004_init_bus_data(client);
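
The check added to ee1004_probe() is a common presence test: a dummy SMBus receive-byte returns a negative errno when nothing ACKs the address, so probe can bail out with -ENODEV before touching any shared state. The same test in a stripped-down sketch:

    static int example_probe(struct i2c_client *client)
    {
            int err;

            /* Address probe: fails unless a device ACKs on the bus. */
            err = i2c_smbus_read_byte(client);
            if (err < 0)
                    return -ENODEV;

            return 0;
    }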
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 4233dc4cc7d6..6957091ab6de 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -14,7 +14,6 @@
#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/delay.h>
@@ -230,7 +229,7 @@ static int lis3lv02d_get_pwron_wait(struct lis3lv02d *lis3)
return 0;
}
- dev_err(&lis3->pdev->dev, "Error unknown odrs-index: %d\n", odr_idx);
+ dev_err(&lis3->fdev->dev, "Error unknown odrs-index: %d\n", odr_idx);
return -ENXIO;
}
@@ -694,7 +693,7 @@ int lis3lv02d_joystick_enable(struct lis3lv02d *lis3)
input_dev->phys = DRIVER_NAME "/input0";
input_dev->id.bustype = BUS_HOST;
input_dev->id.vendor = 0;
- input_dev->dev.parent = &lis3->pdev->dev;
+ input_dev->dev.parent = &lis3->fdev->dev;
input_dev->open = lis3lv02d_joystick_open;
input_dev->close = lis3lv02d_joystick_close;
@@ -855,32 +854,27 @@ static DEVICE_ATTR(position, S_IRUGO, lis3lv02d_position_show, NULL);
static DEVICE_ATTR(rate, S_IRUGO | S_IWUSR, lis3lv02d_rate_show,
lis3lv02d_rate_set);
-static struct attribute *lis3lv02d_attributes[] = {
+static struct attribute *lis3lv02d_attrs[] = {
&dev_attr_selftest.attr,
&dev_attr_position.attr,
&dev_attr_rate.attr,
NULL
};
-
-static const struct attribute_group lis3lv02d_attribute_group = {
- .attrs = lis3lv02d_attributes
-};
-
+ATTRIBUTE_GROUPS(lis3lv02d);
static int lis3lv02d_add_fs(struct lis3lv02d *lis3)
{
- lis3->pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
- if (IS_ERR(lis3->pdev))
- return PTR_ERR(lis3->pdev);
+ lis3->fdev = faux_device_create_with_groups(DRIVER_NAME, NULL, NULL, lis3lv02d_groups);
+ if (!lis3->fdev)
+ return -ENODEV;
- platform_set_drvdata(lis3->pdev, lis3);
- return sysfs_create_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group);
+ faux_device_set_drvdata(lis3->fdev, lis3);
+ return 0;
}
void lis3lv02d_remove_fs(struct lis3lv02d *lis3)
{
- sysfs_remove_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group);
- platform_device_unregister(lis3->pdev);
+ faux_device_destroy(lis3->fdev);
if (lis3->pm_dev) {
/* Barrier after the sysfs remove */
pm_runtime_barrier(lis3->pm_dev);
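
The lis3lv02d conversion swaps a self-registered platform device for the faux bus, which exists for exactly this kind of device with no physical bus. faux_device_create_with_groups() registers the attribute groups atomically with device creation, so the separate sysfs_create_group()/sysfs_remove_group() calls disappear. A minimal sketch of the API with hypothetical names (example_groups and example_state are assumptions):

    #include <linux/device/faux.h>

    static struct faux_device *example_fdev;

    static int __init example_init(void)
    {
            /* NULL parent and NULL ops are both permitted. */
            example_fdev = faux_device_create_with_groups("example", NULL,
                                                          NULL, example_groups);
            if (!example_fdev)
                    return -ENODEV;

            faux_device_set_drvdata(example_fdev, &example_state);
            return 0;
    }

    static void __exit example_exit(void)
    {
            faux_device_destroy(example_fdev);
    }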
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
index 195bd2fd2eb5..989a49e57554 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.h
+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
@@ -5,7 +5,7 @@
* Copyright (C) 2007-2008 Yan Burman
* Copyright (C) 2008-2009 Eric Piel
*/
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
#include <linux/input.h>
#include <linux/regulator/consumer.h>
#include <linux/miscdevice.h>
@@ -282,7 +282,7 @@ struct lis3lv02d {
*/
struct input_dev *idev; /* input device */
- struct platform_device *pdev; /* platform device */
+ struct faux_device *fdev; /* faux device */
struct regulator_bulk_data regulators[2];
atomic_t count; /* interrupt count after last read */
union axis_conversion ac; /* hw -> logical axis */
diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
index 5b861dbff27e..6c24426104ba 100644
--- a/drivers/misc/lkdtm/perms.c
+++ b/drivers/misc/lkdtm/perms.c
@@ -29,6 +29,13 @@ static const unsigned long rodata = 0xAA55AA55;
static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
/*
+ * This is a pointer to do_nothing() which is initialized at runtime rather
+ * than build time to avoid objtool IBT validation warnings caused by an
+ * inlined unrolled memcpy() in execute_location().
+ */
+static void __ro_after_init *do_nothing_ptr;
+
+/*
* This just returns to the caller. It is designed to be copied into
* non-executable memory regions.
*/
@@ -65,13 +72,12 @@ static noinline __nocfi void execute_location(void *dst, bool write)
{
void (*func)(void);
func_desc_t fdesc;
- void *do_nothing_text = dereference_function_descriptor(do_nothing);
- pr_info("attempting ok execution at %px\n", do_nothing_text);
+ pr_info("attempting ok execution at %px\n", do_nothing_ptr);
do_nothing();
if (write == CODE_WRITE) {
- memcpy(dst, do_nothing_text, EXEC_SIZE);
+ memcpy(dst, do_nothing_ptr, EXEC_SIZE);
flush_icache_range((unsigned long)dst,
(unsigned long)dst + EXEC_SIZE);
}
@@ -267,6 +273,8 @@ static void lkdtm_ACCESS_NULL(void)
void __init lkdtm_perms_init(void)
{
+ do_nothing_ptr = dereference_function_descriptor(do_nothing);
+
/* Make sure we can write to __ro_after_init values during __init */
ro_after_init |= 0xAA;
}
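
As the new comment explains, lkdtm now resolves the do_nothing() code pointer once at init time instead of at every call site, keeping the inlined memcpy() source out of objtool's IBT call-graph validation. The pattern in isolation, assuming the same do_nothing() helper as above:

    static void __ro_after_init *example_text_ptr;

    void __init example_perms_init(void)
    {
            /* Resolve once, while __ro_after_init data is still writable. */
            example_text_ptr = dereference_function_descriptor(do_nothing);
    }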
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 718ec5d81d94..67176caf5416 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -324,28 +324,6 @@ ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag);
/**
- * mei_cldev_recv_nonblock_vtag - non block client receive with vtag (read)
- *
- * @cldev: me client device
- * @buf: buffer to receive
- * @length: buffer length
- * @vtag: virtual tag
- *
- * Return:
- * * read size in bytes
- * * -EAGAIN if function will block.
- * * < 0 on other error
- */
-ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
- size_t length, u8 *vtag)
-{
- struct mei_cl *cl = cldev->cl;
-
- return __mei_cl_recv(cl, buf, length, vtag, MEI_CL_IO_RX_NONBLOCK, 0);
-}
-EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock_vtag);
-
-/**
* mei_cldev_recv_timeout - client receive with timeout (read)
*
* @cldev: me client device
@@ -439,23 +417,6 @@ ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
EXPORT_SYMBOL_GPL(mei_cldev_recv);
/**
- * mei_cldev_recv_nonblock - non block client receive (read)
- *
- * @cldev: me client device
- * @buf: buffer to receive
- * @length: buffer length
- *
- * Return: read size in bytes or < 0 on error
- * -EAGAIN if function will block.
- */
-ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
- size_t length)
-{
- return mei_cldev_recv_nonblock_vtag(cldev, buf, length, NULL);
-}
-EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
-
-/**
* mei_cl_bus_rx_work - dispatch rx event for a bus device
*
* @work: work
@@ -641,19 +602,6 @@ void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data)
EXPORT_SYMBOL_GPL(mei_cldev_set_drvdata);
/**
- * mei_cldev_uuid - return uuid of the underlying me client
- *
- * @cldev: mei client device
- *
- * Return: me client uuid
- */
-const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev)
-{
- return mei_me_cl_uuid(cldev->me_cl);
-}
-EXPORT_SYMBOL_GPL(mei_cldev_uuid);
-
-/**
* mei_cldev_ver - return protocol version of the underlying me client
*
* @cldev: mei client device
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index be011cef12e5..3db07d2a881f 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -272,28 +272,6 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
}
/**
- * mei_me_cl_rm_by_uuid_id - remove all me clients matching client id
- *
- * @dev: the device structure
- * @uuid: me client uuid
- * @id: me client id
- *
- * Locking: called under "dev->device_lock" lock
- */
-void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
-{
- struct mei_me_client *me_cl;
-
- dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);
-
- down_write(&dev->me_clients_rwsem);
- me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
- __mei_me_cl_del(dev, me_cl);
- mei_me_cl_put(me_cl);
- up_write(&dev->me_clients_rwsem);
-}
-
-/**
* mei_me_cl_rm_all - remove all me clients
*
* @dev: the device structure
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 9052860bcfe0..01ed26a148c4 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -29,8 +29,6 @@ struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id);
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
const uuid_le *uuid, u8 client_id);
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid);
-void mei_me_cl_rm_by_uuid_id(struct mei_device *dev,
- const uuid_le *uuid, u8 id);
void mei_me_cl_rm_all(struct mei_device *dev);
/**
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 5d0f68b95c29..e9476f9ae25d 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -1209,48 +1209,3 @@ struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
return dev;
}
-
-/**
- * mei_txe_setup_satt2 - SATT2 configuration for DMA support.
- *
- * @dev: the device structure
- * @addr: physical address start of the range
- * @range: physical range size
- *
- * Return: 0 on success an error code otherwise
- */
-int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range)
-{
- struct mei_txe_hw *hw = to_txe_hw(dev);
-
- u32 lo32 = lower_32_bits(addr);
- u32 hi32 = upper_32_bits(addr);
- u32 ctrl;
-
- /* SATT is limited to 36 Bits */
- if (hi32 & ~0xF)
- return -EINVAL;
-
- /* SATT has to be 16Byte aligned */
- if (lo32 & 0xF)
- return -EINVAL;
-
- /* SATT range has to be 4Bytes aligned */
- if (range & 0x4)
- return -EINVAL;
-
- /* SATT is limited to 32 MB range*/
- if (range > SATT_RANGE_MAX)
- return -EINVAL;
-
- ctrl = SATT2_CTRL_VALID_MSK;
- ctrl |= hi32 << SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT;
-
- mei_txe_br_reg_write(hw, SATT2_SAP_SIZE_REG, range);
- mei_txe_br_reg_write(hw, SATT2_BRG_BA_LSB_REG, lo32);
- mei_txe_br_reg_write(hw, SATT2_CTRL_REG, ctrl);
- dev_dbg(dev->dev, "SATT2: SAP_SIZE_OFFSET=0x%08X, BRG_BA_LSB_OFFSET=0x%08X, CTRL_OFFSET=0x%08X\n",
- range, lo32, ctrl);
-
- return 0;
-}
diff --git a/drivers/misc/mei/hw-txe.h b/drivers/misc/mei/hw-txe.h
index 96511b04bf88..6790e646895d 100644
--- a/drivers/misc/mei/hw-txe.h
+++ b/drivers/misc/mei/hw-txe.h
@@ -59,7 +59,5 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id);
int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req);
-int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range);
-
#endif /* _MEI_HW_TXE_H_ */
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 90ea3dc0fb10..c398ac42eae9 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -330,7 +330,7 @@ static int ldisc_open(struct tty_struct *tty)
ser->tty = tty_kref_get(tty);
ser->dev = dev;
debugfs_init(ser, tty);
- tty->receive_room = N_TTY_BUF_SIZE;
+ tty->receive_room = 4096;
tty->disc_data = ser;
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
rtnl_lock();
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 934ba9425857..1a70605fad38 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -16698,6 +16698,7 @@ static void bnxt_shutdown(struct pci_dev *pdev)
if (!dev)
return;
+ rtnl_lock();
netdev_lock(dev);
bp = netdev_priv(dev);
if (!bp)
@@ -16717,6 +16718,7 @@ static void bnxt_shutdown(struct pci_dev *pdev)
shutdown_exit:
netdev_unlock(dev);
+ rtnl_unlock();
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index cb2f9978f45e..f9a73c956861 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -2077,7 +2077,9 @@ static void gve_handle_reset(struct gve_priv *priv)
if (gve_get_do_reset(priv)) {
rtnl_lock();
+ netdev_lock(priv->dev);
gve_reset(priv, false);
+ netdev_unlock(priv->dev);
rtnl_unlock();
}
}
@@ -2714,6 +2716,7 @@ static void gve_shutdown(struct pci_dev *pdev)
bool was_up = netif_running(priv->dev);
rtnl_lock();
+ netdev_lock(netdev);
if (was_up && gve_close(priv->dev)) {
/* If the dev was up, attempt to close, if close fails, reset */
gve_reset_and_teardown(priv, was_up);
@@ -2721,6 +2724,7 @@ static void gve_shutdown(struct pci_dev *pdev)
/* If the dev wasn't up or close worked, finish tearing down */
gve_teardown_priv_resources(priv);
}
+ netdev_unlock(netdev);
rtnl_unlock();
}
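
The bnxt and gve hunks establish the same ordering for the new per-netdev instance lock: RTNL is taken first, then netdev_lock(), and both are released in reverse order. A sketch of the shape the shutdown/reset paths now share:

    rtnl_lock();
    netdev_lock(netdev);

    /* ... close queues, reset or tear down the device ... */

    netdev_unlock(netdev);
    rtnl_unlock();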
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index cd0d7b7774f1..6575c422635b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -2634,7 +2634,7 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
- vfs -= 64;
+ vfs = 64;
}
intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index dab4deca893f..27c3a2daaaa9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -207,7 +207,7 @@ static void rvu_nix_unregister_interrupts(struct rvu *rvu)
rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
}
- for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++)
+ for (i = NIX_AF_INT_VEC_GEN; i < NIX_AF_INT_VEC_CNT; i++)
if (rvu->irq_allocated[offs + i]) {
free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
rvu->irq_allocated[offs + i] = false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index aa36670d9a36..58ec5e44aa7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -430,7 +430,7 @@ u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
- PAGE_SIZE;
+ MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu));
}
@@ -834,7 +834,8 @@ static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
+ int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
+ MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
@@ -1043,7 +1044,8 @@ u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param)
{
- int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
+ int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
+ MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL));
int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL);
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 1423df8531f7..2bac6be8f6a0 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -661,30 +661,16 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
mpc->rxbpre_total = 0;
for (i = 0; i < num_rxb; i++) {
- if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
- va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
- if (!va)
- goto error;
-
- page = virt_to_head_page(va);
- /* Check if the frag falls back to single page */
- if (compound_order(page) <
- get_order(mpc->rxbpre_alloc_size)) {
- put_page(page);
- goto error;
- }
- } else {
- page = dev_alloc_page();
- if (!page)
- goto error;
+ page = dev_alloc_pages(get_order(mpc->rxbpre_alloc_size));
+ if (!page)
+ goto error;
- va = page_to_virt(page);
- }
+ va = page_to_virt(page);
da = dma_map_single(dev, va + mpc->rxbpre_headroom,
mpc->rxbpre_datasize, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, da)) {
- put_page(virt_to_head_page(va));
+ put_page(page);
goto error;
}
@@ -1676,7 +1662,7 @@ drop:
}
static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
- dma_addr_t *da, bool *from_pool, bool is_napi)
+ dma_addr_t *da, bool *from_pool)
{
struct page *page;
void *va;
@@ -1687,21 +1673,6 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
if (rxq->xdp_save_va) {
va = rxq->xdp_save_va;
rxq->xdp_save_va = NULL;
- } else if (rxq->alloc_size > PAGE_SIZE) {
- if (is_napi)
- va = napi_alloc_frag(rxq->alloc_size);
- else
- va = netdev_alloc_frag(rxq->alloc_size);
-
- if (!va)
- return NULL;
-
- page = virt_to_head_page(va);
- /* Check if the frag falls back to single page */
- if (compound_order(page) < get_order(rxq->alloc_size)) {
- put_page(page);
- return NULL;
- }
} else {
page = page_pool_dev_alloc_pages(rxq->page_pool);
if (!page)
@@ -1734,7 +1705,7 @@ static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
dma_addr_t da;
void *va;
- va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
+ va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
if (!va)
return;
@@ -2176,7 +2147,7 @@ static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
if (mpc->rxbufs_pre)
va = mana_get_rxbuf_pre(rxq, &da);
else
- va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
+ va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
if (!va)
return -ENOMEM;
@@ -2262,6 +2233,7 @@ static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
pprm.nid = gc->numa_node;
pprm.napi = &rxq->rx_cq.napi;
pprm.netdev = rxq->ndev;
+ pprm.order = get_order(rxq->alloc_size);
rxq->page_pool = page_pool_create(&pprm);
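
With the frag-allocator fallback removed, mana depends on the page pool to supply the whole receive buffer, so the pool is created with a matching page order. A hedged sketch of such a setup (names are illustrative):

    static struct page_pool *example_create_pool(struct napi_struct *napi,
                                                 struct net_device *ndev,
                                                 u32 alloc_size, int nid)
    {
            struct page_pool_params pprm = {
                    .order  = get_order(alloc_size), /* multi-page buffers */
                    .nid    = nid,
                    .napi   = napi,
                    .netdev = ndev,
            };

            return page_pool_create(&pprm);
    }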
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index b3118bf0757e..c9fd34787c99 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -516,7 +516,7 @@ static void am65_cpsw_destroy_rxq(struct am65_cpsw_common *common, int id)
napi_disable(&flow->napi_rx);
hrtimer_cancel(&flow->rx_hrtimer);
k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, id, rx_chn,
- am65_cpsw_nuss_rx_cleanup, !!id);
+ am65_cpsw_nuss_rx_cleanup);
for (port = 0; port < common->port_num; port++) {
if (!common->ports[port].ndev)
@@ -3332,7 +3332,7 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
for (i = 0; i < common->rx_ch_num_flows; i++)
k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i,
rx_chan,
- am65_cpsw_nuss_rx_cleanup, !!i);
+ am65_cpsw_nuss_rx_cleanup);
k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index 46f500b90b17..14002b026452 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -1212,7 +1212,7 @@ void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
for (i = 0; i < num_flows; i++)
k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
- prueth_rx_cleanup, !!i);
+ prueth_rx_cleanup);
if (disable)
k3_udma_glue_disable_rx_chn(chn->rx_chn);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 13e43fee1906..9b1de54fd483 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -859,7 +859,7 @@ static int brcm_fet_config_init(struct phy_device *phydev)
return reg;
/* Unmask events we are interested in and mask interrupts globally. */
- if (phydev->phy_id == PHY_ID_BCM5221)
+ if (phydev->drv->phy_id == PHY_ID_BCM5221)
reg = MII_BRCM_FET_IR_ENABLE |
MII_BRCM_FET_IR_MASK;
else
@@ -888,7 +888,7 @@ static int brcm_fet_config_init(struct phy_device *phydev)
return err;
}
- if (phydev->phy_id != PHY_ID_BCM5221) {
+ if (phydev->drv->phy_id != PHY_ID_BCM5221) {
/* Set the LED mode */
reg = __phy_read(phydev, MII_BRCM_FET_SHDW_AUXMODE4);
if (reg < 0) {
@@ -1009,7 +1009,7 @@ static int brcm_fet_suspend(struct phy_device *phydev)
return err;
}
- if (phydev->phy_id == PHY_ID_BCM5221)
+ if (phydev->drv->phy_id == PHY_ID_BCM5221)
/* Force Low Power Mode with clock enabled */
reg = BCM5221_SHDW_AM4_EN_CLK_LPM | BCM5221_SHDW_AM4_FORCE_LPM;
else
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 7b3739b29c8f..bb0bf1415872 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -630,6 +630,16 @@ static const struct driver_info zte_rndis_info = {
.tx_fixup = rndis_tx_fixup,
};
+static const struct driver_info wwan_rndis_info = {
+ .description = "Mobile Broadband RNDIS device",
+ .flags = FLAG_WWAN | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT,
+ .bind = rndis_bind,
+ .unbind = rndis_unbind,
+ .status = rndis_status,
+ .rx_fixup = rndis_rx_fixup,
+ .tx_fixup = rndis_tx_fixup,
+};
+
/*-------------------------------------------------------------------------*/
static const struct usb_device_id products [] = {
@@ -666,9 +676,11 @@ static const struct usb_device_id products [] = {
USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3),
.driver_info = (unsigned long) &rndis_info,
}, {
- /* Novatel Verizon USB730L */
+ /* Mobile Broadband Modem, seen in Novatel Verizon USB730L and
+ * Telit FN990A (RNDIS)
+ */
USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1),
- .driver_info = (unsigned long) &rndis_info,
+ .driver_info = (unsigned long)&wwan_rndis_info,
},
{ }, // END
};
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 44179f4e807f..aeab2308b150 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -178,6 +178,17 @@ int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
}
EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
+static bool usbnet_needs_usb_name_format(struct usbnet *dev, struct net_device *net)
+{
+ /* Point-to-point devices which don't have a real MAC address
+ * (or report a fake local one) have historically used the usb%d
+ * naming. Preserve this.
+ */
+ return (dev->driver_info->flags & FLAG_POINTTOPOINT) != 0 &&
+ (is_zero_ether_addr(net->dev_addr) ||
+ is_local_ether_addr(net->dev_addr));
+}
+
static void intr_complete (struct urb *urb)
{
struct usbnet *dev = urb->context;
@@ -1762,13 +1773,11 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
if (status < 0)
goto out1;
- // heuristic: "usb%d" for links we know are two-host,
- // else "eth%d" when there's reasonable doubt. userspace
- // can rename the link if it knows better.
+ /* heuristic: rename to "eth%d" if we are not sure this link
+ * is two-host (these links keep "usb%d")
+ */
if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
- ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
- /* somebody touched it*/
- !is_zero_ether_addr(net->dev_addr)))
+ !usbnet_needs_usb_name_format(dev, net))
strscpy(net->name, "eth%d", sizeof(net->name));
/* WLAN devices should always be named "wlan%d" */
if ((dev->driver_info->flags & FLAG_WLAN) != 0)
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 56326f38fe8a..995a7207bdf8 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -39,6 +39,7 @@
#include <linux/lapb.h>
#include <linux/init.h>
+#include <net/netdev_lock.h>
#include <net/x25device.h>
static const u8 bcast_addr[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
@@ -366,6 +367,7 @@ static const struct net_device_ops lapbeth_netdev_ops = {
static void lapbeth_setup(struct net_device *dev)
{
+ netdev_lockdep_set_classes(dev);
dev->netdev_ops = &lapbeth_netdev_ops;
dev->needs_free_netdev = true;
dev->type = ARPHRD_X25;
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 9e84ab411564..51614651d2e7 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -56,17 +56,6 @@ bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
return true;
}
-bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
- struct nd_namespace_common **_ndns)
-{
- bool claimed;
-
- nvdimm_bus_lock(&attach->dev);
- claimed = __nd_attach_ndns(dev, attach, _ndns);
- nvdimm_bus_unlock(&attach->dev);
- return claimed;
-}
-
static bool is_idle(struct device *dev, struct nd_namespace_common *ndns)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 082253a3a956..04f4a049599a 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -442,7 +442,8 @@ int nd_label_data_init(struct nvdimm_drvdata *ndd)
if (ndd->data)
return 0;
- if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
+ if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0 ||
+ ndd->nsarea.config_size == 0) {
dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
ndd->nsarea.max_xfer, ndd->nsarea.config_size);
return -ENXIO;
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 86976a9e8a15..bfc6bfeb6e24 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -127,8 +127,6 @@ resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region);
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
struct nd_mapping *nd_mapping);
resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
-int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
- resource_size_t size);
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
struct nd_label_id *label_id);
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd);
@@ -136,8 +134,6 @@ void get_ndd(struct nvdimm_drvdata *ndd);
resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
void nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
-bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
- struct nd_namespace_common **_ndns);
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
struct nd_namespace_common **_ndns);
ssize_t nd_namespace_store(struct device *dev,
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 43156e1576c9..aa50006b7616 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -513,7 +513,7 @@ static int pmem_attach_disk(struct device *dev,
pmem->disk = disk;
pmem->pgmap.owner = pmem;
- pmem->pfn_flags = PFN_DEV;
+ pmem->pfn_flags = 0;
if (is_nd_pfn(dev)) {
pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
pmem->pgmap.ops = &fsdax_pagemap_ops;
@@ -522,7 +522,6 @@ static int pmem_attach_disk(struct device *dev,
pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
pmem->pfn_pad = resource_size(res) -
range_len(&pmem->pgmap.range);
- pmem->pfn_flags |= PFN_MAP;
bb_range = pmem->pgmap.range;
bb_range.start += pmem->data_offset;
} else if (pmem_should_map_pages(dev)) {
@@ -532,7 +531,6 @@ static int pmem_attach_disk(struct device *dev,
pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
pmem->pgmap.ops = &fsdax_pagemap_ops;
addr = devm_memremap_pages(dev, &pmem->pgmap);
- pmem->pfn_flags |= PFN_MAP;
bb_range = pmem->pgmap.range;
} else {
addr = devm_memremap(dev, pmem->phys_addr,
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 37417ce5ec7b..de1ee5ebc851 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -1229,45 +1229,4 @@ bool is_nvdimm_sync(struct nd_region *nd_region)
}
EXPORT_SYMBOL_GPL(is_nvdimm_sync);
-struct conflict_context {
- struct nd_region *nd_region;
- resource_size_t start, size;
-};
-
-static int region_conflict(struct device *dev, void *data)
-{
- struct nd_region *nd_region;
- struct conflict_context *ctx = data;
- resource_size_t res_end, region_end, region_start;
-
- if (!is_memory(dev))
- return 0;
-
- nd_region = to_nd_region(dev);
- if (nd_region == ctx->nd_region)
- return 0;
-
- res_end = ctx->start + ctx->size;
- region_start = nd_region->ndr_start;
- region_end = region_start + nd_region->ndr_size;
- if (ctx->start >= region_start && ctx->start < region_end)
- return -EBUSY;
- if (res_end > region_start && res_end <= region_end)
- return -EBUSY;
- return 0;
-}
-
-int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
- resource_size_t size)
-{
- struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
- struct conflict_context ctx = {
- .nd_region = nd_region,
- .start = start,
- .size = size,
- };
-
- return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
-}
-
MODULE_IMPORT_NS("DEVMEM");
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 10e453b2436e..d47dfa80fb95 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -18,10 +18,15 @@ config NVME_MULTIPATH
bool "NVMe multipath support"
depends on NVME_CORE
help
- This option enables support for multipath access to NVMe
- subsystems. If this option is enabled only a single
- /dev/nvmeXnY device will show up for each NVMe namespace,
- even if it is accessible through multiple controllers.
+ This option controls support for multipath access to NVMe
+ subsystems. If enabled, a single /dev/nvmeXnY device shows up
+ for each NVMe namespace, even when it is accessible through
+ multiple controllers. If disabled, each controller/namespace
+ pairing receives its own /dev/nvmeXnY device entry and NVMe
+ multipath access is not supported.
+
+ If unsure, say Y.
config NVME_VERBOSE_ERRORS
bool "NVMe verbose error reporting"
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 777db89fdaa7..cc23035148b4 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3822,7 +3822,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
"Found shared namespace %d, but multipathing not supported.\n",
info->nsid);
dev_warn_once(ctrl->device,
- "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0.\n");
+ "Shared namespace support requires core_nvme.multipath=Y.\n");
}
}
@@ -4469,11 +4469,9 @@ static void nvme_fw_act_work(struct work_struct *work)
nvme_auth_stop(ctrl);
if (ctrl->mtfa)
- fw_act_timeout = jiffies +
- msecs_to_jiffies(ctrl->mtfa * 100);
+ fw_act_timeout = jiffies + msecs_to_jiffies(ctrl->mtfa * 100);
else
- fw_act_timeout = jiffies +
- msecs_to_jiffies(admin_timeout * 1000);
+ fw_act_timeout = jiffies + secs_to_jiffies(admin_timeout);
nvme_quiesce_io_queues(ctrl);
while (nvme_ctrl_pp_status(ctrl)) {
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index ecf136489044..ca86d3bf7ea4 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -114,8 +114,7 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
static int nvme_map_user_request(struct request *req, u64 ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
- struct io_uring_cmd *ioucmd, unsigned int flags,
- unsigned int iou_issue_flags)
+ struct iov_iter *iter, unsigned int flags)
{
struct request_queue *q = req->q;
struct nvme_ns *ns = q->queuedata;
@@ -129,37 +128,23 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
if (!nvme_ctrl_sgl_supported(ctrl))
dev_warn_once(ctrl->device, "using unchecked data buffer\n");
if (has_metadata) {
- if (!supports_metadata) {
- ret = -EINVAL;
- goto out;
- }
+ if (!supports_metadata)
+ return -EINVAL;
+
if (!nvme_ctrl_meta_sgl_supported(ctrl))
dev_warn_once(ctrl->device,
"using unchecked metadata buffer\n");
}
- if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
- struct iov_iter iter;
-
- /* fixedbufs is only for non-vectored io */
- if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC)) {
- ret = -EINVAL;
- goto out;
- }
- ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
- rq_data_dir(req), &iter, ioucmd,
- iou_issue_flags);
- if (ret < 0)
- goto out;
- ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
- } else {
+ if (iter)
+ ret = blk_rq_map_user_iov(q, req, NULL, iter, GFP_KERNEL);
+ else
ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
0, rq_data_dir(req));
- }
if (ret)
- goto out;
+ return ret;
bio = req->bio;
if (bdev)
@@ -176,8 +161,6 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
out_unmap:
if (bio)
blk_rq_unmap_user(bio);
-out:
- blk_mq_free_request(req);
return ret;
}
@@ -200,9 +183,9 @@ static int nvme_submit_user_cmd(struct request_queue *q,
req->timeout = timeout;
if (ubuffer && bufflen) {
ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
- meta_len, NULL, flags, 0);
+ meta_len, NULL, flags);
if (ret)
- return ret;
+ goto out_free_req;
}
bio = req->bio;
@@ -218,7 +201,10 @@ static int nvme_submit_user_cmd(struct request_queue *q,
if (effects)
nvme_passthru_end(ctrl, ns, effects, cmd, ret);
+ return ret;
+out_free_req:
+ blk_mq_free_request(req);
return ret;
}
@@ -469,6 +455,8 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
struct nvme_uring_data d;
struct nvme_command c;
+ struct iov_iter iter;
+ struct iov_iter *map_iter = NULL;
struct request *req;
blk_opf_t rq_flags = REQ_ALLOC_CACHE;
blk_mq_req_flags_t blk_flags = 0;
@@ -504,6 +492,20 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
d.metadata_len = READ_ONCE(cmd->metadata_len);
d.timeout_ms = READ_ONCE(cmd->timeout_ms);
+ if (d.data_len && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
+ /* fixedbufs is only for non-vectored io */
+ if (vec)
+ return -EINVAL;
+
+ ret = io_uring_cmd_import_fixed(d.addr, d.data_len,
+ nvme_is_write(&c) ? WRITE : READ, &iter, ioucmd,
+ issue_flags);
+ if (ret < 0)
+ return ret;
+
+ map_iter = &iter;
+ }
+
if (issue_flags & IO_URING_F_NONBLOCK) {
rq_flags |= REQ_NOWAIT;
blk_flags = BLK_MQ_REQ_NOWAIT;
@@ -517,11 +519,11 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;
if (d.data_len) {
- ret = nvme_map_user_request(req, d.addr,
- d.data_len, nvme_to_user_ptr(d.metadata),
- d.metadata_len, ioucmd, vec, issue_flags);
+ ret = nvme_map_user_request(req, d.addr, d.data_len,
+ nvme_to_user_ptr(d.metadata), d.metadata_len,
+ map_iter, vec);
if (ret)
- return ret;
+ goto out_free_req;
}
/* to free bio on completion, as req->bio will be null at that time */
@@ -531,6 +533,10 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
req->end_io = nvme_uring_cmd_end_io;
blk_execute_rq_nowait(req, false);
return -EIOCBQUEUED;
+
+out_free_req:
+ blk_mq_free_request(req);
+ return ret;
}
static bool is_ctrl_ioctl(unsigned int cmd)
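
The ioctl refactor tightens request ownership: nvme_map_user_request() now only reports failure and never frees the request, each allocator frees what it allocated through a single out_free_req label, and the fixed-buffer import moves ahead of request allocation so its failure path has nothing to unwind. A sketch of the resulting shape with a hypothetical submit helper:

    static int example_submit(struct request_queue *q, struct nvme_command *c,
                              void __user *ubuf, unsigned int len)
    {
            struct request *req;
            int ret;

            req = nvme_alloc_user_request(q, c, 0, 0);
            if (IS_ERR(req))
                    return PTR_ERR(req);

            /* Mapping failures are reported, never cleaned up here... */
            ret = blk_rq_map_user(q, req, NULL, ubuf, len, GFP_KERNEL);
            if (ret)
                    goto out_free_req;

            return 0;

            /* ...so the allocator is the only place that frees req. */
    out_free_req:
            blk_mq_free_request(req);
            return ret;
    }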
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 2883d17ee1eb..b178d52eac1b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -986,6 +986,9 @@ static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct rq_list *rqlist)
{
struct request *req;
+ if (rq_list_empty(rqlist))
+ return;
+
spin_lock(&nvmeq->sq_lock);
while ((req = rq_list_pop(rqlist))) {
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
diff --git a/drivers/nvme/target/debugfs.c b/drivers/nvme/target/debugfs.c
index e4300eb95101..5dcbd5aa86e1 100644
--- a/drivers/nvme/target/debugfs.c
+++ b/drivers/nvme/target/debugfs.c
@@ -78,7 +78,7 @@ static int nvmet_ctrl_state_show(struct seq_file *m, void *p)
bool sep = false;
int i;
- for (i = 0; i < 7; i++) {
+ for (i = 0; i < ARRAY_SIZE(csts_state_names); i++) {
int state = BIT(i);
if (!(ctrl->csts & state))
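
The debugfs fix replaces a hard-coded bound of 7 with ARRAY_SIZE(), so the loop can no longer fall out of sync with the table it walks. The idiom in isolation (entries are illustrative):

    static const char * const state_names[] = {
            "ready", "cfs", "nssro", "pp",
    };

    static void example_print_states(void)
    {
            int i;

            /* Bound derived from the table itself, not a magic number. */
            for (i = 0; i < ARRAY_SIZE(state_names); i++)
                    pr_info("%s\n", state_names[i]);
    }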
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index b54b3fdbe389..51c27b32248d 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -1264,6 +1264,7 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
u16 status;
+ int ret;
if (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
@@ -1298,6 +1299,24 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
if (status != NVME_SC_SUCCESS)
goto err;
+ /*
+ * Map the CQ PCI address space and since PCI endpoint controllers may
+ * return a partial mapping, check that the mapping is large enough.
+ */
+ ret = nvmet_pci_epf_mem_map(ctrl->nvme_epf, cq->pci_addr, cq->pci_size,
+ &cq->pci_map);
+ if (ret) {
+ dev_err(ctrl->dev, "Failed to map CQ %u (err=%d)\n",
+ cq->qid, ret);
+ goto err_internal;
+ }
+
+ if (cq->pci_map.pci_size < cq->pci_size) {
+ dev_err(ctrl->dev, "Invalid partial mapping of queue %u\n",
+ cq->qid);
+ goto err_unmap_queue;
+ }
+
set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ vector %u\n",
@@ -1305,6 +1324,10 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
return NVME_SC_SUCCESS;
+err_unmap_queue:
+ nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map);
+err_internal:
+ status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
err:
if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
@@ -1322,6 +1345,7 @@ static u16 nvmet_pci_epf_delete_cq(struct nvmet_ctrl *tctrl, u16 cqid)
cancel_delayed_work_sync(&cq->work);
nvmet_pci_epf_drain_queue(cq);
nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
+ nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map);
return NVME_SC_SUCCESS;
}
@@ -1553,36 +1577,6 @@ static void nvmet_pci_epf_free_queues(struct nvmet_pci_epf_ctrl *ctrl)
ctrl->cq = NULL;
}
-static int nvmet_pci_epf_map_queue(struct nvmet_pci_epf_ctrl *ctrl,
- struct nvmet_pci_epf_queue *queue)
-{
- struct nvmet_pci_epf *nvme_epf = ctrl->nvme_epf;
- int ret;
-
- ret = nvmet_pci_epf_mem_map(nvme_epf, queue->pci_addr,
- queue->pci_size, &queue->pci_map);
- if (ret) {
- dev_err(ctrl->dev, "Failed to map queue %u (err=%d)\n",
- queue->qid, ret);
- return ret;
- }
-
- if (queue->pci_map.pci_size < queue->pci_size) {
- dev_err(ctrl->dev, "Invalid partial mapping of queue %u\n",
- queue->qid);
- nvmet_pci_epf_mem_unmap(nvme_epf, &queue->pci_map);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static inline void nvmet_pci_epf_unmap_queue(struct nvmet_pci_epf_ctrl *ctrl,
- struct nvmet_pci_epf_queue *queue)
-{
- nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &queue->pci_map);
-}
-
static void nvmet_pci_epf_exec_iod_work(struct work_struct *work)
{
struct nvmet_pci_epf_iod *iod =
@@ -1746,11 +1740,7 @@ static void nvmet_pci_epf_cq_work(struct work_struct *work)
struct nvme_completion *cqe;
struct nvmet_pci_epf_iod *iod;
unsigned long flags;
- int ret, n = 0;
-
- ret = nvmet_pci_epf_map_queue(ctrl, cq);
- if (ret)
- goto again;
+ int ret = 0, n = 0;
while (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) && ctrl->link_up) {
@@ -1797,8 +1787,6 @@ static void nvmet_pci_epf_cq_work(struct work_struct *work)
n++;
}
- nvmet_pci_epf_unmap_queue(ctrl, cq);
-
/*
* We do not support precise IRQ coalescing time (100ns units as per
* NVMe specifications). So if we have posted completion entries without
@@ -1807,7 +1795,6 @@ static void nvmet_pci_epf_cq_work(struct work_struct *work)
if (n)
nvmet_pci_epf_raise_irq(ctrl, cq, true);
-again:
if (ret < 0)
queue_delayed_work(system_highpri_wq, &cq->work,
NVMET_PCI_EPF_CQ_RETRY_INTERVAL);
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index c6b266c772c8..ec6c8dbdc5e9 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -538,4 +538,37 @@ int pci_max_pasids(struct pci_dev *pdev)
return (1 << FIELD_GET(PCI_PASID_CAP_WIDTH, supported));
}
EXPORT_SYMBOL_GPL(pci_max_pasids);
+
+/**
+ * pci_pasid_status - Check the PASID status
+ * @pdev: PCI device structure
+ *
+ * Returns a negative value when no PASID capability is present.
+ * Otherwise the value of the control register is returned.
+ * The status bits reported are:
+ *
+ * PCI_PASID_CTRL_ENABLE - PASID enabled
+ * PCI_PASID_CTRL_EXEC - Execute permission enabled
+ * PCI_PASID_CTRL_PRIV - Privileged mode enabled
+ */
+int pci_pasid_status(struct pci_dev *pdev)
+{
+ int pasid;
+ u16 ctrl;
+
+ if (pdev->is_virtfn)
+ pdev = pci_physfn(pdev);
+
+ pasid = pdev->pasid_cap;
+ if (!pasid)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pasid + PCI_PASID_CTRL, &ctrl);
+
+ ctrl &= PCI_PASID_CTRL_ENABLE | PCI_PASID_CTRL_EXEC |
+ PCI_PASID_CTRL_PRIV;
+
+ return ctrl;
+}
+EXPORT_SYMBOL_GPL(pci_pasid_status);
#endif /* CONFIG_PCI_PASID */
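
A hedged usage sketch for the new helper: a negative return means the device has no PASID capability; otherwise the masked control bits can be tested directly.

    static bool example_pasid_enabled(struct pci_dev *pdev)
    {
            int status = pci_pasid_status(pdev);

            if (status < 0)
                    return false;   /* no PASID capability */

            return status & PCI_PASID_CTRL_ENABLE;
    }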
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 0cb7e0aaba0e..19214ec81fbb 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -140,13 +140,22 @@ static int p2pmem_alloc_mmap(struct file *filp, struct kobject *kobj,
rcu_read_unlock();
for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
- ret = vm_insert_page(vma, vaddr, virt_to_page(kaddr));
+ struct page *page = virt_to_page(kaddr);
+
+ /*
+ * Initialise the refcount for the freshly allocated page. As
+ * we have just allocated the page, no one else should be
+ * using it.
+ */
+ VM_WARN_ON_ONCE_PAGE(!page_ref_count(page), page);
+ set_page_count(page, 1);
+ ret = vm_insert_page(vma, vaddr, page);
if (ret) {
gen_pool_free(p2pdma->pool, (uintptr_t)kaddr, len);
return ret;
}
percpu_ref_get(ref);
- put_page(virt_to_page(kaddr));
+ put_page(page);
kaddr += PAGE_SIZE;
len -= PAGE_SIZE;
}
@@ -193,7 +202,7 @@ static const struct attribute_group p2pmem_group = {
static void p2pdma_page_free(struct page *page)
{
- struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page->pgmap);
+ struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page_pgmap(page));
/* safe to dereference while a reference is held to the percpu ref */
struct pci_p2pdma *p2pdma =
rcu_dereference_protected(pgmap->provider->p2pdma, 1);
@@ -1016,8 +1025,8 @@ enum pci_p2pdma_map_type
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
struct scatterlist *sg)
{
- if (state->pgmap != sg_page(sg)->pgmap) {
- state->pgmap = sg_page(sg)->pgmap;
+ if (state->pgmap != page_pgmap(sg_page(sg))) {
+ state->pgmap = page_pgmap(sg_page(sg));
state->map = pci_p2pdma_map_type(state->pgmap, dev);
state->bus_off = to_p2p_pgmap(state->pgmap)->bus_offset;
}
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index d018f36f3a89..0c801e4ccc6c 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -1540,7 +1540,7 @@ static ssize_t pccard_extract_cis(struct pcmcia_socket *s, char *buf,
static ssize_t pccard_show_cis(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
unsigned int size = 0x200;
@@ -1571,7 +1571,7 @@ static ssize_t pccard_show_cis(struct file *filp, struct kobject *kobj,
static ssize_t pccard_store_cis(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pcmcia_socket *s;
@@ -1605,6 +1605,6 @@ static ssize_t pccard_store_cis(struct file *filp, struct kobject *kobj,
const struct bin_attribute pccard_cis_attr = {
.attr = { .name = "cis", .mode = S_IRUGO | S_IWUSR },
.size = 0x200,
- .read = pccard_show_cis,
- .write = pccard_store_cis,
+ .read_new = pccard_show_cis,
+ .write_new = pccard_store_cis,
};
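
The cistpl hunks are part of the sysfs constification effort: .read_new/.write_new are the transitional bin_attribute members whose callbacks take a const struct bin_attribute *, allowing the attribute itself to be const. A minimal sketch of a constified binary attribute (hypothetical names, trivial body):

    static ssize_t example_read(struct file *filp, struct kobject *kobj,
                                const struct bin_attribute *attr,
                                char *buf, loff_t off, size_t count)
    {
            return 0;       /* nothing to report in this sketch */
    }

    static const struct bin_attribute example_attr = {
            .attr = { .name = "example", .mode = 0444 },
            .size = 0x200,
            .read_new = example_read,
    };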
diff --git a/drivers/phy/freescale/Kconfig b/drivers/phy/freescale/Kconfig
index dcd9acff6d01..81f53564ee15 100644
--- a/drivers/phy/freescale/Kconfig
+++ b/drivers/phy/freescale/Kconfig
@@ -5,6 +5,7 @@ if (ARCH_MXC && ARM64) || COMPILE_TEST
config PHY_FSL_IMX8MQ_USB
tristate "Freescale i.MX8M USB3 PHY"
depends on OF && HAS_IOMEM
+ depends on TYPEC || TYPEC=n
select GENERIC_PHY
default ARCH_MXC && ARM64
diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
index e98361dcdead..7355d9921b64 100644
--- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
@@ -141,15 +141,9 @@ static int imx8_pcie_phy_power_on(struct phy *phy)
IMX8MM_GPR_PCIE_REF_CLK_PLL);
usleep_range(100, 200);
- switch (imx8_phy->drvdata->variant) {
- case IMX8MP:
- reset_control_deassert(imx8_phy->perst);
- fallthrough;
- case IMX8MM:
- reset_control_deassert(imx8_phy->reset);
- usleep_range(200, 500);
- break;
- }
+ reset_control_deassert(imx8_phy->perst);
+ reset_control_deassert(imx8_phy->reset);
+ usleep_range(200, 500);
/* Do the PHY common block reset */
regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
@@ -162,6 +156,16 @@ static int imx8_pcie_phy_power_on(struct phy *phy)
return ret;
}
+static int imx8_pcie_phy_power_off(struct phy *phy)
+{
+ struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy);
+
+ reset_control_assert(imx8_phy->reset);
+ reset_control_assert(imx8_phy->perst);
+
+ return 0;
+}
+
static int imx8_pcie_phy_init(struct phy *phy)
{
struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy);
@@ -182,6 +186,7 @@ static const struct phy_ops imx8_pcie_phy_ops = {
.init = imx8_pcie_phy_init,
.exit = imx8_pcie_phy_exit,
.power_on = imx8_pcie_phy_power_on,
+ .power_off = imx8_pcie_phy_power_off,
.owner = THIS_MODULE,
};
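
Wiring up .power_off gives the ops table a teardown path symmetric to .power_on: the resets deasserted on power-on are asserted again on power-off. For context, a reduced sketch of the consumer-side sequence these callbacks plug into (the "pcie-phy" name is illustrative, error handling trimmed):

	static int pcie_link_cycle(struct device *dev)
	{
		struct phy *p = devm_phy_get(dev, "pcie-phy");

		if (IS_ERR(p))
			return PTR_ERR(p);
		phy_init(p);		/* -> .init */
		phy_power_on(p);	/* -> .power_on: deasserts perst/reset */
		/* ... link training, traffic ... */
		phy_power_off(p);	/* -> .power_off: asserts the resets again */
		phy_exit(p);		/* -> .exit */
		return 0;
	}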
diff --git a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
index adc6394626ce..a974ef94de9a 100644
--- a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
+++ b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
@@ -10,6 +10,7 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/usb/typec_mux.h>
#define PHY_CTRL0 0x0
#define PHY_CTRL0_REF_SSP_EN BIT(2)
@@ -50,11 +51,66 @@
#define PHY_TUNE_DEFAULT 0xffffffff
+#define TCA_CLK_RST 0x00
+#define TCA_CLK_RST_SW BIT(9)
+#define TCA_CLK_RST_REF_CLK_EN BIT(1)
+#define TCA_CLK_RST_SUSPEND_CLK_EN BIT(0)
+
+#define TCA_INTR_EN 0x04
+#define TCA_INTR_STS 0x08
+
+#define TCA_GCFG 0x10
+#define TCA_GCFG_ROLE_HSTDEV BIT(4)
+#define TCA_GCFG_OP_MODE GENMASK(1, 0)
+#define TCA_GCFG_OP_MODE_SYSMODE 0
+#define TCA_GCFG_OP_MODE_SYNCMODE 1
+
+#define TCA_TCPC 0x14
+#define TCA_TCPC_VALID BIT(4)
+#define TCA_TCPC_LOW_POWER_EN BIT(3)
+#define TCA_TCPC_ORIENTATION_NORMAL BIT(2)
+#define TCA_TCPC_MUX_CONTRL GENMASK(1, 0)
+#define TCA_TCPC_MUX_CONTRL_NO_CONN 0
+#define TCA_TCPC_MUX_CONTRL_USB_CONN 1
+
+#define TCA_SYSMODE_CFG 0x18
+#define TCA_SYSMODE_TCPC_DISABLE BIT(3)
+#define TCA_SYSMODE_TCPC_FLIP BIT(2)
+
+#define TCA_CTRLSYNCMODE_CFG0 0x20
+#define TCA_CTRLSYNCMODE_CFG1 0x24
+
+#define TCA_PSTATE 0x30
+#define TCA_PSTATE_CM_STS BIT(4)
+#define TCA_PSTATE_TX_STS BIT(3)
+#define TCA_PSTATE_RX_PLL_STS BIT(2)
+#define TCA_PSTATE_PIPE0_POWER_DOWN GENMASK(1, 0)
+
+#define TCA_GEN_STATUS 0x34
+#define TCA_GEN_DEV_POR BIT(12)
+#define TCA_GEN_REF_CLK_SEL BIT(8)
+#define TCA_GEN_TYPEC_FLIP_INVERT BIT(4)
+#define TCA_GEN_PHY_TYPEC_DISABLE BIT(3)
+#define TCA_GEN_PHY_TYPEC_FLIP BIT(2)
+
+#define TCA_VBUS_CTRL 0x40
+#define TCA_VBUS_STATUS 0x44
+
+#define TCA_INFO 0xfc
+
+struct tca_blk {
+ struct typec_switch_dev *sw;
+ void __iomem *base;
+ struct mutex mutex;
+ enum typec_orientation orientation;
+};
+
struct imx8mq_usb_phy {
struct phy *phy;
struct clk *clk;
void __iomem *base;
struct regulator *vbus;
+ struct tca_blk *tca;
u32 pcs_tx_swing_full;
u32 pcs_tx_deemph_3p5db;
u32 tx_vref_tune;
@@ -64,6 +120,172 @@ struct imx8mq_usb_phy {
u32 comp_dis_tune;
};
+
+static void tca_blk_orientation_set(struct tca_blk *tca,
+ enum typec_orientation orientation);
+
+#ifdef CONFIG_TYPEC
+
+static int tca_blk_typec_switch_set(struct typec_switch_dev *sw,
+ enum typec_orientation orientation)
+{
+ struct imx8mq_usb_phy *imx_phy = typec_switch_get_drvdata(sw);
+ struct tca_blk *tca = imx_phy->tca;
+ int ret;
+
+ if (tca->orientation == orientation)
+ return 0;
+
+ ret = clk_prepare_enable(imx_phy->clk);
+ if (ret)
+ return ret;
+
+ tca_blk_orientation_set(tca, orientation);
+ clk_disable_unprepare(imx_phy->clk);
+
+ return 0;
+}
+
+static struct typec_switch_dev *tca_blk_get_typec_switch(struct platform_device *pdev,
+ struct imx8mq_usb_phy *imx_phy)
+{
+ struct device *dev = &pdev->dev;
+ struct typec_switch_dev *sw;
+ struct typec_switch_desc sw_desc = { };
+
+ sw_desc.drvdata = imx_phy;
+ sw_desc.fwnode = dev->fwnode;
+ sw_desc.set = tca_blk_typec_switch_set;
+ sw_desc.name = NULL;
+
+ sw = typec_switch_register(dev, &sw_desc);
+ if (IS_ERR(sw)) {
+ dev_err(dev, "Error registering tca orientation switch: %ld\n",
+ PTR_ERR(sw));
+ return NULL;
+ }
+
+ return sw;
+}
+
+static void tca_blk_put_typec_switch(struct typec_switch_dev *sw)
+{
+ typec_switch_unregister(sw);
+}
+
+#else
+
+static struct typec_switch_dev *tca_blk_get_typec_switch(struct platform_device *pdev,
+ struct imx8mq_usb_phy *imx_phy)
+{
+ return NULL;
+}
+
+static void tca_blk_put_typec_switch(struct typec_switch_dev *sw) {}
+
+#endif /* CONFIG_TYPEC */
+
+static void tca_blk_orientation_set(struct tca_blk *tca,
+ enum typec_orientation orientation)
+{
+ u32 val;
+
+ mutex_lock(&tca->mutex);
+
+ if (orientation == TYPEC_ORIENTATION_NONE) {
+ /*
+ * Use Controller Synced Mode to enable TCA low power and
+ * put the PHY into the USB safe state.
+ */
+ val = FIELD_PREP(TCA_GCFG_OP_MODE, TCA_GCFG_OP_MODE_SYNCMODE);
+ writel(val, tca->base + TCA_GCFG);
+
+ val = TCA_TCPC_VALID | TCA_TCPC_LOW_POWER_EN;
+ writel(val, tca->base + TCA_TCPC);
+
+ goto out;
+ }
+
+ /* use System Configuration Mode for TCA mux control. */
+ val = FIELD_PREP(TCA_GCFG_OP_MODE, TCA_GCFG_OP_MODE_SYSMODE);
+ writel(val, tca->base + TCA_GCFG);
+
+ /* Disable TCA module */
+ val = readl(tca->base + TCA_SYSMODE_CFG);
+ val |= TCA_SYSMODE_TCPC_DISABLE;
+ writel(val, tca->base + TCA_SYSMODE_CFG);
+
+ if (orientation == TYPEC_ORIENTATION_REVERSE)
+ val |= TCA_SYSMODE_TCPC_FLIP;
+ else if (orientation == TYPEC_ORIENTATION_NORMAL)
+ val &= ~TCA_SYSMODE_TCPC_FLIP;
+
+ writel(val, tca->base + TCA_SYSMODE_CFG);
+
+ /* Enable TCA module */
+ val &= ~TCA_SYSMODE_TCPC_DISABLE;
+ writel(val, tca->base + TCA_SYSMODE_CFG);
+
+out:
+ tca->orientation = orientation;
+ mutex_unlock(&tca->mutex);
+}
+
+static void tca_blk_init(struct tca_blk *tca)
+{
+ u32 val;
+
+ /* reset XBar block */
+ val = readl(tca->base + TCA_CLK_RST);
+ val &= ~TCA_CLK_RST_SW;
+ writel(val, tca->base + TCA_CLK_RST);
+
+ udelay(100);
+
+ /* clear reset */
+ val |= TCA_CLK_RST_SW;
+ writel(val, tca->base + TCA_CLK_RST);
+
+ tca_blk_orientation_set(tca, tca->orientation);
+}
+
+static struct tca_blk *imx95_usb_phy_get_tca(struct platform_device *pdev,
+ struct imx8mq_usb_phy *imx_phy)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct tca_blk *tca;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res)
+ return NULL;
+
+ tca = devm_kzalloc(dev, sizeof(*tca), GFP_KERNEL);
+ if (!tca)
+ return ERR_PTR(-ENOMEM);
+
+ tca->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tca->base))
+ return ERR_CAST(tca->base);
+
+ mutex_init(&tca->mutex);
+
+ tca->orientation = TYPEC_ORIENTATION_NORMAL;
+ tca->sw = tca_blk_get_typec_switch(pdev, imx_phy);
+
+ return tca;
+}
+
+static void imx95_usb_phy_put_tca(struct imx8mq_usb_phy *imx_phy)
+{
+ struct tca_blk *tca = imx_phy->tca;
+
+ if (!tca)
+ return;
+
+ tca_blk_put_typec_switch(tca->sw);
+}
+
static u32 phy_tx_vref_tune_from_property(u32 percent)
{
percent = clamp(percent, 94U, 124U);
@@ -315,6 +537,9 @@ static int imx8mp_usb_phy_init(struct phy *phy)
imx8m_phy_tune(imx_phy);
+ if (imx_phy->tca)
+ tca_blk_init(imx_phy->tca);
+
return 0;
}
@@ -359,6 +584,8 @@ static const struct of_device_id imx8mq_usb_phy_of_match[] = {
.data = &imx8mq_usb_phy_ops,},
{.compatible = "fsl,imx8mp-usb-phy",
.data = &imx8mp_usb_phy_ops,},
+ {.compatible = "fsl,imx95-usb-phy",
+ .data = &imx8mp_usb_phy_ops,},
{ }
};
MODULE_DEVICE_TABLE(of, imx8mq_usb_phy_of_match);
@@ -398,6 +625,11 @@ static int imx8mq_usb_phy_probe(struct platform_device *pdev)
phy_set_drvdata(imx_phy->phy, imx_phy);
+ imx_phy->tca = imx95_usb_phy_get_tca(pdev, imx_phy);
+ if (IS_ERR(imx_phy->tca))
+ return dev_err_probe(dev, PTR_ERR(imx_phy->tca),
+ "failed to get tca\n");
+
imx8m_get_phy_tuning_data(imx_phy);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
@@ -405,8 +637,16 @@ static int imx8mq_usb_phy_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(phy_provider);
}
+static void imx8mq_usb_phy_remove(struct platform_device *pdev)
+{
+ struct imx8mq_usb_phy *imx_phy = platform_get_drvdata(pdev);
+
+ imx95_usb_phy_put_tca(imx_phy);
+}
+
static struct platform_driver imx8mq_usb_phy_driver = {
.probe = imx8mq_usb_phy_probe,
+ .remove = imx8mq_usb_phy_remove,
.driver = {
.name = "imx8mq-usb-phy",
.of_match_table = imx8mq_usb_phy_of_match,
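
The TCA additions hook this PHY into the Type-C orientation plumbing: the PHY registers a switch, and a port controller elsewhere resolves that switch through the fwnode graph and pushes cable orientation into the .set callback, which then reprograms the TCA block. A hedged sketch of the consumer side (assuming the port's fwnode links to the switch):

	static int port_update_orientation(struct device *dev,
					   enum typec_orientation orientation)
	{
		struct typec_switch *sw;
		int ret;

		sw = fwnode_typec_switch_get(dev_fwnode(dev));
		if (IS_ERR(sw))
			return PTR_ERR(sw);

		ret = typec_switch_set(sw, orientation); /* invokes the .set above */
		typec_switch_put(sw);
		return ret;
	}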
diff --git a/drivers/phy/freescale/phy-fsl-samsung-hdmi.c b/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
index e4c0a82d16d9..10fbe8dee116 100644
--- a/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
+++ b/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
@@ -668,7 +668,7 @@ static int fsl_samsung_hdmi_phy_probe(struct platform_device *pdev)
if (IS_ERR(phy->regs))
return PTR_ERR(phy->regs);
- phy->apbclk = devm_clk_get(phy->dev, "apb");
+ phy->apbclk = devm_clk_get_enabled(phy->dev, "apb");
if (IS_ERR(phy->apbclk))
return dev_err_probe(phy->dev, PTR_ERR(phy->apbclk),
"failed to get apb clk\n");
@@ -678,12 +678,6 @@ static int fsl_samsung_hdmi_phy_probe(struct platform_device *pdev)
return dev_err_probe(phy->dev, PTR_ERR(phy->refclk),
"failed to get ref clk\n");
- ret = clk_prepare_enable(phy->apbclk);
- if (ret) {
- dev_err(phy->dev, "failed to enable apbclk\n");
- return ret;
- }
-
pm_runtime_get_noresume(phy->dev);
pm_runtime_set_active(phy->dev);
pm_runtime_enable(phy->dev);
@@ -699,8 +693,6 @@ static int fsl_samsung_hdmi_phy_probe(struct platform_device *pdev)
return 0;
register_clk_failed:
- clk_disable_unprepare(phy->apbclk);
-
return ret;
}
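
devm_clk_get_enabled() folds devm_clk_get() plus clk_prepare_enable() into one call and registers the matching disable/unprepare as a devres action, which is why both the explicit enable and the register_clk_failed unwinding disappear above. A minimal sketch of the resulting probe shape (clock name as in the driver, function name illustrative):

	static int foo_probe_clk(struct device *dev)
	{
		/* Acquired, prepared and enabled; devres undoes all three. */
		struct clk *apb = devm_clk_get_enabled(dev, "apb");

		if (IS_ERR(apb))
			return dev_err_probe(dev, PTR_ERR(apb),
					     "failed to get apb clk\n");
		/* No clk_disable_unprepare() needed on any exit path. */
		return 0;
	}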
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index a496fbe3352b..644a34bd2b0b 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -1195,7 +1195,7 @@ static int phy_type_syscon_get(struct mtk_phy_instance *instance,
int ret;
/* type switch function is optional */
- if (!of_property_read_bool(dn, "mediatek,syscon-type"))
+ if (!of_property_present(dn, "mediatek,syscon-type"))
return 0;
ret = of_parse_phandle_with_fixed_args(dn, "mediatek,syscon-type",
@@ -1258,7 +1258,7 @@ static int phy_efuse_get(struct mtk_tphy *tphy, struct mtk_phy_instance *instanc
}
/* software efuse is optional */
- instance->efuse_sw_en = device_property_read_bool(dev, "nvmem-cells");
+ instance->efuse_sw_en = device_property_present(dev, "nvmem-cells");
if (!instance->efuse_sw_en)
return 0;
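
The substitution is semantic rather than functional: of_property_read_bool() is meant for genuinely boolean properties, while these call sites only ask whether a phandle-valued property exists at all, which is what of_property_present()/device_property_present() express. A small sketch of the distinction (the first property name is from the hunk above, the second is a typical boolean binding):

	static void parse_optional(struct device_node *np)
	{
		/* Presence check for a phandle-valued property. */
		if (of_property_present(np, "mediatek,syscon-type"))
			; /* parse the phandle-with-args here */

		/* A true boolean flag stays with read_bool. */
		if (of_property_read_bool(np, "wakeup-source"))
			; /* enable wakeup */
	}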
diff --git a/drivers/phy/microchip/Kconfig b/drivers/phy/microchip/Kconfig
index 38039ed0754c..2f0045e874ac 100644
--- a/drivers/phy/microchip/Kconfig
+++ b/drivers/phy/microchip/Kconfig
@@ -15,6 +15,7 @@ config PHY_SPARX5_SERDES
config PHY_LAN966X_SERDES
tristate "SerDes PHY driver for Microchip LAN966X"
select GENERIC_PHY
+ depends on SOC_LAN966 || MCHP_LAN966X_PCI || COMPILE_TEST
depends on OF
depends on MFD_SYSCON
help
diff --git a/drivers/phy/phy-can-transceiver.c b/drivers/phy/phy-can-transceiver.c
index ee4ce4249698..2bec70615449 100644
--- a/drivers/phy/phy-can-transceiver.c
+++ b/drivers/phy/phy-can-transceiver.c
@@ -103,6 +103,7 @@ static int can_transceiver_phy_probe(struct platform_device *pdev)
struct phy *phy;
struct gpio_desc *standby_gpio;
struct gpio_desc *enable_gpio;
+ struct mux_state *mux_state;
u32 max_bitrate = 0;
int err;
@@ -113,13 +114,11 @@ static int can_transceiver_phy_probe(struct platform_device *pdev)
match = of_match_node(can_transceiver_phy_ids, pdev->dev.of_node);
drvdata = match->data;
- if (of_property_read_bool(dev->of_node, "mux-states")) {
- struct mux_state *mux_state;
-
- mux_state = devm_mux_state_get(dev, NULL);
- if (IS_ERR(mux_state))
- return dev_err_probe(&pdev->dev, PTR_ERR(mux_state),
- "failed to get mux\n");
+ mux_state = devm_mux_state_get(dev, NULL);
+ if (IS_ERR(mux_state)) {
+ if (PTR_ERR(mux_state) == -EPROBE_DEFER)
+ return PTR_ERR(mux_state);
+ } else {
can_transceiver_phy->mux_state = mux_state;
}
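
The rework makes the mux genuinely optional while still honouring probe ordering: only -EPROBE_DEFER propagates, and any other error is treated as "no mux wired up". The general idiom, sketched as an illustrative helper around the same getter:

	static int get_optional_mux(struct device *dev, struct mux_state **out)
	{
		struct mux_state *mux = devm_mux_state_get(dev, NULL);

		*out = NULL;
		if (IS_ERR(mux)) {
			if (PTR_ERR(mux) == -EPROBE_DEFER)
				return -EPROBE_DEFER;	/* provider not ready yet */
			return 0;			/* treat as absent */
		}
		*out = mux;
		return 0;
	}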
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 8dfdce605a90..8e2daea81666 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -214,30 +214,6 @@ int phy_pm_runtime_put_sync(struct phy *phy)
}
EXPORT_SYMBOL_GPL(phy_pm_runtime_put_sync);
-void phy_pm_runtime_allow(struct phy *phy)
-{
- if (!phy)
- return;
-
- if (!pm_runtime_enabled(&phy->dev))
- return;
-
- pm_runtime_allow(&phy->dev);
-}
-EXPORT_SYMBOL_GPL(phy_pm_runtime_allow);
-
-void phy_pm_runtime_forbid(struct phy *phy)
-{
- if (!phy)
- return;
-
- if (!pm_runtime_enabled(&phy->dev))
- return;
-
- pm_runtime_forbid(&phy->dev);
-}
-EXPORT_SYMBOL_GPL(phy_pm_runtime_forbid);
-
/**
* phy_init - phy internal initialization before phy operation
* @phy: the phy returned by phy_get()
@@ -405,13 +381,14 @@ EXPORT_SYMBOL_GPL(phy_power_off);
int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode)
{
- int ret;
+ int ret = 0;
- if (!phy || !phy->ops->set_mode)
+ if (!phy)
return 0;
mutex_lock(&phy->mutex);
- ret = phy->ops->set_mode(phy, mode, submode);
+ if (phy->ops->set_mode)
+ ret = phy->ops->set_mode(phy, mode, submode);
if (!ret)
phy->attrs.mode = mode;
mutex_unlock(&phy->mutex);
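
The reshuffle changes observable behaviour in one narrow case: on a PHY whose driver implements no .set_mode op, the requested mode is now still recorded in phy->attrs.mode under the mutex, so later readers of the mode see what the consumer asked for rather than a stale value. Reduced to its essentials, the optional-op pattern looks like this (a sketch, not a second copy of the kernel function):

	static int set_mode_sketch(struct phy *phy, enum phy_mode mode, int submode)
	{
		int ret = 0;

		if (!phy)
			return 0;	/* no PHY attached is not an error */

		mutex_lock(&phy->mutex);
		if (phy->ops->set_mode)		/* the op is optional */
			ret = phy->ops->set_mode(phy, mode, submode);
		if (!ret)
			phy->attrs.mode = mode;	/* bookkeeping happens either way */
		mutex_unlock(&phy->mutex);

		return ret;
	}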
diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
index 846f8c99547f..c1e0a11ddd76 100644
--- a/drivers/phy/qualcomm/Kconfig
+++ b/drivers/phy/qualcomm/Kconfig
@@ -154,6 +154,19 @@ config PHY_QCOM_M31_USB
management. This driver is required even for peripheral only or
host only mode configurations.
+config PHY_QCOM_UNIPHY_PCIE_28LP
+ bool "PCIE UNIPHY 28LP PHY driver"
+ depends on ARCH_QCOM
+ depends on COMMON_CLK
+ depends on HAS_IOMEM
+ depends on OF
+ select GENERIC_PHY
+ help
+ Enable this to support the PCIe UNIPHY 28LP PHY transceiver used
+ with PCIe controllers on Qualcomm IPQ5332 chips. It handles PHY
+ initialization, the clock management required after resetting the
+ hardware, and power management.
+
config PHY_QCOM_USB_HS
tristate "Qualcomm USB HS PHY module"
depends on USB_ULPI_BUS
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index eb60e950ad53..42038bc30974 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_PHY_QCOM_QMP_USB_LEGACY) += phy-qcom-qmp-usb-legacy.o
obj-$(CONFIG_PHY_QCOM_QUSB2) += phy-qcom-qusb2.o
obj-$(CONFIG_PHY_QCOM_SNPS_EUSB2) += phy-qcom-snps-eusb2.o
obj-$(CONFIG_PHY_QCOM_EUSB2_REPEATER) += phy-qcom-eusb2-repeater.o
+obj-$(CONFIG_PHY_QCOM_UNIPHY_PCIE_28LP) += phy-qcom-uniphy-pcie-28lp.o
obj-$(CONFIG_PHY_QCOM_USB_HS) += phy-qcom-usb-hs.o
obj-$(CONFIG_PHY_QCOM_USB_HSIC) += phy-qcom-usb-hsic.o
obj-$(CONFIG_PHY_QCOM_USB_HS_28NM) += phy-qcom-usb-hs-28nm.o
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
index 018bbb300830..c232b8fe9846 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -805,6 +805,58 @@ static const struct qmp_phy_init_tbl qcs615_pcie_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V2_PCS_TXDEEMPH_M3P5DB_V0, 0xe),
};
+static const struct qmp_phy_init_tbl qcs8300_qmp_gen4x2_pcie_rx_alt_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_PI_CONTROLS, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B0, 0x9b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B1, 0xb0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2, 0xd2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B3, 0xf0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B4, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B0, 0x9b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B1, 0xfb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B2, 0xd2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B3, 0xec),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B4, 0x43),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B5, 0xdd),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B6, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B0, 0xf3),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B1, 0xf8),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B2, 0xec),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B3, 0xd6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B4, 0x83),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B5, 0xf5),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B6, 0x5e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_PHPRE_CTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_0_1, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_2_3, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_3, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH4_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH5_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH6_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_Q_PI_INTRINSIC_BIAS_RATE32, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE3, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_SO_GAIN_RATE3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_CNTRL1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_MAN_VAL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x7c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_IDAC_SAOFFSET, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_DAC_ENABLE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_GM_CAL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH2, 0x1f),
+};
+
static const struct qmp_phy_init_tbl sdm845_qmp_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
@@ -3336,6 +3388,40 @@ static const struct qmp_phy_cfg qcs615_pciephy_cfg = {
.phy_status = PHYSTATUS,
};
+static const struct qmp_phy_cfg qcs8300_qmp_gen4x2_pciephy_cfg = {
+ .lanes = 2,
+ .offsets = &qmp_pcie_offsets_v5_20,
+
+ .tbls = {
+ .serdes = sa8775p_qmp_gen4x2_pcie_serdes_alt_tbl,
+ .serdes_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_serdes_alt_tbl),
+ .tx = sa8775p_qmp_gen4_pcie_tx_tbl,
+ .tx_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_tx_tbl),
+ .rx = qcs8300_qmp_gen4x2_pcie_rx_alt_tbl,
+ .rx_num = ARRAY_SIZE(qcs8300_qmp_gen4x2_pcie_rx_alt_tbl),
+ .pcs = sa8775p_qmp_gen4x2_pcie_pcs_alt_tbl,
+ .pcs_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_pcs_alt_tbl),
+ .pcs_misc = sa8775p_qmp_gen4_pcie_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_pcs_misc_tbl),
+ },
+
+ .tbls_rc = &(const struct qmp_phy_cfg_tbls) {
+ .serdes = sa8775p_qmp_gen4x2_pcie_rc_serdes_alt_tbl,
+ .serdes_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_rc_serdes_alt_tbl),
+ .pcs_misc = sa8775p_qmp_gen4_pcie_rc_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_rc_pcs_misc_tbl),
+ },
+
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = pciephy_v5_regs_layout,
+
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS_4_20,
+};
+
static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
.lanes = 1,
@@ -4156,6 +4242,21 @@ static const struct qmp_phy_cfg x1e80100_qmp_gen4x8_pciephy_cfg = {
.has_nocsr_reset = true,
};
+static const struct qmp_phy_cfg qmp_v6_gen4x4_pciephy_cfg = {
+ .lanes = 4,
+
+ .offsets = &qmp_pcie_offsets_v6_20,
+
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = pciephy_v6_regs_layout,
+
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS_4_20,
+};
+
static void qmp_pcie_init_port_b(struct qmp_pcie *qmp, const struct qmp_phy_cfg_tbls *tbls)
{
const struct qmp_phy_cfg *cfg = qmp->cfg;
@@ -4877,6 +4978,9 @@ static const struct of_device_id qmp_pcie_of_match_table[] = {
.compatible = "qcom,qcs615-qmp-gen3x1-pcie-phy",
.data = &qcs615_pciephy_cfg,
}, {
+ .compatible = "qcom,qcs8300-qmp-gen4x2-pcie-phy",
+ .data = &qcs8300_qmp_gen4x2_pciephy_cfg,
+ }, {
.compatible = "qcom,sa8775p-qmp-gen4x2-pcie-phy",
.data = &sa8775p_qmp_gen4x2_pciephy_cfg,
}, {
@@ -4960,6 +5064,9 @@ static const struct of_device_id qmp_pcie_of_match_table[] = {
}, {
.compatible = "qcom,x1e80100-qmp-gen4x8-pcie-phy",
.data = &x1e80100_qmp_gen4x8_pciephy_cfg,
+ }, {
+ .compatible = "qcom,x1p42100-qmp-gen4x4-pcie-phy",
+ .data = &qmp_v6_gen4x4_pciephy_cfg,
},
{ },
};
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h
index 328c6c0b0b09..258f3d30742e 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h
@@ -86,4 +86,11 @@
#define QSERDES_V6_COM_CMN_STATUS 0x1d0
#define QSERDES_V6_COM_C_READY_STATUS 0x1f8
+#define QSERDES_V6_COM_ADAPTIVE_ANALOG_CONFIG 0x268
+#define QSERDES_V6_COM_CP_CTRL_ADAPTIVE_MODE0 0x26c
+#define QSERDES_V6_COM_PLL_RCCTRL_ADAPTIVE_MODE0 0x270
+#define QSERDES_V6_COM_PLL_CCTRL_ADAPTIVE_MODE0 0x274
+#define QSERDES_V6_COM_CP_CTRL_ADAPTIVE_MODE1 0x278
+#define QSERDES_V6_COM_PLL_RCCTRL_ADAPTIVE_MODE1 0x27c
+#define QSERDES_V6_COM_PLL_CCTRL_ADAPTIVE_MODE1 0x280
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v7.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v7.h
new file mode 100644
index 000000000000..3f0522492f85
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v7.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_UFS_V7_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_UFS_V7_H_
+
+#define QSERDES_UFS_V7_TX_RES_CODE_LANE_TX 0x28
+#define QSERDES_UFS_V7_TX_RES_CODE_LANE_RX 0x2c
+#define QSERDES_UFS_V7_TX_RES_CODE_LANE_OFFSET_TX 0x30
+#define QSERDES_UFS_V7_TX_RES_CODE_LANE_OFFSET_RX 0x34
+#define QSERDES_UFS_V7_TX_LANE_MODE_1 0x7c
+#define QSERDES_UFS_V7_TX_FR_DCC_CTRL 0x108
+
+#define QSERDES_UFS_V7_RX_UCDR_FASTLOCK_FO_GAIN_RATE4 0x10
+#define QSERDES_UFS_V7_RX_UCDR_FASTLOCK_SO_GAIN_RATE4 0x24
+#define QSERDES_UFS_V7_RX_UCDR_SO_SATURATION 0x28
+#define QSERDES_UFS_V7_RX_UCDR_FASTLOCK_COUNT_HIGH_RATE4 0x54
+#define QSERDES_UFS_V7_RX_UCDR_PI_CTRL1 0x58
+#define QSERDES_UFS_V7_RX_TERM_BW_CTRL0 0xc4
+#define QSERDES_UFS_V7_RX_UCDR_FO_GAIN_RATE2 0xd4
+#define QSERDES_UFS_V7_RX_UCDR_FO_GAIN_RATE4 0xdc
+#define QSERDES_UFS_V7_RX_UCDR_SO_GAIN_RATE4 0xf0
+#define QSERDES_UFS_V7_RX_UCDR_PI_CONTROLS 0xf4
+#define QSERDES_UFS_V7_RX_VGA_CAL_MAN_VAL 0x178
+#define QSERDES_UFS_V7_RX_EQU_ADAPTOR_CNTRL4 0x1b4
+#define QSERDES_UFS_V7_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x1cc
+#define QSERDES_UFS_V7_RX_OFFSET_ADAPTOR_CNTRL3 0x1d4
+#define QSERDES_UFS_V7_RX_INTERFACE_MODE 0x1f0
+#define QSERDES_UFS_V7_RX_MODE_RATE_0_1_B0 0x218
+#define QSERDES_UFS_V7_RX_MODE_RATE_0_1_B1 0x21c
+#define QSERDES_UFS_V7_RX_MODE_RATE_0_1_B2 0x220
+#define QSERDES_UFS_V7_RX_MODE_RATE_0_1_B3 0x224
+#define QSERDES_UFS_V7_RX_MODE_RATE_0_1_B4 0x228
+#define QSERDES_UFS_V7_RX_MODE_RATE_0_1_B6 0x230
+#define QSERDES_UFS_V7_RX_MODE_RATE_0_1_B7 0x234
+#define QSERDES_UFS_V7_RX_MODE_RATE2_B3 0x248
+#define QSERDES_UFS_V7_RX_MODE_RATE2_B6 0x254
+#define QSERDES_UFS_V7_RX_MODE_RATE2_B7 0x258
+#define QSERDES_UFS_V7_RX_MODE_RATE3_B0 0x260
+#define QSERDES_UFS_V7_RX_MODE_RATE3_B1 0x264
+#define QSERDES_UFS_V7_RX_MODE_RATE3_B2 0x268
+#define QSERDES_UFS_V7_RX_MODE_RATE3_B3 0x26c
+#define QSERDES_UFS_V7_RX_MODE_RATE3_B4 0x270
+#define QSERDES_UFS_V7_RX_MODE_RATE3_B5 0x274
+#define QSERDES_UFS_V7_RX_MODE_RATE3_B7 0x27c
+#define QSERDES_UFS_V7_RX_MODE_RATE3_B8 0x280
+#define QSERDES_UFS_V7_RX_MODE_RATE4_SA_B0 0x284
+#define QSERDES_UFS_V7_RX_MODE_RATE4_SA_B1 0x288
+#define QSERDES_UFS_V7_RX_MODE_RATE4_SA_B2 0x28c
+#define QSERDES_UFS_V7_RX_MODE_RATE4_SA_B3 0x290
+#define QSERDES_UFS_V7_RX_MODE_RATE4_SA_B4 0x294
+#define QSERDES_UFS_V7_RX_MODE_RATE4_SA_B5 0x298
+#define QSERDES_UFS_V7_RX_MODE_RATE4_SA_B6 0x29c
+#define QSERDES_UFS_V7_RX_MODE_RATE4_SA_B7 0x2a0
+#define QSERDES_UFS_V7_RX_MODE_RATE4_SB_B0 0x2a8
+#define QSERDES_UFS_V7_RX_MODE_RATE4_SB_B1 0x2ac
+#define QSERDES_UFS_V7_RX_MODE_RATE4_SB_B2 0x2b0
+#define QSERDES_UFS_V7_RX_MODE_RATE4_SB_B3 0x2b4
+#define QSERDES_UFS_V7_RX_MODE_RATE4_SB_B4 0x2b8
+#define QSERDES_UFS_V7_RX_MODE_RATE4_SB_B5 0x2bc
+#define QSERDES_UFS_V7_RX_MODE_RATE4_SB_B6 0x2c0
+#define QSERDES_UFS_V7_RX_MODE_RATE4_SB_B7 0x2c4
+#define QSERDES_UFS_V7_RX_DLL0_FTUNE_CTRL 0x348
+#define QSERDES_UFS_V7_RX_SIGDET_CAL_TRIM 0x380
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
index d964bdfe8700..45b3b792696e 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
@@ -31,6 +31,7 @@
#include "phy-qcom-qmp-pcs-ufs-v6.h"
#include "phy-qcom-qmp-qserdes-txrx-ufs-v6.h"
+#include "phy-qcom-qmp-qserdes-txrx-ufs-v7.h"
/* QPHY_PCS_READY_STATUS bit */
#define PCS_READY BIT(0)
@@ -949,6 +950,124 @@ static const struct qmp_phy_init_tbl sm8650_ufsphy_g5_pcs[] = {
QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_RX_HSG5_SYNC_WAIT_TIME, 0x9e),
};
+static const struct qmp_phy_init_tbl sm8750_ufsphy_serdes[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0xd9),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_HS_SWITCH_SEL_1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_CFG, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO_MODE1, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_IETRIM, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_IPTRIM, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_CTRL, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_ADAPTIVE_ANALOG_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x41),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_ADAPTIVE_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCCTRL_ADAPTIVE_MODE0, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_ADAPTIVE_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x4c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_ADAPTIVE_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCCTRL_ADAPTIVE_MODE1, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_ADAPTIVE_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xbe),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x23),
+};
+
+static const struct qmp_phy_init_tbl sm8750_ufsphy_tx[] = {
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_TX_LANE_MODE_1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_TX_RES_CODE_LANE_OFFSET_TX, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_TX_RES_CODE_LANE_OFFSET_RX, 0x17),
+};
+
+static const struct qmp_phy_init_tbl sm8750_ufsphy_rx[] = {
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_UCDR_FO_GAIN_RATE2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_UCDR_FO_GAIN_RATE4, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_UCDR_SO_GAIN_RATE4, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_UCDR_PI_CONTROLS, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_OFFSET_ADAPTOR_CNTRL3, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_UCDR_FASTLOCK_COUNT_HIGH_RATE4, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_UCDR_FASTLOCK_FO_GAIN_RATE4, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_UCDR_FASTLOCK_SO_GAIN_RATE4, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_VGA_CAL_MAN_VAL, 0x8e),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_EQU_ADAPTOR_CNTRL4, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE_0_1_B0, 0xce),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE_0_1_B1, 0xce),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE_0_1_B2, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE_0_1_B3, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE_0_1_B4, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE_0_1_B6, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE_0_1_B7, 0x62),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE2_B3, 0x9a),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE2_B6, 0xe2),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE2_B7, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE3_B0, 0x1b),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE3_B1, 0x1b),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE3_B2, 0x98),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE3_B3, 0x9b),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE3_B4, 0x2a),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE3_B5, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE3_B7, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE3_B8, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE4_SA_B0, 0x93),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE4_SA_B1, 0x93),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE4_SA_B2, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE4_SA_B3, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE4_SA_B4, 0x5f),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE4_SA_B5, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE4_SA_B6, 0xe3),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE4_SA_B7, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE4_SB_B0, 0x9b),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE4_SB_B1, 0x9b),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE4_SB_B2, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE4_SB_B3, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE4_SB_B4, 0x5f),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE4_SB_B5, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE4_SB_B6, 0xfb),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_MODE_RATE4_SB_B7, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_UCDR_SO_SATURATION, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_UCDR_PI_CTRL1, 0x94),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_TERM_BW_CTRL0, 0xfa),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_DLL0_FTUNE_CTRL, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V7_RX_SIGDET_CAL_TRIM, 0x77),
+};
+
+static const struct qmp_phy_init_tbl sm8750_ufsphy_pcs[] = {
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_MID_TERM_CTRL1, 0x43),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_PCS_CTRL1, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_RX_SIGDET_CTRL2, 0x68),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_POST_EMP_LVL_S4, 0x0e),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_POST_EMP_LVL_S5, 0x12),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_POST_EMP_LVL_S6, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_POST_EMP_LVL_S7, 0x19),
+};
+
+static const struct qmp_phy_init_tbl sm8750_ufsphy_g4_pcs[] = {
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_HSGEAR_CAPABILITY, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_RX_HSGEAR_CAPABILITY, 0x04),
+};
+
+static const struct qmp_phy_init_tbl sm8750_ufsphy_hs_b_pcs[] = {
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_PCS_CTRL1, 0x41),
+};
+
struct qmp_ufs_offsets {
u16 serdes;
u16 pcs;
@@ -1523,6 +1642,45 @@ static const struct qmp_phy_cfg sm8650_ufsphy_cfg = {
.regs = ufsphy_v6_regs_layout,
};
+static const struct qmp_phy_cfg sm8750_ufsphy_cfg = {
+ .lanes = 2,
+
+ .offsets = &qmp_ufs_offsets_v6,
+ .max_supported_gear = UFS_HS_G5,
+
+ .tbls = {
+ .serdes = sm8750_ufsphy_serdes,
+ .serdes_num = ARRAY_SIZE(sm8750_ufsphy_serdes),
+ .tx = sm8750_ufsphy_tx,
+ .tx_num = ARRAY_SIZE(sm8750_ufsphy_tx),
+ .rx = sm8750_ufsphy_rx,
+ .rx_num = ARRAY_SIZE(sm8750_ufsphy_rx),
+ .pcs = sm8750_ufsphy_pcs,
+ .pcs_num = ARRAY_SIZE(sm8750_ufsphy_pcs),
+ },
+
+ .tbls_hs_b = {
+ .pcs = sm8750_ufsphy_hs_b_pcs,
+ .pcs_num = ARRAY_SIZE(sm8750_ufsphy_hs_b_pcs),
+ },
+
+ .tbls_hs_overlay[0] = {
+ .pcs = sm8750_ufsphy_g4_pcs,
+ .pcs_num = ARRAY_SIZE(sm8750_ufsphy_g4_pcs),
+ .max_gear = UFS_HS_G4,
+ },
+ .tbls_hs_overlay[1] = {
+ .pcs = sm8650_ufsphy_g5_pcs,
+ .pcs_num = ARRAY_SIZE(sm8650_ufsphy_g5_pcs),
+ .max_gear = UFS_HS_G5,
+ },
+
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = ufsphy_v6_regs_layout,
+};
+
static void qmp_ufs_serdes_init(struct qmp_ufs *qmp, const struct qmp_phy_cfg_tbls *tbls)
{
void __iomem *serdes = qmp->serdes;
@@ -1578,23 +1736,25 @@ static int qmp_ufs_get_gear_overlay(struct qmp_ufs *qmp, const struct qmp_phy_cf
return ret;
}
+static void qmp_ufs_init_all(struct qmp_ufs *qmp, const struct qmp_phy_cfg_tbls *tbls)
+{
+ qmp_ufs_serdes_init(qmp, tbls);
+ qmp_ufs_lanes_init(qmp, tbls);
+ qmp_ufs_pcs_init(qmp, tbls);
+}
+
static void qmp_ufs_init_registers(struct qmp_ufs *qmp, const struct qmp_phy_cfg *cfg)
{
int i;
- qmp_ufs_serdes_init(qmp, &cfg->tbls);
- qmp_ufs_lanes_init(qmp, &cfg->tbls);
- qmp_ufs_pcs_init(qmp, &cfg->tbls);
+ qmp_ufs_init_all(qmp, &cfg->tbls);
i = qmp_ufs_get_gear_overlay(qmp, cfg);
if (i >= 0) {
- qmp_ufs_serdes_init(qmp, &cfg->tbls_hs_overlay[i]);
- qmp_ufs_lanes_init(qmp, &cfg->tbls_hs_overlay[i]);
- qmp_ufs_pcs_init(qmp, &cfg->tbls_hs_overlay[i]);
+ qmp_ufs_init_all(qmp, &cfg->tbls_hs_overlay[i]);
}
- if (qmp->mode == PHY_MODE_UFS_HS_B)
- qmp_ufs_serdes_init(qmp, &cfg->tbls_hs_b);
+ if (qmp->mode == PHY_MODE_UFS_HS_B)
+ qmp_ufs_init_all(qmp, &cfg->tbls_hs_b);
}
static int qmp_ufs_com_init(struct qmp_ufs *qmp)
@@ -2061,7 +2221,11 @@ static const struct of_device_id qmp_ufs_of_match_table[] = {
}, {
.compatible = "qcom,sm8650-qmp-ufs-phy",
.data = &sm8650_ufsphy_cfg,
+ }, {
+ .compatible = "qcom,sm8750-qmp-ufs-phy",
+ .data = &sm8750_ufsphy_cfg,
},
{ },
};
MODULE_DEVICE_TABLE(of, qmp_ufs_of_match_table);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c b/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c
index cf12a6f12134..5e7fcb26744a 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c
@@ -1125,6 +1125,9 @@ static const struct of_device_id qmp_usbc_of_match_table[] = {
.compatible = "qcom,qcm2290-qmp-usb3-phy",
.data = &qcm2290_usb3phy_cfg,
}, {
+ .compatible = "qcom,qcs615-qmp-usb3-phy",
+ .data = &qcm2290_usb3phy_cfg,
+ }, {
.compatible = "qcom,sdm660-qmp-usb3-phy",
.data = &sdm660_usb3phy_cfg,
}, {
diff --git a/drivers/phy/qualcomm/phy-qcom-uniphy-pcie-28lp.c b/drivers/phy/qualcomm/phy-qcom-uniphy-pcie-28lp.c
new file mode 100644
index 000000000000..c8b2a3818880
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-uniphy-pcie-28lp.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2025, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/units.h>
+
+#define RST_ASSERT_DELAY_MIN_US 100
+#define RST_ASSERT_DELAY_MAX_US 150
+#define PIPE_CLK_DELAY_MIN_US 5000
+#define PIPE_CLK_DELAY_MAX_US 5100
+#define CLK_EN_DELAY_MIN_US 30
+#define CLK_EN_DELAY_MAX_US 50
+#define CDR_CTRL_REG_1 0x80
+#define CDR_CTRL_REG_2 0x84
+#define CDR_CTRL_REG_3 0x88
+#define CDR_CTRL_REG_4 0x8c
+#define CDR_CTRL_REG_5 0x90
+#define CDR_CTRL_REG_6 0x94
+#define CDR_CTRL_REG_7 0x98
+#define SSCG_CTRL_REG_1 0x9c
+#define SSCG_CTRL_REG_2 0xa0
+#define SSCG_CTRL_REG_3 0xa4
+#define SSCG_CTRL_REG_4 0xa8
+#define SSCG_CTRL_REG_5 0xac
+#define SSCG_CTRL_REG_6 0xb0
+#define PCS_INTERNAL_CONTROL_2 0x2d8
+
+#define PHY_CFG_PLLCFG 0x220
+#define PHY_CFG_EIOS_DTCT_REG 0x3e4
+#define PHY_CFG_GEN3_ALIGN_HOLDOFF_TIME 0x3e8
+
+enum qcom_uniphy_pcie_type {
+ PHY_TYPE_PCIE = 1,
+ PHY_TYPE_PCIE_GEN2,
+ PHY_TYPE_PCIE_GEN3,
+};
+
+struct qcom_uniphy_pcie_regs {
+ u32 offset;
+ u32 val;
+};
+
+struct qcom_uniphy_pcie_data {
+ int lane_offset; /* offset between the lane register bases */
+ u32 phy_type;
+ const struct qcom_uniphy_pcie_regs *init_seq;
+ u32 init_seq_num;
+ u32 pipe_clk_rate;
+};
+
+struct qcom_uniphy_pcie {
+ struct phy phy;
+ struct device *dev;
+ const struct qcom_uniphy_pcie_data *data;
+ struct clk_bulk_data *clks;
+ int num_clks;
+ struct reset_control *resets;
+ void __iomem *base;
+ int lanes;
+};
+
+#define phy_to_dw_phy(x) container_of((x), struct qca_uni_pcie_phy, phy)
+
+static const struct qcom_uniphy_pcie_regs ipq5332_regs[] = {
+ {
+ .offset = PHY_CFG_PLLCFG,
+ .val = 0x30,
+ }, {
+ .offset = PHY_CFG_EIOS_DTCT_REG,
+ .val = 0x53ef,
+ }, {
+ .offset = PHY_CFG_GEN3_ALIGN_HOLDOFF_TIME,
+ .val = 0xcf,
+ },
+};
+
+static const struct qcom_uniphy_pcie_data ipq5332_data = {
+ .lane_offset = 0x800,
+ .phy_type = PHY_TYPE_PCIE_GEN3,
+ .init_seq = ipq5332_regs,
+ .init_seq_num = ARRAY_SIZE(ipq5332_regs),
+ .pipe_clk_rate = 250 * MEGA,
+};
+
+static void qcom_uniphy_pcie_init(struct qcom_uniphy_pcie *phy)
+{
+ const struct qcom_uniphy_pcie_data *data = phy->data;
+ const struct qcom_uniphy_pcie_regs *init_seq;
+ void __iomem *base = phy->base;
+ int lane, i;
+
+ for (lane = 0; lane < phy->lanes; lane++) {
+ init_seq = data->init_seq;
+
+ for (i = 0; i < data->init_seq_num; i++)
+ writel(init_seq[i].val, base + init_seq[i].offset);
+
+ base += data->lane_offset;
+ }
+}
+
+static int qcom_uniphy_pcie_power_off(struct phy *x)
+{
+ struct qcom_uniphy_pcie *phy = phy_get_drvdata(x);
+
+ clk_bulk_disable_unprepare(phy->num_clks, phy->clks);
+
+ return reset_control_assert(phy->resets);
+}
+
+static int qcom_uniphy_pcie_power_on(struct phy *x)
+{
+ struct qcom_uniphy_pcie *phy = phy_get_drvdata(x);
+ int ret;
+
+ ret = reset_control_assert(phy->resets);
+ if (ret) {
+ dev_err(phy->dev, "reset assert failed (%d)\n", ret);
+ return ret;
+ }
+
+ usleep_range(RST_ASSERT_DELAY_MIN_US, RST_ASSERT_DELAY_MAX_US);
+
+ ret = reset_control_deassert(phy->resets);
+ if (ret) {
+ dev_err(phy->dev, "reset deassert failed (%d)\n", ret);
+ return ret;
+ }
+
+ usleep_range(PIPE_CLK_DELAY_MIN_US, PIPE_CLK_DELAY_MAX_US);
+
+ ret = clk_bulk_prepare_enable(phy->num_clks, phy->clks);
+ if (ret) {
+ dev_err(phy->dev, "clk prepare and enable failed %d\n", ret);
+ return ret;
+ }
+
+ usleep_range(CLK_EN_DELAY_MIN_US, CLK_EN_DELAY_MAX_US);
+
+ qcom_uniphy_pcie_init(phy);
+
+ return 0;
+}
+
+static inline int qcom_uniphy_pcie_get_resources(struct platform_device *pdev,
+ struct qcom_uniphy_pcie *phy)
+{
+ struct resource *res;
+
+ phy->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(phy->base))
+ return PTR_ERR(phy->base);
+
+ phy->num_clks = devm_clk_bulk_get_all(phy->dev, &phy->clks);
+ if (phy->num_clks < 0)
+ return phy->num_clks;
+
+ phy->resets = devm_reset_control_array_get_exclusive(phy->dev);
+ if (IS_ERR(phy->resets))
+ return PTR_ERR(phy->resets);
+
+ return 0;
+}
+
+/*
+ * Register a fixed rate pipe clock.
+ *
+ * The <s>_pipe_clksrc generated by the PHY goes to the GCC, which
+ * gates it. The <s>_pipe_clk coming out of the GCC is requested
+ * by the PHY driver for its operations.
+ * We register the <s>_pipe_clksrc here. The GCC driver takes care
+ * of assigning this <s>_pipe_clksrc as parent to <s>_pipe_clk.
+ * The picture below shows this relationship.
+ *
+ * +---------------+
+ * | PHY block |<<---------------------------------------+
+ * | | |
+ * | +-------+ | +-----+ |
+ * I/P---^-->| PLL |---^--->pipe_clksrc--->| GCC |--->pipe_clk---+
+ * clk | +-------+ | +-----+
+ * +---------------+
+ */
+static inline int phy_pipe_clk_register(struct qcom_uniphy_pcie *phy, int id)
+{
+ const struct qcom_uniphy_pcie_data *data = phy->data;
+ struct clk_hw *hw;
+ char name[64];
+
+ snprintf(name, sizeof(name), "phy%d_pipe_clk_src", id);
+ hw = devm_clk_hw_register_fixed_rate(phy->dev, name, NULL, 0,
+ data->pipe_clk_rate);
+ if (IS_ERR(hw))
+ return dev_err_probe(phy->dev, PTR_ERR(hw),
+ "Unable to register %s\n", name);
+
+ return devm_of_clk_add_hw_provider(phy->dev, of_clk_hw_simple_get, hw);
+}
+
+static const struct of_device_id qcom_uniphy_pcie_id_table[] = {
+ {
+ .compatible = "qcom,ipq5332-uniphy-pcie-phy",
+ .data = &ipq5332_data,
+ }, {
+ /* Sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, qcom_uniphy_pcie_id_table);
+
+static const struct phy_ops pcie_ops = {
+ .power_on = qcom_uniphy_pcie_power_on,
+ .power_off = qcom_uniphy_pcie_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int qcom_uniphy_pcie_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ struct qcom_uniphy_pcie *phy;
+ struct phy *generic_phy;
+ int ret;
+
+ phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, phy);
+ phy->dev = &pdev->dev;
+
+ phy->data = of_device_get_match_data(dev);
+ if (!phy->data)
+ return -EINVAL;
+
+ ret = of_property_read_u32(dev_of_node(dev), "num-lanes", &phy->lanes);
+ if (ret)
+ return dev_err_probe(dev, ret, "Couldn't read num-lanes\n");
+
+ ret = qcom_uniphy_pcie_get_resources(pdev, phy);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to get resources: %d\n", ret);
+
+ generic_phy = devm_phy_create(phy->dev, NULL, &pcie_ops);
+ if (IS_ERR(generic_phy))
+ return PTR_ERR(generic_phy);
+
+ phy_set_drvdata(generic_phy, phy);
+
+ ret = phy_pipe_clk_register(phy, generic_phy->id);
+ if (ret)
+ dev_err(&pdev->dev, "failed to register phy pipe clk\n");
+
+ phy_provider = devm_of_phy_provider_register(phy->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ return 0;
+}
+
+static struct platform_driver qcom_uniphy_pcie_driver = {
+ .probe = qcom_uniphy_pcie_probe,
+ .driver = {
+ .name = "qcom-uniphy-pcie",
+ .of_match_table = qcom_uniphy_pcie_id_table,
+ },
+};
+
+module_platform_driver(qcom_uniphy_pcie_driver);
+
+MODULE_DESCRIPTION("PCIE QCOM UNIPHY driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index dcb8e1628632..14698571b607 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -83,6 +83,18 @@ config PHY_ROCKCHIP_PCIE
help
Enable this to support the Rockchip PCIe PHY.
+config PHY_ROCKCHIP_SAMSUNG_DCPHY
+ tristate "Rockchip Samsung MIPI DCPHY driver"
+ depends on (ARCH_ROCKCHIP || COMPILE_TEST)
+ select GENERIC_PHY
+ select GENERIC_PHY_MIPI_DPHY
+ help
+ Enable this to support the Rockchip MIPI DCPHY with
+ the Samsung IP block.
+
+ To compile this driver as a module, choose M here: the module
+ will be called phy-rockchip-samsung-dcphy.
+
config PHY_ROCKCHIP_SAMSUNG_HDPTX
tristate "Rockchip Samsung HDMI/eDP Combo PHY driver"
depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
diff --git a/drivers/phy/rockchip/Makefile b/drivers/phy/rockchip/Makefile
index 010a824e32ce..117aaffd037d 100644
--- a/drivers/phy/rockchip/Makefile
+++ b/drivers/phy/rockchip/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_PHY_ROCKCHIP_INNO_HDMI) += phy-rockchip-inno-hdmi.o
obj-$(CONFIG_PHY_ROCKCHIP_INNO_USB2) += phy-rockchip-inno-usb2.o
obj-$(CONFIG_PHY_ROCKCHIP_NANENG_COMBO_PHY) += phy-rockchip-naneng-combphy.o
obj-$(CONFIG_PHY_ROCKCHIP_PCIE) += phy-rockchip-pcie.o
+obj-$(CONFIG_PHY_ROCKCHIP_SAMSUNG_DCPHY) += phy-rockchip-samsung-dcphy.o
obj-$(CONFIG_PHY_ROCKCHIP_SAMSUNG_HDPTX) += phy-rockchip-samsung-hdptx.o
obj-$(CONFIG_PHY_ROCKCHIP_SNPS_PCIE3) += phy-rockchip-snps-pcie3.o
obj-$(CONFIG_PHY_ROCKCHIP_TYPEC) += phy-rockchip-typec.o
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index 96f3d868a526..b5e6a864deeb 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -440,7 +440,7 @@ static int rockchip_usb2phy_extcon_register(struct rockchip_usb2phy *rphy)
struct extcon_dev *edev;
int ret;
- if (of_property_read_bool(node, "extcon")) {
+ if (of_property_present(node, "extcon")) {
edev = extcon_get_edev_by_phandle(rphy->dev, 0);
if (IS_ERR(edev))
return dev_err_probe(rphy->dev, PTR_ERR(edev),
@@ -1323,7 +1323,7 @@ static int rockchip_usb2phy_otg_port_init(struct rockchip_usb2phy *rphy,
goto out;
}
- if (!of_property_read_bool(rphy->dev->of_node, "extcon")) {
+ if (!of_property_present(rphy->dev->of_node, "extcon")) {
/* do initial sync of usb state */
id = property_enabled(rphy->grf, &rport->port_cfg->utmi_id);
extcon_set_state_sync(rphy->edev, EXTCON_USB_HOST, !id);
diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
index 8c3ce57f8915..ce91fb1d5167 100644
--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
@@ -396,6 +396,154 @@ static int rockchip_combphy_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(phy_provider);
}
+static int rk3562_combphy_cfg(struct rockchip_combphy_priv *priv)
+{
+ const struct rockchip_combphy_grfcfg *cfg = priv->cfg->grfcfg;
+ unsigned long rate;
+ u32 val;
+
+ switch (priv->type) {
+ case PHY_TYPE_PCIE:
+ /* Set SSC downward spread spectrum */
+ rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK,
+ PHYREG32_SSC_DOWNWARD << PHYREG32_SSC_DIR_SHIFT,
+ PHYREG32);
+
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con0_for_pcie, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_pcie, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con2_for_pcie, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con3_for_pcie, true);
+ break;
+ case PHY_TYPE_USB3:
+ /* Set SSC downward spread spectrum */
+ rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK,
+ PHYREG32_SSC_DOWNWARD << PHYREG32_SSC_DIR_SHIFT,
+ PHYREG32);
+
+ /* Enable adaptive CTLE for USB3.0 Rx */
+ rockchip_combphy_updatel(priv, PHYREG15_CTLE_EN,
+ PHYREG15_CTLE_EN, PHYREG15);
+
+ /* Set PLL KVCO fine tuning signals */
+ rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK, BIT(3), PHYREG33);
+
+ /* Set PLL LPF R1 to su_trim[10:7]=1001 */
+ writel(PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + PHYREG12);
+
+ /* Set PLL input clock divider 1/2 */
+ val = FIELD_PREP(PHYREG6_PLL_DIV_MASK, PHYREG6_PLL_DIV_2);
+ rockchip_combphy_updatel(priv, PHYREG6_PLL_DIV_MASK, val, PHYREG6);
+
+ /* Set PLL loop divider */
+ writel(PHYREG18_PLL_LOOP, priv->mmio + PHYREG18);
+
+ /* Set PLL KVCO to min and set PLL charge pump current to max */
+ writel(PHYREG11_SU_TRIM_0_7, priv->mmio + PHYREG11);
+
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_sel_usb, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txcomp_sel, false);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txelec_sel, false);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->usb_mode_set, true);
+ break;
+ default:
+ dev_err(priv->dev, "incompatible PHY type\n");
+ return -EINVAL;
+ }
+
+ rate = clk_get_rate(priv->refclk);
+
+ switch (rate) {
+ case REF_CLOCK_24MHz:
+ if (priv->type == PHY_TYPE_USB3) {
+ /* Set ssc_cnt[9:0]=0101111101 & 31.5KHz */
+ val = FIELD_PREP(PHYREG15_SSC_CNT_MASK, PHYREG15_SSC_CNT_VALUE);
+ rockchip_combphy_updatel(priv, PHYREG15_SSC_CNT_MASK,
+ val, PHYREG15);
+
+ writel(PHYREG16_SSC_CNT_VALUE, priv->mmio + PHYREG16);
+ }
+ break;
+ case REF_CLOCK_25MHz:
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_25m, true);
+ break;
+ case REF_CLOCK_100MHz:
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_100m, true);
+ if (priv->type == PHY_TYPE_PCIE) {
+ /* PLL KVCO tuning fine */
+ val = FIELD_PREP(PHYREG33_PLL_KVCO_MASK, PHYREG33_PLL_KVCO_VALUE);
+ rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
+ val, PHYREG33);
+
+ /* Enable controlling random jitter, aka RMJ */
+ writel(0x4, priv->mmio + PHYREG12);
+
+ val = PHYREG6_PLL_DIV_2 << PHYREG6_PLL_DIV_SHIFT;
+ rockchip_combphy_updatel(priv, PHYREG6_PLL_DIV_MASK,
+ val, PHYREG6);
+
+ writel(0x32, priv->mmio + PHYREG18);
+ writel(0xf0, priv->mmio + PHYREG11);
+ }
+ break;
+ default:
+ dev_err(priv->dev, "Unsupported rate: %lu\n", rate);
+ return -EINVAL;
+ }
+
+ if (priv->ext_refclk) {
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_ext, true);
+ if (priv->type == PHY_TYPE_PCIE && rate == REF_CLOCK_100MHz) {
+ val = PHYREG13_RESISTER_HIGH_Z << PHYREG13_RESISTER_SHIFT;
+ val |= PHYREG13_CKRCV_AMP0;
+ rockchip_combphy_updatel(priv, PHYREG13_RESISTER_MASK, val, PHYREG13);
+
+ val = readl(priv->mmio + PHYREG14);
+ val |= PHYREG14_CKRCV_AMP1;
+ writel(val, priv->mmio + PHYREG14);
+ }
+ }
+
+ if (priv->enable_ssc) {
+ val = readl(priv->mmio + PHYREG8);
+ val |= PHYREG8_SSC_EN;
+ writel(val, priv->mmio + PHYREG8);
+ }
+
+ return 0;
+}
+
+static const struct rockchip_combphy_grfcfg rk3562_combphy_grfcfgs = {
+ /* pipe-phy-grf */
+ .pcie_mode_set = { 0x0000, 5, 0, 0x00, 0x11 },
+ .usb_mode_set = { 0x0000, 5, 0, 0x00, 0x04 },
+ .pipe_rxterm_set = { 0x0000, 12, 12, 0x00, 0x01 },
+ .pipe_txelec_set = { 0x0004, 1, 1, 0x00, 0x01 },
+ .pipe_txcomp_set = { 0x0004, 4, 4, 0x00, 0x01 },
+ .pipe_clk_25m = { 0x0004, 14, 13, 0x00, 0x01 },
+ .pipe_clk_100m = { 0x0004, 14, 13, 0x00, 0x02 },
+ .pipe_phymode_sel = { 0x0008, 1, 1, 0x00, 0x01 },
+ .pipe_rate_sel = { 0x0008, 2, 2, 0x00, 0x01 },
+ .pipe_rxterm_sel = { 0x0008, 8, 8, 0x00, 0x01 },
+ .pipe_txelec_sel = { 0x0008, 12, 12, 0x00, 0x01 },
+ .pipe_txcomp_sel = { 0x0008, 15, 15, 0x00, 0x01 },
+ .pipe_clk_ext = { 0x000c, 9, 8, 0x02, 0x01 },
+ .pipe_sel_usb = { 0x000c, 14, 13, 0x00, 0x01 },
+ .pipe_phy_status = { 0x0034, 6, 6, 0x01, 0x00 },
+ .con0_for_pcie = { 0x0000, 15, 0, 0x00, 0x1000 },
+ .con1_for_pcie = { 0x0004, 15, 0, 0x00, 0x0000 },
+ .con2_for_pcie = { 0x0008, 15, 0, 0x00, 0x0101 },
+ .con3_for_pcie = { 0x000c, 15, 0, 0x00, 0x0200 },
+};
+
+static const struct rockchip_combphy_cfg rk3562_combphy_cfgs = {
+ .num_phys = 1,
+ .phy_ids = {
+ 0xff750000
+ },
+ .grfcfg = &rk3562_combphy_grfcfgs,
+ .combphy_cfg = rk3562_combphy_cfg,
+};
+
static int rk3568_combphy_cfg(struct rockchip_combphy_priv *priv)
{
const struct rockchip_combphy_grfcfg *cfg = priv->cfg->grfcfg;
@@ -1050,6 +1198,10 @@ static const struct rockchip_combphy_cfg rk3588_combphy_cfgs = {
static const struct of_device_id rockchip_combphy_of_match[] = {
{
+ .compatible = "rockchip,rk3562-naneng-combphy",
+ .data = &rk3562_combphy_cfgs,
+ },
+ {
.compatible = "rockchip,rk3568-naneng-combphy",
.data = &rk3568_combphy_cfgs,
},
diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c b/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c
new file mode 100644
index 000000000000..08c78c1bafc9
--- /dev/null
+++ b/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c
@@ -0,0 +1,1719 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2025 Rockchip Electronics Co.Ltd
+ * Author:
+ * Guochun Huang <hero.huang@rock-chips.com>
+ */
+
+#include <dt-bindings/phy/phy.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
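+/*
+ * FIELD_PREP_HIWORD() produces the hiword-mask write pattern used by
+ * Rockchip-style registers: the field value sits in the low 16 bits
+ * and the field mask is replicated into the high 16 bits as a write
+ * enable, so the hardware touches only the masked bits and no
+ * read-modify-write is needed. For example,
+ * FIELD_PREP_HIWORD(GENMASK(6, 4), 5) yields 0x00700050: value 5
+ * shifted into bits [6:4], write-enable mask in bits [22:20].
+ */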
+#define FIELD_PREP_HIWORD(_mask, _val) \
+ ( \
+ FIELD_PREP((_mask), (_val)) | \
+ ((_mask) << 16) \
+ )
+
+#define BIAS_CON0 0x0000
+#define I_RES_CNTL_MASK GENMASK(6, 4)
+#define I_RES_CNTL(x) FIELD_PREP(I_RES_CNTL_MASK, x)
+#define I_RES_059_2UA I_RES_CNTL(0)
+#define I_RES_100_2UA I_RES_CNTL(1)
+#define I_RES_094_2UA I_RES_CNTL(2)
+#define I_RES_113_8UA I_RES_CNTL(3)
+#define I_RES_089_7UA I_RES_CNTL(4)
+#define I_RES_111_8UA I_RES_CNTL(5)
+#define I_RES_108_2UA I_RES_CNTL(6)
+#define I_RES_120_8UA I_RES_CNTL(7)
+#define I_DEV_SEL_MASK GENMASK(1, 0)
+#define I_DEV_SEL(x) FIELD_PREP(I_DEV_SEL_MASK, x)
+#define I_DEV_DIV_6 I_DEV_SEL(0)
+#define I_DEV_DIV_12 I_DEV_SEL(1)
+#define I_DEV_DIV_20 I_DEV_SEL(2)
+#define I_DEV_DIV_40 I_DEV_SEL(3)
+
+#define BIAS_CON1 0x0004
+#define I_VBG_SEL_MASK GENMASK(9, 8)
+#define I_VBG_SEL(x) FIELD_PREP(I_VBG_SEL_MASK, x)
+#define I_VBG_SEL_780MV I_VBG_SEL(0)
+#define I_VBG_SEL_820MV I_VBG_SEL(1)
+#define I_VBG_SEL_860MV I_VBG_SEL(2)
+#define I_VBG_SEL_900MV I_VBG_SEL(3)
+#define I_BGR_VREF_SEL_MASK GENMASK(5, 4)
+#define I_BGR_VREF_SEL(x) FIELD_PREP(I_BGR_VREF_SEL_MASK, x)
+#define I_BGR_VREF_810MV I_BGR_VREF_SEL(0)
+#define I_BGR_VREF_820MV I_BGR_VREF_SEL(1)
+#define I_BGR_VREF_830MV I_BGR_VREF_SEL(2)
+#define I_BGR_VREF_840MV I_BGR_VREF_SEL(3)
+#define I_LADDER_SEL_MASK GENMASK(2, 0)
+#define I_LADDER_SEL(x) FIELD_PREP(I_LADDER_SEL_MASK, x)
+#define I_LADDER_1_00V I_LADDER_SEL(0)
+#define I_LADDER_0_96V I_LADDER_SEL(1)
+#define I_LADDER_0_92V I_LADDER_SEL(2)
+#define I_LADDER_0_88V I_LADDER_SEL(3)
+#define I_LADDER_0_84V I_LADDER_SEL(4)
+#define I_LADDER_0_80V I_LADDER_SEL(5)
+#define I_LADDER_0_76V I_LADDER_SEL(6)
+#define I_LADDER_0_72V I_LADDER_SEL(7)
+
+/*
+ * Voltage corrections around reference voltages.
+ * The selection between the 400-based and 200-based values for REG_400M
+ * is made by the hardware, depending on whether I_MUX below is 400MV or 200MV.
+ */
+#define BIAS_CON2 0x0008
+#define REG_325M_MASK GENMASK(14, 12)
+#define REG_325M(x) FIELD_PREP(REG_325M_MASK, x)
+#define REG_325M_295MV REG_325M(0)
+#define REG_325M_305MV REG_325M(1)
+#define REG_325M_315MV REG_325M(2)
+#define REG_325M_325MV REG_325M(3)
+#define REG_325M_335MV REG_325M(4)
+#define REG_325M_345MV REG_325M(5)
+#define REG_325M_355MV REG_325M(6)
+#define REG_325M_365MV REG_325M(7)
+#define REG_LP_400M_MASK GENMASK(10, 8)
+#define REG_LP_400M(x) FIELD_PREP(REG_LP_400M_MASK, x)
+#define REG_LP_400M_380MV REG_LP_400M(0)
+#define REG_LP_400M_390MV REG_LP_400M(1)
+#define REG_LP_400M_400MV REG_LP_400M(2)
+#define REG_LP_400M_410MV REG_LP_400M(3)
+#define REG_LP_400M_420MV REG_LP_400M(4)
+#define REG_LP_400M_430MV REG_LP_400M(5)
+#define REG_LP_400M_440MV REG_LP_400M(6)
+#define REG_LP_400M_450MV REG_LP_400M(7)
+#define REG_400M_MASK GENMASK(6, 4)
+#define REG_400M(x) FIELD_PREP(REG_400M_MASK, x)
+#define REG_400M_380MV REG_400M(0)
+#define REG_400M_390MV REG_400M(1)
+#define REG_400M_400MV REG_400M(2)
+#define REG_400M_410MV REG_400M(3)
+#define REG_400M_420MV REG_400M(4)
+#define REG_400M_430MV REG_400M(5)
+#define REG_400M_440MV REG_400M(6)
+#define REG_400M_450MV REG_400M(7)
+#define REG_400M_230MV REG_400M(0)
+#define REG_400M_220MV REG_400M(1)
+#define REG_400M_210MV REG_400M(2)
+#define REG_400M_200MV REG_400M(3)
+#define REG_400M_190MV REG_400M(4)
+#define REG_400M_180MV REG_400M(5)
+#define REG_400M_170MV REG_400M(6)
+#define REG_400M_160MV REG_400M(7)
+#define REG_645M_MASK GENMASK(2, 0)
+#define REG_645M(x) FIELD_PREP(REG_645M_MASK, x)
+#define REG_645M_605MV REG_645M(0)
+#define REG_645M_625MV REG_645M(1)
+#define REG_645M_635MV REG_645M(2)
+#define REG_645M_645MV REG_645M(3)
+#define REG_645M_655MV REG_645M(4)
+#define REG_645M_665MV REG_645M(5)
+#define REG_645M_685MV REG_645M(6)
+#define REG_645M_725MV REG_645M(7)
+
+#define BIAS_CON4 0x0010
+#define I_MUX_SEL_MASK GENMASK(6, 5)
+#define I_MUX_SEL(x) FIELD_PREP(I_MUX_SEL_MASK, x)
+#define I_MUX_400MV I_MUX_SEL(0)
+#define I_MUX_200MV I_MUX_SEL(1)
+#define I_MUX_530MV I_MUX_SEL(2)
+
+#define PLL_CON0 0x0100
+#define PLL_EN BIT(12)
+#define S_MASK GENMASK(10, 8)
+#define S(x) FIELD_PREP(S_MASK, x)
+#define P_MASK GENMASK(5, 0)
+#define P(x) FIELD_PREP(P_MASK, x)
+#define PLL_CON1 0x0104
+#define PLL_CON2 0x0108
+#define M_MASK GENMASK(9, 0)
+#define M(x) FIELD_PREP(M_MASK, x)
+#define PLL_CON3 0x010c
+#define MRR_MASK GENMASK(13, 8)
+#define MRR(x) FIELD_PREP(MRR_MASK, x)
+#define MFR_MASK GENMASK(7, 0)
+#define MFR(x) FIELD_PREP(MFR_MASK, x)
+#define PLL_CON4 0x0110
+#define SSCG_EN BIT(11)
+#define PLL_CON5 0x0114
+#define RESET_N_SEL BIT(10)
+#define PLL_ENABLE_SEL BIT(8)
+#define PLL_CON6 0x0118
+#define PLL_CON7 0x011c
+#define PLL_LOCK_CNT(x) FIELD_PREP(GENMASK(15, 0), x)
+#define PLL_CON8 0x0120
+#define PLL_STB_CNT(x) FIELD_PREP(GENMASK(15, 0), x)
+#define PLL_STAT0 0x0140
+#define PLL_LOCK BIT(0)
+
+#define DPHY_MC_GNR_CON0 0x0300
+#define PHY_READY BIT(1)
+#define PHY_ENABLE BIT(0)
+#define DPHY_MC_GNR_CON1 0x0304
+#define T_PHY_READY(x) FIELD_PREP(GENMASK(15, 0), x)
+#define DPHY_MC_ANA_CON0 0x0308
+#define EDGE_CON(x) FIELD_PREP(GENMASK(14, 12), x)
+#define EDGE_CON_DIR(x) FIELD_PREP(BIT(9), x)
+#define EDGE_CON_EN BIT(8)
+#define RES_UP(x) FIELD_PREP(GENMASK(7, 4), x)
+#define RES_DN(x) FIELD_PREP(GENMASK(3, 0), x)
+#define DPHY_MC_ANA_CON1 0x030c
+#define DPHY_MC_ANA_CON2 0x0310
+#define HS_VREG_AMP_ICON(x) FIELD_PREP(GENMASK(1, 0), x)
+#define DPHY_MC_TIME_CON0 0x0330
+#define HSTX_CLK_SEL BIT(12)
+#define T_LPX(x) FIELD_PREP(GENMASK(11, 4), x)
+#define DPHY_MC_TIME_CON1 0x0334
+#define T_CLK_ZERO(x) FIELD_PREP(GENMASK(15, 8), x)
+#define T_CLK_PREPARE(x) FIELD_PREP(GENMASK(7, 0), x)
+#define DPHY_MC_TIME_CON2 0x0338
+#define T_HS_EXIT(x) FIELD_PREP(GENMASK(15, 8), x)
+#define T_CLK_TRAIL(x) FIELD_PREP(GENMASK(7, 0), x)
+#define DPHY_MC_TIME_CON3 0x033c
+#define T_CLK_POST(x) FIELD_PREP(GENMASK(7, 0), x)
+#define DPHY_MC_TIME_CON4 0x0340
+#define T_ULPS_EXIT(x) FIELD_PREP(GENMASK(9, 0), x)
+#define DPHY_MC_DESKEW_CON0 0x0350
+#define SKEW_CAL_RUN_TIME(x) FIELD_PREP(GENMASK(15, 12), x)
+
+#define SKEW_CAL_INIT_RUN_TIME(x) FIELD_PREP(GENMASK(11, 8), x)
+#define SKEW_CAL_INIT_WAIT_TIME(x) FIELD_PREP(GENMASK(7, 4), x)
+#define SKEW_CAL_EN BIT(0)
+
+#define COMBO_MD0_GNR_CON0 0x0400
+#define COMBO_MD0_GNR_CON1 0x0404
+#define COMBO_MD0_ANA_CON0 0x0408
+#define COMBO_MD0_ANA_CON1 0x040c
+#define COMBO_MD0_ANA_CON2 0x0410
+
+#define COMBO_MD0_TIME_CON0 0x0430
+#define COMBO_MD0_TIME_CON1 0x0434
+#define COMBO_MD0_TIME_CON2 0x0438
+#define COMBO_MD0_TIME_CON3 0x043c
+#define COMBO_MD0_TIME_CON4 0x0440
+#define COMBO_MD0_DATA_CON0 0x0444
+
+#define COMBO_MD1_GNR_CON0 0x0500
+#define COMBO_MD1_GNR_CON1 0x0504
+#define COMBO_MD1_ANA_CON0 0x0508
+#define COMBO_MD1_ANA_CON1 0x050c
+#define COMBO_MD1_ANA_CON2 0x0510
+#define COMBO_MD1_TIME_CON0 0x0530
+#define COMBO_MD1_TIME_CON1 0x0534
+#define COMBO_MD1_TIME_CON2 0x0538
+#define COMBO_MD1_TIME_CON3 0x053c
+#define COMBO_MD1_TIME_CON4 0x0540
+#define COMBO_MD1_DATA_CON0 0x0544
+
+#define COMBO_MD2_GNR_CON0 0x0600
+#define COMBO_MD2_GNR_CON1 0x0604
+#define COMBO_MD2_ANA_CON0		0x0608
+#define COMBO_MD2_ANA_CON1		0x060c
+#define COMBO_MD2_ANA_CON2		0x0610
+#define COMBO_MD2_TIME_CON0 0x0630
+#define COMBO_MD2_TIME_CON1 0x0634
+#define COMBO_MD2_TIME_CON2 0x0638
+#define COMBO_MD2_TIME_CON3 0x063c
+#define COMBO_MD2_TIME_CON4 0x0640
+#define COMBO_MD2_DATA_CON0 0x0644
+
+#define DPHY_MD3_GNR_CON0 0x0700
+#define DPHY_MD3_GNR_CON1 0x0704
+#define DPHY_MD3_ANA_CON0		0x0708
+#define DPHY_MD3_ANA_CON1		0x070c
+#define DPHY_MD3_ANA_CON2		0x0710
+#define DPHY_MD3_TIME_CON0 0x0730
+#define DPHY_MD3_TIME_CON1 0x0734
+#define DPHY_MD3_TIME_CON2 0x0738
+#define DPHY_MD3_TIME_CON3 0x073c
+#define DPHY_MD3_TIME_CON4 0x0740
+#define DPHY_MD3_DATA_CON0 0x0744
+
+#define T_LP_EXIT_SKEW(x) FIELD_PREP(GENMASK(3, 2), x)
+#define T_LP_ENTRY_SKEW(x) FIELD_PREP(GENMASK(1, 0), x)
+#define T_HS_ZERO(x) FIELD_PREP(GENMASK(15, 8), x)
+#define T_HS_PREPARE(x) FIELD_PREP(GENMASK(7, 0), x)
+#define T_HS_EXIT(x) FIELD_PREP(GENMASK(15, 8), x)
+#define T_HS_TRAIL(x) FIELD_PREP(GENMASK(7, 0), x)
+#define T_TA_GET(x) FIELD_PREP(GENMASK(7, 4), x)
+#define T_TA_GO(x) FIELD_PREP(GENMASK(3, 0), x)
+
+/* MIPI_CDPHY_GRF registers */
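+/*
+ * The GRF registers pair each control bit in the low word with a
+ * write-enable bit in the high word, hence the FIELD_PREP_HIWORD() values.
+ */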
+#define MIPI_DCPHY_GRF_CON0 0x0000
+#define S_CPHY_MODE FIELD_PREP_HIWORD(BIT(3), 1)
+#define M_CPHY_MODE FIELD_PREP_HIWORD(BIT(0), 1)
+
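+/*
+ * HS driver output resistance selection: raw values 0x8..0xf select the
+ * lower 30..41 ohm strengths, while 0x0..0x7 select 43..73 ohm, hence
+ * the non-monotonic enumerator values below.
+ */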
+enum hs_drv_res_ohm {
+ STRENGTH_30_OHM = 0x8,
+ STRENGTH_31_2_OHM,
+ STRENGTH_32_5_OHM,
+ STRENGTH_34_OHM,
+ STRENGTH_35_5_OHM,
+ STRENGTH_37_OHM,
+ STRENGTH_39_OHM,
+ STRENGTH_41_OHM,
+ STRENGTH_43_OHM = 0x0,
+ STRENGTH_46_OHM,
+ STRENGTH_49_OHM,
+ STRENGTH_52_OHM,
+ STRENGTH_56_OHM,
+ STRENGTH_60_OHM,
+ STRENGTH_66_OHM,
+ STRENGTH_73_OHM,
+};
+
+struct hs_drv_res_cfg {
+ enum hs_drv_res_ohm clk_hs_drv_up_ohm;
+ enum hs_drv_res_ohm clk_hs_drv_down_ohm;
+ enum hs_drv_res_ohm data_hs_drv_up_ohm;
+ enum hs_drv_res_ohm data_hs_drv_down_ohm;
+};
+
+struct samsung_mipi_dcphy_plat_data {
+ const struct hs_drv_res_cfg *dphy_hs_drv_res_cfg;
+ u32 dphy_tx_max_lane_kbps;
+};
+
+struct samsung_mipi_dcphy {
+ struct device *dev;
+ struct clk *ref_clk;
+ struct clk *pclk;
+ struct regmap *regmap;
+ struct regmap *grf_regmap;
+ struct reset_control *m_phy_rst;
+ struct reset_control *s_phy_rst;
+ struct reset_control *apb_rst;
+ struct reset_control *grf_apb_rst;
+ unsigned int lanes;
+ struct phy *phy;
+ u8 type;
+
+ const struct samsung_mipi_dcphy_plat_data *pdata;
+ struct {
+ unsigned long long rate;
+ u8 prediv;
+ u16 fbdiv;
+ long dsm;
+ u8 scaler;
+
+ bool ssc_en;
+ u8 mfr;
+ u8 mrr;
+ } pll;
+};
+
+struct samsung_mipi_dphy_timing {
+ unsigned int max_lane_mbps;
+ u8 clk_prepare;
+ u8 clk_zero;
+ u8 clk_post;
+ u8 clk_trail_eot;
+ u8 hs_prepare;
+ u8 hs_zero;
+ u8 hs_trail_eot;
+ u8 lpx;
+ u8 hs_exit;
+ u8 hs_settle;
+};
+
+/*
+ * Timing values taken from the rk3588 vendor kernel; they are not
+ * documented in the hardware documentation.
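+ *
+ * Each row lists, in struct samsung_mipi_dphy_timing field order:
+ * max_lane_mbps, clk_prepare, clk_zero, clk_post, clk_trail_eot,
+ * hs_prepare, hs_zero, hs_trail_eot, lpx, hs_exit, hs_settle.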
+ */
+static const
+struct samsung_mipi_dphy_timing samsung_mipi_dphy_timing_table[] = {
+ {6500, 32, 117, 31, 28, 30, 56, 27, 24, 44, 37},
+ {6490, 32, 116, 31, 28, 30, 56, 27, 24, 44, 37},
+ {6480, 32, 116, 31, 28, 30, 56, 27, 24, 44, 37},
+ {6470, 32, 116, 31, 28, 30, 56, 27, 24, 44, 37},
+ {6460, 32, 116, 31, 28, 30, 56, 27, 24, 44, 37},
+ {6450, 32, 115, 31, 28, 30, 56, 27, 24, 44, 37},
+ {6440, 32, 115, 31, 28, 30, 56, 27, 24, 44, 37},
+ {6430, 31, 116, 31, 28, 30, 55, 27, 24, 44, 37},
+ {6420, 31, 116, 31, 28, 30, 55, 27, 24, 44, 37},
+ {6410, 31, 116, 31, 27, 30, 55, 27, 24, 44, 37},
+ {6400, 31, 115, 30, 27, 30, 55, 27, 23, 43, 36},
+ {6390, 31, 115, 30, 27, 30, 55, 27, 23, 43, 36},
+ {6380, 31, 115, 30, 27, 30, 55, 27, 23, 43, 36},
+ {6370, 31, 115, 30, 27, 30, 55, 26, 23, 43, 36},
+ {6360, 31, 114, 30, 27, 30, 54, 26, 23, 43, 36},
+ {6350, 31, 114, 30, 27, 30, 54, 26, 23, 43, 36},
+ {6340, 31, 114, 30, 27, 30, 54, 26, 23, 43, 36},
+ {6330, 31, 114, 30, 27, 30, 54, 26, 23, 43, 36},
+ {6320, 31, 113, 30, 27, 30, 54, 26, 23, 43, 36},
+ {6310, 31, 113, 30, 27, 30, 54, 26, 23, 43, 36},
+ {6300, 31, 113, 30, 27, 30, 54, 26, 23, 43, 36},
+ {6290, 31, 113, 30, 27, 29, 54, 26, 23, 43, 36},
+ {6280, 31, 112, 30, 27, 29, 54, 26, 23, 43, 36},
+ {6270, 31, 112, 30, 27, 29, 54, 26, 23, 43, 36},
+ {6260, 31, 112, 30, 27, 29, 54, 26, 23, 43, 36},
+ {6250, 31, 112, 30, 27, 29, 54, 26, 23, 42, 36},
+ {6240, 30, 113, 30, 27, 29, 54, 26, 23, 42, 36},
+ {6230, 30, 112, 30, 27, 29, 54, 26, 23, 42, 35},
+ {6220, 30, 112, 30, 27, 29, 53, 26, 23, 42, 35},
+ {6210, 30, 112, 30, 27, 29, 53, 26, 23, 42, 35},
+ {6200, 30, 112, 29, 27, 29, 53, 26, 23, 42, 35},
+ {6190, 30, 111, 29, 27, 29, 53, 26, 23, 42, 35},
+ {6180, 30, 111, 29, 27, 29, 53, 26, 23, 42, 35},
+ {6170, 30, 111, 29, 26, 29, 53, 26, 23, 42, 35},
+ {6160, 30, 111, 29, 26, 29, 53, 26, 23, 42, 35},
+ {6150, 30, 110, 29, 26, 29, 53, 26, 23, 42, 35},
+ {6140, 30, 110, 29, 26, 29, 52, 26, 23, 42, 35},
+ {6130, 30, 110, 29, 26, 29, 52, 25, 22, 42, 35},
+ {6120, 30, 110, 29, 26, 29, 52, 25, 22, 42, 35},
+ {6110, 30, 110, 29, 26, 29, 52, 25, 22, 42, 35},
+ {6100, 30, 109, 29, 26, 29, 52, 25, 22, 41, 35},
+ {6090, 30, 109, 29, 26, 29, 52, 25, 22, 41, 35},
+ {6080, 30, 109, 29, 26, 28, 53, 25, 22, 41, 35},
+ {6070, 30, 109, 29, 26, 28, 52, 25, 22, 41, 34},
+ {6060, 30, 108, 29, 26, 28, 52, 25, 22, 41, 34},
+ {6050, 30, 108, 29, 26, 28, 52, 25, 22, 41, 34},
+ {6040, 29, 109, 29, 26, 28, 52, 25, 22, 41, 34},
+ {6030, 29, 109, 29, 26, 28, 52, 25, 22, 41, 34},
+ {6020, 29, 108, 29, 26, 28, 52, 25, 22, 41, 34},
+ {6010, 29, 108, 29, 26, 28, 52, 25, 22, 41, 34},
+ {6000, 29, 108, 28, 26, 28, 51, 25, 22, 41, 34},
+ {5990, 29, 108, 28, 26, 28, 51, 25, 22, 41, 34},
+ {5980, 29, 107, 28, 26, 28, 51, 25, 22, 41, 34},
+ {5970, 29, 107, 28, 26, 28, 51, 25, 22, 41, 34},
+ {5960, 29, 107, 28, 26, 28, 51, 25, 22, 40, 34},
+ {5950, 29, 107, 28, 26, 28, 51, 25, 22, 40, 34},
+ {5940, 29, 107, 28, 25, 28, 51, 25, 22, 40, 34},
+ {5930, 29, 106, 28, 25, 28, 50, 25, 22, 40, 34},
+ {5920, 29, 106, 28, 25, 28, 50, 25, 22, 40, 34},
+ {5910, 29, 106, 28, 25, 28, 50, 25, 22, 40, 34},
+ {5900, 29, 106, 28, 25, 28, 50, 24, 22, 40, 33},
+ {5890, 29, 105, 28, 25, 28, 50, 24, 22, 40, 33},
+ {5880, 29, 105, 28, 25, 28, 50, 24, 22, 40, 33},
+ {5870, 29, 105, 28, 25, 27, 51, 24, 22, 40, 33},
+ {5860, 29, 105, 28, 25, 27, 51, 24, 21, 40, 33},
+ {5850, 29, 104, 28, 25, 27, 50, 24, 21, 40, 33},
+ {5840, 28, 105, 28, 25, 27, 50, 24, 21, 40, 33},
+ {5830, 28, 105, 28, 25, 27, 50, 24, 21, 40, 33},
+ {5820, 28, 105, 28, 25, 27, 50, 24, 21, 40, 33},
+ {5810, 28, 104, 28, 25, 27, 50, 24, 21, 39, 33},
+ {5800, 28, 104, 27, 25, 27, 50, 24, 21, 39, 33},
+ {5790, 28, 104, 27, 25, 27, 50, 24, 21, 39, 33},
+ {5780, 28, 104, 27, 25, 27, 49, 24, 21, 39, 33},
+ {5770, 28, 104, 27, 25, 27, 49, 24, 21, 39, 33},
+ {5760, 28, 103, 27, 25, 27, 49, 24, 21, 39, 33},
+ {5750, 28, 103, 27, 25, 27, 49, 24, 21, 39, 33},
+ {5740, 28, 103, 27, 25, 27, 49, 24, 21, 39, 33},
+ {5730, 28, 103, 27, 25, 27, 49, 24, 21, 39, 32},
+ {5720, 28, 102, 27, 25, 27, 49, 24, 21, 39, 32},
+ {5710, 28, 102, 27, 25, 27, 48, 24, 21, 39, 32},
+ {5700, 28, 102, 27, 24, 27, 48, 24, 21, 39, 32},
+ {5690, 28, 102, 27, 24, 27, 48, 24, 21, 39, 32},
+ {5680, 28, 101, 27, 24, 27, 48, 24, 21, 39, 32},
+ {5670, 28, 101, 27, 24, 27, 48, 23, 21, 38, 32},
+ {5660, 28, 101, 27, 24, 26, 49, 23, 21, 38, 32},
+ {5650, 28, 101, 27, 24, 26, 49, 23, 21, 38, 32},
+ {5640, 27, 101, 27, 24, 26, 48, 23, 21, 38, 32},
+ {5630, 27, 101, 27, 24, 26, 48, 23, 21, 38, 32},
+ {5620, 27, 101, 27, 24, 26, 48, 23, 21, 38, 32},
+ {5610, 27, 101, 27, 24, 26, 48, 23, 21, 38, 32},
+ {5600, 27, 101, 26, 24, 26, 48, 23, 20, 38, 32},
+ {5590, 27, 100, 26, 24, 26, 48, 23, 20, 38, 32},
+ {5580, 27, 100, 26, 24, 26, 48, 23, 20, 38, 32},
+ {5570, 27, 100, 26, 24, 26, 48, 23, 20, 38, 31},
+ {5560, 27, 100, 26, 24, 26, 47, 23, 20, 38, 31},
+ {5550, 27, 99, 26, 24, 26, 47, 23, 20, 38, 31},
+ {5540, 27, 99, 26, 24, 26, 47, 23, 20, 38, 31},
+ {5530, 27, 99, 26, 24, 26, 47, 23, 20, 38, 31},
+ {5520, 27, 99, 26, 24, 26, 47, 23, 20, 37, 31},
+ {5510, 27, 98, 26, 24, 26, 47, 23, 20, 37, 31},
+ {5500, 27, 98, 26, 24, 26, 47, 23, 20, 37, 31},
+ {5490, 27, 98, 26, 24, 26, 46, 23, 20, 37, 31},
+ {5480, 27, 98, 26, 24, 26, 46, 23, 20, 37, 31},
+ {5470, 27, 97, 26, 23, 26, 46, 23, 20, 37, 31},
+ {5460, 27, 97, 26, 23, 26, 46, 23, 20, 37, 31},
+ {5450, 27, 97, 26, 23, 25, 47, 23, 20, 37, 31},
+ {5440, 26, 98, 26, 23, 25, 47, 23, 20, 37, 31},
+ {5430, 26, 98, 26, 23, 25, 47, 22, 20, 37, 31},
+ {5420, 26, 97, 26, 23, 25, 46, 22, 20, 37, 31},
+ {5410, 26, 97, 26, 23, 25, 46, 22, 20, 37, 31},
+ {5400, 26, 97, 25, 23, 25, 46, 22, 20, 37, 30},
+ {5390, 26, 97, 25, 23, 25, 46, 22, 20, 37, 30},
+ {5380, 26, 96, 25, 23, 25, 46, 22, 20, 36, 30},
+ {5370, 26, 96, 25, 23, 25, 46, 22, 20, 36, 30},
+ {5360, 26, 96, 25, 23, 25, 46, 22, 20, 36, 30},
+ {5350, 26, 96, 25, 23, 25, 46, 22, 20, 36, 30},
+ {5340, 26, 95, 25, 23, 25, 45, 22, 20, 36, 30},
+ {5330, 26, 95, 25, 23, 25, 45, 22, 19, 36, 30},
+ {5320, 26, 95, 25, 23, 25, 45, 22, 19, 36, 30},
+ {5310, 26, 95, 25, 23, 25, 45, 22, 19, 36, 30},
+ {5300, 26, 95, 25, 23, 25, 45, 22, 19, 36, 30},
+ {5290, 26, 94, 25, 23, 25, 45, 22, 19, 36, 30},
+ {5280, 26, 94, 25, 23, 25, 45, 22, 19, 36, 30},
+ {5270, 26, 94, 25, 23, 25, 44, 22, 19, 36, 30},
+ {5260, 26, 94, 25, 23, 25, 44, 22, 19, 36, 30},
+ {5250, 25, 94, 25, 23, 24, 45, 22, 19, 36, 30},
+ {5240, 25, 94, 25, 23, 24, 45, 22, 19, 36, 29},
+ {5230, 25, 94, 25, 22, 24, 45, 22, 19, 35, 29},
+ {5220, 25, 94, 25, 22, 24, 45, 22, 19, 35, 29},
+ {5210, 25, 93, 25, 22, 24, 45, 22, 19, 35, 29},
+ {5200, 25, 93, 24, 22, 24, 44, 21, 19, 35, 29},
+ {5190, 25, 93, 24, 22, 24, 44, 21, 19, 35, 29},
+ {5180, 25, 93, 24, 22, 24, 44, 21, 19, 35, 29},
+ {5170, 25, 92, 24, 22, 24, 44, 21, 19, 35, 29},
+ {5160, 25, 92, 24, 22, 24, 44, 21, 19, 35, 29},
+ {5150, 25, 92, 24, 22, 24, 44, 21, 19, 35, 29},
+ {5140, 25, 92, 24, 22, 24, 44, 21, 19, 35, 29},
+ {5130, 25, 92, 24, 22, 24, 43, 21, 19, 35, 29},
+ {5120, 25, 91, 24, 22, 24, 43, 21, 19, 35, 29},
+ {5110, 25, 91, 24, 22, 24, 43, 21, 19, 35, 29},
+ {5100, 25, 91, 24, 22, 24, 43, 21, 19, 35, 29},
+ {5090, 25, 91, 24, 22, 24, 43, 21, 19, 34, 29},
+ {5080, 25, 90, 24, 22, 24, 43, 21, 19, 34, 29},
+ {5070, 25, 90, 24, 22, 24, 43, 21, 19, 34, 28},
+ {5060, 25, 90, 24, 22, 24, 43, 21, 18, 34, 28},
+ {5050, 24, 91, 24, 22, 24, 42, 21, 18, 34, 28},
+ {5040, 24, 90, 24, 22, 23, 43, 21, 18, 34, 28},
+ {5030, 24, 90, 24, 22, 23, 43, 21, 18, 34, 28},
+ {5020, 24, 90, 24, 22, 23, 43, 21, 18, 34, 28},
+ {5010, 24, 90, 24, 22, 23, 43, 21, 18, 34, 28},
+ {5000, 24, 89, 23, 21, 23, 43, 21, 18, 34, 28},
+ {4990, 24, 89, 23, 21, 23, 43, 21, 18, 34, 28},
+ {4980, 24, 89, 23, 21, 23, 42, 21, 18, 34, 28},
+ {4970, 24, 89, 23, 21, 23, 42, 21, 18, 34, 28},
+ {4960, 24, 89, 23, 21, 23, 42, 20, 18, 34, 28},
+ {4950, 24, 88, 23, 21, 23, 42, 20, 18, 34, 28},
+ {4940, 24, 88, 23, 21, 23, 42, 20, 18, 33, 28},
+ {4930, 24, 88, 23, 21, 23, 42, 20, 18, 33, 28},
+ {4920, 24, 88, 23, 21, 23, 42, 20, 18, 33, 28},
+ {4910, 24, 87, 23, 21, 23, 41, 20, 18, 33, 28},
+ {4900, 24, 87, 23, 21, 23, 41, 20, 18, 33, 27},
+ {4890, 24, 87, 23, 21, 23, 41, 20, 18, 33, 27},
+ {4880, 24, 87, 23, 21, 23, 41, 20, 18, 33, 27},
+ {4870, 24, 86, 23, 21, 23, 41, 20, 18, 33, 27},
+ {4860, 24, 86, 23, 21, 23, 41, 20, 18, 33, 27},
+ {4850, 23, 87, 23, 21, 23, 41, 20, 18, 33, 27},
+ {4840, 23, 87, 23, 21, 23, 40, 20, 18, 33, 27},
+ {4830, 23, 86, 23, 21, 22, 41, 20, 18, 33, 27},
+ {4820, 23, 86, 23, 21, 22, 41, 20, 18, 33, 27},
+ {4810, 23, 86, 23, 21, 22, 41, 20, 18, 33, 27},
+ {4800, 23, 86, 22, 21, 22, 41, 20, 17, 32, 27},
+ {4790, 23, 86, 22, 21, 22, 41, 20, 17, 32, 27},
+ {4780, 23, 85, 22, 21, 22, 41, 20, 17, 32, 27},
+ {4770, 23, 85, 22, 21, 22, 41, 20, 17, 32, 27},
+ {4760, 23, 85, 22, 20, 22, 40, 20, 17, 32, 27},
+ {4750, 23, 85, 22, 20, 22, 40, 20, 17, 32, 27},
+ {4740, 23, 84, 22, 20, 22, 40, 20, 17, 32, 26},
+ {4730, 23, 84, 22, 20, 22, 40, 19, 17, 32, 26},
+ {4720, 23, 84, 22, 20, 22, 40, 19, 17, 32, 26},
+ {4710, 23, 84, 22, 20, 22, 40, 19, 17, 32, 26},
+ {4700, 23, 83, 22, 20, 22, 40, 19, 17, 32, 26},
+ {4690, 23, 83, 22, 20, 22, 39, 19, 17, 32, 26},
+ {4680, 23, 83, 22, 20, 22, 39, 19, 17, 32, 26},
+ {4670, 23, 83, 22, 20, 22, 39, 19, 17, 32, 26},
+ {4660, 23, 82, 22, 20, 22, 39, 19, 17, 32, 26},
+ {4650, 22, 83, 22, 20, 22, 39, 19, 17, 31, 26},
+ {4640, 22, 83, 22, 20, 22, 39, 19, 17, 31, 26},
+ {4630, 22, 83, 22, 20, 22, 39, 19, 17, 31, 26},
+ {4620, 22, 83, 22, 20, 21, 39, 19, 17, 31, 26},
+ {4610, 22, 82, 22, 20, 21, 39, 19, 17, 31, 26},
+ {4600, 22, 82, 21, 20, 21, 39, 19, 17, 31, 26},
+ {4590, 22, 82, 21, 20, 21, 39, 19, 17, 31, 26},
+ {4580, 22, 82, 21, 20, 21, 39, 19, 17, 31, 26},
+ {4570, 22, 81, 21, 20, 21, 39, 19, 17, 31, 25},
+ {4560, 22, 81, 21, 20, 21, 39, 19, 17, 31, 25},
+ {4550, 22, 81, 21, 20, 21, 38, 19, 17, 31, 25},
+ {4540, 22, 81, 21, 20, 21, 38, 19, 17, 31, 25},
+ {4530, 22, 80, 21, 19, 21, 38, 19, 16, 31, 25},
+ {4520, 22, 80, 21, 19, 21, 38, 19, 16, 31, 25},
+ {4510, 22, 80, 21, 19, 21, 38, 19, 16, 31, 25},
+ {4500, 22, 80, 21, 19, 21, 38, 19, 16, 30, 25},
+ {4490, 22, 80, 21, 19, 21, 38, 18, 16, 30, 25},
+ {4480, 22, 79, 21, 19, 21, 38, 18, 16, 30, 25},
+ {4470, 22, 79, 21, 19, 21, 37, 18, 16, 30, 25},
+ {4460, 22, 79, 21, 19, 21, 37, 18, 16, 30, 25},
+ {4450, 21, 80, 21, 19, 21, 37, 18, 16, 30, 25},
+ {4440, 21, 79, 21, 19, 21, 37, 18, 16, 30, 25},
+ {4430, 21, 79, 21, 19, 21, 37, 18, 16, 30, 25},
+ {4420, 21, 79, 21, 19, 21, 37, 18, 16, 30, 25},
+ {4410, 21, 79, 21, 19, 20, 38, 18, 16, 30, 25},
+ {4400, 21, 78, 20, 19, 20, 37, 18, 16, 30, 24},
+ {4390, 21, 78, 20, 19, 20, 37, 18, 16, 30, 24},
+ {4380, 21, 78, 20, 19, 20, 37, 18, 16, 30, 24},
+ {4370, 21, 78, 20, 19, 20, 37, 18, 16, 30, 24},
+ {4360, 21, 77, 20, 19, 20, 37, 18, 16, 29, 24},
+ {4350, 21, 77, 20, 19, 20, 37, 18, 16, 29, 24},
+ {4340, 21, 77, 20, 19, 20, 37, 18, 16, 29, 24},
+ {4330, 21, 77, 20, 19, 20, 36, 18, 16, 29, 24},
+ {4320, 21, 77, 20, 19, 20, 36, 18, 16, 29, 24},
+ {4310, 21, 76, 20, 19, 20, 36, 18, 16, 29, 24},
+ {4300, 21, 76, 20, 18, 20, 36, 18, 16, 29, 24},
+ {4290, 21, 76, 20, 18, 20, 36, 18, 16, 29, 24},
+ {4280, 21, 76, 20, 18, 20, 36, 18, 16, 29, 24},
+ {4270, 21, 75, 20, 18, 20, 36, 18, 16, 29, 24},
+ {4260, 21, 75, 20, 18, 20, 35, 17, 15, 29, 24},
+ {4250, 20, 76, 20, 18, 20, 35, 17, 15, 29, 24},
+ {4240, 20, 76, 20, 18, 20, 35, 17, 15, 29, 23},
+ {4230, 20, 75, 20, 18, 20, 35, 17, 15, 29, 23},
+ {4220, 20, 75, 20, 18, 20, 35, 17, 15, 29, 23},
+ {4210, 20, 75, 20, 18, 20, 35, 17, 15, 28, 23},
+ {4200, 20, 75, 19, 18, 19, 36, 17, 15, 28, 23},
+ {4190, 20, 74, 19, 18, 19, 36, 17, 15, 28, 23},
+ {4180, 20, 74, 19, 18, 19, 35, 17, 15, 28, 23},
+ {4170, 20, 74, 19, 18, 19, 35, 17, 15, 28, 23},
+ {4160, 20, 74, 19, 18, 19, 35, 17, 15, 28, 23},
+ {4150, 20, 74, 19, 18, 19, 35, 17, 15, 28, 23},
+ {4140, 20, 73, 19, 18, 19, 35, 17, 15, 28, 23},
+ {4130, 20, 73, 19, 18, 19, 35, 17, 15, 28, 23},
+ {4120, 20, 73, 19, 18, 19, 35, 17, 15, 28, 23},
+ {4110, 20, 73, 19, 18, 19, 34, 17, 15, 28, 23},
+ {4100, 20, 72, 19, 18, 19, 34, 17, 15, 28, 23},
+ {4090, 20, 72, 19, 18, 19, 34, 17, 15, 28, 23},
+ {4080, 20, 72, 19, 18, 19, 34, 17, 15, 28, 23},
+ {4070, 20, 72, 19, 18, 19, 34, 17, 15, 27, 22},
+ {4060, 19, 72, 19, 17, 19, 34, 17, 15, 27, 22},
+ {4050, 19, 72, 19, 17, 19, 34, 17, 15, 27, 22},
+ {4040, 19, 72, 19, 17, 19, 33, 17, 15, 27, 22},
+ {4030, 19, 72, 19, 17, 19, 33, 17, 15, 27, 22},
+ {4020, 19, 71, 19, 17, 19, 33, 16, 15, 27, 22},
+ {4010, 19, 71, 19, 17, 19, 33, 16, 15, 27, 22},
+ {4000, 19, 71, 18, 17, 19, 33, 16, 14, 27, 22},
+ {3990, 19, 71, 18, 17, 18, 34, 16, 14, 27, 22},
+ {3980, 19, 71, 18, 17, 18, 34, 16, 14, 27, 22},
+ {3970, 19, 70, 18, 17, 18, 33, 16, 14, 27, 22},
+ {3960, 19, 70, 18, 17, 18, 33, 16, 14, 27, 22},
+ {3950, 19, 70, 18, 17, 18, 33, 16, 14, 27, 22},
+ {3940, 19, 70, 18, 17, 18, 33, 16, 14, 27, 22},
+ {3930, 19, 69, 18, 17, 18, 33, 16, 14, 27, 22},
+ {3920, 19, 69, 18, 17, 18, 33, 16, 14, 26, 22},
+ {3910, 19, 69, 18, 17, 18, 33, 16, 14, 26, 22},
+ {3900, 19, 69, 18, 17, 18, 33, 16, 14, 26, 21},
+ {3890, 19, 68, 18, 17, 18, 32, 16, 14, 26, 21},
+ {3880, 19, 68, 18, 17, 18, 32, 16, 14, 26, 21},
+ {3870, 19, 68, 18, 17, 18, 32, 16, 14, 26, 21},
+ {3860, 18, 69, 18, 17, 18, 32, 16, 14, 26, 21},
+ {3850, 18, 68, 18, 17, 18, 32, 16, 14, 26, 21},
+ {3840, 18, 68, 18, 17, 18, 32, 16, 14, 26, 21},
+ {3830, 18, 68, 18, 16, 18, 32, 16, 14, 26, 21},
+ {3820, 18, 68, 18, 16, 18, 31, 16, 14, 26, 21},
+ {3810, 18, 68, 18, 16, 18, 31, 16, 14, 26, 21},
+ {3800, 18, 67, 17, 16, 18, 31, 16, 14, 26, 21},
+ {3790, 18, 67, 17, 16, 17, 32, 15, 14, 26, 21},
+ {3780, 18, 67, 17, 16, 17, 32, 15, 14, 25, 21},
+ {3770, 18, 67, 17, 16, 17, 32, 15, 14, 25, 21},
+ {3760, 18, 66, 17, 16, 17, 32, 15, 14, 25, 21},
+ {3750, 18, 66, 17, 16, 17, 31, 15, 14, 25, 21},
+ {3740, 18, 66, 17, 16, 17, 31, 15, 14, 25, 20},
+ {3730, 18, 66, 17, 16, 17, 31, 15, 13, 25, 20},
+ {3720, 18, 65, 17, 16, 17, 31, 15, 13, 25, 20},
+ {3710, 18, 65, 17, 16, 17, 31, 15, 13, 25, 20},
+ {3700, 18, 65, 17, 16, 17, 31, 15, 13, 25, 20},
+ {3690, 18, 65, 17, 16, 17, 31, 15, 13, 25, 20},
+ {3680, 18, 64, 17, 16, 17, 31, 15, 13, 25, 20},
+ {3670, 18, 64, 17, 16, 17, 30, 15, 13, 25, 20},
+ {3660, 17, 65, 17, 16, 17, 30, 15, 13, 25, 20},
+ {3650, 17, 65, 17, 16, 17, 30, 15, 13, 25, 20},
+ {3640, 17, 65, 17, 16, 17, 30, 15, 13, 25, 20},
+ {3630, 17, 64, 17, 16, 17, 30, 15, 13, 24, 20},
+ {3620, 17, 64, 17, 16, 17, 30, 15, 13, 24, 20},
+ {3610, 17, 64, 17, 16, 17, 30, 15, 13, 24, 20},
+ {3600, 17, 64, 16, 16, 17, 29, 15, 13, 24, 20},
+ {3590, 17, 63, 16, 15, 17, 29, 15, 13, 24, 20},
+ {3580, 17, 63, 16, 15, 16, 30, 15, 13, 24, 20},
+ {3570, 17, 63, 16, 15, 16, 30, 15, 13, 24, 19},
+ {3560, 17, 63, 16, 15, 16, 30, 14, 13, 24, 19},
+ {3550, 17, 62, 16, 15, 16, 30, 14, 13, 24, 19},
+ {3540, 17, 62, 16, 15, 16, 30, 14, 13, 24, 19},
+ {3530, 17, 62, 16, 15, 16, 29, 14, 13, 24, 19},
+ {3520, 17, 62, 16, 15, 16, 29, 14, 13, 24, 19},
+ {3510, 17, 62, 16, 15, 16, 29, 14, 13, 24, 19},
+ {3500, 17, 61, 16, 15, 16, 29, 14, 13, 24, 19},
+ {3490, 17, 61, 16, 15, 16, 29, 14, 13, 23, 19},
+ {3480, 17, 61, 16, 15, 16, 29, 14, 13, 23, 19},
+ {3470, 17, 61, 16, 15, 16, 29, 14, 13, 23, 19},
+ {3460, 16, 61, 16, 15, 16, 28, 14, 12, 23, 19},
+ {3450, 16, 61, 16, 15, 16, 28, 14, 12, 23, 19},
+ {3440, 16, 61, 16, 15, 16, 28, 14, 12, 23, 19},
+ {3430, 16, 61, 16, 15, 16, 28, 14, 12, 23, 19},
+ {3420, 16, 60, 16, 15, 16, 28, 14, 12, 23, 19},
+ {3410, 16, 60, 16, 15, 16, 28, 14, 12, 23, 18},
+ {3400, 16, 60, 15, 15, 16, 28, 14, 12, 23, 18},
+ {3390, 16, 60, 15, 15, 16, 28, 14, 12, 23, 18},
+ {3380, 16, 59, 15, 15, 16, 27, 14, 12, 23, 18},
+ {3370, 16, 59, 15, 15, 15, 28, 14, 12, 23, 18},
+ {3360, 16, 59, 15, 14, 15, 28, 14, 12, 23, 18},
+ {3350, 16, 59, 15, 14, 15, 28, 14, 12, 23, 18},
+ {3340, 16, 59, 15, 14, 15, 28, 14, 12, 22, 18},
+ {3330, 16, 58, 15, 14, 15, 28, 14, 12, 22, 18},
+ {3320, 16, 58, 15, 14, 15, 28, 13, 12, 22, 18},
+ {3310, 16, 58, 15, 14, 15, 27, 13, 12, 22, 18},
+ {3300, 16, 58, 15, 14, 15, 27, 13, 12, 22, 18},
+ {3290, 16, 57, 15, 14, 15, 27, 13, 12, 22, 18},
+ {3280, 16, 57, 15, 14, 15, 27, 13, 12, 22, 18},
+ {3270, 16, 57, 15, 14, 15, 27, 13, 12, 22, 18},
+ {3260, 15, 58, 15, 14, 15, 27, 13, 12, 22, 18},
+ {3250, 15, 57, 15, 14, 15, 27, 13, 12, 22, 18},
+ {3240, 15, 57, 15, 14, 15, 26, 13, 12, 22, 17},
+ {3230, 15, 57, 15, 14, 15, 26, 13, 12, 22, 17},
+ {3220, 15, 57, 15, 14, 15, 26, 13, 12, 22, 17},
+ {3210, 15, 56, 15, 14, 15, 26, 13, 12, 22, 17},
+ {3200, 15, 56, 14, 14, 15, 26, 13, 11, 21, 17},
+ {3190, 15, 56, 14, 14, 15, 26, 13, 11, 21, 17},
+ {3180, 15, 56, 14, 14, 15, 26, 13, 11, 21, 17},
+ {3170, 15, 56, 14, 14, 15, 25, 13, 11, 21, 17},
+ {3160, 15, 55, 14, 14, 14, 26, 13, 11, 21, 17},
+ {3150, 15, 55, 14, 14, 14, 26, 13, 11, 21, 17},
+ {3140, 15, 55, 14, 14, 14, 26, 13, 11, 21, 17},
+ {3130, 15, 55, 14, 14, 14, 26, 13, 11, 21, 17},
+ {3120, 15, 54, 14, 13, 14, 26, 13, 11, 21, 17},
+ {3110, 15, 54, 14, 13, 14, 26, 13, 11, 21, 17},
+ {3100, 15, 54, 14, 13, 14, 26, 13, 11, 21, 17},
+ {3090, 15, 54, 14, 13, 14, 25, 12, 11, 21, 17},
+ {3080, 15, 53, 14, 13, 14, 25, 12, 11, 21, 17},
+ {3070, 14, 54, 14, 13, 14, 25, 12, 11, 21, 16},
+ {3060, 14, 54, 14, 13, 14, 25, 12, 11, 21, 16},
+ {3050, 14, 54, 14, 13, 14, 25, 12, 11, 20, 16},
+ {3040, 14, 53, 14, 13, 14, 25, 12, 11, 20, 16},
+ {3030, 14, 53, 14, 13, 14, 25, 12, 11, 20, 16},
+ {3020, 14, 53, 14, 13, 14, 24, 12, 11, 20, 16},
+ {3010, 14, 53, 14, 13, 14, 24, 12, 11, 20, 16},
+ {3000, 14, 53, 13, 13, 14, 24, 12, 11, 20, 16},
+ {2990, 14, 52, 13, 13, 14, 24, 12, 11, 20, 16},
+ {2980, 14, 52, 13, 13, 14, 24, 12, 11, 20, 16},
+ {2970, 14, 52, 13, 13, 14, 24, 12, 11, 20, 16},
+ {2960, 14, 52, 13, 13, 14, 24, 12, 11, 20, 16},
+ {2950, 14, 51, 13, 13, 13, 24, 12, 11, 20, 16},
+ {2940, 14, 51, 13, 13, 13, 24, 12, 11, 20, 16},
+ {2930, 14, 51, 13, 13, 13, 24, 12, 10, 20, 16},
+ {2920, 14, 51, 13, 13, 13, 24, 12, 10, 20, 16},
+ {2910, 14, 50, 13, 13, 13, 24, 12, 10, 20, 15},
+ {2900, 14, 50, 13, 13, 13, 24, 12, 10, 19, 15},
+ {2890, 14, 50, 13, 12, 13, 24, 12, 10, 19, 15},
+ {2880, 14, 50, 13, 12, 13, 23, 12, 10, 19, 15},
+ {2870, 13, 50, 13, 12, 13, 23, 12, 10, 19, 15},
+ {2860, 13, 50, 13, 12, 13, 23, 12, 10, 19, 15},
+ {2850, 13, 50, 13, 12, 13, 23, 11, 10, 19, 15},
+ {2840, 13, 50, 13, 12, 13, 23, 11, 10, 19, 15},
+ {2830, 13, 50, 13, 12, 13, 23, 11, 10, 19, 15},
+ {2820, 13, 49, 13, 12, 13, 23, 11, 10, 19, 15},
+ {2810, 13, 49, 13, 12, 13, 23, 11, 10, 19, 15},
+ {2800, 13, 49, 12, 12, 13, 22, 11, 10, 19, 15},
+ {2790, 13, 49, 12, 12, 13, 22, 11, 10, 19, 15},
+ {2780, 13, 48, 12, 12, 13, 22, 11, 10, 19, 15},
+ {2770, 13, 48, 12, 12, 13, 22, 11, 10, 19, 15},
+ {2760, 13, 48, 12, 12, 13, 22, 11, 10, 18, 15},
+ {2750, 13, 48, 12, 12, 13, 22, 11, 10, 18, 15},
+ {2740, 13, 47, 12, 12, 12, 23, 11, 10, 18, 14},
+ {2730, 13, 47, 12, 12, 12, 22, 11, 10, 18, 14},
+ {2720, 13, 47, 12, 12, 12, 22, 11, 10, 18, 14},
+ {2710, 13, 47, 12, 12, 12, 22, 11, 10, 18, 14},
+ {2700, 13, 47, 12, 12, 12, 22, 11, 10, 18, 14},
+ {2690, 13, 46, 12, 12, 12, 22, 11, 10, 18, 14},
+ {2680, 13, 46, 12, 12, 12, 22, 11, 10, 18, 14},
+ {2670, 12, 47, 12, 12, 12, 22, 11, 10, 18, 14},
+ {2660, 12, 47, 12, 12, 12, 21, 11, 9, 18, 14},
+ {2650, 12, 46, 12, 11, 12, 21, 11, 9, 18, 14},
+ {2640, 12, 46, 12, 11, 12, 21, 11, 9, 18, 14},
+ {2630, 12, 46, 12, 11, 12, 21, 11, 9, 18, 14},
+ {2620, 12, 46, 12, 11, 12, 21, 10, 9, 18, 14},
+ {2610, 12, 45, 12, 11, 12, 21, 10, 9, 17, 14},
+ {2600, 12, 45, 11, 11, 12, 21, 10, 9, 17, 14},
+ {2590, 12, 45, 11, 11, 12, 20, 10, 9, 17, 14},
+ {2580, 12, 45, 11, 11, 12, 20, 10, 9, 17, 14},
+ {2570, 12, 44, 11, 11, 12, 20, 10, 9, 17, 13},
+ {2560, 12, 44, 11, 11, 12, 20, 10, 9, 17, 13},
+ {2550, 12, 44, 11, 11, 12, 20, 10, 9, 17, 13},
+ {2540, 12, 44, 11, 11, 11, 21, 10, 9, 17, 13},
+ {2530, 12, 44, 11, 11, 11, 21, 10, 9, 17, 13},
+ {2520, 12, 43, 11, 11, 11, 21, 10, 9, 17, 13},
+ {2510, 12, 43, 11, 11, 11, 20, 10, 9, 17, 13},
+ {2500, 12, 43, 11, 11, 11, 20, 10, 9, 17, 13},
+ {2490, 12, 43, 11, 11, 11, 20, 10, 9, 17, 13},
+ {2480, 12, 42, 11, 11, 11, 20, 10, 9, 17, 13},
+ {2470, 11, 43, 11, 11, 11, 20, 10, 9, 16, 13},
+ {2460, 11, 43, 11, 11, 11, 20, 10, 9, 16, 13},
+ {2450, 11, 43, 11, 11, 11, 20, 10, 9, 16, 13},
+ {2440, 11, 42, 11, 11, 11, 19, 10, 9, 16, 13},
+ {2430, 11, 42, 11, 11, 11, 19, 10, 9, 16, 13},
+ {2420, 11, 42, 11, 10, 11, 19, 10, 9, 16, 13},
+ {2410, 11, 42, 11, 10, 11, 19, 10, 9, 16, 12},
+ {2400, 11, 41, 10, 10, 11, 19, 10, 8, 16, 12},
+ {2390, 11, 41, 10, 10, 11, 19, 10, 8, 16, 12},
+ {2380, 11, 41, 10, 10, 11, 19, 9, 8, 16, 12},
+ {2370, 11, 41, 10, 10, 11, 18, 9, 8, 16, 12},
+ {2360, 11, 41, 10, 10, 11, 18, 9, 8, 16, 12},
+ {2350, 11, 40, 10, 10, 11, 18, 9, 8, 16, 12},
+ {2340, 11, 40, 10, 10, 11, 18, 9, 8, 16, 12},
+ {2330, 11, 40, 10, 10, 10, 19, 9, 8, 16, 12},
+ {2320, 11, 40, 10, 10, 10, 19, 9, 8, 15, 12},
+ {2310, 11, 39, 10, 10, 10, 19, 9, 8, 15, 12},
+ {2300, 11, 39, 10, 10, 10, 18, 9, 8, 15, 12},
+ {2290, 11, 39, 10, 10, 10, 18, 9, 8, 15, 12},
+ {2280, 11, 39, 10, 10, 10, 18, 9, 8, 15, 12},
+ {2270, 10, 39, 10, 10, 10, 18, 9, 8, 15, 12},
+ {2260, 10, 39, 10, 10, 10, 18, 9, 8, 15, 12},
+ {2250, 10, 39, 10, 10, 10, 18, 9, 8, 15, 12},
+ {2240, 10, 39, 10, 10, 10, 18, 9, 8, 15, 11},
+ {2230, 10, 38, 10, 10, 10, 18, 9, 8, 15, 11},
+ {2220, 10, 38, 10, 10, 10, 17, 9, 8, 15, 11},
+ {2210, 10, 38, 10, 10, 10, 17, 9, 8, 15, 11},
+ {2200, 10, 38, 9, 10, 10, 17, 9, 8, 15, 11},
+ {2190, 10, 38, 9, 9, 10, 17, 9, 8, 15, 11},
+ {2180, 10, 37, 9, 9, 10, 17, 9, 8, 14, 11},
+ {2170, 10, 37, 9, 9, 10, 17, 9, 8, 14, 11},
+ {2160, 10, 37, 9, 9, 10, 17, 9, 8, 14, 11},
+ {2150, 10, 37, 9, 9, 10, 16, 8, 8, 14, 11},
+ {2140, 10, 36, 9, 9, 10, 16, 8, 8, 14, 11},
+ {2130, 10, 36, 9, 9, 10, 16, 8, 7, 14, 11},
+ {2120, 10, 36, 9, 9, 9, 17, 8, 7, 14, 11},
+ {2110, 10, 36, 9, 9, 9, 17, 8, 7, 14, 11},
+ {2100, 10, 35, 9, 9, 9, 17, 8, 7, 14, 11},
+ {2090, 10, 35, 9, 9, 9, 17, 8, 7, 14, 11},
+ {2080, 9, 36, 9, 9, 9, 16, 8, 7, 14, 11},
+ {2070, 9, 36, 9, 9, 9, 16, 8, 7, 14, 10},
+ {2060, 9, 35, 9, 9, 9, 16, 8, 7, 14, 10},
+ {2050, 9, 35, 9, 9, 9, 16, 8, 7, 14, 10},
+ {2040, 9, 35, 9, 9, 9, 16, 8, 7, 14, 10},
+ {2030, 9, 35, 9, 9, 9, 16, 8, 7, 13, 10},
+ {2020, 9, 35, 9, 9, 9, 16, 8, 7, 13, 10},
+ {2010, 9, 34, 9, 9, 9, 15, 8, 7, 13, 10},
+ {2000, 9, 34, 8, 9, 9, 15, 8, 7, 13, 10},
+ {1990, 9, 34, 8, 9, 9, 15, 8, 7, 13, 10},
+ {1980, 9, 34, 8, 9, 9, 15, 8, 7, 13, 10},
+ {1970, 9, 33, 8, 9, 9, 15, 8, 7, 13, 10},
+ {1960, 9, 33, 8, 9, 9, 15, 8, 7, 13, 10},
+ {1950, 9, 33, 8, 8, 9, 15, 8, 7, 13, 10},
+ {1940, 9, 33, 8, 8, 9, 15, 8, 7, 13, 10},
+ {1930, 9, 32, 8, 8, 9, 14, 8, 7, 13, 10},
+ {1920, 9, 32, 8, 8, 9, 14, 8, 7, 13, 10},
+ {1910, 9, 32, 8, 8, 8, 15, 7, 7, 13, 9},
+ {1900, 9, 32, 8, 8, 8, 15, 7, 7, 13, 9},
+ {1890, 9, 31, 8, 8, 8, 15, 7, 7, 12, 9},
+ {1880, 8, 32, 8, 8, 8, 15, 7, 7, 12, 9},
+ {1870, 8, 32, 8, 8, 8, 15, 7, 7, 12, 9},
+ {1860, 8, 32, 8, 8, 8, 14, 7, 6, 12, 9},
+ {1850, 8, 32, 8, 8, 8, 14, 7, 6, 12, 9},
+ {1840, 8, 31, 8, 8, 8, 14, 7, 6, 12, 9},
+ {1830, 8, 31, 8, 8, 8, 14, 7, 6, 12, 9},
+ {1820, 8, 31, 8, 8, 8, 14, 7, 6, 12, 9},
+ {1810, 8, 31, 8, 8, 8, 14, 7, 6, 12, 9},
+ {1800, 8, 30, 7, 8, 8, 14, 7, 6, 12, 9},
+ {1790, 8, 30, 7, 8, 8, 13, 7, 6, 12, 9},
+ {1780, 8, 30, 7, 8, 8, 13, 7, 6, 12, 9},
+ {1770, 8, 30, 7, 8, 8, 13, 7, 6, 12, 9},
+ {1760, 8, 29, 7, 8, 8, 13, 7, 6, 12, 9},
+ {1750, 8, 29, 7, 8, 8, 13, 7, 6, 12, 9},
+ {1740, 8, 29, 7, 8, 8, 13, 7, 6, 11, 8},
+ {1730, 8, 29, 7, 8, 8, 13, 7, 6, 11, 8},
+ {1720, 8, 29, 7, 7, 8, 13, 7, 6, 11, 8},
+ {1710, 8, 28, 7, 7, 8, 12, 7, 6, 11, 8},
+ {1700, 8, 28, 7, 7, 7, 13, 7, 6, 11, 8},
+ {1690, 8, 28, 7, 7, 7, 13, 7, 6, 11, 8},
+ {1680, 7, 29, 7, 7, 7, 13, 6, 6, 11, 8},
+ {1670, 7, 28, 7, 7, 7, 13, 6, 6, 11, 8},
+ {1660, 7, 28, 7, 7, 7, 13, 6, 6, 11, 8},
+ {1650, 7, 28, 7, 7, 7, 13, 6, 6, 11, 8},
+ {1640, 7, 28, 7, 7, 7, 12, 6, 6, 11, 8},
+ {1630, 7, 27, 7, 7, 7, 12, 6, 6, 11, 8},
+ {1620, 7, 27, 7, 7, 7, 12, 6, 6, 11, 8},
+ {1610, 7, 27, 7, 7, 7, 12, 6, 6, 11, 8},
+ {1600, 7, 27, 6, 7, 7, 12, 6, 5, 10, 8},
+ {1590, 7, 26, 6, 7, 7, 12, 6, 5, 10, 8},
+ {1580, 7, 26, 6, 7, 7, 12, 6, 5, 10, 7},
+ {1570, 7, 26, 6, 7, 7, 11, 6, 5, 10, 7},
+ {1560, 7, 26, 6, 7, 7, 11, 6, 5, 10, 7},
+ {1550, 7, 26, 6, 7, 7, 11, 6, 5, 10, 7},
+ {1540, 7, 25, 6, 7, 7, 11, 6, 5, 10, 7},
+ {1530, 7, 25, 6, 7, 7, 11, 6, 5, 10, 7},
+ {1520, 7, 25, 6, 7, 7, 11, 6, 5, 10, 7},
+ {1510, 7, 25, 6, 7, 7, 11, 6, 5, 10, 7},
+ {1500, 7, 24, 6, 7, 7, 10, 6, 5, 10, 7},
+ {1490, 59, 25, 6, 77, 59, 10, 70, 44, 9, 73},
+ {1480, 59, 24, 6, 76, 58, 10, 70, 44, 9, 73},
+ {1470, 58, 24, 6, 76, 58, 10, 69, 44, 9, 72},
+ {1460, 58, 24, 6, 76, 58, 10, 69, 43, 9, 72},
+ {1450, 58, 24, 6, 75, 57, 10, 68, 43, 9, 71},
+ {1440, 57, 24, 6, 75, 57, 10, 68, 43, 9, 71},
+ {1430, 57, 23, 6, 75, 57, 10, 68, 43, 8, 70},
+ {1420, 56, 23, 6, 74, 57, 9, 67, 43, 8, 70},
+ {1410, 56, 23, 6, 74, 57, 9, 67, 43, 8, 69},
+ {1400, 56, 23, 5, 74, 55, 9, 67, 41, 8, 69},
+ {1390, 55, 23, 5, 73, 55, 9, 66, 41, 8, 68},
+ {1380, 55, 23, 5, 73, 54, 9, 66, 41, 8, 68},
+ {1370, 54, 22, 5, 72, 54, 9, 66, 41, 8, 67},
+ {1360, 54, 22, 5, 72, 54, 9, 65, 40, 8, 67},
+ {1350, 54, 22, 5, 72, 53, 9, 65, 40, 8, 66},
+ {1340, 53, 22, 5, 71, 53, 9, 65, 40, 8, 66},
+ {1330, 53, 22, 5, 71, 53, 9, 64, 39, 8, 65},
+ {1320, 52, 22, 5, 71, 53, 8, 64, 40, 8, 65},
+ {1310, 52, 21, 5, 70, 53, 8, 64, 40, 8, 64},
+ {1300, 51, 21, 5, 70, 51, 8, 63, 38, 8, 64},
+ {1290, 51, 21, 5, 70, 51, 8, 63, 38, 7, 64},
+ {1280, 51, 21, 5, 69, 51, 8, 63, 38, 7, 63},
+ {1270, 50, 21, 5, 69, 50, 8, 62, 38, 7, 63},
+ {1260, 50, 20, 5, 69, 50, 8, 62, 37, 7, 62},
+ {1250, 49, 20, 5, 68, 49, 8, 62, 37, 7, 62},
+ {1240, 49, 20, 5, 68, 49, 8, 61, 37, 7, 61},
+ {1230, 49, 20, 5, 68, 49, 8, 61, 36, 7, 61},
+ {1220, 48, 20, 5, 67, 48, 8, 61, 36, 7, 60},
+ {1210, 48, 19, 5, 67, 48, 7, 60, 36, 7, 60},
+ {1200, 49, 19, 4, 67, 49, 7, 60, 36, 7, 59},
+ {1190, 48, 19, 4, 66, 48, 7, 60, 36, 7, 59},
+ {1180, 48, 19, 4, 66, 48, 7, 59, 36, 7, 58},
+ {1170, 46, 19, 4, 66, 46, 7, 59, 35, 7, 58},
+ {1160, 46, 18, 4, 65, 46, 7, 59, 34, 7, 57},
+ {1150, 45, 18, 4, 65, 46, 7, 58, 34, 7, 57},
+ {1140, 45, 18, 4, 65, 45, 7, 58, 34, 6, 56},
+ {1130, 45, 18, 4, 64, 45, 7, 58, 33, 6, 56},
+ {1120, 44, 18, 4, 64, 44, 7, 57, 33, 6, 55},
+ {1110, 44, 18, 4, 64, 44, 7, 57, 33, 6, 55},
+ {1100, 43, 17, 4, 63, 44, 6, 57, 32, 6, 54},
+ {1090, 43, 17, 4, 63, 44, 6, 56, 33, 6, 54},
+ {1080, 43, 17, 4, 63, 44, 6, 56, 33, 6, 53},
+ {1070, 42, 17, 4, 62, 44, 6, 56, 33, 6, 53},
+ {1060, 42, 17, 4, 62, 42, 6, 55, 31, 6, 52},
+ {1050, 41, 17, 4, 62, 42, 6, 55, 31, 6, 52},
+ {1040, 41, 16, 4, 61, 41, 6, 54, 31, 6, 52},
+ {1030, 41, 16, 4, 61, 41, 6, 54, 30, 6, 51},
+ {1020, 40, 16, 4, 61, 41, 6, 54, 30, 6, 51},
+ {1010, 40, 16, 4, 60, 40, 6, 53, 30, 6, 50},
+ {1000, 39, 16, 3, 60, 40, 6, 53, 29, 5, 50},
+ { 990, 39, 15, 3, 60, 39, 6, 53, 29, 5, 49},
+ { 980, 39, 15, 3, 59, 39, 5, 52, 29, 5, 49},
+ { 970, 38, 15, 3, 59, 39, 5, 52, 29, 5, 48},
+ { 960, 38, 15, 3, 59, 39, 5, 52, 29, 5, 48},
+ { 950, 37, 15, 3, 58, 39, 5, 51, 29, 5, 47},
+ { 940, 37, 14, 3, 58, 39, 5, 51, 29, 5, 47},
+ { 930, 37, 14, 3, 57, 37, 5, 51, 27, 5, 46},
+ { 920, 36, 14, 3, 57, 37, 5, 50, 27, 5, 46},
+ { 910, 36, 14, 3, 57, 36, 5, 50, 27, 5, 45},
+ { 900, 35, 14, 3, 56, 36, 5, 50, 26, 5, 45},
+ { 890, 35, 14, 3, 56, 36, 5, 49, 26, 5, 44},
+ { 880, 35, 13, 3, 56, 35, 5, 49, 26, 5, 44},
+ { 870, 34, 13, 3, 55, 35, 4, 49, 26, 5, 43},
+ { 860, 34, 13, 3, 55, 35, 4, 48, 25, 5, 43},
+ { 850, 33, 13, 3, 55, 35, 4, 48, 26, 4, 42},
+ { 840, 33, 13, 3, 54, 35, 4, 48, 26, 4, 42},
+ { 830, 33, 12, 3, 54, 33, 4, 47, 24, 4, 41},
+ { 820, 32, 12, 3, 54, 33, 4, 47, 24, 4, 41},
+ { 810, 32, 12, 3, 53, 33, 4, 47, 24, 4, 40},
+ { 800, 31, 12, 2, 53, 32, 4, 46, 23, 4, 40},
+ { 790, 31, 12, 2, 53, 32, 4, 46, 23, 4, 39},
+ { 780, 30, 12, 2, 52, 31, 4, 46, 23, 4, 39},
+ { 770, 30, 11, 2, 52, 31, 4, 45, 23, 4, 39},
+ { 760, 30, 11, 2, 52, 31, 3, 45, 22, 4, 38},
+ { 750, 29, 11, 2, 51, 30, 3, 45, 22, 4, 38},
+ { 740, 29, 11, 2, 51, 30, 3, 44, 22, 4, 37},
+ { 730, 28, 11, 2, 51, 31, 3, 44, 22, 4, 37},
+ { 720, 28, 10, 2, 50, 30, 3, 44, 22, 4, 36},
+ { 710, 28, 10, 2, 50, 30, 3, 43, 22, 4, 36},
+ { 700, 27, 10, 2, 50, 28, 3, 43, 20, 3, 35},
+ { 690, 27, 10, 2, 49, 28, 3, 43, 20, 3, 35},
+ { 680, 26, 10, 2, 49, 28, 3, 42, 20, 3, 34},
+ { 670, 26, 10, 2, 49, 27, 3, 42, 20, 3, 34},
+ { 660, 26, 9, 2, 48, 27, 3, 42, 19, 3, 33},
+ { 650, 25, 9, 2, 48, 26, 3, 41, 19, 3, 33},
+ { 640, 25, 9, 2, 48, 26, 2, 41, 19, 3, 32},
+ { 630, 24, 9, 2, 47, 26, 2, 40, 18, 3, 32},
+ { 620, 24, 9, 2, 47, 26, 2, 40, 19, 3, 31},
+ { 610, 24, 8, 2, 47, 26, 2, 40, 19, 3, 31},
+ { 600, 23, 8, 1, 46, 26, 2, 39, 18, 3, 30},
+ { 590, 23, 8, 1, 46, 24, 2, 39, 17, 3, 30},
+ { 580, 22, 8, 1, 46, 24, 2, 39, 17, 3, 29},
+ { 570, 22, 8, 1, 45, 23, 2, 38, 17, 3, 29},
+ { 560, 22, 7, 1, 45, 23, 2, 38, 16, 2, 28},
+ { 550, 21, 7, 1, 45, 23, 2, 38, 16, 2, 28},
+ { 540, 21, 7, 1, 44, 22, 2, 37, 16, 2, 27},
+ { 530, 20, 7, 1, 44, 22, 1, 37, 15, 2, 27},
+ { 520, 20, 7, 1, 43, 21, 1, 37, 15, 2, 27},
+ { 510, 20, 6, 1, 43, 21, 1, 36, 15, 2, 26},
+ { 500, 19, 6, 1, 43, 22, 1, 36, 15, 2, 26},
+ { 490, 19, 6, 1, 42, 21, 1, 36, 15, 2, 25},
+ { 480, 18, 6, 1, 42, 21, 1, 35, 15, 2, 25},
+ { 470, 18, 6, 1, 42, 21, 1, 35, 15, 2, 24},
+ { 460, 18, 6, 1, 41, 19, 1, 35, 13, 2, 24},
+ { 450, 17, 5, 1, 41, 19, 1, 34, 13, 2, 23},
+ { 440, 17, 5, 1, 41, 18, 1, 34, 13, 2, 23},
+ { 430, 16, 5, 1, 40, 18, 0, 34, 12, 2, 22},
+ { 420, 16, 5, 1, 40, 18, 0, 33, 12, 2, 22},
+ { 410, 16, 5, 1, 40, 17, 0, 33, 12, 1, 21},
+ { 400, 15, 5, 0, 39, 17, 0, 33, 11, 1, 21},
+ { 390, 15, 4, 0, 39, 17, 0, 32, 12, 1, 20},
+ { 380, 14, 4, 0, 39, 17, 0, 32, 12, 1, 20},
+ { 370, 14, 4, 0, 38, 17, 0, 32, 12, 1, 19},
+ { 360, 14, 4, 0, 38, 15, 0, 31, 10, 1, 19},
+ { 350, 13, 4, 0, 38, 15, 0, 31, 10, 1, 18},
+ { 340, 13, 3, 0, 37, 15, 0, 31, 10, 1, 18},
+ { 330, 12, 3, 0, 37, 14, 0, 30, 9, 1, 17},
+ { 320, 12, 3, 0, 37, 14, 0, 30, 9, 1, 17},
+ { 310, 12, 3, 0, 36, 13, 0, 30, 9, 1, 16},
+ { 300, 11, 3, 0, 36, 13, 0, 29, 8, 1, 16},
+ { 290, 11, 2, 0, 36, 13, 0, 29, 8, 1, 15},
+ { 280, 10, 2, 0, 35, 12, 0, 29, 8, 1, 15},
+ { 270, 10, 2, 0, 35, 12, 0, 28, 8, 0, 14},
+ { 260, 9, 2, 0, 35, 12, 0, 28, 8, 0, 14},
+ { 250, 9, 2, 0, 34, 12, 0, 28, 8, 0, 14},
+ { 240, 9, 2, 0, 34, 12, 0, 27, 8, 0, 13},
+ { 230, 8, 1, 0, 34, 10, 0, 27, 6, 0, 13},
+ { 220, 8, 1, 0, 33, 10, 0, 27, 6, 0, 12},
+ { 210, 7, 1, 0, 33, 10, 0, 26, 6, 0, 12},
+ { 200, 7, 1, 0, 33, 9, 0, 26, 5, 0, 11},
+ { 190, 7, 1, 0, 32, 9, 0, 25, 5, 0, 11},
+ { 180, 6, 1, 0, 32, 8, 0, 25, 5, 0, 10},
+ { 170, 6, 0, 0, 32, 8, 0, 25, 5, 0, 10},
+ { 160, 5, 0, 0, 31, 8, 0, 24, 4, 0, 9},
+ { 150, 5, 0, 0, 31, 8, 0, 24, 5, 0, 9},
+ { 140, 5, 0, 0, 31, 8, 0, 24, 5, 0, 8},
+ { 130, 4, 0, 0, 30, 6, 0, 23, 3, 0, 8},
+ { 120, 4, 0, 0, 30, 6, 0, 23, 3, 0, 7},
+ { 110, 3, 0, 0, 30, 6, 0, 23, 3, 0, 7},
+ { 100, 3, 0, 0, 29, 5, 0, 22, 2, 0, 6},
+ { 90, 3, 0, 0, 29, 5, 0, 22, 2, 0, 6},
+ { 80, 2, 0, 0, 28, 5, 0, 22, 2, 0, 5},
+};
+
+static void samsung_mipi_dcphy_bias_block_enable(struct samsung_mipi_dcphy *samsung)
+{
+ regmap_write(samsung->regmap, BIAS_CON0, I_DEV_DIV_6 | I_RES_100_2UA);
+ regmap_write(samsung->regmap, BIAS_CON1, I_VBG_SEL_820MV | I_BGR_VREF_820MV |
+ I_LADDER_1_00V);
+ regmap_write(samsung->regmap, BIAS_CON2, REG_325M_325MV | REG_LP_400M_400MV |
+ REG_400M_400MV | REG_645M_645MV);
+
+	/*
+	 * Default output voltage select:
+	 * dphy: 400mV
+	 * cphy: 530mV
+	 */
+ regmap_update_bits(samsung->regmap, BIAS_CON4,
+ I_MUX_SEL_MASK, I_MUX_400MV);
+}
+
+static void samsung_mipi_dphy_lane_enable(struct samsung_mipi_dcphy *samsung)
+{
+ regmap_write(samsung->regmap, DPHY_MC_GNR_CON1, T_PHY_READY(0x2000));
+ regmap_update_bits(samsung->regmap, DPHY_MC_GNR_CON0,
+ PHY_ENABLE, PHY_ENABLE);
+
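+	/*
+	 * Enable the data lanes from the highest requested lane downwards;
+	 * each case intentionally falls through to the lower lanes.
+	 */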
+ switch (samsung->lanes) {
+ case 4:
+ regmap_write(samsung->regmap, DPHY_MD3_GNR_CON1,
+ T_PHY_READY(0x2000));
+ regmap_update_bits(samsung->regmap, DPHY_MD3_GNR_CON0,
+ PHY_ENABLE, PHY_ENABLE);
+ fallthrough;
+ case 3:
+ regmap_write(samsung->regmap, COMBO_MD2_GNR_CON1,
+ T_PHY_READY(0x2000));
+ regmap_update_bits(samsung->regmap, COMBO_MD2_GNR_CON0,
+ PHY_ENABLE, PHY_ENABLE);
+ fallthrough;
+ case 2:
+ regmap_write(samsung->regmap, COMBO_MD1_GNR_CON1,
+ T_PHY_READY(0x2000));
+ regmap_update_bits(samsung->regmap, COMBO_MD1_GNR_CON0,
+ PHY_ENABLE, PHY_ENABLE);
+ fallthrough;
+ case 1:
+ default:
+ regmap_write(samsung->regmap, COMBO_MD0_GNR_CON1,
+ T_PHY_READY(0x2000));
+ regmap_update_bits(samsung->regmap, COMBO_MD0_GNR_CON0,
+ PHY_ENABLE, PHY_ENABLE);
+ break;
+ }
+}
+
+static void samsung_mipi_dphy_lane_disable(struct samsung_mipi_dcphy *samsung)
+{
+ switch (samsung->lanes) {
+ case 4:
+ regmap_update_bits(samsung->regmap, DPHY_MD3_GNR_CON0,
+ PHY_ENABLE, 0);
+ fallthrough;
+ case 3:
+ regmap_update_bits(samsung->regmap, COMBO_MD2_GNR_CON0,
+ PHY_ENABLE, 0);
+ fallthrough;
+ case 2:
+ regmap_update_bits(samsung->regmap, COMBO_MD1_GNR_CON0,
+ PHY_ENABLE, 0);
+ fallthrough;
+ case 1:
+ default:
+ regmap_update_bits(samsung->regmap, COMBO_MD0_GNR_CON0,
+ PHY_ENABLE, 0);
+ break;
+ }
+
+ regmap_update_bits(samsung->regmap, DPHY_MC_GNR_CON0, PHY_ENABLE, 0);
+}
+
+static void samsung_mipi_dcphy_pll_configure(struct samsung_mipi_dcphy *samsung)
+{
+ regmap_update_bits(samsung->regmap, PLL_CON0, S_MASK | P_MASK,
+ S(samsung->pll.scaler) | P(samsung->pll.prediv));
+
+ if (samsung->pll.dsm < 0) {
+ u16 dsm_tmp;
+
+		/*
+		 * Store the negative dsm value as its 16-bit two's
+		 * complement, e.g. -1 is written as 0xffff.
+		 */
+ dsm_tmp = abs(samsung->pll.dsm);
+ dsm_tmp = dsm_tmp - 1;
+ dsm_tmp ^= 0xffff;
+ regmap_write(samsung->regmap, PLL_CON1, dsm_tmp);
+ } else {
+ regmap_write(samsung->regmap, PLL_CON1, samsung->pll.dsm);
+ }
+
+ regmap_update_bits(samsung->regmap, PLL_CON2,
+ M_MASK, M(samsung->pll.fbdiv));
+
+ if (samsung->pll.ssc_en) {
+ regmap_write(samsung->regmap, PLL_CON3,
+ MRR(samsung->pll.mrr) | MFR(samsung->pll.mfr));
+ regmap_update_bits(samsung->regmap, PLL_CON4, SSCG_EN, SSCG_EN);
+ }
+
+ regmap_write(samsung->regmap, PLL_CON5, RESET_N_SEL | PLL_ENABLE_SEL);
+ regmap_write(samsung->regmap, PLL_CON7, PLL_LOCK_CNT(0xf000));
+ regmap_write(samsung->regmap, PLL_CON8, PLL_STB_CNT(0xf000));
+}
+
+static int samsung_mipi_dcphy_pll_enable(struct samsung_mipi_dcphy *samsung)
+{
+ u32 sts;
+ int ret;
+
+ regmap_update_bits(samsung->regmap, PLL_CON0, PLL_EN, PLL_EN);
+
+ ret = regmap_read_poll_timeout(samsung->regmap, PLL_STAT0,
+ sts, (sts & PLL_LOCK), 1000, 20000);
+ if (ret < 0)
+ dev_err(samsung->dev, "DC-PHY pll failed to lock\n");
+
+ return ret;
+}
+
+static void samsung_mipi_dcphy_pll_disable(struct samsung_mipi_dcphy *samsung)
+{
+ regmap_update_bits(samsung->regmap, PLL_CON0, PLL_EN, 0);
+}
+
+static const struct samsung_mipi_dphy_timing *
+samsung_mipi_dphy_get_timing(struct samsung_mipi_dcphy *samsung)
+{
+ const struct samsung_mipi_dphy_timing *timings;
+ unsigned int num_timings;
+ unsigned int lane_mbps = div64_ul(samsung->pll.rate, USEC_PER_SEC);
+ unsigned int i;
+
+ timings = samsung_mipi_dphy_timing_table;
+ num_timings = ARRAY_SIZE(samsung_mipi_dphy_timing_table);
+
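+	/*
+	 * The table is sorted by descending max_lane_mbps; scan it from the
+	 * slowest entry upwards and return the first entry whose
+	 * max_lane_mbps can carry the requested lane rate.
+	 */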
+ for (i = num_timings; i > 1; i--)
+ if (lane_mbps <= timings[i - 1].max_lane_mbps)
+ break;
+
+ return &timings[i - 1];
+}
+
+static unsigned long
+samsung_mipi_dcphy_pll_round_rate(struct samsung_mipi_dcphy *samsung,
+ unsigned long prate, unsigned long rate,
+ u8 *prediv, u16 *fbdiv, int *dsm, u8 *scaler)
+{
+ u32 max_fout = samsung->pdata->dphy_tx_max_lane_kbps;
+ u64 best_freq = 0;
+ u64 fin, fvco, fout;
+ u8 min_prediv, max_prediv;
+ u8 _prediv, best_prediv = 1;
+ u16 _fbdiv, best_fbdiv = 1;
+ u8 _scaler, best_scaler = 0;
+ u32 min_delta = UINT_MAX;
+ long _dsm, best_dsm = 0;
+
+ if (!prate) {
+ dev_err(samsung->dev, "parent rate of PLL can not be zero\n");
+ return 0;
+ }
+
+ /*
+ * The PLL output frequency can be calculated using a simple formula:
+ * Fvco = ((m+k/65536) x 2 x Fin) / p
+ * Fout = ((m+k/65536) x 2 x Fin) / (p x 2^s)
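+	 *
+	 * Worked example (illustrative values only): with Fin = 24MHz,
+	 * p = 2, m = 216, k = 0 and s = 1:
+	 *   Fvco = (216 x 2 x 24MHz) / 2 = 5184MHz
+	 *   Fout = 5184MHz / 2^1 = 2592MHz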
+ */
+ fin = div64_ul(prate, MSEC_PER_SEC);
+
+ while (!best_freq) {
+ fout = div64_ul(rate, MSEC_PER_SEC);
+ if (fout > max_fout)
+ fout = max_fout;
+
+ /* 0 ≤ S[2:0] ≤ 6 */
+ for (_scaler = 0; _scaler < 7; _scaler++) {
+ fvco = fout << _scaler;
+
+ /*
+ * 2600MHz ≤ FVCO ≤ 6600MHz
+ */
+ if (fvco < 2600 * MSEC_PER_SEC || fvco > 6600 * MSEC_PER_SEC)
+ continue;
+
+ /* 6MHz ≤ Fref(Fin / p) ≤ 30MHz */
+ min_prediv = DIV_ROUND_UP_ULL(fin, 30 * MSEC_PER_SEC);
+ max_prediv = DIV_ROUND_CLOSEST_ULL(fin, 6 * MSEC_PER_SEC);
+
+ for (_prediv = min_prediv; _prediv <= max_prediv; _prediv++) {
+ u64 delta, tmp;
+
+ _fbdiv = DIV_ROUND_CLOSEST_ULL(fvco * _prediv, 2 * fin);
+
+ /* 64 ≤ M[9:0] ≤ 1023 */
+ if (_fbdiv < 64 || _fbdiv > 1023)
+ continue;
+
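+				/*
+				 * Solve the Fvco formula above for k:
+				 * k = ((p x Fvco - 2 x m x Fin) << 15) / Fin
+				 */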
+ /* -32767 ≤ K[15:0] ≤ 32767 */
+ _dsm = ((_prediv * fvco) - (2 * _fbdiv * fin));
+ _dsm = DIV_ROUND_UP_ULL(_dsm << 15, fin);
+ if (abs(_dsm) > 32767)
+ continue;
+
+ tmp = DIV_ROUND_CLOSEST_ULL((_fbdiv * fin * 2 * 1000), _prediv);
+ tmp += DIV_ROUND_CLOSEST_ULL((_dsm * fin * 1000), _prediv << 15);
+
+ delta = abs(fvco * MSEC_PER_SEC - tmp);
+ if (delta < min_delta) {
+ best_prediv = _prediv;
+ best_fbdiv = _fbdiv;
+ best_dsm = _dsm;
+ best_scaler = _scaler;
+ min_delta = delta;
+ best_freq = DIV_ROUND_CLOSEST_ULL(tmp, 1000) * MSEC_PER_SEC;
+ }
+ }
+ }
+
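+		/*
+		 * No valid divider combination was found for this rate;
+		 * retry with the target bumped by 100kHz.
+		 */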
+ rate += 100 * MSEC_PER_SEC;
+ }
+
+ *prediv = best_prediv;
+ *fbdiv = best_fbdiv;
+	*dsm = (int)best_dsm;
+ *scaler = best_scaler;
+ dev_dbg(samsung->dev, "p: %d, m: %d, dsm:%ld, scaler: %d\n",
+ best_prediv, best_fbdiv, best_dsm, best_scaler);
+
+ return best_freq >> best_scaler;
+}
+
+static void
+samsung_mipi_dphy_clk_lane_timing_init(struct samsung_mipi_dcphy *samsung)
+{
+ const struct samsung_mipi_dphy_timing *timing;
+ unsigned int lane_hs_rate = div64_ul(samsung->pll.rate, USEC_PER_SEC);
+ u32 val, res_up, res_down;
+
+ timing = samsung_mipi_dphy_get_timing(samsung);
+ regmap_write(samsung->regmap, DPHY_MC_GNR_CON0, 0xf000);
+
+ /*
+ * The Drive-Strength / Voltage-Amplitude is adjusted by setting
+ * the Driver-Up Resistor and Driver-Down Resistor.
+ */
+ res_up = samsung->pdata->dphy_hs_drv_res_cfg->clk_hs_drv_up_ohm;
+ res_down = samsung->pdata->dphy_hs_drv_res_cfg->clk_hs_drv_down_ohm;
+ val = EDGE_CON(7) | EDGE_CON_DIR(0) | EDGE_CON_EN |
+ RES_UP(res_up) | RES_DN(res_down);
+ regmap_write(samsung->regmap, DPHY_MC_ANA_CON0, val);
+
+ if (lane_hs_rate >= 4500)
+ regmap_write(samsung->regmap, DPHY_MC_ANA_CON1, 0x0001);
+
+ val = 0;
+	/*
+	 * Use the divide-by-2 clock derived from the serial clock when the
+	 * data rate is under 1500Mbps, otherwise use the divide-by-16 clock.
+	 */
+ if (lane_hs_rate < 1500)
+ val = HSTX_CLK_SEL;
+
+ val |= T_LPX(timing->lpx);
+	/* T_LP_EXIT_SKEW/T_LP_ENTRY_SKEW are left unconfigured */
+ regmap_write(samsung->regmap, DPHY_MC_TIME_CON0, val);
+
+ val = T_CLK_ZERO(timing->clk_zero) | T_CLK_PREPARE(timing->clk_prepare);
+ regmap_write(samsung->regmap, DPHY_MC_TIME_CON1, val);
+
+ val = T_HS_EXIT(timing->hs_exit) | T_CLK_TRAIL(timing->clk_trail_eot);
+ regmap_write(samsung->regmap, DPHY_MC_TIME_CON2, val);
+
+ val = T_CLK_POST(timing->clk_post);
+ regmap_write(samsung->regmap, DPHY_MC_TIME_CON3, val);
+
+ /* Escape Clock is 20.00MHz */
+ regmap_write(samsung->regmap, DPHY_MC_TIME_CON4, 0x1f4);
+
+	/*
+	 * Skew calibration must be off when the operating data rate is at
+	 * or below 1.5Gbps.
+	 */
+ if (lane_hs_rate > 1500)
+ regmap_write(samsung->regmap, DPHY_MC_DESKEW_CON0, 0x9cb1);
+}
+
+static void
+samsung_mipi_dphy_data_lane_timing_init(struct samsung_mipi_dcphy *samsung)
+{
+ const struct samsung_mipi_dphy_timing *timing;
+ unsigned int lane_hs_rate = div64_ul(samsung->pll.rate, USEC_PER_SEC);
+ u32 val, res_up, res_down;
+
+ timing = samsung_mipi_dphy_get_timing(samsung);
+
+ /*
+ * The Drive-Strength / Voltage-Amplitude is adjusted by adjusting the
+ * Driver-Up Resistor and Driver-Down Resistor.
+ */
+ res_up = samsung->pdata->dphy_hs_drv_res_cfg->data_hs_drv_up_ohm;
+ res_down = samsung->pdata->dphy_hs_drv_res_cfg->data_hs_drv_down_ohm;
+ val = EDGE_CON(7) | EDGE_CON_DIR(0) | EDGE_CON_EN |
+ RES_UP(res_up) | RES_DN(res_down);
+ regmap_write(samsung->regmap, COMBO_MD0_ANA_CON0, val);
+ regmap_write(samsung->regmap, COMBO_MD1_ANA_CON0, val);
+ regmap_write(samsung->regmap, COMBO_MD2_ANA_CON0, val);
+ regmap_write(samsung->regmap, DPHY_MD3_ANA_CON0, val);
+
+ if (lane_hs_rate >= 4500) {
+ regmap_write(samsung->regmap, COMBO_MD0_ANA_CON1, 0x0001);
+ regmap_write(samsung->regmap, COMBO_MD1_ANA_CON1, 0x0001);
+ regmap_write(samsung->regmap, COMBO_MD2_ANA_CON1, 0x0001);
+ regmap_write(samsung->regmap, DPHY_MD3_ANA_CON1, 0x0001);
+ }
+
+ val = 0;
+	/*
+	 * Use the divide-by-2 clock derived from the serial clock when the
+	 * data rate is under 1500Mbps, otherwise use the divide-by-16 clock.
+	 */
+ if (lane_hs_rate < 1500)
+ val = HSTX_CLK_SEL;
+
+ val |= T_LPX(timing->lpx);
+	/* T_LP_EXIT_SKEW/T_LP_ENTRY_SKEW are left unconfigured */
+ regmap_write(samsung->regmap, COMBO_MD0_TIME_CON0, val);
+ regmap_write(samsung->regmap, COMBO_MD1_TIME_CON0, val);
+ regmap_write(samsung->regmap, COMBO_MD2_TIME_CON0, val);
+ regmap_write(samsung->regmap, DPHY_MD3_TIME_CON0, val);
+
+ val = T_HS_ZERO(timing->hs_zero) | T_HS_PREPARE(timing->hs_prepare);
+ regmap_write(samsung->regmap, COMBO_MD0_TIME_CON1, val);
+ regmap_write(samsung->regmap, COMBO_MD1_TIME_CON1, val);
+ regmap_write(samsung->regmap, COMBO_MD2_TIME_CON1, val);
+ regmap_write(samsung->regmap, DPHY_MD3_TIME_CON1, val);
+
+ val = T_HS_EXIT(timing->hs_exit) | T_HS_TRAIL(timing->hs_trail_eot);
+ regmap_write(samsung->regmap, COMBO_MD0_TIME_CON2, val);
+ regmap_write(samsung->regmap, COMBO_MD1_TIME_CON2, val);
+ regmap_write(samsung->regmap, COMBO_MD2_TIME_CON2, val);
+ regmap_write(samsung->regmap, DPHY_MD3_TIME_CON2, val);
+
+	/* TTA-GET/TTA-GO timing counter registers use their default values */
+ val = T_TA_GET(0x3) | T_TA_GO(0x0);
+ regmap_write(samsung->regmap, COMBO_MD0_TIME_CON3, val);
+ regmap_write(samsung->regmap, COMBO_MD1_TIME_CON3, val);
+ regmap_write(samsung->regmap, COMBO_MD2_TIME_CON3, val);
+ regmap_write(samsung->regmap, DPHY_MD3_TIME_CON3, val);
+
+ /* Escape Clock is 20.00MHz */
+ regmap_write(samsung->regmap, COMBO_MD0_TIME_CON4, 0x1f4);
+ regmap_write(samsung->regmap, COMBO_MD1_TIME_CON4, 0x1f4);
+ regmap_write(samsung->regmap, COMBO_MD2_TIME_CON4, 0x1f4);
+ regmap_write(samsung->regmap, DPHY_MD3_TIME_CON4, 0x1f4);
+}
+
+static int samsung_mipi_dphy_power_on(struct samsung_mipi_dcphy *samsung)
+{
+ int ret;
+
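+	/* Hold the master PHY in reset while bias, PLL and timings are set up */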
+ reset_control_assert(samsung->m_phy_rst);
+
+ samsung_mipi_dcphy_bias_block_enable(samsung);
+ samsung_mipi_dcphy_pll_configure(samsung);
+ samsung_mipi_dphy_clk_lane_timing_init(samsung);
+ samsung_mipi_dphy_data_lane_timing_init(samsung);
+ ret = samsung_mipi_dcphy_pll_enable(samsung);
+ if (ret < 0)
+ return ret;
+
+ samsung_mipi_dphy_lane_enable(samsung);
+
+ reset_control_deassert(samsung->m_phy_rst);
+
+	/*
+	 * The TSKEWCAL maximum is 100 µsec at initial calibration.
+	 */
+ usleep_range(100, 110);
+
+ return 0;
+}
+
+static int samsung_mipi_dcphy_power_on(struct phy *phy)
+{
+ struct samsung_mipi_dcphy *samsung = phy_get_drvdata(phy);
+
+ reset_control_assert(samsung->apb_rst);
+ udelay(1);
+ reset_control_deassert(samsung->apb_rst);
+
+ switch (samsung->type) {
+ case PHY_TYPE_DPHY:
+ return samsung_mipi_dphy_power_on(samsung);
+ default:
+ /* CPHY part to be implemented later */
+ return -EOPNOTSUPP;
+ }
+}
+
+static int samsung_mipi_dcphy_power_off(struct phy *phy)
+{
+ struct samsung_mipi_dcphy *samsung = phy_get_drvdata(phy);
+
+ switch (samsung->type) {
+ case PHY_TYPE_DPHY:
+ samsung_mipi_dphy_lane_disable(samsung);
+ break;
+ default:
+ /* CPHY part to be implemented later */
+ return -EOPNOTSUPP;
+ }
+
+ samsung_mipi_dcphy_pll_disable(samsung);
+
+ return 0;
+}
+
+static int
+samsung_mipi_dcphy_pll_ssc_modulation_calc(struct samsung_mipi_dcphy *samsung,
+ u8 *mfr, u8 *mrr)
+{
+ unsigned long fin = div64_ul(clk_get_rate(samsung->ref_clk), MSEC_PER_SEC);
+ u16 prediv = samsung->pll.prediv;
+ u16 fbdiv = samsung->pll.fbdiv;
+ u16 min_mfr, max_mfr;
+ u16 _mfr, best_mfr = 0;
+ u16 mr, _mrr, best_mrr = 0;
+
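+	/*
+	 * The modulation frequency is MF = Fin / (p x mfr x 32), so the
+	 * mfr search bounds below follow from the permitted MF range.
+	 */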
+	/* 20kHz ≤ MF ≤ 150kHz */
+ max_mfr = DIV_ROUND_UP(fin, (20 * prediv) << 5);
+ min_mfr = div64_ul(fin, ((150 * prediv) << 5));
+	/* 0 ≤ mfr ≤ 255 */
+ if (max_mfr > 256)
+ max_mfr = 256;
+
+ for (_mfr = min_mfr; _mfr < max_mfr; _mfr++) {
+ /* 1 ≤ mrr ≤ 31 */
+ for (_mrr = 1; _mrr < 32; _mrr++) {
+ mr = DIV_ROUND_UP(_mfr * _mrr * 100, fbdiv << 6);
+ /* 0 ≤ MR ≤ 5% */
+ if (mr > 5)
+ continue;
+
+ if (_mfr * _mrr < 513) {
+ best_mfr = _mfr;
+ best_mrr = _mrr;
+ break;
+ }
+ }
+ }
+
+ if (best_mrr) {
+ *mfr = best_mfr & 0xff;
+ *mrr = best_mrr & 0x3f;
+ } else {
+ dev_err(samsung->dev, "failed to calc ssc parameter mfr and mrr\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+samsung_mipi_dcphy_pll_calc_rate(struct samsung_mipi_dcphy *samsung,
+ unsigned long long rate)
+{
+ unsigned long prate = clk_get_rate(samsung->ref_clk);
+ unsigned long fout;
+ u8 scaler = 0, mfr = 0, mrr = 0;
+ u16 fbdiv = 0;
+ u8 prediv = 1;
+ int dsm = 0;
+ int ret;
+
+ fout = samsung_mipi_dcphy_pll_round_rate(samsung, prate, rate,
+ &prediv, &fbdiv, &dsm,
+ &scaler);
+
+ dev_dbg(samsung->dev, "%s: fin=%lu, req_rate=%llu\n",
+ __func__, prate, rate);
+ dev_dbg(samsung->dev, "%s: fout=%lu, prediv=%u, fbdiv=%u\n",
+ __func__, fout, prediv, fbdiv);
+
+ samsung->pll.prediv = prediv;
+ samsung->pll.fbdiv = fbdiv;
+ samsung->pll.dsm = dsm;
+ samsung->pll.scaler = scaler;
+ samsung->pll.rate = fout;
+
+	/*
+	 * All D-PHY 2.0 compliant transmitters operating above 2.5Gbps
+	 * shall support SSC.
+	 */
+ if (fout > 2500000000LL) {
+ ret = samsung_mipi_dcphy_pll_ssc_modulation_calc(samsung,
+ &mfr, &mrr);
+ if (!ret) {
+ samsung->pll.ssc_en = true;
+ samsung->pll.mfr = mfr;
+ samsung->pll.mrr = mrr;
+ }
+ }
+}
+
+static int samsung_mipi_dcphy_configure(struct phy *phy,
+ union phy_configure_opts *opts)
+{
+ struct samsung_mipi_dcphy *samsung = phy_get_drvdata(phy);
+ unsigned long long target_rate = opts->mipi_dphy.hs_clk_rate;
+
+ samsung->lanes = opts->mipi_dphy.lanes > 4 ? 4 : opts->mipi_dphy.lanes;
+
+ samsung_mipi_dcphy_pll_calc_rate(samsung, target_rate);
+ opts->mipi_dphy.hs_clk_rate = samsung->pll.rate;
+
+ return 0;
+}
+
+static int samsung_mipi_dcphy_init(struct phy *phy)
+{
+ struct samsung_mipi_dcphy *samsung = phy_get_drvdata(phy);
+
+ return pm_runtime_resume_and_get(samsung->dev);
+}
+
+static int samsung_mipi_dcphy_exit(struct phy *phy)
+{
+ struct samsung_mipi_dcphy *samsung = phy_get_drvdata(phy);
+
+ return pm_runtime_put(samsung->dev);
+}
+
+static const struct phy_ops samsung_mipi_dcphy_ops = {
+ .configure = samsung_mipi_dcphy_configure,
+ .power_on = samsung_mipi_dcphy_power_on,
+ .power_off = samsung_mipi_dcphy_power_off,
+ .init = samsung_mipi_dcphy_init,
+ .exit = samsung_mipi_dcphy_exit,
+ .owner = THIS_MODULE,
+};
+
+static const struct regmap_config samsung_mipi_dcphy_regmap_config = {
+ .name = "dcphy",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x10000,
+};
+
+static struct phy *samsung_mipi_dcphy_xlate(struct device *dev,
+ const struct of_phandle_args *args)
+{
+ struct samsung_mipi_dcphy *samsung = dev_get_drvdata(dev);
+
+ if (args->args_count != 1) {
+ dev_err(dev, "invalid number of arguments\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (samsung->type != PHY_NONE && samsung->type != args->args[0])
+ dev_warn(dev, "phy type select %d overwriting type %d\n",
+ args->args[0], samsung->type);
+
+ samsung->type = args->args[0];
+
+ return samsung->phy;
+}
+
+static int samsung_mipi_dcphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct samsung_mipi_dcphy *samsung;
+ struct phy_provider *phy_provider;
+ struct resource *res;
+ void __iomem *regs;
+ int ret;
+
+ samsung = devm_kzalloc(dev, sizeof(*samsung), GFP_KERNEL);
+ if (!samsung)
+ return -ENOMEM;
+
+ samsung->dev = dev;
+ samsung->pdata = device_get_match_data(dev);
+ platform_set_drvdata(pdev, samsung);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ samsung->regmap = devm_regmap_init_mmio(dev, regs,
+ &samsung_mipi_dcphy_regmap_config);
+ if (IS_ERR(samsung->regmap))
+ return dev_err_probe(dev, PTR_ERR(samsung->regmap), "Failed to init regmap\n");
+
+ samsung->grf_regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+ if (IS_ERR(samsung->grf_regmap))
+ return dev_err_probe(dev, PTR_ERR(samsung->grf_regmap),
+ "Unable to get rockchip,grf\n");
+
+ samsung->ref_clk = devm_clk_get(dev, "ref");
+ if (IS_ERR(samsung->ref_clk))
+ return dev_err_probe(dev, PTR_ERR(samsung->ref_clk),
+ "Failed to get reference clock\n");
+
+ samsung->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(samsung->pclk))
+ return dev_err_probe(dev, PTR_ERR(samsung->pclk), "Failed to get pclk\n");
+
+ samsung->m_phy_rst = devm_reset_control_get(dev, "m_phy");
+ if (IS_ERR(samsung->m_phy_rst))
+ return dev_err_probe(dev, PTR_ERR(samsung->m_phy_rst),
+ "Failed to get system m_phy_rst control\n");
+
+ samsung->s_phy_rst = devm_reset_control_get(dev, "s_phy");
+ if (IS_ERR(samsung->s_phy_rst))
+ return dev_err_probe(dev, PTR_ERR(samsung->s_phy_rst),
+ "Failed to get system s_phy_rst control\n");
+
+ samsung->apb_rst = devm_reset_control_get(dev, "apb");
+ if (IS_ERR(samsung->apb_rst))
+ return dev_err_probe(dev, PTR_ERR(samsung->apb_rst),
+ "Failed to get system apb_rst control\n");
+
+ samsung->grf_apb_rst = devm_reset_control_get(dev, "grf");
+ if (IS_ERR(samsung->grf_apb_rst))
+ return dev_err_probe(dev, PTR_ERR(samsung->grf_apb_rst),
+ "Failed to get system grf_apb_rst control\n");
+
+ samsung->phy = devm_phy_create(dev, NULL, &samsung_mipi_dcphy_ops);
+ if (IS_ERR(samsung->phy))
+ return dev_err_probe(dev, PTR_ERR(samsung->phy), "Failed to create MIPI DC-PHY\n");
+
+ phy_set_drvdata(samsung->phy, samsung);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
+
+ phy_provider = devm_of_phy_provider_register(dev, samsung_mipi_dcphy_xlate);
+ if (IS_ERR(phy_provider))
+ return dev_err_probe(dev, PTR_ERR(phy_provider),
+ "Failed to register phy provider\n");
+
+ return 0;
+}
+
+static __maybe_unused int samsung_mipi_dcphy_runtime_suspend(struct device *dev)
+{
+ struct samsung_mipi_dcphy *samsung = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(samsung->ref_clk);
+ clk_disable_unprepare(samsung->pclk);
+
+ return 0;
+}
+
+static __maybe_unused int samsung_mipi_dcphy_runtime_resume(struct device *dev)
+{
+ struct samsung_mipi_dcphy *samsung = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(samsung->pclk);
+ if (ret) {
+ dev_err(samsung->dev, "Failed to enable pclk, %d\n", ret);
+ return ret;
+ }
+
+	ret = clk_prepare_enable(samsung->ref_clk);
+ if (ret) {
+ dev_err(samsung->dev, "Failed to enable reference clock, %d\n", ret);
+ clk_disable_unprepare(samsung->pclk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops samsung_mipi_dcphy_pm_ops = {
+ SET_RUNTIME_PM_OPS(samsung_mipi_dcphy_runtime_suspend,
+ samsung_mipi_dcphy_runtime_resume, NULL)
+};
+
+static const struct hs_drv_res_cfg rk3576_dphy_hs_drv_res_cfg = {
+ .clk_hs_drv_up_ohm = STRENGTH_52_OHM,
+ .clk_hs_drv_down_ohm = STRENGTH_52_OHM,
+ .data_hs_drv_up_ohm = STRENGTH_39_OHM,
+ .data_hs_drv_down_ohm = STRENGTH_39_OHM,
+};
+
+static const struct hs_drv_res_cfg rk3588_dphy_hs_drv_res_cfg = {
+ .clk_hs_drv_up_ohm = STRENGTH_34_OHM,
+ .clk_hs_drv_down_ohm = STRENGTH_34_OHM,
+ .data_hs_drv_up_ohm = STRENGTH_43_OHM,
+ .data_hs_drv_down_ohm = STRENGTH_43_OHM,
+};
+
+static const struct samsung_mipi_dcphy_plat_data rk3576_samsung_mipi_dcphy_plat_data = {
+ .dphy_hs_drv_res_cfg = &rk3576_dphy_hs_drv_res_cfg,
+ .dphy_tx_max_lane_kbps = 2500000L,
+};
+
+static const struct samsung_mipi_dcphy_plat_data rk3588_samsung_mipi_dcphy_plat_data = {
+ .dphy_hs_drv_res_cfg = &rk3588_dphy_hs_drv_res_cfg,
+ .dphy_tx_max_lane_kbps = 4500000L,
+};
+
+static const struct of_device_id samsung_mipi_dcphy_of_match[] = {
+ {
+ .compatible = "rockchip,rk3576-mipi-dcphy",
+ .data = &rk3576_samsung_mipi_dcphy_plat_data,
+ }, {
+ .compatible = "rockchip,rk3588-mipi-dcphy",
+ .data = &rk3588_samsung_mipi_dcphy_plat_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, samsung_mipi_dcphy_of_match);
+
+static struct platform_driver samsung_mipi_dcphy_driver = {
+ .driver = {
+ .name = "samsung-mipi-dcphy",
+ .of_match_table = samsung_mipi_dcphy_of_match,
+ .pm = &samsung_mipi_dcphy_pm_ops,
+ },
+ .probe = samsung_mipi_dcphy_probe,
+};
+module_platform_driver(samsung_mipi_dcphy_driver);
+
+MODULE_AUTHOR("Guochun Huang <hero.huang@rock-chips.com>");
+MODULE_DESCRIPTION("Samsung MIPI DCPHY Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
index 0965b9d4f9cf..fe7c05748356 100644
--- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
+++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
@@ -25,6 +25,7 @@
#define HDPTX_I_PLL_EN BIT(7)
#define HDPTX_I_BIAS_EN BIT(6)
#define HDPTX_I_BGR_EN BIT(5)
+#define HDPTX_MODE_SEL BIT(0)
#define GRF_HDPTX_STATUS 0x80
#define HDPTX_O_PLL_LOCK_DONE BIT(3)
#define HDPTX_O_PHY_CLK_RDY BIT(2)
@@ -44,66 +45,130 @@
#define LANE_REG(n) HDTPX_REG(n, 0300, 062d)
/* CMN_REG(0008) */
+#define OVRD_LCPLL_EN_MASK BIT(7)
#define LCPLL_EN_MASK BIT(6)
#define LCPLL_LCVCO_MODE_EN_MASK BIT(4)
/* CMN_REG(001e) */
#define LCPLL_PI_EN_MASK BIT(5)
#define LCPLL_100M_CLK_EN_MASK BIT(0)
/* CMN_REG(0025) */
-#define LCPLL_PMS_IQDIV_RSTN BIT(4)
+#define LCPLL_PMS_IQDIV_RSTN_MASK BIT(4)
/* CMN_REG(0028) */
-#define LCPLL_SDC_FRAC_EN BIT(2)
-#define LCPLL_SDC_FRAC_RSTN BIT(0)
+#define LCPLL_SDC_FRAC_EN_MASK BIT(2)
+#define LCPLL_SDC_FRAC_RSTN_MASK BIT(0)
/* CMN_REG(002d) */
#define LCPLL_SDC_N_MASK GENMASK(3, 1)
/* CMN_REG(002e) */
#define LCPLL_SDC_NUMBERATOR_MASK GENMASK(5, 0)
/* CMN_REG(002f) */
#define LCPLL_SDC_DENOMINATOR_MASK GENMASK(7, 2)
-#define LCPLL_SDC_NDIV_RSTN BIT(0)
+#define LCPLL_SDC_NDIV_RSTN_MASK BIT(0)
+/* CMN_REG(003c) */
+#define ANA_LCPLL_RESERVED7_MASK BIT(7)
/* CMN_REG(003d) */
-#define ROPLL_LCVCO_EN BIT(4)
+#define OVRD_ROPLL_EN_MASK BIT(7)
+#define ROPLL_EN_MASK BIT(6)
+#define ROPLL_LCVCO_EN_MASK BIT(4)
+/* CMN_REG(0046) */
+#define ROPLL_ANA_CPP_CTRL_COARSE_MASK GENMASK(7, 4)
+#define ROPLL_ANA_CPP_CTRL_FINE_MASK GENMASK(3, 0)
+/* CMN_REG(0047) */
+#define ROPLL_ANA_LPF_C_SEL_COARSE_MASK GENMASK(5, 3)
+#define ROPLL_ANA_LPF_C_SEL_FINE_MASK GENMASK(2, 0)
/* CMN_REG(004e) */
-#define ROPLL_PI_EN BIT(5)
+#define ROPLL_PI_EN_MASK BIT(5)
+/* CMN_REG(0051) */
+#define ROPLL_PMS_MDIV_MASK GENMASK(7, 0)
+/* CMN_REG(0055) */
+#define ROPLL_PMS_MDIV_AFC_MASK GENMASK(7, 0)
+/* CMN_REG(0059) */
+#define ANA_ROPLL_PMS_PDIV_MASK GENMASK(7, 4)
+#define ANA_ROPLL_PMS_REFDIV_MASK GENMASK(3, 0)
+/* CMN_REG(005a) */
+#define ROPLL_PMS_SDIV_RBR_MASK GENMASK(7, 4)
+#define ROPLL_PMS_SDIV_HBR_MASK GENMASK(3, 0)
+/* CMN_REG(005b) */
+#define ROPLL_PMS_SDIV_HBR2_MASK GENMASK(7, 4)
/* CMN_REG(005c) */
-#define ROPLL_PMS_IQDIV_RSTN BIT(5)
+#define ROPLL_PMS_IQDIV_RSTN_MASK BIT(5)
/* CMN_REG(005e) */
#define ROPLL_SDM_EN_MASK BIT(6)
-#define ROPLL_SDM_FRAC_EN_RBR BIT(3)
-#define ROPLL_SDM_FRAC_EN_HBR BIT(2)
-#define ROPLL_SDM_FRAC_EN_HBR2 BIT(1)
-#define ROPLL_SDM_FRAC_EN_HBR3 BIT(0)
+#define OVRD_ROPLL_SDM_RSTN_MASK BIT(5)
+#define ROPLL_SDM_RSTN_MASK BIT(4)
+#define ROPLL_SDC_FRAC_EN_RBR_MASK BIT(3)
+#define ROPLL_SDC_FRAC_EN_HBR_MASK BIT(2)
+#define ROPLL_SDC_FRAC_EN_HBR2_MASK BIT(1)
+#define ROPLL_SDM_FRAC_EN_HBR3_MASK BIT(0)
+/* CMN_REG(005f) */
+#define OVRD_ROPLL_SDC_RSTN_MASK BIT(5)
+#define ROPLL_SDC_RSTN_MASK BIT(4)
+/* CMN_REG(0060) */
+#define ROPLL_SDM_DENOMINATOR_MASK GENMASK(7, 0)
/* CMN_REG(0064) */
#define ROPLL_SDM_NUM_SIGN_RBR_MASK BIT(3)
+#define ROPLL_SDM_NUM_SIGN_HBR_MASK BIT(2)
+#define ROPLL_SDM_NUM_SIGN_HBR2_MASK BIT(1)
+/* CMN_REG(0065) */
+#define ROPLL_SDM_NUM_MASK GENMASK(7, 0)
/* CMN_REG(0069) */
#define ROPLL_SDC_N_RBR_MASK GENMASK(2, 0)
+/* CMN_REG(006a) */
+#define ROPLL_SDC_N_HBR_MASK GENMASK(5, 3)
+#define ROPLL_SDC_N_HBR2_MASK GENMASK(2, 0)
+/* CMN_REG(006b) */
+#define ROPLL_SDC_N_HBR3_MASK GENMASK(3, 1)
+/* CMN_REG(006c) */
+#define ROPLL_SDC_NUM_MASK GENMASK(5, 0)
+/* cmn_reg0070 */
+#define ROPLL_SDC_DENO_MASK GENMASK(5, 0)
/* CMN_REG(0074) */
-#define ROPLL_SDC_NDIV_RSTN BIT(2)
-#define ROPLL_SSC_EN BIT(0)
+#define OVRD_ROPLL_SDC_NDIV_RSTN_MASK BIT(3)
+#define ROPLL_SDC_NDIV_RSTN_MASK BIT(2)
+#define OVRD_ROPLL_SSC_EN_MASK BIT(1)
+#define ROPLL_SSC_EN_MASK BIT(0)
+/* CMN_REG(0075) */
+#define ANA_ROPLL_SSC_FM_DEVIATION_MASK GENMASK(5, 0)
+/* CMN_REG(0076) */
+#define ANA_ROPLL_SSC_FM_FREQ_MASK GENMASK(6, 2)
+/* CMN_REG(0077) */
+#define ANA_ROPLL_SSC_CLK_DIV_SEL_MASK GENMASK(6, 3)
/* CMN_REG(0081) */
-#define OVRD_PLL_CD_CLK_EN BIT(8)
-#define PLL_CD_HSCLK_EAST_EN BIT(0)
+#define OVRD_PLL_CD_CLK_EN_MASK BIT(8)
+#define ANA_PLL_CD_TX_SER_RATE_SEL_MASK BIT(3)
+#define ANA_PLL_CD_HSCLK_WEST_EN_MASK BIT(1)
+#define ANA_PLL_CD_HSCLK_EAST_EN_MASK BIT(0)
+/* CMN_REG(0082) */
+#define ANA_PLL_CD_VREG_GAIN_CTRL_MASK GENMASK(3, 0)
+/* CMN_REG(0083) */
+#define ANA_PLL_CD_VREG_ICTRL_MASK GENMASK(6, 5)
+/* CMN_REG(0084) */
+#define PLL_LCRO_CLK_SEL_MASK BIT(5)
+/* CMN_REG(0085) */
+#define ANA_PLL_SYNC_LOSS_DET_MODE_MASK GENMASK(1, 0)
/* CMN_REG(0086) */
#define PLL_PCG_POSTDIV_SEL_MASK GENMASK(7, 4)
#define PLL_PCG_CLK_SEL_MASK GENMASK(3, 1)
-#define PLL_PCG_CLK_EN BIT(0)
+#define PLL_PCG_CLK_EN_MASK BIT(0)
/* CMN_REG(0087) */
-#define PLL_FRL_MODE_EN BIT(3)
-#define PLL_TX_HS_CLK_EN BIT(2)
+#define ANA_PLL_FRL_MODE_EN_MASK BIT(3)
+#define ANA_PLL_TX_HS_CLK_EN_MASK BIT(2)
/* CMN_REG(0089) */
-#define LCPLL_ALONE_MODE BIT(1)
+#define LCPLL_ALONE_MODE_MASK BIT(1)
+/* CMN_REG(0095) */
+#define DP_TX_LINK_BW_MASK GENMASK(1, 0)
/* CMN_REG(0097) */
-#define DIG_CLK_SEL BIT(1)
-#define ROPLL_REF BIT(1)
-#define LCPLL_REF 0
+#define DIG_CLK_SEL_MASK BIT(1)
+#define LCPLL_REF BIT(1)
+#define ROPLL_REF 0
/* CMN_REG(0099) */
-#define CMN_ROPLL_ALONE_MODE BIT(2)
+#define SSC_EN_MASK GENMASK(7, 6)
+#define CMN_ROPLL_ALONE_MODE_MASK BIT(2)
#define ROPLL_ALONE_MODE BIT(2)
/* CMN_REG(009a) */
-#define HS_SPEED_SEL BIT(0)
+#define HS_SPEED_SEL_MASK BIT(0)
#define DIV_10_CLOCK BIT(0)
/* CMN_REG(009b) */
-#define IS_SPEED_SEL BIT(4)
+#define LS_SPEED_SEL_MASK BIT(4)
#define LINK_SYMBOL_CLOCK BIT(4)
#define LINK_SYMBOL_CLOCK1_2 0
@@ -118,6 +183,8 @@
/* SB_REG(0104) */
#define OVRD_SB_EN_MASK BIT(5)
#define SB_EN_MASK BIT(4)
+#define OVRD_SB_AUX_EN_MASK BIT(1)
+#define SB_AUX_EN_MASK BIT(0)
/* SB_REG(0105) */
#define OVRD_SB_EARC_CMDC_EN_MASK BIT(6)
#define SB_EARC_CMDC_EN_MASK BIT(5)
@@ -126,6 +193,8 @@
#define ANA_SB_TX_LLVL_PROG_MASK GENMASK(6, 4)
/* SB_REG(0109) */
#define ANA_SB_DMRX_AFC_DIV_RATIO_MASK GENMASK(2, 0)
+/* SB_REG(010d) */
+#define ANA_SB_DMRX_LPBK_DATA_MASK BIT(4)
/* SB_REG(010f) */
#define OVRD_SB_VREG_EN_MASK BIT(7)
#define SB_VREG_EN_MASK BIT(6)
@@ -133,6 +202,7 @@
#define SB_VREG_LPF_BYPASS_MASK BIT(4)
#define ANA_SB_VREG_GAIN_CTRL_MASK GENMASK(3, 0)
/* SB_REG(0110) */
+#define ANA_SB_VREG_OUT_SEL_MASK BIT(1)
#define ANA_SB_VREG_REF_SEL_MASK BIT(0)
/* SB_REG(0113) */
#define SB_RX_RCAL_OPT_CODE_MASK GENMASK(5, 4)
@@ -147,13 +217,24 @@
#define AFC_RSTN_DELAY_TIME_MASK GENMASK(6, 4)
/* SB_REG(0117) */
#define FAST_PULSE_TIME_MASK GENMASK(3, 0)
+/* SB_REG(0118) */
+#define SB_TG_EARC_DMRX_RECVRD_CLK_CNT_MASK GENMASK(7, 0)
+/* SB_REG(011a) */
+#define SB_TG_CNT_RUN_NO_7_0_MASK GENMASK(7, 0)
/* SB_REG(011b) */
#define SB_EARC_SIG_DET_BYPASS_MASK BIT(4)
#define SB_AFC_TOL_MASK GENMASK(3, 0)
+/* SB_REG(011c) */
+#define SB_AFC_STB_NUM_MASK GENMASK(3, 0)
+/* SB_REG(011d) */
+#define SB_TG_OSC_CNT_MIN_MASK GENMASK(7, 0)
+/* SB_REG(011e) */
+#define SB_TG_OSC_CNT_MAX_MASK GENMASK(7, 0)
/* SB_REG(011f) */
#define SB_PWM_AFC_CTRL_MASK GENMASK(7, 2)
#define SB_RCAL_RSTN_MASK BIT(1)
/* SB_REG(0120) */
+#define SB_AUX_EN_IN_MASK BIT(7)
#define SB_EARC_EN_MASK BIT(1)
#define SB_EARC_AFC_EN_MASK BIT(2)
/* SB_REG(0123) */
@@ -161,39 +242,92 @@
#define SB_READY_MASK BIT(4)
/* LNTOP_REG(0200) */
-#define PROTOCOL_SEL BIT(2)
+#define PROTOCOL_SEL_MASK BIT(2)
#define HDMI_MODE BIT(2)
#define HDMI_TMDS_FRL_SEL BIT(1)
/* LNTOP_REG(0206) */
-#define DATA_BUS_SEL BIT(0)
+#define DATA_BUS_WIDTH_MASK GENMASK(2, 1)
+#define DATA_BUS_WIDTH_SEL_MASK BIT(0)
#define DATA_BUS_36_40 BIT(0)
/* LNTOP_REG(0207) */
-#define LANE_EN 0xf
+#define LANE_EN_MASK 0xf
#define ALL_LANE_EN 0xf
+/* LANE_REG(0301) */
+#define OVRD_LN_TX_DRV_EI_EN_MASK BIT(7)
+#define LN_TX_DRV_EI_EN_MASK BIT(6)
+/* LANE_REG(0303) */
+#define OVRD_LN_TX_DRV_LVL_CTRL_MASK BIT(5)
+#define LN_TX_DRV_LVL_CTRL_MASK GENMASK(4, 0)
+/* LANE_REG(0304) */
+#define OVRD_LN_TX_DRV_POST_LVL_CTRL_MASK BIT(4)
+#define LN_TX_DRV_POST_LVL_CTRL_MASK GENMASK(3, 0)
+/* LANE_REG(0305) */
+#define OVRD_LN_TX_DRV_PRE_LVL_CTRL_MASK BIT(6)
+#define LN_TX_DRV_PRE_LVL_CTRL_MASK GENMASK(5, 2)
+/* LANE_REG(0306) */
+#define LN_ANA_TX_DRV_IDRV_IDN_CTRL_MASK GENMASK(7, 5)
+#define LN_ANA_TX_DRV_IDRV_IUP_CTRL_MASK GENMASK(4, 2)
+#define LN_ANA_TX_DRV_ACCDRV_EN_MASK BIT(0)
+/* LANE_REG(0307) */
+#define LN_ANA_TX_DRV_ACCDRV_POL_SEL_MASK BIT(6)
+#define LN_ANA_TX_DRV_ACCDRV_CTRL_MASK GENMASK(5, 3)
+/* LANE_REG(030a) */
+#define LN_ANA_TX_JEQ_EN_MASK BIT(4)
+#define LN_TX_JEQ_EVEN_CTRL_RBR_MASK GENMASK(3, 0)
+/* LANE_REG(030b) */
+#define LN_TX_JEQ_EVEN_CTRL_HBR_MASK GENMASK(7, 4)
+#define LN_TX_JEQ_EVEN_CTRL_HBR2_MASK GENMASK(3, 0)
+/* LANE_REG(030c) */
+#define LN_TX_JEQ_ODD_CTRL_RBR_MASK GENMASK(3, 0)
+/* LANE_REG(030d) */
+#define LN_TX_JEQ_ODD_CTRL_HBR_MASK GENMASK(7, 4)
+#define LN_TX_JEQ_ODD_CTRL_HBR2_MASK GENMASK(3, 0)
+/* LANE_REG(0310) */
+#define LN_ANA_TX_SYNC_LOSS_DET_MODE_MASK GENMASK(1, 0)
+/* LANE_REG(0311) */
+#define LN_TX_SER_40BIT_EN_RBR_MASK BIT(3)
+#define LN_TX_SER_40BIT_EN_HBR_MASK BIT(2)
+#define LN_TX_SER_40BIT_EN_HBR2_MASK BIT(1)
/* LANE_REG(0312) */
-#define LN0_TX_SER_RATE_SEL_RBR BIT(5)
-#define LN0_TX_SER_RATE_SEL_HBR BIT(4)
-#define LN0_TX_SER_RATE_SEL_HBR2 BIT(3)
-#define LN0_TX_SER_RATE_SEL_HBR3 BIT(2)
+#define LN0_TX_SER_RATE_SEL_RBR_MASK BIT(5)
+#define LN0_TX_SER_RATE_SEL_HBR_MASK BIT(4)
+#define LN0_TX_SER_RATE_SEL_HBR2_MASK BIT(3)
+#define LN0_TX_SER_RATE_SEL_HBR3_MASK BIT(2)
+/* LANE_REG(0316) */
+#define LN_ANA_TX_SER_VREG_GAIN_CTRL_MASK GENMASK(3, 0)
+/* LANE_REG(031B) */
+#define LN_ANA_TX_RESERVED_MASK GENMASK(7, 0)
+/* LANE_REG(031e) */
+#define LN_POLARITY_INV_MASK BIT(2)
+#define LN_LANE_MODE_MASK BIT(1)
+
/* LANE_REG(0412) */
-#define LN1_TX_SER_RATE_SEL_RBR BIT(5)
-#define LN1_TX_SER_RATE_SEL_HBR BIT(4)
-#define LN1_TX_SER_RATE_SEL_HBR2 BIT(3)
-#define LN1_TX_SER_RATE_SEL_HBR3 BIT(2)
+#define LN1_TX_SER_RATE_SEL_RBR_MASK BIT(5)
+#define LN1_TX_SER_RATE_SEL_HBR_MASK BIT(4)
+#define LN1_TX_SER_RATE_SEL_HBR2_MASK BIT(3)
+#define LN1_TX_SER_RATE_SEL_HBR3_MASK BIT(2)
+
/* LANE_REG(0512) */
-#define LN2_TX_SER_RATE_SEL_RBR BIT(5)
-#define LN2_TX_SER_RATE_SEL_HBR BIT(4)
-#define LN2_TX_SER_RATE_SEL_HBR2 BIT(3)
-#define LN2_TX_SER_RATE_SEL_HBR3 BIT(2)
+#define LN2_TX_SER_RATE_SEL_RBR_MASK BIT(5)
+#define LN2_TX_SER_RATE_SEL_HBR_MASK BIT(4)
+#define LN2_TX_SER_RATE_SEL_HBR2_MASK BIT(3)
+#define LN2_TX_SER_RATE_SEL_HBR3_MASK BIT(2)
+
/* LANE_REG(0612) */
-#define LN3_TX_SER_RATE_SEL_RBR BIT(5)
-#define LN3_TX_SER_RATE_SEL_HBR BIT(4)
-#define LN3_TX_SER_RATE_SEL_HBR2 BIT(3)
-#define LN3_TX_SER_RATE_SEL_HBR3 BIT(2)
+#define LN3_TX_SER_RATE_SEL_RBR_MASK BIT(5)
+#define LN3_TX_SER_RATE_SEL_HBR_MASK BIT(4)
+#define LN3_TX_SER_RATE_SEL_HBR2_MASK BIT(3)
+#define LN3_TX_SER_RATE_SEL_HBR3_MASK BIT(2)
#define HDMI20_MAX_RATE 600000000
+enum dp_link_rate {
+ DP_BW_RBR,
+ DP_BW_HBR,
+ DP_BW_HBR2,
+};
+
struct lcpll_config {
u32 bit_rate;
u8 lcvco_mode_en;
@@ -255,6 +389,19 @@ struct ropll_config {
u8 cd_tx_ser_rate_sel;
};
+struct tx_drv_ctrl {
+ u8 tx_drv_lvl_ctrl;
+ u8 tx_drv_post_lvl_ctrl;
+ u8 ana_tx_drv_idrv_idn_ctrl;
+ u8 ana_tx_drv_idrv_iup_ctrl;
+ u8 ana_tx_drv_accdrv_en;
+ u8 ana_tx_drv_accdrv_ctrl;
+ u8 tx_drv_pre_lvl_ctrl;
+ u8 ana_tx_jeq_en;
+ u8 tx_jeq_even_ctrl;
+ u8 tx_jeq_odd_ctrl;
+};
+
enum rk_hdptx_reset {
RST_APB = 0,
RST_INIT,
@@ -263,11 +410,22 @@ enum rk_hdptx_reset {
RST_MAX
};
+#define MAX_HDPTX_PHY_NUM 2
+
+struct rk_hdptx_phy_cfg {
+ unsigned int num_phys;
+ unsigned int phy_ids[MAX_HDPTX_PHY_NUM];
+};
+
struct rk_hdptx_phy {
struct device *dev;
struct regmap *regmap;
struct regmap *grf;
+ /* PHY const config */
+ const struct rk_hdptx_phy_cfg *cfgs;
+ int phy_id;
+
struct phy *phy;
struct phy_config *phy_cfg;
struct clk_bulk_data *clks;
@@ -279,6 +437,10 @@ struct rk_hdptx_phy {
unsigned long rate;
atomic_t usage_count;
+
+ /* used for dp mode */
+ unsigned int link_rate;
+ unsigned int lanes;
};
static const struct ropll_config ropll_tmds_cfg[] = {
@@ -557,16 +719,100 @@ static const struct reg_sequence rk_hdtpx_tmds_lane_init_seq[] = {
REG_SEQ0(LANE_REG(0606), 0x1c),
};
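+
+/*
+ * DP TX driver settings, indexed by [voltage swing][pre-emphasis level].
+ * Each row lists the struct tx_drv_ctrl fields in declaration order:
+ * drv_lvl, drv_post_lvl, idrv_idn, idrv_iup, accdrv_en, accdrv_ctrl,
+ * drv_pre_lvl, jeq_en, jeq_even, jeq_odd.
+ */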
+static const struct tx_drv_ctrl tx_drv_ctrl_rbr[4][4] = {
+ /* voltage swing 0, pre-emphasis 0->3 */
+ {
+ { 0x2, 0x0, 0x4, 0x6, 0x0, 0x4, 0x1, 0x1, 0x7, 0x7 },
+ { 0x4, 0x3, 0x4, 0x6, 0x0, 0x4, 0x0, 0x1, 0x7, 0x7 },
+ { 0x7, 0x6, 0x4, 0x6, 0x0, 0x4, 0x0, 0x1, 0x7, 0x7 },
+ { 0xd, 0xc, 0x7, 0x7, 0x1, 0x7, 0x0, 0x1, 0x7, 0x7 },
+ },
+
+ /* voltage swing 1, pre-emphasis 0->2 */
+ {
+ { 0x4, 0x0, 0x4, 0x6, 0x0, 0x4, 0x1, 0x1, 0x7, 0x7 },
+ { 0x9, 0x5, 0x4, 0x6, 0x0, 0x4, 0x0, 0x1, 0x7, 0x7 },
+ { 0xc, 0x8, 0x7, 0x7, 0x1, 0x7, 0x0, 0x1, 0x7, 0x7 },
+ },
+
+ /* voltage swing 2, pre-emphasis 0->1 */
+ {
+ { 0x8, 0x0, 0x4, 0x6, 0x0, 0x4, 0x1, 0x1, 0x7, 0x7 },
+ { 0xc, 0x5, 0x7, 0x7, 0x1, 0x7, 0x0, 0x1, 0x7, 0x7 },
+ },
+
+ /* voltage swing 3, pre-emphasis 0 */
+ {
+ { 0xb, 0x0, 0x7, 0x7, 0x1, 0x4, 0x1, 0x1, 0x7, 0x7 },
+ }
+};
+
+static const struct tx_drv_ctrl tx_drv_ctrl_hbr[4][4] = {
+ /* voltage swing 0, pre-emphasis 0->3 */
+ {
+ { 0x2, 0x0, 0x4, 0x6, 0x0, 0x4, 0x1, 0x1, 0x7, 0x7 },
+ { 0x5, 0x4, 0x4, 0x6, 0x0, 0x4, 0x0, 0x1, 0x7, 0x7 },
+ { 0x9, 0x8, 0x4, 0x6, 0x0, 0x4, 0x0, 0x1, 0x7, 0x7 },
+ { 0xd, 0xc, 0x7, 0x7, 0x1, 0x7, 0x0, 0x1, 0x7, 0x7 },
+ },
+
+ /* voltage swing 1, pre-emphasis 0->2 */
+ {
+ { 0x6, 0x1, 0x4, 0x6, 0x0, 0x4, 0x1, 0x1, 0x7, 0x7 },
+ { 0xa, 0x6, 0x4, 0x6, 0x0, 0x4, 0x0, 0x1, 0x7, 0x7 },
+ { 0xc, 0x8, 0x7, 0x7, 0x1, 0x7, 0x0, 0x1, 0x7, 0x7 },
+ },
+
+ /* voltage swing 2, pre-emphasis 0->1 */
+ {
+ { 0x9, 0x1, 0x4, 0x6, 0x0, 0x4, 0x1, 0x1, 0x7, 0x7 },
+ { 0xd, 0x6, 0x7, 0x7, 0x1, 0x7, 0x0, 0x1, 0x7, 0x7 },
+ },
+
+ /* voltage swing 3, pre-emphasis 0 */
+ {
+ { 0xc, 0x1, 0x7, 0x7, 0x1, 0x4, 0x1, 0x1, 0x7, 0x7 },
+ }
+};
+
+static const struct tx_drv_ctrl tx_drv_ctrl_hbr2[4][4] = {
+ /* voltage swing 0, pre-emphasis 0->3 */
+ {
+ { 0x2, 0x1, 0x4, 0x6, 0x0, 0x4, 0x0, 0x1, 0x7, 0x7 },
+ { 0x5, 0x4, 0x4, 0x6, 0x0, 0x4, 0x0, 0x1, 0x7, 0x7 },
+ { 0x9, 0x8, 0x4, 0x6, 0x1, 0x4, 0x0, 0x1, 0x7, 0x7 },
+ { 0xd, 0xc, 0x7, 0x7, 0x1, 0x7, 0x0, 0x1, 0x7, 0x7 },
+ },
+
+ /* voltage swing 1, pre-emphasis 0->2 */
+ {
+ { 0x6, 0x1, 0x4, 0x6, 0x0, 0x4, 0x1, 0x1, 0x7, 0x7 },
+ { 0xb, 0x7, 0x4, 0x6, 0x0, 0x4, 0x0, 0x1, 0x7, 0x7 },
+ { 0xd, 0x9, 0x7, 0x7, 0x1, 0x7, 0x0, 0x1, 0x7, 0x7 },
+ },
+
+ /* voltage swing 2, pre-emphasis 0->1 */
+ {
+ { 0x8, 0x1, 0x4, 0x6, 0x0, 0x4, 0x1, 0x1, 0x7, 0x7 },
+ { 0xc, 0x6, 0x7, 0x7, 0x1, 0x7, 0x0, 0x1, 0x7, 0x7 },
+ },
+
+ /* voltage swing 3, pre-emphasis 0 */
+ {
+ { 0xb, 0x0, 0x7, 0x7, 0x1, 0x4, 0x1, 0x1, 0x7, 0x7 },
+ }
+};
+
static bool rk_hdptx_phy_is_rw_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
- case 0x0000 ... 0x029c:
- case 0x0400 ... 0x04a4:
- case 0x0800 ... 0x08a4:
- case 0x0c00 ... 0x0cb4:
- case 0x1000 ... 0x10b4:
- case 0x1400 ... 0x14b4:
- case 0x1800 ... 0x18b4:
+ case 0x0000 ... 0x029c: /* CMN Register */
+ case 0x0400 ... 0x04a4: /* Sideband Register */
+ case 0x0800 ... 0x08a4: /* Lane Top Register */
+ case 0x0c00 ... 0x0cb4: /* Lane 0 Register */
+ case 0x1000 ... 0x10b4: /* Lane 1 Register */
+ case 0x1400 ... 0x14b4: /* Lane 2 Register */
+ case 0x1800 ... 0x18b4: /* Lane 3 Register */
return true;
}
@@ -813,8 +1059,8 @@ static int rk_hdptx_ropll_tmds_cmn_config(struct rk_hdptx_phy *hdptx,
regmap_update_bits(hdptx->regmap, CMN_REG(0086), PLL_PCG_POSTDIV_SEL_MASK,
FIELD_PREP(PLL_PCG_POSTDIV_SEL_MASK, cfg->pms_sdiv));
- regmap_update_bits(hdptx->regmap, CMN_REG(0086), PLL_PCG_CLK_EN,
- PLL_PCG_CLK_EN);
+ regmap_update_bits(hdptx->regmap, CMN_REG(0086), PLL_PCG_CLK_EN_MASK,
+ FIELD_PREP(PLL_PCG_CLK_EN_MASK, 0x1));
return rk_hdptx_post_enable_pll(hdptx);
}
@@ -843,9 +1089,45 @@ static int rk_hdptx_ropll_tmds_mode_config(struct rk_hdptx_phy *hdptx,
return rk_hdptx_post_enable_lane(hdptx);
}
+static void rk_hdptx_dp_reset(struct rk_hdptx_phy *hdptx)
+{
+ reset_control_assert(hdptx->rsts[RST_LANE].rstc);
+ reset_control_assert(hdptx->rsts[RST_CMN].rstc);
+ reset_control_assert(hdptx->rsts[RST_INIT].rstc);
+
+ reset_control_assert(hdptx->rsts[RST_APB].rstc);
+ udelay(10);
+ reset_control_deassert(hdptx->rsts[RST_APB].rstc);
+
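+ /* Take manual control of the per-lane TX driver electrical-idle signals */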
+ regmap_update_bits(hdptx->regmap, LANE_REG(0301),
+ OVRD_LN_TX_DRV_EI_EN_MASK | LN_TX_DRV_EI_EN_MASK,
+ FIELD_PREP(OVRD_LN_TX_DRV_EI_EN_MASK, 1) |
+ FIELD_PREP(LN_TX_DRV_EI_EN_MASK, 0));
+ regmap_update_bits(hdptx->regmap, LANE_REG(0401),
+ OVRD_LN_TX_DRV_EI_EN_MASK | LN_TX_DRV_EI_EN_MASK,
+ FIELD_PREP(OVRD_LN_TX_DRV_EI_EN_MASK, 1) |
+ FIELD_PREP(LN_TX_DRV_EI_EN_MASK, 0));
+ regmap_update_bits(hdptx->regmap, LANE_REG(0501),
+ OVRD_LN_TX_DRV_EI_EN_MASK | LN_TX_DRV_EI_EN_MASK,
+ FIELD_PREP(OVRD_LN_TX_DRV_EI_EN_MASK, 1) |
+ FIELD_PREP(LN_TX_DRV_EI_EN_MASK, 0));
+ regmap_update_bits(hdptx->regmap, LANE_REG(0601),
+ OVRD_LN_TX_DRV_EI_EN_MASK | LN_TX_DRV_EI_EN_MASK,
+ FIELD_PREP(OVRD_LN_TX_DRV_EI_EN_MASK, 1) |
+ FIELD_PREP(LN_TX_DRV_EI_EN_MASK, 0));
+
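+ /*
+ * GRF registers take a write-enable mask in the upper 16 bits for the
+ * value written to the lower 16 bits, hence the "mask << 16" idiom.
+ */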
+ regmap_write(hdptx->grf, GRF_HDPTX_CON0,
+ HDPTX_I_PLL_EN << 16 | FIELD_PREP(HDPTX_I_PLL_EN, 0x0));
+ regmap_write(hdptx->grf, GRF_HDPTX_CON0,
+ HDPTX_I_BIAS_EN << 16 | FIELD_PREP(HDPTX_I_BIAS_EN, 0x0));
+ regmap_write(hdptx->grf, GRF_HDPTX_CON0,
+ HDPTX_I_BGR_EN << 16 | FIELD_PREP(HDPTX_I_BGR_EN, 0x0));
+}
+
static int rk_hdptx_phy_consumer_get(struct rk_hdptx_phy *hdptx,
unsigned int rate)
{
+ enum phy_mode mode = phy_get_mode(hdptx->phy);
u32 status;
int ret;
@@ -859,10 +1141,14 @@ static int rk_hdptx_phy_consumer_get(struct rk_hdptx_phy *hdptx,
if (status & HDPTX_O_PLL_LOCK_DONE)
dev_warn(hdptx->dev, "PLL locked by unknown consumer!\n");
- if (rate) {
- ret = rk_hdptx_ropll_tmds_cmn_config(hdptx, rate);
- if (ret)
- goto dec_usage;
+ if (mode == PHY_MODE_DP) {
+ rk_hdptx_dp_reset(hdptx);
+ } else {
+ if (rate) {
+ ret = rk_hdptx_ropll_tmds_cmn_config(hdptx, rate);
+ if (ret)
+ goto dec_usage;
+ }
}
return 0;
@@ -874,6 +1160,7 @@ dec_usage:
static int rk_hdptx_phy_consumer_put(struct rk_hdptx_phy *hdptx, bool force)
{
+ enum phy_mode mode = phy_get_mode(hdptx->phy);
u32 status;
int ret;
@@ -887,8 +1174,12 @@ static int rk_hdptx_phy_consumer_put(struct rk_hdptx_phy *hdptx, bool force)
} else {
ret = regmap_read(hdptx->grf, GRF_HDPTX_STATUS, &status);
if (!ret) {
- if (status & HDPTX_O_PLL_LOCK_DONE)
- rk_hdptx_phy_disable(hdptx);
+ if (status & HDPTX_O_PLL_LOCK_DONE) {
+ if (mode == PHY_MODE_DP)
+ rk_hdptx_dp_reset(hdptx);
+ else
+ rk_hdptx_phy_disable(hdptx);
+ }
return 0;
} else if (force) {
return 0;
@@ -899,11 +1190,262 @@ static int rk_hdptx_phy_consumer_put(struct rk_hdptx_phy *hdptx, bool force)
return ret;
}
+static void rk_hdptx_dp_pll_init(struct rk_hdptx_phy *hdptx)
+{
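+ /*
+ * Static ROPLL setup: each run of three writes below programs the
+ * RBR, HBR and HBR2 divider sets in turn; the active set is chosen
+ * later via DP_TX_LINK_BW in rk_hdptx_phy_set_rate().
+ */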
+ regmap_update_bits(hdptx->regmap, CMN_REG(003c), ANA_LCPLL_RESERVED7_MASK,
+ FIELD_PREP(ANA_LCPLL_RESERVED7_MASK, 0x1));
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(0046),
+ ROPLL_ANA_CPP_CTRL_COARSE_MASK | ROPLL_ANA_CPP_CTRL_FINE_MASK,
+ FIELD_PREP(ROPLL_ANA_CPP_CTRL_COARSE_MASK, 0xe) |
+ FIELD_PREP(ROPLL_ANA_CPP_CTRL_FINE_MASK, 0xe));
+ regmap_update_bits(hdptx->regmap, CMN_REG(0047),
+ ROPLL_ANA_LPF_C_SEL_COARSE_MASK |
+ ROPLL_ANA_LPF_C_SEL_FINE_MASK,
+ FIELD_PREP(ROPLL_ANA_LPF_C_SEL_COARSE_MASK, 0x4) |
+ FIELD_PREP(ROPLL_ANA_LPF_C_SEL_FINE_MASK, 0x4));
+
+ regmap_write(hdptx->regmap, CMN_REG(0051), FIELD_PREP(ROPLL_PMS_MDIV_MASK, 0x87));
+ regmap_write(hdptx->regmap, CMN_REG(0052), FIELD_PREP(ROPLL_PMS_MDIV_MASK, 0x71));
+ regmap_write(hdptx->regmap, CMN_REG(0053), FIELD_PREP(ROPLL_PMS_MDIV_MASK, 0x71));
+
+ regmap_write(hdptx->regmap, CMN_REG(0055),
+ FIELD_PREP(ROPLL_PMS_MDIV_AFC_MASK, 0x87));
+ regmap_write(hdptx->regmap, CMN_REG(0056),
+ FIELD_PREP(ROPLL_PMS_MDIV_AFC_MASK, 0x71));
+ regmap_write(hdptx->regmap, CMN_REG(0057),
+ FIELD_PREP(ROPLL_PMS_MDIV_AFC_MASK, 0x71));
+
+ regmap_write(hdptx->regmap, CMN_REG(0059),
+ FIELD_PREP(ANA_ROPLL_PMS_PDIV_MASK, 0x1) |
+ FIELD_PREP(ANA_ROPLL_PMS_REFDIV_MASK, 0x1));
+ regmap_write(hdptx->regmap, CMN_REG(005a),
+ FIELD_PREP(ROPLL_PMS_SDIV_RBR_MASK, 0x3) |
+ FIELD_PREP(ROPLL_PMS_SDIV_HBR_MASK, 0x1));
+ regmap_update_bits(hdptx->regmap, CMN_REG(005b), ROPLL_PMS_SDIV_HBR2_MASK,
+ FIELD_PREP(ROPLL_PMS_SDIV_HBR2_MASK, 0x0));
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(005e), ROPLL_SDM_EN_MASK,
+ FIELD_PREP(ROPLL_SDM_EN_MASK, 0x1));
+ regmap_update_bits(hdptx->regmap, CMN_REG(005e),
+ OVRD_ROPLL_SDM_RSTN_MASK | ROPLL_SDM_RSTN_MASK,
+ FIELD_PREP(OVRD_ROPLL_SDM_RSTN_MASK, 0x1) |
+ FIELD_PREP(ROPLL_SDM_RSTN_MASK, 0x1));
+ regmap_update_bits(hdptx->regmap, CMN_REG(005e), ROPLL_SDC_FRAC_EN_RBR_MASK,
+ FIELD_PREP(ROPLL_SDC_FRAC_EN_RBR_MASK, 0x1));
+ regmap_update_bits(hdptx->regmap, CMN_REG(005e), ROPLL_SDC_FRAC_EN_HBR_MASK,
+ FIELD_PREP(ROPLL_SDC_FRAC_EN_HBR_MASK, 0x1));
+ regmap_update_bits(hdptx->regmap, CMN_REG(005e), ROPLL_SDC_FRAC_EN_HBR2_MASK,
+ FIELD_PREP(ROPLL_SDC_FRAC_EN_HBR2_MASK, 0x1));
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(005f),
+ OVRD_ROPLL_SDC_RSTN_MASK | ROPLL_SDC_RSTN_MASK,
+ FIELD_PREP(OVRD_ROPLL_SDC_RSTN_MASK, 0x1) |
+ FIELD_PREP(ROPLL_SDC_RSTN_MASK, 0x1));
+ regmap_write(hdptx->regmap, CMN_REG(0060),
+ FIELD_PREP(ROPLL_SDM_DENOMINATOR_MASK, 0x21));
+ regmap_write(hdptx->regmap, CMN_REG(0061),
+ FIELD_PREP(ROPLL_SDM_DENOMINATOR_MASK, 0x27));
+ regmap_write(hdptx->regmap, CMN_REG(0062),
+ FIELD_PREP(ROPLL_SDM_DENOMINATOR_MASK, 0x27));
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(0064),
+ ROPLL_SDM_NUM_SIGN_RBR_MASK |
+ ROPLL_SDM_NUM_SIGN_HBR_MASK |
+ ROPLL_SDM_NUM_SIGN_HBR2_MASK,
+ FIELD_PREP(ROPLL_SDM_NUM_SIGN_RBR_MASK, 0x0) |
+ FIELD_PREP(ROPLL_SDM_NUM_SIGN_HBR_MASK, 0x1) |
+ FIELD_PREP(ROPLL_SDM_NUM_SIGN_HBR2_MASK, 0x1));
+ regmap_write(hdptx->regmap, CMN_REG(0065),
+ FIELD_PREP(ROPLL_SDM_NUM_MASK, 0x0));
+ regmap_write(hdptx->regmap, CMN_REG(0066),
+ FIELD_PREP(ROPLL_SDM_NUM_MASK, 0xd));
+ regmap_write(hdptx->regmap, CMN_REG(0067),
+ FIELD_PREP(ROPLL_SDM_NUM_MASK, 0xd));
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(0069), ROPLL_SDC_N_RBR_MASK,
+ FIELD_PREP(ROPLL_SDC_N_RBR_MASK, 0x2));
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(006a),
+ ROPLL_SDC_N_HBR_MASK | ROPLL_SDC_N_HBR2_MASK,
+ FIELD_PREP(ROPLL_SDC_N_HBR_MASK, 0x1) |
+ FIELD_PREP(ROPLL_SDC_N_HBR2_MASK, 0x1));
+
+ regmap_write(hdptx->regmap, CMN_REG(006c),
+ FIELD_PREP(ROPLL_SDC_NUM_MASK, 0x3));
+ regmap_write(hdptx->regmap, CMN_REG(006d),
+ FIELD_PREP(ROPLL_SDC_NUM_MASK, 0x7));
+ regmap_write(hdptx->regmap, CMN_REG(006e),
+ FIELD_PREP(ROPLL_SDC_NUM_MASK, 0x7));
+
+ regmap_write(hdptx->regmap, CMN_REG(0070),
+ FIELD_PREP(ROPLL_SDC_DENO_MASK, 0x8));
+ regmap_write(hdptx->regmap, CMN_REG(0071),
+ FIELD_PREP(ROPLL_SDC_DENO_MASK, 0x18));
+ regmap_write(hdptx->regmap, CMN_REG(0072),
+ FIELD_PREP(ROPLL_SDC_DENO_MASK, 0x18));
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(0074),
+ OVRD_ROPLL_SDC_NDIV_RSTN_MASK | ROPLL_SDC_NDIV_RSTN_MASK,
+ FIELD_PREP(OVRD_ROPLL_SDC_NDIV_RSTN_MASK, 0x1) |
+ FIELD_PREP(ROPLL_SDC_NDIV_RSTN_MASK, 0x1));
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(0077), ANA_ROPLL_SSC_CLK_DIV_SEL_MASK,
+ FIELD_PREP(ANA_ROPLL_SSC_CLK_DIV_SEL_MASK, 0x1));
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(0081), ANA_PLL_CD_TX_SER_RATE_SEL_MASK,
+ FIELD_PREP(ANA_PLL_CD_TX_SER_RATE_SEL_MASK, 0x0));
+ regmap_update_bits(hdptx->regmap, CMN_REG(0081),
+ ANA_PLL_CD_HSCLK_EAST_EN_MASK | ANA_PLL_CD_HSCLK_WEST_EN_MASK,
+ FIELD_PREP(ANA_PLL_CD_HSCLK_EAST_EN_MASK, 0x1) |
+ FIELD_PREP(ANA_PLL_CD_HSCLK_WEST_EN_MASK, 0x0));
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(0082), ANA_PLL_CD_VREG_GAIN_CTRL_MASK,
+ FIELD_PREP(ANA_PLL_CD_VREG_GAIN_CTRL_MASK, 0x4));
+ regmap_update_bits(hdptx->regmap, CMN_REG(0083), ANA_PLL_CD_VREG_ICTRL_MASK,
+ FIELD_PREP(ANA_PLL_CD_VREG_ICTRL_MASK, 0x1));
+ regmap_update_bits(hdptx->regmap, CMN_REG(0084), PLL_LCRO_CLK_SEL_MASK,
+ FIELD_PREP(PLL_LCRO_CLK_SEL_MASK, 0x1));
+ regmap_update_bits(hdptx->regmap, CMN_REG(0085), ANA_PLL_SYNC_LOSS_DET_MODE_MASK,
+ FIELD_PREP(ANA_PLL_SYNC_LOSS_DET_MODE_MASK, 0x3));
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(0087), ANA_PLL_TX_HS_CLK_EN_MASK,
+ FIELD_PREP(ANA_PLL_TX_HS_CLK_EN_MASK, 0x1));
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(0097), DIG_CLK_SEL_MASK,
+ FIELD_PREP(DIG_CLK_SEL_MASK, 0x1));
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(0099), CMN_ROPLL_ALONE_MODE_MASK,
+ FIELD_PREP(CMN_ROPLL_ALONE_MODE_MASK, 0x1));
+ regmap_update_bits(hdptx->regmap, CMN_REG(009a), HS_SPEED_SEL_MASK,
+ FIELD_PREP(HS_SPEED_SEL_MASK, 0x1));
+ regmap_update_bits(hdptx->regmap, CMN_REG(009b), LS_SPEED_SEL_MASK,
+ FIELD_PREP(LS_SPEED_SEL_MASK, 0x1));
+}
+
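+/*
+ * Bring up the sideband block that carries the DP AUX channel: program
+ * termination, level and timing parameters, enable bandgap and bias via
+ * GRF, release the init/cmn resets, then poll for the sideband-ready flag.
+ */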
+static int rk_hdptx_dp_aux_init(struct rk_hdptx_phy *hdptx)
+{
+ u32 status;
+ int ret;
+
+ regmap_update_bits(hdptx->regmap, SB_REG(0102), ANA_SB_RXTERM_OFFSP_MASK,
+ FIELD_PREP(ANA_SB_RXTERM_OFFSP_MASK, 0x3));
+ regmap_update_bits(hdptx->regmap, SB_REG(0103), ANA_SB_RXTERM_OFFSN_MASK,
+ FIELD_PREP(ANA_SB_RXTERM_OFFSN_MASK, 0x3));
+ regmap_update_bits(hdptx->regmap, SB_REG(0104), SB_AUX_EN_MASK,
+ FIELD_PREP(SB_AUX_EN_MASK, 0x1));
+ regmap_update_bits(hdptx->regmap, SB_REG(0105), ANA_SB_TX_HLVL_PROG_MASK,
+ FIELD_PREP(ANA_SB_TX_HLVL_PROG_MASK, 0x7));
+ regmap_update_bits(hdptx->regmap, SB_REG(0106), ANA_SB_TX_LLVL_PROG_MASK,
+ FIELD_PREP(ANA_SB_TX_LLVL_PROG_MASK, 0x7));
+
+ regmap_update_bits(hdptx->regmap, SB_REG(010d), ANA_SB_DMRX_LPBK_DATA_MASK,
+ FIELD_PREP(ANA_SB_DMRX_LPBK_DATA_MASK, 0x1));
+
+ regmap_update_bits(hdptx->regmap, SB_REG(010f), ANA_SB_VREG_GAIN_CTRL_MASK,
+ FIELD_PREP(ANA_SB_VREG_GAIN_CTRL_MASK, 0x0));
+ regmap_update_bits(hdptx->regmap, SB_REG(0110),
+ ANA_SB_VREG_OUT_SEL_MASK | ANA_SB_VREG_REF_SEL_MASK,
+ FIELD_PREP(ANA_SB_VREG_OUT_SEL_MASK, 0x1) |
+ FIELD_PREP(ANA_SB_VREG_REF_SEL_MASK, 0x1));
+
+ regmap_update_bits(hdptx->regmap, SB_REG(0113),
+ SB_RX_RCAL_OPT_CODE_MASK | SB_RX_RTERM_CTRL_MASK,
+ FIELD_PREP(SB_RX_RCAL_OPT_CODE_MASK, 0x1) |
+ FIELD_PREP(SB_RX_RTERM_CTRL_MASK, 0x3));
+ regmap_update_bits(hdptx->regmap, SB_REG(0114),
+ SB_TG_SB_EN_DELAY_TIME_MASK | SB_TG_RXTERM_EN_DELAY_TIME_MASK,
+ FIELD_PREP(SB_TG_SB_EN_DELAY_TIME_MASK, 0x2) |
+ FIELD_PREP(SB_TG_RXTERM_EN_DELAY_TIME_MASK, 0x2));
+ regmap_update_bits(hdptx->regmap, SB_REG(0115),
+ SB_READY_DELAY_TIME_MASK | SB_TG_OSC_EN_DELAY_TIME_MASK,
+ FIELD_PREP(SB_READY_DELAY_TIME_MASK, 0x2) |
+ FIELD_PREP(SB_TG_OSC_EN_DELAY_TIME_MASK, 0x2));
+ regmap_update_bits(hdptx->regmap, SB_REG(0116),
+ AFC_RSTN_DELAY_TIME_MASK,
+ FIELD_PREP(AFC_RSTN_DELAY_TIME_MASK, 0x2));
+ regmap_update_bits(hdptx->regmap, SB_REG(0117),
+ FAST_PULSE_TIME_MASK,
+ FIELD_PREP(FAST_PULSE_TIME_MASK, 0x4));
+ regmap_update_bits(hdptx->regmap, SB_REG(0118),
+ SB_TG_EARC_DMRX_RECVRD_CLK_CNT_MASK,
+ FIELD_PREP(SB_TG_EARC_DMRX_RECVRD_CLK_CNT_MASK, 0xa));
+
+ regmap_update_bits(hdptx->regmap, SB_REG(011a), SB_TG_CNT_RUN_NO_7_0_MASK,
+ FIELD_PREP(SB_TG_CNT_RUN_NO_7_0_MASK, 0x3));
+ regmap_update_bits(hdptx->regmap, SB_REG(011b),
+ SB_EARC_SIG_DET_BYPASS_MASK | SB_AFC_TOL_MASK,
+ FIELD_PREP(SB_EARC_SIG_DET_BYPASS_MASK, 0x1) |
+ FIELD_PREP(SB_AFC_TOL_MASK, 0x3));
+ regmap_update_bits(hdptx->regmap, SB_REG(011c), SB_AFC_STB_NUM_MASK,
+ FIELD_PREP(SB_AFC_STB_NUM_MASK, 0x4));
+ regmap_update_bits(hdptx->regmap, SB_REG(011d), SB_TG_OSC_CNT_MIN_MASK,
+ FIELD_PREP(SB_TG_OSC_CNT_MIN_MASK, 0x67));
+ regmap_update_bits(hdptx->regmap, SB_REG(011e), SB_TG_OSC_CNT_MAX_MASK,
+ FIELD_PREP(SB_TG_OSC_CNT_MAX_MASK, 0x6a));
+ regmap_update_bits(hdptx->regmap, SB_REG(011f), SB_PWM_AFC_CTRL_MASK,
+ FIELD_PREP(SB_PWM_AFC_CTRL_MASK, 0x5));
+ regmap_update_bits(hdptx->regmap, SB_REG(011f), SB_RCAL_RSTN_MASK,
+ FIELD_PREP(SB_RCAL_RSTN_MASK, 0x1));
+ regmap_update_bits(hdptx->regmap, SB_REG(0120), SB_AUX_EN_IN_MASK,
+ FIELD_PREP(SB_AUX_EN_IN_MASK, 0x1));
+
+ regmap_update_bits(hdptx->regmap, SB_REG(0102), OVRD_SB_RXTERM_EN_MASK,
+ FIELD_PREP(OVRD_SB_RXTERM_EN_MASK, 0x1));
+ regmap_update_bits(hdptx->regmap, SB_REG(0103), OVRD_SB_RX_RESCAL_DONE_MASK,
+ FIELD_PREP(OVRD_SB_RX_RESCAL_DONE_MASK, 0x1));
+ regmap_update_bits(hdptx->regmap, SB_REG(0104), OVRD_SB_EN_MASK,
+ FIELD_PREP(OVRD_SB_EN_MASK, 0x1));
+ regmap_update_bits(hdptx->regmap, SB_REG(0104), OVRD_SB_AUX_EN_MASK,
+ FIELD_PREP(OVRD_SB_AUX_EN_MASK, 0x1));
+
+ regmap_update_bits(hdptx->regmap, SB_REG(010f), OVRD_SB_VREG_EN_MASK,
+ FIELD_PREP(OVRD_SB_VREG_EN_MASK, 0x1));
+
+ regmap_write(hdptx->grf, GRF_HDPTX_CON0,
+ HDPTX_I_BGR_EN << 16 | FIELD_PREP(HDPTX_I_BGR_EN, 0x1));
+ regmap_write(hdptx->grf, GRF_HDPTX_CON0,
+ HDPTX_I_BIAS_EN << 16 | FIELD_PREP(HDPTX_I_BIAS_EN, 0x1));
+ usleep_range(20, 25);
+
+ reset_control_deassert(hdptx->rsts[RST_INIT].rstc);
+ usleep_range(20, 25);
+ reset_control_deassert(hdptx->rsts[RST_CMN].rstc);
+ usleep_range(20, 25);
+
+ regmap_update_bits(hdptx->regmap, SB_REG(0103), OVRD_SB_RX_RESCAL_DONE_MASK,
+ FIELD_PREP(OVRD_SB_RX_RESCAL_DONE_MASK, 0x1));
+ usleep_range(100, 110);
+ regmap_update_bits(hdptx->regmap, SB_REG(0104), SB_EN_MASK,
+ FIELD_PREP(SB_EN_MASK, 0x1));
+ usleep_range(100, 110);
+ regmap_update_bits(hdptx->regmap, SB_REG(0102), SB_RXTERM_EN_MASK,
+ FIELD_PREP(SB_RXTERM_EN_MASK, 0x1));
+ usleep_range(20, 25);
+ regmap_update_bits(hdptx->regmap, SB_REG(010f), SB_VREG_EN_MASK,
+ FIELD_PREP(SB_VREG_EN_MASK, 0x1));
+ usleep_range(20, 25);
+ regmap_update_bits(hdptx->regmap, SB_REG(0104), SB_AUX_EN_MASK,
+ FIELD_PREP(SB_AUX_EN_MASK, 0x1));
+ usleep_range(100, 110);
+
+ ret = regmap_read_poll_timeout(hdptx->grf, GRF_HDPTX_STATUS,
+ status, FIELD_GET(HDPTX_O_SB_RDY, status),
+ 50, 1000);
+ if (ret) {
+ dev_err(hdptx->dev, "Failed to get phy sb ready: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int rk_hdptx_phy_power_on(struct phy *phy)
{
struct rk_hdptx_phy *hdptx = phy_get_drvdata(phy);
int bus_width = phy_get_bus_width(hdptx->phy);
- int ret;
+ enum phy_mode mode = phy_get_mode(phy);
+ int ret, lane;
/*
* FIXME: Temporary workaround to pass pixel_clk_rate
@@ -919,9 +1461,37 @@ static int rk_hdptx_phy_power_on(struct phy *phy)
if (ret)
return ret;
- ret = rk_hdptx_ropll_tmds_mode_config(hdptx, rate);
- if (ret)
- rk_hdptx_phy_consumer_put(hdptx, true);
+ if (mode == PHY_MODE_DP) {
+ regmap_write(hdptx->grf, GRF_HDPTX_CON0,
+ HDPTX_MODE_SEL << 16 | FIELD_PREP(HDPTX_MODE_SEL, 0x1));
+
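+ /* Put all four lanes in DP mode with normal polarity */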
+ for (lane = 0; lane < 4; lane++) {
+ regmap_update_bits(hdptx->regmap, LANE_REG(031e) + 0x400 * lane,
+ LN_POLARITY_INV_MASK | LN_LANE_MODE_MASK,
+ FIELD_PREP(LN_POLARITY_INV_MASK, 0) |
+ FIELD_PREP(LN_LANE_MODE_MASK, 1));
+ }
+
+ regmap_update_bits(hdptx->regmap, LNTOP_REG(0200), PROTOCOL_SEL_MASK,
+ FIELD_PREP(PROTOCOL_SEL_MASK, 0x0));
+ regmap_update_bits(hdptx->regmap, LNTOP_REG(0206), DATA_BUS_WIDTH_MASK,
+ FIELD_PREP(DATA_BUS_WIDTH_MASK, 0x1));
+ regmap_update_bits(hdptx->regmap, LNTOP_REG(0206), DATA_BUS_WIDTH_SEL_MASK,
+ FIELD_PREP(DATA_BUS_WIDTH_SEL_MASK, 0x0));
+
+ rk_hdptx_dp_pll_init(hdptx);
+
+ ret = rk_hdptx_dp_aux_init(hdptx);
+ if (ret)
+ rk_hdptx_phy_consumer_put(hdptx, true);
+ } else {
+ regmap_write(hdptx->grf, GRF_HDPTX_CON0,
+ HDPTX_MODE_SEL << 16 | FIELD_PREP(HDPTX_MODE_SEL, 0x0));
+
+ ret = rk_hdptx_ropll_tmds_mode_config(hdptx, rate);
+ if (ret)
+ rk_hdptx_phy_consumer_put(hdptx, true);
+ }
return ret;
}
@@ -933,9 +1503,308 @@ static int rk_hdptx_phy_power_off(struct phy *phy)
return rk_hdptx_phy_consumer_put(hdptx, false);
}
+static int rk_hdptx_phy_verify_config(struct rk_hdptx_phy *hdptx,
+ struct phy_configure_opts_dp *dp)
+{
+ int i;
+
+ if (dp->set_rate) {
+ switch (dp->link_rate) {
+ case 1620:
+ case 2700:
+ case 5400:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (dp->set_lanes) {
+ switch (dp->lanes) {
+ case 1:
+ case 2:
+ case 4:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (dp->set_voltages) {
+ for (i = 0; i < hdptx->lanes; i++) {
+ if (dp->voltage[i] > 3 || dp->pre[i] > 3)
+ return -EINVAL;
+
+ if (dp->voltage[i] + dp->pre[i] > 3)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int rk_hdptx_phy_set_rate(struct rk_hdptx_phy *hdptx,
+ struct phy_configure_opts_dp *dp)
+{
+ u32 bw, status;
+ int ret;
+
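+ /* Gate the PLL via GRF while the link-rate dividers are reprogrammed */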
+ regmap_write(hdptx->grf, GRF_HDPTX_CON0,
+ HDPTX_I_PLL_EN << 16 | FIELD_PREP(HDPTX_I_PLL_EN, 0x0));
+
+ switch (dp->link_rate) {
+ case 1620:
+ bw = DP_BW_RBR;
+ break;
+ case 2700:
+ bw = DP_BW_HBR;
+ break;
+ case 5400:
+ bw = DP_BW_HBR2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ hdptx->link_rate = dp->link_rate;
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(0008), OVRD_LCPLL_EN_MASK | LCPLL_EN_MASK,
+ FIELD_PREP(OVRD_LCPLL_EN_MASK, 0x1) |
+ FIELD_PREP(LCPLL_EN_MASK, 0x0));
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(003d), OVRD_ROPLL_EN_MASK | ROPLL_EN_MASK,
+ FIELD_PREP(OVRD_ROPLL_EN_MASK, 0x1) |
+ FIELD_PREP(ROPLL_EN_MASK, 0x1));
+
+ if (dp->ssc) {
+ regmap_update_bits(hdptx->regmap, CMN_REG(0074),
+ OVRD_ROPLL_SSC_EN_MASK | ROPLL_SSC_EN_MASK,
+ FIELD_PREP(OVRD_ROPLL_SSC_EN_MASK, 0x1) |
+ FIELD_PREP(ROPLL_SSC_EN_MASK, 0x1));
+ regmap_write(hdptx->regmap, CMN_REG(0075),
+ FIELD_PREP(ANA_ROPLL_SSC_FM_DEVIATION_MASK, 0xc));
+ regmap_update_bits(hdptx->regmap, CMN_REG(0076),
+ ANA_ROPLL_SSC_FM_FREQ_MASK,
+ FIELD_PREP(ANA_ROPLL_SSC_FM_FREQ_MASK, 0x1f));
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(0099), SSC_EN_MASK,
+ FIELD_PREP(SSC_EN_MASK, 0x2));
+ } else {
+ regmap_update_bits(hdptx->regmap, CMN_REG(0074),
+ OVRD_ROPLL_SSC_EN_MASK | ROPLL_SSC_EN_MASK,
+ FIELD_PREP(OVRD_ROPLL_SSC_EN_MASK, 0x1) |
+ FIELD_PREP(ROPLL_SSC_EN_MASK, 0x0));
+ regmap_write(hdptx->regmap, CMN_REG(0075),
+ FIELD_PREP(ANA_ROPLL_SSC_FM_DEVIATION_MASK, 0x20));
+ regmap_update_bits(hdptx->regmap, CMN_REG(0076),
+ ANA_ROPLL_SSC_FM_FREQ_MASK,
+ FIELD_PREP(ANA_ROPLL_SSC_FM_FREQ_MASK, 0xc));
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(0099), SSC_EN_MASK,
+ FIELD_PREP(SSC_EN_MASK, 0x0));
+ }
+
+ regmap_update_bits(hdptx->regmap, CMN_REG(0095), DP_TX_LINK_BW_MASK,
+ FIELD_PREP(DP_TX_LINK_BW_MASK, bw));
+
+ regmap_write(hdptx->grf, GRF_HDPTX_CON0,
+ HDPTX_I_PLL_EN << 16 | FIELD_PREP(HDPTX_I_PLL_EN, 0x1));
+
+ ret = regmap_read_poll_timeout(hdptx->grf, GRF_HDPTX_STATUS,
+ status, FIELD_GET(HDPTX_O_PLL_LOCK_DONE, status),
+ 50, 1000);
+ if (ret) {
+ dev_err(hdptx->dev, "Failed to get phy pll lock: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rk_hdptx_phy_set_lanes(struct rk_hdptx_phy *hdptx,
+ struct phy_configure_opts_dp *dp)
+{
+ hdptx->lanes = dp->lanes;
+
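+ /* Enable the lowest 'lanes' lanes; LANE_EN has one bit per lane */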
+ regmap_update_bits(hdptx->regmap, LNTOP_REG(0207), LANE_EN_MASK,
+ FIELD_PREP(LANE_EN_MASK, GENMASK(hdptx->lanes - 1, 0)));
+
+ return 0;
+}
+
+static void rk_hdptx_phy_set_voltage(struct rk_hdptx_phy *hdptx,
+ struct phy_configure_opts_dp *dp,
+ u8 lane)
+{
+ const struct tx_drv_ctrl *ctrl;
+ u32 offset = lane * 0x400;
+
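+ /* Per-lane register blocks are spaced 0x400 apart */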
+ switch (hdptx->link_rate) {
+ case 1620:
+ ctrl = &tx_drv_ctrl_rbr[dp->voltage[lane]][dp->pre[lane]];
+ regmap_update_bits(hdptx->regmap, LANE_REG(030a) + offset,
+ LN_TX_JEQ_EVEN_CTRL_RBR_MASK,
+ FIELD_PREP(LN_TX_JEQ_EVEN_CTRL_RBR_MASK,
+ ctrl->tx_jeq_even_ctrl));
+ regmap_update_bits(hdptx->regmap, LANE_REG(030c) + offset,
+ LN_TX_JEQ_ODD_CTRL_RBR_MASK,
+ FIELD_PREP(LN_TX_JEQ_ODD_CTRL_RBR_MASK,
+ ctrl->tx_jeq_odd_ctrl));
+ regmap_update_bits(hdptx->regmap, LANE_REG(0311) + offset,
+ LN_TX_SER_40BIT_EN_RBR_MASK,
+ FIELD_PREP(LN_TX_SER_40BIT_EN_RBR_MASK, 0x1));
+ break;
+ case 2700:
+ ctrl = &tx_drv_ctrl_hbr[dp->voltage[lane]][dp->pre[lane]];
+ regmap_update_bits(hdptx->regmap, LANE_REG(030b) + offset,
+ LN_TX_JEQ_EVEN_CTRL_HBR_MASK,
+ FIELD_PREP(LN_TX_JEQ_EVEN_CTRL_HBR_MASK,
+ ctrl->tx_jeq_even_ctrl));
+ regmap_update_bits(hdptx->regmap, LANE_REG(030d) + offset,
+ LN_TX_JEQ_ODD_CTRL_HBR_MASK,
+ FIELD_PREP(LN_TX_JEQ_ODD_CTRL_HBR_MASK,
+ ctrl->tx_jeq_odd_ctrl));
+ regmap_update_bits(hdptx->regmap, LANE_REG(0311) + offset,
+ LN_TX_SER_40BIT_EN_HBR_MASK,
+ FIELD_PREP(LN_TX_SER_40BIT_EN_HBR_MASK, 0x1));
+ break;
+ case 5400:
+ default:
+ ctrl = &tx_drv_ctrl_hbr2[dp->voltage[lane]][dp->pre[lane]];
+ regmap_update_bits(hdptx->regmap, LANE_REG(030b) + offset,
+ LN_TX_JEQ_EVEN_CTRL_HBR2_MASK,
+ FIELD_PREP(LN_TX_JEQ_EVEN_CTRL_HBR2_MASK,
+ ctrl->tx_jeq_even_ctrl));
+ regmap_update_bits(hdptx->regmap, LANE_REG(030d) + offset,
+ LN_TX_JEQ_ODD_CTRL_HBR2_MASK,
+ FIELD_PREP(LN_TX_JEQ_ODD_CTRL_HBR2_MASK,
+ ctrl->tx_jeq_odd_ctrl));
+ regmap_update_bits(hdptx->regmap, LANE_REG(0311) + offset,
+ LN_TX_SER_40BIT_EN_HBR2_MASK,
+ FIELD_PREP(LN_TX_SER_40BIT_EN_HBR2_MASK, 0x1));
+ break;
+ }
+
+ regmap_update_bits(hdptx->regmap, LANE_REG(0303) + offset,
+ OVRD_LN_TX_DRV_LVL_CTRL_MASK | LN_TX_DRV_LVL_CTRL_MASK,
+ FIELD_PREP(OVRD_LN_TX_DRV_LVL_CTRL_MASK, 0x1) |
+ FIELD_PREP(LN_TX_DRV_LVL_CTRL_MASK,
+ ctrl->tx_drv_lvl_ctrl));
+ regmap_update_bits(hdptx->regmap, LANE_REG(0304) + offset,
+ OVRD_LN_TX_DRV_POST_LVL_CTRL_MASK |
+ LN_TX_DRV_POST_LVL_CTRL_MASK,
+ FIELD_PREP(OVRD_LN_TX_DRV_POST_LVL_CTRL_MASK, 0x1) |
+ FIELD_PREP(LN_TX_DRV_POST_LVL_CTRL_MASK,
+ ctrl->tx_drv_post_lvl_ctrl));
+ regmap_update_bits(hdptx->regmap, LANE_REG(0305) + offset,
+ OVRD_LN_TX_DRV_PRE_LVL_CTRL_MASK |
+ LN_TX_DRV_PRE_LVL_CTRL_MASK,
+ FIELD_PREP(OVRD_LN_TX_DRV_PRE_LVL_CTRL_MASK, 0x1) |
+ FIELD_PREP(LN_TX_DRV_PRE_LVL_CTRL_MASK,
+ ctrl->tx_drv_pre_lvl_ctrl));
+ regmap_update_bits(hdptx->regmap, LANE_REG(0306) + offset,
+ LN_ANA_TX_DRV_IDRV_IDN_CTRL_MASK |
+ LN_ANA_TX_DRV_IDRV_IUP_CTRL_MASK |
+ LN_ANA_TX_DRV_ACCDRV_EN_MASK,
+ FIELD_PREP(LN_ANA_TX_DRV_IDRV_IDN_CTRL_MASK,
+ ctrl->ana_tx_drv_idrv_idn_ctrl) |
+ FIELD_PREP(LN_ANA_TX_DRV_IDRV_IUP_CTRL_MASK,
+ ctrl->ana_tx_drv_idrv_iup_ctrl) |
+ FIELD_PREP(LN_ANA_TX_DRV_ACCDRV_EN_MASK,
+ ctrl->ana_tx_drv_accdrv_en));
+ regmap_update_bits(hdptx->regmap, LANE_REG(0307) + offset,
+ LN_ANA_TX_DRV_ACCDRV_POL_SEL_MASK |
+ LN_ANA_TX_DRV_ACCDRV_CTRL_MASK,
+ FIELD_PREP(LN_ANA_TX_DRV_ACCDRV_POL_SEL_MASK, 0x1) |
+ FIELD_PREP(LN_ANA_TX_DRV_ACCDRV_CTRL_MASK,
+ ctrl->ana_tx_drv_accdrv_ctrl));
+
+ regmap_update_bits(hdptx->regmap, LANE_REG(030a) + offset,
+ LN_ANA_TX_JEQ_EN_MASK,
+ FIELD_PREP(LN_ANA_TX_JEQ_EN_MASK, ctrl->ana_tx_jeq_en));
+
+ regmap_update_bits(hdptx->regmap, LANE_REG(0310) + offset,
+ LN_ANA_TX_SYNC_LOSS_DET_MODE_MASK,
+ FIELD_PREP(LN_ANA_TX_SYNC_LOSS_DET_MODE_MASK, 0x3));
+
+ regmap_update_bits(hdptx->regmap, LANE_REG(0316) + offset,
+ LN_ANA_TX_SER_VREG_GAIN_CTRL_MASK,
+ FIELD_PREP(LN_ANA_TX_SER_VREG_GAIN_CTRL_MASK, 0x2));
+
+ regmap_update_bits(hdptx->regmap, LANE_REG(031b) + offset,
+ LN_ANA_TX_RESERVED_MASK,
+ FIELD_PREP(LN_ANA_TX_RESERVED_MASK, 0x1));
+}
+
+static int rk_hdptx_phy_set_voltages(struct rk_hdptx_phy *hdptx,
+ struct phy_configure_opts_dp *dp)
+{
+ u8 lane;
+ u32 status;
+ int ret;
+
+ for (lane = 0; lane < hdptx->lanes; lane++)
+ rk_hdptx_phy_set_voltage(hdptx, dp, lane);
+
+ reset_control_deassert(hdptx->rsts[RST_LANE].rstc);
+
+ ret = regmap_read_poll_timeout(hdptx->grf, GRF_HDPTX_STATUS,
+ status, FIELD_GET(HDPTX_O_PHY_RDY, status),
+ 50, 5000);
+ if (ret) {
+ dev_err(hdptx->dev, "Failed to get phy ready: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rk_hdptx_phy_configure(struct phy *phy, union phy_configure_opts *opts)
+{
+ struct rk_hdptx_phy *hdptx = phy_get_drvdata(phy);
+ enum phy_mode mode = phy_get_mode(phy);
+ int ret;
+
+ if (mode != PHY_MODE_DP)
+ return 0;
+
+ ret = rk_hdptx_phy_verify_config(hdptx, &opts->dp);
+ if (ret) {
+ dev_err(hdptx->dev, "invalid params for phy configure\n");
+ return ret;
+ }
+
+ if (opts->dp.set_rate) {
+ ret = rk_hdptx_phy_set_rate(hdptx, &opts->dp);
+ if (ret) {
+ dev_err(hdptx->dev, "failed to set rate: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (opts->dp.set_lanes) {
+ ret = rk_hdptx_phy_set_lanes(hdptx, &opts->dp);
+ if (ret) {
+ dev_err(hdptx->dev, "failed to set lanes: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (opts->dp.set_voltages) {
+ ret = rk_hdptx_phy_set_voltages(hdptx, &opts->dp);
+ if (ret) {
+ dev_err(hdptx->dev, "failed to set voltages: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static const struct phy_ops rk_hdptx_phy_ops = {
.power_on = rk_hdptx_phy_power_on,
.power_off = rk_hdptx_phy_power_off,
+ .configure = rk_hdptx_phy_configure,
.owner = THIS_MODULE,
};
@@ -1007,15 +1876,14 @@ static int rk_hdptx_phy_clk_register(struct rk_hdptx_phy *hdptx)
struct device *dev = hdptx->dev;
const char *name, *pname;
struct clk *refclk;
- int ret, id;
+ int ret;
refclk = devm_clk_get(dev, "ref");
if (IS_ERR(refclk))
return dev_err_probe(dev, PTR_ERR(refclk),
"Failed to get ref clock\n");
- id = of_alias_get_id(dev->of_node, "hdptxphy");
- name = id > 0 ? "clk_hdmiphy_pixel1" : "clk_hdmiphy_pixel0";
+ name = hdptx->phy_id > 0 ? "clk_hdmiphy_pixel1" : "clk_hdmiphy_pixel0";
pname = __clk_get_name(refclk);
hdptx->hw.init = CLK_HW_INIT(name, pname, &hdptx_phy_clk_ops,
@@ -1058,8 +1926,9 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev)
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
struct rk_hdptx_phy *hdptx;
+ struct resource *res;
void __iomem *regs;
- int ret;
+ int ret, id;
hdptx = devm_kzalloc(dev, sizeof(*hdptx), GFP_KERNEL);
if (!hdptx)
@@ -1067,11 +1936,27 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev)
hdptx->dev = dev;
- regs = devm_platform_ioremap_resource(pdev, 0);
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs))
return dev_err_probe(dev, PTR_ERR(regs),
"Failed to ioremap resource\n");
+ hdptx->cfgs = device_get_match_data(dev);
+ if (!hdptx->cfgs)
+ return dev_err_probe(dev, -EINVAL, "missing match data\n");
+
+ /* Find the PHY index from the I/O base address */
+ hdptx->phy_id = -ENODEV;
+ for (id = 0; id < hdptx->cfgs->num_phys; id++) {
+ if (res->start == hdptx->cfgs->phy_ids[id]) {
+ hdptx->phy_id = id;
+ break;
+ }
+ }
+
+ if (hdptx->phy_id < 0)
+ return dev_err_probe(dev, -ENODEV, "no matching device found\n");
+
ret = devm_clk_bulk_get_all(dev, &hdptx->clks);
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to get clocks\n");
@@ -1132,8 +2017,30 @@ static const struct dev_pm_ops rk_hdptx_phy_pm_ops = {
rk_hdptx_phy_runtime_resume, NULL)
};
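+/*
+ * phy_ids hold each PHY instance's MMIO base address; probe matches the
+ * device's register resource against this list to derive the PHY index.
+ */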
+static const struct rk_hdptx_phy_cfg rk3576_hdptx_phy_cfgs = {
+ .num_phys = 1,
+ .phy_ids = {
+ 0x2b000000,
+ },
+};
+
+static const struct rk_hdptx_phy_cfg rk3588_hdptx_phy_cfgs = {
+ .num_phys = 2,
+ .phy_ids = {
+ 0xfed60000,
+ 0xfed70000,
+ },
+};
+
static const struct of_device_id rk_hdptx_phy_of_match[] = {
- { .compatible = "rockchip,rk3588-hdptx-phy", },
+ {
+ .compatible = "rockchip,rk3576-hdptx-phy",
+ .data = &rk3576_hdptx_phy_cfgs
+ },
+ {
+ .compatible = "rockchip,rk3588-hdptx-phy",
+ .data = &rk3588_hdptx_phy_cfgs
+ },
{}
};
MODULE_DEVICE_TABLE(of, rk_hdptx_phy_of_match);
@@ -1150,5 +2057,6 @@ module_platform_driver(rk_hdptx_phy_driver);
MODULE_AUTHOR("Algea Cao <algea.cao@rock-chips.com>");
MODULE_AUTHOR("Cristian Ciocaltea <cristian.ciocaltea@collabora.com>");
+MODULE_AUTHOR("Damon Ding <damon.ding@rock-chips.com>");
MODULE_DESCRIPTION("Samsung HDMI/eDP Transmitter Combo PHY Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/phy/rockchip/phy-rockchip-usbdp.c b/drivers/phy/rockchip/phy-rockchip-usbdp.c
index 5b1e8a3806ed..c066cc0a7b4f 100644
--- a/drivers/phy/rockchip/phy-rockchip-usbdp.c
+++ b/drivers/phy/rockchip/phy-rockchip-usbdp.c
@@ -187,6 +187,8 @@ struct rk_udphy {
u32 dp_aux_din_sel;
bool dp_sink_hpd_sel;
bool dp_sink_hpd_cfg;
+ unsigned int link_rate;
+ unsigned int lanes;
u8 bw;
int id;
@@ -978,7 +980,7 @@ static int rk_udphy_parse_dt(struct rk_udphy *udphy)
if (device_property_present(dev, "maximum-speed")) {
maximum_speed = usb_get_maximum_speed(dev);
- udphy->hs = maximum_speed <= USB_SPEED_HIGH ? true : false;
+ udphy->hs = maximum_speed <= USB_SPEED_HIGH;
}
ret = rk_udphy_clk_init(udphy, dev);
@@ -1045,7 +1047,6 @@ static int rk_udphy_dp_phy_init(struct phy *phy)
mutex_lock(&udphy->mutex);
udphy->dp_in_use = true;
- rk_udphy_dp_hpd_event_trigger(udphy, udphy->dp_sink_hpd_cfg);
mutex_unlock(&udphy->mutex);
@@ -1103,15 +1104,19 @@ static int rk_udphy_dp_phy_power_off(struct phy *phy)
return 0;
}
-static int rk_udphy_dp_phy_verify_link_rate(unsigned int link_rate)
+/*
+ * Verify link rate
+ */
+static int rk_udphy_dp_phy_verify_link_rate(struct rk_udphy *udphy,
+ struct phy_configure_opts_dp *dp)
{
- switch (link_rate) {
+ switch (dp->link_rate) {
case 1620:
case 2700:
case 5400:
case 8100:
+ udphy->link_rate = dp->link_rate;
break;
-
default:
return -EINVAL;
}
@@ -1119,45 +1124,44 @@ static int rk_udphy_dp_phy_verify_link_rate(unsigned int link_rate)
return 0;
}
-static int rk_udphy_dp_phy_verify_config(struct rk_udphy *udphy,
- struct phy_configure_opts_dp *dp)
+static int rk_udphy_dp_phy_verify_lanes(struct rk_udphy *udphy,
+ struct phy_configure_opts_dp *dp)
{
- int i, ret;
-
- /* If changing link rate was required, verify it's supported. */
- ret = rk_udphy_dp_phy_verify_link_rate(dp->link_rate);
- if (ret)
- return ret;
-
- /* Verify lane count. */
switch (dp->lanes) {
case 1:
case 2:
case 4:
/* valid lane count. */
+ udphy->lanes = dp->lanes;
break;
default:
return -EINVAL;
}
- /*
- * If changing voltages is required, check swing and pre-emphasis
- * levels, per-lane.
- */
- if (dp->set_voltages) {
- /* Lane count verified previously. */
- for (i = 0; i < dp->lanes; i++) {
- if (dp->voltage[i] > 3 || dp->pre[i] > 3)
- return -EINVAL;
+ return 0;
+}
- /*
- * Sum of voltage swing and pre-emphasis levels cannot
- * exceed 3.
- */
- if (dp->voltage[i] + dp->pre[i] > 3)
- return -EINVAL;
- }
+/*
+ * If changing voltages is required, check swing and pre-emphasis
+ * levels, per-lane.
+ */
+static int rk_udphy_dp_phy_verify_voltages(struct rk_udphy *udphy,
+ struct phy_configure_opts_dp *dp)
+{
+ int i;
+
+ /* Lane count verified previously. */
+ for (i = 0; i < udphy->lanes; i++) {
+ if (dp->voltage[i] > 3 || dp->pre[i] > 3)
+ return -EINVAL;
+
+ /*
+ * Sum of voltage swing and pre-emphasis levels cannot
+ * exceed 3.
+ */
+ if (dp->voltage[i] + dp->pre[i] > 3)
+ return -EINVAL;
}
return 0;
@@ -1197,9 +1201,23 @@ static int rk_udphy_dp_phy_configure(struct phy *phy,
u32 i, val, lane;
int ret;
- ret = rk_udphy_dp_phy_verify_config(udphy, dp);
- if (ret)
- return ret;
+ if (dp->set_rate) {
+ ret = rk_udphy_dp_phy_verify_link_rate(udphy, dp);
+ if (ret)
+ return ret;
+ }
+
+ if (dp->set_lanes) {
+ ret = rk_udphy_dp_phy_verify_lanes(udphy, dp);
+ if (ret)
+ return ret;
+ }
+
+ if (dp->set_voltages) {
+ ret = rk_udphy_dp_phy_verify_voltages(udphy, dp);
+ if (ret)
+ return ret;
+ }
if (dp->set_rate) {
regmap_update_bits(udphy->pma_regmap, CMN_DP_RSTN_OFFSET,
@@ -1244,9 +1262,9 @@ static int rk_udphy_dp_phy_configure(struct phy *phy,
}
if (dp->set_voltages) {
- for (i = 0; i < dp->lanes; i++) {
+ for (i = 0; i < udphy->lanes; i++) {
lane = udphy->dp_lane_sel[i];
- switch (dp->link_rate) {
+ switch (udphy->link_rate) {
case 1620:
case 2700:
regmap_update_bits(udphy->pma_regmap,
diff --git a/drivers/phy/samsung/Kconfig b/drivers/phy/samsung/Kconfig
index e2330b0894d6..6566100441d6 100644
--- a/drivers/phy/samsung/Kconfig
+++ b/drivers/phy/samsung/Kconfig
@@ -81,6 +81,7 @@ config PHY_EXYNOS5_USBDRD
tristate "Exynos5 SoC series USB DRD PHY driver"
depends on (ARCH_EXYNOS && OF) || COMPILE_TEST
depends on HAS_IOMEM
+ depends on TYPEC || !TYPEC
depends on USB_DWC3_EXYNOS
select GENERIC_PHY
select MFD_SYSCON
diff --git a/drivers/phy/samsung/Makefile b/drivers/phy/samsung/Makefile
index fea1f96d0e43..342682638a87 100644
--- a/drivers/phy/samsung/Makefile
+++ b/drivers/phy/samsung/Makefile
@@ -7,6 +7,7 @@ phy-exynos-ufs-y += phy-gs101-ufs.o
phy-exynos-ufs-y += phy-samsung-ufs.o
phy-exynos-ufs-y += phy-exynos7-ufs.o
phy-exynos-ufs-y += phy-exynosautov9-ufs.o
+phy-exynos-ufs-y += phy-exynosautov920-ufs.o
phy-exynos-ufs-y += phy-fsd-ufs.o
obj-$(CONFIG_PHY_SAMSUNG_USB2) += phy-exynos-usb2.o
phy-exynos-usb2-y += phy-samsung-usb2.o
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index 46b8f6987c62..817fddee0392 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -23,6 +23,8 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/usb/typec.h>
+#include <linux/usb/typec_mux.h>
/* Exynos USB PHY registers */
#define EXYNOS5_FSEL_9MHZ6 0x0
@@ -209,6 +211,10 @@
#define EXYNOS9_PMA_USBDP_CMN_REG00B8 0x02e0
#define CMN_REG00B8_LANE_MUX_SEL_DP GENMASK(3, 0)
+#define CMN_REG00B8_LANE_MUX_SEL_DP_LANE3 BIT(3)
+#define CMN_REG00B8_LANE_MUX_SEL_DP_LANE2 BIT(2)
+#define CMN_REG00B8_LANE_MUX_SEL_DP_LANE1 BIT(1)
+#define CMN_REG00B8_LANE_MUX_SEL_DP_LANE0 BIT(0)
#define EXYNOS9_PMA_USBDP_CMN_REG01C0 0x0700
#define CMN_REG01C0_ANA_LCPLL_LOCK_DONE BIT(7)
@@ -383,11 +389,14 @@ struct exynos5_usbdrd_phy_drvdata {
* @clks: clocks for register access
* @core_clks: core clocks for phy (ref, pipe3, utmi+, ITP, etc. as required)
* @drv_data: pointer to SoC level driver data structure
+ * @phy_mutex: mutex protecting phy_init/exit & TCPC callbacks
* @phys: array for 'EXYNOS5_DRDPHYS_NUM' number of PHY
* instances each with its 'phy' and 'phy_cfg'.
* @extrefclk: frequency select settings when using 'separate
* reference clocks' for SS and HS operations
* @regulators: regulators for phy
+ * @sw: TypeC orientation switch handle
+ * @orientation: TypeC connector orientation - normal or flipped
*/
struct exynos5_usbdrd_phy {
struct device *dev;
@@ -397,6 +406,7 @@ struct exynos5_usbdrd_phy {
struct clk_bulk_data *clks;
struct clk_bulk_data *core_clks;
const struct exynos5_usbdrd_phy_drvdata *drv_data;
+ struct mutex phy_mutex;
struct phy_usb_instance {
struct phy *phy;
u32 index;
@@ -406,6 +416,9 @@ struct exynos5_usbdrd_phy {
} phys[EXYNOS5_DRDPHYS_NUM];
u32 extrefclk;
struct regulator_bulk_data *regulators;
+
+ struct typec_switch_dev *sw;
+ enum typec_orientation orientation;
};
static inline
@@ -647,22 +660,38 @@ exynos5_usbdrd_usbdp_g2_v4_pma_lane_mux_sel(struct exynos5_usbdrd_phy *phy_drd)
/* lane configuration: USB on all lanes */
reg = readl(regs_base + EXYNOS9_PMA_USBDP_CMN_REG00B8);
reg &= ~CMN_REG00B8_LANE_MUX_SEL_DP;
- writel(reg, regs_base + EXYNOS9_PMA_USBDP_CMN_REG00B8);
-
/*
- * FIXME: below code supports one connector orientation only. It needs
- * updating once we can receive connector events.
+ * USB on lanes 0 & 1 in normal mode, or 2 & 3 if reversed, DP on the
+ * other ones.
*/
+ reg |= FIELD_PREP(CMN_REG00B8_LANE_MUX_SEL_DP,
+ ((phy_drd->orientation == TYPEC_ORIENTATION_NORMAL)
+ ? (CMN_REG00B8_LANE_MUX_SEL_DP_LANE3
+ | CMN_REG00B8_LANE_MUX_SEL_DP_LANE2)
+ : (CMN_REG00B8_LANE_MUX_SEL_DP_LANE1
+ | CMN_REG00B8_LANE_MUX_SEL_DP_LANE0)));
+ writel(reg, regs_base + EXYNOS9_PMA_USBDP_CMN_REG00B8);
+
/* override of TX receiver detector and comparator: lane 1 */
reg = readl(regs_base + EXYNOS9_PMA_USBDP_TRSV_REG0413);
- reg &= ~TRSV_REG0413_OVRD_LN1_TX_RXD_COMP_EN;
- reg &= ~TRSV_REG0413_OVRD_LN1_TX_RXD_EN;
+ if (phy_drd->orientation == TYPEC_ORIENTATION_NORMAL) {
+ reg &= ~TRSV_REG0413_OVRD_LN1_TX_RXD_COMP_EN;
+ reg &= ~TRSV_REG0413_OVRD_LN1_TX_RXD_EN;
+ } else {
+ reg |= TRSV_REG0413_OVRD_LN1_TX_RXD_COMP_EN;
+ reg |= TRSV_REG0413_OVRD_LN1_TX_RXD_EN;
+ }
writel(reg, regs_base + EXYNOS9_PMA_USBDP_TRSV_REG0413);
/* lane 3 */
reg = readl(regs_base + EXYNOS9_PMA_USBDP_TRSV_REG0813);
- reg |= TRSV_REG0813_OVRD_LN3_TX_RXD_COMP_EN;
- reg |= TRSV_REG0813_OVRD_LN3_TX_RXD_EN;
+ if (phy_drd->orientation == TYPEC_ORIENTATION_NORMAL) {
+ reg |= TRSV_REG0813_OVRD_LN3_TX_RXD_COMP_EN;
+ reg |= TRSV_REG0813_OVRD_LN3_TX_RXD_EN;
+ } else {
+ reg &= ~TRSV_REG0813_OVRD_LN3_TX_RXD_COMP_EN;
+ reg &= ~TRSV_REG0813_OVRD_LN3_TX_RXD_EN;
+ }
writel(reg, regs_base + EXYNOS9_PMA_USBDP_TRSV_REG0813);
}
@@ -700,21 +729,18 @@ exynos5_usbdrd_usbdp_g2_v4_pma_check_cdr_lock(struct exynos5_usbdrd_phy *phy_drd
int err;
err = readl_poll_timeout(
- phy_drd->reg_pma + EXYNOS9_PMA_USBDP_TRSV_REG03C3,
- reg, (reg & locked) == locked, sleep_us, timeout_us);
- if (!err)
- return;
-
- dev_err(phy_drd->dev,
- "timed out waiting for CDR lock (l0): %#.8x, retrying\n", reg);
-
- /* based on cable orientation, this might be on the other phy port */
- err = readl_poll_timeout(
- phy_drd->reg_pma + EXYNOS9_PMA_USBDP_TRSV_REG07C3,
+ /* lane depends on cable orientation */
+ (phy_drd->reg_pma
+ + ((phy_drd->orientation == TYPEC_ORIENTATION_NORMAL)
+ ? EXYNOS9_PMA_USBDP_TRSV_REG03C3
+ : EXYNOS9_PMA_USBDP_TRSV_REG07C3)),
reg, (reg & locked) == locked, sleep_us, timeout_us);
if (err)
dev_err(phy_drd->dev,
- "timed out waiting for CDR lock (l2): %#.8x\n", reg);
+ "timed out waiting for CDR(l%d) lock: %#.8x\n",
+ ((phy_drd->orientation == TYPEC_ORIENTATION_NORMAL)
+ ? 0
+ : 2), reg);
}
static void exynos5_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd)
@@ -1111,13 +1137,15 @@ static void exynos850_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd)
reg |= LINKCTRL_BUS_FILTER_BYPASS(0xf);
writel(reg, regs_base + EXYNOS850_DRD_LINKCTRL);
- reg = readl(regs_base + EXYNOS850_DRD_UTMI);
- reg |= UTMI_FORCE_BVALID | UTMI_FORCE_VBUSVALID;
- writel(reg, regs_base + EXYNOS850_DRD_UTMI);
+ if (!phy_drd->sw) {
+ reg = readl(regs_base + EXYNOS850_DRD_UTMI);
+ reg |= UTMI_FORCE_BVALID | UTMI_FORCE_VBUSVALID;
+ writel(reg, regs_base + EXYNOS850_DRD_UTMI);
- reg = readl(regs_base + EXYNOS850_DRD_HSP);
- reg |= HSP_VBUSVLDEXT | HSP_VBUSVLDEXTSEL;
- writel(reg, regs_base + EXYNOS850_DRD_HSP);
+ reg = readl(regs_base + EXYNOS850_DRD_HSP);
+ reg |= HSP_VBUSVLDEXT | HSP_VBUSVLDEXTSEL;
+ writel(reg, regs_base + EXYNOS850_DRD_HSP);
+ }
reg = readl(regs_base + EXYNOS850_DRD_SSPPLLCTL);
reg &= ~SSPPLLCTL_FSEL;
@@ -1184,7 +1212,8 @@ static int exynos850_usbdrd_phy_init(struct phy *phy)
return ret;
/* UTMI or PIPE3 specific init */
- inst->phy_cfg->phy_init(phy_drd);
+ scoped_guard(mutex, &phy_drd->phy_mutex)
+ inst->phy_cfg->phy_init(phy_drd);
clk_bulk_disable_unprepare(phy_drd->drv_data->n_clks, phy_drd->clks);
@@ -1203,6 +1232,8 @@ static int exynos850_usbdrd_phy_exit(struct phy *phy)
if (ret)
return ret;
+ guard(mutex)(&phy_drd->phy_mutex);
+
/* Set PHY clock and control HS PHY */
reg = readl(regs_base + EXYNOS850_DRD_UTMI);
reg &= ~(UTMI_DP_PULLDOWN | UTMI_DM_PULLDOWN);
@@ -1374,6 +1405,87 @@ static int exynos5_usbdrd_phy_clk_handle(struct exynos5_usbdrd_phy *phy_drd)
return 0;
}
+static int exynos5_usbdrd_orien_sw_set(struct typec_switch_dev *sw,
+ enum typec_orientation orientation)
+{
+ struct exynos5_usbdrd_phy *phy_drd = typec_switch_get_drvdata(sw);
+ int ret;
+
+ ret = clk_bulk_prepare_enable(phy_drd->drv_data->n_clks, phy_drd->clks);
+ if (ret) {
+ dev_err(phy_drd->dev, "Failed to enable PHY clocks(s)\n");
+ return ret;
+ }
+
+ scoped_guard(mutex, &phy_drd->phy_mutex) {
+ void __iomem * const regs_base = phy_drd->reg_phy;
+ unsigned int reg;
+
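+ /*
+ * TYPEC_ORIENTATION_NONE means no cable: stop forcing VBUS valid
+ * and B-session valid; any other orientation forces both so the
+ * controller sees a valid session.
+ */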
+ if (orientation == TYPEC_ORIENTATION_NONE) {
+ reg = readl(regs_base + EXYNOS850_DRD_UTMI);
+ reg &= ~(UTMI_FORCE_VBUSVALID | UTMI_FORCE_BVALID);
+ writel(reg, regs_base + EXYNOS850_DRD_UTMI);
+
+ reg = readl(regs_base + EXYNOS850_DRD_HSP);
+ reg |= HSP_VBUSVLDEXTSEL;
+ reg &= ~HSP_VBUSVLDEXT;
+ writel(reg, regs_base + EXYNOS850_DRD_HSP);
+ } else {
+ reg = readl(regs_base + EXYNOS850_DRD_UTMI);
+ reg |= UTMI_FORCE_VBUSVALID | UTMI_FORCE_BVALID;
+ writel(reg, regs_base + EXYNOS850_DRD_UTMI);
+
+ reg = readl(regs_base + EXYNOS850_DRD_HSP);
+ reg |= HSP_VBUSVLDEXTSEL | HSP_VBUSVLDEXT;
+ writel(reg, regs_base + EXYNOS850_DRD_HSP);
+ }
+
+ phy_drd->orientation = orientation;
+ }
+
+ clk_bulk_disable_unprepare(phy_drd->drv_data->n_clks, phy_drd->clks);
+
+ return 0;
+}
+
+static void exynos5_usbdrd_orien_switch_unregister(void *data)
+{
+ struct exynos5_usbdrd_phy *phy_drd = data;
+
+ typec_switch_unregister(phy_drd->sw);
+}
+
+static int exynos5_usbdrd_setup_notifiers(struct exynos5_usbdrd_phy *phy_drd)
+{
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_TYPEC))
+ return 0;
+
+ if (device_property_present(phy_drd->dev, "orientation-switch")) {
+ struct typec_switch_desc sw_desc = { };
+
+ sw_desc.drvdata = phy_drd;
+ sw_desc.fwnode = dev_fwnode(phy_drd->dev);
+ sw_desc.set = exynos5_usbdrd_orien_sw_set;
+
+ phy_drd->sw = typec_switch_register(phy_drd->dev, &sw_desc);
+ if (IS_ERR(phy_drd->sw))
+ return dev_err_probe(phy_drd->dev,
+ PTR_ERR(phy_drd->sw),
+ "Failed to register TypeC orientation switch\n");
+
+ ret = devm_add_action_or_reset(phy_drd->dev,
+ exynos5_usbdrd_orien_switch_unregister,
+ phy_drd);
+ if (ret)
+ return dev_err_probe(phy_drd->dev, ret,
+ "Failed to register TypeC orientation devm action\n");
+ }
+
+ return 0;
+}
+
static const struct exynos5_usbdrd_phy_config phy_cfg_exynos5[] = {
{
.id = EXYNOS5_DRDPHY_UTMI,
@@ -1513,8 +1625,11 @@ static const struct exynos5_usbdrd_phy_tuning gs101_tunes_pipe3_preinit[] = {
PHY_TUNING_ENTRY_PMA(0x09e0, -1, 0x00),
PHY_TUNING_ENTRY_PMA(0x09e4, -1, 0x36),
PHY_TUNING_ENTRY_PMA(0x1e7c, -1, 0x06),
- PHY_TUNING_ENTRY_PMA(0x1e90, -1, 0x00),
- PHY_TUNING_ENTRY_PMA(0x1e94, -1, 0x36),
+ PHY_TUNING_ENTRY_PMA(0x19e0, -1, 0x00),
+ PHY_TUNING_ENTRY_PMA(0x19e4, -1, 0x36),
+ /* fix bootloader bug */
+ PHY_TUNING_ENTRY_PMA(0x1e90, -1, 0x02),
+ PHY_TUNING_ENTRY_PMA(0x1e94, -1, 0x0b),
/* improve LVCC */
PHY_TUNING_ENTRY_PMA(0x08f0, -1, 0x30),
PHY_TUNING_ENTRY_PMA(0x18f0, -1, 0x30),
@@ -1698,6 +1813,10 @@ static int exynos5_usbdrd_phy_probe(struct platform_device *pdev)
return -EINVAL;
phy_drd->drv_data = drv_data;
+ ret = devm_mutex_init(dev, &phy_drd->phy_mutex);
+ if (ret)
+ return ret;
+
if (of_property_present(dev->of_node, "reg-names")) {
void __iomem *reg;
@@ -1728,10 +1847,9 @@ static int exynos5_usbdrd_phy_probe(struct platform_device *pdev)
reg_pmu = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,pmu-syscon");
- if (IS_ERR(reg_pmu)) {
- dev_err(dev, "Failed to lookup PMU regmap\n");
- return PTR_ERR(reg_pmu);
- }
+ if (IS_ERR(reg_pmu))
+ return dev_err_probe(dev, PTR_ERR(reg_pmu),
+ "Failed to lookup PMU regmap\n");
/*
* Exynos5420 SoC has multiple channels for USB 3.0 PHY, with
@@ -1757,15 +1875,18 @@ static int exynos5_usbdrd_phy_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "failed to get regulators\n");
+ ret = exynos5_usbdrd_setup_notifiers(phy_drd);
+ if (ret)
+ return ret;
+
dev_vdbg(dev, "Creating usbdrd_phy phy\n");
for (i = 0; i < EXYNOS5_DRDPHYS_NUM; i++) {
struct phy *phy = devm_phy_create(dev, NULL, drv_data->phy_ops);
- if (IS_ERR(phy)) {
- dev_err(dev, "Failed to create usbdrd_phy phy\n");
- return PTR_ERR(phy);
- }
+ if (IS_ERR(phy))
+ return dev_err_probe(dev, PTR_ERR(phy),
+ "Failed to create usbdrd_phy phy\n");
phy_drd->phys[i].phy = phy;
phy_drd->phys[i].index = i;
@@ -1789,10 +1910,9 @@ static int exynos5_usbdrd_phy_probe(struct platform_device *pdev)
phy_provider = devm_of_phy_provider_register(dev,
exynos5_usbdrd_phy_xlate);
- if (IS_ERR(phy_provider)) {
- dev_err(phy_drd->dev, "Failed to register phy provider\n");
- return PTR_ERR(phy_provider);
- }
+ if (IS_ERR(phy_provider))
+ return dev_err_probe(phy_drd->dev, PTR_ERR(phy_provider),
+ "Failed to register phy provider\n");
return 0;
}
diff --git a/drivers/phy/samsung/phy-exynosautov920-ufs.c b/drivers/phy/samsung/phy-exynosautov920-ufs.c
new file mode 100644
index 000000000000..21ef79c42f95
--- /dev/null
+++ b/drivers/phy/samsung/phy-exynosautov920-ufs.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * UFS PHY driver data for Samsung ExynosAuto v920 SoC
+ *
+ * Copyright (C) 2024 Samsung Electronics Co., Ltd.
+ */
+
+#include "phy-samsung-ufs.h"
+
+#define EXYNOSAUTOV920_EMBEDDED_COMBO_PHY_CTRL 0x708
+#define EXYNOSAUTOV920_EMBEDDED_COMBO_PHY_CTRL_MASK 0x1
+#define EXYNOSAUTOV920_EMBEDDED_COMBO_PHY_CTRL_EN BIT(0)
+#define EXYNOSAUTOV920_EMBEDDED_COMBO_PHY_CDR_LOCK_STATUS 0x5e
+
+#define EXYNOSAUTOV920_CDR_LOCK_OFFSET 0xce4
+
+#define PHY_EXYNOSAUTOV920_LANE_OFFSET 0x200
+#define PHY_TRSV_REG_CFG_AUTOV920(o, v, d) \
+ PHY_TRSV_REG_CFG_OFFSET(o, v, d, PHY_EXYNOSAUTOV920_LANE_OFFSET)
+
+/* Calibration for phy initialization */
+static const struct samsung_ufs_phy_cfg exynosautov920_pre_init_cfg[] = {
+ PHY_COMN_REG_CFG(0x29, 0x22, PWR_MODE_ANY),
+ PHY_COMN_REG_CFG(0x43, 0x10, PWR_MODE_ANY),
+ PHY_COMN_REG_CFG(0x3c, 0x14, PWR_MODE_ANY),
+ PHY_COMN_REG_CFG(0x46, 0x48, PWR_MODE_ANY),
+ PHY_COMN_REG_CFG(0x04, 0x95, PWR_MODE_ANY),
+ PHY_COMN_REG_CFG(0x06, 0x30, PWR_MODE_ANY),
+
+ PHY_TRSV_REG_CFG_AUTOV920(0x200, 0x00, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x201, 0x06, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x202, 0x06, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x203, 0x0a, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x204, 0x00, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x205, 0x10, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x207, 0x0c, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x2e1, 0xc0, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x22d, 0xf8, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x234, 0x60, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x238, 0x13, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x239, 0x48, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x23a, 0x01, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x23b, 0x29, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x23c, 0x2a, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x23d, 0x01, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x23e, 0x14, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x23f, 0x13, PWR_MODE_ANY),
+
+ PHY_TRSV_REG_CFG_AUTOV920(0x240, 0x4a, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x243, 0x40, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x244, 0x02, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x25d, 0x00, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x25e, 0x3f, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x25f, 0xff, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x26f, 0xf0, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x273, 0x33, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x274, 0x50, PWR_MODE_ANY),
+
+ PHY_TRSV_REG_CFG_AUTOV920(0x284, 0x02, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x285, 0x02, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x2a2, 0x04, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x27d, 0x01, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x2fa, 0x01, PWR_MODE_ANY),
+
+ PHY_TRSV_REG_CFG_AUTOV920(0x286, 0x03, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x287, 0x03, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x288, 0x03, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x289, 0x03, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x2b3, 0x04, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x2b6, 0x0b, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x2b7, 0x0b, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x2b8, 0x0b, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x2b9, 0x0b, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x2ba, 0x0b, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x2bb, 0x06, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x2bc, 0x06, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x2bd, 0x06, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x2be, 0x06, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x34b, 0x01, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x34c, 0x24, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x34d, 0x23, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x34e, 0x45, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x34f, 0x00, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x350, 0x31, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x351, 0x00, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x352, 0x02, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x353, 0x00, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x354, 0x01, PWR_MODE_ANY),
+
+ PHY_COMN_REG_CFG(0x43, 0x18, PWR_MODE_ANY),
+ PHY_COMN_REG_CFG(0x43, 0x00, PWR_MODE_ANY),
+
+ END_UFS_PHY_CFG,
+};
+
+/* Calibration for HS mode series A/B */
+static const struct samsung_ufs_phy_cfg exynosautov920_pre_pwr_hs_cfg[] = {
+ PHY_TRSV_REG_CFG_AUTOV920(0x369, 0x11, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x246, 0x03, PWR_MODE_ANY),
+
+ END_UFS_PHY_CFG,
+};
+
+static const struct samsung_ufs_phy_cfg exynosautov920_post_pwr_hs_cfg[] = {
+ END_UFS_PHY_CFG,
+};
+
+#define DELAY_IN_US 40
+#define RETRY_CNT 100
+#define EXYNOSAUTOV920_CDR_LOCK_MASK 0x8
+
+int exynosautov920_ufs_phy_wait_cdr_lock(struct phy *phy, u8 lane)
+{
+ struct samsung_ufs_phy *ufs_phy = get_samsung_ufs_phy(phy);
+ u32 reg, i;
+
+ struct samsung_ufs_phy_cfg cfg[4] = {
+ PHY_TRSV_REG_CFG_AUTOV920(0x222, 0x10, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x222, 0x18, PWR_MODE_ANY),
+ PHY_TRSV_REG_CFG_AUTOV920(0x246, 0x01, PWR_MODE_ANY),
+ END_UFS_PHY_CFG,
+ };
+
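+ /*
+ * Worst-case wait before giving up: RETRY_CNT iterations of two
+ * DELAY_IN_US delays each, i.e. 100 * 2 * 40 us = 8 ms.
+ */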
+ for (i = 0; i < RETRY_CNT; i++) {
+ udelay(DELAY_IN_US);
+
+ reg = readl(ufs_phy->reg_pma + EXYNOSAUTOV920_CDR_LOCK_OFFSET +
+ (PHY_APB_ADDR(PHY_EXYNOSAUTOV920_LANE_OFFSET) * lane));
+
+ if ((reg & EXYNOSAUTOV920_CDR_LOCK_MASK)
+ == EXYNOSAUTOV920_CDR_LOCK_MASK) {
+ samsung_ufs_phy_config(ufs_phy, &cfg[2], lane);
+ return 0;
+ }
+
+ udelay(DELAY_IN_US);
+
+ /* Disable and enable CDR */
+ samsung_ufs_phy_config(ufs_phy, &cfg[0], lane);
+ samsung_ufs_phy_config(ufs_phy, &cfg[1], lane);
+ }
+
+ dev_err(ufs_phy->dev, "failed to get phy cdr lock\n");
+ return -ETIMEDOUT;
+}
+
+static const struct samsung_ufs_phy_cfg *exynosautov920_ufs_phy_cfgs[CFG_TAG_MAX] = {
+ [CFG_PRE_INIT] = exynosautov920_pre_init_cfg,
+ [CFG_PRE_PWR_HS] = exynosautov920_pre_pwr_hs_cfg,
+ [CFG_POST_PWR_HS] = exynosautov920_post_pwr_hs_cfg,
+};
+
+static const char * const exynosautov920_ufs_phy_clks[] = {
+ "ref_clk",
+};
+
+const struct samsung_ufs_phy_drvdata exynosautov920_ufs_phy = {
+ .cfgs = exynosautov920_ufs_phy_cfgs,
+ .isol = {
+ .offset = EXYNOSAUTOV920_EMBEDDED_COMBO_PHY_CTRL,
+ .mask = EXYNOSAUTOV920_EMBEDDED_COMBO_PHY_CTRL_MASK,
+ .en = EXYNOSAUTOV920_EMBEDDED_COMBO_PHY_CTRL_EN,
+ },
+ .clk_list = exynosautov920_ufs_phy_clks,
+ .num_clks = ARRAY_SIZE(exynosautov920_ufs_phy_clks),
+ .cdr_lock_status_offset = EXYNOSAUTOV920_EMBEDDED_COMBO_PHY_CDR_LOCK_STATUS,
+ .wait_for_cdr = exynosautov920_ufs_phy_wait_cdr_lock,
+};
diff --git a/drivers/phy/samsung/phy-samsung-ufs.c b/drivers/phy/samsung/phy-samsung-ufs.c
index 8e9ccd39f97e..f3cbe6b17b23 100644
--- a/drivers/phy/samsung/phy-samsung-ufs.c
+++ b/drivers/phy/samsung/phy-samsung-ufs.c
@@ -28,9 +28,9 @@
#define PHY_DEF_LANE_CNT 1
-static void samsung_ufs_phy_config(struct samsung_ufs_phy *phy,
- const struct samsung_ufs_phy_cfg *cfg,
- u8 lane)
+void samsung_ufs_phy_config(struct samsung_ufs_phy *phy,
+ const struct samsung_ufs_phy_cfg *cfg,
+ u8 lane)
{
enum {LANE_0, LANE_1}; /* lane index */
@@ -324,6 +324,9 @@ static const struct of_device_id samsung_ufs_phy_match[] = {
.compatible = "samsung,exynosautov9-ufs-phy",
.data = &exynosautov9_ufs_phy,
}, {
+ .compatible = "samsung,exynosautov920-ufs-phy",
+ .data = &exynosautov920_ufs_phy,
+ }, {
.compatible = "tesla,fsd-ufs-phy",
.data = &fsd_ufs_phy,
},
diff --git a/drivers/phy/samsung/phy-samsung-ufs.h b/drivers/phy/samsung/phy-samsung-ufs.h
index 9b7deef6e10f..a28f148081d1 100644
--- a/drivers/phy/samsung/phy-samsung-ufs.h
+++ b/drivers/phy/samsung/phy-samsung-ufs.h
@@ -143,9 +143,13 @@ static inline void samsung_ufs_phy_ctrl_isol(
}
int samsung_ufs_phy_wait_for_lock_acq(struct phy *phy, u8 lane);
+int exynosautov920_ufs_phy_wait_cdr_lock(struct phy *phy, u8 lane);
+void samsung_ufs_phy_config(struct samsung_ufs_phy *phy,
+ const struct samsung_ufs_phy_cfg *cfg, u8 lane);
extern const struct samsung_ufs_phy_drvdata exynos7_ufs_phy;
extern const struct samsung_ufs_phy_drvdata exynosautov9_ufs_phy;
+extern const struct samsung_ufs_phy_drvdata exynosautov920_ufs_phy;
extern const struct samsung_ufs_phy_drvdata fsd_ufs_phy;
extern const struct samsung_ufs_phy_drvdata tensor_gs101_ufs_phy;
diff --git a/drivers/phy/st/phy-stih407-usb.c b/drivers/phy/st/phy-stih407-usb.c
index a4ae2cca7f63..ebb1d0858aa3 100644
--- a/drivers/phy/st/phy-stih407-usb.c
+++ b/drivers/phy/st/phy-stih407-usb.c
@@ -18,8 +18,8 @@
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>
-#define PHYPARAM_REG 1
-#define PHYCTRL_REG 2
+#define PHYPARAM_REG 0
+#define PHYCTRL_REG 1
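+/*
+ * Indices into the args returned by syscon_regmap_lookup_by_phandle_args();
+ * unlike of_property_read_u32_index() on "st,syscfg", the phandle itself is
+ * no longer counted, so both offsets shift down by one.
+ */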
/* Default PHY_SEL and REFCLKSEL configuration */
#define STIH407_USB_PICOPHY_CTRL_PORT_CONF 0x6
@@ -91,8 +91,8 @@ static int stih407_usb2_picophy_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct phy_provider *phy_provider;
+ unsigned int syscon_args[2];
struct phy *phy;
- int ret;
phy_dev = devm_kzalloc(dev, sizeof(*phy_dev), GFP_KERNEL);
if (!phy_dev)
@@ -116,25 +116,15 @@ static int stih407_usb2_picophy_probe(struct platform_device *pdev)
/* Reset port by default: only deassert it in phy init */
reset_control_assert(phy_dev->rstport);
- phy_dev->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
+ phy_dev->regmap = syscon_regmap_lookup_by_phandle_args(np, "st,syscfg",
+ 2, syscon_args);
if (IS_ERR(phy_dev->regmap)) {
dev_err(dev, "No syscfg phandle specified\n");
return PTR_ERR(phy_dev->regmap);
}
- ret = of_property_read_u32_index(np, "st,syscfg", PHYPARAM_REG,
- &phy_dev->param);
- if (ret) {
- dev_err(dev, "can't get phyparam offset (%d)\n", ret);
- return ret;
- }
-
- ret = of_property_read_u32_index(np, "st,syscfg", PHYCTRL_REG,
- &phy_dev->ctrl);
- if (ret) {
- dev_err(dev, "can't get phyctrl offset (%d)\n", ret);
- return ret;
- }
+ phy_dev->param = syscon_args[PHYPARAM_REG];
+ phy_dev->ctrl = syscon_args[PHYCTRL_REG];
phy = devm_phy_create(dev, NULL, &stih407_usb2_picophy_data);
if (IS_ERR(phy)) {
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index be319949b941..7d9a78289c96 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -455,7 +455,7 @@ static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data)
blocking_notifier_call_chain(&ec_dev->panic_notifier, 0, ec_dev);
kobject_uevent_env(&ec_dev->dev->kobj, KOBJ_CHANGE, (char **)env);
/* Begin orderly shutdown. EC will force reset after a short period. */
- hw_protection_shutdown("CrOS EC Panic", -1);
+ __hw_protection_trigger("CrOS EC Panic", -1, HWPROT_ACT_SHUTDOWN);
/* Do not query for other events after a panic is reported */
return;
}
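The hw_protection_shutdown() callers in this series move to the new trigger API: hw_protection_trigger() applies the system-default protection action, while __hw_protection_trigger() selects one explicitly. A minimal sketch of both forms as used in these hunks (HWPROT_ACT_SHUTDOWN is taken from the change above; treat other HWPROT_ACT_* values and header details as assumptions):

	/* use the configured default hardware-protection action */
	hw_protection_trigger("critical under-voltage", 100);

	/* force an orderly shutdown regardless of the default */
	__hw_protection_trigger("CrOS EC Panic", -1, HWPROT_ACT_SHUTDOWN);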
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index c43d8ad02529..d2ff76e74a05 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -843,6 +843,7 @@ EXPORT_SYMBOL(isapnp_protocol);
EXPORT_SYMBOL(isapnp_present);
EXPORT_SYMBOL(isapnp_cfg_begin);
EXPORT_SYMBOL(isapnp_cfg_end);
+EXPORT_SYMBOL(isapnp_read_byte);
EXPORT_SYMBOL(isapnp_write_byte);
static int isapnp_get_resources(struct pnp_dev *dev)
diff --git a/drivers/power/supply/da9030_battery.c b/drivers/power/supply/da9030_battery.c
index ac2e319e9517..d25279c26030 100644
--- a/drivers/power/supply/da9030_battery.c
+++ b/drivers/power/supply/da9030_battery.c
@@ -502,8 +502,7 @@ static int da9030_battery_probe(struct platform_device *pdev)
/* 10 seconds between monitor runs unless platform defines other
interval */
- charger->interval = msecs_to_jiffies(
- (pdata->batmon_interval ? : 10) * 1000);
+ charger->interval = secs_to_jiffies(pdata->batmon_interval ? : 10);
charger->charge_milliamp = pdata->charge_milliamp;
charger->charge_millivolt = pdata->charge_millivolt;
diff --git a/drivers/power/supply/ip5xxx_power.c b/drivers/power/supply/ip5xxx_power.c
index a031eadb49dd..24eea7a91b30 100644
--- a/drivers/power/supply/ip5xxx_power.c
+++ b/drivers/power/supply/ip5xxx_power.c
@@ -828,10 +828,9 @@ static void ip5xxx_setup_regs(struct device *dev, struct ip5xxx *ip5xxx,
static int ip5xxx_power_probe(struct i2c_client *client)
{
- const struct ip5xxx_regfield_config *fields = &ip51xx_fields;
+ const struct ip5xxx_regfield_config *fields;
struct power_supply_config psy_cfg = {};
struct device *dev = &client->dev;
- const struct of_device_id *of_id;
struct power_supply *psy;
struct ip5xxx *ip5xxx;
@@ -843,9 +842,7 @@ static int ip5xxx_power_probe(struct i2c_client *client)
if (IS_ERR(ip5xxx->regmap))
return PTR_ERR(ip5xxx->regmap);
- of_id = i2c_of_match_device(dev->driver->of_match_table, client);
- if (of_id)
- fields = (const struct ip5xxx_regfield_config *)of_id->data;
+ fields = i2c_get_match_data(client) ?: &ip51xx_fields;
ip5xxx_setup_regs(dev, ip5xxx, fields);
psy_cfg.fwnode = dev_fwnode(dev);
diff --git a/drivers/pps/generators/Kconfig b/drivers/pps/generators/Kconfig
index cd94bf3bfaf2..b3f340ed3163 100644
--- a/drivers/pps/generators/Kconfig
+++ b/drivers/pps/generators/Kconfig
@@ -31,4 +31,20 @@ config PPS_GENERATOR_PARPORT
utilizes STROBE pin of a parallel port to send PPS signals. It uses
parport abstraction layer and hrtimers to precisely control the signal.
+config PPS_GENERATOR_TIO
+ tristate "TIO PPS signal generator"
+ depends on X86 && CPU_SUP_INTEL
+ help
+ If you say yes here you get support for a PPS TIO signal generator
+ which generates a pulse at a prescribed time based on the system clock.
+ It uses time translation and hrtimers to precisely generate a pulse.
+ This hardware is present on 2019 and newer Intel CPUs. However, this
+ driver is not useful without adding highly specialized hardware outside
+ the Linux system to observe these pulses.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pps_gen_tio.
+
+ If unsure, say N.
+
endif # PPS_GENERATOR
diff --git a/drivers/pps/generators/Makefile b/drivers/pps/generators/Makefile
index dc1aa5a4688b..e109920e8a2d 100644
--- a/drivers/pps/generators/Makefile
+++ b/drivers/pps/generators/Makefile
@@ -8,5 +8,6 @@ obj-$(CONFIG_PPS_GENERATOR) := pps_gen_core.o
obj-$(CONFIG_PPS_GENERATOR_DUMMY) += pps_gen-dummy.o
obj-$(CONFIG_PPS_GENERATOR_PARPORT) += pps_gen_parport.o
+obj-$(CONFIG_PPS_GENERATOR_TIO) += pps_gen_tio.o
ccflags-$(CONFIG_PPS_DEBUG) := -DDEBUG
diff --git a/drivers/pps/generators/pps_gen-dummy.c b/drivers/pps/generators/pps_gen-dummy.c
index b284c200cbe5..55de4aecf35e 100644
--- a/drivers/pps/generators/pps_gen-dummy.c
+++ b/drivers/pps/generators/pps_gen-dummy.c
@@ -61,7 +61,7 @@ static int pps_gen_dummy_enable(struct pps_gen_device *pps_gen, bool enable)
* The PPS info struct
*/
-static struct pps_gen_source_info pps_gen_dummy_info = {
+static const struct pps_gen_source_info pps_gen_dummy_info = {
.use_system_clock = true,
.get_time = pps_gen_dummy_get_time,
.enable = pps_gen_dummy_enable,
diff --git a/drivers/pps/generators/pps_gen.c b/drivers/pps/generators/pps_gen.c
index ca592f1736f4..5b8bb454913c 100644
--- a/drivers/pps/generators/pps_gen.c
+++ b/drivers/pps/generators/pps_gen.c
@@ -66,7 +66,7 @@ static long pps_gen_cdev_ioctl(struct file *file,
if (ret)
return -EFAULT;
- ret = pps_gen->info.enable(pps_gen, status);
+ ret = pps_gen->info->enable(pps_gen, status);
if (ret)
return ret;
pps_gen->enabled = status;
@@ -76,7 +76,7 @@ static long pps_gen_cdev_ioctl(struct file *file,
case PPS_GEN_USESYSTEMCLOCK:
dev_dbg(pps_gen->dev, "PPS_GEN_USESYSTEMCLOCK\n");
- ret = put_user(pps_gen->info.use_system_clock, uiuarg);
+ ret = put_user(pps_gen->info->use_system_clock, uiuarg);
if (ret)
return -EFAULT;
@@ -175,7 +175,7 @@ static int pps_gen_register_cdev(struct pps_gen_device *pps_gen)
devt = MKDEV(MAJOR(pps_gen_devt), pps_gen->id);
cdev_init(&pps_gen->cdev, &pps_gen_cdev_fops);
- pps_gen->cdev.owner = pps_gen->info.owner;
+ pps_gen->cdev.owner = pps_gen->info->owner;
err = cdev_add(&pps_gen->cdev, devt, 1);
if (err) {
@@ -183,8 +183,8 @@ static int pps_gen_register_cdev(struct pps_gen_device *pps_gen)
MAJOR(pps_gen_devt), pps_gen->id);
goto free_ida;
}
- pps_gen->dev = device_create(pps_gen_class, pps_gen->info.parent, devt,
- pps_gen, "pps-gen%d", pps_gen->id);
+ pps_gen->dev = device_create(pps_gen_class, pps_gen->info->parent, devt,
+ pps_gen, "pps-gen%d", pps_gen->id);
if (IS_ERR(pps_gen->dev)) {
err = PTR_ERR(pps_gen->dev);
goto del_cdev;
@@ -225,7 +225,7 @@ static void pps_gen_unregister_cdev(struct pps_gen_device *pps_gen)
* Return: the PPS generator device in case of success, and ERR_PTR(errno)
* otherwise.
*/
-struct pps_gen_device *pps_gen_register_source(struct pps_gen_source_info *info)
+struct pps_gen_device *pps_gen_register_source(const struct pps_gen_source_info *info)
{
struct pps_gen_device *pps_gen;
int err;
@@ -235,7 +235,7 @@ struct pps_gen_device *pps_gen_register_source(struct pps_gen_source_info *info)
err = -ENOMEM;
goto pps_gen_register_source_exit;
}
- pps_gen->info = *info;
+ pps_gen->info = info;
pps_gen->enabled = false;
init_waitqueue_head(&pps_gen->queue);
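Since pps_gen_register_source() now stores the info by reference, a generator driver must keep its pps_gen_source_info alive for the generator's lifetime and may declare it const. A minimal registration sketch modeled on the dummy generator above; the my_gen_* names are hypothetical:

	static const struct pps_gen_source_info my_gen_info = {
		.use_system_clock = true,
		.get_time = my_gen_get_time,	/* int (*)(struct pps_gen_device *, struct timespec64 *) */
		.enable = my_gen_enable,	/* int (*)(struct pps_gen_device *, bool) */
		.owner = THIS_MODULE,
	};

	static struct pps_gen_device *my_gen;

	static int my_gen_register(void)
	{
		my_gen = pps_gen_register_source(&my_gen_info);
		return PTR_ERR_OR_ZERO(my_gen);
	}

	static void my_gen_unregister(void)
	{
		pps_gen_unregister_source(my_gen);
	}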
diff --git a/drivers/pps/generators/pps_gen_tio.c b/drivers/pps/generators/pps_gen_tio.c
new file mode 100644
index 000000000000..6c46b46c66cd
--- /dev/null
+++ b/drivers/pps/generators/pps_gen_tio.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel PPS signal Generator Driver
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/hrtimer.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pps_gen_kernel.h>
+#include <linux/timekeeping.h>
+#include <linux/types.h>
+
+#include <asm/cpu_device_id.h>
+
+#define TIOCTL 0x00
+#define TIOCOMPV 0x10
+#define TIOEC 0x30
+
+/* Control Register */
+#define TIOCTL_EN BIT(0)
+#define TIOCTL_DIR BIT(1)
+#define TIOCTL_EP GENMASK(3, 2)
+#define TIOCTL_EP_RISING_EDGE FIELD_PREP(TIOCTL_EP, 0)
+#define TIOCTL_EP_FALLING_EDGE FIELD_PREP(TIOCTL_EP, 1)
+#define TIOCTL_EP_TOGGLE_EDGE FIELD_PREP(TIOCTL_EP, 2)
+
+/* Safety time to set hrtimer early */
+#define SAFE_TIME_NS (10 * NSEC_PER_MSEC)
+
+#define MAGIC_CONST (NSEC_PER_SEC - SAFE_TIME_NS)
+#define ART_HW_DELAY_CYCLES 2
+
+struct pps_tio {
+ struct pps_gen_source_info gen_info;
+ struct pps_gen_device *pps_gen;
+ struct hrtimer timer;
+ void __iomem *base;
+ u32 prev_count;
+ spinlock_t lock;
+ struct device *dev;
+};
+
+static inline u32 pps_tio_read(u32 offset, struct pps_tio *tio)
+{
+ return readl(tio->base + offset);
+}
+
+static inline void pps_ctl_write(u32 value, struct pps_tio *tio)
+{
+ writel(value, tio->base + TIOCTL);
+}
+
+/*
+ * For the COMPV register, it's safer to write the
+ * higher 32 bits followed by the lower 32 bits.
+ */
+static inline void pps_compv_write(u64 value, struct pps_tio *tio)
+{
+ hi_lo_writeq(value, tio->base + TIOCOMPV);
+}
+
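+/*
+ * The timer always fires SAFE_TIME_NS (10 ms) before the intended pulse
+ * edge: MAGIC_CONST places the first expiry 10 ms short of a full-second
+ * boundary, and the callback then programs the exact edge into the
+ * compare register in ART time.
+ */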
+static inline ktime_t first_event(struct pps_tio *tio)
+{
+ return ktime_set(ktime_get_real_seconds() + 1, MAGIC_CONST);
+}
+
+static u32 pps_tio_disable(struct pps_tio *tio)
+{
+ u32 ctrl;
+
+ ctrl = pps_tio_read(TIOCTL, tio);
+ pps_compv_write(0, tio);
+
+ ctrl &= ~TIOCTL_EN;
+ pps_ctl_write(ctrl, tio);
+ tio->pps_gen->enabled = false;
+ tio->prev_count = 0;
+ return ctrl;
+}
+
+static void pps_tio_enable(struct pps_tio *tio)
+{
+ u32 ctrl;
+
+ ctrl = pps_tio_read(TIOCTL, tio);
+ ctrl |= TIOCTL_EN;
+ pps_ctl_write(ctrl, tio);
+ tio->pps_gen->enabled = true;
+}
+
+static void pps_tio_direction_output(struct pps_tio *tio)
+{
+ u32 ctrl;
+
+ ctrl = pps_tio_disable(tio);
+
+ /*
+ * Before enabling the device, make sure the
+ * 'compare' value is invalid.
+ */
+ pps_compv_write(0, tio);
+
+ ctrl &= ~(TIOCTL_DIR | TIOCTL_EP);
+ ctrl |= TIOCTL_EP_TOGGLE_EDGE;
+ pps_ctl_write(ctrl, tio);
+ pps_tio_enable(tio);
+}
+
+static bool pps_generate_next_pulse(ktime_t expires, struct pps_tio *tio)
+{
+ u64 art;
+
+ if (!ktime_real_to_base_clock(expires, CSID_X86_ART, &art)) {
+ pps_tio_disable(tio);
+ return false;
+ }
+
+ pps_compv_write(art - ART_HW_DELAY_CYCLES, tio);
+ return true;
+}
+
+static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer)
+{
+ ktime_t expires, now;
+ u32 event_count;
+ struct pps_tio *tio = container_of(timer, struct pps_tio, timer);
+
+ guard(spinlock)(&tio->lock);
+
+ /*
+ * Check if any event is missed.
+ * If an event is missed, TIO will be disabled.
+ */
+ event_count = pps_tio_read(TIOEC, tio);
+ if (tio->prev_count && tio->prev_count == event_count)
+ goto err;
+ tio->prev_count = event_count;
+
+ expires = hrtimer_get_expires(timer);
+
+ now = ktime_get_real();
+ if (now - expires >= SAFE_TIME_NS)
+ goto err;
+
+ tio->pps_gen->enabled = pps_generate_next_pulse(expires + SAFE_TIME_NS, tio);
+ if (!tio->pps_gen->enabled)
+ return HRTIMER_NORESTART;
+
+ hrtimer_forward(timer, now, NSEC_PER_SEC / 2);
+ return HRTIMER_RESTART;
+
+err:
+ dev_err(tio->dev, "Event missed, disabling Timed I/O\n");
+ pps_tio_disable(tio);
+ pps_gen_event(tio->pps_gen, PPS_GEN_EVENT_MISSEDPULSE, NULL);
+ return HRTIMER_NORESTART;
+}
+
+static int pps_tio_gen_enable(struct pps_gen_device *pps_gen, bool enable)
+{
+ struct pps_tio *tio = container_of(pps_gen->info, struct pps_tio, gen_info);
+
+ if (!timekeeping_clocksource_has_base(CSID_X86_ART)) {
+ dev_err_once(tio->dev, "PPS cannot be used as clock is not related to ART");
+ return -ENODEV;
+ }
+
+ guard(spinlock_irqsave)(&tio->lock);
+ if (enable && !pps_gen->enabled) {
+ pps_tio_direction_output(tio);
+ hrtimer_start(&tio->timer, first_event(tio), HRTIMER_MODE_ABS);
+ } else if (!enable && pps_gen->enabled) {
+ hrtimer_cancel(&tio->timer);
+ pps_tio_disable(tio);
+ }
+
+ return 0;
+}
+
+static int pps_tio_get_time(struct pps_gen_device *pps_gen,
+ struct timespec64 *time)
+{
+ struct system_time_snapshot snap;
+
+ ktime_get_snapshot(&snap);
+ *time = ktime_to_timespec64(snap.real);
+
+ return 0;
+}
+
+static int pps_gen_tio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pps_tio *tio;
+
+ if (!(cpu_feature_enabled(X86_FEATURE_TSC_KNOWN_FREQ) &&
+ cpu_feature_enabled(X86_FEATURE_ART))) {
+ dev_warn(dev, "TSC/ART is not enabled");
+ return -ENODEV;
+ }
+
+ tio = devm_kzalloc(dev, sizeof(*tio), GFP_KERNEL);
+ if (!tio)
+ return -ENOMEM;
+
+ tio->gen_info.use_system_clock = true;
+ tio->gen_info.enable = pps_tio_gen_enable;
+ tio->gen_info.get_time = pps_tio_get_time;
+ tio->gen_info.owner = THIS_MODULE;
+
+ tio->pps_gen = pps_gen_register_source(&tio->gen_info);
+ if (IS_ERR(tio->pps_gen))
+ return PTR_ERR(tio->pps_gen);
+
+ tio->dev = dev;
+ tio->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(tio->base))
+ return PTR_ERR(tio->base);
+
+ pps_tio_disable(tio);
+ hrtimer_init(&tio->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ tio->timer.function = hrtimer_callback;
+ spin_lock_init(&tio->lock);
+ platform_set_drvdata(pdev, tio);
+
+ return 0;
+}
+
+static void pps_gen_tio_remove(struct platform_device *pdev)
+{
+ struct pps_tio *tio = platform_get_drvdata(pdev);
+
+ hrtimer_cancel(&tio->timer);
+ pps_tio_disable(tio);
+ pps_gen_unregister_source(tio->pps_gen);
+}
+
+static const struct acpi_device_id intel_pmc_tio_acpi_match[] = {
+ { "INTC1021" },
+ { "INTC1022" },
+ { "INTC1023" },
+ { "INTC1024" },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, intel_pmc_tio_acpi_match);
+
+static struct platform_driver pps_gen_tio_driver = {
+ .probe = pps_gen_tio_probe,
+ .remove = pps_gen_tio_remove,
+ .driver = {
+ .name = "intel-pps-gen-tio",
+ .acpi_match_table = intel_pmc_tio_acpi_match,
+ },
+};
+module_platform_driver(pps_gen_tio_driver);
+
+MODULE_AUTHOR("Christopher Hall <christopher.s.hall@intel.com>");
+MODULE_AUTHOR("Lakshmi Sowjanya D <lakshmi.sowjanya.d@intel.com>");
+MODULE_AUTHOR("Pandith N <pandith.n@intel.com>");
+MODULE_AUTHOR("Thejesh Reddy T R <thejesh.reddy.t.r@intel.com>");
+MODULE_AUTHOR("Subramanian Mohan <subramanian.mohan@intel.com>");
+MODULE_DESCRIPTION("Intel PMC Time-Aware IO Generator Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pps/generators/sysfs.c b/drivers/pps/generators/sysfs.c
index faf8b1c6d202..6d6bc0006fea 100644
--- a/drivers/pps/generators/sysfs.c
+++ b/drivers/pps/generators/sysfs.c
@@ -19,7 +19,7 @@ static ssize_t system_show(struct device *dev, struct device_attribute *attr,
{
struct pps_gen_device *pps_gen = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%d\n", pps_gen->info.use_system_clock);
+ return sysfs_emit(buf, "%d\n", pps_gen->info->use_system_clock);
}
static DEVICE_ATTR_RO(system);
@@ -30,7 +30,7 @@ static ssize_t time_show(struct device *dev, struct device_attribute *attr,
struct timespec64 time;
int ret;
- ret = pps_gen->info.get_time(pps_gen, &time);
+ ret = pps_gen->info->get_time(pps_gen, &time);
if (ret)
return ret;
@@ -49,7 +49,7 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- ret = pps_gen->info.enable(pps_gen, status);
+ ret = pps_gen->info->enable(pps_gen, status);
if (ret)
return ret;
pps_gen->enabled = status;
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 90d391210533..6f89b232f1d5 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -114,7 +114,7 @@ static struct attribute *rio_dev_attrs[] = {
static ssize_t
rio_read_config(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct rio_dev *dev = to_rio_dev(kobj_to_dev(kobj));
@@ -185,7 +185,7 @@ rio_read_config(struct file *filp, struct kobject *kobj,
static ssize_t
rio_write_config(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct rio_dev *dev = to_rio_dev(kobj_to_dev(kobj));
@@ -241,17 +241,17 @@ rio_write_config(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute rio_config_attr = {
+static const struct bin_attribute rio_config_attr = {
.attr = {
.name = "config",
.mode = S_IRUGO | S_IWUSR,
},
.size = RIO_MAINT_SPACE_SZ,
- .read = rio_read_config,
- .write = rio_write_config,
+ .read_new = rio_read_config,
+ .write_new = rio_write_config,
};
-static struct bin_attribute *rio_dev_bin_attrs[] = {
+static const struct bin_attribute *const rio_dev_bin_attrs[] = {
&rio_config_attr,
NULL,
};
@@ -278,7 +278,7 @@ static umode_t rio_dev_is_attr_visible(struct kobject *kobj,
static const struct attribute_group rio_dev_group = {
.attrs = rio_dev_attrs,
.is_visible = rio_dev_is_attr_visible,
- .bin_attrs = rio_dev_bin_attrs,
+ .bin_attrs_new = rio_dev_bin_attrs,
};
const struct attribute_group *rio_dev_groups[] = {
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 00a7f3617cd8..90629a756693 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -5282,8 +5282,8 @@ static void regulator_handle_critical(struct regulator_dev *rdev,
if (!reason)
return;
- hw_protection_shutdown(reason,
- rdev->constraints->uv_less_critical_window_ms);
+ hw_protection_trigger(reason,
+ rdev->constraints->uv_less_critical_window_ms);
}
/**
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
index 9f59889129ab..e5197ec7234d 100644
--- a/drivers/regulator/dummy.c
+++ b/drivers/regulator/dummy.c
@@ -13,7 +13,7 @@
#include <linux/err.h>
#include <linux/export.h>
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
@@ -37,15 +37,15 @@ static const struct regulator_desc dummy_desc = {
.ops = &dummy_ops,
};
-static int dummy_regulator_probe(struct platform_device *pdev)
+static int dummy_regulator_probe(struct faux_device *fdev)
{
struct regulator_config config = { };
int ret;
- config.dev = &pdev->dev;
+ config.dev = &fdev->dev;
config.init_data = &dummy_initdata;
- dummy_regulator_rdev = devm_regulator_register(&pdev->dev, &dummy_desc,
+ dummy_regulator_rdev = devm_regulator_register(&fdev->dev, &dummy_desc,
&config);
if (IS_ERR(dummy_regulator_rdev)) {
ret = PTR_ERR(dummy_regulator_rdev);
@@ -56,36 +56,17 @@ static int dummy_regulator_probe(struct platform_device *pdev)
return 0;
}
-static struct platform_driver dummy_regulator_driver = {
- .probe = dummy_regulator_probe,
- .driver = {
- .name = "reg-dummy",
- .probe_type = PROBE_FORCE_SYNCHRONOUS,
- },
+struct faux_device_ops dummy_regulator_driver = {
+ .probe = dummy_regulator_probe,
};
-static struct platform_device *dummy_pdev;
+static struct faux_device *dummy_fdev;
void __init regulator_dummy_init(void)
{
- int ret;
-
- dummy_pdev = platform_device_alloc("reg-dummy", -1);
- if (!dummy_pdev) {
+ dummy_fdev = faux_device_create("reg-dummy", NULL, &dummy_regulator_driver);
+ if (!dummy_fdev) {
pr_err("Failed to allocate dummy regulator device\n");
return;
}
-
- ret = platform_device_add(dummy_pdev);
- if (ret != 0) {
- pr_err("Failed to register dummy regulator device: %d\n", ret);
- platform_device_put(dummy_pdev);
- return;
- }
-
- ret = platform_driver_register(&dummy_regulator_driver);
- if (ret != 0) {
- pr_err("Failed to register dummy regulator driver: %d\n", ret);
- platform_device_unregister(dummy_pdev);
- }
}
diff --git a/drivers/regulator/irq_helpers.c b/drivers/regulator/irq_helpers.c
index 0aa188b2bbb2..5742faee8071 100644
--- a/drivers/regulator/irq_helpers.c
+++ b/drivers/regulator/irq_helpers.c
@@ -64,16 +64,16 @@ static void regulator_notifier_isr_work(struct work_struct *work)
reread:
if (d->fatal_cnt && h->retry_cnt > d->fatal_cnt) {
if (!d->die)
- return hw_protection_shutdown("Regulator HW failure? - no IC recovery",
- REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
+ return hw_protection_trigger("Regulator HW failure? - no IC recovery",
+ REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
ret = d->die(rid);
/*
* If the 'last resort' IC recovery failed we will have
* nothing else left to do...
*/
if (ret)
- return hw_protection_shutdown("Regulator HW failure. IC recovery failed",
- REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
+ return hw_protection_trigger("Regulator HW failure. IC recovery failed",
+ REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
/*
* If h->die() was implemented we assume recovery has been
@@ -263,14 +263,14 @@ fail_out:
if (d->fatal_cnt && h->retry_cnt > d->fatal_cnt) {
/* If we have no recovery, just try shut down straight away */
if (!d->die) {
- hw_protection_shutdown("Regulator failure. Retry count exceeded",
- REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
+ hw_protection_trigger("Regulator failure. Retry count exceeded",
+ REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
} else {
ret = d->die(rid);
/* If die() failed shut down as a last attempt to save the HW */
if (ret)
- hw_protection_shutdown("Regulator failure. Recovery failed",
- REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
+ hw_protection_trigger("Regulator failure. Recovery failed",
+ REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
}
}
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 7d82bd1b36df..1e8142479656 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -270,8 +270,8 @@ static const unsigned int rk817_buck1_4_ramp_table[] = {
static int rk806_set_mode_dcdc(struct regulator_dev *rdev, unsigned int mode)
{
- int rid = rdev_get_id(rdev);
- int ctr_bit, reg;
+ unsigned int rid = rdev_get_id(rdev);
+ unsigned int ctr_bit, reg;
reg = RK806_POWER_FPWM_EN0 + rid / 8;
ctr_bit = rid % 8;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 0bbbf778ecfa..838bdc138ffe 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1321,13 +1321,6 @@ config RTC_DRV_SPEAR
If you say Y here you will get support for the RTC found on
spear
-config RTC_DRV_PCF50633
- depends on MFD_PCF50633
- tristate "NXP PCF50633 RTC"
- help
- If you say yes here you get support for the RTC subsystem of the
- NXP PCF50633 used in embedded systems.
-
config RTC_DRV_AB8500
tristate "ST-Ericsson AB8500 RTC"
depends on AB8500_CORE
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 489b4ab07068..31473b3276d9 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -126,7 +126,6 @@ obj-$(CONFIG_RTC_DRV_PALMAS) += rtc-palmas.o
obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o
obj-$(CONFIG_RTC_DRV_PCF2123) += rtc-pcf2123.o
obj-$(CONFIG_RTC_DRV_PCF2127) += rtc-pcf2127.o
-obj-$(CONFIG_RTC_DRV_PCF50633) += rtc-pcf50633.o
obj-$(CONFIG_RTC_DRV_PCF85063) += rtc-pcf85063.o
obj-$(CONFIG_RTC_DRV_PCF8523) += rtc-pcf8523.o
obj-$(CONFIG_RTC_DRV_PCF85363) += rtc-pcf85363.o
diff --git a/drivers/rtc/rtc-ab-eoz9.c b/drivers/rtc/rtc-ab-eoz9.c
index d2b60487d462..de002f7a39bf 100644
--- a/drivers/rtc/rtc-ab-eoz9.c
+++ b/drivers/rtc/rtc-ab-eoz9.c
@@ -426,29 +426,9 @@ static umode_t abeoz9_is_visible(const void *data,
}
}
-static const u32 abeoz9_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info abeoz9_chip = {
- .type = hwmon_chip,
- .config = abeoz9_chip_config,
-};
-
-static const u32 abeoz9_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN,
- 0
-};
-
-static const struct hwmon_channel_info abeoz9_temp = {
- .type = hwmon_temp,
- .config = abeoz9_temp_config,
-};
-
static const struct hwmon_channel_info * const abeoz9_info[] = {
- &abeoz9_chip,
- &abeoz9_temp,
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN),
NULL
};
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index 2dcda96f4a8e..ed2b6b8bb3bf 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -361,7 +361,7 @@ static int ab8500_rtc_probe(struct platform_device *pdev)
return -ENODEV;
}
- device_init_wakeup(&pdev->dev, true);
+ devm_device_init_wakeup(&pdev->dev);
rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc))
@@ -375,7 +375,7 @@ static int ab8500_rtc_probe(struct platform_device *pdev)
if (err < 0)
return err;
- dev_pm_set_wake_irq(&pdev->dev, irq);
+ devm_pm_set_wake_irq(&pdev->dev, irq);
platform_set_drvdata(pdev, rtc);
set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features);
@@ -392,18 +392,11 @@ static int ab8500_rtc_probe(struct platform_device *pdev)
return devm_rtc_register_device(rtc);
}
-static void ab8500_rtc_remove(struct platform_device *pdev)
-{
- dev_pm_clear_wake_irq(&pdev->dev);
- device_init_wakeup(&pdev->dev, false);
-}
-
static struct platform_driver ab8500_rtc_driver = {
.driver = {
.name = "ab8500-rtc",
},
.probe = ab8500_rtc_probe,
- .remove = ab8500_rtc_remove,
.id_table = ab85xx_rtc_ids,
};
diff --git a/drivers/rtc/rtc-aspeed.c b/drivers/rtc/rtc-aspeed.c
index 880b015eebaf..0d0053b52f9b 100644
--- a/drivers/rtc/rtc-aspeed.c
+++ b/drivers/rtc/rtc-aspeed.c
@@ -8,7 +8,6 @@
#include <linux/io.h>
struct aspeed_rtc {
- struct rtc_device *rtc_dev;
void __iomem *base;
};
@@ -85,6 +84,7 @@ static const struct rtc_class_ops aspeed_rtc_ops = {
static int aspeed_rtc_probe(struct platform_device *pdev)
{
struct aspeed_rtc *rtc;
+ struct rtc_device *rtc_dev;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
@@ -94,17 +94,17 @@ static int aspeed_rtc_probe(struct platform_device *pdev)
if (IS_ERR(rtc->base))
return PTR_ERR(rtc->base);
- rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(rtc->rtc_dev))
- return PTR_ERR(rtc->rtc_dev);
+ rtc_dev = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc_dev))
+ return PTR_ERR(rtc_dev);
platform_set_drvdata(pdev, rtc);
- rtc->rtc_dev->ops = &aspeed_rtc_ops;
- rtc->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_1900;
- rtc->rtc_dev->range_max = 38814989399LL; /* 3199-12-31 23:59:59 */
+ rtc_dev->ops = &aspeed_rtc_ops;
+ rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_1900;
+ rtc_dev->range_max = 38814989399LL; /* 3199-12-31 23:59:59 */
- return devm_rtc_register_device(rtc->rtc_dev);
+ return devm_rtc_register_device(rtc_dev);
}
static const struct of_device_id aspeed_rtc_match[] = {
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index 865c2e82c7a5..e956505a06fb 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -35,21 +35,18 @@ struct cros_ec_rtc {
static int cros_ec_rtc_get(struct cros_ec_device *cros_ec, u32 command,
u32 *response)
{
+ DEFINE_RAW_FLEX(struct cros_ec_command, msg, data,
+ sizeof(struct ec_response_rtc));
int ret;
- struct {
- struct cros_ec_command msg;
- struct ec_response_rtc data;
- } __packed msg;
- memset(&msg, 0, sizeof(msg));
- msg.msg.command = command;
- msg.msg.insize = sizeof(msg.data);
+ msg->command = command;
+ msg->insize = sizeof(struct ec_response_rtc);
- ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
+ ret = cros_ec_cmd_xfer_status(cros_ec, msg);
if (ret < 0)
return ret;
- *response = msg.data.time;
+ *response = ((struct ec_response_rtc *)msg->data)->time;
return 0;
}
@@ -57,18 +54,15 @@ static int cros_ec_rtc_get(struct cros_ec_device *cros_ec, u32 command,
static int cros_ec_rtc_set(struct cros_ec_device *cros_ec, u32 command,
u32 param)
{
+ DEFINE_RAW_FLEX(struct cros_ec_command, msg, data,
+ sizeof(struct ec_response_rtc));
int ret;
- struct {
- struct cros_ec_command msg;
- struct ec_response_rtc data;
- } __packed msg;
- memset(&msg, 0, sizeof(msg));
- msg.msg.command = command;
- msg.msg.outsize = sizeof(msg.data);
- msg.data.time = param;
+ msg->command = command;
+ msg->outsize = sizeof(struct ec_response_rtc);
+ ((struct ec_response_rtc *)msg->data)->time = param;
- ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
+ ret = cros_ec_cmd_xfer_status(cros_ec, msg);
if (ret < 0)
return ret;
return 0;
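DEFINE_RAW_FLEX() (from <linux/overflow.h>) declares a zero-initialized, stack-backed instance of a flexible-array struct sized for a given number of trailing elements, replacing the hand-rolled __packed wrapper structs removed above. A minimal sketch with a hypothetical struct:

	#include <linux/overflow.h>
	#include <linux/types.h>

	struct demo_msg {
		u32 command;
		u8 data[];		/* flexible array member */
	};

	static void demo(void)
	{
		/* header plus 8 zeroed payload bytes on the stack */
		DEFINE_RAW_FLEX(struct demo_msg, msg, data, 8);

		msg->command = 0x42;
		msg->data[0] = 1;
	}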
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 872e0b679be4..5efbe69bf5ca 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -1807,10 +1807,8 @@ static int ds1307_probe(struct i2c_client *client)
* For some variants, be sure alarms can trigger when we're
* running on Vbackup (BBSQI/BBSQW)
*/
- if (want_irq || ds1307_can_wakeup_device) {
+ if (want_irq || ds1307_can_wakeup_device)
regs[0] |= DS1337_BIT_INTCN | chip->bbsqi_bit;
- regs[0] &= ~(DS1337_BIT_A2IE | DS1337_BIT_A1IE);
- }
regmap_write(ds1307->regmap, DS1337_REG_CONTROL,
regs[0]);
diff --git a/drivers/rtc/rtc-ds1343.c b/drivers/rtc/rtc-ds1343.c
index ed5a6ba89a3e..aa9500791b7e 100644
--- a/drivers/rtc/rtc-ds1343.c
+++ b/drivers/rtc/rtc-ds1343.c
@@ -427,18 +427,13 @@ static int ds1343_probe(struct spi_device *spi)
"unable to request irq for rtc ds1343\n");
} else {
device_init_wakeup(&spi->dev, true);
- dev_pm_set_wake_irq(&spi->dev, spi->irq);
+ devm_pm_set_wake_irq(&spi->dev, spi->irq);
}
}
return 0;
}
-static void ds1343_remove(struct spi_device *spi)
-{
- dev_pm_clear_wake_irq(&spi->dev);
-}
-
#ifdef CONFIG_PM_SLEEP
static int ds1343_suspend(struct device *dev)
@@ -471,7 +466,6 @@ static struct spi_driver ds1343_driver = {
.pm = &ds1343_pm,
},
.probe = ds1343_probe,
- .remove = ds1343_remove,
.id_table = ds1343_id,
};
diff --git a/drivers/rtc/rtc-ds2404.c b/drivers/rtc/rtc-ds2404.c
index 3231fd9f61da..217694eca36c 100644
--- a/drivers/rtc/rtc-ds2404.c
+++ b/drivers/rtc/rtc-ds2404.c
@@ -31,7 +31,6 @@ struct ds2404 {
struct gpio_desc *rst_gpiod;
struct gpio_desc *clk_gpiod;
struct gpio_desc *dq_gpiod;
- struct rtc_device *rtc;
};
static int ds2404_gpio_map(struct ds2404 *chip, struct platform_device *pdev)
@@ -182,6 +181,7 @@ static const struct rtc_class_ops ds2404_rtc_ops = {
static int rtc_probe(struct platform_device *pdev)
{
struct ds2404 *chip;
+ struct rtc_device *rtc;
int retval = -EBUSY;
chip = devm_kzalloc(&pdev->dev, sizeof(struct ds2404), GFP_KERNEL);
@@ -190,9 +190,9 @@ static int rtc_probe(struct platform_device *pdev)
chip->dev = &pdev->dev;
- chip->rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(chip->rtc))
- return PTR_ERR(chip->rtc);
+ rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
retval = ds2404_gpio_map(chip, pdev);
if (retval)
@@ -200,10 +200,10 @@ static int rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, chip);
- chip->rtc->ops = &ds2404_rtc_ops;
- chip->rtc->range_max = U32_MAX;
+ rtc->ops = &ds2404_rtc_ops;
+ rtc->range_max = U32_MAX;
- retval = devm_rtc_register_device(chip->rtc);
+ retval = devm_rtc_register_device(rtc);
if (retval)
return retval;
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 19c09c418746..18f35823b4b5 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -339,29 +339,9 @@ static int ds3232_hwmon_read(struct device *dev,
return err;
}
-static u32 ds3232_hwmon_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info ds3232_hwmon_chip = {
- .type = hwmon_chip,
- .config = ds3232_hwmon_chip_config,
-};
-
-static u32 ds3232_hwmon_temp_config[] = {
- HWMON_T_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info ds3232_hwmon_temp = {
- .type = hwmon_temp,
- .config = ds3232_hwmon_temp_config,
-};
-
static const struct hwmon_channel_info * const ds3232_hwmon_info[] = {
- &ds3232_hwmon_chip,
- &ds3232_hwmon_temp,
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
NULL
};
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 1fdd20d01560..dcdcdd06f30d 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -28,7 +28,6 @@
struct ep93xx_rtc {
void __iomem *mmio_base;
- struct rtc_device *rtc;
};
static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
@@ -123,6 +122,7 @@ static const struct attribute_group ep93xx_rtc_sysfs_files = {
static int ep93xx_rtc_probe(struct platform_device *pdev)
{
struct ep93xx_rtc *ep93xx_rtc;
+ struct rtc_device *rtc;
int err;
ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL);
@@ -135,18 +135,18 @@ static int ep93xx_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ep93xx_rtc);
- ep93xx_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(ep93xx_rtc->rtc))
- return PTR_ERR(ep93xx_rtc->rtc);
+ rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
- ep93xx_rtc->rtc->ops = &ep93xx_rtc_ops;
- ep93xx_rtc->rtc->range_max = U32_MAX;
+ rtc->ops = &ep93xx_rtc_ops;
+ rtc->range_max = U32_MAX;
- err = rtc_add_group(ep93xx_rtc->rtc, &ep93xx_rtc_sysfs_files);
+ err = rtc_add_group(rtc, &ep93xx_rtc_sysfs_files);
if (err)
return err;
- return devm_rtc_register_device(ep93xx_rtc->rtc);
+ return devm_rtc_register_device(rtc);
}
static const struct of_device_id ep93xx_rtc_of_ids[] = {
diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
index a72c4ad0cec6..c8015f04c71f 100644
--- a/drivers/rtc/rtc-fsl-ftm-alarm.c
+++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
@@ -309,7 +309,7 @@ static const struct of_device_id ftm_rtc_match[] = {
};
MODULE_DEVICE_TABLE(of, ftm_rtc_match);
-static const struct acpi_device_id ftm_imx_acpi_ids[] = {
+static const struct acpi_device_id ftm_imx_acpi_ids[] __maybe_unused = {
{"NXP0014",},
{ }
};
diff --git a/drivers/rtc/rtc-ftrtc010.c b/drivers/rtc/rtc-ftrtc010.c
index cb4a5d101f53..02608d378495 100644
--- a/drivers/rtc/rtc-ftrtc010.c
+++ b/drivers/rtc/rtc-ftrtc010.c
@@ -28,7 +28,6 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
struct ftrtc010_rtc {
- struct rtc_device *rtc_dev;
void __iomem *rtc_base;
int rtc_irq;
struct clk *pclk;
@@ -113,6 +112,7 @@ static int ftrtc010_rtc_probe(struct platform_device *pdev)
struct ftrtc010_rtc *rtc;
struct device *dev = &pdev->dev;
struct resource *res;
+ struct rtc_device *rtc_dev;
int ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
@@ -160,29 +160,28 @@ static int ftrtc010_rtc_probe(struct platform_device *pdev)
goto err_disable_extclk;
}
- rtc->rtc_dev = devm_rtc_allocate_device(dev);
- if (IS_ERR(rtc->rtc_dev)) {
- ret = PTR_ERR(rtc->rtc_dev);
+ rtc_dev = devm_rtc_allocate_device(dev);
+ if (IS_ERR(rtc_dev)) {
+ ret = PTR_ERR(rtc_dev);
goto err_disable_extclk;
}
- rtc->rtc_dev->ops = &ftrtc010_rtc_ops;
+ rtc_dev->ops = &ftrtc010_rtc_ops;
sec = readl(rtc->rtc_base + FTRTC010_RTC_SECOND);
min = readl(rtc->rtc_base + FTRTC010_RTC_MINUTE);
hour = readl(rtc->rtc_base + FTRTC010_RTC_HOUR);
days = readl(rtc->rtc_base + FTRTC010_RTC_DAYS);
- rtc->rtc_dev->range_min = (u64)days * 86400 + hour * 3600 +
- min * 60 + sec;
- rtc->rtc_dev->range_max = U32_MAX + rtc->rtc_dev->range_min;
+ rtc_dev->range_min = (u64)days * 86400 + hour * 3600 + min * 60 + sec;
+ rtc_dev->range_max = U32_MAX + rtc_dev->range_min;
ret = devm_request_irq(dev, rtc->rtc_irq, ftrtc010_rtc_interrupt,
IRQF_SHARED, pdev->name, dev);
if (unlikely(ret))
goto err_disable_extclk;
- return devm_rtc_register_device(rtc->rtc_dev);
+ return devm_rtc_register_device(rtc_dev);
err_disable_extclk:
clk_disable_unprepare(rtc->extclk);
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index dd4a62e2d39c..10cd054fe86f 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -41,7 +41,6 @@
struct m48t86_rtc_info {
void __iomem *index_reg;
void __iomem *data_reg;
- struct rtc_device *rtc;
};
static unsigned char m48t86_readb(struct device *dev, unsigned long addr)
@@ -219,6 +218,7 @@ static bool m48t86_verify_chip(struct platform_device *pdev)
static int m48t86_rtc_probe(struct platform_device *pdev)
{
struct m48t86_rtc_info *info;
+ struct rtc_device *rtc;
unsigned char reg;
int err;
struct nvmem_config m48t86_nvmem_cfg = {
@@ -250,17 +250,17 @@ static int m48t86_rtc_probe(struct platform_device *pdev)
return -ENODEV;
}
- info->rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(info->rtc))
- return PTR_ERR(info->rtc);
+ rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
- info->rtc->ops = &m48t86_rtc_ops;
+ rtc->ops = &m48t86_rtc_ops;
- err = devm_rtc_register_device(info->rtc);
+ err = devm_rtc_register_device(rtc);
if (err)
return err;
- devm_rtc_nvmem_register(info->rtc, &m48t86_nvmem_cfg);
+ devm_rtc_nvmem_register(rtc, &m48t86_nvmem_cfg);
/* read battery status */
reg = m48t86_readb(&pdev->dev, M48T86_D);
diff --git a/drivers/rtc/rtc-max31335.c b/drivers/rtc/rtc-max31335.c
index 3fbcf5f6b92f..a7bb37aaab9e 100644
--- a/drivers/rtc/rtc-max31335.c
+++ b/drivers/rtc/rtc-max31335.c
@@ -184,31 +184,91 @@
#define MAX31335_RAM_SIZE 32
#define MAX31335_TIME_SIZE 0x07
+/* MAX31331 Register Map */
+#define MAX31331_RTC_CONFIG2 0x04
+
#define clk_hw_to_max31335(_hw) container_of(_hw, struct max31335_data, clkout)
+/* Supported Maxim RTC */
+enum max_rtc_ids {
+ ID_MAX31331,
+ ID_MAX31335,
+ MAX_RTC_ID_NR
+};
+
+struct chip_desc {
+ u8 sec_reg;
+ u8 alarm1_sec_reg;
+
+ u8 int_en_reg;
+ u8 int_status_reg;
+
+ u8 ram_reg;
+ u8 ram_size;
+
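+ /* zero = no temperature sensor (e.g. MAX31331) */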
+ u8 temp_reg;
+
+ u8 trickle_reg;
+
+ u8 clkout_reg;
+
+ enum max_rtc_ids id;
+};
+
struct max31335_data {
struct regmap *regmap;
struct rtc_device *rtc;
struct clk_hw clkout;
+ struct clk *clkin;
+ const struct chip_desc *chip;
+ int irq;
};
static const int max31335_clkout_freq[] = { 1, 64, 1024, 32768 };
+static const struct chip_desc chip[MAX_RTC_ID_NR] = {
+ [ID_MAX31331] = {
+ .id = ID_MAX31331,
+ .int_en_reg = 0x01,
+ .int_status_reg = 0x00,
+ .sec_reg = 0x08,
+ .alarm1_sec_reg = 0x0F,
+ .ram_reg = 0x20,
+ .ram_size = 32,
+ .trickle_reg = 0x1B,
+ .clkout_reg = 0x04,
+ },
+ [ID_MAX31335] = {
+ .id = ID_MAX31335,
+ .int_en_reg = 0x01,
+ .int_status_reg = 0x00,
+ .sec_reg = 0x0A,
+ .alarm1_sec_reg = 0x11,
+ .ram_reg = 0x40,
+ .ram_size = 32,
+ .temp_reg = 0x35,
+ .trickle_reg = 0x1D,
+ .clkout_reg = 0x06,
+ },
+};
+
static const u16 max31335_trickle_resistors[] = {3000, 6000, 11000};
static bool max31335_volatile_reg(struct device *dev, unsigned int reg)
{
+ struct max31335_data *max31335 = dev_get_drvdata(dev);
+ const struct chip_desc *chip = max31335->chip;
+
/* time keeping registers */
- if (reg >= MAX31335_SECONDS &&
- reg < MAX31335_SECONDS + MAX31335_TIME_SIZE)
+ if (reg >= chip->sec_reg && reg < chip->sec_reg + MAX31335_TIME_SIZE)
return true;
/* interrupt status register */
- if (reg == MAX31335_STATUS1)
+ if (reg == chip->int_status_reg)
return true;
- /* temperature registers */
- if (reg == MAX31335_TEMP_DATA_MSB || reg == MAX31335_TEMP_DATA_LSB)
+ /* temperature registers if valid */
+ if (chip->temp_reg && (reg == chip->temp_reg || reg == chip->temp_reg + 1))
return true;
return false;
@@ -227,7 +287,7 @@ static int max31335_read_time(struct device *dev, struct rtc_time *tm)
u8 date[7];
int ret;
- ret = regmap_bulk_read(max31335->regmap, MAX31335_SECONDS, date,
+ ret = regmap_bulk_read(max31335->regmap, max31335->chip->sec_reg, date,
sizeof(date));
if (ret)
return ret;
@@ -262,7 +322,7 @@ static int max31335_set_time(struct device *dev, struct rtc_time *tm)
if (tm->tm_year >= 200)
date[5] |= FIELD_PREP(MAX31335_MONTH_CENTURY, 1);
- return regmap_bulk_write(max31335->regmap, MAX31335_SECONDS, date,
+ return regmap_bulk_write(max31335->regmap, max31335->chip->sec_reg, date,
sizeof(date));
}
@@ -273,7 +333,7 @@ static int max31335_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
struct rtc_time time;
u8 regs[6];
- ret = regmap_bulk_read(max31335->regmap, MAX31335_ALM1_SEC, regs,
+ ret = regmap_bulk_read(max31335->regmap, max31335->chip->alarm1_sec_reg, regs,
sizeof(regs));
if (ret)
return ret;
@@ -292,11 +352,11 @@ static int max31335_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
if (time.tm_year >= 200)
alrm->time.tm_year += 100;
- ret = regmap_read(max31335->regmap, MAX31335_INT_EN1, &ctrl);
+ ret = regmap_read(max31335->regmap, max31335->chip->int_en_reg, &ctrl);
if (ret)
return ret;
- ret = regmap_read(max31335->regmap, MAX31335_STATUS1, &status);
+ ret = regmap_read(max31335->regmap, max31335->chip->int_status_reg, &status);
if (ret)
return ret;
@@ -320,18 +380,18 @@ static int max31335_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
regs[4] = bin2bcd(alrm->time.tm_mon + 1);
regs[5] = bin2bcd(alrm->time.tm_year % 100);
- ret = regmap_bulk_write(max31335->regmap, MAX31335_ALM1_SEC,
+ ret = regmap_bulk_write(max31335->regmap, max31335->chip->alarm1_sec_reg,
regs, sizeof(regs));
if (ret)
return ret;
reg = FIELD_PREP(MAX31335_INT_EN1_A1IE, alrm->enabled);
- ret = regmap_update_bits(max31335->regmap, MAX31335_INT_EN1,
+ ret = regmap_update_bits(max31335->regmap, max31335->chip->int_en_reg,
MAX31335_INT_EN1_A1IE, reg);
if (ret)
return ret;
- ret = regmap_update_bits(max31335->regmap, MAX31335_STATUS1,
+ ret = regmap_update_bits(max31335->regmap, max31335->chip->int_status_reg,
MAX31335_STATUS1_A1F, 0);
return 0;
@@ -341,23 +401,33 @@ static int max31335_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct max31335_data *max31335 = dev_get_drvdata(dev);
- return regmap_update_bits(max31335->regmap, MAX31335_INT_EN1,
+ return regmap_update_bits(max31335->regmap, max31335->chip->int_en_reg,
MAX31335_INT_EN1_A1IE, enabled);
}
static irqreturn_t max31335_handle_irq(int irq, void *dev_id)
{
struct max31335_data *max31335 = dev_id;
- bool status;
- int ret;
+ struct mutex *lock = &max31335->rtc->ops_lock;
+ int ret, status;
- ret = regmap_update_bits_check(max31335->regmap, MAX31335_STATUS1,
- MAX31335_STATUS1_A1F, 0, &status);
+ mutex_lock(lock);
+
+ ret = regmap_read(max31335->regmap, max31335->chip->int_status_reg, &status);
if (ret)
- return IRQ_HANDLED;
+ goto exit;
+
+ if (FIELD_GET(MAX31335_STATUS1_A1F, status)) {
+ ret = regmap_update_bits(max31335->regmap, max31335->chip->int_status_reg,
+ MAX31335_STATUS1_A1F, 0);
+ if (ret)
+ goto exit;
- if (status)
rtc_update_irq(max31335->rtc, 1, RTC_AF | RTC_IRQF);
+ }
+
+exit:
+ mutex_unlock(lock);
return IRQ_HANDLED;
}
@@ -404,7 +474,7 @@ static int max31335_trickle_charger_setup(struct device *dev,
i = i + trickle_cfg;
- return regmap_write(max31335->regmap, MAX31335_TRICKLE_REG,
+ return regmap_write(max31335->regmap, max31335->chip->trickle_reg,
FIELD_PREP(MAX31335_TRICKLE_REG_TRICKLE, i) |
FIELD_PREP(MAX31335_TRICKLE_REG_EN_TRICKLE,
chargeable));
@@ -418,7 +488,7 @@ static unsigned long max31335_clkout_recalc_rate(struct clk_hw *hw,
unsigned int reg;
int ret;
- ret = regmap_read(max31335->regmap, MAX31335_RTC_CONFIG2, &reg);
+ ret = regmap_read(max31335->regmap, max31335->chip->clkout_reg, &reg);
if (ret)
return 0;
@@ -449,23 +519,23 @@ static int max31335_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
ARRAY_SIZE(max31335_clkout_freq));
freq_mask = __roundup_pow_of_two(ARRAY_SIZE(max31335_clkout_freq)) - 1;
- return regmap_update_bits(max31335->regmap, MAX31335_RTC_CONFIG2,
- freq_mask, index);
+ return regmap_update_bits(max31335->regmap, max31335->chip->clkout_reg,
+ freq_mask, index);
}
static int max31335_clkout_enable(struct clk_hw *hw)
{
struct max31335_data *max31335 = clk_hw_to_max31335(hw);
- return regmap_set_bits(max31335->regmap, MAX31335_RTC_CONFIG2,
- MAX31335_RTC_CONFIG2_ENCLKO);
+ return regmap_set_bits(max31335->regmap, max31335->chip->clkout_reg,
+ MAX31335_RTC_CONFIG2_ENCLKO);
}
static void max31335_clkout_disable(struct clk_hw *hw)
{
struct max31335_data *max31335 = clk_hw_to_max31335(hw);
- regmap_clear_bits(max31335->regmap, MAX31335_RTC_CONFIG2,
+ regmap_clear_bits(max31335->regmap, max31335->chip->clkout_reg,
MAX31335_RTC_CONFIG2_ENCLKO);
}
@@ -475,7 +545,7 @@ static int max31335_clkout_is_enabled(struct clk_hw *hw)
unsigned int reg;
int ret;
- ret = regmap_read(max31335->regmap, MAX31335_RTC_CONFIG2, &reg);
+ ret = regmap_read(max31335->regmap, max31335->chip->clkout_reg, &reg);
if (ret)
return ret;
@@ -500,7 +570,7 @@ static int max31335_nvmem_reg_read(void *priv, unsigned int offset,
void *val, size_t bytes)
{
struct max31335_data *max31335 = priv;
- unsigned int reg = MAX31335_TS0_SEC_1_128 + offset;
+ unsigned int reg = max31335->chip->ram_reg + offset;
return regmap_bulk_read(max31335->regmap, reg, val, bytes);
}
@@ -509,7 +579,7 @@ static int max31335_nvmem_reg_write(void *priv, unsigned int offset,
void *val, size_t bytes)
{
struct max31335_data *max31335 = priv;
- unsigned int reg = MAX31335_TS0_SEC_1_128 + offset;
+ unsigned int reg = max31335->chip->ram_reg + offset;
return regmap_bulk_write(max31335->regmap, reg, val, bytes);
}
@@ -533,7 +603,7 @@ static int max31335_read_temp(struct device *dev, enum hwmon_sensor_types type,
if (type != hwmon_temp || attr != hwmon_temp_input)
return -EOPNOTSUPP;
- ret = regmap_bulk_read(max31335->regmap, MAX31335_TEMP_DATA_MSB,
+ ret = regmap_bulk_read(max31335->regmap, max31335->chip->temp_reg,
reg, 2);
if (ret)
return ret;
@@ -577,8 +647,8 @@ static int max31335_clkout_register(struct device *dev)
int ret;
if (!device_property_present(dev, "#clock-cells"))
- return regmap_clear_bits(max31335->regmap, MAX31335_RTC_CONFIG2,
- MAX31335_RTC_CONFIG2_ENCLKO);
+ return regmap_clear_bits(max31335->regmap, max31335->chip->clkout_reg,
+ MAX31335_RTC_CONFIG2_ENCLKO);
max31335->clkout.init = &max31335_clk_init;
@@ -605,6 +675,7 @@ static int max31335_probe(struct i2c_client *client)
#if IS_REACHABLE(HWMON)
struct device *hwmon;
#endif
+ const struct chip_desc *match;
int ret;
max31335 = devm_kzalloc(&client->dev, sizeof(*max31335), GFP_KERNEL);
@@ -616,7 +687,10 @@ static int max31335_probe(struct i2c_client *client)
return PTR_ERR(max31335->regmap);
i2c_set_clientdata(client, max31335);
-
+ match = i2c_get_match_data(client);
+ if (!match)
+ return -ENODEV;
+ max31335->chip = match;
max31335->rtc = devm_rtc_allocate_device(&client->dev);
if (IS_ERR(max31335->rtc))
return PTR_ERR(max31335->rtc);
@@ -639,6 +713,8 @@ static int max31335_probe(struct i2c_client *client)
dev_warn(&client->dev,
"unable to request IRQ, alarm max31335 disabled\n");
client->irq = 0;
+ } else {
+ max31335->irq = client->irq;
}
}
@@ -652,13 +728,13 @@ static int max31335_probe(struct i2c_client *client)
"cannot register rtc nvmem\n");
#if IS_REACHABLE(HWMON)
- hwmon = devm_hwmon_device_register_with_info(&client->dev, client->name,
- max31335,
- &max31335_chip_info,
- NULL);
- if (IS_ERR(hwmon))
- return dev_err_probe(&client->dev, PTR_ERR(hwmon),
- "cannot register hwmon device\n");
+ if (max31335->chip->temp_reg) {
+ hwmon = devm_hwmon_device_register_with_info(&client->dev, client->name, max31335,
+ &max31335_chip_info, NULL);
+ if (IS_ERR(hwmon))
+ return dev_err_probe(&client->dev, PTR_ERR(hwmon),
+ "cannot register hwmon device\n");
+ }
#endif
ret = max31335_trickle_charger_setup(&client->dev, max31335);
@@ -669,14 +745,16 @@ static int max31335_probe(struct i2c_client *client)
}
static const struct i2c_device_id max31335_id[] = {
- { "max31335" },
+ { "max31331", (kernel_ulong_t)&chip[ID_MAX31331] },
+ { "max31335", (kernel_ulong_t)&chip[ID_MAX31335] },
{ }
};
MODULE_DEVICE_TABLE(i2c, max31335_id);
static const struct of_device_id max31335_of_match[] = {
- { .compatible = "adi,max31335" },
+ { .compatible = "adi,max31331", .data = &chip[ID_MAX31331] },
+ { .compatible = "adi,max31335", .data = &chip[ID_MAX31335] },
{ }
};
@@ -693,5 +771,6 @@ static struct i2c_driver max31335_driver = {
module_i2c_driver(max31335_driver);
MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>");
+MODULE_AUTHOR("Saket Kumar Purwar <Saket.Kumarpurwar@analog.com>");
MODULE_DESCRIPTION("MAX31335 RTC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
index 7bb044d2ac25..69ea3ce75b5a 100644
--- a/drivers/rtc/rtc-max77686.c
+++ b/drivers/rtc/rtc-max77686.c
@@ -85,7 +85,6 @@ struct max77686_rtc_driver_data {
struct max77686_rtc_info {
struct device *dev;
- struct i2c_client *rtc;
struct rtc_device *rtc_dev;
struct mutex lock;
@@ -691,6 +690,7 @@ static int max77686_init_rtc_regmap(struct max77686_rtc_info *info)
{
struct device *parent = info->dev->parent;
struct i2c_client *parent_i2c = to_i2c_client(parent);
+ struct i2c_client *client;
int ret;
if (info->drv_data->rtc_irq_from_platform) {
@@ -704,40 +704,35 @@ static int max77686_init_rtc_regmap(struct max77686_rtc_info *info)
}
info->regmap = dev_get_regmap(parent, NULL);
- if (!info->regmap) {
- dev_err(info->dev, "Failed to get rtc regmap\n");
- return -ENODEV;
- }
+ if (!info->regmap)
+ return dev_err_probe(info->dev, -ENODEV,
+ "Failed to get rtc regmap\n");
if (info->drv_data->rtc_i2c_addr == MAX77686_INVALID_I2C_ADDR) {
info->rtc_regmap = info->regmap;
goto add_rtc_irq;
}
- info->rtc = devm_i2c_new_dummy_device(info->dev, parent_i2c->adapter,
- info->drv_data->rtc_i2c_addr);
- if (IS_ERR(info->rtc)) {
- dev_err(info->dev, "Failed to allocate I2C device for RTC\n");
- return PTR_ERR(info->rtc);
- }
+ client = devm_i2c_new_dummy_device(info->dev, parent_i2c->adapter,
+ info->drv_data->rtc_i2c_addr);
+ if (IS_ERR(client))
+ return dev_err_probe(info->dev, PTR_ERR(client),
+ "Failed to allocate I2C device for RTC\n");
- info->rtc_regmap = devm_regmap_init_i2c(info->rtc,
+ info->rtc_regmap = devm_regmap_init_i2c(client,
info->drv_data->regmap_config);
- if (IS_ERR(info->rtc_regmap)) {
- ret = PTR_ERR(info->rtc_regmap);
- dev_err(info->dev, "Failed to allocate RTC regmap: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(info->rtc_regmap))
+ return dev_err_probe(info->dev, PTR_ERR(info->rtc_regmap),
+ "Failed to allocate RTC regmap\n");
add_rtc_irq:
ret = regmap_add_irq_chip(info->rtc_regmap, info->rtc_irq,
IRQF_ONESHOT | IRQF_SHARED,
0, info->drv_data->rtc_irq_chip,
&info->rtc_irq_data);
- if (ret < 0) {
- dev_err(info->dev, "Failed to add RTC irq chip: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(info->dev, ret,
+ "Failed to add RTC irq chip\n");
return 0;
}
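The max77686 hunks are a straight dev_err_probe() conversion: the helper folds the message and the return value into one statement and, when the error is -EPROBE_DEFER, logs to the devices_deferred debugfs entry instead of the console. The shape of the idiom, taken from the first hunk:

	info->regmap = dev_get_regmap(parent, NULL);
	if (!info->regmap)
		return dev_err_probe(info->dev, -ENODEV,
				     "Failed to get rtc regmap\n");

Because dev_err_probe() returns the error passed to it, every error leg collapses to a single return statement, which is what allows dropping the local ret assignments above.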
diff --git a/drivers/rtc/rtc-meson-vrtc.c b/drivers/rtc/rtc-meson-vrtc.c
index 5849729f7d01..7d38258cbe37 100644
--- a/drivers/rtc/rtc-meson-vrtc.c
+++ b/drivers/rtc/rtc-meson-vrtc.c
@@ -13,7 +13,6 @@
struct meson_vrtc_data {
void __iomem *io_alarm;
- struct rtc_device *rtc;
unsigned long alarm_time;
bool enabled;
};
@@ -65,6 +64,7 @@ static const struct rtc_class_ops meson_vrtc_ops = {
static int meson_vrtc_probe(struct platform_device *pdev)
{
struct meson_vrtc_data *vrtc;
+ struct rtc_device *rtc;
vrtc = devm_kzalloc(&pdev->dev, sizeof(*vrtc), GFP_KERNEL);
if (!vrtc)
@@ -78,12 +78,12 @@ static int meson_vrtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, vrtc);
- vrtc->rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(vrtc->rtc))
- return PTR_ERR(vrtc->rtc);
+ rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
- vrtc->rtc->ops = &meson_vrtc_ops;
- return devm_rtc_register_device(vrtc->rtc);
+ rtc->ops = &meson_vrtc_ops;
+ return devm_rtc_register_device(rtc);
}
static int __maybe_unused meson_vrtc_suspend(struct device *dev)
diff --git a/drivers/rtc/rtc-meson.c b/drivers/rtc/rtc-meson.c
index db1d626edca5..47e9ebf58ffc 100644
--- a/drivers/rtc/rtc-meson.c
+++ b/drivers/rtc/rtc-meson.c
@@ -59,7 +59,6 @@
#define MESON_STATIC_DEFAULT (MESON_STATIC_BIAS_CUR | MESON_STATIC_VOLTAGE)
struct meson_rtc {
- struct rtc_device *rtc; /* rtc device we created */
struct device *dev; /* device we bound from */
struct reset_control *reset; /* reset source */
struct regulator *vdd; /* voltage input */
@@ -292,6 +291,7 @@ static int meson_rtc_probe(struct platform_device *pdev)
};
struct device *dev = &pdev->dev;
struct meson_rtc *rtc;
+ struct rtc_device *rtc_dev;
void __iomem *base;
int ret;
u32 tm;
@@ -300,16 +300,16 @@ static int meson_rtc_probe(struct platform_device *pdev)
if (!rtc)
return -ENOMEM;
- rtc->rtc = devm_rtc_allocate_device(dev);
- if (IS_ERR(rtc->rtc))
- return PTR_ERR(rtc->rtc);
+ rtc_dev = devm_rtc_allocate_device(dev);
+ if (IS_ERR(rtc_dev))
+ return PTR_ERR(rtc_dev);
platform_set_drvdata(pdev, rtc);
rtc->dev = dev;
- rtc->rtc->ops = &meson_rtc_ops;
- rtc->rtc->range_max = U32_MAX;
+ rtc_dev->ops = &meson_rtc_ops;
+ rtc_dev->range_max = U32_MAX;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -365,11 +365,11 @@ static int meson_rtc_probe(struct platform_device *pdev)
}
meson_rtc_nvmem_config.priv = rtc;
- ret = devm_rtc_nvmem_register(rtc->rtc, &meson_rtc_nvmem_config);
+ ret = devm_rtc_nvmem_register(rtc_dev, &meson_rtc_nvmem_config);
if (ret)
goto out_disable_vdd;
- ret = devm_rtc_register_device(rtc->rtc);
+ ret = devm_rtc_register_device(rtc_dev);
if (ret)
goto out_disable_vdd;
diff --git a/drivers/rtc/rtc-mpfs.c b/drivers/rtc/rtc-mpfs.c
index 3892b0f9917f..6aa3eae575d2 100644
--- a/drivers/rtc/rtc-mpfs.c
+++ b/drivers/rtc/rtc-mpfs.c
@@ -266,19 +266,14 @@ static int mpfs_rtc_probe(struct platform_device *pdev)
writel(prescaler, rtcdev->base + PRESCALER_REG);
dev_info(&pdev->dev, "prescaler set to: %lu\n", prescaler);
- device_init_wakeup(&pdev->dev, true);
- ret = dev_pm_set_wake_irq(&pdev->dev, wakeup_irq);
+ devm_device_init_wakeup(&pdev->dev);
+ ret = devm_pm_set_wake_irq(&pdev->dev, wakeup_irq);
if (ret)
dev_err(&pdev->dev, "failed to enable irq wake\n");
return devm_rtc_register_device(rtcdev->rtc);
}
-static void mpfs_rtc_remove(struct platform_device *pdev)
-{
- dev_pm_clear_wake_irq(&pdev->dev);
-}
-
static const struct of_device_id mpfs_rtc_of_match[] = {
{ .compatible = "microchip,mpfs-rtc" },
{ }
@@ -288,7 +283,6 @@ MODULE_DEVICE_TABLE(of, mpfs_rtc_of_match);
static struct platform_driver mpfs_rtc_driver = {
.probe = mpfs_rtc_probe,
- .remove = mpfs_rtc_remove,
.driver = {
.name = "mpfs_rtc",
.of_match_table = mpfs_rtc_of_match,
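The mpfs conversion (and the bbnsm, pl031 and stm32 ones below) replaces device_init_wakeup()/dev_pm_set_wake_irq() with their devres-managed counterparts, so unwinding happens automatically on unbind and the .remove callback disappears. A minimal sketch of the managed pattern:

	ret = devm_device_init_wakeup(&pdev->dev);
	if (ret)
		dev_err(&pdev->dev, "failed to init wakeup\n");

	ret = devm_pm_set_wake_irq(&pdev->dev, wakeup_irq);
	if (ret)
		dev_err(&pdev->dev, "failed to enable irq wake\n");

	/*
	 * No dev_pm_clear_wake_irq() or device_init_wakeup(dev, false)
	 * on error paths or in .remove: devres undoes both.
	 */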
diff --git a/drivers/rtc/rtc-nxp-bbnsm.c b/drivers/rtc/rtc-nxp-bbnsm.c
index fa3b0328c7a2..d4fc9dc583d3 100644
--- a/drivers/rtc/rtc-nxp-bbnsm.c
+++ b/drivers/rtc/rtc-nxp-bbnsm.c
@@ -189,36 +189,26 @@ static int bbnsm_rtc_probe(struct platform_device *pdev)
/* clear all the pending events */
regmap_write(bbnsm->regmap, BBNSM_EVENTS, 0x7A);
- device_init_wakeup(&pdev->dev, true);
- dev_pm_set_wake_irq(&pdev->dev, bbnsm->irq);
+ ret = devm_device_init_wakeup(&pdev->dev);
+ if (ret)
+ dev_err(&pdev->dev, "failed to init wakeup, %d\n", ret);
+
+ ret = devm_pm_set_wake_irq(&pdev->dev, bbnsm->irq);
+ if (ret)
+ dev_err(&pdev->dev, "failed to set wake irq, %d\n", ret);
ret = devm_request_irq(&pdev->dev, bbnsm->irq, bbnsm_rtc_irq_handler,
IRQF_SHARED, "rtc alarm", &pdev->dev);
if (ret) {
dev_err(&pdev->dev, "failed to request irq %d: %d\n",
bbnsm->irq, ret);
- goto err;
+ return ret;
}
bbnsm->rtc->ops = &bbnsm_rtc_ops;
bbnsm->rtc->range_max = U32_MAX;
- ret = devm_rtc_register_device(bbnsm->rtc);
- if (ret)
- goto err;
-
- return 0;
-
-err:
- dev_pm_clear_wake_irq(&pdev->dev);
- device_init_wakeup(&pdev->dev, false);
- return ret;
-}
-
-static void bbnsm_rtc_remove(struct platform_device *pdev)
-{
- dev_pm_clear_wake_irq(&pdev->dev);
- device_init_wakeup(&pdev->dev, false);
+ return devm_rtc_register_device(bbnsm->rtc);
}
static const struct of_device_id bbnsm_dt_ids[] = {
@@ -233,7 +223,6 @@ static struct platform_driver bbnsm_rtc_driver = {
.of_match_table = bbnsm_dt_ids,
},
.probe = bbnsm_rtc_probe,
- .remove = bbnsm_rtc_remove,
};
module_platform_driver(bbnsm_rtc_driver);
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c
deleted file mode 100644
index c019c4d91c7d..000000000000
--- a/drivers/rtc/rtc-pcf50633.c
+++ /dev/null
@@ -1,284 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* NXP PCF50633 RTC Driver
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * Author: Balaji Rao <balajirrao@openmoko.org>
- * All rights reserved.
- *
- * Broken down from monstrous PCF50633 driver mainly by
- * Harald Welte, Andy Green and Werner Almesberger
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/rtc.h>
-#include <linux/bcd.h>
-#include <linux/err.h>
-
-#include <linux/mfd/pcf50633/core.h>
-
-#define PCF50633_REG_RTCSC 0x59 /* Second */
-#define PCF50633_REG_RTCMN 0x5a /* Minute */
-#define PCF50633_REG_RTCHR 0x5b /* Hour */
-#define PCF50633_REG_RTCWD 0x5c /* Weekday */
-#define PCF50633_REG_RTCDT 0x5d /* Day */
-#define PCF50633_REG_RTCMT 0x5e /* Month */
-#define PCF50633_REG_RTCYR 0x5f /* Year */
-#define PCF50633_REG_RTCSCA 0x60 /* Alarm Second */
-#define PCF50633_REG_RTCMNA 0x61 /* Alarm Minute */
-#define PCF50633_REG_RTCHRA 0x62 /* Alarm Hour */
-#define PCF50633_REG_RTCWDA 0x63 /* Alarm Weekday */
-#define PCF50633_REG_RTCDTA 0x64 /* Alarm Day */
-#define PCF50633_REG_RTCMTA 0x65 /* Alarm Month */
-#define PCF50633_REG_RTCYRA 0x66 /* Alarm Year */
-
-enum pcf50633_time_indexes {
- PCF50633_TI_SEC,
- PCF50633_TI_MIN,
- PCF50633_TI_HOUR,
- PCF50633_TI_WKDAY,
- PCF50633_TI_DAY,
- PCF50633_TI_MONTH,
- PCF50633_TI_YEAR,
- PCF50633_TI_EXTENT /* always last */
-};
-
-struct pcf50633_time {
- u_int8_t time[PCF50633_TI_EXTENT];
-};
-
-struct pcf50633_rtc {
- int alarm_enabled;
- int alarm_pending;
-
- struct pcf50633 *pcf;
- struct rtc_device *rtc_dev;
-};
-
-static void pcf2rtc_time(struct rtc_time *rtc, struct pcf50633_time *pcf)
-{
- rtc->tm_sec = bcd2bin(pcf->time[PCF50633_TI_SEC]);
- rtc->tm_min = bcd2bin(pcf->time[PCF50633_TI_MIN]);
- rtc->tm_hour = bcd2bin(pcf->time[PCF50633_TI_HOUR]);
- rtc->tm_wday = bcd2bin(pcf->time[PCF50633_TI_WKDAY]);
- rtc->tm_mday = bcd2bin(pcf->time[PCF50633_TI_DAY]);
- rtc->tm_mon = bcd2bin(pcf->time[PCF50633_TI_MONTH]) - 1;
- rtc->tm_year = bcd2bin(pcf->time[PCF50633_TI_YEAR]) + 100;
-}
-
-static void rtc2pcf_time(struct pcf50633_time *pcf, struct rtc_time *rtc)
-{
- pcf->time[PCF50633_TI_SEC] = bin2bcd(rtc->tm_sec);
- pcf->time[PCF50633_TI_MIN] = bin2bcd(rtc->tm_min);
- pcf->time[PCF50633_TI_HOUR] = bin2bcd(rtc->tm_hour);
- pcf->time[PCF50633_TI_WKDAY] = bin2bcd(rtc->tm_wday);
- pcf->time[PCF50633_TI_DAY] = bin2bcd(rtc->tm_mday);
- pcf->time[PCF50633_TI_MONTH] = bin2bcd(rtc->tm_mon + 1);
- pcf->time[PCF50633_TI_YEAR] = bin2bcd(rtc->tm_year % 100);
-}
-
-static int
-pcf50633_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
-{
- struct pcf50633_rtc *rtc = dev_get_drvdata(dev);
- int err;
-
- if (enabled)
- err = pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM);
- else
- err = pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_ALARM);
-
- if (err < 0)
- return err;
-
- rtc->alarm_enabled = enabled;
-
- return 0;
-}
-
-static int pcf50633_rtc_read_time(struct device *dev, struct rtc_time *tm)
-{
- struct pcf50633_rtc *rtc;
- struct pcf50633_time pcf_tm;
- int ret;
-
- rtc = dev_get_drvdata(dev);
-
- ret = pcf50633_read_block(rtc->pcf, PCF50633_REG_RTCSC,
- PCF50633_TI_EXTENT,
- &pcf_tm.time[0]);
- if (ret != PCF50633_TI_EXTENT) {
- dev_err(dev, "Failed to read time\n");
- return -EIO;
- }
-
- dev_dbg(dev, "PCF_TIME: %02x.%02x.%02x %02x:%02x:%02x\n",
- pcf_tm.time[PCF50633_TI_DAY],
- pcf_tm.time[PCF50633_TI_MONTH],
- pcf_tm.time[PCF50633_TI_YEAR],
- pcf_tm.time[PCF50633_TI_HOUR],
- pcf_tm.time[PCF50633_TI_MIN],
- pcf_tm.time[PCF50633_TI_SEC]);
-
- pcf2rtc_time(tm, &pcf_tm);
-
- dev_dbg(dev, "RTC_TIME: %ptRr\n", tm);
-
- return 0;
-}
-
-static int pcf50633_rtc_set_time(struct device *dev, struct rtc_time *tm)
-{
- struct pcf50633_rtc *rtc;
- struct pcf50633_time pcf_tm;
- int alarm_masked, ret = 0;
-
- rtc = dev_get_drvdata(dev);
-
- dev_dbg(dev, "RTC_TIME: %ptRr\n", tm);
-
- rtc2pcf_time(&pcf_tm, tm);
-
- dev_dbg(dev, "PCF_TIME: %02x.%02x.%02x %02x:%02x:%02x\n",
- pcf_tm.time[PCF50633_TI_DAY],
- pcf_tm.time[PCF50633_TI_MONTH],
- pcf_tm.time[PCF50633_TI_YEAR],
- pcf_tm.time[PCF50633_TI_HOUR],
- pcf_tm.time[PCF50633_TI_MIN],
- pcf_tm.time[PCF50633_TI_SEC]);
-
-
- alarm_masked = pcf50633_irq_mask_get(rtc->pcf, PCF50633_IRQ_ALARM);
-
- if (!alarm_masked)
- pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_ALARM);
-
- /* Returns 0 on success */
- ret = pcf50633_write_block(rtc->pcf, PCF50633_REG_RTCSC,
- PCF50633_TI_EXTENT,
- &pcf_tm.time[0]);
-
- if (!alarm_masked)
- pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM);
-
- return ret;
-}
-
-static int pcf50633_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
-{
- struct pcf50633_rtc *rtc;
- struct pcf50633_time pcf_tm;
- int ret = 0;
-
- rtc = dev_get_drvdata(dev);
-
- alrm->enabled = rtc->alarm_enabled;
- alrm->pending = rtc->alarm_pending;
-
- ret = pcf50633_read_block(rtc->pcf, PCF50633_REG_RTCSCA,
- PCF50633_TI_EXTENT, &pcf_tm.time[0]);
- if (ret != PCF50633_TI_EXTENT) {
- dev_err(dev, "Failed to read time\n");
- return -EIO;
- }
-
- pcf2rtc_time(&alrm->time, &pcf_tm);
-
- return rtc_valid_tm(&alrm->time);
-}
-
-static int pcf50633_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
-{
- struct pcf50633_rtc *rtc;
- struct pcf50633_time pcf_tm;
- int alarm_masked, ret = 0;
-
- rtc = dev_get_drvdata(dev);
-
- rtc2pcf_time(&pcf_tm, &alrm->time);
-
- /* do like mktime does and ignore tm_wday */
- pcf_tm.time[PCF50633_TI_WKDAY] = 7;
-
- alarm_masked = pcf50633_irq_mask_get(rtc->pcf, PCF50633_IRQ_ALARM);
-
- /* disable alarm interrupt */
- if (!alarm_masked)
- pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_ALARM);
-
- /* Returns 0 on success */
- ret = pcf50633_write_block(rtc->pcf, PCF50633_REG_RTCSCA,
- PCF50633_TI_EXTENT, &pcf_tm.time[0]);
- if (!alrm->enabled)
- rtc->alarm_pending = 0;
-
- if (!alarm_masked || alrm->enabled)
- pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM);
- rtc->alarm_enabled = alrm->enabled;
-
- return ret;
-}
-
-static const struct rtc_class_ops pcf50633_rtc_ops = {
- .read_time = pcf50633_rtc_read_time,
- .set_time = pcf50633_rtc_set_time,
- .read_alarm = pcf50633_rtc_read_alarm,
- .set_alarm = pcf50633_rtc_set_alarm,
- .alarm_irq_enable = pcf50633_rtc_alarm_irq_enable,
-};
-
-static void pcf50633_rtc_irq(int irq, void *data)
-{
- struct pcf50633_rtc *rtc = data;
-
- rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);
- rtc->alarm_pending = 1;
-}
-
-static int pcf50633_rtc_probe(struct platform_device *pdev)
-{
- struct pcf50633_rtc *rtc;
-
- rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
- if (!rtc)
- return -ENOMEM;
-
- rtc->pcf = dev_to_pcf50633(pdev->dev.parent);
- platform_set_drvdata(pdev, rtc);
- rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, "pcf50633-rtc",
- &pcf50633_rtc_ops, THIS_MODULE);
-
- if (IS_ERR(rtc->rtc_dev))
- return PTR_ERR(rtc->rtc_dev);
-
- pcf50633_register_irq(rtc->pcf, PCF50633_IRQ_ALARM,
- pcf50633_rtc_irq, rtc);
- return 0;
-}
-
-static void pcf50633_rtc_remove(struct platform_device *pdev)
-{
- struct pcf50633_rtc *rtc;
-
- rtc = platform_get_drvdata(pdev);
- pcf50633_free_irq(rtc->pcf, PCF50633_IRQ_ALARM);
-}
-
-static struct platform_driver pcf50633_rtc_driver = {
- .driver = {
- .name = "pcf50633-rtc",
- },
- .probe = pcf50633_rtc_probe,
- .remove = pcf50633_rtc_remove,
-};
-
-module_platform_driver(pcf50633_rtc_driver);
-
-MODULE_DESCRIPTION("PCF50633 RTC driver");
-MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 905986c61655..4fa5c4ecdd5a 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -35,6 +35,7 @@
#define PCF85063_REG_CTRL1_CAP_SEL BIT(0)
#define PCF85063_REG_CTRL1_STOP BIT(5)
#define PCF85063_REG_CTRL1_EXT_TEST BIT(7)
+#define PCF85063_REG_CTRL1_SWR 0x58
#define PCF85063_REG_CTRL2 0x01
#define PCF85063_CTRL2_AF BIT(6)
@@ -589,16 +590,30 @@ static int pcf85063_probe(struct i2c_client *client)
i2c_set_clientdata(client, pcf85063);
- err = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL1, &tmp);
- if (err) {
- dev_err(&client->dev, "RTC chip is not present\n");
- return err;
- }
+ err = regmap_read(pcf85063->regmap, PCF85063_REG_SC, &tmp);
+ if (err)
+ return dev_err_probe(&client->dev, err, "RTC chip is not present\n");
pcf85063->rtc = devm_rtc_allocate_device(&client->dev);
if (IS_ERR(pcf85063->rtc))
return PTR_ERR(pcf85063->rtc);
+ /*
+ * If a power loss is detected, SW reset the device.
+ * From the PCF85063A datasheet:
+ * There is a low probability that some devices will have corruption
+ * of the registers after the automatic power-on reset...
+ */
+ if (tmp & PCF85063_REG_SC_OS) {
+ dev_warn(&client->dev,
+ "POR issue detected, sending a SW reset\n");
+ err = regmap_write(pcf85063->regmap, PCF85063_REG_CTRL1,
+ PCF85063_REG_CTRL1_SWR);
+ if (err < 0)
+ dev_warn(&client->dev,
+ "SW reset failed, trying to continue\n");
+ }
+
err = pcf85063_load_capacitance(pcf85063, client->dev.of_node,
config->force_cap_7000 ? 7000 : 0);
if (err < 0)
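The pcf85063 hunk moves the probe-time presence check from CTRL1 to the seconds register so the oscillator-stop flag (OS) can double as power-loss detection: when OS is set, the driver writes the datasheet's software-reset value (0x58) to Control_1 before any further configuration. Condensed, with the same register names as the diff:

	err = regmap_read(pcf85063->regmap, PCF85063_REG_SC, &tmp);
	if (err)
		return dev_err_probe(&client->dev, err,
				     "RTC chip is not present\n");

	if (tmp & PCF85063_REG_SC_OS)	/* power loss, possible register corruption */
		regmap_write(pcf85063->regmap, PCF85063_REG_CTRL1,
			     PCF85063_REG_CTRL1_SWR);

Note that in the actual hunk a failed reset only produces a warning; the driver continues and relies on the subsequent configuration writes.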
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index 39038c0754ee..5caaa714f448 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -21,7 +21,6 @@
#define RTC_CR_MIE (1 << 0)
struct pl030_rtc {
- struct rtc_device *rtc;
void __iomem *base;
};
@@ -86,6 +85,7 @@ static int pl030_probe(struct amba_device *dev, const struct amba_id *id)
{
struct pl030_rtc *rtc;
int ret;
+ struct rtc_device *rtc_dev;
ret = amba_request_regions(dev, NULL);
if (ret)
@@ -97,14 +97,14 @@ static int pl030_probe(struct amba_device *dev, const struct amba_id *id)
goto err_rtc;
}
- rtc->rtc = devm_rtc_allocate_device(&dev->dev);
- if (IS_ERR(rtc->rtc)) {
- ret = PTR_ERR(rtc->rtc);
+ rtc_dev = devm_rtc_allocate_device(&dev->dev);
+ if (IS_ERR(rtc_dev)) {
+ ret = PTR_ERR(rtc_dev);
goto err_rtc;
}
- rtc->rtc->ops = &pl030_ops;
- rtc->rtc->range_max = U32_MAX;
+ rtc_dev->ops = &pl030_ops;
+ rtc_dev->range_max = U32_MAX;
rtc->base = ioremap(dev->res.start, resource_size(&dev->res));
if (!rtc->base) {
ret = -ENOMEM;
@@ -121,7 +121,7 @@ static int pl030_probe(struct amba_device *dev, const struct amba_id *id)
if (ret)
goto err_irq;
- ret = devm_rtc_register_device(rtc->rtc);
+ ret = devm_rtc_register_device(rtc_dev);
if (ret)
goto err_reg;
@@ -148,7 +148,7 @@ static void pl030_remove(struct amba_device *dev)
amba_release_regions(dev);
}
-static struct amba_id pl030_ids[] = {
+static const struct amba_id pl030_ids[] = {
{
.id = 0x00041030,
.mask = 0x000fffff,
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index bad6a5d9c683..eab39dfa4e5f 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -74,6 +74,8 @@
* @st_weekday: if this is an ST Microelectronics silicon version that need
* the weekday fix
* @irqflags: special IRQ flags per variant
+ * @range_min: minimum date/time supported by the RTC
+ * @range_max: maximum date/time supported by the RTC
*/
struct pl031_vendor_data {
struct rtc_class_ops ops;
@@ -284,8 +286,6 @@ static void pl031_remove(struct amba_device *adev)
{
struct pl031_local *ldata = dev_get_drvdata(&adev->dev);
- dev_pm_clear_wake_irq(&adev->dev);
- device_init_wakeup(&adev->dev, false);
if (adev->irq[0])
free_irq(adev->irq[0], ldata);
amba_release_regions(adev);
@@ -350,7 +350,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
}
}
- device_init_wakeup(&adev->dev, true);
+ devm_device_init_wakeup(&adev->dev);
ldata->rtc = devm_rtc_allocate_device(&adev->dev);
if (IS_ERR(ldata->rtc)) {
ret = PTR_ERR(ldata->rtc);
@@ -373,7 +373,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
vendor->irqflags, "rtc-pl031", ldata);
if (ret)
goto out;
- dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
+ devm_pm_set_wake_irq(&adev->dev, adev->irq[0]);
}
return 0;
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index b2518aea4218..3c1dddcc81df 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -5,6 +5,7 @@
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
* Copyright (c) 2023, Linaro Limited
*/
+#include <linux/efi.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
@@ -16,9 +17,10 @@
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-
#include <linux/unaligned.h>
+#include <asm/byteorder.h>
+
/* RTC_CTRL register bit fields */
#define PM8xxx_RTC_ENABLE BIT(7)
#define PM8xxx_RTC_ALARM_CLEAR BIT(0)
@@ -46,28 +48,125 @@ struct pm8xxx_rtc_regs {
unsigned int alarm_en;
};
+struct qcom_uefi_rtc_info {
+ __le32 offset_gps;
+ u8 reserved[8];
+} __packed;
+
/**
* struct pm8xxx_rtc - RTC driver internal structure
* @rtc: RTC device
* @regmap: regmap used to access registers
* @allow_set_time: whether the time can be set
+ * @use_uefi: use UEFI variable as fallback for offset
* @alarm_irq: alarm irq number
* @regs: register description
* @dev: device structure
+ * @rtc_info: qcom uefi rtc-info structure
* @nvmem_cell: nvmem cell for offset
* @offset: offset from epoch in seconds
+ * @offset_dirty: offset needs to be stored on shutdown
*/
struct pm8xxx_rtc {
struct rtc_device *rtc;
struct regmap *regmap;
bool allow_set_time;
+ bool use_uefi;
int alarm_irq;
const struct pm8xxx_rtc_regs *regs;
struct device *dev;
+ struct qcom_uefi_rtc_info rtc_info;
struct nvmem_cell *nvmem_cell;
u32 offset;
+ bool offset_dirty;
};
+#ifdef CONFIG_EFI
+
+MODULE_IMPORT_NS("EFIVAR");
+
+#define QCOM_UEFI_NAME L"RTCInfo"
+#define QCOM_UEFI_GUID EFI_GUID(0x882f8c2b, 0x9646, 0x435f, \
+ 0x8d, 0xe5, 0xf2, 0x08, 0xff, 0x80, 0xc1, 0xbd)
+#define QCOM_UEFI_ATTRS (EFI_VARIABLE_NON_VOLATILE | \
+ EFI_VARIABLE_BOOTSERVICE_ACCESS | \
+ EFI_VARIABLE_RUNTIME_ACCESS)
+
+static int pm8xxx_rtc_read_uefi_offset(struct pm8xxx_rtc *rtc_dd)
+{
+ struct qcom_uefi_rtc_info *rtc_info = &rtc_dd->rtc_info;
+ unsigned long size = sizeof(*rtc_info);
+ struct device *dev = rtc_dd->dev;
+ efi_status_t status;
+ u32 offset_gps;
+ int rc;
+
+ rc = efivar_lock();
+ if (rc)
+ return rc;
+
+ status = efivar_get_variable(QCOM_UEFI_NAME, &QCOM_UEFI_GUID, NULL,
+ &size, rtc_info);
+ efivar_unlock();
+
+ if (status != EFI_SUCCESS) {
+ dev_dbg(dev, "failed to read UEFI offset: %lu\n", status);
+ return efi_status_to_err(status);
+ }
+
+ if (size != sizeof(*rtc_info)) {
+ dev_dbg(dev, "unexpected UEFI structure size %lu\n", size);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "uefi_rtc_info = %*ph\n", (int)size, rtc_info);
+
+ /* Convert from GPS to Unix time offset */
+ offset_gps = le32_to_cpu(rtc_info->offset_gps);
+ rtc_dd->offset = offset_gps + (u32)RTC_TIMESTAMP_EPOCH_GPS;
+
+ return 0;
+}
+
+static int pm8xxx_rtc_write_uefi_offset(struct pm8xxx_rtc *rtc_dd, u32 offset)
+{
+ struct qcom_uefi_rtc_info *rtc_info = &rtc_dd->rtc_info;
+ unsigned long size = sizeof(*rtc_info);
+ struct device *dev = rtc_dd->dev;
+ efi_status_t status;
+ u32 offset_gps;
+
+ /* Convert from Unix to GPS time offset */
+ offset_gps = offset - (u32)RTC_TIMESTAMP_EPOCH_GPS;
+
+ rtc_info->offset_gps = cpu_to_le32(offset_gps);
+
+ dev_dbg(dev, "efi_rtc_info = %*ph\n", (int)size, rtc_info);
+
+ status = efivar_set_variable(QCOM_UEFI_NAME, &QCOM_UEFI_GUID,
+ QCOM_UEFI_ATTRS, size, rtc_info);
+ if (status != EFI_SUCCESS) {
+ dev_dbg(dev, "failed to write UEFI offset: %lx\n", status);
+ return efi_status_to_err(status);
+ }
+
+ return 0;
+}
+
+#else /* CONFIG_EFI */
+
+static int pm8xxx_rtc_read_uefi_offset(struct pm8xxx_rtc *rtc_dd)
+{
+ return -ENODEV;
+}
+
+static int pm8xxx_rtc_write_uefi_offset(struct pm8xxx_rtc *rtc_dd, u32 offset)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_EFI */
+
static int pm8xxx_rtc_read_nvmem_offset(struct pm8xxx_rtc *rtc_dd)
{
size_t len;
@@ -110,14 +209,6 @@ static int pm8xxx_rtc_write_nvmem_offset(struct pm8xxx_rtc *rtc_dd, u32 offset)
return 0;
}
-static int pm8xxx_rtc_read_offset(struct pm8xxx_rtc *rtc_dd)
-{
- if (!rtc_dd->nvmem_cell)
- return 0;
-
- return pm8xxx_rtc_read_nvmem_offset(rtc_dd);
-}
-
static int pm8xxx_rtc_read_raw(struct pm8xxx_rtc *rtc_dd, u32 *secs)
{
const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
@@ -155,7 +246,7 @@ static int pm8xxx_rtc_update_offset(struct pm8xxx_rtc *rtc_dd, u32 secs)
u32 offset;
int rc;
- if (!rtc_dd->nvmem_cell)
+ if (!rtc_dd->nvmem_cell && !rtc_dd->use_uefi)
return -ENODEV;
rc = pm8xxx_rtc_read_raw(rtc_dd, &raw_secs);
@@ -167,10 +258,25 @@ static int pm8xxx_rtc_update_offset(struct pm8xxx_rtc *rtc_dd, u32 secs)
if (offset == rtc_dd->offset)
return 0;
- rc = pm8xxx_rtc_write_nvmem_offset(rtc_dd, offset);
+ /*
+ * Reduce flash wear by deferring updates due to clock drift until
+ * shutdown.
+ */
+ if (abs_diff(offset, rtc_dd->offset) < 30) {
+ rtc_dd->offset_dirty = true;
+ goto out;
+ }
+
+ if (rtc_dd->nvmem_cell)
+ rc = pm8xxx_rtc_write_nvmem_offset(rtc_dd, offset);
+ else
+ rc = pm8xxx_rtc_write_uefi_offset(rtc_dd, offset);
+
if (rc)
return rc;
+ rtc_dd->offset_dirty = false;
+out:
rtc_dd->offset = offset;
return 0;
@@ -455,6 +561,30 @@ static const struct of_device_id pm8xxx_id_table[] = {
};
MODULE_DEVICE_TABLE(of, pm8xxx_id_table);
+static int pm8xxx_rtc_probe_offset(struct pm8xxx_rtc *rtc_dd)
+{
+ int rc;
+
+ rtc_dd->nvmem_cell = devm_nvmem_cell_get(rtc_dd->dev, "offset");
+ if (IS_ERR(rtc_dd->nvmem_cell)) {
+ rc = PTR_ERR(rtc_dd->nvmem_cell);
+ if (rc != -ENOENT)
+ return rc;
+ rtc_dd->nvmem_cell = NULL;
+ } else {
+ return pm8xxx_rtc_read_nvmem_offset(rtc_dd);
+ }
+
+ /* Use UEFI storage as fallback if available */
+ if (efivar_is_available()) {
+ rc = pm8xxx_rtc_read_uefi_offset(rtc_dd);
+ if (rc == 0)
+ rtc_dd->use_uefi = true;
+ }
+
+ return 0;
+}
+
static int pm8xxx_rtc_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
@@ -469,30 +599,23 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
if (rtc_dd == NULL)
return -ENOMEM;
+ rtc_dd->regs = match->data;
+ rtc_dd->dev = &pdev->dev;
+
rtc_dd->regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!rtc_dd->regmap)
return -ENXIO;
- rtc_dd->alarm_irq = platform_get_irq(pdev, 0);
- if (rtc_dd->alarm_irq < 0)
- return -ENXIO;
+ if (!of_property_read_bool(pdev->dev.of_node, "qcom,no-alarm")) {
+ rtc_dd->alarm_irq = platform_get_irq(pdev, 0);
+ if (rtc_dd->alarm_irq < 0)
+ return -ENXIO;
+ }
rtc_dd->allow_set_time = of_property_read_bool(pdev->dev.of_node,
"allow-set-time");
-
- rtc_dd->nvmem_cell = devm_nvmem_cell_get(&pdev->dev, "offset");
- if (IS_ERR(rtc_dd->nvmem_cell)) {
- rc = PTR_ERR(rtc_dd->nvmem_cell);
- if (rc != -ENOENT)
- return rc;
- rtc_dd->nvmem_cell = NULL;
- }
-
- rtc_dd->regs = match->data;
- rtc_dd->dev = &pdev->dev;
-
if (!rtc_dd->allow_set_time) {
- rc = pm8xxx_rtc_read_offset(rtc_dd);
+ rc = pm8xxx_rtc_probe_offset(rtc_dd);
if (rc)
return rc;
}
@@ -503,8 +626,6 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc_dd);
- device_init_wakeup(&pdev->dev, true);
-
rtc_dd->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc_dd->rtc))
return PTR_ERR(rtc_dd->rtc);
@@ -512,32 +633,41 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
rtc_dd->rtc->ops = &pm8xxx_rtc_ops;
rtc_dd->rtc->range_max = U32_MAX;
- rc = devm_request_any_context_irq(&pdev->dev, rtc_dd->alarm_irq,
- pm8xxx_alarm_trigger,
- IRQF_TRIGGER_RISING,
- "pm8xxx_rtc_alarm", rtc_dd);
- if (rc < 0)
- return rc;
+ if (rtc_dd->alarm_irq) {
+ rc = devm_request_any_context_irq(&pdev->dev, rtc_dd->alarm_irq,
+ pm8xxx_alarm_trigger,
+ IRQF_TRIGGER_RISING,
+ "pm8xxx_rtc_alarm", rtc_dd);
+ if (rc < 0)
+ return rc;
- rc = devm_rtc_register_device(rtc_dd->rtc);
- if (rc)
- return rc;
+ rc = devm_pm_set_wake_irq(&pdev->dev, rtc_dd->alarm_irq);
+ if (rc)
+ return rc;
- rc = dev_pm_set_wake_irq(&pdev->dev, rtc_dd->alarm_irq);
- if (rc)
- return rc;
+ devm_device_init_wakeup(&pdev->dev);
+ } else {
+ clear_bit(RTC_FEATURE_ALARM, rtc_dd->rtc->features);
+ }
- return 0;
+ return devm_rtc_register_device(rtc_dd->rtc);
}
-static void pm8xxx_remove(struct platform_device *pdev)
+static void pm8xxx_shutdown(struct platform_device *pdev)
{
- dev_pm_clear_wake_irq(&pdev->dev);
+ struct pm8xxx_rtc *rtc_dd = platform_get_drvdata(pdev);
+
+ if (rtc_dd->offset_dirty) {
+ if (rtc_dd->nvmem_cell)
+ pm8xxx_rtc_write_nvmem_offset(rtc_dd, rtc_dd->offset);
+ else
+ pm8xxx_rtc_write_uefi_offset(rtc_dd, rtc_dd->offset);
+ }
}
static struct platform_driver pm8xxx_rtc_driver = {
.probe = pm8xxx_rtc_probe,
- .remove = pm8xxx_remove,
+ .shutdown = pm8xxx_shutdown,
.driver = {
.name = "rtc-pm8xxx",
.of_match_table = pm8xxx_id_table,
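Two techniques in the pm8xxx diff are worth noting. First, the epoch offset now has a fallback chain: an nvmem cell when one is described, otherwise the Qualcomm RTCInfo UEFI variable when EFI runtime services are available. Second, drift corrections smaller than 30 seconds are only marked dirty and flushed from the new .shutdown hook, limiting flash wear. The flush, mirroring the handlers above:

	static void pm8xxx_shutdown(struct platform_device *pdev)
	{
		struct pm8xxx_rtc *rtc_dd = platform_get_drvdata(pdev);

		if (!rtc_dd->offset_dirty)
			return;

		/* write to whichever backing store probe selected */
		if (rtc_dd->nvmem_cell)
			pm8xxx_rtc_write_nvmem_offset(rtc_dd, rtc_dd->offset);
		else
			pm8xxx_rtc_write_uefi_offset(rtc_dd, rtc_dd->offset);
	}

The trade-off the patch accepts is that sub-30-second drift accumulated before a power cut (as opposed to an orderly shutdown) is lost.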
diff --git a/drivers/rtc/rtc-renesas-rtca3.c b/drivers/rtc/rtc-renesas-rtca3.c
index a056291d3887..ab816bdf0d77 100644
--- a/drivers/rtc/rtc-renesas-rtca3.c
+++ b/drivers/rtc/rtc-renesas-rtca3.c
@@ -586,17 +586,14 @@ static int rtca3_initial_setup(struct clk *clk, struct rtca3_priv *priv)
*/
usleep_range(sleep_us, sleep_us + 10);
- /* Disable all interrupts. */
- mask = RTCA3_RCR1_AIE | RTCA3_RCR1_CIE | RTCA3_RCR1_PIE;
- ret = rtca3_alarm_irq_set_helper(priv, mask, 0);
- if (ret)
- return ret;
-
mask = RTCA3_RCR2_START | RTCA3_RCR2_HR24;
val = readb(priv->base + RTCA3_RCR2);
- /* Nothing to do if already started in 24 hours and calendar count mode. */
- if ((val & mask) == mask)
- return 0;
+ /* Only disable the interrupts if already started in 24-hour and calendar count mode. */
+ if ((val & mask) == mask) {
+ /* Disable all interrupts. */
+ mask = RTCA3_RCR1_AIE | RTCA3_RCR1_CIE | RTCA3_RCR1_PIE;
+ return rtca3_alarm_irq_set_helper(priv, mask, 0);
+ }
/* Reconfigure the RTC in 24 hours and calendar count mode. */
mask = RTCA3_RCR2_START | RTCA3_RCR2_CNTMD;
diff --git a/drivers/rtc/rtc-rv3032.c b/drivers/rtc/rtc-rv3032.c
index 35b2e36b426a..2c6a8918acba 100644
--- a/drivers/rtc/rtc-rv3032.c
+++ b/drivers/rtc/rtc-rv3032.c
@@ -69,8 +69,7 @@
#define RV3032_CLKOUT2_FD_MSK GENMASK(6, 5)
#define RV3032_CLKOUT2_OS BIT(7)
-#define RV3032_CTRL1_EERD BIT(3)
-#define RV3032_CTRL1_WADA BIT(5)
+#define RV3032_CTRL1_EERD BIT(2)
#define RV3032_CTRL2_STOP BIT(0)
#define RV3032_CTRL2_EIE BIT(2)
@@ -947,11 +946,6 @@ static int rv3032_probe(struct i2c_client *client)
if (!client->irq)
clear_bit(RTC_FEATURE_ALARM, rv3032->rtc->features);
- ret = regmap_update_bits(rv3032->regmap, RV3032_CTRL1,
- RV3032_CTRL1_WADA, RV3032_CTRL1_WADA);
- if (ret)
- return ret;
-
rv3032_trickle_charger_setup(&client->dev, rv3032);
set_bit(RTC_FEATURE_BACKUP_SWITCH_MODE, rv3032->rtc->features);
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index b18c12887bdc..20c2dff01bae 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -52,11 +52,6 @@
#define RX8571_USER_RAM 0x10
#define RX8571_NVRAM_SIZE 0x10
-struct rx8581 {
- struct regmap *regmap;
- struct rtc_device *rtc;
-};
-
struct rx85x1_config {
struct regmap_config regmap;
unsigned int num_nvram;
@@ -72,14 +67,14 @@ static int rx8581_rtc_read_time(struct device *dev, struct rtc_time *tm)
unsigned char date[7];
unsigned int data;
int err;
- struct rx8581 *rx8581 = i2c_get_clientdata(client);
+ struct regmap *regmap = i2c_get_clientdata(client);
/* First we ensure that the "update flag" is not set, we read the
* time and date then re-read the "update flag". If the update flag
* has been set, we know that the time has changed during the read so
* we repeat the whole process again.
*/
- err = regmap_read(rx8581->regmap, RX8581_REG_FLAG, &data);
+ err = regmap_read(regmap, RX8581_REG_FLAG, &data);
if (err < 0)
return err;
@@ -92,20 +87,20 @@ static int rx8581_rtc_read_time(struct device *dev, struct rtc_time *tm)
do {
/* If update flag set, clear it */
if (data & RX8581_FLAG_UF) {
- err = regmap_write(rx8581->regmap, RX8581_REG_FLAG,
- data & ~RX8581_FLAG_UF);
+ err = regmap_write(regmap, RX8581_REG_FLAG,
+ data & ~RX8581_FLAG_UF);
if (err < 0)
return err;
}
/* Now read time and date */
- err = regmap_bulk_read(rx8581->regmap, RX8581_REG_SC, date,
+ err = regmap_bulk_read(regmap, RX8581_REG_SC, date,
sizeof(date));
if (err < 0)
return err;
/* Check flag register */
- err = regmap_read(rx8581->regmap, RX8581_REG_FLAG, &data);
+ err = regmap_read(regmap, RX8581_REG_FLAG, &data);
if (err < 0)
return err;
} while (data & RX8581_FLAG_UF);
@@ -137,7 +132,7 @@ static int rx8581_rtc_set_time(struct device *dev, struct rtc_time *tm)
struct i2c_client *client = to_i2c_client(dev);
int err;
unsigned char buf[7];
- struct rx8581 *rx8581 = i2c_get_clientdata(client);
+ struct regmap *regmap = i2c_get_clientdata(client);
dev_dbg(dev, "%s: secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
@@ -160,25 +155,23 @@ static int rx8581_rtc_set_time(struct device *dev, struct rtc_time *tm)
buf[RX8581_REG_DW] = (0x1 << tm->tm_wday);
/* Stop the clock */
- err = regmap_update_bits(rx8581->regmap, RX8581_REG_CTRL,
+ err = regmap_update_bits(regmap, RX8581_REG_CTRL,
RX8581_CTRL_STOP, RX8581_CTRL_STOP);
if (err < 0)
return err;
/* write register's data */
- err = regmap_bulk_write(rx8581->regmap, RX8581_REG_SC,
- buf, sizeof(buf));
+ err = regmap_bulk_write(regmap, RX8581_REG_SC, buf, sizeof(buf));
if (err < 0)
return err;
/* get VLF and clear it */
- err = regmap_update_bits(rx8581->regmap, RX8581_REG_FLAG,
- RX8581_FLAG_VLF, 0);
+ err = regmap_update_bits(regmap, RX8581_REG_FLAG, RX8581_FLAG_VLF, 0);
if (err < 0)
return err;
/* Restart the clock */
- return regmap_update_bits(rx8581->regmap, RX8581_REG_CTRL,
+ return regmap_update_bits(regmap, RX8581_REG_CTRL,
RX8581_CTRL_STOP, 0);
}
@@ -190,29 +183,27 @@ static const struct rtc_class_ops rx8581_rtc_ops = {
static int rx8571_nvram_read(void *priv, unsigned int offset, void *val,
size_t bytes)
{
- struct rx8581 *rx8581 = priv;
+ struct regmap *regmap = priv;
- return regmap_bulk_read(rx8581->regmap, RX8571_USER_RAM + offset,
- val, bytes);
+ return regmap_bulk_read(regmap, RX8571_USER_RAM + offset, val, bytes);
}
static int rx8571_nvram_write(void *priv, unsigned int offset, void *val,
size_t bytes)
{
- struct rx8581 *rx8581 = priv;
+ struct regmap *regmap = priv;
- return regmap_bulk_write(rx8581->regmap, RX8571_USER_RAM + offset,
- val, bytes);
+ return regmap_bulk_write(regmap, RX8571_USER_RAM + offset, val, bytes);
}
static int rx85x1_nvram_read(void *priv, unsigned int offset, void *val,
size_t bytes)
{
- struct rx8581 *rx8581 = priv;
+ struct regmap *regmap = priv;
unsigned int tmp_val;
int ret;
- ret = regmap_read(rx8581->regmap, RX8581_REG_RAM, &tmp_val);
+ ret = regmap_read(regmap, RX8581_REG_RAM, &tmp_val);
(*(unsigned char *)val) = (unsigned char) tmp_val;
return ret;
@@ -221,12 +212,11 @@ static int rx85x1_nvram_read(void *priv, unsigned int offset, void *val,
static int rx85x1_nvram_write(void *priv, unsigned int offset, void *val,
size_t bytes)
{
- struct rx8581 *rx8581 = priv;
+ struct regmap *regmap = priv;
unsigned char tmp_val;
tmp_val = *((unsigned char *)val);
- return regmap_write(rx8581->regmap, RX8581_REG_RAM,
- (unsigned int)tmp_val);
+ return regmap_write(regmap, RX8581_REG_RAM, (unsigned int)tmp_val);
}
static const struct rx85x1_config rx8581_config = {
@@ -249,9 +239,10 @@ static const struct rx85x1_config rx8571_config = {
static int rx8581_probe(struct i2c_client *client)
{
- struct rx8581 *rx8581;
+ struct regmap *regmap;
const struct rx85x1_config *config = &rx8581_config;
const void *data = of_device_get_match_data(&client->dev);
+ struct rtc_device *rtc;
static struct nvmem_config nvmem_cfg[] = {
{
.name = "rx85x1-",
@@ -276,31 +267,27 @@ static int rx8581_probe(struct i2c_client *client)
if (data)
config = data;
- rx8581 = devm_kzalloc(&client->dev, sizeof(struct rx8581), GFP_KERNEL);
- if (!rx8581)
- return -ENOMEM;
-
- i2c_set_clientdata(client, rx8581);
+ regmap = devm_regmap_init_i2c(client, &config->regmap);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
- rx8581->regmap = devm_regmap_init_i2c(client, &config->regmap);
- if (IS_ERR(rx8581->regmap))
- return PTR_ERR(rx8581->regmap);
+ i2c_set_clientdata(client, regmap);
- rx8581->rtc = devm_rtc_allocate_device(&client->dev);
- if (IS_ERR(rx8581->rtc))
- return PTR_ERR(rx8581->rtc);
+ rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
- rx8581->rtc->ops = &rx8581_rtc_ops;
- rx8581->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
- rx8581->rtc->range_max = RTC_TIMESTAMP_END_2099;
- rx8581->rtc->start_secs = 0;
- rx8581->rtc->set_start_time = true;
+ rtc->ops = &rx8581_rtc_ops;
+ rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc->range_max = RTC_TIMESTAMP_END_2099;
+ rtc->start_secs = 0;
+ rtc->set_start_time = true;
- ret = devm_rtc_register_device(rx8581->rtc);
+ ret = devm_rtc_register_device(rtc);
for (i = 0; i < config->num_nvram; i++) {
- nvmem_cfg[i].priv = rx8581;
- devm_rtc_nvmem_register(rx8581->rtc, &nvmem_cfg[i]);
+ nvmem_cfg[i].priv = regmap;
+ devm_rtc_nvmem_register(rtc, &nvmem_cfg[i]);
}
return ret;
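rx8581 (like sd3078 further down) deletes its one-member private struct: the regmap pointer itself becomes the I2C clientdata and the nvmem priv. A minimal sketch of the pattern, valid whenever no other per-device state exists:

	regmap = devm_regmap_init_i2c(client, &config->regmap);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	i2c_set_clientdata(client, regmap);

	/* any callback recovers it without a container struct */
	struct regmap *regmap = i2c_get_clientdata(to_i2c_client(dev));

The rtc_device pointer can likewise live in a probe-local variable, since nothing outside probe touches it once registration is devres-managed.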
diff --git a/drivers/rtc/rtc-rzn1.c b/drivers/rtc/rtc-rzn1.c
index cb220807d925..eeb9612a666f 100644
--- a/drivers/rtc/rtc-rzn1.c
+++ b/drivers/rtc/rtc-rzn1.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rtc.h>
+#include <linux/spinlock.h>
#define RZN1_RTC_CTL0 0x00
#define RZN1_RTC_CTL0_SLSB_SUBU 0
@@ -27,6 +28,7 @@
#define RZN1_RTC_CTL0_CE BIT(7)
#define RZN1_RTC_CTL1 0x04
+#define RZN1_RTC_CTL1_1SE BIT(3)
#define RZN1_RTC_CTL1_ALME BIT(4)
#define RZN1_RTC_CTL2 0x08
@@ -58,6 +60,13 @@
struct rzn1_rtc {
struct rtc_device *rtcdev;
void __iomem *base;
+ /*
+ * Protects access to the RZN1_RTC_CTL1 register. Using rtc_lock with
+ * threaded IRQs would introduce race conditions when switching
+ * interrupts because of potential sleeps
+ */
+ spinlock_t ctl1_access_lock;
+ struct rtc_time tm_alarm;
};
static void rzn1_rtc_get_time_snapshot(struct rzn1_rtc *rtc, struct rtc_time *tm)
@@ -135,8 +144,38 @@ static int rzn1_rtc_set_time(struct device *dev, struct rtc_time *tm)
static irqreturn_t rzn1_rtc_alarm_irq(int irq, void *dev_id)
{
struct rzn1_rtc *rtc = dev_id;
+ u32 ctl1, set_irq_bits = 0;
+
+ if (rtc->tm_alarm.tm_sec == 0)
+ rtc_update_irq(rtc->rtcdev, 1, RTC_AF | RTC_IRQF);
+ else
+ /* Switch to 1s interrupts */
+ set_irq_bits = RZN1_RTC_CTL1_1SE;
- rtc_update_irq(rtc->rtcdev, 1, RTC_AF | RTC_IRQF);
+ guard(spinlock)(&rtc->ctl1_access_lock);
+
+ ctl1 = readl(rtc->base + RZN1_RTC_CTL1);
+ ctl1 &= ~RZN1_RTC_CTL1_ALME;
+ ctl1 |= set_irq_bits;
+ writel(ctl1, rtc->base + RZN1_RTC_CTL1);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rzn1_rtc_1s_irq(int irq, void *dev_id)
+{
+ struct rzn1_rtc *rtc = dev_id;
+ u32 ctl1;
+
+ if (readl(rtc->base + RZN1_RTC_SECC) == bin2bcd(rtc->tm_alarm.tm_sec)) {
+ guard(spinlock)(&rtc->ctl1_access_lock);
+
+ ctl1 = readl(rtc->base + RZN1_RTC_CTL1);
+ ctl1 &= ~RZN1_RTC_CTL1_1SE;
+ writel(ctl1, rtc->base + RZN1_RTC_CTL1);
+
+ rtc_update_irq(rtc->rtcdev, 1, RTC_AF | RTC_IRQF);
+ }
return IRQ_HANDLED;
}
@@ -144,14 +183,38 @@ static irqreturn_t rzn1_rtc_alarm_irq(int irq, void *dev_id)
static int rzn1_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
{
struct rzn1_rtc *rtc = dev_get_drvdata(dev);
- u32 ctl1 = readl(rtc->base + RZN1_RTC_CTL1);
+ struct rtc_time *tm = &rtc->tm_alarm, tm_now;
+ u32 ctl1;
+ int ret;
- if (enable)
- ctl1 |= RZN1_RTC_CTL1_ALME;
- else
- ctl1 &= ~RZN1_RTC_CTL1_ALME;
+ guard(spinlock_irqsave)(&rtc->ctl1_access_lock);
- writel(ctl1, rtc->base + RZN1_RTC_CTL1);
+ ctl1 = readl(rtc->base + RZN1_RTC_CTL1);
+
+ if (enable) {
+ /*
+ * Use the alarm interrupt if the alarm time is at least a minute away,
+ * or less than a minute away but in the next minute. Otherwise use the
+ * 1-second interrupt to wait for the proper second
+ */
+ do {
+ ctl1 &= ~(RZN1_RTC_CTL1_ALME | RZN1_RTC_CTL1_1SE);
+
+ ret = rzn1_rtc_read_time(dev, &tm_now);
+ if (ret)
+ return ret;
+
+ if (rtc_tm_sub(tm, &tm_now) > 59 || tm->tm_min != tm_now.tm_min)
+ ctl1 |= RZN1_RTC_CTL1_ALME;
+ else
+ ctl1 |= RZN1_RTC_CTL1_1SE;
+
+ writel(ctl1, rtc->base + RZN1_RTC_CTL1);
+ } while (readl(rtc->base + RZN1_RTC_SECC) != bin2bcd(tm_now.tm_sec));
+ } else {
+ ctl1 &= ~(RZN1_RTC_CTL1_ALME | RZN1_RTC_CTL1_1SE);
+ writel(ctl1, rtc->base + RZN1_RTC_CTL1);
+ }
return 0;
}
@@ -185,7 +248,7 @@ static int rzn1_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
}
ctl1 = readl(rtc->base + RZN1_RTC_CTL1);
- alrm->enabled = !!(ctl1 & RZN1_RTC_CTL1_ALME);
+ alrm->enabled = !!(ctl1 & (RZN1_RTC_CTL1_ALME | RZN1_RTC_CTL1_1SE));
return 0;
}
@@ -216,6 +279,8 @@ static int rzn1_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
writel(bin2bcd(tm->tm_hour), rtc->base + RZN1_RTC_ALH);
writel(BIT(wday), rtc->base + RZN1_RTC_ALW);
+ rtc->tm_alarm = alrm->time;
+
rzn1_rtc_alarm_irq_enable(dev, alrm->enabled);
return 0;
@@ -304,7 +369,7 @@ static const struct rtc_class_ops rzn1_rtc_ops = {
static int rzn1_rtc_probe(struct platform_device *pdev)
{
struct rzn1_rtc *rtc;
- int alarm_irq;
+ int irq;
int ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
@@ -317,9 +382,9 @@ static int rzn1_rtc_probe(struct platform_device *pdev)
if (IS_ERR(rtc->base))
return dev_err_probe(&pdev->dev, PTR_ERR(rtc->base), "Missing reg\n");
- alarm_irq = platform_get_irq(pdev, 0);
- if (alarm_irq < 0)
- return alarm_irq;
+ irq = platform_get_irq_byname(pdev, "alarm");
+ if (irq < 0)
+ return irq;
rtc->rtcdev = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc->rtcdev))
@@ -329,8 +394,6 @@ static int rzn1_rtc_probe(struct platform_device *pdev)
rtc->rtcdev->range_max = RTC_TIMESTAMP_END_2099;
rtc->rtcdev->alarm_offset_max = 7 * 86400;
rtc->rtcdev->ops = &rzn1_rtc_ops;
- set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->rtcdev->features);
- clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtcdev->features);
ret = devm_pm_runtime_enable(&pdev->dev);
if (ret < 0)
@@ -349,13 +412,24 @@ static int rzn1_rtc_probe(struct platform_device *pdev)
/* Disable all interrupts */
writel(0, rtc->base + RZN1_RTC_CTL1);
- ret = devm_request_irq(&pdev->dev, alarm_irq, rzn1_rtc_alarm_irq, 0,
- dev_name(&pdev->dev), rtc);
+ spin_lock_init(&rtc->ctl1_access_lock);
+
+ ret = devm_request_irq(&pdev->dev, irq, rzn1_rtc_alarm_irq, 0, "RZN1 RTC Alarm", rtc);
if (ret) {
- dev_err(&pdev->dev, "RTC timer interrupt not available\n");
+ dev_err(&pdev->dev, "RTC alarm interrupt not available\n");
goto dis_runtime_pm;
}
+ irq = platform_get_irq_byname_optional(pdev, "pps");
+ if (irq >= 0)
+ ret = devm_request_irq(&pdev->dev, irq, rzn1_rtc_1s_irq, 0, "RZN1 RTC 1s", rtc);
+
+ if (irq < 0 || ret) {
+ set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->rtcdev->features);
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtcdev->features);
+ dev_warn(&pdev->dev, "RTC pps interrupt not available. Alarm has only minute accuracy\n");
+ }
+
ret = devm_rtc_register_device(rtc->rtcdev);
if (ret)
goto dis_runtime_pm;
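The rzn1 rework deserves a note: the RZ/N1 alarm registers match only minute, hour and weekday, so second accuracy is synthesized from two interrupts. The minute alarm (ALME) fires first; its handler switches CTL1 over to the 1-second interrupt (1SE), whose handler compares the SECC seconds counter against the stored alarm second before calling rtc_update_irq(). Both handlers and the enable path serialize CTL1 with the new spinlock through the scope-based guard() helper from <linux/cleanup.h>:

	guard(spinlock)(&rtc->ctl1_access_lock);

	ctl1 = readl(rtc->base + RZN1_RTC_CTL1);
	ctl1 &= ~RZN1_RTC_CTL1_ALME;	/* minute match handled */
	ctl1 |= RZN1_RTC_CTL1_1SE;	/* now wait for the exact second */
	writel(ctl1, rtc->base + RZN1_RTC_CTL1);

	/* the lock is released automatically at end of scope */

When the optional "pps" interrupt is absent, the driver degrades to minute resolution and sets RTC_FEATURE_ALARM_RES_MINUTE, as the final hunk shows.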
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index e3dc18882f41..3408d2ab2741 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -63,7 +63,6 @@ MODULE_DEVICE_TABLE(of, s35390a_of_match);
struct s35390a {
struct i2c_client *client[8];
- struct rtc_device *rtc;
int twentyfourhour;
};
@@ -422,6 +421,7 @@ static int s35390a_probe(struct i2c_client *client)
int err, err_read;
unsigned int i;
struct s35390a *s35390a;
+ struct rtc_device *rtc;
char buf, status1;
struct device *dev = &client->dev;
@@ -447,9 +447,9 @@ static int s35390a_probe(struct i2c_client *client)
}
}
- s35390a->rtc = devm_rtc_allocate_device(dev);
- if (IS_ERR(s35390a->rtc))
- return PTR_ERR(s35390a->rtc);
+ rtc = devm_rtc_allocate_device(dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
err_read = s35390a_read_status(s35390a, &status1);
if (err_read < 0) {
@@ -480,17 +480,17 @@ static int s35390a_probe(struct i2c_client *client)
device_set_wakeup_capable(dev, 1);
- s35390a->rtc->ops = &s35390a_rtc_ops;
- s35390a->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
- s35390a->rtc->range_max = RTC_TIMESTAMP_END_2099;
+ rtc->ops = &s35390a_rtc_ops;
+ rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc->range_max = RTC_TIMESTAMP_END_2099;
- set_bit(RTC_FEATURE_ALARM_RES_MINUTE, s35390a->rtc->features);
- clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, s35390a->rtc->features );
+ set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features);
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
if (status1 & S35390A_FLAG_INT2)
- rtc_update_irq(s35390a->rtc, 1, RTC_AF);
+ rtc_update_irq(rtc, 1, RTC_AF);
- return devm_rtc_register_device(s35390a->rtc);
+ return devm_rtc_register_device(rtc);
}
static struct i2c_driver s35390a_driver = {
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index 36acca5b2639..db5c9b641277 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -146,7 +146,6 @@ static const struct s5m_rtc_reg_config s2mps15_rtc_regs = {
struct s5m_rtc_info {
struct device *dev;
- struct i2c_client *i2c;
struct sec_pmic_dev *s5m87xx;
struct regmap *regmap;
struct rtc_device *rtc_dev;
@@ -627,11 +626,10 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
}
info->rtc_24hr_mode = 1;
- if (ret < 0) {
- dev_err(info->dev, "%s: fail to write controlm reg(%d)\n",
- __func__, ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(info->dev, ret,
+ "%s: fail to write controlm reg\n",
+ __func__);
return ret;
}
@@ -640,6 +638,7 @@ static int s5m_rtc_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *s5m87xx = dev_get_drvdata(pdev->dev.parent);
struct s5m_rtc_info *info;
+ struct i2c_client *i2c;
const struct regmap_config *regmap_cfg;
int ret, alarm_irq;
@@ -669,26 +668,21 @@ static int s5m_rtc_probe(struct platform_device *pdev)
alarm_irq = S5M8767_IRQ_RTCA1;
break;
default:
- dev_err(&pdev->dev,
- "Device type %lu is not supported by RTC driver\n",
- platform_get_device_id(pdev)->driver_data);
- return -ENODEV;
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "Device type %lu is not supported by RTC driver\n",
+ platform_get_device_id(pdev)->driver_data);
}
- info->i2c = devm_i2c_new_dummy_device(&pdev->dev, s5m87xx->i2c->adapter,
- RTC_I2C_ADDR);
- if (IS_ERR(info->i2c)) {
- dev_err(&pdev->dev, "Failed to allocate I2C for RTC\n");
- return PTR_ERR(info->i2c);
- }
+ i2c = devm_i2c_new_dummy_device(&pdev->dev, s5m87xx->i2c->adapter,
+ RTC_I2C_ADDR);
+ if (IS_ERR(i2c))
+ return dev_err_probe(&pdev->dev, PTR_ERR(i2c),
+ "Failed to allocate I2C for RTC\n");
- info->regmap = devm_regmap_init_i2c(info->i2c, regmap_cfg);
- if (IS_ERR(info->regmap)) {
- ret = PTR_ERR(info->regmap);
- dev_err(&pdev->dev, "Failed to allocate RTC register map: %d\n",
- ret);
- return ret;
- }
+ info->regmap = devm_regmap_init_i2c(i2c, regmap_cfg);
+ if (IS_ERR(info->regmap))
+ return dev_err_probe(&pdev->dev, PTR_ERR(info->regmap),
+ "Failed to allocate RTC register map\n");
info->dev = &pdev->dev;
info->s5m87xx = s5m87xx;
@@ -696,11 +690,10 @@ static int s5m_rtc_probe(struct platform_device *pdev)
if (s5m87xx->irq_data) {
info->irq = regmap_irq_get_virq(s5m87xx->irq_data, alarm_irq);
- if (info->irq <= 0) {
- dev_err(&pdev->dev, "Failed to get virtual IRQ %d\n",
- alarm_irq);
- return -EINVAL;
- }
+ if (info->irq <= 0)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "Failed to get virtual IRQ %d\n",
+ alarm_irq);
}
platform_set_drvdata(pdev, info);
@@ -724,11 +717,10 @@ static int s5m_rtc_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
s5m_rtc_alarm_irq, 0, "rtc-alarm0",
info);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
- info->irq, ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to request alarm IRQ %d\n",
+ info->irq);
device_init_wakeup(&pdev->dev, true);
}
diff --git a/drivers/rtc/rtc-sd2405al.c b/drivers/rtc/rtc-sd2405al.c
index d2568c3e3876..00c3033e8079 100644
--- a/drivers/rtc/rtc-sd2405al.c
+++ b/drivers/rtc/rtc-sd2405al.c
@@ -42,7 +42,6 @@
struct sd2405al {
struct device *dev;
- struct rtc_device *rtc;
struct regmap *regmap;
};
@@ -167,6 +166,7 @@ static const struct regmap_config sd2405al_regmap_conf = {
static int sd2405al_probe(struct i2c_client *client)
{
struct sd2405al *sd2405al;
+ struct rtc_device *rtc;
int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
@@ -182,17 +182,17 @@ static int sd2405al_probe(struct i2c_client *client)
if (IS_ERR(sd2405al->regmap))
return PTR_ERR(sd2405al->regmap);
- sd2405al->rtc = devm_rtc_allocate_device(&client->dev);
- if (IS_ERR(sd2405al->rtc))
- return PTR_ERR(sd2405al->rtc);
+ rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
- sd2405al->rtc->ops = &sd2405al_rtc_ops;
- sd2405al->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
- sd2405al->rtc->range_max = RTC_TIMESTAMP_END_2099;
+ rtc->ops = &sd2405al_rtc_ops;
+ rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc->range_max = RTC_TIMESTAMP_END_2099;
dev_set_drvdata(&client->dev, sd2405al);
- ret = devm_rtc_register_device(sd2405al->rtc);
+ ret = devm_rtc_register_device(rtc);
if (ret < 0)
return ret;
diff --git a/drivers/rtc/rtc-sd3078.c b/drivers/rtc/rtc-sd3078.c
index fe27b54beaad..10cc1dcfc774 100644
--- a/drivers/rtc/rtc-sd3078.c
+++ b/drivers/rtc/rtc-sd3078.c
@@ -36,11 +36,6 @@
*/
#define WRITE_PROTECT_EN 0
-struct sd3078 {
- struct rtc_device *rtc;
- struct regmap *regmap;
-};
-
/*
* In order to prevent arbitrary modification of the time register,
* when modification of the register,
@@ -49,14 +44,11 @@ struct sd3078 {
* 2. set WRITE2 bit
* 3. set WRITE3 bit
*/
-static void sd3078_enable_reg_write(struct sd3078 *sd3078)
+static void sd3078_enable_reg_write(struct regmap *regmap)
{
- regmap_update_bits(sd3078->regmap, SD3078_REG_CTRL2,
- KEY_WRITE1, KEY_WRITE1);
- regmap_update_bits(sd3078->regmap, SD3078_REG_CTRL1,
- KEY_WRITE2, KEY_WRITE2);
- regmap_update_bits(sd3078->regmap, SD3078_REG_CTRL1,
- KEY_WRITE3, KEY_WRITE3);
+ regmap_update_bits(regmap, SD3078_REG_CTRL2, KEY_WRITE1, KEY_WRITE1);
+ regmap_update_bits(regmap, SD3078_REG_CTRL1, KEY_WRITE2, KEY_WRITE2);
+ regmap_update_bits(regmap, SD3078_REG_CTRL1, KEY_WRITE3, KEY_WRITE3);
}
#if WRITE_PROTECT_EN
@@ -69,14 +61,11 @@ static void sd3078_enable_reg_write(struct sd3078 *sd3078)
* 2. clear WRITE3 bit
* 3. clear WRITE1 bit
*/
-static void sd3078_disable_reg_write(struct sd3078 *sd3078)
+static void sd3078_disable_reg_write(struct regmap *regmap)
{
- regmap_update_bits(sd3078->regmap, SD3078_REG_CTRL1,
- KEY_WRITE2, 0);
- regmap_update_bits(sd3078->regmap, SD3078_REG_CTRL1,
- KEY_WRITE3, 0);
- regmap_update_bits(sd3078->regmap, SD3078_REG_CTRL2,
- KEY_WRITE1, 0);
+ regmap_update_bits(regmap, SD3078_REG_CTRL1, KEY_WRITE2, 0);
+ regmap_update_bits(regmap, SD3078_REG_CTRL1, KEY_WRITE3, 0);
+ regmap_update_bits(regmap, SD3078_REG_CTRL2, KEY_WRITE1, 0);
}
#endif
@@ -85,11 +74,10 @@ static int sd3078_rtc_read_time(struct device *dev, struct rtc_time *tm)
unsigned char hour;
unsigned char rtc_data[NUM_TIME_REGS] = {0};
struct i2c_client *client = to_i2c_client(dev);
- struct sd3078 *sd3078 = i2c_get_clientdata(client);
+ struct regmap *regmap = i2c_get_clientdata(client);
int ret;
- ret = regmap_bulk_read(sd3078->regmap, SD3078_REG_SC, rtc_data,
- NUM_TIME_REGS);
+ ret = regmap_bulk_read(regmap, SD3078_REG_SC, rtc_data, NUM_TIME_REGS);
if (ret < 0) {
dev_err(dev, "reading from RTC failed with err:%d\n", ret);
return ret;
@@ -123,7 +111,7 @@ static int sd3078_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
unsigned char rtc_data[NUM_TIME_REGS];
struct i2c_client *client = to_i2c_client(dev);
- struct sd3078 *sd3078 = i2c_get_clientdata(client);
+ struct regmap *regmap = i2c_get_clientdata(client);
int ret;
rtc_data[SD3078_REG_SC] = bin2bcd(tm->tm_sec);
@@ -135,10 +123,10 @@ static int sd3078_rtc_set_time(struct device *dev, struct rtc_time *tm)
rtc_data[SD3078_REG_YR] = bin2bcd(tm->tm_year - 100);
#if WRITE_PROTECT_EN
- sd3078_enable_reg_write(sd3078);
+ sd3078_enable_reg_write(regmap);
#endif
- ret = regmap_bulk_write(sd3078->regmap, SD3078_REG_SC, rtc_data,
+ ret = regmap_bulk_write(regmap, SD3078_REG_SC, rtc_data,
NUM_TIME_REGS);
if (ret < 0) {
dev_err(dev, "writing to RTC failed with err:%d\n", ret);
@@ -146,7 +134,7 @@ static int sd3078_rtc_set_time(struct device *dev, struct rtc_time *tm)
}
#if WRITE_PROTECT_EN
- sd3078_disable_reg_write(sd3078);
+ sd3078_disable_reg_write(regmap);
#endif
return 0;
@@ -166,36 +154,33 @@ static const struct regmap_config regmap_config = {
static int sd3078_probe(struct i2c_client *client)
{
int ret;
- struct sd3078 *sd3078;
+ struct regmap *regmap;
+ struct rtc_device *rtc;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
- sd3078 = devm_kzalloc(&client->dev, sizeof(*sd3078), GFP_KERNEL);
- if (!sd3078)
- return -ENOMEM;
-
- sd3078->regmap = devm_regmap_init_i2c(client, &regmap_config);
- if (IS_ERR(sd3078->regmap)) {
+ regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(regmap)) {
dev_err(&client->dev, "regmap allocation failed\n");
- return PTR_ERR(sd3078->regmap);
+ return PTR_ERR(regmap);
}
- i2c_set_clientdata(client, sd3078);
+ i2c_set_clientdata(client, regmap);
- sd3078->rtc = devm_rtc_allocate_device(&client->dev);
- if (IS_ERR(sd3078->rtc))
- return PTR_ERR(sd3078->rtc);
+ rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
- sd3078->rtc->ops = &sd3078_rtc_ops;
- sd3078->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
- sd3078->rtc->range_max = RTC_TIMESTAMP_END_2099;
+ rtc->ops = &sd3078_rtc_ops;
+ rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc->range_max = RTC_TIMESTAMP_END_2099;
- ret = devm_rtc_register_device(sd3078->rtc);
+ ret = devm_rtc_register_device(rtc);
if (ret)
return ret;
- sd3078_enable_reg_write(sd3078);
+ sd3078_enable_reg_write(regmap);
return 0;
}
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index a0564d443569..1b715db47160 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -1143,11 +1143,11 @@ static int stm32_rtc_probe(struct platform_device *pdev)
goto err;
}
- ret = device_init_wakeup(&pdev->dev, true);
+ ret = devm_device_init_wakeup(&pdev->dev);
if (ret)
goto err;
- ret = dev_pm_set_wake_irq(&pdev->dev, rtc->irq_alarm);
+ ret = devm_pm_set_wake_irq(&pdev->dev, rtc->irq_alarm);
if (ret)
goto err;
@@ -1208,9 +1208,6 @@ err_no_rtc_ck:
if (rtc->data->need_dbp)
regmap_update_bits(rtc->dbp, rtc->dbp_reg, rtc->dbp_mask, 0);
- dev_pm_clear_wake_irq(&pdev->dev);
- device_init_wakeup(&pdev->dev, false);
-
return ret;
}
@@ -1237,9 +1234,6 @@ static void stm32_rtc_remove(struct platform_device *pdev)
/* Enable backup domain write protection if needed */
if (rtc->data->need_dbp)
regmap_update_bits(rtc->dbp, rtc->dbp_reg, rtc->dbp_mask, 0);
-
- dev_pm_clear_wake_irq(&pdev->dev);
- device_init_wakeup(&pdev->dev, false);
}
static int stm32_rtc_suspend(struct device *dev)
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index e3710a762aba..4bfe469c04aa 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -4,13 +4,21 @@ comment "S/390 block device drivers"
config DCSSBLK
def_tristate m
- select FS_DAX_LIMITED
- select DAX
prompt "DCSSBLK support"
depends on S390 && BLOCK
help
Support for dcss block device
+config DCSSBLK_DAX
+ def_bool y
+ depends on DCSSBLK
+ # requires S390 ZONE_DEVICE support
+ depends on BROKEN
+ select DAX
+ prompt "DCSSBLK DAX support"
+ help
+ Enable DAX operation for the dcss block device
+
config DASD
def_tristate y
prompt "Support for DASD devices"
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 0f14d279d30b..7248e547fefb 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -534,6 +534,21 @@ static const struct attribute_group *dcssblk_dev_attr_groups[] = {
NULL,
};
+static int dcssblk_setup_dax(struct dcssblk_dev_info *dev_info)
+{
+ struct dax_device *dax_dev;
+
+ if (!IS_ENABLED(CONFIG_DCSSBLK_DAX))
+ return 0;
+
+ dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops);
+ if (IS_ERR(dax_dev))
+ return PTR_ERR(dax_dev);
+ set_dax_synchronous(dax_dev);
+ dev_info->dax_dev = dax_dev;
+ return dax_add_host(dev_info->dax_dev, dev_info->gd);
+}
+
/*
* device attribute for adding devices
*/
@@ -547,7 +562,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
int rc, i, j, num_of_segments;
struct dcssblk_dev_info *dev_info;
struct segment_info *seg_info, *temp;
- struct dax_device *dax_dev;
char *local_buf;
unsigned long seg_byte_size;
@@ -674,14 +688,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
if (rc)
goto put_dev;
- dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops);
- if (IS_ERR(dax_dev)) {
- rc = PTR_ERR(dax_dev);
- goto put_dev;
- }
- set_dax_synchronous(dax_dev);
- dev_info->dax_dev = dax_dev;
- rc = dax_add_host(dev_info->dax_dev, dev_info->gd);
+ rc = dcssblk_setup_dax(dev_info);
if (rc)
goto out_dax;
@@ -917,7 +924,7 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
*kaddr = __va(dev_info->start + offset);
if (pfn)
*pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset),
- PFN_DEV|PFN_SPECIAL);
+ PFN_DEV);
return (dev_sz - offset) / PAGE_SIZE;
}
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index f66f869dff2e..ad56393e4c93 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -31,6 +31,7 @@ config SOUNDWIRE_AMD
config SOUNDWIRE_CADENCE
tristate
+ select CRC8
config SOUNDWIRE_INTEL
tristate "Intel SoundWire Master driver"
diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
index 5a54b10daf77..a12c68b93b1c 100644
--- a/drivers/soundwire/amd_manager.c
+++ b/drivers/soundwire/amd_manager.c
@@ -143,6 +143,57 @@ static void amd_sdw_wake_enable(struct amd_sdw_manager *amd_manager, bool enable
writel(wake_ctrl, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
}
+static int amd_sdw_set_device_state(struct amd_sdw_manager *amd_manager, u32 target_device_state)
+{
+ u32 sdw_dev_state;
+
+ sdw_dev_state = readl(amd_manager->acp_mmio + AMD_SDW_DEVICE_STATE);
+ switch (amd_manager->instance) {
+ case ACP_SDW0:
+ u32p_replace_bits(&sdw_dev_state, target_device_state,
+ AMD_SDW0_DEVICE_STATE_MASK);
+ break;
+ case ACP_SDW1:
+ u32p_replace_bits(&sdw_dev_state, target_device_state,
+ AMD_SDW1_DEVICE_STATE_MASK);
+ break;
+ default:
+ return -EINVAL;
+ }
+ writel(sdw_dev_state, amd_manager->acp_mmio + AMD_SDW_DEVICE_STATE);
+ sdw_dev_state = readl(amd_manager->acp_mmio + AMD_SDW_DEVICE_STATE);
+ dev_dbg(amd_manager->dev, "AMD_SDW_DEVICE_STATE:0x%x\n", sdw_dev_state);
+ return 0;
+}
+
+static int amd_sdw_host_wake_enable(struct amd_sdw_manager *amd_manager, bool enable)
+{
+ u32 intr_cntl1;
+ u32 sdw_host_wake_irq_mask;
+
+ if (!amd_manager->wake_en_mask)
+ return 0;
+
+ switch (amd_manager->instance) {
+ case ACP_SDW0:
+ sdw_host_wake_irq_mask = AMD_SDW0_HOST_WAKE_INTR_MASK;
+ break;
+ case ACP_SDW1:
+ sdw_host_wake_irq_mask = AMD_SDW1_HOST_WAKE_INTR_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ intr_cntl1 = readl(amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(ACP_SDW1));
+ if (enable)
+ intr_cntl1 |= sdw_host_wake_irq_mask;
+ else
+ intr_cntl1 &= ~sdw_host_wake_irq_mask;
+ writel(intr_cntl1, amd_manager->acp_mmio + ACP_EXTERNAL_INTR_CNTL(ACP_SDW1));
+ return 0;
+}
+
static void amd_sdw_ctl_word_prep(u32 *lower_word, u32 *upper_word, struct sdw_msg *msg,
int cmd_offset)
{
@@ -295,7 +346,7 @@ static enum sdw_command_response amd_sdw_fill_msg_resp(struct amd_sdw_manager *a
msg->dev_num);
return SDW_CMD_FAIL;
}
- dev_err_ratelimited(amd_manager->dev, "command is ignored for Slave %d\n",
+ dev_dbg_ratelimited(amd_manager->dev, "command is ignored for Slave %d\n",
msg->dev_num);
return SDW_CMD_IGNORED;
}
@@ -446,6 +497,10 @@ static int amd_sdw_port_params(struct sdw_bus *bus, struct sdw_port_params *p_pa
return -EINVAL;
}
break;
+ case ACP70_PCI_REV_ID:
+ case ACP71_PCI_REV_ID:
+ frame_fmt_reg = acp70_sdw_dp_reg[p_params->num].frame_fmt_reg;
+ break;
default:
return -EINVAL;
}
@@ -494,6 +549,14 @@ static int amd_sdw_transport_params(struct sdw_bus *bus,
return -EINVAL;
}
break;
+ case ACP70_PCI_REV_ID:
+ case ACP71_PCI_REV_ID:
+ frame_fmt_reg = acp70_sdw_dp_reg[params->port_num].frame_fmt_reg;
+ sample_int_reg = acp70_sdw_dp_reg[params->port_num].sample_int_reg;
+ hctrl_dp0_reg = acp70_sdw_dp_reg[params->port_num].hctrl_dp0_reg;
+ offset_reg = acp70_sdw_dp_reg[params->port_num].offset_reg;
+ lane_ctrl_ch_en_reg = acp70_sdw_dp_reg[params->port_num].lane_ctrl_ch_en_reg;
+ break;
default:
return -EINVAL;
}
@@ -549,6 +612,10 @@ static int amd_sdw_port_enable(struct sdw_bus *bus,
return -EINVAL;
}
break;
+ case ACP70_PCI_REV_ID:
+ case ACP71_PCI_REV_ID:
+ lane_ctrl_ch_en_reg = acp70_sdw_dp_reg[enable_ch->port_num].lane_ctrl_ch_en_reg;
+ break;
default:
return -EINVAL;
}
@@ -849,6 +916,7 @@ static void amd_sdw_update_slave_status(u32 status_change_0to7, u32 status_chang
static void amd_sdw_process_wake_event(struct amd_sdw_manager *amd_manager)
{
+ dev_dbg(amd_manager->dev, "SoundWire Wake event reported\n");
pm_request_resume(amd_manager->dev);
writel(0x00, amd_manager->acp_mmio + ACP_SW_WAKE_EN(amd_manager->instance));
writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
@@ -965,6 +1033,11 @@ static int amd_sdw_manager_probe(struct platform_device *pdev)
return -EINVAL;
}
break;
+ case ACP70_PCI_REV_ID:
+ case ACP71_PCI_REV_ID:
+ amd_manager->num_dout_ports = AMD_ACP70_SDW_MAX_TX_PORTS;
+ amd_manager->num_din_ports = AMD_ACP70_SDW_MAX_RX_PORTS;
+ break;
default:
return -EINVAL;
}
@@ -1137,8 +1210,21 @@ static int __maybe_unused amd_suspend(struct device *dev)
if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
amd_sdw_wake_enable(amd_manager, false);
- return amd_sdw_clock_stop(amd_manager);
+ if (amd_manager->acp_rev >= ACP70_PCI_REV_ID) {
+ ret = amd_sdw_host_wake_enable(amd_manager, false);
+ if (ret)
+ return ret;
+ }
+ ret = amd_sdw_clock_stop(amd_manager);
+ if (ret)
+ return ret;
} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
+ amd_sdw_wake_enable(amd_manager, false);
+ if (amd_manager->acp_rev >= ACP70_PCI_REV_ID) {
+ ret = amd_sdw_host_wake_enable(amd_manager, false);
+ if (ret)
+ return ret;
+ }
/*
* As per the hardware programming sequence on AMD platforms,
* clock stop should be invoked before powering off
@@ -1146,7 +1232,14 @@ static int __maybe_unused amd_suspend(struct device *dev)
ret = amd_sdw_clock_stop(amd_manager);
if (ret)
return ret;
- return amd_deinit_sdw_manager(amd_manager);
+ ret = amd_deinit_sdw_manager(amd_manager);
+ if (ret)
+ return ret;
+ }
+ if (amd_manager->acp_rev >= ACP70_PCI_REV_ID) {
+ ret = amd_sdw_set_device_state(amd_manager, AMD_SDW_DEVICE_STATE_D3);
+ if (ret)
+ return ret;
}
return 0;
}
@@ -1156,6 +1249,7 @@ static int __maybe_unused amd_suspend_runtime(struct device *dev)
struct amd_sdw_manager *amd_manager = dev_get_drvdata(dev);
struct sdw_bus *bus = &amd_manager->bus;
int ret;
+ u32 val;
if (bus->prop.hw_disabled) {
dev_dbg(bus->dev, "SoundWire manager %d is disabled,\n",
@@ -1164,12 +1258,40 @@ static int __maybe_unused amd_suspend_runtime(struct device *dev)
}
if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
amd_sdw_wake_enable(amd_manager, true);
- return amd_sdw_clock_stop(amd_manager);
+ if (amd_manager->acp_rev >= ACP70_PCI_REV_ID) {
+ ret = amd_sdw_host_wake_enable(amd_manager, true);
+ if (ret)
+ return ret;
+ }
+ ret = amd_sdw_clock_stop(amd_manager);
+ if (ret)
+ return ret;
} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
+ amd_sdw_wake_enable(amd_manager, true);
+ if (amd_manager->acp_rev >= ACP70_PCI_REV_ID) {
+ ret = amd_sdw_host_wake_enable(amd_manager, true);
+ if (ret)
+ return ret;
+ }
ret = amd_sdw_clock_stop(amd_manager);
if (ret)
return ret;
- return amd_deinit_sdw_manager(amd_manager);
+ ret = amd_deinit_sdw_manager(amd_manager);
+ if (ret)
+ return ret;
+ }
+ if (amd_manager->acp_rev >= ACP70_PCI_REV_ID) {
+ ret = amd_sdw_set_device_state(amd_manager, AMD_SDW_DEVICE_STATE_D3);
+ if (ret)
+ return ret;
+ if (amd_manager->wake_en_mask) {
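+ /* ensure ACP PME generation is enabled before suspending with wake enabled */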
+ val = readl(amd_manager->acp_mmio + ACP_PME_EN);
+ if (!val) {
+ writel(1, amd_manager->acp_mmio + ACP_PME_EN);
+ val = readl(amd_manager->acp_mmio + ACP_PME_EN);
+ dev_dbg(amd_manager->dev, "ACP_PME_EN:0x%x\n", val);
+ }
+ }
}
return 0;
}
@@ -1188,9 +1310,21 @@ static int __maybe_unused amd_resume_runtime(struct device *dev)
}
if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
- return amd_sdw_clock_stop_exit(amd_manager);
+ ret = amd_sdw_clock_stop_exit(amd_manager);
+ if (ret)
+ return ret;
+ if (amd_manager->acp_rev >= ACP70_PCI_REV_ID) {
+ ret = amd_sdw_host_wake_enable(amd_manager, false);
+ if (ret)
+ return ret;
+ }
} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
writel(0x00, amd_manager->acp_mmio + ACP_SW_WAKE_EN(amd_manager->instance));
+ if (amd_manager->acp_rev >= ACP70_PCI_REV_ID) {
+ ret = amd_sdw_host_wake_enable(amd_manager, false);
+ if (ret)
+ return ret;
+ }
val = readl(amd_manager->mmio + ACP_SW_CLK_RESUME_CTRL);
if (val) {
val |= AMD_SDW_CLK_RESUME_REQ;
@@ -1211,6 +1345,11 @@ static int __maybe_unused amd_resume_runtime(struct device *dev)
return ret;
amd_sdw_set_frameshape(amd_manager);
}
+ if (amd_manager->acp_rev >= ACP70_PCI_REV_ID) {
+ ret = amd_sdw_set_device_state(amd_manager, AMD_SDW_DEVICE_STATE_D0);
+ if (ret)
+ return ret;
+ }
return 0;
}
diff --git a/drivers/soundwire/amd_manager.h b/drivers/soundwire/amd_manager.h
index cc2170e4521e..6cc916b0c820 100644
--- a/drivers/soundwire/amd_manager.h
+++ b/drivers/soundwire/amd_manager.h
@@ -159,8 +159,11 @@
#define AMD_ACP63_SDW0_MAX_RX_PORTS 3
#define AMD_ACP63_SDW1_MAX_TX_PORTS 1
#define AMD_ACP63_SDW1_MAX_RX_PORTS 1
+#define AMD_ACP70_SDW_MAX_TX_PORTS 3
+#define AMD_ACP70_SDW_MAX_RX_PORTS 3
#define AMD_ACP63_SDW0_MAX_DAI 6
#define AMD_ACP63_SDW1_MAX_DAI 2
+#define AMD_ACP70_SDW_MAX_DAI 6
#define AMD_SDW_SLAVE_0_ATTACHED 5
#define AMD_SDW_SSP_COUNTER_VAL 3
@@ -191,6 +194,14 @@
#define AMD_SDW_CLK_RESUME_DONE 3
#define AMD_SDW_WAKE_STAT_MASK BIT(16)
#define AMD_SDW_WAKE_INTR_MASK BIT(16)
+#define AMD_SDW0_HOST_WAKE_INTR_MASK BIT(22)
+#define AMD_SDW1_HOST_WAKE_INTR_MASK BIT(23)
+#define AMD_SDW_DEVICE_STATE 0x1430
+#define AMD_SDW0_DEVICE_STATE_MASK GENMASK(1, 0)
+#define AMD_SDW1_DEVICE_STATE_MASK GENMASK(3, 2)
+#define AMD_SDW_DEVICE_STATE_D0 0
+#define AMD_SDW_DEVICE_STATE_D3 3
+#define ACP_PME_EN 0x0001400
static u32 amd_sdw_freq_tbl[AMD_SDW_MAX_FREQ_NUM] = {
AMD_SDW_DEFAULT_CLK_FREQ,
@@ -244,6 +255,21 @@ static struct sdw_manager_dp_reg acp63_sdw1_dp_reg[AMD_ACP63_SDW1_MAX_DAI] = {
ACP_SW_AUDIO1_RX_OFFSET, ACP_SW_AUDIO1_RX_CHANNEL_ENABLE_DP0}
};
+static struct sdw_manager_dp_reg acp70_sdw_dp_reg[AMD_ACP70_SDW_MAX_DAI] = {
+ {ACP_SW_AUDIO0_TX_FRAME_FORMAT, ACP_SW_AUDIO0_TX_SAMPLEINTERVAL, ACP_SW_AUDIO0_TX_HCTRL_DP0,
+ ACP_SW_AUDIO0_TX_OFFSET_DP0, ACP_SW_AUDIO0_TX_CHANNEL_ENABLE_DP0},
+ {ACP_SW_AUDIO1_TX_FRAME_FORMAT, ACP_SW_AUDIO1_TX_SAMPLEINTERVAL, ACP_SW_AUDIO1_TX_HCTRL,
+ ACP_SW_AUDIO1_TX_OFFSET, ACP_SW_AUDIO1_TX_CHANNEL_ENABLE_DP0},
+ {ACP_SW_AUDIO2_TX_FRAME_FORMAT, ACP_SW_AUDIO2_TX_SAMPLEINTERVAL, ACP_SW_AUDIO2_TX_HCTRL,
+ ACP_SW_AUDIO2_TX_OFFSET, ACP_SW_AUDIO2_TX_CHANNEL_ENABLE_DP0},
+ {ACP_SW_AUDIO0_RX_FRAME_FORMAT, ACP_SW_AUDIO0_RX_SAMPLEINTERVAL, ACP_SW_AUDIO0_RX_HCTRL_DP0,
+ ACP_SW_AUDIO0_RX_OFFSET_DP0, ACP_SW_AUDIO0_RX_CHANNEL_ENABLE_DP0},
+ {ACP_SW_AUDIO1_RX_FRAME_FORMAT, ACP_SW_AUDIO1_RX_SAMPLEINTERVAL, ACP_SW_AUDIO1_RX_HCTRL,
+ ACP_SW_AUDIO1_RX_OFFSET, ACP_SW_AUDIO1_RX_CHANNEL_ENABLE_DP0},
+ {ACP_SW_AUDIO2_RX_FRAME_FORMAT, ACP_SW_AUDIO2_RX_SAMPLEINTERVAL, ACP_SW_AUDIO2_RX_HCTRL,
+ ACP_SW_AUDIO2_RX_OFFSET, ACP_SW_AUDIO2_RX_CHANNEL_ENABLE_DP0},
+};
+
static u32 sdw_manager_reg_mask_array[AMD_SDW_MAX_MANAGER_COUNT] = {
AMD_SDW0_EXT_INTR_MASK,
AMD_SDW1_EXT_INTR_MASK
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 9b295fc9acd5..6f8a20014e76 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -8,6 +8,7 @@
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_type.h>
+#include <linux/string_choices.h>
#include "bus.h"
#include "irq.h"
#include "sysfs_local.h"
@@ -277,7 +278,7 @@ static int sdw_transfer_unlocked(struct sdw_bus *bus, struct sdw_msg *msg)
if (ret != 0 && ret != -ENODATA)
dev_err(bus->dev, "trf on Slave %d failed:%d %s addr %x count %d\n",
msg->dev_num, ret,
- (msg->flags & SDW_MSG_FLAG_WRITE) ? "write" : "read",
+ str_write_read(msg->flags & SDW_MSG_FLAG_WRITE),
msg->addr, msg->len);
return ret;
@@ -1263,7 +1264,7 @@ int sdw_configure_dpn_intr(struct sdw_slave *slave,
if (slave->bus->params.s_data_mode != SDW_PORT_DATA_MODE_NORMAL) {
dev_dbg(&slave->dev, "TEST FAIL interrupt %s\n",
- enable ? "on" : "off");
+ str_on_off(enable));
mask |= SDW_DPN_INT_TEST_FAIL;
}
@@ -2038,3 +2039,46 @@ void sdw_clear_slave_status(struct sdw_bus *bus, u32 request)
}
}
EXPORT_SYMBOL(sdw_clear_slave_status);
+
+int sdw_bpt_send_async(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg)
+{
+ if (msg->len > SDW_BPT_MSG_MAX_BYTES) {
+ dev_err(bus->dev, "Invalid BPT message length %d\n", msg->len);
+ return -EINVAL;
+ }
+
+ /* check device is enumerated */
+ if (slave->dev_num == SDW_ENUM_DEV_NUM ||
+ slave->dev_num > SDW_MAX_DEVICES) {
+ dev_err(&slave->dev, "Invalid device number %d\n", slave->dev_num);
+ return -ENODEV;
+ }
+
+ /* make sure all callbacks are defined */
+ if (!bus->ops->bpt_send_async ||
+ !bus->ops->bpt_wait) {
+ dev_err(bus->dev, "BPT callbacks not defined\n");
+ return -EOPNOTSUPP;
+ }
+
+ return bus->ops->bpt_send_async(bus, slave, msg);
+}
+EXPORT_SYMBOL(sdw_bpt_send_async);
+
+int sdw_bpt_wait(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg)
+{
+ return bus->ops->bpt_wait(bus, slave, msg);
+}
+EXPORT_SYMBOL(sdw_bpt_wait);
+
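+/* convenience helper: start a BPT transfer and wait for its completion */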
+int sdw_bpt_send_sync(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg)
+{
+ int ret;
+
+ ret = sdw_bpt_send_async(bus, slave, msg);
+ if (ret < 0)
+ return ret;
+
+ return sdw_bpt_wait(bus, slave, msg);
+}
+EXPORT_SYMBOL(sdw_bpt_send_sync);
diff --git a/drivers/soundwire/bus.h b/drivers/soundwire/bus.h
index fc990171b3f7..02651fbb683a 100644
--- a/drivers/soundwire/bus.h
+++ b/drivers/soundwire/bus.h
@@ -72,6 +72,24 @@ struct sdw_msg {
bool page;
};
+/**
+ * struct sdw_bpt_msg - Message structure
+ * @addr: Start Register address accessed in the Slave
+ * @len: number of bytes to transfer. More than 64 KB can be transferred
+ * but a practical limit of SDW_BPT_MSG_MAX_BYTES is enforced.
+ * @dev_num: Slave device number
+ * @flags: transfer flags, indicating whether the transfer is a read or a write
+ * @buf: message data buffer (filled by host for writes, filled
+ * by Peripheral hardware for reads)
+ */
+struct sdw_bpt_msg {
+ u32 addr;
+ u32 len;
+ u8 dev_num;
+ u8 flags;
+ u8 *buf;
+};
+
#define SDW_DOUBLE_RATE_FACTOR 2
#define SDW_STRM_RATE_GROUPING 1
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index f367670ea991..21bb491d026b 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -7,6 +7,7 @@
*/
#include <linux/cleanup.h>
+#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/debugfs.h>
@@ -184,6 +185,7 @@ MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
#define CDNS_PORTCTRL_TEST_FAILED BIT(1)
#define CDNS_PORTCTRL_DIRN BIT(7)
#define CDNS_PORTCTRL_BANK_INVERT BIT(8)
+#define CDNS_PORTCTRL_BULK_ENABLE BIT(16)
#define CDNS_PORT_OFFSET 0x80
@@ -1341,7 +1343,7 @@ static u32 cdns_set_initial_frame_shape(int n_rows, int n_cols)
return val;
}
-static void cdns_init_clock_ctrl(struct sdw_cdns *cdns)
+static int cdns_init_clock_ctrl(struct sdw_cdns *cdns)
{
struct sdw_bus *bus = &cdns->bus;
struct sdw_master_prop *prop = &bus->prop;
@@ -1355,14 +1357,25 @@ static void cdns_init_clock_ctrl(struct sdw_cdns *cdns)
prop->default_row,
prop->default_col);
+ if (!prop->default_frame_rate || !prop->default_row) {
+ dev_err(cdns->dev, "Default frame_rate %d or row %d is invalid\n",
+ prop->default_frame_rate, prop->default_row);
+ return -EINVAL;
+ }
+
/* Set clock divider */
- divider = (prop->mclk_freq / prop->max_clk_freq) - 1;
+ divider = (prop->mclk_freq * SDW_DOUBLE_RATE_FACTOR /
+ bus->params.curr_dr_freq) - 1;
cdns_updatel(cdns, CDNS_MCP_CLK_CTRL0,
CDNS_MCP_CLK_MCLKD_MASK, divider);
cdns_updatel(cdns, CDNS_MCP_CLK_CTRL1,
CDNS_MCP_CLK_MCLKD_MASK, divider);
+ /* Set frame shape based on the actual bus frequency. */
+ prop->default_col = bus->params.curr_dr_freq /
+ prop->default_frame_rate / prop->default_row;
+
/*
* Frame shape changes after initialization have to be done
* with the bank switch mechanism
@@ -1375,6 +1388,8 @@ static void cdns_init_clock_ctrl(struct sdw_cdns *cdns)
ssp_interval = prop->default_frame_rate / SDW_CADENCE_GSYNC_HZ;
cdns_writel(cdns, CDNS_MCP_SSP_CTRL0, ssp_interval);
cdns_writel(cdns, CDNS_MCP_SSP_CTRL1, ssp_interval);
+
+ return 0;
}
/**
@@ -1408,9 +1423,12 @@ EXPORT_SYMBOL(sdw_cdns_soft_reset);
*/
int sdw_cdns_init(struct sdw_cdns *cdns)
{
+ int ret;
u32 val;
- cdns_init_clock_ctrl(cdns);
+ ret = cdns_init_clock_ctrl(cdns);
+ if (ret)
+ return ret;
sdw_cdns_check_self_clearing_bits(cdns, __func__, false, 0);
@@ -1901,13 +1919,20 @@ void sdw_cdns_config_stream(struct sdw_cdns *cdns,
if (cdns->bus.params.m_data_mode != SDW_PORT_DATA_MODE_NORMAL)
val |= CDNS_PORTCTRL_TEST_FAILED;
+ } else if (pdi->num == 0 || pdi->num == 1) {
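+ /* PDI0 and PDI1 carry BPT/BRA bulk traffic */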
+ val |= CDNS_PORTCTRL_BULK_ENABLE;
}
offset = CDNS_PORTCTRL + pdi->num * CDNS_PORT_OFFSET;
cdns_updatel(cdns, offset,
- CDNS_PORTCTRL_DIRN | CDNS_PORTCTRL_TEST_FAILED,
+ CDNS_PORTCTRL_DIRN | CDNS_PORTCTRL_TEST_FAILED |
+ CDNS_PORTCTRL_BULK_ENABLE,
val);
- val = pdi->num;
+ /* DataPort0 needs to be mapped to both PDI0 and PDI1! */
+ if (pdi->num == 1)
+ val = 0;
+ else
+ val = pdi->num;
val |= CDNS_PDI_CONFIG_SOFT_RESET;
val |= FIELD_PREP(CDNS_PDI_CONFIG_CHANNEL, (1 << ch) - 1);
cdns_writel(cdns, CDNS_PDI_CONFIG(pdi->num), val);
@@ -1952,5 +1977,638 @@ struct sdw_cdns_pdi *sdw_cdns_alloc_pdi(struct sdw_cdns *cdns,
}
EXPORT_SYMBOL(sdw_cdns_alloc_pdi);
+/*
+ * The MIPI SoundWire CRC8 polynomial is X^8 + X^6 + X^3 + X^2 + 1, MSB first;
+ * its value is (1)01001101 = 0x4D
+ *
+ * The table below was generated with
+ *
+ * u8 crc8_lookup_table[CRC8_TABLE_SIZE];
+ * crc8_populate_msb(crc8_lookup_table, SDW_CRC8_POLY);
+ *
+ */
+#define SDW_CRC8_SEED 0xFF
+#define SDW_CRC8_POLY 0x4D
+
+static const u8 sdw_crc8_lookup_msb[CRC8_TABLE_SIZE] = {
+ 0x00, 0x4d, 0x9a, 0xd7, 0x79, 0x34, 0xe3, 0xae, /* 0 - 7 */
+ 0xf2, 0xbf, 0x68, 0x25, 0x8b, 0xc6, 0x11, 0x5c, /* 8 -15 */
+ 0xa9, 0xe4, 0x33, 0x7e, 0xd0, 0x9d, 0x4a, 0x07, /* 16 - 23 */
+ 0x5b, 0x16, 0xc1, 0x8c, 0x22, 0x6f, 0xb8, 0xf5, /* 24 - 31 */
+ 0x1f, 0x52, 0x85, 0xc8, 0x66, 0x2b, 0xfc, 0xb1, /* 32 - 39 */
+ 0xed, 0xa0, 0x77, 0x3a, 0x94, 0xd9, 0x0e, 0x43, /* 40 - 47 */
+ 0xb6, 0xfb, 0x2c, 0x61, 0xcf, 0x82, 0x55, 0x18, /* 48 - 55 */
+ 0x44, 0x09, 0xde, 0x93, 0x3d, 0x70, 0xa7, 0xea, /* 56 - 63 */
+ 0x3e, 0x73, 0xa4, 0xe9, 0x47, 0x0a, 0xdd, 0x90, /* 64 - 71 */
+ 0xcc, 0x81, 0x56, 0x1b, 0xb5, 0xf8, 0x2f, 0x62, /* 72 - 79 */
+ 0x97, 0xda, 0x0d, 0x40, 0xee, 0xa3, 0x74, 0x39, /* 80 - 87 */
+ 0x65, 0x28, 0xff, 0xb2, 0x1c, 0x51, 0x86, 0xcb, /* 88 - 95 */
+ 0x21, 0x6c, 0xbb, 0xf6, 0x58, 0x15, 0xc2, 0x8f, /* 96 - 103 */
+ 0xd3, 0x9e, 0x49, 0x04, 0xaa, 0xe7, 0x30, 0x7d, /* 104 - 111 */
+ 0x88, 0xc5, 0x12, 0x5f, 0xf1, 0xbc, 0x6b, 0x26, /* 112 - 119 */
+ 0x7a, 0x37, 0xe0, 0xad, 0x03, 0x4e, 0x99, 0xd4, /* 120 - 127 */
+ 0x7c, 0x31, 0xe6, 0xab, 0x05, 0x48, 0x9f, 0xd2, /* 128 - 135 */
+ 0x8e, 0xc3, 0x14, 0x59, 0xf7, 0xba, 0x6d, 0x20, /* 136 - 143 */
+ 0xd5, 0x98, 0x4f, 0x02, 0xac, 0xe1, 0x36, 0x7b, /* 144 - 151 */
+ 0x27, 0x6a, 0xbd, 0xf0, 0x5e, 0x13, 0xc4, 0x89, /* 152 - 159 */
+ 0x63, 0x2e, 0xf9, 0xb4, 0x1a, 0x57, 0x80, 0xcd, /* 160 - 167 */
+ 0x91, 0xdc, 0x0b, 0x46, 0xe8, 0xa5, 0x72, 0x3f, /* 168 - 175 */
+ 0xca, 0x87, 0x50, 0x1d, 0xb3, 0xfe, 0x29, 0x64, /* 176 - 183 */
+ 0x38, 0x75, 0xa2, 0xef, 0x41, 0x0c, 0xdb, 0x96, /* 184 - 191 */
+ 0x42, 0x0f, 0xd8, 0x95, 0x3b, 0x76, 0xa1, 0xec, /* 192 - 199 */
+ 0xb0, 0xfd, 0x2a, 0x67, 0xc9, 0x84, 0x53, 0x1e, /* 200 - 207 */
+ 0xeb, 0xa6, 0x71, 0x3c, 0x92, 0xdf, 0x08, 0x45, /* 208 - 215 */
+ 0x19, 0x54, 0x83, 0xce, 0x60, 0x2d, 0xfa, 0xb7, /* 216 - 223 */
+ 0x5d, 0x10, 0xc7, 0x8a, 0x24, 0x69, 0xbe, 0xf3, /* 224 - 231 */
+ 0xaf, 0xe2, 0x35, 0x78, 0xd6, 0x9b, 0x4c, 0x01, /* 232 - 239 */
+ 0xf4, 0xb9, 0x6e, 0x23, 0x8d, 0xc0, 0x17, 0x5a, /* 240 - 247 */
+ 0x06, 0x4b, 0x9c, 0xd1, 0x7f, 0x32, 0xe5, 0xa8 /* 248 - 255 */
+};
+
+/* BPT/BRA helpers */
+
+#define SDW_CDNS_BRA_HDR 6 /* defined by MIPI */
+#define SDW_CDNS_BRA_HDR_CRC 1 /* defined by MIPI */
+#define SDW_CDNS_BRA_HDR_CRC_PAD 1 /* Cadence only */
+#define SDW_CDNS_BRA_HDR_RESP 1 /* defined by MIPI */
+#define SDW_CDNS_BRA_HDR_RESP_PAD 1 /* Cadence only */
+
+#define SDW_CDNS_BRA_DATA_PAD 1 /* Cadence only */
+#define SDW_CDNS_BRA_DATA_CRC 1 /* defined by MIPI */
+#define SDW_CDNS_BRA_DATA_CRC_PAD 1 /* Cadence only */
+
+#define SDW_CDNS_BRA_FOOTER_RESP 1 /* defined by MIPI */
+#define SDW_CDNS_BRA_FOOTER_RESP_PAD 1 /* Cadence only */
+
+#define SDW_CDNS_WRITE_PDI1_BUFFER_SIZE \
+ ((SDW_CDNS_BRA_HDR_RESP + SDW_CDNS_BRA_HDR_RESP_PAD + \
+ SDW_CDNS_BRA_FOOTER_RESP + SDW_CDNS_BRA_FOOTER_RESP_PAD) * 2)
+
+#define SDW_CDNS_READ_PDI0_BUFFER_SIZE \
+ ((SDW_CDNS_BRA_HDR + SDW_CDNS_BRA_HDR_CRC + SDW_CDNS_BRA_HDR_CRC_PAD) * 2)
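+/* the "* 2" factor accounts for the PDI packing: each payload byte occupies two bytes in the DMA buffers */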
+
+static unsigned int sdw_cdns_bra_actual_data_size(unsigned int allocated_bytes_per_frame)
+{
+ unsigned int total;
+
+ if (allocated_bytes_per_frame < (SDW_CDNS_BRA_HDR + SDW_CDNS_BRA_HDR_CRC +
+ SDW_CDNS_BRA_HDR_RESP + SDW_CDNS_BRA_DATA_CRC +
+ SDW_CDNS_BRA_FOOTER_RESP))
+ return 0;
+
+ total = allocated_bytes_per_frame - SDW_CDNS_BRA_HDR - SDW_CDNS_BRA_HDR_CRC -
+ SDW_CDNS_BRA_HDR_RESP - SDW_CDNS_BRA_DATA_CRC - SDW_CDNS_BRA_FOOTER_RESP;
+
+ return total;
+}
+
+static unsigned int sdw_cdns_write_pdi0_buffer_size(unsigned int actual_data_size)
+{
+ unsigned int total;
+
+ total = SDW_CDNS_BRA_HDR + SDW_CDNS_BRA_HDR_CRC + SDW_CDNS_BRA_HDR_CRC_PAD;
+
+ total += actual_data_size;
+ if (actual_data_size & 1)
+ total += SDW_CDNS_BRA_DATA_PAD;
+
+ total += SDW_CDNS_BRA_DATA_CRC + SDW_CDNS_BRA_DATA_CRC_PAD;
+
+ return total * 2;
+}
+
+static unsigned int sdw_cdns_read_pdi1_buffer_size(unsigned int actual_data_size)
+{
+ unsigned int total;
+
+ total = SDW_CDNS_BRA_HDR_RESP + SDW_CDNS_BRA_HDR_RESP_PAD;
+
+ total += actual_data_size;
+ if (actual_data_size & 1)
+ total += SDW_CDNS_BRA_DATA_PAD;
+
+ total += SDW_CDNS_BRA_HDR_CRC + SDW_CDNS_BRA_HDR_CRC_PAD;
+
+ total += SDW_CDNS_BRA_FOOTER_RESP + SDW_CDNS_BRA_FOOTER_RESP_PAD;
+
+ return total * 2;
+}
+
+int sdw_cdns_bpt_find_buffer_sizes(int command, /* 0: write, 1: read */
+ int row, int col, unsigned int data_bytes,
+ unsigned int requested_bytes_per_frame,
+ unsigned int *data_per_frame, unsigned int *pdi0_buffer_size,
+ unsigned int *pdi1_buffer_size, unsigned int *num_frames)
+{
+ unsigned int bpt_bits = row * (col - 1);
+ unsigned int bpt_bytes = bpt_bits >> 3;
+ unsigned int actual_bpt_bytes;
+ unsigned int pdi0_tx_size;
+ unsigned int pdi1_rx_size;
+ unsigned int remainder;
+
+ if (!data_bytes)
+ return -EINVAL;
+
+ actual_bpt_bytes = sdw_cdns_bra_actual_data_size(bpt_bytes);
+ if (!actual_bpt_bytes)
+ return -EINVAL;
+
+ if (data_bytes < actual_bpt_bytes)
+ actual_bpt_bytes = data_bytes;
+
+ /*
+ * the caller may want to limit the number of bytes per frame;
+ * honor the request when possible
+ */
+ if (requested_bytes_per_frame < actual_bpt_bytes)
+ actual_bpt_bytes = requested_bytes_per_frame;
+
+ *data_per_frame = actual_bpt_bytes;
+
+ if (command == 0) {
+ /*
+ * for writes the buffer is sized for a full data_per_frame payload
+ * in every frame, even the last one, which may transport fewer bytes
+ */
+
+ *num_frames = DIV_ROUND_UP(data_bytes, actual_bpt_bytes);
+
+ pdi0_tx_size = sdw_cdns_write_pdi0_buffer_size(actual_bpt_bytes);
+ pdi1_rx_size = SDW_CDNS_WRITE_PDI1_BUFFER_SIZE;
+
+ *pdi0_buffer_size = pdi0_tx_size * *num_frames;
+ *pdi1_buffer_size = pdi1_rx_size * *num_frames;
+ } else {
+ /*
+ * for reads we need to retrieve only what is requested in the BPT
+ * header, so the last frame needs to be special-cased
+ */
+ *num_frames = data_bytes / actual_bpt_bytes;
+
+ pdi0_tx_size = SDW_CDNS_READ_PDI0_BUFFER_SIZE;
+ pdi1_rx_size = sdw_cdns_read_pdi1_buffer_size(actual_bpt_bytes);
+
+ *pdi0_buffer_size = pdi0_tx_size * *num_frames;
+ *pdi1_buffer_size = pdi1_rx_size * *num_frames;
+
+ remainder = data_bytes % actual_bpt_bytes;
+ if (remainder) {
+ pdi0_tx_size = SDW_CDNS_READ_PDI0_BUFFER_SIZE;
+ pdi1_rx_size = sdw_cdns_read_pdi1_buffer_size(remainder);
+
+ *num_frames = *num_frames + 1;
+ *pdi0_buffer_size += pdi0_tx_size;
+ *pdi1_buffer_size += pdi1_rx_size;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sdw_cdns_bpt_find_buffer_sizes);
+
+static int sdw_cdns_copy_write_data(u8 *data, int data_size, u8 *dma_buffer, int dma_buffer_size)
+{
+ /*
+ * the implementation copies the data one byte at a time; experiments
+ * with two bytes at a time did not measurably improve performance
+ */
+ int i, j;
+
+ /* size check to prevent out of bounds access */
+ i = data_size - 1;
+ j = (2 * i) - (i & 1);
+ if (data_size & 1)
+ j++;
+ j += 2;
+ if (j >= dma_buffer_size)
+ return -EINVAL;
+
+ /* copy data */
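+ /*
+ * each pair of payload bytes lands in the low two bytes of a 32-bit
+ * PDI word (j = 0,1 then 4,5 then 8,9 ...); the upper two bytes of
+ * each word are left for the frame tag bits
+ */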
+ for (i = 0; i < data_size; i++) {
+ j = (2 * i) - (i & 1);
+ dma_buffer[j] = data[i];
+ }
+ /* add required pad */
+ if (data_size & 1)
+ dma_buffer[++j] = 0;
+ /* skip last two bytes */
+ j += 2;
+
+ /* offset and data are off-by-one */
+ return j + 1;
+}
+
+static int sdw_cdns_prepare_write_pd0_buffer(u8 *header, unsigned int header_size,
+ u8 *data, unsigned int data_size,
+ u8 *dma_buffer, unsigned int dma_buffer_size,
+ unsigned int *dma_data_written,
+ unsigned int frame_counter)
+{
+ int data_written;
+ u8 *last_byte;
+ u8 crc;
+
+ *dma_data_written = 0;
+
+ data_written = sdw_cdns_copy_write_data(header, header_size, dma_buffer, dma_buffer_size);
+ if (data_written < 0)
+ return data_written;
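+ /* byte 3 of the first PDI word carries the frame-start marker and the 4-bit rolling counter */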
+ dma_buffer[3] = BIT(7);
+ dma_buffer[3] |= frame_counter & GENMASK(3, 0);
+
+ dma_buffer += data_written;
+ dma_buffer_size -= data_written;
+ *dma_data_written += data_written;
+
+ crc = SDW_CRC8_SEED;
+ crc = crc8(sdw_crc8_lookup_msb, header, header_size, crc);
+
+ data_written = sdw_cdns_copy_write_data(&crc, 1, dma_buffer, dma_buffer_size);
+ if (data_written < 0)
+ return data_written;
+ dma_buffer += data_written;
+ dma_buffer_size -= data_written;
+ *dma_data_written += data_written;
+
+ data_written = sdw_cdns_copy_write_data(data, data_size, dma_buffer, dma_buffer_size);
+ if (data_written < 0)
+ return data_written;
+ dma_buffer += data_written;
+ dma_buffer_size -= data_written;
+ *dma_data_written += data_written;
+
+ crc = SDW_CRC8_SEED;
+ crc = crc8(sdw_crc8_lookup_msb, data, data_size, crc);
+ data_written = sdw_cdns_copy_write_data(&crc, 1, dma_buffer, dma_buffer_size);
+ if (data_written < 0)
+ return data_written;
+ dma_buffer += data_written;
+ dma_buffer_size -= data_written;
+ *dma_data_written += data_written;
+
+ /* tag last byte */
+ last_byte = dma_buffer - 1;
+ last_byte[0] = BIT(6);
+
+ return 0;
+}
+
+static int sdw_cdns_prepare_read_pd0_buffer(u8 *header, unsigned int header_size,
+ u8 *dma_buffer, unsigned int dma_buffer_size,
+ unsigned int *dma_data_written,
+ unsigned int frame_counter)
+{
+ int data_written;
+ u8 *last_byte;
+ u8 crc;
+
+ *dma_data_written = 0;
+
+ data_written = sdw_cdns_copy_write_data(header, header_size, dma_buffer, dma_buffer_size);
+ if (data_written < 0)
+ return data_written;
+ dma_buffer[3] = BIT(7);
+ dma_buffer[3] |= frame_counter & GENMASK(3, 0);
+
+ dma_buffer += data_written;
+ dma_buffer_size -= data_written;
+ *dma_data_written += data_written;
+
+ crc = SDW_CRC8_SEED;
+ crc = crc8(sdw_crc8_lookup_msb, header, header_size, crc);
+
+ data_written = sdw_cdns_copy_write_data(&crc, 1, dma_buffer, dma_buffer_size);
+ if (data_written < 0)
+ return data_written;
+ dma_buffer += data_written;
+ dma_buffer_size -= data_written;
+ *dma_data_written += data_written;
+
+ /* tag last byte */
+ last_byte = dma_buffer - 1;
+ last_byte[0] = BIT(6);
+
+ return 0;
+}
+
+#define CDNS_BPT_ROLLING_COUNTER_START 1
+
+int sdw_cdns_prepare_write_dma_buffer(u8 dev_num, u32 start_register, u8 *data, int data_size,
+ int data_per_frame, u8 *dma_buffer, int dma_buffer_size,
+ int *dma_buffer_total_bytes)
+{
+ int total_dma_data_written = 0;
+ u8 *p_dma_buffer = dma_buffer;
+ u8 header[SDW_CDNS_BRA_HDR];
+ int dma_data_written;
+ u8 *p_data = data;
+ u8 counter;
+ int ret;
+
+ counter = CDNS_BPT_ROLLING_COUNTER_START;
+
+ header[0] = BIT(1); /* write command: BIT(1) set */
+ header[0] |= GENMASK(7, 6); /* header is active */
+ header[0] |= (dev_num << 2);
+
+ while (data_size >= data_per_frame) {
+ header[1] = data_per_frame;
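+ /* bytes 2..5: 32-bit start register address, MSB first */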
+ header[2] = start_register >> 24 & 0xFF;
+ header[3] = start_register >> 16 & 0xFF;
+ header[4] = start_register >> 8 & 0xFF;
+ header[5] = start_register >> 0 & 0xFF;
+
+ ret = sdw_cdns_prepare_write_pd0_buffer(header, SDW_CDNS_BRA_HDR,
+ p_data, data_per_frame,
+ p_dma_buffer, dma_buffer_size,
+ &dma_data_written, counter);
+ if (ret < 0)
+ return ret;
+
+ counter++;
+
+ p_data += data_per_frame;
+ data_size -= data_per_frame;
+
+ p_dma_buffer += dma_data_written;
+ dma_buffer_size -= dma_data_written;
+ total_dma_data_written += dma_data_written;
+
+ start_register += data_per_frame;
+ }
+
+ if (data_size) {
+ header[1] = data_size;
+ header[2] = start_register >> 24 & 0xFF;
+ header[3] = start_register >> 16 & 0xFF;
+ header[4] = start_register >> 8 & 0xFF;
+ header[5] = start_register >> 0 & 0xFF;
+
+ ret = sdw_cdns_prepare_write_pd0_buffer(header, SDW_CDNS_BRA_HDR,
+ p_data, data_size,
+ p_dma_buffer, dma_buffer_size,
+ &dma_data_written, counter);
+ if (ret < 0)
+ return ret;
+
+ total_dma_data_written += dma_data_written;
+ }
+
+ *dma_buffer_total_bytes = total_dma_data_written;
+
+ return 0;
+}
+EXPORT_SYMBOL(sdw_cdns_prepare_write_dma_buffer);
+
+int sdw_cdns_prepare_read_dma_buffer(u8 dev_num, u32 start_register, int data_size,
+ int data_per_frame, u8 *dma_buffer, int dma_buffer_size,
+ int *dma_buffer_total_bytes)
+{
+ int total_dma_data_written = 0;
+ u8 *p_dma_buffer = dma_buffer;
+ u8 header[SDW_CDNS_BRA_HDR];
+ int dma_data_written;
+ u8 counter;
+ int ret;
+
+ counter = CDNS_BPT_ROLLING_COUNTER_START;
+
+ header[0] = 0; /* read command: BIT(1) cleared */
+ header[0] |= GENMASK(7, 6); /* header is active */
+ header[0] |= (dev_num << 2);
+
+ while (data_size >= data_per_frame) {
+ header[1] = data_per_frame;
+ header[2] = start_register >> 24 & 0xFF;
+ header[3] = start_register >> 16 & 0xFF;
+ header[4] = start_register >> 8 & 0xFF;
+ header[5] = start_register >> 0 & 0xFF;
+
+ ret = sdw_cdns_prepare_read_pd0_buffer(header, SDW_CDNS_BRA_HDR, p_dma_buffer,
+ dma_buffer_size, &dma_data_written,
+ counter);
+ if (ret < 0)
+ return ret;
+
+ counter++;
+
+ data_size -= data_per_frame;
+
+ p_dma_buffer += dma_data_written;
+ dma_buffer_size -= dma_data_written;
+ total_dma_data_written += dma_data_written;
+
+ start_register += data_per_frame;
+ }
+
+ if (data_size) {
+ header[1] = data_size;
+ header[2] = start_register >> 24 & 0xFF;
+ header[3] = start_register >> 16 & 0xFF;
+ header[4] = start_register >> 8 & 0xFF;
+ header[5] = start_register >> 0 & 0xFF;
+
+ ret = sdw_cdns_prepare_read_pd0_buffer(header, SDW_CDNS_BRA_HDR, p_dma_buffer,
+ dma_buffer_size, &dma_data_written,
+ counter);
+ if (ret < 0)
+ return ret;
+
+ total_dma_data_written += dma_data_written;
+ }
+
+ *dma_buffer_total_bytes = total_dma_data_written;
+
+ return 0;
+}
+EXPORT_SYMBOL(sdw_cdns_prepare_read_dma_buffer);
+
+static int check_counter(u32 val, u8 counter)
+{
+ u8 frame;
+
+ frame = (val >> 24) & GENMASK(3, 0);
+ if (counter != frame)
+ return -EIO;
+ return 0;
+}
+
+static int check_response(u32 val)
+{
+ u8 response;
+
+ response = (val >> 3) & GENMASK(1, 0);
+ if (response == 0) /* Ignored */
+ return -ENODATA;
+ if (response != 1) /* ACK */
+ return -EIO;
+
+ return 0;
+}
+
+static int check_frame_start(u32 header, u8 counter)
+{
+ int ret;
+
+ /* check frame_start marker */
+ if (!(header & BIT(31)))
+ return -EIO;
+
+ ret = check_counter(header, counter);
+ if (ret < 0)
+ return ret;
+
+ return check_response(header);
+}
+
+static int check_frame_end(u32 footer)
+{
+ /* check frame_end marker */
+ if (!(footer & BIT(30)))
+ return -EIO;
+
+ return check_response(footer);
+}
+
+int sdw_cdns_check_write_response(struct device *dev, u8 *dma_buffer,
+ int dma_buffer_size, int num_frames)
+{
+ u32 *p_data;
+ int counter;
+ u32 header;
+ u32 footer;
+ int ret;
+ int i;
+
+ /* paranoia check on buffer size */
+ if (dma_buffer_size != num_frames * 8)
+ return -EINVAL;
+
+ counter = CDNS_BPT_ROLLING_COUNTER_START;
+ p_data = (u32 *)dma_buffer;
+
+ for (i = 0; i < num_frames; i++) {
+ header = *p_data++;
+ footer = *p_data++;
+
+ ret = check_frame_start(header, counter);
+ if (ret < 0) {
+ dev_err(dev, "%s: bad frame %d/%d start header %x\n",
+ __func__, i, num_frames, header);
+ return ret;
+ }
+
+ ret = check_frame_end(footer);
+ if (ret < 0) {
+ dev_err(dev, "%s: bad frame %d/%d end footer %x\n",
+ __func__, i, num_frames, footer);
+ return ret;
+ }
+
+ counter++;
+ counter &= GENMASK(3, 0);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(sdw_cdns_check_write_response);
+
+static u8 extract_read_data(u32 *data, int num_bytes, u8 *buffer)
+{
+ u32 val;
+ int i;
+ u8 crc;
+ u8 b0;
+ u8 b1;
+
+ crc = SDW_CRC8_SEED;
+
+ /* process two bytes at a time */
+ for (i = 0; i < num_bytes / 2; i++) {
+ val = *data++;
+
+ b0 = val & 0xff;
+ b1 = (val >> 8) & 0xff;
+
+ *buffer++ = b0;
+ crc = crc8(sdw_crc8_lookup_msb, &b0, 1, crc);
+
+ *buffer++ = b1;
+ crc = crc8(sdw_crc8_lookup_msb, &b1, 1, crc);
+ }
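+ /* the host-wake interrupt mask bits for both manager instances live in the ACP_SDW1 control register */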
+ /* handle remaining byte if it exists */
+ if (num_bytes & 1) {
+ val = *data;
+
+ b0 = val & 0xff;
+
+ *buffer++ = b0;
+ crc = crc8(sdw_crc8_lookup_msb, &b0, 1, crc);
+ }
+ return crc;
+}
+
+int sdw_cdns_check_read_response(struct device *dev, u8 *dma_buffer, int dma_buffer_size,
+ u8 *buffer, int buffer_size, int num_frames, int data_per_frame)
+{
+ int total_num_bytes = 0;
+ u32 *p_data;
+ u8 *p_buf;
+ int counter;
+ u32 header;
+ u32 footer;
+ u8 expected_crc;
+ u8 crc;
+ int len;
+ int ret;
+ int i;
+
+ counter = CDNS_BPT_ROLLING_COUNTER_START;
+ p_data = (u32 *)dma_buffer;
+ p_buf = buffer;
+
+ for (i = 0; i < num_frames; i++) {
+ header = *p_data++;
+
+ ret = check_frame_start(header, counter);
+ if (ret < 0) {
+ dev_err(dev, "%s: bad frame %d/%d start header %x\n",
+ __func__, i, num_frames, header);
+ return ret;
+ }
+
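+ /* the last frame may carry fewer bytes than a full data_per_frame payload */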
+ len = data_per_frame;
+ if (total_num_bytes + data_per_frame > buffer_size)
+ len = buffer_size - total_num_bytes;
+
+ crc = extract_read_data(p_data, len, p_buf);
+
+ p_data += (len + 1) / 2;
+ expected_crc = *p_data++ & 0xff;
+
+ if (crc != expected_crc) {
+ dev_err(dev, "%s: bad frame %d/%d crc %#x expected %#x\n",
+ __func__, i, num_frames, crc, expected_crc);
+ return -EIO;
+ }
+
+ p_buf += len;
+ total_num_bytes += len;
+
+ footer = *p_data++;
+ ret = check_frame_end(footer);
+ if (ret < 0) {
+ dev_err(dev, "%s: bad frame %d/%d end footer %x\n",
+ __func__, i, num_frames, footer);
+ return ret;
+ }
+
+ counter++;
+ counter &= GENMASK(3, 0);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(sdw_cdns_check_read_response);
+
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Cadence Soundwire Library");
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
index c34fb050fe4f..9373426c7f63 100644
--- a/drivers/soundwire/cadence_master.h
+++ b/drivers/soundwire/cadence_master.h
@@ -208,4 +208,24 @@ void sdw_cdns_check_self_clearing_bits(struct sdw_cdns *cdns, const char *string
void sdw_cdns_config_update(struct sdw_cdns *cdns);
int sdw_cdns_config_update_set_wait(struct sdw_cdns *cdns);
+/* SoundWire BPT/BRA helpers to format data */
+int sdw_cdns_bpt_find_buffer_sizes(int command, /* 0: write, 1: read */
+ int row, int col, unsigned int data_bytes,
+ unsigned int requested_bytes_per_frame,
+ unsigned int *data_per_frame, unsigned int *pdi0_buffer_size,
+ unsigned int *pdi1_buffer_size, unsigned int *num_frames);
+
+int sdw_cdns_prepare_write_dma_buffer(u8 dev_num, u32 start_register, u8 *data, int data_size,
+ int data_per_frame, u8 *dma_buffer, int dma_buffer_size,
+ int *dma_buffer_total_bytes);
+
+int sdw_cdns_prepare_read_dma_buffer(u8 dev_num, u32 start_register, int data_size,
+ int data_per_frame, u8 *dma_buffer, int dma_buffer_size,
+ int *dma_buffer_total_bytes);
+
+int sdw_cdns_check_write_response(struct device *dev, u8 *dma_buffer,
+ int dma_buffer_size, int num_frames);
+
+int sdw_cdns_check_read_response(struct device *dev, u8 *dma_buffer, int dma_buffer_size,
+ u8 *buffer, int buffer_size, int num_frames, int data_per_frame);
#endif /* __SDW_CADENCE_H */
diff --git a/drivers/soundwire/debugfs.c b/drivers/soundwire/debugfs.c
index c30f571934ee..3099ea074f10 100644
--- a/drivers/soundwire/debugfs.c
+++ b/drivers/soundwire/debugfs.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_registers.h>
+#include <linux/string_choices.h>
#include "bus.h"
static struct dentry *sdw_debugfs_root;
@@ -135,9 +136,10 @@ static int sdw_slave_reg_show(struct seq_file *s_file, void *data)
}
DEFINE_SHOW_ATTRIBUTE(sdw_slave_reg);
-#define MAX_CMD_BYTES 256
+#define MAX_CMD_BYTES (1024 * 1024)
static int cmd;
+static int cmd_type;
static u32 start_addr;
static size_t num_bytes;
static u8 read_buffer[MAX_CMD_BYTES];
@@ -153,7 +155,7 @@ static int set_command(void *data, u64 value)
/* Userspace changed the hardware state behind the kernel's back */
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
- dev_dbg(&slave->dev, "command: %s\n", value ? "read" : "write");
+ dev_dbg(&slave->dev, "command: %s\n", str_read_write(value));
cmd = value;
return 0;
@@ -161,6 +163,25 @@ static int set_command(void *data, u64 value)
DEFINE_DEBUGFS_ATTRIBUTE(set_command_fops, NULL,
set_command, "%llu\n");
+static int set_command_type(void *data, u64 value)
+{
+ struct sdw_slave *slave = data;
+
+ if (value > 1)
+ return -EINVAL;
+
+ /* Userspace changed the hardware state behind the kernel's back */
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+ dev_dbg(&slave->dev, "command type: %s\n", value ? "BRA" : "Column0");
+
+ cmd_type = (int)value;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(set_command_type_fops, NULL,
+ set_command_type, "%llu\n");
+
static int set_start_address(void *data, u64 value)
{
struct sdw_slave *slave = data;
@@ -196,9 +217,28 @@ static int set_num_bytes(void *data, u64 value)
DEFINE_DEBUGFS_ATTRIBUTE(set_num_bytes_fops, NULL,
set_num_bytes, "%llu\n");
+static int do_bpt_sequence(struct sdw_slave *slave, bool write, u8 *buffer)
+{
+ struct sdw_bpt_msg msg = {0};
+
+ msg.addr = start_addr;
+ msg.len = num_bytes;
+ msg.dev_num = slave->dev_num;
+ if (write)
+ msg.flags = SDW_MSG_FLAG_WRITE;
+ else
+ msg.flags = SDW_MSG_FLAG_READ;
+ msg.buf = buffer;
+
+ return sdw_bpt_send_sync(slave->bus, slave, &msg);
+}
+
static int cmd_go(void *data, u64 value)
{
+ const struct firmware *fw = NULL;
struct sdw_slave *slave = data;
+ ktime_t start_t;
+ ktime_t finish_t;
int ret;
if (value != 1)
@@ -215,40 +255,52 @@ static int cmd_go(void *data, u64 value)
return ret;
}
- /* Userspace changed the hardware state behind the kernel's back */
- add_taint(TAINT_USER, LOCKDEP_STILL_OK);
-
- dev_dbg(&slave->dev, "starting command\n");
-
if (cmd == 0) {
- const struct firmware *fw;
-
ret = request_firmware(&fw, firmware_file, &slave->dev);
if (ret < 0) {
dev_err(&slave->dev, "firmware %s not found\n", firmware_file);
goto out;
}
-
- if (fw->size != num_bytes) {
+ if (fw->size < num_bytes) {
dev_err(&slave->dev,
- "firmware %s: unexpected size %zd, desired %zd\n",
+ "firmware %s: firmware size %zd, desired %zd\n",
firmware_file, fw->size, num_bytes);
- release_firmware(fw);
goto out;
}
+ }
- ret = sdw_nwrite_no_pm(slave, start_addr, num_bytes, fw->data);
- release_firmware(fw);
+ /* Userspace changed the hardware state behind the kernel's back */
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+ dev_dbg(&slave->dev, "starting command\n");
+ start_t = ktime_get();
+
+ if (cmd == 0) {
+ if (cmd_type)
+ ret = do_bpt_sequence(slave, true, (u8 *)fw->data);
+ else
+ ret = sdw_nwrite_no_pm(slave, start_addr, num_bytes, fw->data);
} else {
- ret = sdw_nread_no_pm(slave, start_addr, num_bytes, read_buffer);
+ memset(read_buffer, 0, sizeof(read_buffer));
+
+ if (cmd_type)
+ ret = do_bpt_sequence(slave, false, read_buffer);
+ else
+ ret = sdw_nread_no_pm(slave, start_addr, num_bytes, read_buffer);
}
- dev_dbg(&slave->dev, "command completed %d\n", ret);
+ finish_t = ktime_get();
out:
+ if (fw)
+ release_firmware(fw);
+
pm_runtime_mark_last_busy(&slave->dev);
pm_runtime_put(&slave->dev);
+ dev_dbg(&slave->dev, "command completed, num_byte %zu status %d, time %lld ms\n",
+ num_bytes, ret, div_u64(finish_t - start_t, NSEC_PER_MSEC));
+
return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(cmd_go_fops, NULL,
@@ -290,6 +342,7 @@ void sdw_slave_debugfs_init(struct sdw_slave *slave)
/* interface to send arbitrary commands */
debugfs_create_file("command", 0200, d, slave, &set_command_fops);
+ debugfs_create_file("command_type", 0200, d, slave, &set_command_type_fops);
debugfs_create_file("start_address", 0200, d, slave, &set_start_address_fops);
debugfs_create_file("num_bytes", 0200, d, slave, &set_num_bytes_fops);
debugfs_create_file("go", 0200, d, slave, &cmd_go_fops);
diff --git a/drivers/soundwire/generic_bandwidth_allocation.c b/drivers/soundwire/generic_bandwidth_allocation.c
index 59965f43c2fb..1cfaccf43eac 100644
--- a/drivers/soundwire/generic_bandwidth_allocation.c
+++ b/drivers/soundwire/generic_bandwidth_allocation.c
@@ -86,6 +86,49 @@ void sdw_compute_slave_ports(struct sdw_master_runtime *m_rt,
}
EXPORT_SYMBOL(sdw_compute_slave_ports);
+static void sdw_compute_dp0_slave_ports(struct sdw_master_runtime *m_rt)
+{
+ struct sdw_bus *bus = m_rt->bus;
+ struct sdw_slave_runtime *s_rt;
+ struct sdw_port_runtime *p_rt;
+
+ list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
+ list_for_each_entry(p_rt, &s_rt->port_list, port_node) {
+ sdw_fill_xport_params(&p_rt->transport_params, p_rt->num, false,
+ SDW_BLK_GRP_CNT_1, bus->params.col, 0, 0, 1,
+ bus->params.col - 1, SDW_BLK_PKG_PER_PORT, 0x0);
+
+ sdw_fill_port_params(&p_rt->port_params, p_rt->num, bus->params.col - 1,
+ SDW_PORT_FLOW_MODE_ISOCH, SDW_PORT_DATA_MODE_NORMAL);
+ }
+ }
+}
+
+static void sdw_compute_dp0_master_ports(struct sdw_master_runtime *m_rt)
+{
+ struct sdw_port_runtime *p_rt;
+ struct sdw_bus *bus = m_rt->bus;
+
+ list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
+ sdw_fill_xport_params(&p_rt->transport_params, p_rt->num, false,
+ SDW_BLK_GRP_CNT_1, bus->params.col, 0, 0, 1,
+ bus->params.col - 1, SDW_BLK_PKG_PER_PORT, 0x0);
+
+ sdw_fill_port_params(&p_rt->port_params, p_rt->num, bus->params.col - 1,
+ SDW_PORT_FLOW_MODE_ISOCH, SDW_PORT_DATA_MODE_NORMAL);
+ }
+}
+
+static void sdw_compute_dp0_port_params(struct sdw_bus *bus)
+{
+ struct sdw_master_runtime *m_rt;
+
+ list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
+ sdw_compute_dp0_master_ports(m_rt);
+ sdw_compute_dp0_slave_ports(m_rt);
+ }
+}
+
static void sdw_compute_master_ports(struct sdw_master_runtime *m_rt,
struct sdw_group_params *params,
int *port_bo, int hstop)
@@ -194,10 +237,11 @@ static int sdw_compute_group_params(struct sdw_bus *bus,
continue;
} else {
/*
- * Include runtimes with running (ENABLED state) and paused (DISABLED state)
- * streams
+ * Include runtimes with running (ENABLED/PREPARED state) and
+ * paused (DISABLED state) streams
*/
if (m_rt->stream->state != SDW_STREAM_ENABLED &&
+ m_rt->stream->state != SDW_STREAM_PREPARED &&
m_rt->stream->state != SDW_STREAM_DISABLED)
continue;
}
@@ -618,6 +662,11 @@ int sdw_compute_params(struct sdw_bus *bus, struct sdw_stream_runtime *stream)
if (ret < 0)
return ret;
+ if (stream->type == SDW_STREAM_BPT) {
+ sdw_compute_dp0_port_params(bus);
+ return 0;
+ }
+
/* Compute transport and port params */
ret = sdw_compute_port_params(bus, stream);
if (ret < 0) {
diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h
index dddd29381441..d44e70d3c4e3 100644
--- a/drivers/soundwire/intel.h
+++ b/drivers/soundwire/intel.h
@@ -48,11 +48,34 @@ struct sdw_intel_link_res {
struct hdac_bus *hbus;
};
+/**
+ * struct sdw_intel_bpt - SoundWire Intel BPT context
+ * @bpt_tx_stream: BPT TX stream
+ * @dmab_tx_bdl: BPT TX buffer descriptor list
+ * @bpt_rx_stream: BPT RX stream
+ * @dmab_rx_bdl: BPT RX buffer descriptor list
+ * @pdi0_buffer_size: PDI0 buffer size
+ * @pdi1_buffer_size: PDI1 buffer size
+ * @num_frames: number of frames
+ * @data_per_frame: data per frame
+ */
+struct sdw_intel_bpt {
+ struct hdac_ext_stream *bpt_tx_stream;
+ struct snd_dma_buffer dmab_tx_bdl;
+ struct hdac_ext_stream *bpt_rx_stream;
+ struct snd_dma_buffer dmab_rx_bdl;
+ unsigned int pdi0_buffer_size;
+ unsigned int pdi1_buffer_size;
+ unsigned int num_frames;
+ unsigned int data_per_frame;
+};
+
struct sdw_intel {
struct sdw_cdns cdns;
int instance;
struct sdw_intel_link_res *link_res;
bool startup_done;
+ struct sdw_intel_bpt bpt_ctx;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
#endif
diff --git a/drivers/soundwire/intel_ace2x.c b/drivers/soundwire/intel_ace2x.c
index e305c6258ca9..5b31e1f69591 100644
--- a/drivers/soundwire/intel_ace2x.c
+++ b/drivers/soundwire/intel_ace2x.c
@@ -13,12 +13,320 @@
#include <linux/soundwire/sdw_intel.h>
#include <sound/hdaudio.h>
#include <sound/hda-mlink.h>
+#include <sound/hda-sdw-bpt.h>
#include <sound/hda_register.h>
#include <sound/pcm_params.h>
#include "cadence_master.h"
#include "bus.h"
#include "intel.h"
+static int sdw_slave_bpt_stream_add(struct sdw_slave *slave, struct sdw_stream_runtime *stream)
+{
+ struct sdw_stream_config sconfig = {0};
+ struct sdw_port_config pconfig = {0};
+ int ret;
+
+ /* arbitrary configuration */
+ sconfig.frame_rate = 16000;
+ sconfig.ch_count = 1;
+ sconfig.bps = 32; /* this is required for BPT/BRA */
+ sconfig.direction = SDW_DATA_DIR_RX;
+ sconfig.type = SDW_STREAM_BPT;
+
+ pconfig.num = 0;
+ pconfig.ch_mask = BIT(0);
+
+ ret = sdw_stream_add_slave(slave, &sconfig, &pconfig, 1, stream);
+ if (ret)
+ dev_err(&slave->dev, "%s: failed: %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int intel_ace2x_bpt_open_stream(struct sdw_intel *sdw, struct sdw_slave *slave,
+ struct sdw_bpt_msg *msg)
+{
+ struct sdw_cdns *cdns = &sdw->cdns;
+ struct sdw_bus *bus = &cdns->bus;
+ struct sdw_master_prop *prop = &bus->prop;
+ struct sdw_stream_runtime *stream;
+ struct sdw_stream_config sconfig;
+ struct sdw_port_config *pconfig;
+ unsigned int pdi0_buffer_size;
+ unsigned int tx_dma_bandwidth;
+ unsigned int pdi1_buffer_size;
+ unsigned int rx_dma_bandwidth;
+ unsigned int data_per_frame;
+ unsigned int tx_total_bytes;
+ struct sdw_cdns_pdi *pdi0;
+ struct sdw_cdns_pdi *pdi1;
+ unsigned int num_frames;
+ int command;
+ int ret1;
+ int ret;
+ int dir;
+ int i;
+
+ stream = sdw_alloc_stream("BPT", SDW_STREAM_BPT);
+ if (!stream)
+ return -ENOMEM;
+
+ cdns->bus.bpt_stream = stream;
+
+ ret = sdw_slave_bpt_stream_add(slave, stream);
+ if (ret < 0)
+ goto release_stream;
+
+ /* handle PDI0 first */
+ dir = SDW_DATA_DIR_TX;
+
+ pdi0 = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, 1, dir, 0);
+ if (!pdi0) {
+ dev_err(cdns->dev, "%s: sdw_cdns_alloc_pdi0 failed\n", __func__);
+ ret = -EINVAL;
+ goto remove_slave;
+ }
+
+ sdw_cdns_config_stream(cdns, 1, dir, pdi0);
+
+ /* handle PDI1 */
+ dir = SDW_DATA_DIR_RX;
+
+ pdi1 = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, 1, dir, 1);
+ if (!pdi1) {
+ dev_err(cdns->dev, "%s: sdw_cdns_alloc_pdi1 failed\n", __func__);
+ ret = -EINVAL;
+ goto remove_slave;
+ }
+
+ sdw_cdns_config_stream(cdns, 1, dir, pdi1);
+
+ /*
+ * the port config direction, number of channels and frame
+ * rate are arbitrary
+ */
+ sconfig.direction = dir;
+ sconfig.ch_count = 1;
+ sconfig.frame_rate = 16000;
+ sconfig.type = SDW_STREAM_BPT;
+ sconfig.bps = 32; /* this is required for BPT/BRA */
+
+ /* Port configuration */
+ pconfig = kcalloc(2, sizeof(*pconfig), GFP_KERNEL);
+ if (!pconfig) {
+ ret = -ENOMEM;
+ goto remove_slave;
+ }
+
+ for (i = 0; i < 2 /* num_pdi */; i++) {
+ pconfig[i].num = i;
+ pconfig[i].ch_mask = 1;
+ }
+
+ ret = sdw_stream_add_master(&cdns->bus, &sconfig, pconfig, 2, stream);
+ kfree(pconfig);
+
+ if (ret < 0) {
+ dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
+ goto remove_slave;
+ }
+
+ ret = sdw_prepare_stream(cdns->bus.bpt_stream);
+ if (ret < 0)
+ goto remove_master;
+
+ command = (msg->flags & SDW_MSG_FLAG_WRITE) ? 0 : 1;
+
+ ret = sdw_cdns_bpt_find_buffer_sizes(command, cdns->bus.params.row, cdns->bus.params.col,
+ msg->len, SDW_BPT_MSG_MAX_BYTES, &data_per_frame,
+ &pdi0_buffer_size, &pdi1_buffer_size, &num_frames);
+ if (ret < 0)
+ goto deprepare_stream;
+
+ sdw->bpt_ctx.pdi0_buffer_size = pdi0_buffer_size;
+ sdw->bpt_ctx.pdi1_buffer_size = pdi1_buffer_size;
+ sdw->bpt_ctx.num_frames = num_frames;
+ sdw->bpt_ctx.data_per_frame = data_per_frame;
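+ /* DMA bandwidth in bits per second: (buffer bytes per frame) * 8 * frame rate */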
+ tx_dma_bandwidth = div_u64((u64)pdi0_buffer_size * 8 * (u64)prop->default_frame_rate,
+ num_frames);
+ rx_dma_bandwidth = div_u64((u64)pdi1_buffer_size * 8 * (u64)prop->default_frame_rate,
+ num_frames);
+
+ dev_dbg(cdns->dev, "Message len %d transferred in %d frames (%d per frame)\n",
+ msg->len, num_frames, data_per_frame);
+ dev_dbg(cdns->dev, "sizes pdi0 %d pdi1 %d tx_bandwidth %d rx_bandwidth %d\n",
+ pdi0_buffer_size, pdi1_buffer_size, tx_dma_bandwidth, rx_dma_bandwidth);
+
+ ret = hda_sdw_bpt_open(cdns->dev->parent, /* PCI device */
+ sdw->instance, &sdw->bpt_ctx.bpt_tx_stream,
+ &sdw->bpt_ctx.dmab_tx_bdl, pdi0_buffer_size, tx_dma_bandwidth,
+ &sdw->bpt_ctx.bpt_rx_stream, &sdw->bpt_ctx.dmab_rx_bdl,
+ pdi1_buffer_size, rx_dma_bandwidth);
+ if (ret < 0) {
+ dev_err(cdns->dev, "%s: hda_sdw_bpt_open failed %d\n", __func__, ret);
+ goto deprepare_stream;
+ }
+
+ if (!command) {
+ ret = sdw_cdns_prepare_write_dma_buffer(msg->dev_num, msg->addr, msg->buf,
+ msg->len, data_per_frame,
+ sdw->bpt_ctx.dmab_tx_bdl.area,
+ pdi0_buffer_size, &tx_total_bytes);
+ } else {
+ ret = sdw_cdns_prepare_read_dma_buffer(msg->dev_num, msg->addr, msg->len,
+ data_per_frame,
+ sdw->bpt_ctx.dmab_tx_bdl.area,
+ pdi0_buffer_size, &tx_total_bytes);
+ }
+
+ if (!ret)
+ return 0;
+
+ dev_err(cdns->dev, "%s: sdw_prepare_%s_dma_buffer failed %d\n",
+ __func__, command ? "read" : "write", ret);
+
+ ret1 = hda_sdw_bpt_close(cdns->dev->parent, /* PCI device */
+ sdw->bpt_ctx.bpt_tx_stream, &sdw->bpt_ctx.dmab_tx_bdl,
+ sdw->bpt_ctx.bpt_rx_stream, &sdw->bpt_ctx.dmab_rx_bdl);
+ if (ret1 < 0)
+ dev_err(cdns->dev, "%s: hda_sdw_bpt_close failed: ret %d\n",
+ __func__, ret1);
+
+deprepare_stream:
+ sdw_deprepare_stream(cdns->bus.bpt_stream);
+
+remove_master:
+ ret1 = sdw_stream_remove_master(&cdns->bus, cdns->bus.bpt_stream);
+ if (ret1 < 0)
+ dev_err(cdns->dev, "%s: remove master failed: %d\n",
+ __func__, ret1);
+
+remove_slave:
+ ret1 = sdw_stream_remove_slave(slave, cdns->bus.bpt_stream);
+ if (ret1 < 0)
+ dev_err(cdns->dev, "%s: remove slave failed: %d\n",
+ __func__, ret1);
+
+release_stream:
+ sdw_release_stream(cdns->bus.bpt_stream);
+ cdns->bus.bpt_stream = NULL;
+
+ return ret;
+}
+
+static void intel_ace2x_bpt_close_stream(struct sdw_intel *sdw, struct sdw_slave *slave,
+ struct sdw_bpt_msg *msg)
+{
+ struct sdw_cdns *cdns = &sdw->cdns;
+ int ret;
+
+ ret = hda_sdw_bpt_close(cdns->dev->parent /* PCI device */, sdw->bpt_ctx.bpt_tx_stream,
+ &sdw->bpt_ctx.dmab_tx_bdl, sdw->bpt_ctx.bpt_rx_stream,
+ &sdw->bpt_ctx.dmab_rx_bdl);
+ if (ret < 0)
+ dev_err(cdns->dev, "%s: hda_sdw_bpt_close failed: ret %d\n",
+ __func__, ret);
+
+ ret = sdw_deprepare_stream(cdns->bus.bpt_stream);
+ if (ret < 0)
+ dev_err(cdns->dev, "%s: sdw_deprepare_stream failed: ret %d\n",
+ __func__, ret);
+
+ ret = sdw_stream_remove_master(&cdns->bus, cdns->bus.bpt_stream);
+ if (ret < 0)
+ dev_err(cdns->dev, "%s: remove master failed: %d\n",
+ __func__, ret);
+
+ ret = sdw_stream_remove_slave(slave, cdns->bus.bpt_stream);
+ if (ret < 0)
+ dev_err(cdns->dev, "%s: remove slave failed: %d\n",
+ __func__, ret);
+
+ cdns->bus.bpt_stream = NULL;
+}
+
+#define INTEL_BPT_MSG_BYTE_ALIGNMENT 32
+
+static int intel_ace2x_bpt_send_async(struct sdw_intel *sdw, struct sdw_slave *slave,
+ struct sdw_bpt_msg *msg)
+{
+ struct sdw_cdns *cdns = &sdw->cdns;
+ int ret;
+
+ if (msg->len % INTEL_BPT_MSG_BYTE_ALIGNMENT) {
+ dev_err(cdns->dev, "BPT message length %d is not a multiple of %d bytes\n",
+ msg->len, INTEL_BPT_MSG_BYTE_ALIGNMENT);
+ return -EINVAL;
+ }
+
+ dev_dbg(cdns->dev, "BPT Transfer start\n");
+
+ ret = intel_ace2x_bpt_open_stream(sdw, slave, msg);
+ if (ret < 0)
+ return ret;
+
+ ret = hda_sdw_bpt_send_async(cdns->dev->parent, /* PCI device */
+ sdw->bpt_ctx.bpt_tx_stream, sdw->bpt_ctx.bpt_rx_stream);
+ if (ret < 0) {
+ dev_err(cdns->dev, "%s: hda_sdw_bpt_send_async failed: %d\n",
+ __func__, ret);
+
+ intel_ace2x_bpt_close_stream(sdw, slave, msg);
+
+ return ret;
+ }
+
+ ret = sdw_enable_stream(cdns->bus.bpt_stream);
+ if (ret < 0) {
+ dev_err(cdns->dev, "%s: sdw_stream_enable failed: %d\n",
+ __func__, ret);
+ intel_ace2x_bpt_close_stream(sdw, slave, msg);
+ }
+
+ return ret;
+}
+
+static int intel_ace2x_bpt_wait(struct sdw_intel *sdw, struct sdw_slave *slave,
+ struct sdw_bpt_msg *msg)
+{
+ struct sdw_cdns *cdns = &sdw->cdns;
+ int ret;
+
+ dev_dbg(cdns->dev, "BPT Transfer wait\n");
+
+ ret = hda_sdw_bpt_wait(cdns->dev->parent, /* PCI device */
+ sdw->bpt_ctx.bpt_tx_stream, sdw->bpt_ctx.bpt_rx_stream);
+ if (ret < 0)
+ dev_err(cdns->dev, "%s: hda_sdw_bpt_wait failed: %d\n", __func__, ret);
+
+ ret = sdw_disable_stream(cdns->bus.bpt_stream);
+ if (ret < 0) {
+ dev_err(cdns->dev, "%s: sdw_stream_enable failed: %d\n",
+ __func__, ret);
+ goto err;
+ }
+
+ if (msg->flags & SDW_MSG_FLAG_WRITE) {
+ ret = sdw_cdns_check_write_response(cdns->dev, sdw->bpt_ctx.dmab_rx_bdl.area,
+ sdw->bpt_ctx.pdi1_buffer_size,
+ sdw->bpt_ctx.num_frames);
+ if (ret < 0)
+ dev_err(cdns->dev, "%s: BPT Write failed %d\n", __func__, ret);
+ } else {
+ ret = sdw_cdns_check_read_response(cdns->dev, sdw->bpt_ctx.dmab_rx_bdl.area,
+ sdw->bpt_ctx.pdi1_buffer_size,
+ msg->buf, msg->len, sdw->bpt_ctx.num_frames,
+ sdw->bpt_ctx.data_per_frame);
+ if (ret < 0)
+ dev_err(cdns->dev, "%s: BPT Read failed %d\n", __func__, ret);
+ }
+
+err:
+ intel_ace2x_bpt_close_stream(sdw, slave, msg);
+
+ return ret;
+}
+
/*
* shim vendor-specific (vs) ops
*/
@@ -753,7 +1061,11 @@ const struct sdw_intel_hw_ops sdw_intel_lnl_hw_ops = {
.sync_check_cmdsync_unlocked = intel_check_cmdsync_unlocked,
.program_sdi = intel_program_sdi,
+
+ .bpt_send_async = intel_ace2x_bpt_send_async,
+ .bpt_wait = intel_ace2x_bpt_wait,
};
EXPORT_SYMBOL_NS(sdw_intel_lnl_hw_ops, "SOUNDWIRE_INTEL");
MODULE_IMPORT_NS("SND_SOC_SOF_HDA_MLINK");
+MODULE_IMPORT_NS("SND_SOC_SOF_INTEL_HDA_SDW_BPT");
diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c
index 599954d92752..5ea6399e6c9b 100644
--- a/drivers/soundwire/intel_auxdevice.c
+++ b/drivers/soundwire/intel_auxdevice.c
@@ -79,6 +79,27 @@ static bool is_wake_capable(struct sdw_slave *slave)
return false;
}
+static int generic_bpt_send_async(struct sdw_bus *bus, struct sdw_slave *slave,
+ struct sdw_bpt_msg *msg)
+{
+ struct sdw_cdns *cdns = bus_to_cdns(bus);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+
+ if (sdw->link_res->hw_ops->bpt_send_async)
+ return sdw->link_res->hw_ops->bpt_send_async(sdw, slave, msg);
+ return -EOPNOTSUPP;
+}
+
+static int generic_bpt_wait(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg)
+{
+ struct sdw_cdns *cdns = bus_to_cdns(bus);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+
+ if (sdw->link_res->hw_ops->bpt_wait)
+ return sdw->link_res->hw_ops->bpt_wait(sdw, slave, msg);
+ return -EOPNOTSUPP;
+}
+
static int generic_pre_bank_switch(struct sdw_bus *bus)
{
struct sdw_cdns *cdns = bus_to_cdns(bus);
@@ -222,30 +243,9 @@ static int sdw_master_read_intel_prop(struct sdw_bus *bus)
static int intel_prop_read(struct sdw_bus *bus)
{
- struct sdw_master_prop *prop;
-
/* Initialize with default handler to read all DisCo properties */
sdw_master_read_prop(bus);
- /*
- * Only one bus frequency is supported so far, filter
- * frequencies reported in the DSDT
- */
- prop = &bus->prop;
- if (prop->clk_freq && prop->num_clk_freq > 1) {
- unsigned int default_bus_frequency;
-
- default_bus_frequency =
- prop->default_frame_rate *
- prop->default_row *
- prop->default_col /
- SDW_DOUBLE_RATE_FACTOR;
-
- prop->num_clk_freq = 1;
- prop->clk_freq[0] = default_bus_frequency;
- prop->max_clk_freq = default_bus_frequency;
- }
-
/* read Intel-specific properties */
sdw_master_read_intel_prop(bus);
@@ -288,6 +288,9 @@ static struct sdw_master_ops sdw_intel_ops = {
.get_device_num = intel_get_device_num_ida,
.put_device_num = intel_put_device_num_ida,
.new_peripheral_assigned = generic_new_peripheral_assigned,
+
+ .bpt_send_async = generic_bpt_send_async,
+ .bpt_wait = generic_bpt_wait,
};
/*
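
The generic_bpt_* wrappers above bridge the bus-level master ops to the
Intel hw_ops and degrade gracefully: a platform whose hw_ops table does
not provide the BPT callbacks reports -EOPNOTSUPP instead of failing
later. The walk from the bus to the Intel context relies on the usual
container_of chain; a plausible reconstruction of those helpers (the
real definitions live in the Cadence and Intel private headers) is:

	#define bus_to_cdns(_bus)    container_of(_bus, struct sdw_cdns, bus)
	#define cdns_to_intel(_cdns) container_of(_cdns, struct sdw_intel, cdns)
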
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
index 4869b073b11c..d2d99555ec5a 100644
--- a/drivers/soundwire/slave.c
+++ b/drivers/soundwire/slave.c
@@ -13,6 +13,7 @@ static void sdw_slave_release(struct device *dev)
{
struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ of_node_put(slave->dev.of_node);
mutex_destroy(&slave->sdw_dev_lock);
kfree(slave);
}
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index e9df503332bb..a4bea742b5d9 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -14,6 +14,7 @@
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_type.h>
+#include <linux/string_choices.h>
#include <sound/soc.h>
#include "bus.h"
@@ -87,11 +88,14 @@ static int _sdw_program_slave_port_params(struct sdw_bus *bus,
return ret;
}
- /* Program DPN_BlockCtrl3 register */
- ret = sdw_write_no_pm(slave, addr2, t_params->blk_pkg_mode);
- if (ret < 0) {
- dev_err(bus->dev, "DPN_BlockCtrl3 register write failed\n");
- return ret;
+ /* DP0 does not implement BlockCtrl3 */
+ if (t_params->port_num) {
+ /* Program DPN_BlockCtrl3 register */
+ ret = sdw_write_no_pm(slave, addr2, t_params->blk_pkg_mode);
+ if (ret < 0) {
+ dev_err(bus->dev, "DPN_BlockCtrl3 register write failed\n");
+ return ret;
+ }
}
/*
@@ -130,18 +134,28 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
struct sdw_port_params *p_params = &p_rt->port_params;
struct sdw_slave_prop *slave_prop = &s_rt->slave->prop;
u32 addr1, addr2, addr3, addr4, addr5, addr6;
- struct sdw_dpn_prop *dpn_prop;
+ enum sdw_dpn_type port_type;
+ bool read_only_wordlength;
int ret;
u8 wbuf;
if (s_rt->slave->is_mockup_device)
return 0;
- dpn_prop = sdw_get_slave_dpn_prop(s_rt->slave,
- s_rt->direction,
- t_params->port_num);
- if (!dpn_prop)
- return -EINVAL;
+ if (t_params->port_num) {
+ struct sdw_dpn_prop *dpn_prop;
+
+ dpn_prop = sdw_get_slave_dpn_prop(s_rt->slave, s_rt->direction,
+ t_params->port_num);
+ if (!dpn_prop)
+ return -EINVAL;
+
+ read_only_wordlength = dpn_prop->read_only_wordlength;
+ port_type = dpn_prop->type;
+ } else {
+ read_only_wordlength = false;
+ port_type = SDW_DPN_FULL;
+ }
addr1 = SDW_DPN_PORTCTRL(t_params->port_num);
addr2 = SDW_DPN_BLOCKCTRL1(t_params->port_num);
@@ -171,7 +185,7 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
return ret;
}
- if (!dpn_prop->read_only_wordlength) {
+ if (!read_only_wordlength) {
/* Program DPN_BlockCtrl1 register */
ret = sdw_write_no_pm(s_rt->slave, addr2, (p_params->bps - 1));
if (ret < 0) {
@@ -223,9 +237,9 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
}
}
- if (dpn_prop->type != SDW_DPN_SIMPLE) {
+ if (port_type != SDW_DPN_SIMPLE) {
ret = _sdw_program_slave_port_params(bus, s_rt->slave,
- t_params, dpn_prop->type);
+ t_params, port_type);
if (ret < 0)
dev_err(&s_rt->slave->dev,
"Transport reg write failed for port: %d\n",
@@ -358,7 +372,7 @@ static int sdw_enable_disable_master_ports(struct sdw_master_runtime *m_rt,
} else {
dev_err(bus->dev,
"dpn_port_enable_ch not supported, %s failed\n",
- en ? "enable" : "disable");
+ str_enable_disable(en));
return -EINVAL;
}
@@ -432,6 +446,9 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
struct completion *port_ready;
struct sdw_dpn_prop *dpn_prop;
struct sdw_prepare_ch prep_ch;
+ u32 imp_def_interrupts;
+ bool simple_ch_prep_sm;
+ u32 ch_prep_timeout;
bool intr = false;
int ret = 0, val;
u32 addr;
@@ -439,20 +456,35 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
prep_ch.num = p_rt->num;
prep_ch.ch_mask = p_rt->ch_mask;
- dpn_prop = sdw_get_slave_dpn_prop(s_rt->slave,
- s_rt->direction,
- prep_ch.num);
- if (!dpn_prop) {
- dev_err(bus->dev,
- "Slave Port:%d properties not found\n", prep_ch.num);
- return -EINVAL;
+ if (p_rt->num) {
+ dpn_prop = sdw_get_slave_dpn_prop(s_rt->slave, s_rt->direction, prep_ch.num);
+ if (!dpn_prop) {
+ dev_err(bus->dev,
+ "Slave Port:%d properties not found\n", prep_ch.num);
+ return -EINVAL;
+ }
+
+ imp_def_interrupts = dpn_prop->imp_def_interrupts;
+ simple_ch_prep_sm = dpn_prop->simple_ch_prep_sm;
+ ch_prep_timeout = dpn_prop->ch_prep_timeout;
+ } else {
+ struct sdw_dp0_prop *dp0_prop = s_rt->slave->prop.dp0_prop;
+
+ if (!dp0_prop) {
+ dev_err(bus->dev,
+ "Slave DP0 properties not found\n");
+ return -EINVAL;
+ }
+ imp_def_interrupts = dp0_prop->imp_def_interrupts;
+ simple_ch_prep_sm = dp0_prop->simple_ch_prep_sm;
+ ch_prep_timeout = dp0_prop->ch_prep_timeout;
}
prep_ch.prepare = prep;
prep_ch.bank = bus->params.next_bank;
- if (dpn_prop->imp_def_interrupts || !dpn_prop->simple_ch_prep_sm ||
+ if (imp_def_interrupts || !simple_ch_prep_sm ||
bus->params.s_data_mode != SDW_PORT_DATA_MODE_NORMAL)
intr = true;
@@ -463,7 +495,7 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
*/
if (prep && intr) {
ret = sdw_configure_dpn_intr(s_rt->slave, p_rt->num, prep,
- dpn_prop->imp_def_interrupts);
+ imp_def_interrupts);
if (ret < 0)
return ret;
}
@@ -472,7 +504,7 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
sdw_do_port_prep(s_rt, prep_ch, prep ? SDW_OPS_PORT_PRE_PREP : SDW_OPS_PORT_PRE_DEPREP);
/* Prepare Slave port implementing CP_SM */
- if (!dpn_prop->simple_ch_prep_sm) {
+ if (!simple_ch_prep_sm) {
addr = SDW_DPN_PREPARECTRL(p_rt->num);
if (prep)
@@ -489,7 +521,7 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
/* Wait for completion on port ready */
port_ready = &s_rt->slave->port_ready[prep_ch.num];
wait_for_completion_timeout(port_ready,
- msecs_to_jiffies(dpn_prop->ch_prep_timeout));
+ msecs_to_jiffies(ch_prep_timeout));
val = sdw_read_no_pm(s_rt->slave, SDW_DPN_PREPARESTATUS(p_rt->num));
if ((val < 0) || (val & p_rt->ch_mask)) {
@@ -506,7 +538,7 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
/* Disable interrupt after Port de-prepare */
if (!prep && intr)
ret = sdw_configure_dpn_intr(s_rt->slave, p_rt->num, prep,
- dpn_prop->imp_def_interrupts);
+ imp_def_interrupts);
return ret;
}
@@ -1007,7 +1039,8 @@ static int sdw_slave_port_is_valid_range(struct device *dev, int num)
static int sdw_slave_port_config(struct sdw_slave *slave,
struct sdw_slave_runtime *s_rt,
- const struct sdw_port_config *port_config)
+ const struct sdw_port_config *port_config,
+ bool is_bpt_stream)
{
struct sdw_port_runtime *p_rt;
int ret;
@@ -1019,9 +1052,13 @@ static int sdw_slave_port_config(struct sdw_slave *slave,
* TODO: Check valid port range as defined by DisCo/
* slave
*/
- ret = sdw_slave_port_is_valid_range(&slave->dev, port_config[i].num);
- if (ret < 0)
- return ret;
+ if (!is_bpt_stream) {
+ ret = sdw_slave_port_is_valid_range(&slave->dev, port_config[i].num);
+ if (ret < 0)
+ return ret;
+ } else if (port_config[i].num) {
+ return -EINVAL;
+ }
ret = sdw_port_config(p_rt, port_config, i);
if (ret < 0)
@@ -1190,6 +1227,20 @@ static struct sdw_master_runtime
struct sdw_master_runtime *m_rt, *walk_m_rt;
struct list_head *insert_after;
+ if (stream->type == SDW_STREAM_BPT) {
+ if (bus->stream_refcount > 0 || bus->bpt_stream_refcount > 0) {
+ dev_err(bus->dev, "%s: %d/%d audio/BPT stream already allocated\n",
+ __func__, bus->stream_refcount, bus->bpt_stream_refcount);
+ return ERR_PTR(-EBUSY);
+ }
+ } else {
+ if (bus->bpt_stream_refcount > 0) {
+ dev_err(bus->dev, "%s: BPT stream already allocated\n",
+ __func__);
+ return ERR_PTR(-EAGAIN);
+ }
+ }
+
m_rt = kzalloc(sizeof(*m_rt), GFP_KERNEL);
if (!m_rt)
return NULL;
@@ -1218,6 +1269,8 @@ static struct sdw_master_runtime
m_rt->stream = stream;
bus->stream_refcount++;
+ if (stream->type == SDW_STREAM_BPT)
+ bus->bpt_stream_refcount++;
return m_rt;
}
@@ -1266,6 +1319,8 @@ static void sdw_master_rt_free(struct sdw_master_runtime *m_rt,
list_del(&m_rt->bus_node);
kfree(m_rt);
+ if (stream->type == SDW_STREAM_BPT)
+ bus->bpt_stream_refcount--;
bus->stream_refcount--;
}
@@ -1330,6 +1385,11 @@ struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave,
u8 num_ports;
int i;
+ if (!port_num) {
+ dev_err(&slave->dev, "%s: port_num is zero\n", __func__);
+ return NULL;
+ }
+
if (direction == SDW_DATA_DIR_TX) {
num_ports = hweight32(slave->prop.source_ports);
dpn_prop = slave->prop.src_dpn_prop;
@@ -1805,12 +1865,13 @@ static int set_stream(struct snd_pcm_substream *substream,
* sdw_alloc_stream() - Allocate and return stream runtime
*
* @stream_name: SoundWire stream name
+ * @type: stream type (could be PCM, PDM or BPT)
*
* Allocates a SoundWire stream runtime instance.
* sdw_alloc_stream should be called only once per stream. Typically
* invoked from ALSA/ASoC machine/platform driver.
*/
-struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name)
+struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name, enum sdw_stream_type type)
{
struct sdw_stream_runtime *stream;
@@ -1822,6 +1883,7 @@ struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name)
INIT_LIST_HEAD(&stream->master_list);
stream->state = SDW_STREAM_ALLOCATED;
stream->m_rt_count = 0;
+ stream->type = type;
return stream;
}
@@ -1850,7 +1912,7 @@ int sdw_startup_stream(void *sdw_substream)
if (!name)
return -ENOMEM;
- sdw_stream = sdw_alloc_stream(name);
+ sdw_stream = sdw_alloc_stream(name, SDW_STREAM_PCM);
if (!sdw_stream) {
dev_err(rtd->dev, "alloc stream failed for substream DAI %s\n", substream->name);
ret = -ENOMEM;
@@ -1957,6 +2019,12 @@ int sdw_stream_add_master(struct sdw_bus *bus,
m_rt = sdw_master_rt_find(bus, stream);
if (!m_rt) {
m_rt = sdw_master_rt_alloc(bus, stream);
+ if (IS_ERR(m_rt)) {
+ ret = PTR_ERR(m_rt);
+ dev_err(bus->dev, "%s: Master runtime alloc failed for stream:%s: %d\n",
+ __func__, stream->name, ret);
+ goto unlock;
+ }
if (!m_rt) {
dev_err(bus->dev, "%s: Master runtime alloc failed for stream:%s\n",
__func__, stream->name);
@@ -2072,6 +2140,12 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
* So, allocate m_rt and add Slave to it.
*/
m_rt = sdw_master_rt_alloc(slave->bus, stream);
+ if (IS_ERR(m_rt)) {
+ ret = PTR_ERR(m_rt);
+ dev_err(&slave->dev, "%s: Master runtime alloc failed for stream:%s: %d\n",
+ __func__, stream->name, ret);
+ goto unlock;
+ }
if (!m_rt) {
dev_err(&slave->dev, "%s: Master runtime alloc failed for stream:%s\n",
__func__, stream->name);
@@ -2113,7 +2187,8 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
if (ret)
goto unlock;
- ret = sdw_slave_port_config(slave, s_rt, port_config);
+ ret = sdw_slave_port_config(slave, s_rt, port_config,
+ stream->type == SDW_STREAM_BPT);
if (ret)
goto unlock;
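
Taken together, the stream.c changes encode two rules: port number 0
now designates DP0, whose parameters come from dp0_prop rather than the
per-port DisCo tables, and a BPT stream is mutually exclusive with any
other stream on the bus. A minimal allocation sketch under those rules;
the stream name and the helper wrapping it are illustrative only:

	static struct sdw_stream_runtime *alloc_bpt_stream(void)
	{
		struct sdw_stream_runtime *bpt;

		bpt = sdw_alloc_stream("bpt-fw-download", SDW_STREAM_BPT);
		if (!bpt)
			return NULL;

		/*
		 * sdw_stream_add_master()/_slave() will now fail with -EBUSY
		 * if audio streams are active on this bus, and audio stream
		 * allocation fails with -EAGAIN for as long as this BPT
		 * stream exists.
		 */
		return bpt;
	}
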
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
index c85997478b81..17fc0b17e756 100644
--- a/drivers/spi/spi-amd.c
+++ b/drivers/spi/spi-amd.c
@@ -302,7 +302,7 @@ static void amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz)
{
unsigned int i, spd7_val, alt_spd;
- for (i = 0; i < ARRAY_SIZE(amd_spi_freq); i++)
+ for (i = 0; i < ARRAY_SIZE(amd_spi_freq) - 1; i++)
if (speed_hz >= amd_spi_freq[i].speed_hz)
break;
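
The one-line spi-amd change stops the frequency scan one entry early:
when the requested speed is lower than every supported frequency, the
loop used to exit with i == ARRAY_SIZE(amd_spi_freq), and the
subsequent table lookup indexed past the end of the array. Capping the
loop makes the last (slowest) entry the fallback. The pattern as a
standalone kernel-style sketch, with invented table contents:

	static const struct { u32 speed_hz; u32 val; } freq_tbl[] = {
		{ 66000000, 0 }, { 33000000, 1 }, { 800000, 5 },
	};

	static u32 pick_freq(u32 speed_hz)
	{
		unsigned int i;

		/* stop at the last entry so it serves as the slowest fallback */
		for (i = 0; i < ARRAY_SIZE(freq_tbl) - 1; i++)
			if (speed_hz >= freq_tbl[i].speed_hz)
				break;

		return freq_tbl[i].val;
	}
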
diff --git a/drivers/staging/gpib/Kconfig b/drivers/staging/gpib/Kconfig
index 81510db3072e..aa01538d5beb 100644
--- a/drivers/staging/gpib/Kconfig
+++ b/drivers/staging/gpib/Kconfig
@@ -50,7 +50,6 @@ config GPIB_CEC_PCI
tristate "CEC PCI board"
depends on PCI
depends on HAS_IOPORT
- depends on !X86_PAE
select GPIB_COMMON
select GPIB_NEC7210
help
@@ -64,7 +63,6 @@ config GPIB_NI_PCI_ISA
tristate "NI PCI/ISA compatible boards"
depends on ISA_BUS || PCI || PCMCIA
depends on HAS_IOPORT
- depends on !X86_PAE
depends on PCMCIA || !PCMCIA
depends on HAS_IOPORT_MAP
select GPIB_COMMON
@@ -90,7 +88,6 @@ config GPIB_CB7210
tristate "Measurement Computing compatible boards"
depends on HAS_IOPORT
depends on ISA_BUS || PCI || PCMCIA
- depends on !X86_PAE
depends on PCMCIA || !PCMCIA
select GPIB_COMMON
select GPIB_NEC7210
@@ -169,7 +166,6 @@ config GPIB_HP82341
tristate "HP82341x"
select GPIB_COMMON
select GPIB_TMS9914
- depends on BROKEN
depends on ISA_BUS || EISA
help
GPIB driver for HP82341 A/B/C/D boards
@@ -182,7 +178,6 @@ config GPIB_INES
depends on PCI || ISA_BUS || PCMCIA
depends on PCMCIA || !PCMCIA
depends on HAS_IOPORT
- depends on !X86_PAE
select GPIB_COMMON
select GPIB_NEC7210
help
diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
index 3f4f95b7fe34..445b9380ff98 100644
--- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
+++ b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
@@ -4,6 +4,10 @@
* copyright : (C) 2002, 2004 by Frank Mori Hess *
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define DRV_NAME KBUILD_MODNAME
+
#include "agilent_82350b.h"
#include <linux/delay.h>
#include <linux/ioport.h>
@@ -20,8 +24,14 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB driver for Agilent 82350b");
-int agilent_82350b_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
- size_t *bytes_read)
+static int read_transfer_counter(struct agilent_82350b_priv *a_priv);
+static unsigned short read_and_clear_event_status(struct gpib_board *board);
+static void set_transfer_counter(struct agilent_82350b_priv *a_priv, int count);
+static int agilent_82350b_write(struct gpib_board *board, uint8_t *buffer,
+ size_t length, int send_eoi, size_t *bytes_written);
+
+static int agilent_82350b_accel_read(struct gpib_board *board, uint8_t *buffer,
+ size_t length, int *end, size_t *bytes_read)
{
struct agilent_82350b_priv *a_priv = board->private_data;
@@ -48,9 +58,6 @@ int agilent_82350b_accel_read(gpib_board_t *board, uint8_t *buffer, size_t lengt
retval = tms9914_read(board, tms_priv, buffer, 1, end, &num_bytes);
*bytes_read += num_bytes;
- if (retval < 0)
- dev_err(board->gpib_dev, "%s: tms9914_read failed retval=%i\n",
- driver_name, retval);
if (retval < 0 || *end)
return retval;
++buffer;
@@ -66,10 +73,7 @@ int agilent_82350b_accel_read(gpib_board_t *board, uint8_t *buffer, size_t lengt
int j;
int count;
- if (num_fifo_bytes - i < agilent_82350b_fifo_size)
- block_size = num_fifo_bytes - i;
- else
- block_size = agilent_82350b_fifo_size;
+ block_size = min(num_fifo_bytes - i, agilent_82350b_fifo_size);
set_transfer_counter(a_priv, block_size);
writeb(ENABLE_TI_TO_SRAM | DIRECTION_GPIB_TO_HOST,
a_priv->gpib_base + SRAM_ACCESS_CONTROL_REG);
@@ -86,7 +90,6 @@ int agilent_82350b_accel_read(gpib_board_t *board, uint8_t *buffer, size_t lengt
test_bit(DEV_CLEAR_BN, &tms_priv->state) ||
test_bit(TIMO_NUM, &board->status));
if (retval) {
- dev_dbg(board->gpib_dev, "%s: read wait interrupted\n", driver_name);
retval = -ERESTARTSYS;
break;
}
@@ -100,13 +103,10 @@ int agilent_82350b_accel_read(gpib_board_t *board, uint8_t *buffer, size_t lengt
*end = 1;
}
if (test_bit(TIMO_NUM, &board->status)) {
- dev_err(board->gpib_dev, "%s: read timed out\n", driver_name);
retval = -ETIMEDOUT;
break;
}
if (test_bit(DEV_CLEAR_BN, &tms_priv->state)) {
- dev_err(board->gpib_dev, "%s: device clear interrupted read\n",
- driver_name);
retval = -EINTR;
break;
}
@@ -130,30 +130,24 @@ int agilent_82350b_accel_read(gpib_board_t *board, uint8_t *buffer, size_t lengt
return 0;
}
-static int translate_wait_return_value(gpib_board_t *board, int retval)
+static int translate_wait_return_value(struct gpib_board *board, int retval)
{
struct agilent_82350b_priv *a_priv = board->private_data;
struct tms9914_priv *tms_priv = &a_priv->tms9914_priv;
- if (retval) {
- dev_err(board->gpib_dev, "%s: write wait interrupted\n", driver_name);
+ if (retval)
return -ERESTARTSYS;
- }
- if (test_bit(TIMO_NUM, &board->status)) {
- dev_err(board->gpib_dev, "%s: write timed out\n", driver_name);
+ if (test_bit(TIMO_NUM, &board->status))
return -ETIMEDOUT;
- }
- if (test_bit(DEV_CLEAR_BN, &tms_priv->state)) {
- dev_err(board->gpib_dev, "%s: device clear interrupted write\n", driver_name);
+ if (test_bit(DEV_CLEAR_BN, &tms_priv->state))
return -EINTR;
- }
return 0;
}
-int agilent_82350b_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written)
-
+static int agilent_82350b_accel_write(struct gpib_board *board, uint8_t *buffer,
+ size_t length, int send_eoi,
+ size_t *bytes_written)
{
struct agilent_82350b_priv *a_priv = board->private_data;
struct tms9914_priv *tms_priv = &a_priv->tms9914_priv;
@@ -174,10 +168,8 @@ int agilent_82350b_accel_write(gpib_board_t *board, uint8_t *buffer, size_t leng
event_status = read_and_clear_event_status(board);
- //pr_info("ag_ac_wr: event status 0x%x tms state 0x%lx\n", event_status, tms_priv->state);
-
#ifdef EXPERIMENTAL
- pr_info("ag_ac_wr: wait for previous BO to complete if any\n");
+ // wait for previous BO to complete if any
retval = wait_event_interruptible(board->wait,
test_bit(DEV_CLEAR_BN, &tms_priv->state) ||
test_bit(WRITE_READY_BN, &tms_priv->state) ||
@@ -188,22 +180,16 @@ int agilent_82350b_accel_write(gpib_board_t *board, uint8_t *buffer, size_t leng
return retval;
#endif
- //pr_info("ag_ac_wr: sending first byte\n");
retval = agilent_82350b_write(board, buffer, 1, 0, &num_bytes);
*bytes_written += num_bytes;
if (retval < 0)
return retval;
- //pr_info("ag_ac_wr: %ld bytes eoi %d tms state 0x%lx\n",length, send_eoi, tms_priv->state);
-
write_byte(tms_priv, tms_priv->imr0_bits & ~HR_BOIE, IMR0);
for (i = 1; i < fifotransferlength;) {
clear_bit(WRITE_READY_BN, &tms_priv->state);
- if (fifotransferlength - i < agilent_82350b_fifo_size)
- block_size = fifotransferlength - i;
- else
- block_size = agilent_82350b_fifo_size;
+ block_size = min(fifotransferlength - i, agilent_82350b_fifo_size);
set_transfer_counter(a_priv, block_size);
for (j = 0; j < block_size; ++j, ++i) {
// load data into board's sram
@@ -211,13 +197,8 @@ int agilent_82350b_accel_write(gpib_board_t *board, uint8_t *buffer, size_t leng
}
writeb(ENABLE_TI_TO_SRAM, a_priv->gpib_base + SRAM_ACCESS_CONTROL_REG);
- //pr_info("ag_ac_wr: send block: %d bytes tms 0x%lx\n", block_size,
- // tms_priv->state);
-
- if (agilent_82350b_fifo_is_halted(a_priv)) {
+ if (agilent_82350b_fifo_is_halted(a_priv))
writeb(RESTART_STREAM_BIT, a_priv->gpib_base + STREAM_STATUS_REG);
- // pr_info("ag_ac_wr: needed restart\n");
- }
retval = wait_event_interruptible(board->wait,
((event_status =
@@ -227,7 +208,6 @@ int agilent_82350b_accel_write(gpib_board_t *board, uint8_t *buffer, size_t leng
test_bit(TIMO_NUM, &board->status));
writeb(0, a_priv->gpib_base + SRAM_ACCESS_CONTROL_REG);
num_bytes = block_size - read_transfer_counter(a_priv);
- //pr_info("ag_ac_wr: sent %ld bytes tms 0x%lx\n", num_bytes, tms_priv->state);
*bytes_written += num_bytes;
retval = translate_wait_return_value(board, retval);
@@ -239,9 +219,6 @@ int agilent_82350b_accel_write(gpib_board_t *board, uint8_t *buffer, size_t leng
return retval;
if (send_eoi) {
- //pr_info("ag_ac_wr: sending last byte with eoi byte no: %d\n",
- // fifotransferlength+1);
-
retval = agilent_82350b_write(board, buffer + fifotransferlength, 1, send_eoi,
&num_bytes);
*bytes_written += num_bytes;
@@ -251,8 +228,7 @@ int agilent_82350b_accel_write(gpib_board_t *board, uint8_t *buffer, size_t leng
return 0;
}
-unsigned short read_and_clear_event_status(gpib_board_t *board)
-
+static unsigned short read_and_clear_event_status(struct gpib_board *board)
{
struct agilent_82350b_priv *a_priv = board->private_data;
unsigned long flags;
@@ -265,12 +241,12 @@ unsigned short read_and_clear_event_status(gpib_board_t *board)
return status;
}
-irqreturn_t agilent_82350b_interrupt(int irq, void *arg)
+static irqreturn_t agilent_82350b_interrupt(int irq, void *arg)
{
int tms9914_status1 = 0, tms9914_status2 = 0;
int event_status;
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct agilent_82350b_priv *a_priv = board->private_data;
unsigned long flags;
irqreturn_t retval = IRQ_NONE;
@@ -286,7 +262,6 @@ irqreturn_t agilent_82350b_interrupt(int irq, void *arg)
tms9914_interrupt_have_status(board, &a_priv->tms9914_priv, tms9914_status1,
tms9914_status2);
}
-//pr_info("event_status=0x%x s1 %x s2 %x\n", event_status,tms9914_status1,tms9914_status2);
//write-clear status bits
if (event_status & (BUFFER_END_STATUS_BIT | TERM_COUNT_STATUS_BIT)) {
writeb(event_status & (BUFFER_END_STATUS_BIT | TERM_COUNT_STATUS_BIT),
@@ -298,12 +273,9 @@ irqreturn_t agilent_82350b_interrupt(int irq, void *arg)
return retval;
}
-void agilent_82350b_detach(gpib_board_t *board);
-
-const char *driver_name = "agilent_82350b";
-
-int read_transfer_counter(struct agilent_82350b_priv *a_priv)
+static void agilent_82350b_detach(struct gpib_board *board);
+static int read_transfer_counter(struct agilent_82350b_priv *a_priv)
{
int lo, mid, value;
@@ -314,8 +286,7 @@ int read_transfer_counter(struct agilent_82350b_priv *a_priv)
return value;
}
-void set_transfer_counter(struct agilent_82350b_priv *a_priv, int count)
-
+static void set_transfer_counter(struct agilent_82350b_priv *a_priv, int count)
{
int complement = -count;
@@ -326,17 +297,16 @@ void set_transfer_counter(struct agilent_82350b_priv *a_priv, int count)
}
// wrappers for interface functions
-int agilent_82350b_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
- size_t *bytes_read)
-
+static int agilent_82350b_read(struct gpib_board *board, uint8_t *buffer,
+ size_t length, int *end, size_t *bytes_read)
{
struct agilent_82350b_priv *priv = board->private_data;
return tms9914_read(board, &priv->tms9914_priv, buffer, length, end, bytes_read);
}
-int agilent_82350b_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written)
+static int agilent_82350b_write(struct gpib_board *board, uint8_t *buffer,
+ size_t length, int send_eoi, size_t *bytes_written)
{
struct agilent_82350b_priv *priv = board->private_data;
@@ -344,8 +314,8 @@ int agilent_82350b_write(gpib_board_t *board, uint8_t *buffer, size_t length, in
return tms9914_write(board, &priv->tms9914_priv, buffer, length, send_eoi, bytes_written);
}
-int agilent_82350b_command(gpib_board_t *board, uint8_t *buffer, size_t length,
- size_t *bytes_written)
+static int agilent_82350b_command(struct gpib_board *board, uint8_t *buffer,
+ size_t length, size_t *bytes_written)
{
struct agilent_82350b_priv *priv = board->private_data;
@@ -353,7 +323,7 @@ int agilent_82350b_command(gpib_board_t *board, uint8_t *buffer, size_t length,
return tms9914_command(board, &priv->tms9914_priv, buffer, length, bytes_written);
}
-int agilent_82350b_take_control(gpib_board_t *board, int synchronous)
+static int agilent_82350b_take_control(struct gpib_board *board, int synchronous)
{
struct agilent_82350b_priv *priv = board->private_data;
@@ -361,7 +331,7 @@ int agilent_82350b_take_control(gpib_board_t *board, int synchronous)
return tms9914_take_control_workaround(board, &priv->tms9914_priv, synchronous);
}
-int agilent_82350b_go_to_standby(gpib_board_t *board)
+static int agilent_82350b_go_to_standby(struct gpib_board *board)
{
struct agilent_82350b_priv *priv = board->private_data;
@@ -369,7 +339,8 @@ int agilent_82350b_go_to_standby(gpib_board_t *board)
return tms9914_go_to_standby(board, &priv->tms9914_priv);
}
-void agilent_82350b_request_system_control(gpib_board_t *board, int request_control)
+static void agilent_82350b_request_system_control(struct gpib_board *board,
+ int request_control)
{
struct agilent_82350b_priv *a_priv = board->private_data;
@@ -387,7 +358,7 @@ void agilent_82350b_request_system_control(gpib_board_t *board, int request_cont
tms9914_request_system_control(board, &a_priv->tms9914_priv, request_control);
}
-void agilent_82350b_interface_clear(gpib_board_t *board, int assert)
+static void agilent_82350b_interface_clear(struct gpib_board *board, int assert)
{
struct agilent_82350b_priv *priv = board->private_data;
@@ -395,104 +366,96 @@ void agilent_82350b_interface_clear(gpib_board_t *board, int assert)
tms9914_interface_clear(board, &priv->tms9914_priv, assert);
}
-void agilent_82350b_remote_enable(gpib_board_t *board, int enable)
-
+static void agilent_82350b_remote_enable(struct gpib_board *board, int enable)
{
struct agilent_82350b_priv *priv = board->private_data;
tms9914_remote_enable(board, &priv->tms9914_priv, enable);
}
-int agilent_82350b_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
-
+static int agilent_82350b_enable_eos(struct gpib_board *board, uint8_t eos_byte,
+ int compare_8_bits)
{
struct agilent_82350b_priv *priv = board->private_data;
return tms9914_enable_eos(board, &priv->tms9914_priv, eos_byte, compare_8_bits);
}
-void agilent_82350b_disable_eos(gpib_board_t *board)
-
+static void agilent_82350b_disable_eos(struct gpib_board *board)
{
struct agilent_82350b_priv *priv = board->private_data;
tms9914_disable_eos(board, &priv->tms9914_priv);
}
-unsigned int agilent_82350b_update_status(gpib_board_t *board, unsigned int clear_mask)
-
+static unsigned int agilent_82350b_update_status(struct gpib_board *board,
+ unsigned int clear_mask)
{
struct agilent_82350b_priv *priv = board->private_data;
return tms9914_update_status(board, &priv->tms9914_priv, clear_mask);
}
-int agilent_82350b_primary_address(gpib_board_t *board, unsigned int address)
-
+static int agilent_82350b_primary_address(struct gpib_board *board,
+ unsigned int address)
{
struct agilent_82350b_priv *priv = board->private_data;
return tms9914_primary_address(board, &priv->tms9914_priv, address);
}
-int agilent_82350b_secondary_address(gpib_board_t *board, unsigned int address, int enable)
-
+static int agilent_82350b_secondary_address(struct gpib_board *board,
+ unsigned int address, int enable)
{
struct agilent_82350b_priv *priv = board->private_data;
return tms9914_secondary_address(board, &priv->tms9914_priv, address, enable);
}
-int agilent_82350b_parallel_poll(gpib_board_t *board, uint8_t *result)
-
+static int agilent_82350b_parallel_poll(struct gpib_board *board, uint8_t *result)
{
struct agilent_82350b_priv *priv = board->private_data;
return tms9914_parallel_poll(board, &priv->tms9914_priv, result);
}
-void agilent_82350b_parallel_poll_configure(gpib_board_t *board, uint8_t config)
-
+static void agilent_82350b_parallel_poll_configure(struct gpib_board *board,
+ uint8_t config)
{
struct agilent_82350b_priv *priv = board->private_data;
tms9914_parallel_poll_configure(board, &priv->tms9914_priv, config);
}
-void agilent_82350b_parallel_poll_response(gpib_board_t *board, int ist)
-
+static void agilent_82350b_parallel_poll_response(struct gpib_board *board, int ist)
{
struct agilent_82350b_priv *priv = board->private_data;
tms9914_parallel_poll_response(board, &priv->tms9914_priv, ist);
}
-void agilent_82350b_serial_poll_response(gpib_board_t *board, uint8_t status)
-
+static void agilent_82350b_serial_poll_response(struct gpib_board *board, uint8_t status)
{
struct agilent_82350b_priv *priv = board->private_data;
tms9914_serial_poll_response(board, &priv->tms9914_priv, status);
}
-uint8_t agilent_82350b_serial_poll_status(gpib_board_t *board)
-
+static uint8_t agilent_82350b_serial_poll_status(struct gpib_board *board)
{
struct agilent_82350b_priv *priv = board->private_data;
return tms9914_serial_poll_status(board, &priv->tms9914_priv);
}
-int agilent_82350b_line_status(const gpib_board_t *board)
-
+static int agilent_82350b_line_status(const struct gpib_board *board)
{
struct agilent_82350b_priv *priv = board->private_data;
return tms9914_line_status(board, &priv->tms9914_priv);
}
-unsigned int agilent_82350b_t1_delay(gpib_board_t *board, unsigned int nanosec)
-
+static int agilent_82350b_t1_delay(struct gpib_board *board, unsigned int nanosec)
{
struct agilent_82350b_priv *a_priv = board->private_data;
static const int nanosec_per_clock = 30;
@@ -507,16 +470,14 @@ unsigned int agilent_82350b_t1_delay(gpib_board_t *board, unsigned int nanosec)
return value * nanosec_per_clock;
}
-void agilent_82350b_return_to_local(gpib_board_t *board)
-
+static void agilent_82350b_return_to_local(struct gpib_board *board)
{
struct agilent_82350b_priv *priv = board->private_data;
tms9914_return_to_local(board, &priv->tms9914_priv);
}
-int agilent_82350b_allocate_private(gpib_board_t *board)
-
+static int agilent_82350b_allocate_private(struct gpib_board *board)
{
board->private_data = kzalloc(sizeof(struct agilent_82350b_priv), GFP_KERNEL);
if (!board->private_data)
@@ -524,15 +485,14 @@ int agilent_82350b_allocate_private(gpib_board_t *board)
return 0;
}
-void agilent_82350b_free_private(gpib_board_t *board)
-
+static void agilent_82350b_free_private(struct gpib_board *board)
{
kfree(board->private_data);
board->private_data = NULL;
}
-static int init_82350a_hardware(gpib_board_t *board, const gpib_board_config_t *config)
-
+static int init_82350a_hardware(struct gpib_board *board,
+ const gpib_board_config_t *config)
{
struct agilent_82350b_priv *a_priv = board->private_data;
static const unsigned int firmware_length = 5302;
@@ -557,11 +517,10 @@ static int init_82350a_hardware(gpib_board_t *board, const gpib_board_config_t *
return 0;
// need to programme borg
if (!config->init_data || config->init_data_length != firmware_length) {
- dev_err(board->gpib_dev, "%s: the 82350A board requires firmware after powering on.\n",
- driver_name);
+ dev_err(board->gpib_dev, "the 82350A board requires firmware after powering on.\n");
return -EIO;
}
- dev_info(board->gpib_dev, "%s: Loading firmware...\n", driver_name);
+ dev_dbg(board->gpib_dev, "Loading firmware...\n");
// tickle the borg
writel(plx_cntrl_static_bits | PLX9050_USER3_DATA_BIT,
@@ -580,7 +539,7 @@ static int init_82350a_hardware(gpib_board_t *board, const gpib_board_config_t *
usleep_range(10, 20);
}
if (j == timeout) {
- dev_err(board->gpib_dev, "%s: timed out loading firmware.\n", driver_name);
+ dev_err(board->gpib_dev, "timed out loading firmware.\n");
return -ETIMEDOUT;
}
writeb(firmware_data[i], a_priv->gpib_base + CONFIG_DATA_REG);
@@ -591,15 +550,14 @@ static int init_82350a_hardware(gpib_board_t *board, const gpib_board_config_t *
usleep_range(10, 20);
}
if (j == timeout) {
- dev_err(board->gpib_dev, "%s: timed out waiting for firmware load to complete.\n",
- driver_name);
+ dev_err(board->gpib_dev, "timed out waiting for firmware load to complete.\n");
return -ETIMEDOUT;
}
- dev_info(board->gpib_dev, "%s: ...done.\n", driver_name);
+ dev_dbg(board->gpib_dev, " ...done.\n");
return 0;
}
-static int test_sram(gpib_board_t *board)
+static int test_sram(struct gpib_board *board)
{
struct agilent_82350b_priv *a_priv = board->private_data;
@@ -617,19 +575,19 @@ static int test_sram(gpib_board_t *board)
unsigned int read_value = readb(a_priv->sram_base + i);
if ((i & byte_mask) != read_value) {
- dev_err(board->gpib_dev, "%s: SRAM test failed at %d wanted %d got %d\n",
- driver_name, i, (i & byte_mask), read_value);
+ dev_err(board->gpib_dev, "SRAM test failed at %d wanted %d got %d\n",
+ i, (i & byte_mask), read_value);
return -EIO;
}
if (need_resched())
schedule();
}
- dev_info(board->gpib_dev, "%s: SRAM test passed 0x%x bytes checked\n",
- driver_name, sram_length);
+ dev_dbg(board->gpib_dev, "SRAM test passed 0x%x bytes checked\n", sram_length);
return 0;
}
-static int agilent_82350b_generic_attach(gpib_board_t *board, const gpib_board_config_t *config,
+static int agilent_82350b_generic_attach(struct gpib_board *board,
+ const gpib_board_config_t *config,
int use_fifos)
{
@@ -653,14 +611,14 @@ static int agilent_82350b_generic_attach(gpib_board_t *board, const gpib_board_c
PCI_DEVICE_ID_82350B, NULL);
if (a_priv->pci_device) {
a_priv->model = MODEL_82350B;
- dev_info(board->gpib_dev, "%s: Agilent 82350B board found\n", driver_name);
+ dev_dbg(board->gpib_dev, "Agilent 82350B board found\n");
} else {
a_priv->pci_device = gpib_pci_get_device(config, PCI_VENDOR_ID_AGILENT,
PCI_DEVICE_ID_82351A, NULL);
if (a_priv->pci_device) {
a_priv->model = MODEL_82351A;
- dev_info(board->gpib_dev, "%s: Agilent 82351B board found\n", driver_name);
+ dev_dbg(board->gpib_dev, "Agilent 82351B board found\n");
} else {
a_priv->pci_device = gpib_pci_get_subsys(config, PCI_VENDOR_ID_PLX,
@@ -670,46 +628,40 @@ static int agilent_82350b_generic_attach(gpib_board_t *board, const gpib_board_c
a_priv->pci_device);
if (a_priv->pci_device) {
a_priv->model = MODEL_82350A;
- dev_info(board->gpib_dev, "%s: HP/Agilent 82350A board found\n",
- driver_name);
+ dev_dbg(board->gpib_dev, "HP/Agilent 82350A board found\n");
} else {
- dev_err(board->gpib_dev, "%s: no 82350/82351 board found\n",
- driver_name);
+ dev_err(board->gpib_dev, "no 82350/82351 board found\n");
return -ENODEV;
}
}
}
if (pci_enable_device(a_priv->pci_device)) {
- dev_err(board->gpib_dev, "%s: error enabling pci device\n", driver_name);
+ dev_err(board->gpib_dev, "error enabling pci device\n");
return -EIO;
}
- if (pci_request_regions(a_priv->pci_device, driver_name))
- return -EIO;
+ if (pci_request_regions(a_priv->pci_device, DRV_NAME))
+ return -ENOMEM;
switch (a_priv->model) {
case MODEL_82350A:
a_priv->plx_base = ioremap(pci_resource_start(a_priv->pci_device, PLX_MEM_REGION),
pci_resource_len(a_priv->pci_device, PLX_MEM_REGION));
- dev_dbg(board->gpib_dev, "%s: plx base address remapped to 0x%p\n",
- driver_name, a_priv->plx_base);
+ dev_dbg(board->gpib_dev, "plx base address remapped to 0x%p\n", a_priv->plx_base);
a_priv->gpib_base = ioremap(pci_resource_start(a_priv->pci_device,
GPIB_82350A_REGION),
pci_resource_len(a_priv->pci_device,
GPIB_82350A_REGION));
- dev_dbg(board->gpib_dev, "%s: gpib base address remapped to 0x%p\n",
- driver_name, a_priv->gpib_base);
+ dev_dbg(board->gpib_dev, "chip base address remapped to 0x%p\n", a_priv->gpib_base);
tms_priv->mmiobase = a_priv->gpib_base + TMS9914_BASE_REG;
a_priv->sram_base = ioremap(pci_resource_start(a_priv->pci_device,
SRAM_82350A_REGION),
pci_resource_len(a_priv->pci_device,
SRAM_82350A_REGION));
- dev_dbg(board->gpib_dev, "%s: sram base address remapped to 0x%p\n",
- driver_name, a_priv->sram_base);
+ dev_dbg(board->gpib_dev, "sram base address remapped to 0x%p\n", a_priv->sram_base);
a_priv->borg_base = ioremap(pci_resource_start(a_priv->pci_device,
BORG_82350A_REGION),
pci_resource_len(a_priv->pci_device,
BORG_82350A_REGION));
- dev_dbg(board->gpib_dev, "%s: borg base address remapped to 0x%p\n",
- driver_name, a_priv->borg_base);
+ dev_dbg(board->gpib_dev, "borg base address remapped to 0x%p\n", a_priv->borg_base);
retval = init_82350a_hardware(board, config);
if (retval < 0)
@@ -719,21 +671,18 @@ static int agilent_82350b_generic_attach(gpib_board_t *board, const gpib_board_c
case MODEL_82351A:
a_priv->gpib_base = ioremap(pci_resource_start(a_priv->pci_device, GPIB_REGION),
pci_resource_len(a_priv->pci_device, GPIB_REGION));
- dev_dbg(board->gpib_dev, "%s: gpib base address remapped to 0x%p\n",
- driver_name, a_priv->gpib_base);
+ dev_dbg(board->gpib_dev, "chip base address remapped to 0x%p\n", a_priv->gpib_base);
tms_priv->mmiobase = a_priv->gpib_base + TMS9914_BASE_REG;
a_priv->sram_base = ioremap(pci_resource_start(a_priv->pci_device, SRAM_REGION),
pci_resource_len(a_priv->pci_device, SRAM_REGION));
- dev_dbg(board->gpib_dev, "%s: sram base address remapped to 0x%p\n",
- driver_name, a_priv->sram_base);
+ dev_dbg(board->gpib_dev, "sram base address remapped to 0x%p\n", a_priv->sram_base);
a_priv->misc_base = ioremap(pci_resource_start(a_priv->pci_device, MISC_REGION),
pci_resource_len(a_priv->pci_device, MISC_REGION));
- dev_dbg(board->gpib_dev, "%s: misc base address remapped to 0x%p\n",
- driver_name, a_priv->misc_base);
+ dev_dbg(board->gpib_dev, "misc base address remapped to 0x%p\n", a_priv->misc_base);
break;
default:
- pr_err("%s: invalid board\n", driver_name);
- return -1;
+ dev_err(board->gpib_dev, "invalid board\n");
+ return -ENODEV;
}
retval = test_sram(board);
@@ -741,12 +690,12 @@ static int agilent_82350b_generic_attach(gpib_board_t *board, const gpib_board_c
return retval;
if (request_irq(a_priv->pci_device->irq, agilent_82350b_interrupt,
- IRQF_SHARED, driver_name, board)) {
- pr_err("%s: can't request IRQ %d\n", driver_name, a_priv->pci_device->irq);
+ IRQF_SHARED, DRV_NAME, board)) {
+ dev_err(board->gpib_dev, "failed to obtain irq %d\n", a_priv->pci_device->irq);
return -EIO;
}
a_priv->irq = a_priv->pci_device->irq;
- dev_dbg(board->gpib_dev, "%s: IRQ %d\n", driver_name, a_priv->irq);
+ dev_dbg(board->gpib_dev, " IRQ %d\n", a_priv->irq);
writeb(0, a_priv->gpib_base + SRAM_ACCESS_CONTROL_REG);
a_priv->card_mode_bits = ENABLE_PCI_IRQ_BIT;
@@ -780,20 +729,19 @@ static int agilent_82350b_generic_attach(gpib_board_t *board, const gpib_board_c
return 0;
}
-int agilent_82350b_unaccel_attach(gpib_board_t *board, const gpib_board_config_t *config)
-
+static int agilent_82350b_unaccel_attach(struct gpib_board *board,
+ const gpib_board_config_t *config)
{
return agilent_82350b_generic_attach(board, config, 0);
}
-int agilent_82350b_accel_attach(gpib_board_t *board, const gpib_board_config_t *config)
-
+static int agilent_82350b_accel_attach(struct gpib_board *board,
+ const gpib_board_config_t *config)
{
return agilent_82350b_generic_attach(board, config, 1);
}
-void agilent_82350b_detach(gpib_board_t *board)
-
+static void agilent_82350b_detach(struct gpib_board *board)
{
struct agilent_82350b_priv *a_priv = board->private_data;
struct tms9914_priv *tms_priv;
@@ -848,6 +796,7 @@ static gpib_interface_t agilent_82350b_unaccel_interface = {
.primary_address = agilent_82350b_primary_address,
.secondary_address = agilent_82350b_secondary_address,
.serial_poll_response = agilent_82350b_serial_poll_response,
+ .serial_poll_status = agilent_82350b_serial_poll_status,
.t1_delay = agilent_82350b_t1_delay,
.return_to_local = agilent_82350b_return_to_local,
};
@@ -875,6 +824,7 @@ static gpib_interface_t agilent_82350b_interface = {
.primary_address = agilent_82350b_primary_address,
.secondary_address = agilent_82350b_secondary_address,
.serial_poll_response = agilent_82350b_serial_poll_response,
+ .serial_poll_status = agilent_82350b_serial_poll_status,
.t1_delay = agilent_82350b_t1_delay,
.return_to_local = agilent_82350b_return_to_local,
};
@@ -895,31 +845,30 @@ static const struct pci_device_id agilent_82350b_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agilent_82350b_pci_table);
static struct pci_driver agilent_82350b_pci_driver = {
- .name = "agilent_82350b",
+ .name = DRV_NAME,
.id_table = agilent_82350b_pci_table,
.probe = &agilent_82350b_pci_probe
};
static int __init agilent_82350b_init_module(void)
-
{
int result;
result = pci_register_driver(&agilent_82350b_pci_driver);
if (result) {
- pr_err("agilent_82350b: pci_register_driver failed: error = %d\n", result);
+ pr_err("pci_register_driver failed: error = %d\n", result);
return result;
}
result = gpib_register_driver(&agilent_82350b_unaccel_interface, THIS_MODULE);
if (result) {
- pr_err("agilent_82350b: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_unaccel;
}
result = gpib_register_driver(&agilent_82350b_interface, THIS_MODULE);
if (result) {
- pr_err("agilent_82350b: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_interface;
}
@@ -934,7 +883,6 @@ err_unaccel:
}
static void __exit agilent_82350b_exit_module(void)
-
{
gpib_unregister_driver(&agilent_82350b_interface);
gpib_unregister_driver(&agilent_82350b_unaccel_interface);
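
Most of the churn in this file comes from dropping the hand-rolled
driver_name prefix in favour of the pr_fmt/dev_fmt hooks defined at the
top: printk.h expands pr_err(fmt, ...) to printk(KERN_ERR pr_fmt(fmt),
...), so every message picks up the module name automatically. A
self-contained illustration (the logged string shown in the comment is
an assumption about the resulting output):

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/printk.h>

	static void log_example(int result)
	{
		pr_err("pci_register_driver failed: error = %d\n", result);
		/* logs: "agilent_82350b: pci_register_driver failed: error = -12" */
	}
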
diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.h b/drivers/staging/gpib/agilent_82350b/agilent_82350b.h
index 32b322113c10..1573230c619d 100644
--- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.h
+++ b/drivers/staging/gpib/agilent_82350b/agilent_82350b.h
@@ -57,56 +57,6 @@ struct agilent_82350b_priv {
bool using_fifos;
};
-// driver name
-extern const char *driver_name;
-
-// init functions
-
-int agilent_82350b_unaccel_attach(gpib_board_t *board, const gpib_board_config_t *config);
-int agilent_82350b_accel_attach(gpib_board_t *board, const gpib_board_config_t *config);
-
-// interface functions
-int agilent_82350b_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
- size_t *bytes_read);
-int agilent_82350b_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written);
-int agilent_82350b_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
- size_t *bytes_read);
-int agilent_82350b_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written);
-int agilent_82350b_command(gpib_board_t *board, uint8_t *buffer, size_t length,
- size_t *bytes_written);
-int agilent_82350b_take_control(gpib_board_t *board, int synchronous);
-int agilent_82350b_go_to_standby(gpib_board_t *board);
-void agilent_82350b_request_system_control(gpib_board_t *board, int request_control);
-void agilent_82350b_interface_clear(gpib_board_t *board, int assert);
-void agilent_82350b_remote_enable(gpib_board_t *board, int enable);
-int agilent_82350b_enable_eos(gpib_board_t *board, uint8_t eos_byte, int
- compare_8_bits);
-void agilent_82350b_disable_eos(gpib_board_t *board);
-unsigned int agilent_82350b_update_status(gpib_board_t *board, unsigned int clear_mask);
-int agilent_82350b_primary_address(gpib_board_t *board, unsigned int address);
-int agilent_82350b_secondary_address(gpib_board_t *board, unsigned int address, int
- enable);
-int agilent_82350b_parallel_poll(gpib_board_t *board, uint8_t *result);
-void agilent_82350b_parallel_poll_configure(gpib_board_t *board, uint8_t config);
-void agilent_82350b_parallel_poll_response(gpib_board_t *board, int ist);
-void agilent_82350b_serial_poll_response(gpib_board_t *board, uint8_t status);
-void agilent_82350b_return_to_local(gpib_board_t *board);
-uint8_t agilent_82350b_serial_poll_status(gpib_board_t *board);
-int agilent_82350b_line_status(const gpib_board_t *board);
-unsigned int agilent_82350b_t1_delay(gpib_board_t *board, unsigned int nanosec);
-
-// interrupt service routines
-irqreturn_t agilent_82350b_interrupt(int irq, void *arg);
-
-// utility functions
-int agilent_82350b_allocate_private(gpib_board_t *board);
-void agilent_82350b_free_private(gpib_board_t *board);
-unsigned short read_and_clear_event_status(gpib_board_t *board);
-int read_transfer_counter(struct agilent_82350b_priv *a_priv);
-void set_transfer_counter(struct agilent_82350b_priv *a_priv, int count);
-
//registers
enum agilent_82350b_gpib_registers
diff --git a/drivers/staging/gpib/agilent_82357a/agilent_82357a.c b/drivers/staging/gpib/agilent_82357a/agilent_82357a.c
index 69f0e490d401..67bf125645c0 100644
--- a/drivers/staging/gpib/agilent_82357a/agilent_82357a.c
+++ b/drivers/staging/gpib/agilent_82357a/agilent_82357a.c
@@ -7,6 +7,10 @@
#define _GNU_SOURCE
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define DRV_NAME KBUILD_MODNAME
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -21,9 +25,10 @@ MODULE_DESCRIPTION("GPIB driver for Agilent 82357A/B usb adapters");
static struct usb_interface *agilent_82357a_driver_interfaces[MAX_NUM_82357A_INTERFACES];
static DEFINE_MUTEX(agilent_82357a_hotplug_lock); // protect board insertion and removal
-static unsigned int agilent_82357a_update_status(gpib_board_t *board, unsigned int clear_mask);
+static unsigned int agilent_82357a_update_status(struct gpib_board *board,
+ unsigned int clear_mask);
-static int agilent_82357a_take_control_internal(gpib_board_t *board, int synchronous);
+static int agilent_82357a_take_control_internal(struct gpib_board *board, int synchronous);
static void agilent_82357a_bulk_complete(struct urb *urb)
{
@@ -79,14 +84,12 @@ static int agilent_82357a_send_bulk_msg(struct agilent_82357a_priv *a_priv, void
retval = usb_submit_urb(a_priv->bulk_urb, GFP_KERNEL);
if (retval) {
- dev_err(&usb_dev->dev, "%s: failed to submit bulk out urb, retval=%i\n",
- __func__, retval);
+ dev_err(&usb_dev->dev, "failed to submit bulk out urb, retval=%i\n", retval);
mutex_unlock(&a_priv->bulk_alloc_lock);
goto cleanup;
}
mutex_unlock(&a_priv->bulk_alloc_lock);
if (down_interruptible(&context->complete)) {
- dev_err(&usb_dev->dev, "%s: interrupted\n", __func__);
retval = -ERESTARTSYS;
goto cleanup;
}
@@ -149,14 +152,12 @@ static int agilent_82357a_receive_bulk_msg(struct agilent_82357a_priv *a_priv, v
retval = usb_submit_urb(a_priv->bulk_urb, GFP_KERNEL);
if (retval) {
- dev_err(&usb_dev->dev, "%s: failed to submit bulk out urb, retval=%i\n",
- __func__, retval);
+ dev_err(&usb_dev->dev, "failed to submit bulk in urb, retval=%i\n", retval);
mutex_unlock(&a_priv->bulk_alloc_lock);
goto cleanup;
}
mutex_unlock(&a_priv->bulk_alloc_lock);
if (down_interruptible(&context->complete)) {
- dev_err(&usb_dev->dev, "%s: interrupted\n", __func__);
retval = -ERESTARTSYS;
goto cleanup;
}
@@ -205,7 +206,6 @@ static int agilent_82357a_receive_control_msg(struct agilent_82357a_priv *a_priv
static void agilent_82357a_dump_raw_block(const u8 *raw_data, int length)
{
- pr_info("hex block dump\n");
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 8, 1, raw_data, length, true);
}
@@ -225,7 +225,7 @@ static int agilent_82357a_write_registers(struct agilent_82357a_priv *a_priv,
static const int max_writes = 31;
if (num_writes > max_writes) {
- dev_err(&usb_dev->dev, "%s: bug! num_writes=%i too large\n", __func__, num_writes);
+ dev_err(&usb_dev->dev, "bug! num_writes=%i too large\n", num_writes);
return -EIO;
}
out_data_length = num_writes * bytes_per_write + header_length;
@@ -239,8 +239,7 @@ static int agilent_82357a_write_registers(struct agilent_82357a_priv *a_priv,
out_data[i++] = writes[j].address;
out_data[i++] = writes[j].value;
}
- if (i > out_data_length)
- dev_err(&usb_dev->dev, "%s: bug! buffer overrun\n", __func__);
+
retval = mutex_lock_interruptible(&a_priv->bulk_transfer_lock);
if (retval) {
kfree(out_data);
@@ -249,8 +248,8 @@ static int agilent_82357a_write_registers(struct agilent_82357a_priv *a_priv,
retval = agilent_82357a_send_bulk_msg(a_priv, out_data, i, &bytes_written, 1000);
kfree(out_data);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
- __func__, retval, bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
+ retval, bytes_written, i);
mutex_unlock(&a_priv->bulk_transfer_lock);
return retval;
}
@@ -265,20 +264,19 @@ static int agilent_82357a_write_registers(struct agilent_82357a_priv *a_priv,
mutex_unlock(&a_priv->bulk_transfer_lock);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_receive_bulk_msg returned %i, bytes_read=%i\n",
- __func__, retval, bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n",
+ retval, bytes_read);
agilent_82357a_dump_raw_block(in_data, bytes_read);
kfree(in_data);
return -EIO;
}
if (in_data[0] != (0xff & ~DATA_PIPE_CMD_WR_REGS)) {
- dev_err(&usb_dev->dev, "%s: error, bulk command=0x%x != ~DATA_PIPE_CMD_WR_REGS\n",
- __func__, in_data[0]);
+ dev_err(&usb_dev->dev, "bulk command=0x%x != ~DATA_PIPE_CMD_WR_REGS\n", in_data[0]);
return -EIO;
}
if (in_data[1]) {
- dev_err(&usb_dev->dev, "%s: nonzero error code 0x%x in DATA_PIPE_CMD_WR_REGS response\n",
- __func__, in_data[1]);
+ dev_err(&usb_dev->dev, "nonzero error code 0x%x in DATA_PIPE_CMD_WR_REGS response\n",
+ in_data[1]);
return -EIO;
}
kfree(in_data);
@@ -299,9 +297,10 @@ static int agilent_82357a_read_registers(struct agilent_82357a_priv *a_priv,
static const int header_length = 2;
static const int max_reads = 62;
- if (num_reads > max_reads)
- dev_err(&usb_dev->dev, "%s: bug! num_reads=%i too large\n", __func__, num_reads);
-
+ if (num_reads > max_reads) {
+ dev_err(&usb_dev->dev, "bug! num_reads=%i too large\n", num_reads);
+ return -EIO;
+ }
out_data_length = num_reads + header_length;
out_data = kmalloc(out_data_length, GFP_KERNEL);
if (!out_data)
@@ -311,8 +310,7 @@ static int agilent_82357a_read_registers(struct agilent_82357a_priv *a_priv,
out_data[i++] = num_reads;
for (j = 0; j < num_reads; j++)
out_data[i++] = reads[j].address;
- if (i > out_data_length)
- dev_err(&usb_dev->dev, "%s: bug! buffer overrun\n", __func__);
+
if (blocking) {
retval = mutex_lock_interruptible(&a_priv->bulk_transfer_lock);
if (retval) {
@@ -329,8 +327,8 @@ static int agilent_82357a_read_registers(struct agilent_82357a_priv *a_priv,
retval = agilent_82357a_send_bulk_msg(a_priv, out_data, i, &bytes_written, 1000);
kfree(out_data);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
- __func__, retval, bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
+ retval, bytes_written, i);
mutex_unlock(&a_priv->bulk_transfer_lock);
return retval;
}
@@ -345,21 +343,20 @@ static int agilent_82357a_read_registers(struct agilent_82357a_priv *a_priv,
mutex_unlock(&a_priv->bulk_transfer_lock);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_receive_bulk_msg returned %i, bytes_read=%i\n",
- __func__, retval, bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n",
+ retval, bytes_read);
agilent_82357a_dump_raw_block(in_data, bytes_read);
kfree(in_data);
return -EIO;
}
i = 0;
if (in_data[i++] != (0xff & ~DATA_PIPE_CMD_RD_REGS)) {
- dev_err(&usb_dev->dev, "%s: error, bulk command=0x%x != ~DATA_PIPE_CMD_RD_REGS\n",
- __func__, in_data[0]);
+ dev_err(&usb_dev->dev, "bulk command=0x%x != ~DATA_PIPE_CMD_RD_REGS\n", in_data[0]);
return -EIO;
}
if (in_data[i++]) {
- dev_err(&usb_dev->dev, "%s: nonzero error code 0x%x in DATA_PIPE_CMD_RD_REGS response\n",
- __func__, in_data[1]);
+ dev_err(&usb_dev->dev, "nonzero error code 0x%x in DATA_PIPE_CMD_RD_REGS response\n",
+ in_data[1]);
return -EIO;
}
for (j = 0; j < num_reads; j++)
@@ -390,14 +387,13 @@ static int agilent_82357a_abort(struct agilent_82357a_priv *a_priv, int flush)
wIndex, status_data,
status_data_len, 100);
if (receive_control_retval < 0) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_receive_control_msg() returned %i\n",
- __func__, receive_control_retval);
+ dev_err(&usb_dev->dev, "82357a_receive_control_msg() returned %i\n",
+ receive_control_retval);
retval = -EIO;
goto cleanup;
}
if (status_data[0] != (~XFER_ABORT & 0xff)) {
- dev_err(&usb_dev->dev, "%s: error, major code=0x%x != ~XFER_ABORT\n",
- __func__, status_data[0]);
+ dev_err(&usb_dev->dev, "major code=0x%x != ~XFER_ABORT\n", status_data[0]);
retval = -EIO;
goto cleanup;
}
@@ -413,8 +409,7 @@ static int agilent_82357a_abort(struct agilent_82357a_priv *a_priv, int flush)
fallthrough;
case UGP_ERR_FLUSHING_ALREADY:
default:
- dev_err(&usb_dev->dev, "%s: abort returned error code=0x%x\n",
- __func__, status_data[1]);
+ dev_err(&usb_dev->dev, "abort returned error code=0x%x\n", status_data[1]);
retval = -EIO;
break;
}
@@ -425,15 +420,15 @@ cleanup:
}
// interface functions
-int agilent_82357a_command(gpib_board_t *board, uint8_t *buffer, size_t length,
+int agilent_82357a_command(struct gpib_board *board, uint8_t *buffer, size_t length,
size_t *bytes_written);
-static int agilent_82357a_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
+static int agilent_82357a_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
size_t *nbytes)
{
int retval;
struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
+ struct usb_device *usb_dev;
u8 *out_data, *in_data;
int out_data_length, in_data_length;
int bytes_written, bytes_read;
@@ -444,6 +439,10 @@ static int agilent_82357a_read(gpib_board_t *board, uint8_t *buffer, size_t leng
*nbytes = 0;
*end = 0;
+
+ if (!a_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(a_priv->bus_interface);
out_data_length = 0x9;
out_data = kmalloc(out_data_length, GFP_KERNEL);
if (!out_data)
@@ -469,8 +468,8 @@ static int agilent_82357a_read(gpib_board_t *board, uint8_t *buffer, size_t leng
retval = agilent_82357a_send_bulk_msg(a_priv, out_data, i, &bytes_written, msec_timeout);
kfree(out_data);
if (retval || bytes_written != i) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
- __func__, retval, bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
+ retval, bytes_written, i);
mutex_unlock(&a_priv->bulk_transfer_lock);
if (retval < 0)
return retval;
@@ -501,19 +500,19 @@ static int agilent_82357a_read(gpib_board_t *board, uint8_t *buffer, size_t leng
&extra_bytes_read, 100);
bytes_read += extra_bytes_read;
if (extra_bytes_retval) {
- dev_err(&usb_dev->dev, "%s: extra_bytes_retval=%i, bytes_read=%i\n",
- __func__, extra_bytes_retval, bytes_read);
+ dev_err(&usb_dev->dev, "extra_bytes_retval=%i, bytes_read=%i\n",
+ extra_bytes_retval, bytes_read);
agilent_82357a_abort(a_priv, 0);
}
} else if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_receive_bulk_msg returned %i, bytes_read=%i\n",
- __func__, retval, bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n",
+ retval, bytes_read);
agilent_82357a_abort(a_priv, 0);
}
mutex_unlock(&a_priv->bulk_transfer_lock);
if (bytes_read > length + 1) {
bytes_read = length + 1;
- pr_warn("%s: bytes_read > length? truncating", __func__);
+ dev_warn(&usb_dev->dev, "bytes_read > length? truncating\n");
}
if (bytes_read >= 1) {
@@ -535,12 +534,14 @@ static int agilent_82357a_read(gpib_board_t *board, uint8_t *buffer, size_t leng
return retval;
}
-static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer, size_t length,
- int send_commands, int send_eoi, size_t *bytes_written)
+static ssize_t agilent_82357a_generic_write(struct gpib_board *board,
+ uint8_t *buffer, size_t length,
+ int send_commands, int send_eoi,
+ size_t *bytes_written)
{
int retval;
struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
+ struct usb_device *usb_dev;
u8 *out_data = NULL;
u8 *status_data = NULL;
int out_data_length;
@@ -551,6 +552,10 @@ static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer
struct agilent_82357a_register_pairlet read_reg;
*bytes_written = 0;
+ if (!a_priv->bus_interface)
+ return -ENODEV;
+
+ usb_dev = interface_to_usbdev(a_priv->bus_interface);
out_data_length = length + 0x8;
out_data = kmalloc(out_data_length, GFP_KERNEL);
if (!out_data)
@@ -584,8 +589,8 @@ static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer
kfree(out_data);
if (retval || raw_bytes_written != i) {
agilent_82357a_abort(a_priv, 0);
- dev_err(&usb_dev->dev, "%s: agilent_82357a_send_bulk_msg returned %i, raw_bytes_written=%i, i=%i\n",
- __func__, retval, raw_bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, raw_bytes_written=%i, i=%i\n",
+ retval, raw_bytes_written, i);
mutex_unlock(&a_priv->bulk_transfer_lock);
if (retval < 0)
return retval;
@@ -597,7 +602,7 @@ static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer
&a_priv->interrupt_flags) ||
test_bit(TIMO_NUM, &board->status));
if (retval) {
- dev_err(&usb_dev->dev, "%s: wait write complete interrupted\n", __func__);
+ dev_dbg(&usb_dev->dev, "wait write complete interrupted\n");
agilent_82357a_abort(a_priv, 0);
mutex_unlock(&a_priv->bulk_transfer_lock);
return -ERESTARTSYS;
@@ -614,8 +619,7 @@ static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer
read_reg.address = BSR;
retval = agilent_82357a_read_registers(a_priv, &read_reg, 1, 1);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_read_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "read_registers() returned error\n");
return -ETIMEDOUT;
}
@@ -632,8 +636,7 @@ static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer
read_reg.address = ADSR;
retval = agilent_82357a_read_registers(a_priv, &read_reg, 1, 1);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_read_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "read_registers() returned error\n");
return -ETIMEDOUT;
}
adsr = read_reg.value;
@@ -659,8 +662,7 @@ static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer
100);
mutex_unlock(&a_priv->bulk_transfer_lock);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_receive_control_msg() returned %i\n",
- __func__, retval);
+ dev_err(&usb_dev->dev, "receive_control_msg() returned %i\n", retval);
kfree(status_data);
return -EIO;
}
@@ -673,19 +675,19 @@ static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer
return 0;
}
-static int agilent_82357a_write(gpib_board_t *board, uint8_t *buffer, size_t length,
- int send_eoi, size_t *bytes_written)
+static int agilent_82357a_write(struct gpib_board *board, uint8_t *buffer,
+ size_t length, int send_eoi, size_t *bytes_written)
{
return agilent_82357a_generic_write(board, buffer, length, 0, send_eoi, bytes_written);
}
-int agilent_82357a_command(gpib_board_t *board, uint8_t *buffer, size_t length,
+int agilent_82357a_command(struct gpib_board *board, uint8_t *buffer, size_t length,
size_t *bytes_written)
{
return agilent_82357a_generic_write(board, buffer, length, 1, 0, bytes_written);
}
-int agilent_82357a_take_control_internal(gpib_board_t *board, int synchronous)
+int agilent_82357a_take_control_internal(struct gpib_board *board, int synchronous)
{
struct agilent_82357a_priv *a_priv = board->private_data;
struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
@@ -699,17 +701,20 @@ int agilent_82357a_take_control_internal(gpib_board_t *board, int synchronous)
write.value = AUX_TCA;
retval = agilent_82357a_write_registers(a_priv, &write, 1);
if (retval)
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
return retval;
}
-static int agilent_82357a_take_control(gpib_board_t *board, int synchronous)
+static int agilent_82357a_take_control(struct gpib_board *board, int synchronous)
{
+ struct agilent_82357a_priv *a_priv = board->private_data;
const int timeout = 10;
int i;
+ if (!a_priv->bus_interface)
+ return -ENODEV;
+
/* It looks like the 9914 does not handle tcs properly.
* See comment above tms9914_take_control_workaround() in
* drivers/gpib/tms9914/tms9914_aux.c
@@ -730,31 +735,39 @@ static int agilent_82357a_take_control(gpib_board_t *board, int synchronous)
return 0;
}
-static int agilent_82357a_go_to_standby(gpib_board_t *board)
+static int agilent_82357a_go_to_standby(struct gpib_board *board)
{
struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
+ struct usb_device *usb_dev;
struct agilent_82357a_register_pairlet write;
int retval;
+ if (!a_priv->bus_interface)
+ return -ENODEV;
+
+ usb_dev = interface_to_usbdev(a_priv->bus_interface);
write.address = AUXCR;
write.value = AUX_GTS;
retval = agilent_82357a_write_registers(a_priv, &write, 1);
if (retval)
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
return 0;
}
//FIXME should change prototype to return int
-static void agilent_82357a_request_system_control(gpib_board_t *board, int request_control)
+static void agilent_82357a_request_system_control(struct gpib_board *board,
+ int request_control)
{
struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
+ struct usb_device *usb_dev;
struct agilent_82357a_register_pairlet writes[2];
int retval;
int i = 0;
+ if (!a_priv->bus_interface)
+ return; // -ENODEV;
+
+ usb_dev = interface_to_usbdev(a_priv->bus_interface);
/* 82357B needs bit to be set in 9914 AUXCR register */
writes[i].address = AUXCR;
if (request_control) {
@@ -771,18 +784,21 @@ static void agilent_82357a_request_system_control(gpib_board_t *board, int reque
++i;
retval = agilent_82357a_write_registers(a_priv, writes, i);
if (retval)
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
return;// retval;
}
-static void agilent_82357a_interface_clear(gpib_board_t *board, int assert)
+static void agilent_82357a_interface_clear(struct gpib_board *board, int assert)
{
struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
+ struct usb_device *usb_dev;
struct agilent_82357a_register_pairlet write;
int retval;
+ if (!a_priv->bus_interface)
+ return; // -ENODEV;
+
+ usb_dev = interface_to_usbdev(a_priv->bus_interface);
write.address = AUXCR;
write.value = AUX_SIC;
if (assert) {
@@ -791,56 +807,64 @@ static void agilent_82357a_interface_clear(gpib_board_t *board, int assert)
}
retval = agilent_82357a_write_registers(a_priv, &write, 1);
if (retval)
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
}
-static void agilent_82357a_remote_enable(gpib_board_t *board, int enable)
+static void agilent_82357a_remote_enable(struct gpib_board *board, int enable)
{
struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
+ struct usb_device *usb_dev;
struct agilent_82357a_register_pairlet write;
int retval;
+ if (!a_priv->bus_interface)
+ return; // -ENODEV;
+
+ usb_dev = interface_to_usbdev(a_priv->bus_interface);
write.address = AUXCR;
write.value = AUX_SRE;
if (enable)
write.value |= AUX_CS;
retval = agilent_82357a_write_registers(a_priv, &write, 1);
if (retval)
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
a_priv->ren_state = enable;
return;// 0;
}
-static int agilent_82357a_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+static int agilent_82357a_enable_eos(struct gpib_board *board, uint8_t eos_byte,
+ int compare_8_bits)
{
struct agilent_82357a_priv *a_priv = board->private_data;
- if (compare_8_bits == 0) {
- pr_warn("%s: hardware only supports 8-bit EOS compare", __func__);
+ if (!a_priv->bus_interface)
+ return -ENODEV;
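+ /* the hardware only supports 8-bit EOS compare */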
+ if (compare_8_bits == 0)
return -EOPNOTSUPP;
- }
+
a_priv->eos_char = eos_byte;
a_priv->eos_mode = REOS | BIN;
return 0;
}
-static void agilent_82357a_disable_eos(gpib_board_t *board)
+static void agilent_82357a_disable_eos(struct gpib_board *board)
{
struct agilent_82357a_priv *a_priv = board->private_data;
a_priv->eos_mode &= ~REOS;
}
-static unsigned int agilent_82357a_update_status(gpib_board_t *board, unsigned int clear_mask)
+static unsigned int agilent_82357a_update_status(struct gpib_board *board,
+ unsigned int clear_mask)
{
struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
+ struct usb_device *usb_dev;
struct agilent_82357a_register_pairlet address_status, bus_status;
int retval;
+ if (!a_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(a_priv->bus_interface);
board->status &= ~clear_mask;
if (a_priv->is_cic)
set_bit(CIC_NUM, &board->status);
@@ -850,8 +874,7 @@ static unsigned int agilent_82357a_update_status(gpib_board_t *board, unsigned i
retval = agilent_82357a_read_registers(a_priv, &address_status, 1, 0);
if (retval) {
if (retval != -EAGAIN)
- dev_err(&usb_dev->dev, "%s: agilent_82357a_read_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "read_registers() returned error\n");
return board->status;
}
// check for remote/local
@@ -883,8 +906,7 @@ static unsigned int agilent_82357a_update_status(gpib_board_t *board, unsigned i
retval = agilent_82357a_read_registers(a_priv, &bus_status, 1, 0);
if (retval) {
if (retval != -EAGAIN)
- dev_err(&usb_dev->dev, "%s: agilent_82357a_read_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "read_registers() returned error\n");
return board->status;
}
if (bus_status.value & BSR_SRQ_BIT)
@@ -895,40 +917,46 @@ static unsigned int agilent_82357a_update_status(gpib_board_t *board, unsigned i
return board->status;
}
-static int agilent_82357a_primary_address(gpib_board_t *board, unsigned int address)
+static int agilent_82357a_primary_address(struct gpib_board *board, unsigned int address)
{
struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
+ struct usb_device *usb_dev;
struct agilent_82357a_register_pairlet write;
int retval;
+ if (!a_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(a_priv->bus_interface);
// put primary address in address0
write.address = ADR;
write.value = address & ADDRESS_MASK;
retval = agilent_82357a_write_registers(a_priv, &write, 1);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
return retval;
}
return retval;
}
-static int agilent_82357a_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+static int agilent_82357a_secondary_address(struct gpib_board *board,
+ unsigned int address, int enable)
{
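+ /* assigning a secondary address is not supported by this adapter */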
if (enable)
- pr_warn("%s: warning: assigning a secondary address not supported\n", __func__);
- return -EOPNOTSUPP;
+ return -EOPNOTSUPP;
+ return 0;
}
-static int agilent_82357a_parallel_poll(gpib_board_t *board, uint8_t *result)
+static int agilent_82357a_parallel_poll(struct gpib_board *board, uint8_t *result)
{
struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
+ struct usb_device *usb_dev;
struct agilent_82357a_register_pairlet writes[2];
struct agilent_82357a_register_pairlet read;
int retval;
+ if (!a_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(a_priv->bus_interface);
// execute parallel poll
writes[0].address = AUXCR;
writes[0].value = AUX_CS | AUX_RPP;
@@ -936,16 +964,14 @@ static int agilent_82357a_parallel_poll(gpib_board_t *board, uint8_t *result)
writes[1].value = a_priv->hw_control_bits & ~NOT_PARALLEL_POLL;
retval = agilent_82357a_write_registers(a_priv, writes, 2);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
return retval;
}
udelay(2); //silly, since usb write will take way longer
read.address = CPTR;
retval = agilent_82357a_read_registers(a_priv, &read, 1, 1);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_read_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "read_registers() returned error\n");
return retval;
}
*result = read.value;
@@ -956,75 +982,76 @@ static int agilent_82357a_parallel_poll(gpib_board_t *board, uint8_t *result)
writes[1].value = AUX_RPP;
retval = agilent_82357a_write_registers(a_priv, writes, 2);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
return retval;
}
return 0;
}
-static void agilent_82357a_parallel_poll_configure(gpib_board_t *board, uint8_t config)
+static void agilent_82357a_parallel_poll_configure(struct gpib_board *board, uint8_t config)
{
//board can only be system controller
return;// 0;
}
-static void agilent_82357a_parallel_poll_response(gpib_board_t *board, int ist)
+static void agilent_82357a_parallel_poll_response(struct gpib_board *board, int ist)
{
//board can only be system controller
return;// 0;
}
-static void agilent_82357a_serial_poll_response(gpib_board_t *board, uint8_t status)
+static void agilent_82357a_serial_poll_response(struct gpib_board *board, uint8_t status)
{
//board can only be system controller
return;// 0;
}
-static uint8_t agilent_82357a_serial_poll_status(gpib_board_t *board)
+static uint8_t agilent_82357a_serial_poll_status(struct gpib_board *board)
{
//board can only be system controller
return 0;
}
-static void agilent_82357a_return_to_local(gpib_board_t *board)
+static void agilent_82357a_return_to_local(struct gpib_board *board)
{
//board can only be system controller
return;// 0;
}
-static int agilent_82357a_line_status(const gpib_board_t *board)
+static int agilent_82357a_line_status(const struct gpib_board *board)
{
struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
+ struct usb_device *usb_dev;
struct agilent_82357a_register_pairlet bus_status;
int retval;
- int status = ValidALL;
+ int status = VALID_ALL;
+ if (!a_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(a_priv->bus_interface);
bus_status.address = BSR;
retval = agilent_82357a_read_registers(a_priv, &bus_status, 1, 0);
if (retval) {
if (retval != -EAGAIN)
- dev_err(&usb_dev->dev, "%s: agilent_82357a_read_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "read_registers() returned error\n");
return retval;
}
if (bus_status.value & BSR_REN_BIT)
- status |= BusREN;
+ status |= BUS_REN;
if (bus_status.value & BSR_IFC_BIT)
- status |= BusIFC;
+ status |= BUS_IFC;
if (bus_status.value & BSR_SRQ_BIT)
- status |= BusSRQ;
+ status |= BUS_SRQ;
if (bus_status.value & BSR_EOI_BIT)
- status |= BusEOI;
+ status |= BUS_EOI;
if (bus_status.value & BSR_NRFD_BIT)
- status |= BusNRFD;
+ status |= BUS_NRFD;
if (bus_status.value & BSR_NDAC_BIT)
- status |= BusNDAC;
+ status |= BUS_NDAC;
if (bus_status.value & BSR_DAV_BIT)
- status |= BusDAV;
+ status |= BUS_DAV;
if (bus_status.value & BSR_ATN_BIT)
- status |= BusATN;
+ status |= BUS_ATN;
return status;
}
@@ -1044,25 +1071,27 @@ static unsigned short nanosec_to_fast_talker_bits(unsigned int *nanosec)
return bits;
}
-static unsigned int agilent_82357a_t1_delay(gpib_board_t *board, unsigned int nanosec)
+static int agilent_82357a_t1_delay(struct gpib_board *board, unsigned int nanosec)
{
struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
+ struct usb_device *usb_dev;
struct agilent_82357a_register_pairlet write;
int retval;
+ if (!a_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(a_priv->bus_interface);
write.address = FAST_TALKER_T1;
write.value = nanosec_to_fast_talker_bits(&nanosec);
retval = agilent_82357a_write_registers(a_priv, &write, 1);
if (retval)
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
return nanosec;
}
static void agilent_82357a_interrupt_complete(struct urb *urb)
{
- gpib_board_t *board = urb->context;
+ struct gpib_board *board = urb->context;
struct agilent_82357a_priv *a_priv = board->private_data;
struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
int retval;
@@ -1081,7 +1110,7 @@ static void agilent_82357a_interrupt_complete(struct urb *urb)
default: /* other error, resubmit */
retval = usb_submit_urb(a_priv->interrupt_urb, GFP_ATOMIC);
if (retval)
- dev_err(&usb_dev->dev, "%s: failed to resubmit interrupt urb\n", __func__);
+ dev_err(&usb_dev->dev, "failed to resubmit interrupt urb\n");
return;
}
@@ -1097,10 +1126,10 @@ static void agilent_82357a_interrupt_complete(struct urb *urb)
retval = usb_submit_urb(a_priv->interrupt_urb, GFP_ATOMIC);
if (retval)
- dev_err(&usb_dev->dev, "%s: failed to resubmit interrupt urb\n", __func__);
+ dev_err(&usb_dev->dev, "failed to resubmit interrupt urb\n");
}
-static int agilent_82357a_setup_urbs(gpib_board_t *board)
+static int agilent_82357a_setup_urbs(struct gpib_board *board)
{
struct agilent_82357a_priv *a_priv = board->private_data;
struct usb_device *usb_dev;
@@ -1133,8 +1162,7 @@ static int agilent_82357a_setup_urbs(gpib_board_t *board)
if (retval) {
usb_free_urb(a_priv->interrupt_urb);
a_priv->interrupt_urb = NULL;
- dev_err(&usb_dev->dev, "%s: failed to submit first interrupt urb, retval=%i\n",
- __func__, retval);
+ dev_err(&usb_dev->dev, "failed to submit first interrupt urb, retval=%i\n", retval);
goto setup_exit;
}
mutex_unlock(&a_priv->interrupt_alloc_lock);
@@ -1165,7 +1193,7 @@ static void agilent_82357a_release_urbs(struct agilent_82357a_priv *a_priv)
}
}
-static int agilent_82357a_allocate_private(gpib_board_t *board)
+static int agilent_82357a_allocate_private(struct gpib_board *board)
{
struct agilent_82357a_priv *a_priv;
@@ -1180,112 +1208,82 @@ static int agilent_82357a_allocate_private(gpib_board_t *board)
return 0;
}
-static void agilent_82357a_free_private(gpib_board_t *board)
+static void agilent_82357a_free_private(struct gpib_board *board)
{
kfree(board->private_data);
board->private_data = NULL;
-
}
-static int agilent_82357a_init(gpib_board_t *board)
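+/* number of register writes issued by agilent_82357a_init() */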
+#define INIT_NUM_REG_WRITES 18
+static int agilent_82357a_init(struct gpib_board *board)
{
struct agilent_82357a_priv *a_priv = board->private_data;
struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
struct agilent_82357a_register_pairlet hw_control;
- struct agilent_82357a_register_pairlet writes[0x20];
+ struct agilent_82357a_register_pairlet writes[INIT_NUM_REG_WRITES];
int retval;
- int i;
unsigned int nanosec;
- i = 0;
- writes[i].address = LED_CONTROL;
- writes[i].value = FAIL_LED_ON;
- ++i;
- writes[i].address = RESET_TO_POWERUP;
- writes[i].value = RESET_SPACEBALL;
- ++i;
- retval = agilent_82357a_write_registers(a_priv, writes, i);
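+ /* turn on the fail LED and reset the adapter to its power-up state */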
+ writes[0].address = LED_CONTROL;
+ writes[0].value = FAIL_LED_ON;
+ writes[1].address = RESET_TO_POWERUP;
+ writes[1].value = RESET_SPACEBALL;
+ retval = agilent_82357a_write_registers(a_priv, writes, 2);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
return -EIO;
}
set_current_state(TASK_INTERRUPTIBLE);
if (schedule_timeout(usec_to_jiffies(2000)))
return -ERESTARTSYS;
- i = 0;
- writes[i].address = AUXCR;
- writes[i].value = AUX_NBAF;
- ++i;
- writes[i].address = AUXCR;
- writes[i].value = AUX_HLDE;
- ++i;
- writes[i].address = AUXCR;
- writes[i].value = AUX_TON;
- ++i;
- writes[i].address = AUXCR;
- writes[i].value = AUX_LON;
- ++i;
- writes[i].address = AUXCR;
- writes[i].value = AUX_RSV2;
- ++i;
- writes[i].address = AUXCR;
- writes[i].value = AUX_INVAL;
- ++i;
- writes[i].address = AUXCR;
- writes[i].value = AUX_RPP;
- ++i;
- writes[i].address = AUXCR;
- writes[i].value = AUX_STDL;
- ++i;
- writes[i].address = AUXCR;
- writes[i].value = AUX_VSTDL;
- ++i;
- writes[i].address = FAST_TALKER_T1;
+ writes[0].address = AUXCR;
+ writes[0].value = AUX_NBAF;
+ writes[1].address = AUXCR;
+ writes[1].value = AUX_HLDE;
+ writes[2].address = AUXCR;
+ writes[2].value = AUX_TON;
+ writes[3].address = AUXCR;
+ writes[3].value = AUX_LON;
+ writes[4].address = AUXCR;
+ writes[4].value = AUX_RSV2;
+ writes[5].address = AUXCR;
+ writes[5].value = AUX_INVAL;
+ writes[6].address = AUXCR;
+ writes[6].value = AUX_RPP;
+ writes[7].address = AUXCR;
+ writes[7].value = AUX_STDL;
+ writes[8].address = AUXCR;
+ writes[8].value = AUX_VSTDL;
+ writes[9].address = FAST_TALKER_T1;
nanosec = board->t1_nano_sec;
- writes[i].value = nanosec_to_fast_talker_bits(&nanosec);
+ writes[9].value = nanosec_to_fast_talker_bits(&nanosec);
board->t1_nano_sec = nanosec;
- ++i;
- writes[i].address = ADR;
- writes[i].value = board->pad & ADDRESS_MASK;
- ++i;
- writes[i].address = PPR;
- writes[i].value = 0;
- ++i;
- writes[i].address = SPMR;
- writes[i].value = 0;
- ++i;
- writes[i].address = PROTOCOL_CONTROL;
- writes[i].value = WRITE_COMPLETE_INTERRUPT_EN;
- ++i;
- writes[i].address = IMR0;
- writes[i].value = HR_BOIE | HR_BIIE;
- ++i;
- writes[i].address = IMR1;
- writes[i].value = HR_SRQIE;
- ++i;
+ writes[10].address = ADR;
+ writes[10].value = board->pad & ADDRESS_MASK;
+ writes[11].address = PPR;
+ writes[11].value = 0;
+ writes[12].address = SPMR;
+ writes[12].value = 0;
+ writes[13].address = PROTOCOL_CONTROL;
+ writes[13].value = WRITE_COMPLETE_INTERRUPT_EN;
+ writes[14].address = IMR0;
+ writes[14].value = HR_BOIE | HR_BIIE;
+ writes[15].address = IMR1;
+ writes[15].value = HR_SRQIE;
// turn off reset state
- writes[i].address = AUXCR;
- writes[i].value = AUX_CHIP_RESET;
- ++i;
- writes[i].address = LED_CONTROL;
- writes[i].value = FIRMWARE_LED_CONTROL;
- ++i;
- if (i > ARRAY_SIZE(writes)) {
- dev_err(&usb_dev->dev, "%s: bug! writes[] overflow\n", __func__);
- return -EFAULT;
- }
- retval = agilent_82357a_write_registers(a_priv, writes, i);
+ writes[16].address = AUXCR;
+ writes[16].value = AUX_CHIP_RESET;
+ writes[17].address = LED_CONTROL;
+ writes[17].value = FIRMWARE_LED_CONTROL;
+ retval = agilent_82357a_write_registers(a_priv, writes, INIT_NUM_REG_WRITES);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
return -EIO;
}
hw_control.address = HW_CONTROL;
retval = agilent_82357a_read_registers(a_priv, &hw_control, 1, 1);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_read_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "read_registers() returned error\n");
return -EIO;
}
a_priv->hw_control_bits = (hw_control.value & ~0x7) | NOT_TI_RESET | NOT_PARALLEL_POLL;
@@ -1307,7 +1305,7 @@ static inline int agilent_82357a_device_match(struct usb_interface *interface,
return 1;
}
-static int agilent_82357a_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int agilent_82357a_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
int retval;
int i;
@@ -1336,7 +1334,7 @@ static int agilent_82357a_attach(gpib_board_t *board, const gpib_board_config_t
}
if (i == MAX_NUM_82357A_INTERFACES) {
dev_err(board->gpib_dev,
- "No Agilent 82357 gpib adapters found, have you loaded its firmware?\n");
+ "No supported adapters found, have you loaded its firmware?\n");
retval = -ENODEV;
goto attach_fail;
}
@@ -1372,8 +1370,7 @@ static int agilent_82357a_attach(gpib_board_t *board, const gpib_board_config_t
goto attach_fail;
}
- dev_info(&usb_dev->dev,
- "bus %d dev num %d attached to gpib minor %d, agilent usb interface %i\n",
+ dev_info(&usb_dev->dev, "bus %d dev num %d attached to gpib%d, interface %i\n",
usb_dev->bus->busnum, usb_dev->devnum, board->minor, i);
mutex_unlock(&agilent_82357a_hotplug_lock);
return retval;
@@ -1384,49 +1381,36 @@ attach_fail:
return retval;
}
-static int agilent_82357a_go_idle(gpib_board_t *board)
+static int agilent_82357a_go_idle(struct gpib_board *board)
{
struct agilent_82357a_priv *a_priv = board->private_data;
struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
struct agilent_82357a_register_pairlet writes[0x20];
int retval;
- int i;
- i = 0;
// turn on tms9914 reset state
- writes[i].address = AUXCR;
- writes[i].value = AUX_CS | AUX_CHIP_RESET;
- ++i;
+ writes[0].address = AUXCR;
+ writes[0].value = AUX_CS | AUX_CHIP_RESET;
a_priv->hw_control_bits &= ~NOT_TI_RESET;
- writes[i].address = HW_CONTROL;
- writes[i].value = a_priv->hw_control_bits;
- ++i;
- writes[i].address = PROTOCOL_CONTROL;
- writes[i].value = 0;
- ++i;
- writes[i].address = IMR0;
- writes[i].value = 0;
- ++i;
- writes[i].address = IMR1;
- writes[i].value = 0;
- ++i;
- writes[i].address = LED_CONTROL;
- writes[i].value = 0;
- ++i;
- if (i > ARRAY_SIZE(writes)) {
- dev_err(&usb_dev->dev, "%s: bug! writes[] overflow\n", __func__);
- return -EFAULT;
- }
- retval = agilent_82357a_write_registers(a_priv, writes, i);
+ writes[1].address = HW_CONTROL;
+ writes[1].value = a_priv->hw_control_bits;
+ writes[2].address = PROTOCOL_CONTROL;
+ writes[2].value = 0;
+ writes[3].address = IMR0;
+ writes[3].value = 0;
+ writes[4].address = IMR1;
+ writes[4].value = 0;
+ writes[5].address = LED_CONTROL;
+ writes[5].value = 0;
+ retval = agilent_82357a_write_registers(a_priv, writes, 6);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
return -EIO;
}
return 0;
}
-static void agilent_82357a_detach(gpib_board_t *board)
+static void agilent_82357a_detach(struct gpib_board *board)
{
struct agilent_82357a_priv *a_priv;
@@ -1445,7 +1429,6 @@ static void agilent_82357a_detach(gpib_board_t *board)
agilent_82357a_release_urbs(a_priv);
agilent_82357a_free_private(board);
}
- dev_info(board->gpib_dev, "%s: detached\n", __func__);
mutex_unlock(&agilent_82357a_hotplug_lock);
}
@@ -1510,8 +1493,7 @@ static int agilent_82357a_driver_probe(struct usb_interface *interface,
if (i == MAX_NUM_82357A_INTERFACES) {
usb_put_dev(usb_dev);
mutex_unlock(&agilent_82357a_hotplug_lock);
- dev_err(&usb_dev->dev, "%s: out of space in agilent_82357a_driver_interfaces[]\n",
- __func__);
+ dev_err(&usb_dev->dev, "out of space in agilent_82357a_driver_interfaces[]\n");
return -1;
}
path = kmalloc(path_length, GFP_KERNEL);
@@ -1536,7 +1518,7 @@ static void agilent_82357a_driver_disconnect(struct usb_interface *interface)
for (i = 0; i < MAX_NUM_82357A_INTERFACES; ++i) {
if (agilent_82357a_driver_interfaces[i] == interface) {
- gpib_board_t *board = usb_get_intfdata(interface);
+ struct gpib_board *board = usb_get_intfdata(interface);
if (board) {
struct agilent_82357a_priv *a_priv = board->private_data;
@@ -1552,13 +1534,12 @@ static void agilent_82357a_driver_disconnect(struct usb_interface *interface)
mutex_unlock(&a_priv->control_alloc_lock);
}
}
- dev_dbg(&usb_dev->dev, "nulled agilent_82357a_driver_interfaces[%i]\n", i);
agilent_82357a_driver_interfaces[i] = NULL;
break;
}
}
if (i == MAX_NUM_82357A_INTERFACES)
- dev_err(&usb_dev->dev, "unable to find interface in agilent_82357a_driver_interfaces[]? bug?\n");
+ dev_err(&usb_dev->dev, "unable to find interface - bug?\n");
usb_put_dev(usb_dev);
mutex_unlock(&agilent_82357a_hotplug_lock);
@@ -1573,7 +1554,7 @@ static int agilent_82357a_driver_suspend(struct usb_interface *interface, pm_mes
for (i = 0; i < MAX_NUM_82357A_INTERFACES; ++i) {
if (agilent_82357a_driver_interfaces[i] == interface) {
- gpib_board_t *board = usb_get_intfdata(interface);
+ struct gpib_board *board = usb_get_intfdata(interface);
if (board) {
struct agilent_82357a_priv *a_priv = board->private_data;
@@ -1583,18 +1564,18 @@ static int agilent_82357a_driver_suspend(struct usb_interface *interface, pm_mes
agilent_82357a_abort(a_priv, 0);
retval = agilent_82357a_go_idle(board);
if (retval) {
- dev_err(&usb_dev->dev, "%s: failed to go idle, retval=%i\n",
- __func__, retval);
+ dev_err(&usb_dev->dev, "failed to go idle, retval=%i\n",
+ retval);
mutex_unlock(&agilent_82357a_hotplug_lock);
return retval;
}
mutex_lock(&a_priv->interrupt_alloc_lock);
agilent_82357a_cleanup_urbs(a_priv);
mutex_unlock(&a_priv->interrupt_alloc_lock);
- dev_info(&usb_dev->dev,
- "bus %d dev num %d gpib minor %d, agilent usb interface %i suspended\n",
- usb_dev->bus->busnum, usb_dev->devnum,
- board->minor, i);
+ dev_dbg(&usb_dev->dev,
+ "bus %d dev num %d gpib %d, interface %i suspended\n",
+ usb_dev->bus->busnum, usb_dev->devnum,
+ board->minor, i);
}
}
break;
@@ -1609,7 +1590,7 @@ static int agilent_82357a_driver_suspend(struct usb_interface *interface, pm_mes
static int agilent_82357a_driver_resume(struct usb_interface *interface)
{
struct usb_device *usb_dev = interface_to_usbdev(interface);
- gpib_board_t *board;
+ struct gpib_board *board;
int i, retval;
mutex_lock(&agilent_82357a_hotplug_lock);
@@ -1631,8 +1612,8 @@ static int agilent_82357a_driver_resume(struct usb_interface *interface)
mutex_lock(&a_priv->interrupt_alloc_lock);
retval = usb_submit_urb(a_priv->interrupt_urb, GFP_KERNEL);
if (retval) {
- dev_err(&usb_dev->dev, "%s: failed to resubmit interrupt urb, retval=%i\n",
- __func__, retval);
+ dev_err(&usb_dev->dev, "failed to resubmit interrupt urb in resume, retval=%i\n",
+ retval);
mutex_unlock(&a_priv->interrupt_alloc_lock);
mutex_unlock(&agilent_82357a_hotplug_lock);
return retval;
@@ -1655,9 +1636,9 @@ static int agilent_82357a_driver_resume(struct usb_interface *interface)
// assert/unassert REN
agilent_82357a_remote_enable(board, a_priv->ren_state);
- dev_info(&usb_dev->dev,
- "bus %d dev num %d gpib minor %d, agilent usb interface %i resumed\n",
- usb_dev->bus->busnum, usb_dev->devnum, board->minor, i);
+ dev_dbg(&usb_dev->dev,
+ "bus %d dev num %d gpib%d, interface %i resumed\n",
+ usb_dev->bus->busnum, usb_dev->devnum, board->minor, i);
}
resume_exit:
@@ -1667,7 +1648,7 @@ resume_exit:
}
static struct usb_driver agilent_82357a_bus_driver = {
- .name = "agilent_82357a_gpib",
+ .name = DRV_NAME,
.probe = agilent_82357a_driver_probe,
.disconnect = agilent_82357a_driver_disconnect,
.suspend = agilent_82357a_driver_suspend,
@@ -1680,19 +1661,18 @@ static int __init agilent_82357a_init_module(void)
int i;
int ret;
- pr_info("agilent_82357a_gpib driver loading");
for (i = 0; i < MAX_NUM_82357A_INTERFACES; ++i)
agilent_82357a_driver_interfaces[i] = NULL;
ret = usb_register(&agilent_82357a_bus_driver);
if (ret) {
- pr_err("agilent_82357a: usb_register failed: error = %d\n", ret);
+ pr_err("usb_register failed: error = %d\n", ret);
return ret;
}
ret = gpib_register_driver(&agilent_82357a_gpib_interface, THIS_MODULE);
if (ret) {
- pr_err("agilent_82357a: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
usb_deregister(&agilent_82357a_bus_driver);
return ret;
}
@@ -1702,7 +1682,6 @@ static int __init agilent_82357a_init_module(void)
static void __exit agilent_82357a_exit_module(void)
{
- pr_info("agilent_82357a_gpib driver unloading");
gpib_unregister_driver(&agilent_82357a_gpib_interface);
usb_deregister(&agilent_82357a_bus_driver);
}
diff --git a/drivers/staging/gpib/cb7210/Makefile b/drivers/staging/gpib/cb7210/Makefile
index cda0725d6487..d239ae80b415 100644
--- a/drivers/staging/gpib/cb7210/Makefile
+++ b/drivers/staging/gpib/cb7210/Makefile
@@ -1,4 +1,3 @@
-ccflags-$(CONFIG_GPIB_PCMCIA) := -DGPIB_PCMCIA
obj-$(CONFIG_GPIB_CB7210) += cb7210.o
diff --git a/drivers/staging/gpib/cb7210/cb7210.c b/drivers/staging/gpib/cb7210/cb7210.c
index 4d22f647a453..6b22a33a8c4f 100644
--- a/drivers/staging/gpib/cb7210/cb7210.c
+++ b/drivers/staging/gpib/cb7210/cb7210.c
@@ -5,6 +5,10 @@
* copyright : (C) 2001, 2002 by Frank Mori Hess
***************************************************************************/
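+/* prefix all log messages with the module name */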
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define DRV_NAME KBUILD_MODNAME
+
#include "cb7210.h"
#include <linux/ioport.h>
#include <linux/sched.h>
@@ -23,7 +27,10 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB driver Measurement Computing boards using cb7210.2 and cbi488.2");
-static inline int have_fifo_word(const struct cb7210_priv *cb_priv)
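+/* forward declaration, cb7210_read() is used before it is defined */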
+static int cb7210_read(struct gpib_board *board, uint8_t *buffer, size_t length,
+ int *end, size_t *bytes_read);
+
+static inline int have_fifo_word(const struct cb7210_priv *cb_priv)
{
if (((cb7210_read_byte(cb_priv, HS_STATUS)) &
(HS_RX_MSB_NOT_EMPTY | HS_RX_LSB_NOT_EMPTY)) ==
@@ -33,7 +40,7 @@ static inline int have_fifo_word(const struct cb7210_priv *cb_priv)
return 0;
}
-static inline void input_fifo_enable(gpib_board_t *board, int enable)
+static inline void input_fifo_enable(struct gpib_board *board, int enable)
{
struct cb7210_priv *cb_priv = board->private_data;
struct nec7210_priv *nec_priv = &cb_priv->nec7210_priv;
@@ -69,7 +76,7 @@ static inline void input_fifo_enable(gpib_board_t *board, int enable)
spin_unlock_irqrestore(&board->spinlock, flags);
}
-static int fifo_read(gpib_board_t *board, struct cb7210_priv *cb_priv, uint8_t *buffer,
+static int fifo_read(struct gpib_board *board, struct cb7210_priv *cb_priv, uint8_t *buffer,
size_t length, int *end, size_t *bytes_read)
{
ssize_t retval = 0;
@@ -80,12 +87,12 @@ static int fifo_read(gpib_board_t *board, struct cb7210_priv *cb_priv, uint8_t *
*bytes_read = 0;
if (cb_priv->fifo_iobase == 0) {
- pr_err("cb7210: fifo iobase is zero!\n");
+ dev_err(board->gpib_dev, "fifo iobase is zero!\n");
return -EIO;
}
*end = 0;
if (length <= cb7210_fifo_size) {
- pr_err("cb7210: bug! %s with length < fifo size\n", __func__);
+ dev_err(board->gpib_dev, " bug! fifo read length < fifo size\n");
return -EINVAL;
}
@@ -100,7 +107,6 @@ static int fifo_read(gpib_board_t *board, struct cb7210_priv *cb_priv, uint8_t *
test_bit(RECEIVED_END_BN, &nec_priv->state) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
test_bit(TIMO_NUM, &board->status))) {
- pr_warn("cb7210: fifo half full wait interrupted\n");
retval = -ERESTARTSYS;
nec7210_set_reg_bits(nec_priv, IMR2, HR_DMAI, 0);
break;
@@ -150,7 +156,6 @@ static int fifo_read(gpib_board_t *board, struct cb7210_priv *cb_priv, uint8_t *
test_bit(RECEIVED_END_BN, &nec_priv->state) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
test_bit(TIMO_NUM, &board->status))) {
- pr_warn("cb7210: fifo half full wait interrupted\n");
retval = -ERESTARTSYS;
}
if (test_bit(TIMO_NUM, &board->status))
@@ -165,8 +170,8 @@ static int fifo_read(gpib_board_t *board, struct cb7210_priv *cb_priv, uint8_t *
return retval;
}
-int cb7210_accel_read(gpib_board_t *board, uint8_t *buffer,
- size_t length, int *end, size_t *bytes_read)
+static int cb7210_accel_read(struct gpib_board *board, uint8_t *buffer,
+ size_t length, int *end, size_t *bytes_read)
{
ssize_t retval;
struct cb7210_priv *cb_priv = board->private_data;
@@ -185,7 +190,6 @@ int cb7210_accel_read(gpib_board_t *board, uint8_t *buffer,
test_bit(READ_READY_BN, &nec_priv->state) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
test_bit(TIMO_NUM, &board->status))) {
- pr_warn("cb7210: read ready wait interrupted\n");
return -ERESTARTSYS;
}
if (test_bit(TIMO_NUM, &board->status))
@@ -225,7 +229,7 @@ static int output_fifo_empty(const struct cb7210_priv *cb_priv)
return 0;
}
-static inline void output_fifo_enable(gpib_board_t *board, int enable)
+static inline void output_fifo_enable(struct gpib_board *board, int enable)
{
struct cb7210_priv *cb_priv = board->private_data;
struct nec7210_priv *nec_priv = &cb_priv->nec7210_priv;
@@ -260,7 +264,8 @@ static inline void output_fifo_enable(gpib_board_t *board, int enable)
spin_unlock_irqrestore(&board->spinlock, flags);
}
-static int fifo_write(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written)
+static int fifo_write(struct gpib_board *board, uint8_t *buffer, size_t length,
+ size_t *bytes_written)
{
size_t count = 0;
ssize_t retval = 0;
@@ -271,7 +276,7 @@ static int fifo_write(gpib_board_t *board, uint8_t *buffer, size_t length, size_
*bytes_written = 0;
if (cb_priv->fifo_iobase == 0) {
- pr_err("cb7210: fifo iobase is zero!\n");
+ dev_err(board->gpib_dev, "fifo iobase is zero!\n");
return -EINVAL;
}
if (length == 0)
@@ -290,7 +295,6 @@ static int fifo_write(gpib_board_t *board, uint8_t *buffer, size_t length, size_
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
test_bit(BUS_ERROR_BN, &nec_priv->state) ||
test_bit(TIMO_NUM, &board->status))) {
- pr_warn("cb7210: fifo wait interrupted\n");
retval = -ERESTARTSYS;
break;
}
@@ -306,7 +310,7 @@ static int fifo_write(gpib_board_t *board, uint8_t *buffer, size_t length, size_
if (num_bytes + count > length)
num_bytes = length - count;
if (num_bytes % cb7210_fifo_width) {
- pr_err("cb7210: bug! %s with odd number of bytes\n", __func__);
+ dev_err(board->gpib_dev, " bug! fifo write with odd number of bytes\n");
retval = -EINVAL;
break;
}
@@ -331,7 +335,6 @@ static int fifo_write(gpib_board_t *board, uint8_t *buffer, size_t length, size_
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
test_bit(BUS_ERROR_BN, &nec_priv->state) ||
test_bit(TIMO_NUM, &board->status))) {
- pr_err("cb7210: wait for last byte interrupted\n");
retval = -ERESTARTSYS;
}
if (test_bit(TIMO_NUM, &board->status))
@@ -347,8 +350,8 @@ static int fifo_write(gpib_board_t *board, uint8_t *buffer, size_t length, size_
return retval;
}
-int cb7210_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written)
+static int cb7210_accel_write(struct gpib_board *board, uint8_t *buffer,
+ size_t length, int send_eoi, size_t *bytes_written)
{
struct cb7210_priv *cb_priv = board->private_data;
struct nec7210_priv *nec_priv = &cb_priv->nec7210_priv;
@@ -375,39 +378,37 @@ int cb7210_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length, int
return retval;
}
-int cb7210_line_status(const gpib_board_t *board)
+static int cb7210_line_status(const struct gpib_board *board)
{
- int status = ValidALL;
+ int status = VALID_ALL;
int bsr_bits;
struct cb7210_priv *cb_priv;
- struct nec7210_priv *nec_priv;
cb_priv = board->private_data;
- nec_priv = &cb_priv->nec7210_priv;
bsr_bits = cb7210_paged_read_byte(cb_priv, BUS_STATUS, BUS_STATUS_PAGE);
if ((bsr_bits & BSR_REN_BIT) == 0)
- status |= BusREN;
+ status |= BUS_REN;
if ((bsr_bits & BSR_IFC_BIT) == 0)
- status |= BusIFC;
+ status |= BUS_IFC;
if ((bsr_bits & BSR_SRQ_BIT) == 0)
- status |= BusSRQ;
+ status |= BUS_SRQ;
if ((bsr_bits & BSR_EOI_BIT) == 0)
- status |= BusEOI;
+ status |= BUS_EOI;
if ((bsr_bits & BSR_NRFD_BIT) == 0)
- status |= BusNRFD;
+ status |= BUS_NRFD;
if ((bsr_bits & BSR_NDAC_BIT) == 0)
- status |= BusNDAC;
+ status |= BUS_NDAC;
if ((bsr_bits & BSR_DAV_BIT) == 0)
- status |= BusDAV;
+ status |= BUS_DAV;
if ((bsr_bits & BSR_ATN_BIT) == 0)
- status |= BusATN;
+ status |= BUS_ATN;
return status;
}
-unsigned int cb7210_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+static int cb7210_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
struct cb7210_priv *cb_priv = board->private_data;
struct nec7210_priv *nec_priv = &cb_priv->nec7210_priv;
@@ -424,16 +425,16 @@ unsigned int cb7210_t1_delay(gpib_board_t *board, unsigned int nano_sec)
return retval;
}
-irqreturn_t cb7210_locked_internal_interrupt(gpib_board_t *board);
+static irqreturn_t cb7210_locked_internal_interrupt(struct gpib_board *board);
/*
* GPIB interrupt service routines
*/
-irqreturn_t cb_pci_interrupt(int irq, void *arg)
+static irqreturn_t cb_pci_interrupt(int irq, void *arg)
{
int bits;
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct cb7210_priv *priv = board->private_data;
// first task check if this is really our interrupt in a shared irq environment
@@ -462,7 +463,7 @@ irqreturn_t cb_pci_interrupt(int irq, void *arg)
return cb7210_locked_internal_interrupt(arg);
}
-irqreturn_t cb7210_internal_interrupt(gpib_board_t *board)
+static irqreturn_t cb7210_internal_interrupt(struct gpib_board *board)
{
int hs_status, status1, status2;
struct cb7210_priv *priv = board->private_data;
@@ -479,7 +480,7 @@ irqreturn_t cb7210_internal_interrupt(gpib_board_t *board)
status2 = read_byte(nec_priv, ISR2);
nec7210_interrupt_have_status(board, nec_priv, status1, status2);
- dev_dbg(board->gpib_dev, "cb7210: status 0x%x, mode 0x%x\n", hs_status, priv->hs_mode_bits);
+ dev_dbg(board->gpib_dev, "status 0x%x, mode 0x%x\n", hs_status, priv->hs_mode_bits);
clear_bits = 0;
@@ -516,7 +517,7 @@ irqreturn_t cb7210_internal_interrupt(gpib_board_t *board)
return IRQ_HANDLED;
}
-irqreturn_t cb7210_locked_internal_interrupt(gpib_board_t *board)
+static irqreturn_t cb7210_locked_internal_interrupt(struct gpib_board *board)
{
unsigned long flags;
irqreturn_t retval;
@@ -527,55 +528,57 @@ irqreturn_t cb7210_locked_internal_interrupt(gpib_board_t *board)
return retval;
}
-irqreturn_t cb7210_interrupt(int irq, void *arg)
+static irqreturn_t cb7210_interrupt(int irq, void *arg)
{
return cb7210_internal_interrupt(arg);
}
-static int cb_pci_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static int cb_isa_attach(gpib_board_t *board, const gpib_board_config_t *config);
+static int cb_pci_attach(struct gpib_board *board, const gpib_board_config_t *config);
+static int cb_isa_attach(struct gpib_board *board, const gpib_board_config_t *config);
-static void cb_pci_detach(gpib_board_t *board);
-static void cb_isa_detach(gpib_board_t *board);
+static void cb_pci_detach(struct gpib_board *board);
+static void cb_isa_detach(struct gpib_board *board);
// wrappers for interface functions
-int cb7210_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read)
+static int cb7210_read(struct gpib_board *board, uint8_t *buffer, size_t length,
+ int *end, size_t *bytes_read)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_read(board, &priv->nec7210_priv, buffer, length, end, bytes_read);
}
-int cb7210_write(gpib_board_t *board, uint8_t *buffer, size_t length,
- int send_eoi, size_t *bytes_written)
+static int cb7210_write(struct gpib_board *board, uint8_t *buffer, size_t length,
+ int send_eoi, size_t *bytes_written)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_write(board, &priv->nec7210_priv, buffer, length, send_eoi, bytes_written);
}
-int cb7210_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written)
+static int cb7210_command(struct gpib_board *board, uint8_t *buffer, size_t length,
+ size_t *bytes_written)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_command(board, &priv->nec7210_priv, buffer, length, bytes_written);
}
-int cb7210_take_control(gpib_board_t *board, int synchronous)
+static int cb7210_take_control(struct gpib_board *board, int synchronous)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_take_control(board, &priv->nec7210_priv, synchronous);
}
-int cb7210_go_to_standby(gpib_board_t *board)
+static int cb7210_go_to_standby(struct gpib_board *board)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_go_to_standby(board, &priv->nec7210_priv);
}
-void cb7210_request_system_control(gpib_board_t *board, int request_control)
+static void cb7210_request_system_control(struct gpib_board *board, int request_control)
{
struct cb7210_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -589,91 +592,91 @@ void cb7210_request_system_control(gpib_board_t *board, int request_control)
nec7210_request_system_control(board, nec_priv, request_control);
}
-void cb7210_interface_clear(gpib_board_t *board, int assert)
+static void cb7210_interface_clear(struct gpib_board *board, int assert)
{
struct cb7210_priv *priv = board->private_data;
nec7210_interface_clear(board, &priv->nec7210_priv, assert);
}
-void cb7210_remote_enable(gpib_board_t *board, int enable)
+static void cb7210_remote_enable(struct gpib_board *board, int enable)
{
struct cb7210_priv *priv = board->private_data;
nec7210_remote_enable(board, &priv->nec7210_priv, enable);
}
-int cb7210_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+static int cb7210_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_enable_eos(board, &priv->nec7210_priv, eos_byte, compare_8_bits);
}
-void cb7210_disable_eos(gpib_board_t *board)
+static void cb7210_disable_eos(struct gpib_board *board)
{
struct cb7210_priv *priv = board->private_data;
nec7210_disable_eos(board, &priv->nec7210_priv);
}
-unsigned int cb7210_update_status(gpib_board_t *board, unsigned int clear_mask)
+static unsigned int cb7210_update_status(struct gpib_board *board, unsigned int clear_mask)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_update_status(board, &priv->nec7210_priv, clear_mask);
}
-int cb7210_primary_address(gpib_board_t *board, unsigned int address)
+static int cb7210_primary_address(struct gpib_board *board, unsigned int address)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_primary_address(board, &priv->nec7210_priv, address);
}
-int cb7210_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+static int cb7210_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_secondary_address(board, &priv->nec7210_priv, address, enable);
}
-int cb7210_parallel_poll(gpib_board_t *board, uint8_t *result)
+static int cb7210_parallel_poll(struct gpib_board *board, uint8_t *result)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_parallel_poll(board, &priv->nec7210_priv, result);
}
-void cb7210_parallel_poll_configure(gpib_board_t *board, uint8_t configuration)
+static void cb7210_parallel_poll_configure(struct gpib_board *board, uint8_t configuration)
{
struct cb7210_priv *priv = board->private_data;
nec7210_parallel_poll_configure(board, &priv->nec7210_priv, configuration);
}
-void cb7210_parallel_poll_response(gpib_board_t *board, int ist)
+static void cb7210_parallel_poll_response(struct gpib_board *board, int ist)
{
struct cb7210_priv *priv = board->private_data;
nec7210_parallel_poll_response(board, &priv->nec7210_priv, ist);
}
-void cb7210_serial_poll_response(gpib_board_t *board, uint8_t status)
+static void cb7210_serial_poll_response(struct gpib_board *board, uint8_t status)
{
struct cb7210_priv *priv = board->private_data;
nec7210_serial_poll_response(board, &priv->nec7210_priv, status);
}
-uint8_t cb7210_serial_poll_status(gpib_board_t *board)
+static uint8_t cb7210_serial_poll_status(struct gpib_board *board)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_serial_poll_status(board, &priv->nec7210_priv);
}
-void cb7210_return_to_local(gpib_board_t *board)
+static void cb7210_return_to_local(struct gpib_board *board)
{
struct cb7210_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -849,27 +852,27 @@ static gpib_interface_t cb_isa_accel_interface = {
.return_to_local = cb7210_return_to_local,
};
-static int cb7210_allocate_private(gpib_board_t *board)
+static int cb7210_allocate_private(struct gpib_board *board)
{
struct cb7210_priv *priv;
board->private_data = kmalloc(sizeof(struct cb7210_priv), GFP_KERNEL);
if (!board->private_data)
- return -1;
+ return -ENOMEM;
priv = board->private_data;
memset(priv, 0, sizeof(struct cb7210_priv));
init_nec7210_private(&priv->nec7210_priv);
return 0;
}
-void cb7210_generic_detach(gpib_board_t *board)
+static void cb7210_generic_detach(struct gpib_board *board)
{
kfree(board->private_data);
board->private_data = NULL;
}
// generic part of attach functions shared by all cb7210 boards
-int cb7210_generic_attach(gpib_board_t *board)
+static int cb7210_generic_attach(struct gpib_board *board)
{
struct cb7210_priv *cb_priv;
struct nec7210_priv *nec_priv;
@@ -887,7 +890,7 @@ int cb7210_generic_attach(gpib_board_t *board)
return 0;
}
-int cb7210_init(struct cb7210_priv *cb_priv, gpib_board_t *board)
+static int cb7210_init(struct cb7210_priv *cb_priv, struct gpib_board *board)
{
struct nec7210_priv *nec_priv = &cb_priv->nec7210_priv;
@@ -917,13 +920,13 @@ int cb7210_init(struct cb7210_priv *cb_priv, gpib_board_t *board)
/* poll so we can detect assertion of ATN */
if (gpib_request_pseudo_irq(board, cb_pci_interrupt)) {
- pr_err("pc2_gpib: failed to allocate pseudo_irq\n");
+ pr_err("failed to allocate pseudo_irq\n");
return -1;
}
return 0;
}
-int cb_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int cb_pci_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct cb7210_priv *cb_priv;
struct nec7210_priv *nec_priv;
@@ -957,17 +960,17 @@ int cb_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
}
}
if (!cb_priv->pci_device) {
- pr_warn("cb7210: no supported boards found.\n");
- return -1;
+ dev_err(board->gpib_dev, "no supported boards found.\n");
+ return -ENODEV;
}
if (pci_enable_device(cb_priv->pci_device)) {
- pr_err("cb7210: error enabling pci device\n");
- return -1;
+ dev_err(board->gpib_dev, "error enabling pci device\n");
+ return -EIO;
}
- if (pci_request_regions(cb_priv->pci_device, "cb7210"))
- return -1;
+ if (pci_request_regions(cb_priv->pci_device, DRV_NAME))
+ return -EBUSY;
switch (cb_priv->pci_chip) {
case PCI_CHIP_AMCC_S5933:
cb_priv->amcc_iobase = pci_resource_start(cb_priv->pci_device, 0);
@@ -979,13 +982,14 @@ int cb_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
cb_priv->fifo_iobase = nec_priv->iobase;
break;
default:
- pr_err("cb7210: bug! unhandled pci_chip=%i\n", cb_priv->pci_chip);
+ dev_err(board->gpib_dev, "bug! unhandled pci_chip=%i\n", cb_priv->pci_chip);
return -EIO;
}
isr_flags |= IRQF_SHARED;
- if (request_irq(cb_priv->pci_device->irq, cb_pci_interrupt, isr_flags, "cb7210", board)) {
- pr_err("cb7210: can't request IRQ %d\n", cb_priv->pci_device->irq);
- return -1;
+ if (request_irq(cb_priv->pci_device->irq, cb_pci_interrupt, isr_flags, DRV_NAME, board)) {
+ dev_err(board->gpib_dev, "can't request IRQ %d\n",
+ cb_priv->pci_device->irq);
+ return -EBUSY;
}
cb_priv->irq = cb_priv->pci_device->irq;
@@ -1004,7 +1008,7 @@ int cb_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
return cb7210_init(cb_priv, board);
}
-void cb_pci_detach(gpib_board_t *board)
+static void cb_pci_detach(struct gpib_board *board)
{
struct cb7210_priv *cb_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -1027,7 +1031,7 @@ void cb_pci_detach(gpib_board_t *board)
cb7210_generic_detach(board);
}
-int cb_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int cb_isa_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
int isr_flags = 0;
struct cb7210_priv *cb_priv;
@@ -1040,20 +1044,22 @@ int cb_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
return retval;
cb_priv = board->private_data;
nec_priv = &cb_priv->nec7210_priv;
- if (!request_region(config->ibbase, cb7210_iosize, "cb7210")) {
- pr_err("gpib: ioports starting at 0x%x are already in use\n", config->ibbase);
- return -EIO;
+ if (!request_region(config->ibbase, cb7210_iosize, DRV_NAME)) {
+ dev_err(board->gpib_dev, "ioports starting at 0x%x are already in use\n",
+ config->ibbase);
+ return -EBUSY;
}
nec_priv->iobase = config->ibbase;
cb_priv->fifo_iobase = nec7210_iobase(cb_priv);
bits = irq_bits(config->ibirq);
if (bits == 0)
- pr_err("board incapable of using irq %i, try 2-5, 7, 10, or 11\n", config->ibirq);
+ dev_err(board->gpib_dev, "board incapable of using irq %i, try 2-5, 7, 10, or 11\n",
+ config->ibirq);
// install interrupt handler
- if (request_irq(config->ibirq, cb7210_interrupt, isr_flags, "cb7210", board)) {
- pr_err("gpib: can't request IRQ %d\n", config->ibirq);
+ if (request_irq(config->ibirq, cb7210_interrupt, isr_flags, DRV_NAME, board)) {
+ dev_err(board->gpib_dev, "failed to obtain IRQ %d\n", config->ibirq);
return -EBUSY;
}
cb_priv->irq = config->ibirq;
@@ -1061,7 +1067,7 @@ int cb_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
return cb7210_init(cb_priv, board);
}
-void cb_isa_detach(gpib_board_t *board)
+static void cb_isa_detach(struct gpib_board *board)
{
struct cb7210_priv *cb_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -1093,7 +1099,7 @@ static const struct pci_device_id cb7210_pci_table[] = {
MODULE_DEVICE_TABLE(pci, cb7210_pci_table);
static struct pci_driver cb7210_pci_driver = {
- .name = "cb7210",
+ .name = DRV_NAME,
.id_table = cb7210_pci_table,
.probe = &cb7210_pci_probe
};
@@ -1106,7 +1112,7 @@ static struct pci_driver cb7210_pci_driver = {
* pcmcia skeleton example (presumably David Hinds)
***************************************************************************/
-#ifdef GPIB_PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
#include <linux/kernel.h>
#include <linux/ptrace.h>
@@ -1117,23 +1123,6 @@ static struct pci_driver cb7210_pci_driver = {
#include <pcmcia/ds.h>
/*
- * All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
- * you do not define PCMCIA_DEBUG at all, all the debug code will be
- * left out. If you compile with PCMCIA_DEBUG=0, the debug code will
- * be present but disabled -- but it can then be enabled for specific
- * modules at load time with a 'pc_debug=#' option to insmod.
- */
-
-#define PCMCIA_DEBUG 1
-
-#ifdef PCMCIA_DEBUG
-static int pc_debug = PCMCIA_DEBUG;
-#define DEBUG(n, args...) do {if (pc_debug > (n)) pr_debug(args); } while (0)
-#else
-#define DEBUG(args...)
-#endif
-
-/*
* The event() function is this driver's Card Services event handler.
* It will be called by Card Services when an appropriate card status
* event is received. The config() and release() entry points are
@@ -1144,8 +1133,8 @@ static int pc_debug = PCMCIA_DEBUG;
static int cb_gpib_config(struct pcmcia_device *link);
static void cb_gpib_release(struct pcmcia_device *link);
-static int cb_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static void cb_pcmcia_detach(gpib_board_t *board);
+static int cb_pcmcia_attach(struct gpib_board *board, const gpib_board_config_t *config);
+static void cb_pcmcia_detach(struct gpib_board *board);
/*
* A linked list of "instances" of the gpib device. Each actual
@@ -1178,7 +1167,7 @@ static struct pcmcia_device *curr_dev;
struct local_info {
struct pcmcia_device *p_dev;
- gpib_board_t *dev;
+ struct gpib_board *dev;
};
/*
@@ -1197,8 +1186,6 @@ static int cb_gpib_probe(struct pcmcia_device *link)
// int ret, i;
- DEBUG(0, "%s(0x%p)\n", __func__, link);
-
/* Allocate space for private device-specific data */
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
@@ -1236,9 +1223,7 @@ static int cb_gpib_probe(struct pcmcia_device *link)
static void cb_gpib_remove(struct pcmcia_device *link)
{
struct local_info *info = link->priv;
- //struct gpib_board_t *dev = info->dev;
-
- DEBUG(0, "%s(0x%p)\n", __func__, link);
+ //struct gpib_board *dev = info->dev;
if (info->dev)
cb_pcmcia_detach(info->dev);
@@ -1267,7 +1252,6 @@ static int cb_gpib_config(struct pcmcia_device *link)
handle = link;
dev = link->priv;
- DEBUG(0, "%s(0x%p)\n", __func__, link);
retval = pcmcia_loop_config(link, &cb_gpib_config_iteration, NULL);
if (retval) {
@@ -1276,8 +1260,6 @@ static int cb_gpib_config(struct pcmcia_device *link)
return -ENODEV;
}
- DEBUG(0, "gpib_cs: manufacturer: 0x%x card: 0x%x\n", link->manf_id, link->card_id);
-
/*
* This actually configures the PCMCIA socket -- setting up
* the I/O windows and the interrupt mapping.
@@ -1289,7 +1271,6 @@ static int cb_gpib_config(struct pcmcia_device *link)
return -ENODEV;
}
- pr_info("gpib device loaded\n");
return 0;
} /* gpib_config */
@@ -1301,18 +1282,16 @@ static int cb_gpib_config(struct pcmcia_device *link)
static void cb_gpib_release(struct pcmcia_device *link)
{
- DEBUG(0, "%s(0x%p)\n", __func__, link);
pcmcia_disable_device(link);
}
static int cb_gpib_suspend(struct pcmcia_device *link)
{
//struct local_info *info = link->priv;
- //struct gpib_board_t *dev = info->dev;
- DEBUG(0, "%s(0x%p)\n", __func__, link);
+ //struct gpib_board *dev = info->dev;
if (link->open)
- pr_warn("Device still open ???\n");
+ dev_warn(&link->dev, "Device still open\n");
//netif_device_detach(dev);
return 0;
@@ -1321,12 +1300,10 @@ static int cb_gpib_suspend(struct pcmcia_device *link)
static int cb_gpib_resume(struct pcmcia_device *link)
{
//struct local_info *info = link->priv;
- //struct gpib_board_t *dev = info->dev;
- DEBUG(0, "%s(0x%p)\n", __func__, link);
+ //struct gpib_board *dev = info->dev;
/*if (link->open) {
* ni_gpib_probe(dev); / really?
- * printk("Gpib resumed ???\n");
* //netif_device_attach(dev);
*
*/
@@ -1342,8 +1319,8 @@ static struct pcmcia_device_id cb_pcmcia_ids[] = {
MODULE_DEVICE_TABLE(pcmcia, cb_pcmcia_ids);
static struct pcmcia_driver cb_gpib_cs_driver = {
+ .name = "cb_gpib_cs",
.owner = THIS_MODULE,
- .drv = { .name = "cb_gpib_cs", },
.id_table = cb_pcmcia_ids,
.probe = cb_gpib_probe,
.remove = cb_gpib_remove,
@@ -1351,9 +1328,8 @@ static struct pcmcia_driver cb_gpib_cs_driver = {
.resume = cb_gpib_resume,
};
-void cb_pcmcia_cleanup_module(void)
+static void cb_pcmcia_cleanup_module(void)
{
- DEBUG(0, "cb_gpib_cs: unloading\n");
pcmcia_unregister_driver(&cb_gpib_cs_driver);
}
@@ -1441,15 +1417,15 @@ static gpib_interface_t cb_pcmcia_accel_interface = {
.return_to_local = cb7210_return_to_local,
};
-int cb_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int cb_pcmcia_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct cb7210_priv *cb_priv;
struct nec7210_priv *nec_priv;
int retval;
if (!curr_dev) {
- pr_err("no cb pcmcia cards found\n");
- return -1;
+ dev_err(board->gpib_dev, "no cb pcmcia cards found\n");
+ return -ENODEV;
}
retval = cb7210_generic_attach(board);
@@ -1460,25 +1436,24 @@ int cb_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
nec_priv = &cb_priv->nec7210_priv;
if (!request_region(curr_dev->resource[0]->start, resource_size(curr_dev->resource[0]),
- "cb7210")) {
- pr_err("gpib: ioports starting at 0x%lx are already in use\n",
- (unsigned long)curr_dev->resource[0]->start);
- return -EIO;
+ DRV_NAME)) {
+ dev_err(board->gpib_dev, "ioports starting at 0x%lx are already in use\n",
+ (unsigned long)curr_dev->resource[0]->start);
+ return -EBUSY;
}
nec_priv->iobase = curr_dev->resource[0]->start;
cb_priv->fifo_iobase = curr_dev->resource[0]->start;
- if (request_irq(curr_dev->irq, cb7210_interrupt, IRQF_SHARED,
- "cb7210", board)) {
- pr_err("cb7210: failed to request IRQ %d\n", curr_dev->irq);
- return -1;
+ if (request_irq(curr_dev->irq, cb7210_interrupt, IRQF_SHARED, DRV_NAME, board)) {
+ dev_err(board->gpib_dev, "failed to request IRQ %d\n", curr_dev->irq);
+ return -EBUSY;
}
cb_priv->irq = curr_dev->irq;
return cb7210_init(cb_priv, board);
}
-void cb_pcmcia_detach(gpib_board_t *board)
+static void cb_pcmcia_detach(struct gpib_board *board)
{
struct cb7210_priv *cb_priv = board->private_data;
struct nec7210_priv *nec_priv;
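[Editor's note: on the error-code conversions in this hunk: a bare return -1 is indistinguishable from -EPERM (errno 1), so callers misreport the failure as "Operation not permitted"; -ENODEV and -EBUSY name the actual condition. A hypothetical sketch of the convention, where hardware_present() and region_claimed() are stand-ins rather than driver symbols:

#include <linux/errno.h>
#include <linux/types.h>

static bool hardware_present(void);	/* stand-in */
static bool region_claimed(void);	/* stand-in */

static int demo_attach(void)
{
	if (!hardware_present())
		return -ENODEV;	/* no such device: card not found */
	if (region_claimed())
		return -EBUSY;	/* I/O ports or IRQ already taken */
	return 0;
}
]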
@@ -1496,7 +1471,7 @@ void cb_pcmcia_detach(gpib_board_t *board)
cb7210_generic_detach(board);
}
-#endif /* GPIB_PCMCIA */
+#endif /* CONFIG_GPIB_PCMCIA */
static int __init cb7210_init_module(void)
{
@@ -1504,75 +1479,75 @@ static int __init cb7210_init_module(void)
ret = pci_register_driver(&cb7210_pci_driver);
if (ret) {
- pr_err("cb7210: pci_register_driver failed: error = %d\n", ret);
+ pr_err("pci_register_driver failed: error = %d\n", ret);
return ret;
}
ret = gpib_register_driver(&cb_pci_interface, THIS_MODULE);
if (ret) {
- pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pci;
}
ret = gpib_register_driver(&cb_isa_interface, THIS_MODULE);
if (ret) {
- pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_isa;
}
ret = gpib_register_driver(&cb_pci_accel_interface, THIS_MODULE);
if (ret) {
- pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pci_accel;
}
ret = gpib_register_driver(&cb_pci_unaccel_interface, THIS_MODULE);
if (ret) {
- pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pci_unaccel;
}
ret = gpib_register_driver(&cb_isa_accel_interface, THIS_MODULE);
if (ret) {
- pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_isa_accel;
}
ret = gpib_register_driver(&cb_isa_unaccel_interface, THIS_MODULE);
if (ret) {
- pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_isa_unaccel;
}
-#ifdef GPIB_PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
ret = gpib_register_driver(&cb_pcmcia_interface, THIS_MODULE);
if (ret) {
- pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pcmcia;
}
ret = gpib_register_driver(&cb_pcmcia_accel_interface, THIS_MODULE);
if (ret) {
- pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pcmcia_accel;
}
ret = gpib_register_driver(&cb_pcmcia_unaccel_interface, THIS_MODULE);
if (ret) {
- pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pcmcia_unaccel;
}
ret = pcmcia_register_driver(&cb_gpib_cs_driver);
if (ret) {
- pr_err("cb7210: pcmcia_register_driver failed: error = %d\n", ret);
+ pr_err("pcmcia_register_driver failed: error = %d\n", ret);
goto err_pcmcia_driver;
}
#endif
return 0;
-#ifdef GPIB_PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
err_pcmcia_driver:
gpib_unregister_driver(&cb_pcmcia_unaccel_interface);
err_pcmcia_unaccel:
@@ -1606,7 +1581,7 @@ static void __exit cb7210_exit_module(void)
gpib_unregister_driver(&cb_pci_unaccel_interface);
gpib_unregister_driver(&cb_isa_accel_interface);
gpib_unregister_driver(&cb_isa_unaccel_interface);
-#ifdef GPIB_PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
gpib_unregister_driver(&cb_pcmcia_interface);
gpib_unregister_driver(&cb_pcmcia_accel_interface);
gpib_unregister_driver(&cb_pcmcia_unaccel_interface);
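[Editor's note: cb7210_init_module() registers a chain of interfaces, with extra entries under CONFIG_GPIB_PCMCIA, and unwinds them in reverse order on failure. A condensed sketch of the goto-ladder pattern it follows; register_a/b/c and the unregister helpers are placeholders, not the driver's symbols:

#include <linux/init.h>

static int register_a(void), register_b(void), register_c(void);	/* placeholders */
static void unregister_a(void), unregister_b(void);

static int __init demo_init(void)
{
	int ret;

	ret = register_a();
	if (ret)
		return ret;

	ret = register_b();
	if (ret)
		goto err_a;

	ret = register_c();
	if (ret)
		goto err_b;

	return 0;

err_b:
	unregister_b();	/* undo in reverse order of registration */
err_a:
	unregister_a();
	return ret;
}
]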
diff --git a/drivers/staging/gpib/cb7210/cb7210.h b/drivers/staging/gpib/cb7210/cb7210.h
index d56cd905cc8c..2108fe7a8ce5 100644
--- a/drivers/staging/gpib/cb7210/cb7210.h
+++ b/drivers/staging/gpib/cb7210/cb7210.h
@@ -36,51 +36,6 @@ struct cb7210_priv {
unsigned in_fifo_half_full : 1;
};
-// interrupt service routines
-irqreturn_t cb_pci_interrupt(int irq, void *arg);
-irqreturn_t cb7210_interrupt(int irq, void *arg);
-irqreturn_t cb7210_internal_interrupt(gpib_board_t *board);
-
-// interface functions
-int cb7210_read(gpib_board_t *board, uint8_t *buffer, size_t length,
- int *end, size_t *bytes_read);
-int cb7210_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length,
- int *end, size_t *bytes_read);
-int cb7210_write(gpib_board_t *board, uint8_t *buffer, size_t length,
- int send_eoi, size_t *bytes_written);
-int cb7210_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length,
- int send_eoi, size_t *bytes_written);
-int cb7210_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written);
-int cb7210_take_control(gpib_board_t *board, int synchronous);
-int cb7210_go_to_standby(gpib_board_t *board);
-void cb7210_request_system_control(gpib_board_t *board, int request_control);
-void cb7210_interface_clear(gpib_board_t *board, int assert);
-void cb7210_remote_enable(gpib_board_t *board, int enable);
-int cb7210_enable_eos(gpib_board_t *board, uint8_t eos_byte,
- int compare_8_bits);
-void cb7210_disable_eos(gpib_board_t *board);
-unsigned int cb7210_update_status(gpib_board_t *board, unsigned int clear_mask);
-int cb7210_primary_address(gpib_board_t *board, unsigned int address);
-int cb7210_secondary_address(gpib_board_t *board, unsigned int address,
- int enable);
-int cb7210_parallel_poll(gpib_board_t *board, uint8_t *result);
-void cb7210_serial_poll_response(gpib_board_t *board, uint8_t status);
-uint8_t cb7210_serial_poll_status(gpib_board_t *board);
-void cb7210_parallel_poll_configure(gpib_board_t *board, uint8_t configuration);
-void cb7210_parallel_poll_response(gpib_board_t *board, int ist);
-int cb7210_line_status(const gpib_board_t *board);
-unsigned int cb7210_t1_delay(gpib_board_t *board, unsigned int nano_sec);
-void cb7210_return_to_local(gpib_board_t *board);
-
-// utility functions
-void cb7210_generic_detach(gpib_board_t *board);
-int cb7210_generic_attach(gpib_board_t *board);
-int cb7210_init(struct cb7210_priv *priv, gpib_board_t *board);
-
-// pcmcia init/cleanup
-int cb_pcmcia_init_module(void);
-void cb_pcmcia_cleanup_module(void);
-
// pci-gpib register offset
static const int cb7210_reg_offset = 1;
diff --git a/drivers/staging/gpib/cec/cec.h b/drivers/staging/gpib/cec/cec.h
index 040ca70ed708..3ce2869c7429 100644
--- a/drivers/staging/gpib/cec/cec.h
+++ b/drivers/staging/gpib/cec/cec.h
@@ -16,34 +16,5 @@ struct cec_priv {
unsigned int irq;
};
-// interface functions
-int cec_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read);
-int cec_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written);
-int cec_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written);
-int cec_take_control(gpib_board_t *board, int synchronous);
-int cec_go_to_standby(gpib_board_t *board);
-void cec_request_system_control(gpib_board_t *board, int request_control);
-void cec_interface_clear(gpib_board_t *board, int assert);
-void cec_remote_enable(gpib_board_t *board, int enable);
-int cec_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits);
-void cec_disable_eos(gpib_board_t *board);
-unsigned int cec_update_status(gpib_board_t *board, unsigned int clear_mask);
-int cec_primary_address(gpib_board_t *board, unsigned int address);
-int cec_secondary_address(gpib_board_t *board, unsigned int address, int enable);
-int cec_parallel_poll(gpib_board_t *board, uint8_t *result);
-void cec_parallel_poll_configure(gpib_board_t *board, uint8_t configuration);
-void cec_parallel_poll_response(gpib_board_t *board, int ist);
-void cec_serial_poll_response(gpib_board_t *board, uint8_t status);
-void cec_return_to_local(gpib_board_t *board);
-
-// interrupt service routines
-irqreturn_t cec_interrupt(int irq, void *arg);
-
-// utility functions
-void cec_free_private(gpib_board_t *board);
-int cec_generic_attach(gpib_board_t *board);
-void cec_init(struct cec_priv *priv, const gpib_board_t *board);
-
// offset between consecutive nec7210 registers
static const int cec_reg_offset = 1;
diff --git a/drivers/staging/gpib/cec/cec_gpib.c b/drivers/staging/gpib/cec/cec_gpib.c
index d056cd1d6b3e..a822fa428cd0 100644
--- a/drivers/staging/gpib/cec/cec_gpib.c
+++ b/drivers/staging/gpib/cec/cec_gpib.c
@@ -4,6 +4,10 @@
* copyright : (C) 2002 by Frank Mori Hess
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define DRV_NAME KBUILD_MODNAME
+
#include "cec.h"
#include <linux/pci.h>
#include <linux/io.h>
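[Editor's note: the pr_fmt/dev_fmt definitions above are why the explicit "cec_gpib: " and "gpib: " prefixes disappear from the log calls later in this file: the module name is now prepended once, centrally. Roughly, assuming the module builds as cec_gpib:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/printk.h>

static void demo(void)
{
	pr_err("gpib_register_driver failed: error = %d\n", -19);
	/* logs: "cec_gpib: gpib_register_driver failed: error = -19" */
}
]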
@@ -19,9 +23,9 @@ MODULE_DESCRIPTION("GPIB driver for CEC PCI and PCMCIA boards");
* GPIB interrupt service routines
*/
-irqreturn_t cec_interrupt(int irq, void *arg)
+static irqreturn_t cec_interrupt(int irq, void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct cec_priv *priv = board->private_data;
unsigned long flags;
irqreturn_t retval;
@@ -36,146 +40,148 @@ irqreturn_t cec_interrupt(int irq, void *arg)
#define CEC_DEV_ID 0x5cec
#define CEC_SUBID 0x9050
-static int cec_pci_attach(gpib_board_t *board, const gpib_board_config_t *config);
+static int cec_pci_attach(struct gpib_board *board, const gpib_board_config_t *config);
-static void cec_pci_detach(gpib_board_t *board);
+static void cec_pci_detach(struct gpib_board *board);
// wrappers for interface functions
-int cec_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read)
+static int cec_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
+ size_t *bytes_read)
{
struct cec_priv *priv = board->private_data;
return nec7210_read(board, &priv->nec7210_priv, buffer, length, end, bytes_read);
}
-int cec_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written)
+static int cec_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
+ size_t *bytes_written)
{
struct cec_priv *priv = board->private_data;
return nec7210_write(board, &priv->nec7210_priv, buffer, length, send_eoi, bytes_written);
}
-int cec_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written)
+static int cec_command(struct gpib_board *board, uint8_t *buffer,
+ size_t length, size_t *bytes_written)
{
struct cec_priv *priv = board->private_data;
return nec7210_command(board, &priv->nec7210_priv, buffer, length, bytes_written);
}
-int cec_take_control(gpib_board_t *board, int synchronous)
+static int cec_take_control(struct gpib_board *board, int synchronous)
{
struct cec_priv *priv = board->private_data;
return nec7210_take_control(board, &priv->nec7210_priv, synchronous);
}
-int cec_go_to_standby(gpib_board_t *board)
+static int cec_go_to_standby(struct gpib_board *board)
{
struct cec_priv *priv = board->private_data;
return nec7210_go_to_standby(board, &priv->nec7210_priv);
}
-void cec_request_system_control(gpib_board_t *board, int request_control)
+static void cec_request_system_control(struct gpib_board *board, int request_control)
{
struct cec_priv *priv = board->private_data;
nec7210_request_system_control(board, &priv->nec7210_priv, request_control);
}
-void cec_interface_clear(gpib_board_t *board, int assert)
+static void cec_interface_clear(struct gpib_board *board, int assert)
{
struct cec_priv *priv = board->private_data;
nec7210_interface_clear(board, &priv->nec7210_priv, assert);
}
-void cec_remote_enable(gpib_board_t *board, int enable)
+static void cec_remote_enable(struct gpib_board *board, int enable)
{
struct cec_priv *priv = board->private_data;
nec7210_remote_enable(board, &priv->nec7210_priv, enable);
}
-int cec_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+static int cec_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
{
struct cec_priv *priv = board->private_data;
return nec7210_enable_eos(board, &priv->nec7210_priv, eos_byte, compare_8_bits);
}
-void cec_disable_eos(gpib_board_t *board)
+static void cec_disable_eos(struct gpib_board *board)
{
struct cec_priv *priv = board->private_data;
nec7210_disable_eos(board, &priv->nec7210_priv);
}
-unsigned int cec_update_status(gpib_board_t *board, unsigned int clear_mask)
+static unsigned int cec_update_status(struct gpib_board *board, unsigned int clear_mask)
{
struct cec_priv *priv = board->private_data;
return nec7210_update_status(board, &priv->nec7210_priv, clear_mask);
}
-int cec_primary_address(gpib_board_t *board, unsigned int address)
+static int cec_primary_address(struct gpib_board *board, unsigned int address)
{
struct cec_priv *priv = board->private_data;
return nec7210_primary_address(board, &priv->nec7210_priv, address);
}
-int cec_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+static int cec_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
struct cec_priv *priv = board->private_data;
return nec7210_secondary_address(board, &priv->nec7210_priv, address, enable);
}
-int cec_parallel_poll(gpib_board_t *board, uint8_t *result)
+static int cec_parallel_poll(struct gpib_board *board, uint8_t *result)
{
struct cec_priv *priv = board->private_data;
return nec7210_parallel_poll(board, &priv->nec7210_priv, result);
}
-void cec_parallel_poll_configure(gpib_board_t *board, uint8_t config)
+static void cec_parallel_poll_configure(struct gpib_board *board, uint8_t config)
{
struct cec_priv *priv = board->private_data;
nec7210_parallel_poll_configure(board, &priv->nec7210_priv, config);
}
-void cec_parallel_poll_response(gpib_board_t *board, int ist)
+static void cec_parallel_poll_response(struct gpib_board *board, int ist)
{
struct cec_priv *priv = board->private_data;
nec7210_parallel_poll_response(board, &priv->nec7210_priv, ist);
}
-void cec_serial_poll_response(gpib_board_t *board, uint8_t status)
+static void cec_serial_poll_response(struct gpib_board *board, uint8_t status)
{
struct cec_priv *priv = board->private_data;
nec7210_serial_poll_response(board, &priv->nec7210_priv, status);
}
-static uint8_t cec_serial_poll_status(gpib_board_t *board)
+static uint8_t cec_serial_poll_status(struct gpib_board *board)
{
struct cec_priv *priv = board->private_data;
return nec7210_serial_poll_status(board, &priv->nec7210_priv);
}
-static unsigned int cec_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+static int cec_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
struct cec_priv *priv = board->private_data;
return nec7210_t1_delay(board, &priv->nec7210_priv, nano_sec);
}
-void cec_return_to_local(gpib_board_t *board)
+static void cec_return_to_local(struct gpib_board *board)
{
struct cec_priv *priv = board->private_data;
@@ -210,7 +216,7 @@ static gpib_interface_t cec_pci_interface = {
.return_to_local = cec_return_to_local,
};
-static int cec_allocate_private(gpib_board_t *board)
+static int cec_allocate_private(struct gpib_board *board)
{
struct cec_priv *priv;
@@ -223,13 +229,13 @@ static int cec_allocate_private(gpib_board_t *board)
return 0;
}
-void cec_free_private(gpib_board_t *board)
+static void cec_free_private(struct gpib_board *board)
{
kfree(board->private_data);
board->private_data = NULL;
}
-int cec_generic_attach(gpib_board_t *board)
+static int cec_generic_attach(struct gpib_board *board)
{
struct cec_priv *cec_priv;
struct nec7210_priv *nec_priv;
@@ -247,7 +253,7 @@ int cec_generic_attach(gpib_board_t *board)
return 0;
}
-void cec_init(struct cec_priv *cec_priv, const gpib_board_t *board)
+static void cec_init(struct cec_priv *cec_priv, const struct gpib_board *board)
{
struct nec7210_priv *nec_priv = &cec_priv->nec7210_priv;
@@ -259,7 +265,7 @@ void cec_init(struct cec_priv *cec_priv, const gpib_board_t *board)
nec7210_board_online(nec_priv, board);
}
-int cec_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int cec_pci_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct cec_priv *cec_priv;
struct nec7210_priv *nec_priv;
@@ -283,31 +289,29 @@ int cec_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
break;
}
if (!cec_priv->pci_device) {
- pr_err("gpib: no cec PCI board found\n");
- return -1;
+ dev_err(board->gpib_dev, "no cec PCI board found\n");
+ return -ENODEV;
}
if (pci_enable_device(cec_priv->pci_device)) {
- pr_err("error enabling pci device\n");
- return -1;
+ dev_err(board->gpib_dev, "error enabling pci device\n");
+ return -EIO;
}
if (pci_request_regions(cec_priv->pci_device, "cec-gpib"))
- return -1;
+ return -EBUSY;
cec_priv->plx_iobase = pci_resource_start(cec_priv->pci_device, 1);
- pr_info(" plx9050 base address 0x%lx\n", cec_priv->plx_iobase);
- nec_priv->iobase = pci_resource_start(cec_priv->pci_device, 3);
- pr_info(" nec7210 base address 0x%x\n", nec_priv->iobase);
+ nec_priv->iobase = pci_resource_start(cec_priv->pci_device, 3);
isr_flags |= IRQF_SHARED;
- if (request_irq(cec_priv->pci_device->irq, cec_interrupt, isr_flags, "pci-gpib", board)) {
- pr_err("gpib: can't request IRQ %d\n", cec_priv->pci_device->irq);
- return -1;
+ if (request_irq(cec_priv->pci_device->irq, cec_interrupt, isr_flags, DRV_NAME, board)) {
+ dev_err(board->gpib_dev, "failed to obtain IRQ %d\n", cec_priv->pci_device->irq);
+ return -EBUSY;
}
cec_priv->irq = cec_priv->pci_device->irq;
if (gpib_request_pseudo_irq(board, cec_interrupt)) {
- pr_err("cec: failed to allocate pseudo irq\n");
+ dev_err(board->gpib_dev, "failed to allocate pseudo irq\n");
return -1;
}
cec_init(cec_priv, board);
@@ -319,7 +323,7 @@ int cec_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
return 0;
}
-void cec_pci_detach(gpib_board_t *board)
+static void cec_pci_detach(struct gpib_board *board)
{
struct cec_priv *cec_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -354,7 +358,7 @@ static const struct pci_device_id cec_pci_table[] = {
MODULE_DEVICE_TABLE(pci, cec_pci_table);
static struct pci_driver cec_pci_driver = {
- .name = "cec_gpib",
+ .name = DRV_NAME,
.id_table = cec_pci_table,
.probe = &cec_pci_probe
};
@@ -365,13 +369,13 @@ static int __init cec_init_module(void)
result = pci_register_driver(&cec_pci_driver);
if (result) {
- pr_err("cec_gpib: pci_register_driver failed: error = %d\n", result);
+ pr_err("pci_register_driver failed: error = %d\n", result);
return result;
}
result = gpib_register_driver(&cec_pci_interface, THIS_MODULE);
if (result) {
- pr_err("cec_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
return result;
}
diff --git a/drivers/staging/gpib/common/gpib_os.c b/drivers/staging/gpib/common/gpib_os.c
index 4901e660242e..cb77fe0a4b9a 100644
--- a/drivers/staging/gpib/common/gpib_os.c
+++ b/drivers/staging/gpib/common/gpib_os.c
@@ -5,6 +5,9 @@
***************************************************************************
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+
#include "ibsys.h"
#include <linux/module.h>
#include <linux/wait.h>
@@ -23,53 +26,53 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB base support");
MODULE_ALIAS_CHARDEV_MAJOR(GPIB_CODE);
-static int board_type_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board, unsigned long arg);
-static int read_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
+static int board_type_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board, unsigned long arg);
+static int read_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board,
unsigned long arg);
-static int write_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
+static int write_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board,
unsigned long arg);
-static int command_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
+static int command_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board,
unsigned long arg);
-static int open_dev_ioctl(struct file *filep, gpib_board_t *board, unsigned long arg);
-static int close_dev_ioctl(struct file *filep, gpib_board_t *board, unsigned long arg);
-static int serial_poll_ioctl(gpib_board_t *board, unsigned long arg);
-static int wait_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board, unsigned long arg);
-static int parallel_poll_ioctl(gpib_board_t *board, unsigned long arg);
-static int online_ioctl(gpib_board_t *board, unsigned long arg);
-static int remote_enable_ioctl(gpib_board_t *board, unsigned long arg);
-static int take_control_ioctl(gpib_board_t *board, unsigned long arg);
-static int line_status_ioctl(gpib_board_t *board, unsigned long arg);
-static int pad_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
+static int open_dev_ioctl(struct file *filep, struct gpib_board *board, unsigned long arg);
+static int close_dev_ioctl(struct file *filep, struct gpib_board *board, unsigned long arg);
+static int serial_poll_ioctl(struct gpib_board *board, unsigned long arg);
+static int wait_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board, unsigned long arg);
+static int parallel_poll_ioctl(struct gpib_board *board, unsigned long arg);
+static int online_ioctl(struct gpib_board *board, unsigned long arg);
+static int remote_enable_ioctl(struct gpib_board *board, unsigned long arg);
+static int take_control_ioctl(struct gpib_board *board, unsigned long arg);
+static int line_status_ioctl(struct gpib_board *board, unsigned long arg);
+static int pad_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
unsigned long arg);
-static int sad_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
+static int sad_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
unsigned long arg);
-static int eos_ioctl(gpib_board_t *board, unsigned long arg);
-static int request_service_ioctl(gpib_board_t *board, unsigned long arg);
-static int request_service2_ioctl(gpib_board_t *board, unsigned long arg);
+static int eos_ioctl(struct gpib_board *board, unsigned long arg);
+static int request_service_ioctl(struct gpib_board *board, unsigned long arg);
+static int request_service2_ioctl(struct gpib_board *board, unsigned long arg);
static int iobase_ioctl(gpib_board_config_t *config, unsigned long arg);
static int irq_ioctl(gpib_board_config_t *config, unsigned long arg);
static int dma_ioctl(gpib_board_config_t *config, unsigned long arg);
-static int autospoll_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
+static int autospoll_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
unsigned long arg);
-static int mutex_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
+static int mutex_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
unsigned long arg);
-static int timeout_ioctl(gpib_board_t *board, unsigned long arg);
-static int status_bytes_ioctl(gpib_board_t *board, unsigned long arg);
-static int board_info_ioctl(const gpib_board_t *board, unsigned long arg);
-static int ppc_ioctl(gpib_board_t *board, unsigned long arg);
-static int set_local_ppoll_mode_ioctl(gpib_board_t *board, unsigned long arg);
-static int get_local_ppoll_mode_ioctl(gpib_board_t *board, unsigned long arg);
-static int query_board_rsv_ioctl(gpib_board_t *board, unsigned long arg);
-static int interface_clear_ioctl(gpib_board_t *board, unsigned long arg);
+static int timeout_ioctl(struct gpib_board *board, unsigned long arg);
+static int status_bytes_ioctl(struct gpib_board *board, unsigned long arg);
+static int board_info_ioctl(const struct gpib_board *board, unsigned long arg);
+static int ppc_ioctl(struct gpib_board *board, unsigned long arg);
+static int set_local_ppoll_mode_ioctl(struct gpib_board *board, unsigned long arg);
+static int get_local_ppoll_mode_ioctl(struct gpib_board *board, unsigned long arg);
+static int query_board_rsv_ioctl(struct gpib_board *board, unsigned long arg);
+static int interface_clear_ioctl(struct gpib_board *board, unsigned long arg);
static int select_pci_ioctl(gpib_board_config_t *config, unsigned long arg);
static int select_device_path_ioctl(gpib_board_config_t *config, unsigned long arg);
-static int event_ioctl(gpib_board_t *board, unsigned long arg);
-static int request_system_control_ioctl(gpib_board_t *board, unsigned long arg);
-static int t1_delay_ioctl(gpib_board_t *board, unsigned long arg);
+static int event_ioctl(struct gpib_board *board, unsigned long arg);
+static int request_system_control_ioctl(struct gpib_board *board, unsigned long arg);
+static int t1_delay_ioctl(struct gpib_board *board, unsigned long arg);
-static int cleanup_open_devices(gpib_file_private_t *file_priv, gpib_board_t *board);
+static int cleanup_open_devices(gpib_file_private_t *file_priv, struct gpib_board *board);
-static int pop_gpib_event_nolock(gpib_board_t *board, gpib_event_queue_t *queue, short *event_type);
+static int pop_gpib_event_nolock(struct gpib_board *board, gpib_event_queue_t *queue, short *event_type);
/*
* Timer functions
@@ -79,18 +82,18 @@ static int pop_gpib_event_nolock(gpib_board_t *board, gpib_event_queue_t *queue,
static void watchdog_timeout(struct timer_list *t)
{
- gpib_board_t *board = from_timer(board, t, timer);
+ struct gpib_board *board = from_timer(board, t, timer);
set_bit(TIMO_NUM, &board->status);
wake_up_interruptible(&board->wait);
}
/* install timer interrupt handler */
-void os_start_timer(gpib_board_t *board, unsigned int usec_timeout)
+void os_start_timer(struct gpib_board *board, unsigned int usec_timeout)
/* Starts the timeout task */
{
if (timer_pending(&board->timer)) {
- pr_err("gpib: bug! timer already running?\n");
+ dev_err(board->gpib_dev, "bug! timer already running?\n");
return;
}
clear_bit(TIMO_NUM, &board->status);
@@ -102,14 +105,14 @@ void os_start_timer(gpib_board_t *board, unsigned int usec_timeout)
}
}
-void os_remove_timer(gpib_board_t *board)
+void os_remove_timer(struct gpib_board *board)
/* Removes the timeout task */
{
if (timer_pending(&board->timer))
del_timer_sync(&board->timer);
}
-int io_timed_out(gpib_board_t *board)
+int io_timed_out(struct gpib_board *board)
{
if (test_bit(TIMO_NUM, &board->status))
return 1;
@@ -137,10 +140,10 @@ static void pseudo_irq_handler(struct timer_list *t)
mod_timer(&pseudo_irq->timer, jiffies + pseudo_irq_period());
}
-int gpib_request_pseudo_irq(gpib_board_t *board, irqreturn_t (*handler)(int, void *))
+int gpib_request_pseudo_irq(struct gpib_board *board, irqreturn_t (*handler)(int, void *))
{
if (timer_pending(&board->pseudo_irq.timer) || board->pseudo_irq.handler) {
- pr_err("gpib: only one pseudo interrupt per board allowed\n");
+ dev_err(board->gpib_dev, "only one pseudo interrupt per board allowed\n");
return -1;
}
@@ -156,7 +159,7 @@ int gpib_request_pseudo_irq(gpib_board_t *board, irqreturn_t (*handler)(int, voi
}
EXPORT_SYMBOL(gpib_request_pseudo_irq);
-void gpib_free_pseudo_irq(gpib_board_t *board)
+void gpib_free_pseudo_irq(struct gpib_board *board)
{
atomic_set(&board->pseudo_irq.active, 0);
@@ -175,7 +178,7 @@ unsigned int num_status_bytes(const gpib_status_queue_t *dev)
}
// push status byte onto back of status byte fifo
-int push_status_byte(gpib_board_t *board, gpib_status_queue_t *device, u8 poll_byte)
+int push_status_byte(struct gpib_board *board, gpib_status_queue_t *device, u8 poll_byte)
{
struct list_head *head = &device->status_bytes;
status_byte_t *status;
@@ -209,7 +212,7 @@ int push_status_byte(gpib_board_t *board, gpib_status_queue_t *device, u8 poll_b
}
// pop status byte from front of status byte fifo
-int pop_status_byte(gpib_board_t *board, gpib_status_queue_t *device, u8 *poll_byte)
+int pop_status_byte(struct gpib_board *board, gpib_status_queue_t *device, u8 *poll_byte)
{
struct list_head *head = &device->status_bytes;
struct list_head *front = head->next;
@@ -240,7 +243,7 @@ int pop_status_byte(gpib_board_t *board, gpib_status_queue_t *device, u8 *poll_b
return 0;
}
-gpib_status_queue_t *get_gpib_status_queue(gpib_board_t *board, unsigned int pad, int sad)
+gpib_status_queue_t *get_gpib_status_queue(struct gpib_board *board, unsigned int pad, int sad)
{
gpib_status_queue_t *device;
struct list_head *list_ptr;
@@ -255,13 +258,11 @@ gpib_status_queue_t *get_gpib_status_queue(gpib_board_t *board, unsigned int pad
return NULL;
}
-int get_serial_poll_byte(gpib_board_t *board, unsigned int pad, int sad, unsigned int usec_timeout,
+int get_serial_poll_byte(struct gpib_board *board, unsigned int pad, int sad, unsigned int usec_timeout,
uint8_t *poll_byte)
{
gpib_status_queue_t *device;
- dev_dbg(board->gpib_dev, "%s:()\n", __func__);
-
device = get_gpib_status_queue(board, pad, sad);
if (num_status_bytes(device))
return pop_status_byte(board, device, poll_byte);
@@ -269,11 +270,10 @@ int get_serial_poll_byte(gpib_board_t *board, unsigned int pad, int sad, unsigne
return dvrsp(board, pad, sad, usec_timeout, poll_byte);
}
-int autopoll_all_devices(gpib_board_t *board)
+int autopoll_all_devices(struct gpib_board *board)
{
int retval;
- dev_dbg(board->gpib_dev, "entering %s()\n", __func__);
if (mutex_lock_interruptible(&board->user_mutex))
return -ERESTARTSYS;
if (mutex_lock_interruptible(&board->big_gpib_mutex)) {
@@ -290,7 +290,7 @@ int autopoll_all_devices(gpib_board_t *board)
return retval;
}
- dev_dbg(board->gpib_dev, "%s complete\n", __func__);
+ dev_dbg(board->gpib_dev, "complete\n");
/* need to wake wait queue in case someone is
* waiting on RQS
*/
@@ -301,15 +301,13 @@ int autopoll_all_devices(gpib_board_t *board)
return retval;
}
-static int setup_serial_poll(gpib_board_t *board, unsigned int usec_timeout)
+static int setup_serial_poll(struct gpib_board *board, unsigned int usec_timeout)
{
u8 cmd_string[8];
int i;
size_t bytes_written;
int ret;
- dev_dbg(board->gpib_dev, "entering %s()\n", __func__);
-
os_start_timer(board, usec_timeout);
ret = ibcac(board, 1, 1);
if (ret < 0) {
@@ -326,7 +324,7 @@ static int setup_serial_poll(gpib_board_t *board, unsigned int usec_timeout)
ret = board->interface->command(board, cmd_string, i, &bytes_written);
if (ret < 0 || bytes_written < i) {
- pr_err("gpib: failed to setup serial poll\n");
+ dev_dbg(board->gpib_dev, "failed to setup serial poll\n");
os_remove_timer(board);
return -EIO;
}
@@ -335,7 +333,7 @@ static int setup_serial_poll(gpib_board_t *board, unsigned int usec_timeout)
return 0;
}
-static int read_serial_poll_byte(gpib_board_t *board, unsigned int pad,
+static int read_serial_poll_byte(struct gpib_board *board, unsigned int pad,
int sad, unsigned int usec_timeout, uint8_t *result)
{
u8 cmd_string[8];
@@ -344,7 +342,7 @@ static int read_serial_poll_byte(gpib_board_t *board, unsigned int pad,
int i;
size_t nbytes;
- dev_dbg(board->gpib_dev, "entering %s(), pad=%i sad=%i\n", __func__, pad, sad);
+ dev_dbg(board->gpib_dev, "entering pad=%i sad=%i\n", pad, sad);
os_start_timer(board, usec_timeout);
ret = ibcac(board, 1, 1);
@@ -361,7 +359,7 @@ static int read_serial_poll_byte(gpib_board_t *board, unsigned int pad,
ret = board->interface->command(board, cmd_string, i, &nbytes);
if (ret < 0 || nbytes < i) {
- pr_err("gpib: failed to setup serial poll\n");
+ dev_err(board->gpib_dev, "failed to setup serial poll\n");
os_remove_timer(board);
return -EIO;
}
@@ -371,7 +369,7 @@ static int read_serial_poll_byte(gpib_board_t *board, unsigned int pad,
// read poll result
ret = board->interface->read(board, result, 1, &end_flag, &nbytes);
if (ret < 0 || nbytes < 1) {
- pr_err("gpib: serial poll failed\n");
+ dev_err(board->gpib_dev, "serial poll failed\n");
os_remove_timer(board);
return -EIO;
}
@@ -380,14 +378,12 @@ static int read_serial_poll_byte(gpib_board_t *board, unsigned int pad,
return 0;
}
-static int cleanup_serial_poll(gpib_board_t *board, unsigned int usec_timeout)
+static int cleanup_serial_poll(struct gpib_board *board, unsigned int usec_timeout)
{
u8 cmd_string[8];
int ret;
size_t bytes_written;
- dev_dbg(board->gpib_dev, "entering %s()\n", __func__);
-
os_start_timer(board, usec_timeout);
ret = ibcac(board, 1, 1);
if (ret < 0) {
@@ -399,7 +395,7 @@ static int cleanup_serial_poll(gpib_board_t *board, unsigned int usec_timeout)
cmd_string[1] = UNT;
ret = board->interface->command(board, cmd_string, 2, &bytes_written);
if (ret < 0 || bytes_written < 2) {
- pr_err("gpib: failed to disable serial poll\n");
+ dev_err(board->gpib_dev, "failed to disable serial poll\n");
os_remove_timer(board);
return -EIO;
}
@@ -408,7 +404,7 @@ static int cleanup_serial_poll(gpib_board_t *board, unsigned int usec_timeout)
return 0;
}
-static int serial_poll_single(gpib_board_t *board, unsigned int pad, int sad,
+static int serial_poll_single(struct gpib_board *board, unsigned int pad, int sad,
unsigned int usec_timeout, uint8_t *result)
{
int retval, cleanup_retval;
@@ -426,7 +422,7 @@ static int serial_poll_single(gpib_board_t *board, unsigned int pad, int sad,
return 0;
}
-int serial_poll_all(gpib_board_t *board, unsigned int usec_timeout)
+int serial_poll_all(struct gpib_board *board, unsigned int usec_timeout)
{
int retval = 0;
struct list_head *cur;
@@ -435,8 +431,6 @@ int serial_poll_all(gpib_board_t *board, unsigned int usec_timeout)
u8 result;
unsigned int num_bytes = 0;
- dev_dbg(board->gpib_dev, "entering %s()\n", __func__);
-
head = &board->device_list;
if (head->next == head)
return 0;
@@ -475,19 +469,19 @@ int serial_poll_all(gpib_board_t *board, unsigned int usec_timeout)
* SPD and UNT are sent at the completion of the poll.
*/
-int dvrsp(gpib_board_t *board, unsigned int pad, int sad,
+int dvrsp(struct gpib_board *board, unsigned int pad, int sad,
unsigned int usec_timeout, uint8_t *result)
{
int status = ibstatus(board);
int retval;
if ((status & CIC) == 0) {
- pr_err("gpib: not CIC during serial poll\n");
+ dev_err(board->gpib_dev, "not CIC during serial poll\n");
return -1;
}
if (pad > MAX_GPIB_PRIMARY_ADDRESS || sad > MAX_GPIB_SECONDARY_ADDRESS || sad < -1) {
- pr_err("gpib: bad address for serial poll");
+ dev_err(board->gpib_dev, "bad address for serial poll");
return -1;
}
@@ -527,7 +521,7 @@ static int init_gpib_file_private(gpib_file_private_t *priv)
int ibopen(struct inode *inode, struct file *filep)
{
unsigned int minor = iminor(inode);
- gpib_board_t *board;
+ struct gpib_board *board;
gpib_file_private_t *priv;
if (minor >= GPIB_MAX_NUM_BOARDS) {
@@ -544,20 +538,16 @@ int ibopen(struct inode *inode, struct file *filep)
priv = filep->private_data;
init_gpib_file_private((gpib_file_private_t *)filep->private_data);
- dev_dbg(board->gpib_dev, "pid %i, gpib: opening minor %d\n", current->pid, minor);
-
if (board->use_count == 0) {
int retval;
retval = request_module("gpib%i", minor);
- if (retval) {
- dev_dbg(board->gpib_dev, "pid %i, gpib: request module returned %i\n",
- current->pid, retval);
- }
+ if (retval)
+ dev_dbg(board->gpib_dev, "request module returned %i\n", retval);
}
if (board->interface) {
if (!try_module_get(board->provider_module)) {
- pr_err("gpib: try_module_get() failed\n");
+ dev_err(board->gpib_dev, "try_module_get() failed\n");
return -EIO;
}
board->use_count++;
@@ -569,7 +559,7 @@ int ibopen(struct inode *inode, struct file *filep)
int ibclose(struct inode *inode, struct file *filep)
{
unsigned int minor = iminor(inode);
- gpib_board_t *board;
+ struct gpib_board *board;
gpib_file_private_t *priv = filep->private_data;
gpib_descriptor_t *desc;
@@ -580,21 +570,19 @@ int ibclose(struct inode *inode, struct file *filep)
board = &board_array[minor];
- dev_dbg(board->gpib_dev, "pid %i, closing minor %d\n", current->pid, minor);
-
if (priv) {
desc = handle_to_descriptor(priv, 0);
if (desc) {
if (desc->autopoll_enabled) {
- dev_dbg(board->gpib_dev, "pid %i, decrementing autospollers\n",
- current->pid);
+ dev_dbg(board->gpib_dev, "decrementing autospollers\n");
if (board->autospollers > 0)
board->autospollers--;
else
- pr_err("gpib: Attempt to decrement zero autospollers\n");
+ dev_err(board->gpib_dev,
+ "Attempt to decrement zero autospollers\n");
}
} else {
- pr_err("gpib: Unexpected null gpib_descriptor\n");
+ dev_err(board->gpib_dev, "Unexpected null gpib_descriptor\n");
}
cleanup_open_devices(priv, board);
@@ -617,7 +605,7 @@ int ibclose(struct inode *inode, struct file *filep)
long ibioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
unsigned int minor = iminor(filep->f_path.dentry->d_inode);
- gpib_board_t *board;
+ struct gpib_board *board;
gpib_file_private_t *file_priv = filep->private_data;
long retval = -ENOTTY;
@@ -630,8 +618,8 @@ long ibioctl(struct file *filep, unsigned int cmd, unsigned long arg)
if (mutex_lock_interruptible(&board->big_gpib_mutex))
return -ERESTARTSYS;
- dev_dbg(board->gpib_dev, "pid %i, ioctl %d, interface=%s, use=%d, onl=%d\n",
- current->pid, cmd & 0xff,
+ dev_dbg(board->gpib_dev, "ioctl %d, interface=%s, use=%d, onl=%d\n",
+ cmd & 0xff,
board->interface ? board->interface->name : "",
board->use_count,
board->online);
@@ -647,13 +635,13 @@ long ibioctl(struct file *filep, unsigned int cmd, unsigned long arg)
break;
}
if (!board->interface) {
- pr_err("gpib: no gpib board configured on /dev/gpib%i\n", minor);
+ dev_err(board->gpib_dev, "no gpib board configured\n");
retval = -ENODEV;
goto done;
}
if (file_priv->got_module == 0) {
if (!try_module_get(board->provider_module)) {
- pr_err("gpib: try_module_get() failed\n");
+ dev_err(board->gpib_dev, "try_module_get() failed\n");
retval = -EIO;
goto done;
}
@@ -699,8 +687,6 @@ long ibioctl(struct file *filep, unsigned int cmd, unsigned long arg)
}
if (!board->online) {
- pr_err("gpib: ioctl %i invalid for offline board\n",
- cmd & 0xff);
retval = -EINVAL;
goto done;
}
@@ -737,8 +723,6 @@ long ibioctl(struct file *filep, unsigned int cmd, unsigned long arg)
spin_lock(&board->locking_pid_spinlock);
if (current->pid != board->locking_pid) {
spin_unlock(&board->locking_pid_spinlock);
- pr_err("gpib: need to hold board lock to perform ioctl %i\n",
- cmd & 0xff);
retval = -EPERM;
goto done;
}
@@ -822,7 +806,7 @@ done:
return retval;
}
-static int board_type_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board, unsigned long arg)
+static int board_type_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board, unsigned long arg)
{
struct list_head *list_ptr;
board_type_ioctl_t cmd;
@@ -830,10 +814,8 @@ static int board_type_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (board->online) {
- pr_err("gpib: can't change board type while board is online.\n");
+ if (board->online)
return -EBUSY;
- }
retval = copy_from_user(&cmd, (void __user *)arg, sizeof(board_type_ioctl_t));
if (retval)
@@ -875,7 +857,7 @@ static int board_type_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
return -EINVAL;
}
-static int read_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
+static int read_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board,
unsigned long arg)
{
read_write_ioctl_t read_cmd;
@@ -951,7 +933,7 @@ static int read_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
}
static int command_ioctl(gpib_file_private_t *file_priv,
- gpib_board_t *board, unsigned long arg)
+ struct gpib_board *board, unsigned long arg)
{
read_write_ioctl_t cmd;
u8 __user *userbuf;
@@ -1034,7 +1016,7 @@ static int command_ioctl(gpib_file_private_t *file_priv,
return retval;
}
-static int write_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
+static int write_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board,
unsigned long arg)
{
read_write_ioctl_t write_cmd;
@@ -1105,7 +1087,7 @@ static int write_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
return retval;
}
-static int status_bytes_ioctl(gpib_board_t *board, unsigned long arg)
+static int status_bytes_ioctl(struct gpib_board *board, unsigned long arg)
{
gpib_status_queue_t *device;
spoll_bytes_ioctl_t cmd;
@@ -1128,7 +1110,7 @@ static int status_bytes_ioctl(gpib_board_t *board, unsigned long arg)
return 0;
}
-static int increment_open_device_count(gpib_board_t *board, struct list_head *head,
+static int increment_open_device_count(struct gpib_board *board, struct list_head *head,
unsigned int pad, int sad)
{
struct list_head *list_ptr;
@@ -1140,8 +1122,8 @@ static int increment_open_device_count(gpib_board_t *board, struct list_head *he
for (list_ptr = head->next; list_ptr != head; list_ptr = list_ptr->next) {
device = list_entry(list_ptr, gpib_status_queue_t, list);
if (gpib_address_equal(device->pad, device->sad, pad, sad)) {
- dev_dbg(board->gpib_dev, "pid %i, incrementing open count for pad %i, sad %i\n",
- current->pid, device->pad, device->sad);
+ dev_dbg(board->gpib_dev, "incrementing open count for pad %i, sad %i\n",
+ device->pad, device->sad);
device->reference_count++;
return 0;
}
@@ -1158,13 +1140,12 @@ static int increment_open_device_count(gpib_board_t *board, struct list_head *he
list_add(&device->list, head);
- dev_dbg(board->gpib_dev, "pid %i, opened pad %i, sad %i\n",
- current->pid, device->pad, device->sad);
+ dev_dbg(board->gpib_dev, "opened pad %i, sad %i\n", device->pad, device->sad);
return 0;
}
-static int subtract_open_device_count(gpib_board_t *board, struct list_head *head,
+static int subtract_open_device_count(struct gpib_board *board, struct list_head *head,
unsigned int pad, int sad, unsigned int count)
{
gpib_status_queue_t *device;
@@ -1173,33 +1154,33 @@ static int subtract_open_device_count(gpib_board_t *board, struct list_head *hea
for (list_ptr = head->next; list_ptr != head; list_ptr = list_ptr->next) {
device = list_entry(list_ptr, gpib_status_queue_t, list);
if (gpib_address_equal(device->pad, device->sad, pad, sad)) {
- dev_dbg(board->gpib_dev, "pid %i, decrementing open count for pad %i, sad %i\n",
- current->pid, device->pad, device->sad);
+ dev_dbg(board->gpib_dev, "decrementing open count for pad %i, sad %i\n",
+ device->pad, device->sad);
if (count > device->reference_count) {
- pr_err("gpib: bug! in %s()\n", __func__);
+ dev_err(board->gpib_dev, "bug! in %s()\n", __func__);
return -EINVAL;
}
device->reference_count -= count;
if (device->reference_count == 0) {
- dev_dbg(board->gpib_dev, "pid %i, closing pad %i, sad %i\n",
- current->pid, device->pad, device->sad);
+ dev_dbg(board->gpib_dev, "closing pad %i, sad %i\n",
+ device->pad, device->sad);
list_del(list_ptr);
kfree(device);
}
return 0;
}
}
- pr_err("gpib: bug! tried to close address that was never opened!\n");
+ dev_err(board->gpib_dev, "bug! tried to close address that was never opened!\n");
return -EINVAL;
}
-static inline int decrement_open_device_count(gpib_board_t *board, struct list_head *head,
+static inline int decrement_open_device_count(struct gpib_board *board, struct list_head *head,
unsigned int pad, int sad)
{
return subtract_open_device_count(board, head, pad, sad, 1);
}
-static int cleanup_open_devices(gpib_file_private_t *file_priv, gpib_board_t *board)
+static int cleanup_open_devices(gpib_file_private_t *file_priv, struct gpib_board *board)
{
int retval = 0;
int i;
@@ -1224,7 +1205,7 @@ static int cleanup_open_devices(gpib_file_private_t *file_priv, gpib_board_t *bo
return 0;
}
-static int open_dev_ioctl(struct file *filep, gpib_board_t *board, unsigned long arg)
+static int open_dev_ioctl(struct file *filep, struct gpib_board *board, unsigned long arg)
{
open_dev_ioctl_t open_dev_cmd;
int retval;
@@ -1274,7 +1255,7 @@ static int open_dev_ioctl(struct file *filep, gpib_board_t *board, unsigned long
return 0;
}
-static int close_dev_ioctl(struct file *filep, gpib_board_t *board, unsigned long arg)
+static int close_dev_ioctl(struct file *filep, struct gpib_board *board, unsigned long arg)
{
close_dev_ioctl_t cmd;
gpib_file_private_t *file_priv = filep->private_data;
@@ -1301,13 +1282,11 @@ static int close_dev_ioctl(struct file *filep, gpib_board_t *board, unsigned lon
return 0;
}
-static int serial_poll_ioctl(gpib_board_t *board, unsigned long arg)
+static int serial_poll_ioctl(struct gpib_board *board, unsigned long arg)
{
serial_poll_ioctl_t serial_cmd;
int retval;
- dev_dbg(board->gpib_dev, "pid %i, entering %s()\n", current->pid, __func__);
-
retval = copy_from_user(&serial_cmd, (void __user *)arg, sizeof(serial_cmd));
if (retval)
return -EFAULT;
@@ -1324,7 +1303,7 @@ static int serial_poll_ioctl(gpib_board_t *board, unsigned long arg)
return 0;
}
-static int wait_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
+static int wait_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board,
unsigned long arg)
{
wait_ioctl_t wait_cmd;
@@ -1351,7 +1330,7 @@ static int wait_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
return 0;
}
-static int parallel_poll_ioctl(gpib_board_t *board, unsigned long arg)
+static int parallel_poll_ioctl(struct gpib_board *board, unsigned long arg)
{
u8 poll_byte;
int retval;
@@ -1367,7 +1346,7 @@ static int parallel_poll_ioctl(gpib_board_t *board, unsigned long arg)
return 0;
}
-static int online_ioctl(gpib_board_t *board, unsigned long arg)
+static int online_ioctl(struct gpib_board *board, unsigned long arg)
{
online_ioctl_t online_cmd;
int retval;
@@ -1411,7 +1390,7 @@ static int online_ioctl(gpib_board_t *board, unsigned long arg)
return retval;
}
-static int remote_enable_ioctl(gpib_board_t *board, unsigned long arg)
+static int remote_enable_ioctl(struct gpib_board *board, unsigned long arg)
{
int enable;
int retval;
@@ -1423,7 +1402,7 @@ static int remote_enable_ioctl(gpib_board_t *board, unsigned long arg)
return ibsre(board, enable);
}
-static int take_control_ioctl(gpib_board_t *board, unsigned long arg)
+static int take_control_ioctl(struct gpib_board *board, unsigned long arg)
{
int synchronous;
int retval;
@@ -1435,7 +1414,7 @@ static int take_control_ioctl(gpib_board_t *board, unsigned long arg)
return ibcac(board, synchronous, 1);
}
-static int line_status_ioctl(gpib_board_t *board, unsigned long arg)
+static int line_status_ioctl(struct gpib_board *board, unsigned long arg)
{
short lines;
int retval;
@@ -1451,7 +1430,7 @@ static int line_status_ioctl(gpib_board_t *board, unsigned long arg)
return 0;
}
-static int pad_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
+static int pad_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
unsigned long arg)
{
pad_ioctl_t cmd;
@@ -1487,7 +1466,7 @@ static int pad_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
return 0;
}
-static int sad_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
+static int sad_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
unsigned long arg)
{
sad_ioctl_t cmd;
@@ -1522,7 +1501,7 @@ static int sad_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
return 0;
}
-static int eos_ioctl(gpib_board_t *board, unsigned long arg)
+static int eos_ioctl(struct gpib_board *board, unsigned long arg)
{
eos_ioctl_t eos_cmd;
int retval;
@@ -1534,7 +1513,7 @@ static int eos_ioctl(gpib_board_t *board, unsigned long arg)
return ibeos(board, eos_cmd.eos, eos_cmd.eos_flags);
}
-static int request_service_ioctl(gpib_board_t *board, unsigned long arg)
+static int request_service_ioctl(struct gpib_board *board, unsigned long arg)
{
u8 status_byte;
int retval;
@@ -1546,7 +1525,7 @@ static int request_service_ioctl(gpib_board_t *board, unsigned long arg)
return ibrsv2(board, status_byte, status_byte & request_service_bit);
}
-static int request_service2_ioctl(gpib_board_t *board, unsigned long arg)
+static int request_service2_ioctl(struct gpib_board *board, unsigned long arg)
{
request_service2_t request_service2_cmd;
int retval;
@@ -1613,7 +1592,7 @@ static int dma_ioctl(gpib_board_config_t *config, unsigned long arg)
return 0;
}
-static int autospoll_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
+static int autospoll_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
unsigned long arg)
{
autospoll_ioctl_t enable;
@@ -1639,18 +1618,19 @@ static int autospoll_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
board->autospollers--;
retval = 0;
} else {
- pr_err("gpib: tried to set number of autospollers negative\n");
+ dev_err(board->gpib_dev,
+ "tried to set number of autospollers negative\n");
retval = -EINVAL;
}
} else {
- pr_err("gpib: autopoll disable requested before enable\n");
+ dev_err(board->gpib_dev, "autopoll disable requested before enable\n");
retval = -EINVAL;
}
}
return retval;
}
-static int mutex_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
+static int mutex_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
unsigned long arg)
{
int retval, lock_mutex;
@@ -1661,10 +1641,8 @@ static int mutex_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
if (lock_mutex) {
retval = mutex_lock_interruptible(&board->user_mutex);
- if (retval) {
- pr_warn("gpib: ioctl interrupted while waiting on lock\n");
+ if (retval)
return -ERESTARTSYS;
- }
spin_lock(&board->locking_pid_spinlock);
board->locking_pid = current->pid;
@@ -1672,13 +1650,12 @@ static int mutex_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
atomic_set(&file_priv->holding_mutex, 1);
- dev_dbg(board->gpib_dev, "pid %i, locked board %d mutex\n",
- current->pid, board->minor);
+ dev_dbg(board->gpib_dev, "locked board mutex\n");
} else {
spin_lock(&board->locking_pid_spinlock);
if (current->pid != board->locking_pid) {
- pr_err("gpib: bug! pid %i tried to release mutex held by pid %i\n",
- current->pid, board->locking_pid);
+ dev_err(board->gpib_dev, "bug! pid %i tried to release mutex held by pid %i\n",
+ current->pid, board->locking_pid);
spin_unlock(&board->locking_pid_spinlock);
return -EPERM;
}
@@ -1688,13 +1665,12 @@ static int mutex_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
atomic_set(&file_priv->holding_mutex, 0);
mutex_unlock(&board->user_mutex);
- dev_dbg(board->gpib_dev, "pid %i, unlocked board %i mutex\n",
- current->pid, board->minor);
+ dev_dbg(board->gpib_dev, "unlocked board mutex\n");
}
return 0;
}
-static int timeout_ioctl(gpib_board_t *board, unsigned long arg)
+static int timeout_ioctl(struct gpib_board *board, unsigned long arg)
{
unsigned int timeout;
int retval;
@@ -1704,12 +1680,12 @@ static int timeout_ioctl(gpib_board_t *board, unsigned long arg)
return -EFAULT;
board->usec_timeout = timeout;
- dev_dbg(board->gpib_dev, "pid %i, timeout set to %i usec\n", current->pid, timeout);
+ dev_dbg(board->gpib_dev, "timeout set to %i usec\n", timeout);
return 0;
}
-static int ppc_ioctl(gpib_board_t *board, unsigned long arg)
+static int ppc_ioctl(struct gpib_board *board, unsigned long arg)
{
ppoll_config_ioctl_t cmd;
int retval;
@@ -1735,7 +1711,7 @@ static int ppc_ioctl(gpib_board_t *board, unsigned long arg)
return 0;
}
-static int set_local_ppoll_mode_ioctl(gpib_board_t *board, unsigned long arg)
+static int set_local_ppoll_mode_ioctl(struct gpib_board *board, unsigned long arg)
{
local_ppoll_mode_ioctl_t cmd;
int retval;
@@ -1744,17 +1720,15 @@ static int set_local_ppoll_mode_ioctl(gpib_board_t *board, unsigned long arg)
if (retval)
return -EFAULT;
- if (!board->interface->local_parallel_poll_mode) {
- pr_warn("gpib: local/remote parallel poll mode not supported by driver.");
- return -EIO;
- }
+ if (!board->interface->local_parallel_poll_mode)
+ return -ENOENT;
board->local_ppoll_mode = cmd != 0;
board->interface->local_parallel_poll_mode(board, board->local_ppoll_mode);
return 0;
}
-static int get_local_ppoll_mode_ioctl(gpib_board_t *board, unsigned long arg)
+static int get_local_ppoll_mode_ioctl(struct gpib_board *board, unsigned long arg)
{
local_ppoll_mode_ioctl_t cmd;
int retval;
@@ -1767,7 +1741,7 @@ static int get_local_ppoll_mode_ioctl(gpib_board_t *board, unsigned long arg)
return 0;
}
-static int query_board_rsv_ioctl(gpib_board_t *board, unsigned long arg)
+static int query_board_rsv_ioctl(struct gpib_board *board, unsigned long arg)
{
int status;
int retval;
@@ -1781,7 +1755,7 @@ static int query_board_rsv_ioctl(gpib_board_t *board, unsigned long arg)
return 0;
}
-static int board_info_ioctl(const gpib_board_t *board, unsigned long arg)
+static int board_info_ioctl(const struct gpib_board *board, unsigned long arg)
{
board_info_ioctl_t info;
int retval;
@@ -1804,7 +1778,7 @@ static int board_info_ioctl(const gpib_board_t *board, unsigned long arg)
return 0;
}
-static int interface_clear_ioctl(gpib_board_t *board, unsigned long arg)
+static int interface_clear_ioctl(struct gpib_board *board, unsigned long arg)
{
unsigned int usec_duration;
int retval;
@@ -1867,7 +1841,7 @@ unsigned int num_gpib_events(const gpib_event_queue_t *queue)
return queue->num_events;
}
-static int push_gpib_event_nolock(gpib_board_t *board, short event_type)
+static int push_gpib_event_nolock(struct gpib_board *board, short event_type)
{
gpib_event_queue_t *queue = &board->event_queue;
struct list_head *head = &queue->event_head;
@@ -1887,7 +1861,7 @@ static int push_gpib_event_nolock(gpib_board_t *board, short event_type)
event = kmalloc(sizeof(gpib_event_t), GFP_ATOMIC);
if (!event) {
queue->dropped_event = 1;
- pr_err("gpib: failed to allocate memory for event\n");
+ dev_err(board->gpib_dev, "failed to allocate memory for event\n");
return -ENOMEM;
}
@@ -1905,7 +1879,7 @@ static int push_gpib_event_nolock(gpib_board_t *board, short event_type)
}
// push event onto back of event queue
-int push_gpib_event(gpib_board_t *board, short event_type)
+int push_gpib_event(struct gpib_board *board, short event_type)
{
unsigned long flags;
int retval;
@@ -1923,7 +1897,7 @@ int push_gpib_event(gpib_board_t *board, short event_type)
}
EXPORT_SYMBOL(push_gpib_event);
-static int pop_gpib_event_nolock(gpib_board_t *board, gpib_event_queue_t *queue, short *event_type)
+static int pop_gpib_event_nolock(struct gpib_board *board, gpib_event_queue_t *queue, short *event_type)
{
struct list_head *head = &queue->event_head;
struct list_head *front = head->next;
@@ -1957,7 +1931,7 @@ static int pop_gpib_event_nolock(gpib_board_t *board, gpib_event_queue_t *queue,
}
// pop event from front of event queue
-int pop_gpib_event(gpib_board_t *board, gpib_event_queue_t *queue, short *event_type)
+int pop_gpib_event(struct gpib_board *board, gpib_event_queue_t *queue, short *event_type)
{
unsigned long flags;
int retval;
@@ -1968,7 +1942,7 @@ int pop_gpib_event(gpib_board_t *board, gpib_event_queue_t *queue, short *event_
return retval;
}
-static int event_ioctl(gpib_board_t *board, unsigned long arg)
+static int event_ioctl(struct gpib_board *board, unsigned long arg)
{
event_ioctl_t user_event;
int retval;
@@ -1987,7 +1961,7 @@ static int event_ioctl(gpib_board_t *board, unsigned long arg)
return 0;
}
-static int request_system_control_ioctl(gpib_board_t *board, unsigned long arg)
+static int request_system_control_ioctl(struct gpib_board *board, unsigned long arg)
{
rsc_ioctl_t request_control;
int retval;
@@ -2001,16 +1975,14 @@ static int request_system_control_ioctl(gpib_board_t *board, unsigned long arg)
return 0;
}
-static int t1_delay_ioctl(gpib_board_t *board, unsigned long arg)
+static int t1_delay_ioctl(struct gpib_board *board, unsigned long arg)
{
t1_delay_ioctl_t cmd;
unsigned int delay;
int retval;
- if (!board->interface->t1_delay) {
- pr_warn("gpib: t1 delay not implemented in driver!\n");
- return -EIO;
- }
+ if (!board->interface->t1_delay)
+ return -ENOENT;
retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
if (retval)
@@ -2018,8 +1990,11 @@ static int t1_delay_ioctl(gpib_board_t *board, unsigned long arg)
delay = cmd;
- board->t1_nano_sec = board->interface->t1_delay(board, delay);
+ retval = board->interface->t1_delay(board, delay);
+ if (retval < 0)
+ return retval;
+ board->t1_nano_sec = retval;
return 0;
}
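[editor's note] The reworked t1_delay_ioctl() above shows the error-handling convention this series moves to: the driver callback returns either a negative errno or the delay actually programmed, and the caller commits board state only after checking the sign. A sketch of that calling convention under the same assumption (demo_* names are illustrative, not the real driver API):

#include <linux/errno.h>

struct demo_board {
	unsigned int t1_nano_sec;
};

/* Returns the T1 delay actually in effect, in ns, or a negative errno. */
static int demo_t1_delay(struct demo_board *board, unsigned int nano_sec)
{
	if (nano_sec < 350)
		return -EINVAL;		/* unsupported by the hardware */
	/* ... program the chip ... */
	return nano_sec;
}

static int demo_set_t1(struct demo_board *board, unsigned int nano_sec)
{
	int retval = demo_t1_delay(board, nano_sec);

	if (retval < 0)			/* propagate, leave state untouched */
		return retval;
	board->t1_nano_sec = retval;	/* commit only on success */
	return 0;
}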
@@ -2032,7 +2007,7 @@ static const struct file_operations ib_fops = {
.release = &ibclose,
};
-gpib_board_t board_array[GPIB_MAX_NUM_BOARDS];
+struct gpib_board board_array[GPIB_MAX_NUM_BOARDS];
LIST_HEAD(registered_drivers);
@@ -2067,7 +2042,7 @@ void gpib_unregister_driver(gpib_interface_t *interface)
struct list_head *list_ptr;
for (i = 0; i < GPIB_MAX_NUM_BOARDS; i++) {
- gpib_board_t *board = &board_array[i];
+ struct gpib_board *board = &board_array[i];
if (board->interface == interface) {
if (board->use_count > 0)
@@ -2087,7 +2062,6 @@ void gpib_unregister_driver(gpib_interface_t *interface)
kfree(entry);
}
}
- pr_info("gpib: unregistered %s interface\n", interface->name);
}
EXPORT_SYMBOL(gpib_unregister_driver);
@@ -2098,7 +2072,7 @@ static void init_gpib_board_config(gpib_board_config_t *config)
config->pci_slot = -1;
}
-void init_gpib_board(gpib_board_t *board)
+void init_gpib_board(struct gpib_board *board)
{
board->interface = NULL;
board->provider_module = NULL;
@@ -2133,7 +2107,7 @@ void init_gpib_board(gpib_board_t *board)
board->local_ppoll_mode = 0;
}
-int gpib_allocate_board(gpib_board_t *board)
+int gpib_allocate_board(struct gpib_board *board)
{
if (!board->buffer) {
board->buffer_length = 0x4000;
@@ -2146,7 +2120,7 @@ int gpib_allocate_board(gpib_board_t *board)
return 0;
}
-void gpib_deallocate_board(gpib_board_t *board)
+void gpib_deallocate_board(struct gpib_board *board)
{
short dummy;
@@ -2159,7 +2133,7 @@ void gpib_deallocate_board(gpib_board_t *board)
pop_gpib_event(board, &board->event_queue, &dummy);
}
-static void init_board_array(gpib_board_t *board_array, unsigned int length)
+static void init_board_array(struct gpib_board *board_array, unsigned int length)
{
int i;
@@ -2184,7 +2158,7 @@ static int __init gpib_common_init_module(void)
{
int i;
- pr_info("Linux-GPIB core driver\n");
+ pr_info("GPIB core driver\n");
init_board_array(board_array, GPIB_MAX_NUM_BOARDS);
if (register_chrdev(GPIB_CODE, "gpib", &ib_fops)) {
pr_err("gpib: can't get major %d\n", GPIB_CODE);
diff --git a/drivers/staging/gpib/common/iblib.c b/drivers/staging/gpib/common/iblib.c
index 5f6fa135f505..6cca8a49e839 100644
--- a/drivers/staging/gpib/common/iblib.c
+++ b/drivers/staging/gpib/common/iblib.c
@@ -4,6 +4,8 @@
* copyright : (C) 2001, 2002 by Frank Mori Hess
***************************************************************************/
+#define dev_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ibsys.h"
#include <linux/delay.h>
#include <linux/kthread.h>
@@ -19,15 +21,13 @@
* If fallback_to_async is non-zero, try to take control asynchronously
* if the synchronous attempt fails.
*/
-int ibcac(gpib_board_t *board, int sync, int fallback_to_async)
+int ibcac(struct gpib_board *board, int sync, int fallback_to_async)
{
int status = ibstatus(board);
int retval;
- if ((status & CIC) == 0) {
- pr_err("gpib: not CIC during %s()\n", __func__);
- return -1;
- }
+ if ((status & CIC) == 0)
+ return -EINVAL;
if (status & ATN)
return 0;
@@ -61,7 +61,7 @@ int ibcac(gpib_board_t *board, int sync, int fallback_to_async)
* set the skip_check_for_command_acceptors flag in their
* gpib_interface_struct to avoid useless overhead.
*/
-static int check_for_command_acceptors(gpib_board_t *board)
+static int check_for_command_acceptors(struct gpib_board *board)
{
int lines;
@@ -76,15 +76,8 @@ static int check_for_command_acceptors(gpib_board_t *board)
if (lines < 0)
return lines;
- if (lines & ValidATN) {
- if ((lines & BusATN) == 0) {
- pr_err("gpib: ATN not asserted in %s()?", __func__);
- return 0;
- }
- }
-
- if ((lines & ValidNRFD) && (lines & ValidNDAC)) {
- if ((lines & BusNRFD) == 0 && (lines & BusNDAC) == 0)
+ if ((lines & VALID_NRFD) && (lines & VALID_NDAC)) {
+ if ((lines & BUS_NRFD) == 0 && (lines & BUS_NDAC) == 0)
return -ENOTCONN;
}
@@ -103,7 +96,7 @@ static int check_for_command_acceptors(gpib_board_t *board)
* must be called to initialize the GPIB and enable
* the interface to leave the controller idle state.
*/
-int ibcmd(gpib_board_t *board, uint8_t *buf, size_t length, size_t *bytes_written)
+int ibcmd(struct gpib_board *board, uint8_t *buf, size_t length, size_t *bytes_written)
{
ssize_t ret = 0;
int status;
@@ -112,10 +105,8 @@ int ibcmd(gpib_board_t *board, uint8_t *buf, size_t length, size_t *bytes_writte
status = ibstatus(board);
- if ((status & CIC) == 0) {
- pr_err("gpib: cannot send command when not controller-in-charge\n");
- return -EIO;
- }
+ if ((status & CIC) == 0)
+ return -EINVAL;
os_start_timer(board, board->usec_timeout);
@@ -140,26 +131,22 @@ int ibcmd(gpib_board_t *board, uint8_t *buf, size_t length, size_t *bytes_writte
* active state, i.e., turn ATN off.
*/
-int ibgts(gpib_board_t *board)
+int ibgts(struct gpib_board *board)
{
int status = ibstatus(board);
int retval;
- if ((status & CIC) == 0) {
- pr_err("gpib: not CIC during %s()\n", __func__);
- return -1;
- }
+ if ((status & CIC) == 0)
+ return -EINVAL;
retval = board->interface->go_to_standby(board); /* go to standby */
- if (retval < 0)
- pr_err("gpib: error while going to standby\n");
board->interface->update_status(board, 0);
return retval;
}
-static int autospoll_wait_should_wake_up(gpib_board_t *board)
+static int autospoll_wait_should_wake_up(struct gpib_board *board)
{
int retval;
@@ -175,7 +162,7 @@ static int autospoll_wait_should_wake_up(gpib_board_t *board)
static int autospoll_thread(void *board_void)
{
- gpib_board_t *board = board_void;
+ struct gpib_board *board = board_void;
int retval = 0;
dev_dbg(board->gpib_dev, "entering autospoll thread\n");
@@ -200,20 +187,19 @@ static int autospoll_thread(void *board_void)
retval = autopoll_all_devices(board);
module_put(board->provider_module);
} else {
- pr_err("gpib%i: %s: try_module_get() failed!\n", board->minor, __func__);
+ dev_err(board->gpib_dev, "try_module_get() failed!\n");
}
if (retval <= 0) {
- pr_err("gpib%i: %s: stuck SRQ\n", board->minor, __func__);
+ dev_err(board->gpib_dev, "stuck SRQ\n");
atomic_set(&board->stuck_srq, 1); // XXX could be better
set_bit(SRQI_NUM, &board->status);
}
}
- pr_info("gpib%i: exiting autospoll thread\n", board->minor);
return retval;
}
-int ibonline(gpib_board_t *board)
+int ibonline(struct gpib_board *board)
{
int retval;
@@ -230,7 +216,6 @@ int ibonline(gpib_board_t *board)
retval = board->interface->attach(board, &board->config);
if (retval < 0) {
board->interface->detach(board);
- pr_err("gpib: interface attach failed\n");
return retval;
}
/* nios2nommu on 2.6.11 uclinux kernel has weird problems
@@ -241,19 +226,19 @@ int ibonline(gpib_board_t *board)
"gpib%d_autospoll_kthread", board->minor);
retval = IS_ERR(board->autospoll_task);
if (retval) {
- pr_err("gpib: failed to create autospoll thread\n");
+ dev_err(board->gpib_dev, "failed to create autospoll thread\n");
board->interface->detach(board);
return retval;
}
#endif
board->online = 1;
- dev_dbg(board->gpib_dev, "gpib: board online\n");
+ dev_dbg(board->gpib_dev, "board online\n");
return 0;
}
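[editor's note] ibonline() starts the autospoll worker with kthread_run() and tests the result with IS_ERR(). Note that IS_ERR() yields only 0 or 1; the usual upstream idiom converts the error pointer back to a real errno with PTR_ERR() before returning it. A sketch of that idiom (demo_worker is hypothetical):

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static int demo_worker(void *data)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();	/* sleep until woken or stopped */
	}
	return 0;
}

static int demo_start(void *board)
{
	struct task_struct *task;

	task = kthread_run(demo_worker, board, "demo_kthread");
	if (IS_ERR(task))
		return PTR_ERR(task);	/* e.g. -ENOMEM, not just 1 */
	return 0;
}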
/* XXX need to make sure board is generally not in use (grab board lock?) */
-int iboffline(gpib_board_t *board)
+int iboffline(struct gpib_board *board)
{
int retval;
@@ -265,14 +250,14 @@ int iboffline(gpib_board_t *board)
if (board->autospoll_task && !IS_ERR(board->autospoll_task)) {
retval = kthread_stop(board->autospoll_task);
if (retval)
- pr_err("gpib: kthread_stop returned %i\n", retval);
+ dev_err(board->gpib_dev, "kthread_stop returned %i\n", retval);
board->autospoll_task = NULL;
}
board->interface->detach(board);
gpib_deallocate_board(board);
board->online = 0;
- dev_dbg(board->gpib_dev, "gpib: board offline\n");
+ dev_dbg(board->gpib_dev, "board offline\n");
return 0;
}
@@ -285,7 +270,7 @@ int iboffline(gpib_board_t *board)
* Next LSB (bits 8-15) - STATUS lines mask (lines that are currently set).
*
*/
-int iblines(const gpib_board_t *board, short *lines)
+int iblines(const struct gpib_board *board, short *lines)
{
int retval;
@@ -312,7 +297,7 @@ int iblines(const gpib_board_t *board, short *lines)
* calling ibcmd.
*/
-int ibrd(gpib_board_t *board, uint8_t *buf, size_t length, int *end_flag, size_t *nbytes)
+int ibrd(struct gpib_board *board, uint8_t *buf, size_t length, int *end_flag, size_t *nbytes)
{
ssize_t ret = 0;
int retval;
@@ -320,10 +305,8 @@ int ibrd(gpib_board_t *board, uint8_t *buf, size_t length, int *end_flag, size_t
*nbytes = 0;
*end_flag = 0;
- if (length == 0) {
- pr_warn("gpib: %s() called with zero length?\n", __func__);
+ if (length == 0)
return 0;
- }
if (board->master) {
retval = ibgts(board);
@@ -338,10 +321,9 @@ int ibrd(gpib_board_t *board, uint8_t *buf, size_t length, int *end_flag, size_t
do {
ret = board->interface->read(board, buf, length - *nbytes, end_flag, &bytes_read);
- if (ret < 0) {
- pr_err("gpib read error\n");
+ if (ret < 0)
goto ibrd_out;
- }
+
buf += bytes_read;
*nbytes += bytes_read;
if (need_resched())
@@ -361,7 +343,7 @@ ibrd_out:
* 1. Prior to conducting the poll the interface is placed
* in the controller active state.
*/
-int ibrpp(gpib_board_t *board, uint8_t *result)
+int ibrpp(struct gpib_board *board, uint8_t *result)
{
int retval = 0;
@@ -370,15 +352,13 @@ int ibrpp(gpib_board_t *board, uint8_t *result)
if (retval)
return -1;
- if (board->interface->parallel_poll(board, result)) {
- pr_err("gpib: parallel poll failed\n");
- retval = -1;
- }
+ retval = board->interface->parallel_poll(board, result);
+
os_remove_timer(board);
return retval;
}
-int ibppc(gpib_board_t *board, uint8_t configuration)
+int ibppc(struct gpib_board *board, uint8_t configuration)
{
configuration &= 0x1f;
board->interface->parallel_poll_configure(board, configuration);
@@ -387,15 +367,13 @@ int ibppc(gpib_board_t *board, uint8_t configuration)
return 0;
}
-int ibrsv2(gpib_board_t *board, uint8_t status_byte, int new_reason_for_service)
+int ibrsv2(struct gpib_board *board, uint8_t status_byte, int new_reason_for_service)
{
int board_status = ibstatus(board);
const unsigned int MSS = status_byte & request_service_bit;
- if ((board_status & CIC)) {
- pr_err("gpib: interface requested service while CIC\n");
+ if ((board_status & CIC))
return -EINVAL;
- }
if (MSS == 0 && new_reason_for_service)
return -EINVAL;
@@ -422,21 +400,17 @@ int ibrsv2(gpib_board_t *board, uint8_t status_byte, int new_reason_for_service)
* ibcmd in order to initialize the bus and enable the
* interface to leave the controller idle state.
*/
-int ibsic(gpib_board_t *board, unsigned int usec_duration)
+int ibsic(struct gpib_board *board, unsigned int usec_duration)
{
- if (board->master == 0) {
- pr_err("gpib: tried to assert IFC when not system controller\n");
- return -1;
- }
+ if (board->master == 0)
+ return -EINVAL;
if (usec_duration < 100)
usec_duration = 100;
- if (usec_duration > 1000) {
+ if (usec_duration > 1000)
usec_duration = 1000;
- pr_warn("gpib: warning, shortening long udelay\n");
- }
- dev_dbg(board->gpib_dev, "sending interface clear\n");
+ dev_dbg(board->gpib_dev, "sending interface clear, delay = %ius\n", usec_duration);
board->interface->interface_clear(board, 1);
udelay(usec_duration);
board->interface->interface_clear(board, 0);
@@ -444,26 +418,22 @@ int ibsic(gpib_board_t *board, unsigned int usec_duration)
return 0;
}
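[editor's note] ibsic() now clamps the IFC pulse width to the 100..1000 microsecond range silently instead of warning about it. The open-coded pair of range checks could equally use the kernel's clamp() helper from linux/minmax.h; a one-liner sketch:

#include <linux/minmax.h>

static unsigned int demo_ifc_duration(unsigned int usec)
{
	/* keep the IFC pulse between 100us and 1000us, as above */
	return clamp(usec, 100U, 1000U);
}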
-void ibrsc(gpib_board_t *board, int request_control)
+ /* FIXME make int */
+void ibrsc(struct gpib_board *board, int request_control)
{
board->master = request_control != 0;
- if (!board->interface->request_system_control) {
- pr_err("gpib: bug! driver does not implement request_system_control()\n");
- return;
- }
- board->interface->request_system_control(board, request_control);
+ if (board->interface->request_system_control)
+ board->interface->request_system_control(board, request_control);
}
/*
* IBSRE
* Send REN true if v is non-zero or false if v is zero.
*/
-int ibsre(gpib_board_t *board, int enable)
+int ibsre(struct gpib_board *board, int enable)
{
- if (board->master == 0) {
- pr_err("gpib: tried to set REN when not system controller\n");
- return -1;
- }
+ if (board->master == 0)
+ return -EINVAL;
board->interface->remote_enable(board, enable); /* set or clear REN */
if (!enable)
@@ -477,12 +447,11 @@ int ibsre(gpib_board_t *board, int enable)
* change the GPIB address of the interface board. The address
* must be 0 through 30. ibonl resets the address to PAD.
*/
-int ibpad(gpib_board_t *board, unsigned int addr)
+int ibpad(struct gpib_board *board, unsigned int addr)
{
- if (addr > MAX_GPIB_PRIMARY_ADDRESS) {
- pr_err("gpib: invalid primary address %u\n", addr);
- return -1;
- }
+ if (addr > MAX_GPIB_PRIMARY_ADDRESS)
+ return -EINVAL;
+
board->pad = addr;
if (board->online)
board->interface->primary_address(board, board->pad);
@@ -496,12 +465,10 @@ int ibpad(gpib_board_t *board, unsigned int addr)
* The address must be 0 through 30; a negative value disables it. ibonl resets the
* address to SAD.
*/
-int ibsad(gpib_board_t *board, int addr)
+int ibsad(struct gpib_board *board, int addr)
{
- if (addr > MAX_GPIB_SECONDARY_ADDRESS) {
- pr_err("gpib: invalid secondary address %i\n", addr);
- return -1;
- }
+ if (addr > MAX_GPIB_SECONDARY_ADDRESS)
+ return -EINVAL;
board->sad = addr;
if (board->online) {
if (board->sad >= 0)
@@ -519,14 +486,12 @@ int ibsad(gpib_board_t *board, int addr)
* Set the end-of-string modes for I/O operations to v.
*
*/
-int ibeos(gpib_board_t *board, int eos, int eosflags)
+int ibeos(struct gpib_board *board, int eos, int eosflags)
{
int retval;
- if (eosflags & ~EOS_MASK) {
- pr_err("bad EOS modes\n");
+ if (eosflags & ~EOS_MASK)
return -EINVAL;
- }
if (eosflags & REOS) {
retval = board->interface->enable_eos(board, eos, eosflags & BIN);
} else {
@@ -536,12 +501,12 @@ int ibeos(gpib_board_t *board, int eos, int eosflags)
return retval;
}
-int ibstatus(gpib_board_t *board)
+int ibstatus(struct gpib_board *board)
{
return general_ibstatus(board, NULL, 0, 0, NULL);
}
-int general_ibstatus(gpib_board_t *board, const gpib_status_queue_t *device,
+int general_ibstatus(struct gpib_board *board, const gpib_status_queue_t *device,
int clear_mask, int set_mask, gpib_descriptor_t *desc)
{
int status = 0;
@@ -555,8 +520,8 @@ int general_ibstatus(gpib_board_t *board, const gpib_status_queue_t *device,
status &= ~TIMO;
/* get real SRQI status if we can */
if (iblines(board, &line_status) == 0) {
- if ((line_status & ValidSRQ)) {
- if ((line_status & BusSRQ))
+ if ((line_status & VALID_SRQ)) {
+ if ((line_status & BUS_SRQ))
status |= SRQI;
else
status &= ~SRQI;
@@ -587,7 +552,7 @@ int general_ibstatus(gpib_board_t *board, const gpib_status_queue_t *device,
}
struct wait_info {
- gpib_board_t *board;
+ struct gpib_board *board;
struct timer_list timer;
int timed_out;
unsigned long usec_timeout;
@@ -611,7 +576,7 @@ static void init_wait_info(struct wait_info *winfo)
static int wait_satisfied(struct wait_info *winfo, gpib_status_queue_t *status_queue,
int wait_mask, int *status, gpib_descriptor_t *desc)
{
- gpib_board_t *board = winfo->board;
+ struct gpib_board *board = winfo->board;
int temp_status;
if (mutex_lock_interruptible(&board->big_gpib_mutex))
@@ -657,7 +622,7 @@ static void remove_wait_timer(struct wait_info *winfo)
* If the mask is 0 then
* no condition is waited for.
*/
-int ibwait(gpib_board_t *board, int wait_mask, int clear_mask, int set_mask,
+int ibwait(struct gpib_board *board, int wait_mask, int clear_mask, int set_mask,
int *status, unsigned long usec_timeout, gpib_descriptor_t *desc)
{
int retval = 0;
@@ -712,15 +677,13 @@ int ibwait(gpib_board_t *board, int wait_mask, int clear_mask, int set_mask,
* well as the interface board itself must be
* addressed by calling ibcmd.
*/
-int ibwrt(gpib_board_t *board, uint8_t *buf, size_t cnt, int send_eoi, size_t *bytes_written)
+int ibwrt(struct gpib_board *board, uint8_t *buf, size_t cnt, int send_eoi, size_t *bytes_written)
{
int ret = 0;
int retval;
- if (cnt == 0) {
- pr_warn("gpib: %s() called with zero length?\n", __func__);
+ if (cnt == 0)
return 0;
- }
if (board->master) {
retval = ibgts(board);
diff --git a/drivers/staging/gpib/common/ibsys.h b/drivers/staging/gpib/common/ibsys.h
index da20971e9c7e..19960af809c2 100644
--- a/drivers/staging/gpib/common/ibsys.h
+++ b/drivers/staging/gpib/common/ibsys.h
@@ -19,13 +19,13 @@
#define MAX_GPIB_PRIMARY_ADDRESS 30
#define MAX_GPIB_SECONDARY_ADDRESS 31
-int gpib_allocate_board(gpib_board_t *board);
-void gpib_deallocate_board(gpib_board_t *board);
+int gpib_allocate_board(struct gpib_board *board);
+void gpib_deallocate_board(struct gpib_board *board);
unsigned int num_status_bytes(const gpib_status_queue_t *dev);
-int push_status_byte(gpib_board_t *board, gpib_status_queue_t *device, uint8_t poll_byte);
-int pop_status_byte(gpib_board_t *board, gpib_status_queue_t *device, uint8_t *poll_byte);
-gpib_status_queue_t *get_gpib_status_queue(gpib_board_t *board, unsigned int pad, int sad);
-int get_serial_poll_byte(gpib_board_t *board, unsigned int pad, int sad,
+int push_status_byte(struct gpib_board *board, gpib_status_queue_t *device, uint8_t poll_byte);
+int pop_status_byte(struct gpib_board *board, gpib_status_queue_t *device, uint8_t *poll_byte);
+gpib_status_queue_t *get_gpib_status_queue(struct gpib_board *board, unsigned int pad, int sad);
+int get_serial_poll_byte(struct gpib_board *board, unsigned int pad, int sad,
unsigned int usec_timeout, uint8_t *poll_byte);
-int autopoll_all_devices(gpib_board_t *board);
+int autopoll_all_devices(struct gpib_board *board);
diff --git a/drivers/staging/gpib/eastwood/fluke_gpib.c b/drivers/staging/gpib/eastwood/fluke_gpib.c
index 0304c5de4ccd..a6b1ac169f94 100644
--- a/drivers/staging/gpib/eastwood/fluke_gpib.c
+++ b/drivers/staging/gpib/eastwood/fluke_gpib.c
@@ -7,6 +7,10 @@
* copyright: (C) 2006, 2010, 2015 Fluke Corporation
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define DRV_NAME KBUILD_MODNAME
+
#include "fluke_gpib.h"
#include "gpibP.h"
@@ -20,11 +24,11 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB Driver for Fluke cda devices");
-static int fluke_attach_holdoff_all(gpib_board_t *board, const gpib_board_config_t *config);
-static int fluke_attach_holdoff_end(gpib_board_t *board, const gpib_board_config_t *config);
-static void fluke_detach(gpib_board_t *board);
-static int fluke_config_dma(gpib_board_t *board, int output);
-static irqreturn_t fluke_gpib_internal_interrupt(gpib_board_t *board);
+static int fluke_attach_holdoff_all(struct gpib_board *board, const gpib_board_config_t *config);
+static int fluke_attach_holdoff_end(struct gpib_board *board, const gpib_board_config_t *config);
+static void fluke_detach(struct gpib_board *board);
+static int fluke_config_dma(struct gpib_board *board, int output);
+static irqreturn_t fluke_gpib_internal_interrupt(struct gpib_board *board);
static struct platform_device *fluke_gpib_pdev;
@@ -50,7 +54,7 @@ static void fluke_locking_write_byte(struct nec7210_priv *nec_priv, uint8_t byte
}
// wrappers for interface functions
-static int fluke_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
+static int fluke_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
size_t *bytes_read)
{
struct fluke_priv *priv = board->private_data;
@@ -58,7 +62,7 @@ static int fluke_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *
return nec7210_read(board, &priv->nec7210_priv, buffer, length, end, bytes_read);
}
-static int fluke_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int fluke_write(struct gpib_board *board, uint8_t *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
struct fluke_priv *priv = board->private_data;
@@ -66,28 +70,29 @@ static int fluke_write(gpib_board_t *board, uint8_t *buffer, size_t length,
return nec7210_write(board, &priv->nec7210_priv, buffer, length, send_eoi, bytes_written);
}
-static int fluke_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written)
+static int fluke_command(struct gpib_board *board, uint8_t *buffer,
+ size_t length, size_t *bytes_written)
{
struct fluke_priv *priv = board->private_data;
return nec7210_command(board, &priv->nec7210_priv, buffer, length, bytes_written);
}
-static int fluke_take_control(gpib_board_t *board, int synchronous)
+static int fluke_take_control(struct gpib_board *board, int synchronous)
{
struct fluke_priv *priv = board->private_data;
return nec7210_take_control(board, &priv->nec7210_priv, synchronous);
}
-static int fluke_go_to_standby(gpib_board_t *board)
+static int fluke_go_to_standby(struct gpib_board *board)
{
struct fluke_priv *priv = board->private_data;
return nec7210_go_to_standby(board, &priv->nec7210_priv);
}
-static void fluke_request_system_control(gpib_board_t *board, int request_control)
+static void fluke_request_system_control(struct gpib_board *board, int request_control)
{
struct fluke_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -95,91 +100,91 @@ static void fluke_request_system_control(gpib_board_t *board, int request_contro
nec7210_request_system_control(board, nec_priv, request_control);
}
-static void fluke_interface_clear(gpib_board_t *board, int assert)
+static void fluke_interface_clear(struct gpib_board *board, int assert)
{
struct fluke_priv *priv = board->private_data;
nec7210_interface_clear(board, &priv->nec7210_priv, assert);
}
-static void fluke_remote_enable(gpib_board_t *board, int enable)
+static void fluke_remote_enable(struct gpib_board *board, int enable)
{
struct fluke_priv *priv = board->private_data;
nec7210_remote_enable(board, &priv->nec7210_priv, enable);
}
-static int fluke_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+static int fluke_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
{
struct fluke_priv *priv = board->private_data;
return nec7210_enable_eos(board, &priv->nec7210_priv, eos_byte, compare_8_bits);
}
-static void fluke_disable_eos(gpib_board_t *board)
+static void fluke_disable_eos(struct gpib_board *board)
{
struct fluke_priv *priv = board->private_data;
nec7210_disable_eos(board, &priv->nec7210_priv);
}
-static unsigned int fluke_update_status(gpib_board_t *board, unsigned int clear_mask)
+static unsigned int fluke_update_status(struct gpib_board *board, unsigned int clear_mask)
{
struct fluke_priv *priv = board->private_data;
return nec7210_update_status(board, &priv->nec7210_priv, clear_mask);
}
-static int fluke_primary_address(gpib_board_t *board, unsigned int address)
+static int fluke_primary_address(struct gpib_board *board, unsigned int address)
{
struct fluke_priv *priv = board->private_data;
return nec7210_primary_address(board, &priv->nec7210_priv, address);
}
-static int fluke_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+static int fluke_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
struct fluke_priv *priv = board->private_data;
return nec7210_secondary_address(board, &priv->nec7210_priv, address, enable);
}
-static int fluke_parallel_poll(gpib_board_t *board, uint8_t *result)
+static int fluke_parallel_poll(struct gpib_board *board, uint8_t *result)
{
struct fluke_priv *priv = board->private_data;
return nec7210_parallel_poll(board, &priv->nec7210_priv, result);
}
-static void fluke_parallel_poll_configure(gpib_board_t *board, uint8_t configuration)
+static void fluke_parallel_poll_configure(struct gpib_board *board, uint8_t configuration)
{
struct fluke_priv *priv = board->private_data;
nec7210_parallel_poll_configure(board, &priv->nec7210_priv, configuration);
}
-static void fluke_parallel_poll_response(gpib_board_t *board, int ist)
+static void fluke_parallel_poll_response(struct gpib_board *board, int ist)
{
struct fluke_priv *priv = board->private_data;
nec7210_parallel_poll_response(board, &priv->nec7210_priv, ist);
}
-static void fluke_serial_poll_response(gpib_board_t *board, uint8_t status)
+static void fluke_serial_poll_response(struct gpib_board *board, uint8_t status)
{
struct fluke_priv *priv = board->private_data;
nec7210_serial_poll_response(board, &priv->nec7210_priv, status);
}
-static uint8_t fluke_serial_poll_status(gpib_board_t *board)
+static uint8_t fluke_serial_poll_status(struct gpib_board *board)
{
struct fluke_priv *priv = board->private_data;
return nec7210_serial_poll_status(board, &priv->nec7210_priv);
}
-static void fluke_return_to_local(gpib_board_t *board)
+static void fluke_return_to_local(struct gpib_board *board)
{
struct fluke_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -189,39 +194,37 @@ static void fluke_return_to_local(gpib_board_t *board)
write_byte(nec_priv, AUX_RTL, AUXMR);
}
-static int fluke_line_status(const gpib_board_t *board)
+static int fluke_line_status(const struct gpib_board *board)
{
- int status = ValidALL;
+ int status = VALID_ALL;
int bsr_bits;
struct fluke_priv *e_priv;
- struct nec7210_priv *nec_priv;
e_priv = board->private_data;
- nec_priv = &e_priv->nec7210_priv;
bsr_bits = fluke_paged_read_byte(e_priv, BUS_STATUS, BUS_STATUS_PAGE);
if ((bsr_bits & BSR_REN_BIT) == 0)
- status |= BusREN;
+ status |= BUS_REN;
if ((bsr_bits & BSR_IFC_BIT) == 0)
- status |= BusIFC;
+ status |= BUS_IFC;
if ((bsr_bits & BSR_SRQ_BIT) == 0)
- status |= BusSRQ;
+ status |= BUS_SRQ;
if ((bsr_bits & BSR_EOI_BIT) == 0)
- status |= BusEOI;
+ status |= BUS_EOI;
if ((bsr_bits & BSR_NRFD_BIT) == 0)
- status |= BusNRFD;
+ status |= BUS_NRFD;
if ((bsr_bits & BSR_NDAC_BIT) == 0)
- status |= BusNDAC;
+ status |= BUS_NDAC;
if ((bsr_bits & BSR_DAV_BIT) == 0)
- status |= BusDAV;
+ status |= BUS_DAV;
if ((bsr_bits & BSR_ATN_BIT) == 0)
- status |= BusATN;
+ status |= BUS_ATN;
return status;
}
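[editor's note] fluke_line_status() also shows the renamed constants in action: the BSR register bits are active-low, so a cleared hardware bit sets the matching BUS_* flag, and VALID_ALL declares that every line in the mask was actually sampled. A consumer is expected to test validity before state; roughly, using the series' VALID_SRQ/BUS_SRQ flags from the gpib headers:

#include <linux/errno.h>

/* Returns 1 if SRQ is asserted, 0 if not, -EINVAL if unreadable. */
static int demo_srq_asserted(int line_status)
{
	if (!(line_status & VALID_SRQ))
		return -EINVAL;	/* driver cannot sense this line */
	return !!(line_status & BUS_SRQ);
}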
-static unsigned int fluke_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+static int fluke_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
struct fluke_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
@@ -238,7 +241,7 @@ static unsigned int fluke_t1_delay(gpib_board_t *board, unsigned int nano_sec)
return retval;
}
-static int lacs_or_read_ready(gpib_board_t *board)
+static int lacs_or_read_ready(struct gpib_board *board)
{
const struct fluke_priv *e_priv = board->private_data;
const struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
@@ -254,7 +257,7 @@ static int lacs_or_read_ready(gpib_board_t *board)
/* Wait until it is possible for a read to do something useful. This
* is not essential; it only exists to prevent RFD holdoff from being released pointlessly.
*/
-static int wait_for_read(gpib_board_t *board)
+static int wait_for_read(struct gpib_board *board)
{
struct fluke_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
@@ -263,9 +266,9 @@ static int wait_for_read(gpib_board_t *board)
if (wait_event_interruptible(board->wait,
lacs_or_read_ready(board) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
+ test_bit(TIMO_NUM, &board->status)))
retval = -ERESTARTSYS;
- }
+
if (test_bit(TIMO_NUM, &board->status))
retval = -ETIMEDOUT;
if (test_and_clear_bit(DEV_CLEAR_BN, &nec_priv->state))
@@ -311,34 +314,30 @@ static int source_handshake_is_sids_or_sgns(struct fluke_priv *e_priv)
* If the chip is SGNS it is probably waiting for a byte to
* be written to it.
*/
-static int wait_for_data_out_ready(gpib_board_t *board)
+static int wait_for_data_out_ready(struct gpib_board *board)
{
struct fluke_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
int retval = 0;
-// printk("%s: enter\n", __FUNCTION__);
if (wait_event_interruptible(board->wait,
(test_bit(TACS_NUM, &board->status) &&
source_handshake_is_sgns(e_priv)) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
+ test_bit(TIMO_NUM, &board->status)))
retval = -ERESTARTSYS;
- }
if (test_bit(TIMO_NUM, &board->status))
retval = -ETIMEDOUT;
if (test_and_clear_bit(DEV_CLEAR_BN, &nec_priv->state))
retval = -EINTR;
-// printk("%s: exit, retval=%i\n", __FUNCTION__, retval);
return retval;
}
-static int wait_for_sids_or_sgns(gpib_board_t *board)
+static int wait_for_sids_or_sgns(struct gpib_board *board)
{
struct fluke_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
int retval = 0;
-// printk("%s: enter\n", __FUNCTION__);
if (wait_event_interruptible(board->wait,
source_handshake_is_sids_or_sgns(e_priv) ||
@@ -350,19 +349,17 @@ static int wait_for_sids_or_sgns(gpib_board_t *board)
retval = -ETIMEDOUT;
if (test_and_clear_bit(DEV_CLEAR_BN, &nec_priv->state))
retval = -EINTR;
-// printk("%s: exit, retval=%i\n", __FUNCTION__, retval);
return retval;
}
static void fluke_dma_callback(void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct fluke_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
unsigned long flags;
spin_lock_irqsave(&board->spinlock, flags);
-// printk("%s: enter\n", __FUNCTION__);
nec7210_set_reg_bits(nec_priv, IMR1, HR_DOIE | HR_DIIE, HR_DOIE | HR_DIIE);
wake_up_interruptible(&board->wait);
@@ -370,11 +367,11 @@ static void fluke_dma_callback(void *arg)
fluke_gpib_internal_interrupt(board);
clear_bit(DMA_WRITE_IN_PROGRESS_BN, &nec_priv->state);
clear_bit(DMA_READ_IN_PROGRESS_BN, &nec_priv->state);
-// printk("%s: exit\n", __FUNCTION__);
+
spin_unlock_irqrestore(&board->spinlock, flags);
}
-static int fluke_dma_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int fluke_dma_write(struct gpib_board *board, uint8_t *buffer, size_t length,
size_t *bytes_written)
{
struct fluke_priv *e_priv = board->private_data;
@@ -385,7 +382,7 @@ static int fluke_dma_write(gpib_board_t *board, uint8_t *buffer, size_t length,
struct dma_async_tx_descriptor *tx_desc;
*bytes_written = 0;
-// printk("%s: enter\n", __FUNCTION__);
+
if (WARN_ON_ONCE(length > e_priv->dma_buffer_size))
return -EFAULT;
dmaengine_terminate_all(e_priv->dma_channel);
@@ -403,7 +400,7 @@ static int fluke_dma_write(gpib_board_t *board, uint8_t *buffer, size_t length,
tx_desc = dmaengine_prep_slave_single(e_priv->dma_channel, address, length, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tx_desc) {
- pr_err("fluke_gpib: failed to allocate dma transmit descriptor\n");
+ dev_err(board->gpib_dev, "failed to allocate dma transmit descriptor\n");
retval = -ENOMEM;
goto cleanup;
}
@@ -419,10 +416,8 @@ static int fluke_dma_write(gpib_board_t *board, uint8_t *buffer, size_t length,
clear_bit(WRITE_READY_BN, &nec_priv->state);
set_bit(DMA_WRITE_IN_PROGRESS_BN, &nec_priv->state);
- // printk("%s: in spin lock\n", __FUNCTION__);
spin_unlock_irqrestore(&board->spinlock, flags);
-// printk("%s: waiting for write.\n", __FUNCTION__);
// suspend until message is sent
if (wait_event_interruptible(board->wait,
((readl(e_priv->write_transfer_counter) &
@@ -430,7 +425,6 @@ static int fluke_dma_write(gpib_board_t *board, uint8_t *buffer, size_t length,
test_bit(BUS_ERROR_BN, &nec_priv->state) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "gpib write interrupted!\n");
retval = -ERESTARTSYS;
}
if (test_bit(TIMO_NUM, &board->status))
@@ -459,11 +453,10 @@ static int fluke_dma_write(gpib_board_t *board, uint8_t *buffer, size_t length,
cleanup:
dma_unmap_single(board->dev, address, length, DMA_TO_DEVICE);
-// printk("%s: exit, retval=%d\n", __FUNCTION__, retval);
return retval;
}
-static int fluke_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int fluke_accel_write(struct gpib_board *board, uint8_t *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
struct fluke_priv *e_priv = board->private_data;
@@ -474,7 +467,7 @@ static int fluke_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length
size_t dma_remainder = remainder;
if (!e_priv->dma_channel) {
- pr_err("fluke_gpib: No dma channel available, cannot do accel write.");
+ dev_err(board->gpib_dev, "No dma channel available, cannot do accel write.\n");
return -ENXIO;
}
@@ -486,7 +479,6 @@ static int fluke_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length
if (send_eoi)
--dma_remainder;
-// printk("%s: entering while loop\n", __FUNCTION__);
while (dma_remainder > 0) {
size_t num_bytes;
@@ -512,7 +504,7 @@ static int fluke_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length
//handle sending of last byte with eoi
if (send_eoi) {
size_t num_bytes;
- // printk("%s: handling last byte\n", __FUNCTION__);
+
if (WARN_ON_ONCE(remainder != 1))
return -EFAULT;
@@ -533,7 +525,6 @@ static int fluke_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length
return retval;
remainder -= num_bytes;
}
-// printk("%s: bytes send=%i\n", __FUNCTION__, (int)(length - remainder));
return 0;
}
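[editor's note] fluke_dma_write() follows the standard dmaengine producer sequence: map the buffer, prep a slave descriptor, hook a completion callback, submit, issue pending, then sleep on the board's wait queue until the callback or an error bit wakes it. A condensed sketch of that sequence with the unwinding trimmed (demo names, not the driver's):

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static int demo_dma_write(struct dma_chan *chan, struct device *dev,
			  void *buf, size_t len,
			  void (*done)(void *), void *done_arg)
{
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	tx = dmaengine_prep_slave_single(chan, addr, len, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	tx->callback = done;		/* runs when the transfer completes */
	tx->callback_param = done_arg;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);	/* start the hardware */
	/* ... wait_event_interruptible() until done() fires, then unmap ... */
	return 0;
}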
@@ -544,7 +535,7 @@ static int fluke_get_dma_residue(struct dma_chan *chan, dma_cookie_t cookie)
result = dmaengine_pause(chan);
if (result < 0) {
- pr_err("fluke_gpib: dma pause failed?\n");
+ pr_err("dma pause failed?\n");
return result;
}
dmaengine_tx_status(chan, cookie, &state);
@@ -553,7 +544,7 @@ static int fluke_get_dma_residue(struct dma_chan *chan, dma_cookie_t cookie)
return state.residue;
}
-static int fluke_dma_read(gpib_board_t *board, uint8_t *buffer,
+static int fluke_dma_read(struct gpib_board *board, uint8_t *buffer,
size_t length, int *end, size_t *bytes_read)
{
struct fluke_priv *e_priv = board->private_data;
@@ -567,10 +558,6 @@ static int fluke_dma_read(gpib_board_t *board, uint8_t *buffer,
int i;
static const int timeout = 10;
- // printk("%s: enter, bus_address=0x%x, length=%i\n", __FUNCTION__,
- // (unsigned)bus_address,
- // (int)length);
-
*bytes_read = 0;
*end = 0;
if (length == 0)
@@ -589,7 +576,7 @@ static int fluke_dma_read(gpib_board_t *board, uint8_t *buffer,
bus_address, length, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tx_desc) {
- pr_err("fluke_gpib: failed to allocate dma transmit descriptor\n");
+ dev_err(board->gpib_dev, "failed to allocate dma transmit descriptor\n");
dma_unmap_single(NULL, bus_address, length, DMA_FROM_DEVICE);
return -EIO;
}
@@ -608,14 +595,12 @@ static int fluke_dma_read(gpib_board_t *board, uint8_t *buffer,
clear_bit(READ_READY_BN, &nec_priv->state);
spin_unlock_irqrestore(&board->spinlock, flags);
-// printk("waiting for data transfer.\n");
// wait for data to transfer
if (wait_event_interruptible(board->wait,
test_bit(DMA_READ_IN_PROGRESS_BN, &nec_priv->state) == 0 ||
test_bit(RECEIVED_END_BN, &nec_priv->state) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
test_bit(TIMO_NUM, &board->status))) {
- pr_warn("fluke: dma read wait interrupted\n");
retval = -ERESTARTSYS;
}
if (test_bit(TIMO_NUM, &board->status))
@@ -672,7 +657,7 @@ static int fluke_dma_read(gpib_board_t *board, uint8_t *buffer,
return retval;
}
-static int fluke_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int fluke_accel_read(struct gpib_board *board, uint8_t *buffer, size_t length,
int *end, size_t *bytes_read)
{
struct fluke_priv *e_priv = board->private_data;
@@ -682,10 +667,6 @@ static int fluke_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length,
int retval = 0;
size_t dma_nbytes;
-/* printk("%s: enter, buffer=0x%p, length=%i\n", __FUNCTION__,
- * buffer, (int)length);
- * printk("\t dma_buffer=0x%p\n", e_priv->dma_buffer);
- */
*end = 0;
*bytes_read = 0;
@@ -699,7 +680,6 @@ static int fluke_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length,
nec7210_release_rfd_holdoff(board, nec_priv);
-// printk("%s: entering while loop\n", __FUNCTION__);
while (remain > 0) {
transfer_size = (e_priv->dma_buffer_size < remain) ?
e_priv->dma_buffer_size : remain;
@@ -709,14 +689,12 @@ static int fluke_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length,
*bytes_read += dma_nbytes;
if (*end)
break;
- if (retval < 0) {
-// printk("%s: early exit, retval=%i\n", __FUNCTION__, (int)retval);
+ if (retval < 0)
return retval;
- }
if (need_resched())
schedule();
}
-// printk("%s: exit, retval=%i\n", __FUNCTION__, (int)retval);
+
return retval;
}
@@ -809,7 +787,7 @@ static gpib_interface_t fluke_interface = {
.return_to_local = fluke_return_to_local,
};
-irqreturn_t fluke_gpib_internal_interrupt(gpib_board_t *board)
+irqreturn_t fluke_gpib_internal_interrupt(struct gpib_board *board)
{
int status0, status1, status2;
struct fluke_priv *priv = board->private_data;
@@ -830,13 +808,6 @@ irqreturn_t fluke_gpib_internal_interrupt(gpib_board_t *board)
if (nec7210_interrupt_have_status(board, nec_priv, status1, status2) == IRQ_HANDLED)
retval = IRQ_HANDLED;
-/*
- * if((status1 & nec_priv->reg_bits[IMR1]) ||
- * (status2 & (nec_priv->reg_bits[IMR2] & IMR2_ENABLE_INTR_MASK)))
- * {
- * printk("fluke: status1 0x%x, status2 0x%x\n", status1, status2);
- * }
- */
if (read_byte(nec_priv, ADR0) & DATA_IN_STATUS) {
if (test_bit(RFD_HOLDOFF_BN, &nec_priv->state))
@@ -853,7 +824,7 @@ irqreturn_t fluke_gpib_internal_interrupt(gpib_board_t *board)
static irqreturn_t fluke_gpib_interrupt(int irq, void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
unsigned long flags;
irqreturn_t retval;
@@ -863,7 +834,7 @@ static irqreturn_t fluke_gpib_interrupt(int irq, void *arg)
return retval;
}
-static int fluke_allocate_private(gpib_board_t *board)
+static int fluke_allocate_private(struct gpib_board *board)
{
struct fluke_priv *priv;
@@ -880,7 +851,7 @@ static int fluke_allocate_private(gpib_board_t *board)
return 0;
}
-static void fluke_generic_detach(gpib_board_t *board)
+static void fluke_generic_detach(struct gpib_board *board)
{
if (board->private_data) {
struct fluke_priv *e_priv = board->private_data;
@@ -892,7 +863,7 @@ static void fluke_generic_detach(gpib_board_t *board)
}
// generic part of attach functions shared by all cb7210 boards
-static int fluke_generic_attach(gpib_board_t *board)
+static int fluke_generic_attach(struct gpib_board *board)
{
struct fluke_priv *e_priv;
struct nec7210_priv *nec_priv;
@@ -912,7 +883,7 @@ static int fluke_generic_attach(gpib_board_t *board)
return 0;
}
-static int fluke_config_dma(gpib_board_t *board, int output)
+static int fluke_config_dma(struct gpib_board *board, int output)
{
struct fluke_priv *e_priv = board->private_data;
struct dma_slave_config config;
@@ -937,7 +908,7 @@ static int fluke_config_dma(gpib_board_t *board, int output)
return dmaengine_slave_config(e_priv->dma_channel, &config);
}
-static int fluke_init(struct fluke_priv *e_priv, gpib_board_t *board, int handshake_mode)
+static int fluke_init(struct fluke_priv *e_priv, struct gpib_board *board, int handshake_mode)
{
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
@@ -954,7 +925,7 @@ static int fluke_init(struct fluke_priv *e_priv, gpib_board_t *board, int handsh
/* poll so we can detect ATN changes */
if (gpib_request_pseudo_irq(board, fluke_gpib_interrupt)) {
- pr_err("fluke_gpib: failed to allocate pseudo_irq\n");
+ dev_err(board->gpib_dev, "failed to allocate pseudo_irq\n");
return -EINVAL;
}
@@ -972,7 +943,7 @@ static bool gpib_dma_channel_filter(struct dma_chan *chan, void *filter_param)
return chan->chan_id == 0;
}
-static int fluke_attach_impl(gpib_board_t *board, const gpib_board_config_t *config,
+static int fluke_attach_impl(struct gpib_board *board, const gpib_board_config_t *config,
unsigned int handshake_mode)
{
struct fluke_priv *e_priv;
@@ -984,7 +955,7 @@ static int fluke_attach_impl(gpib_board_t *board, const gpib_board_config_t *con
dma_cap_mask_t dma_cap;
if (!fluke_gpib_pdev) {
- pr_err("No gpib platform device was found, attach failed.\n");
+ dev_err(board->gpib_dev, "No fluke device was found, attach failed.\n");
return -ENODEV;
}
@@ -999,7 +970,7 @@ static int fluke_attach_impl(gpib_board_t *board, const gpib_board_config_t *con
res = platform_get_resource(fluke_gpib_pdev, IORESOURCE_MEM, 0);
if (!res) {
- dev_err(&fluke_gpib_pdev->dev, "Unable to locate mmio resource for cb7210 gpib\n");
+ dev_err(&fluke_gpib_pdev->dev, "Unable to locate mmio resource\n");
return -ENODEV;
}
@@ -1012,10 +983,7 @@ static int fluke_attach_impl(gpib_board_t *board, const gpib_board_config_t *con
e_priv->gpib_iomem_res = res;
nec_priv->mmiobase = ioremap(e_priv->gpib_iomem_res->start,
- resource_size(e_priv->gpib_iomem_res));
- pr_info("gpib: mmiobase %llx remapped to %p, length=%d\n",
- (u64)e_priv->gpib_iomem_res->start,
- nec_priv->mmiobase, (int)resource_size(e_priv->gpib_iomem_res));
+ resource_size(e_priv->gpib_iomem_res));
if (!nec_priv->mmiobase) {
dev_err(&fluke_gpib_pdev->dev, "Could not map I/O memory\n");
return -ENOMEM;
@@ -1050,19 +1018,14 @@ static int fluke_attach_impl(gpib_board_t *board, const gpib_board_config_t *con
e_priv->write_transfer_counter = ioremap(e_priv->write_transfer_counter_res->start,
resource_size(e_priv->write_transfer_counter_res));
- pr_info("gpib: write transfer counter %lx remapped to %p, length=%d\n",
- (unsigned long)e_priv->write_transfer_counter_res->start,
- e_priv->write_transfer_counter,
- (int)resource_size(e_priv->write_transfer_counter_res));
if (!e_priv->write_transfer_counter) {
dev_err(&fluke_gpib_pdev->dev, "Could not map I/O memory\n");
return -ENOMEM;
}
irq = platform_get_irq(fluke_gpib_pdev, 0);
- pr_info("gpib: irq %d\n", irq);
if (irq < 0) {
- dev_err(&fluke_gpib_pdev->dev, "fluke_gpib: request for IRQ failed\n");
+ dev_err(&fluke_gpib_pdev->dev, "failed to obtain IRQ\n");
return -EBUSY;
}
retval = request_irq(irq, fluke_gpib_interrupt, isr_flags, fluke_gpib_pdev->name, board);
@@ -1078,7 +1041,7 @@ static int fluke_attach_impl(gpib_board_t *board, const gpib_board_config_t *con
dma_cap_set(DMA_SLAVE, dma_cap);
e_priv->dma_channel = dma_request_channel(dma_cap, gpib_dma_channel_filter, NULL);
if (!e_priv->dma_channel) {
- pr_err("fluke_gpib: failed to allocate a dma channel.\n");
+ dev_err(board->gpib_dev, "failed to allocate a dma channel.\n");
// we don't error out here because unaccel interface will still
// work without dma
}
@@ -1086,17 +1049,17 @@ static int fluke_attach_impl(gpib_board_t *board, const gpib_board_config_t *con
return fluke_init(e_priv, board, handshake_mode);
}
-int fluke_attach_holdoff_all(gpib_board_t *board, const gpib_board_config_t *config)
+int fluke_attach_holdoff_all(struct gpib_board *board, const gpib_board_config_t *config)
{
return fluke_attach_impl(board, config, HR_HLDA);
}
-int fluke_attach_holdoff_end(gpib_board_t *board, const gpib_board_config_t *config)
+int fluke_attach_holdoff_end(struct gpib_board *board, const gpib_board_config_t *config)
{
return fluke_attach_impl(board, config, HR_HLDE);
}
-void fluke_detach(gpib_board_t *board)
+void fluke_detach(struct gpib_board *board)
{
struct fluke_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -1142,8 +1105,7 @@ MODULE_DEVICE_TABLE(of, fluke_gpib_of_match);
static struct platform_driver fluke_gpib_platform_driver = {
.driver = {
- .name = "fluke_gpib",
- .owner = THIS_MODULE,
+ .name = DRV_NAME,
.of_match_table = fluke_gpib_of_match,
},
.probe = &fluke_gpib_probe
@@ -1155,25 +1117,25 @@ static int __init fluke_init_module(void)
result = platform_driver_register(&fluke_gpib_platform_driver);
if (result) {
- pr_err("fluke_gpib: platform_driver_register failed: error = %d\n", result);
+ pr_err("platform_driver_register failed: error = %d\n", result);
return result;
}
result = gpib_register_driver(&fluke_unaccel_interface, THIS_MODULE);
if (result) {
- pr_err("fluke_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_unaccel;
}
result = gpib_register_driver(&fluke_hybrid_interface, THIS_MODULE);
if (result) {
- pr_err("fluke_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_hybrid;
}
result = gpib_register_driver(&fluke_interface, THIS_MODULE);
if (result) {
- pr_err("fluke_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_interface;
}
diff --git a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c b/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
index f950e7cdd8f8..53f4b3fccc3c 100644
--- a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
+++ b/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
@@ -12,6 +12,10 @@
* (C) 2017 Frank Mori Hess
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define DRV_NAME KBUILD_MODNAME
+
#include "fmh_gpib.h"
#include "gpibP.h"
@@ -28,19 +32,21 @@ MODULE_DESCRIPTION("GPIB Driver for fmh_gpib_core");
MODULE_AUTHOR("Frank Mori Hess <fmh6jj@gmail.com>");
static irqreturn_t fmh_gpib_interrupt(int irq, void *arg);
-static int fmh_gpib_attach_holdoff_all(gpib_board_t *board, const gpib_board_config_t *config);
-static int fmh_gpib_attach_holdoff_end(gpib_board_t *board, const gpib_board_config_t *config);
-static void fmh_gpib_detach(gpib_board_t *board);
-static int fmh_gpib_pci_attach_holdoff_all(gpib_board_t *board, const gpib_board_config_t *config);
-static int fmh_gpib_pci_attach_holdoff_end(gpib_board_t *board, const gpib_board_config_t *config);
-static void fmh_gpib_pci_detach(gpib_board_t *board);
-static int fmh_gpib_config_dma(gpib_board_t *board, int output);
-static irqreturn_t fmh_gpib_internal_interrupt(gpib_board_t *board);
+static int fmh_gpib_attach_holdoff_all(struct gpib_board *board, const gpib_board_config_t *config);
+static int fmh_gpib_attach_holdoff_end(struct gpib_board *board, const gpib_board_config_t *config);
+static void fmh_gpib_detach(struct gpib_board *board);
+static int fmh_gpib_pci_attach_holdoff_all(struct gpib_board *board,
+ const gpib_board_config_t *config);
+static int fmh_gpib_pci_attach_holdoff_end(struct gpib_board *board,
+ const gpib_board_config_t *config);
+static void fmh_gpib_pci_detach(struct gpib_board *board);
+static int fmh_gpib_config_dma(struct gpib_board *board, int output);
+static irqreturn_t fmh_gpib_internal_interrupt(struct gpib_board *board);
static struct platform_driver fmh_gpib_platform_driver;
static struct pci_driver fmh_gpib_pci_driver;
// wrappers for interface functions
-static int fmh_gpib_read(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int fmh_gpib_read(struct gpib_board *board, uint8_t *buffer, size_t length,
int *end, size_t *bytes_read)
{
struct fmh_priv *priv = board->private_data;
@@ -48,7 +54,7 @@ static int fmh_gpib_read(gpib_board_t *board, uint8_t *buffer, size_t length,
return nec7210_read(board, &priv->nec7210_priv, buffer, length, end, bytes_read);
}
-static int fmh_gpib_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int fmh_gpib_write(struct gpib_board *board, uint8_t *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
struct fmh_priv *priv = board->private_data;
@@ -56,7 +62,7 @@ static int fmh_gpib_write(gpib_board_t *board, uint8_t *buffer, size_t length,
return nec7210_write(board, &priv->nec7210_priv, buffer, length, send_eoi, bytes_written);
}
-static int fmh_gpib_command(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int fmh_gpib_command(struct gpib_board *board, uint8_t *buffer, size_t length,
size_t *bytes_written)
{
struct fmh_priv *priv = board->private_data;
@@ -64,21 +70,21 @@ static int fmh_gpib_command(gpib_board_t *board, uint8_t *buffer, size_t length,
return nec7210_command(board, &priv->nec7210_priv, buffer, length, bytes_written);
}
-static int fmh_gpib_take_control(gpib_board_t *board, int synchronous)
+static int fmh_gpib_take_control(struct gpib_board *board, int synchronous)
{
struct fmh_priv *priv = board->private_data;
return nec7210_take_control(board, &priv->nec7210_priv, synchronous);
}
-static int fmh_gpib_go_to_standby(gpib_board_t *board)
+static int fmh_gpib_go_to_standby(struct gpib_board *board)
{
struct fmh_priv *priv = board->private_data;
return nec7210_go_to_standby(board, &priv->nec7210_priv);
}
-static void fmh_gpib_request_system_control(gpib_board_t *board, int request_control)
+static void fmh_gpib_request_system_control(struct gpib_board *board, int request_control)
{
struct fmh_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -86,77 +92,77 @@ static void fmh_gpib_request_system_control(gpib_board_t *board, int request_con
nec7210_request_system_control(board, nec_priv, request_control);
}
-static void fmh_gpib_interface_clear(gpib_board_t *board, int assert)
+static void fmh_gpib_interface_clear(struct gpib_board *board, int assert)
{
struct fmh_priv *priv = board->private_data;
nec7210_interface_clear(board, &priv->nec7210_priv, assert);
}
-static void fmh_gpib_remote_enable(gpib_board_t *board, int enable)
+static void fmh_gpib_remote_enable(struct gpib_board *board, int enable)
{
struct fmh_priv *priv = board->private_data;
nec7210_remote_enable(board, &priv->nec7210_priv, enable);
}
-static int fmh_gpib_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+static int fmh_gpib_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
{
struct fmh_priv *priv = board->private_data;
return nec7210_enable_eos(board, &priv->nec7210_priv, eos_byte, compare_8_bits);
}
-static void fmh_gpib_disable_eos(gpib_board_t *board)
+static void fmh_gpib_disable_eos(struct gpib_board *board)
{
struct fmh_priv *priv = board->private_data;
nec7210_disable_eos(board, &priv->nec7210_priv);
}
-static unsigned int fmh_gpib_update_status(gpib_board_t *board, unsigned int clear_mask)
+static unsigned int fmh_gpib_update_status(struct gpib_board *board, unsigned int clear_mask)
{
struct fmh_priv *priv = board->private_data;
return nec7210_update_status(board, &priv->nec7210_priv, clear_mask);
}
-static int fmh_gpib_primary_address(gpib_board_t *board, unsigned int address)
+static int fmh_gpib_primary_address(struct gpib_board *board, unsigned int address)
{
struct fmh_priv *priv = board->private_data;
return nec7210_primary_address(board, &priv->nec7210_priv, address);
}
-static int fmh_gpib_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+static int fmh_gpib_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
struct fmh_priv *priv = board->private_data;
return nec7210_secondary_address(board, &priv->nec7210_priv, address, enable);
}
-static int fmh_gpib_parallel_poll(gpib_board_t *board, uint8_t *result)
+static int fmh_gpib_parallel_poll(struct gpib_board *board, uint8_t *result)
{
struct fmh_priv *priv = board->private_data;
return nec7210_parallel_poll(board, &priv->nec7210_priv, result);
}
-static void fmh_gpib_parallel_poll_configure(gpib_board_t *board, uint8_t configuration)
+static void fmh_gpib_parallel_poll_configure(struct gpib_board *board, uint8_t configuration)
{
struct fmh_priv *priv = board->private_data;
nec7210_parallel_poll_configure(board, &priv->nec7210_priv, configuration);
}
-static void fmh_gpib_parallel_poll_response(gpib_board_t *board, int ist)
+static void fmh_gpib_parallel_poll_response(struct gpib_board *board, int ist)
{
struct fmh_priv *priv = board->private_data;
nec7210_parallel_poll_response(board, &priv->nec7210_priv, ist);
}
-static void fmh_gpib_local_parallel_poll_mode(gpib_board_t *board, int local)
+static void fmh_gpib_local_parallel_poll_mode(struct gpib_board *board, int local)
{
struct fmh_priv *priv = board->private_data;
@@ -171,7 +177,7 @@ static void fmh_gpib_local_parallel_poll_mode(gpib_board_t *board, int local)
}
}
-static void fmh_gpib_serial_poll_response2(gpib_board_t *board, uint8_t status,
+static void fmh_gpib_serial_poll_response2(struct gpib_board *board, uint8_t status,
int new_reason_for_service)
{
struct fmh_priv *priv = board->private_data;
@@ -206,14 +212,14 @@ static void fmh_gpib_serial_poll_response2(gpib_board_t *board, uint8_t status,
spin_unlock_irqrestore(&board->spinlock, flags);
}
-static uint8_t fmh_gpib_serial_poll_status(gpib_board_t *board)
+static uint8_t fmh_gpib_serial_poll_status(struct gpib_board *board)
{
struct fmh_priv *priv = board->private_data;
return nec7210_serial_poll_status(board, &priv->nec7210_priv);
}
-static void fmh_gpib_return_to_local(gpib_board_t *board)
+static void fmh_gpib_return_to_local(struct gpib_board *board)
{
struct fmh_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -223,9 +229,9 @@ static void fmh_gpib_return_to_local(gpib_board_t *board)
write_byte(nec_priv, AUX_RTL, AUXMR);
}
-static int fmh_gpib_line_status(const gpib_board_t *board)
+static int fmh_gpib_line_status(const struct gpib_board *board)
{
- int status = ValidALL;
+ int status = VALID_ALL;
int bsr_bits;
struct fmh_priv *e_priv;
struct nec7210_priv *nec_priv;
@@ -236,26 +242,26 @@ static int fmh_gpib_line_status(const gpib_board_t *board)
bsr_bits = read_byte(nec_priv, BUS_STATUS_REG);
if ((bsr_bits & BSR_REN_BIT) == 0)
- status |= BusREN;
+ status |= BUS_REN;
if ((bsr_bits & BSR_IFC_BIT) == 0)
- status |= BusIFC;
+ status |= BUS_IFC;
if ((bsr_bits & BSR_SRQ_BIT) == 0)
- status |= BusSRQ;
+ status |= BUS_SRQ;
if ((bsr_bits & BSR_EOI_BIT) == 0)
- status |= BusEOI;
+ status |= BUS_EOI;
if ((bsr_bits & BSR_NRFD_BIT) == 0)
- status |= BusNRFD;
+ status |= BUS_NRFD;
if ((bsr_bits & BSR_NDAC_BIT) == 0)
- status |= BusNDAC;
+ status |= BUS_NDAC;
if ((bsr_bits & BSR_DAV_BIT) == 0)
- status |= BusDAV;
+ status |= BUS_DAV;
if ((bsr_bits & BSR_ATN_BIT) == 0)
- status |= BusATN;
+ status |= BUS_ATN;
return status;
}
-static unsigned int fmh_gpib_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+static int fmh_gpib_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
struct fmh_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
@@ -272,7 +278,7 @@ static unsigned int fmh_gpib_t1_delay(gpib_board_t *board, unsigned int nano_sec
return retval;
}
-static int lacs_or_read_ready(gpib_board_t *board)
+static int lacs_or_read_ready(struct gpib_board *board)
{
const struct fmh_priv *e_priv = board->private_data;
const struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
@@ -287,7 +293,7 @@ static int lacs_or_read_ready(gpib_board_t *board)
return retval;
}
-static int wait_for_read(gpib_board_t *board)
+static int wait_for_read(struct gpib_board *board)
{
struct fmh_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
@@ -306,7 +312,7 @@ static int wait_for_read(gpib_board_t *board)
return retval;
}
-static int wait_for_rx_fifo_half_full_or_end(gpib_board_t *board)
+static int wait_for_rx_fifo_half_full_or_end(struct gpib_board *board)
{
struct fmh_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
@@ -329,12 +335,11 @@ static int wait_for_rx_fifo_half_full_or_end(gpib_board_t *board)
/* Wait until the gpib chip is ready to accept a data out byte.
*/
-static int wait_for_data_out_ready(gpib_board_t *board)
+static int wait_for_data_out_ready(struct gpib_board *board)
{
struct fmh_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
int retval = 0;
-// printk("%s: enter\n", __FUNCTION__);
if (wait_event_interruptible(board->wait,
(test_bit(TACS_NUM, &board->status) &&
@@ -348,19 +353,18 @@ static int wait_for_data_out_ready(gpib_board_t *board)
retval = -ETIMEDOUT;
if (test_and_clear_bit(DEV_CLEAR_BN, &nec_priv->state))
retval = -EINTR;
-// printk("%s: exit, retval=%i\n", __FUNCTION__, retval);
+
return retval;
}
static void fmh_gpib_dma_callback(void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct fmh_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
unsigned long flags;
spin_lock_irqsave(&board->spinlock, flags);
-// printk("%s: enter\n", __FUNCTION__);
nec7210_set_reg_bits(nec_priv, IMR1, HR_DOIE | HR_DIIE, HR_DOIE | HR_DIIE);
wake_up_interruptible(&board->wait);
@@ -370,7 +374,6 @@ static void fmh_gpib_dma_callback(void *arg)
clear_bit(DMA_WRITE_IN_PROGRESS_BN, &nec_priv->state);
clear_bit(DMA_READ_IN_PROGRESS_BN, &nec_priv->state);
- // printk("%s: exit\n", __FUNCTION__);
spin_unlock_irqrestore(&board->spinlock, flags);
}
@@ -388,7 +391,7 @@ static int fmh_gpib_all_bytes_are_sent(struct fmh_priv *e_priv)
return 1;
}
-static int fmh_gpib_dma_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int fmh_gpib_dma_write(struct gpib_board *board, uint8_t *buffer, size_t length,
size_t *bytes_written)
{
struct fmh_priv *e_priv = board->private_data;
@@ -399,14 +402,13 @@ static int fmh_gpib_dma_write(gpib_board_t *board, uint8_t *buffer, size_t lengt
struct dma_async_tx_descriptor *tx_desc;
*bytes_written = 0;
-// printk("%s: enter\n", __FUNCTION__);
if (WARN_ON_ONCE(length > e_priv->dma_buffer_size))
return -EFAULT;
dmaengine_terminate_all(e_priv->dma_channel);
memcpy(e_priv->dma_buffer, buffer, length);
address = dma_map_single(board->dev, e_priv->dma_buffer, length, DMA_TO_DEVICE);
if (dma_mapping_error(board->dev, address))
- pr_err("dma mapping error in dma write!\n");
+ dev_err(board->gpib_dev, "dma mapping error in dma write!\n");
/* program dma controller */
retval = fmh_gpib_config_dma(board, 1);
if (retval)
@@ -415,7 +417,7 @@ static int fmh_gpib_dma_write(gpib_board_t *board, uint8_t *buffer, size_t lengt
tx_desc = dmaengine_prep_slave_single(e_priv->dma_channel, address, length, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tx_desc) {
- pr_err("fmh_gpib_gpib: failed to allocate dma transmit descriptor\n");
+ dev_err(board->gpib_dev, "failed to allocate dma transmit descriptor\n");
retval = -ENOMEM;
goto cleanup;
}
@@ -432,19 +434,17 @@ static int fmh_gpib_dma_write(gpib_board_t *board, uint8_t *buffer, size_t lengt
dma_async_issue_pending(e_priv->dma_channel);
clear_bit(WRITE_READY_BN, &nec_priv->state);
set_bit(DMA_WRITE_IN_PROGRESS_BN, &nec_priv->state);
-// printk("%s: in spin lock\n", __FUNCTION__);
+
spin_unlock_irqrestore(&board->spinlock, flags);
-// printk("%s: waiting for write.\n", __FUNCTION__);
// suspend until message is sent
if (wait_event_interruptible(board->wait,
fmh_gpib_all_bytes_are_sent(e_priv) ||
test_bit(BUS_ERROR_BN, &nec_priv->state) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "gpib write interrupted!\n");
+ test_bit(TIMO_NUM, &board->status)))
retval = -ERESTARTSYS;
- }
+
if (test_bit(TIMO_NUM, &board->status))
retval = -ETIMEDOUT;
if (test_and_clear_bit(DEV_CLEAR_BN, &nec_priv->state))
@@ -464,16 +464,12 @@ static int fmh_gpib_dma_write(gpib_board_t *board, uint8_t *buffer, size_t lengt
fifo_xfer_counter_mask);
if (WARN_ON_ONCE(*bytes_written > length))
return -EFAULT;
- /* printk("length=%i, *bytes_written=%i, residue=%i, retval=%i\n",
- * length, *bytes_written, get_dma_residue(e_priv->dma_channel), retval);
- */
cleanup:
dma_unmap_single(board->dev, address, length, DMA_TO_DEVICE);
-// printk("%s: exit, retval=%d\n", __FUNCTION__, retval);
return retval;
}
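/*
 * Aside: fmh_gpib_dma_write() above follows the standard dmaengine
 * slave-transfer sequence. A minimal sketch of that sequence, using
 * hypothetical names (dev, chan, buf, len) rather than the driver's own
 * code, and simplified to omit the completion-callback wait:
 */
static int example_dma_tx(struct device *dev, struct dma_chan *chan,
			  void *buf, size_t len)
{
	struct dma_async_tx_descriptor *desc;
	dma_addr_t addr;

	/* 1. map the buffer for device access */
	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* 2. prepare a descriptor for one contiguous slave transfer */
	desc = dmaengine_prep_slave_single(chan, addr, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	/* 3. submit and kick the engine; completion arrives via callback */
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	/* ... real code waits for the callback, then unmaps ... */
	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
	return 0;
}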
-static int fmh_gpib_accel_write(gpib_board_t *board, uint8_t *buffer,
+static int fmh_gpib_accel_write(struct gpib_board *board, uint8_t *buffer,
size_t length, int send_eoi, size_t *bytes_written)
{
struct fmh_priv *e_priv = board->private_data;
@@ -484,7 +480,7 @@ static int fmh_gpib_accel_write(gpib_board_t *board, uint8_t *buffer,
size_t dma_remainder = remainder;
if (!e_priv->dma_channel) {
- pr_err("fmh_gpib_gpib: No dma channel available, cannot do accel write.");
+ dev_err(board->gpib_dev, "No dma channel available, cannot do accel write.");
return -ENXIO;
}
@@ -498,7 +494,6 @@ static int fmh_gpib_accel_write(gpib_board_t *board, uint8_t *buffer,
if (send_eoi)
--dma_remainder;
-// printk("%s: entering while loop\n", __FUNCTION__);
while (dma_remainder > 0) {
size_t num_bytes;
@@ -524,7 +519,7 @@ static int fmh_gpib_accel_write(gpib_board_t *board, uint8_t *buffer,
 // handle sending of the last byte with EOI
if (send_eoi) {
size_t num_bytes;
- // printk("%s: handling last byte\n", __FUNCTION__);
+
if (WARN_ON_ONCE(remainder != 1))
return -EFAULT;
@@ -545,7 +540,6 @@ static int fmh_gpib_accel_write(gpib_board_t *board, uint8_t *buffer,
return retval;
remainder -= num_bytes;
}
-// printk("%s: bytes send=%i\n", __FUNCTION__, (int)(length - remainder));
return 0;
}
@@ -556,7 +550,7 @@ static int fmh_gpib_get_dma_residue(struct dma_chan *chan, dma_cookie_t cookie)
result = dmaengine_pause(chan);
if (result < 0) {
- pr_err("fmh_gpib_gpib: dma pause failed?\n");
+ pr_err("dma pause failed?\n");
return result;
}
dmaengine_tx_status(chan, cookie, &state);
@@ -565,12 +559,11 @@ static int fmh_gpib_get_dma_residue(struct dma_chan *chan, dma_cookie_t cookie)
return state.residue;
}
-static int wait_for_tx_fifo_half_empty(gpib_board_t *board)
+static int wait_for_tx_fifo_half_empty(struct gpib_board *board)
{
struct fmh_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
int retval = 0;
-// printk("%s: enter\n", __FUNCTION__);
if (wait_event_interruptible(board->wait,
(test_bit(TACS_NUM, &board->status) &&
@@ -584,14 +577,14 @@ static int wait_for_tx_fifo_half_empty(gpib_board_t *board)
retval = -ETIMEDOUT;
if (test_and_clear_bit(DEV_CLEAR_BN, &nec_priv->state))
retval = -EINTR;
-// printk("%s: exit, retval=%i\n", __FUNCTION__, retval);
+
return retval;
}
 /* supports writing a chunk of data whose length must fit into the hardware's xfer counter,
* called in a loop by fmh_gpib_fifo_write()
*/
-static int fmh_gpib_fifo_write_countable(gpib_board_t *board, uint8_t *buffer,
+static int fmh_gpib_fifo_write_countable(struct gpib_board *board, uint8_t *buffer,
size_t length, int send_eoi, size_t *bytes_written)
{
struct fmh_priv *e_priv = board->private_data;
@@ -600,7 +593,6 @@ static int fmh_gpib_fifo_write_countable(gpib_board_t *board, uint8_t *buffer,
unsigned int remainder;
*bytes_written = 0;
-// printk("%s: enter\n", __FUNCTION__);
if (WARN_ON_ONCE(length > fifo_xfer_counter_mask))
return -EFAULT;
@@ -635,10 +627,9 @@ static int fmh_gpib_fifo_write_countable(gpib_board_t *board, uint8_t *buffer,
fmh_gpib_all_bytes_are_sent(e_priv) ||
test_bit(BUS_ERROR_BN, &nec_priv->state) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "gpib write interrupted!\n");
+ test_bit(TIMO_NUM, &board->status)))
retval = -ERESTARTSYS;
- }
+
if (test_bit(TIMO_NUM, &board->status))
retval = -ETIMEDOUT;
if (test_and_clear_bit(DEV_CLEAR_BN, &nec_priv->state))
@@ -655,15 +646,11 @@ cleanup:
fifo_xfer_counter_mask);
if (WARN_ON_ONCE(*bytes_written > length))
return -EFAULT;
- /* printk("length=%i, *bytes_written=%i, residue=%i, retval=%i\n",
- * length, *bytes_written, get_dma_residue(e_priv->dma_channel), retval);
- */
-// printk("%s: exit, retval=%d\n", __FUNCTION__, retval);
return retval;
}
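/*
 * Aside: the error classification at the end of the function above is a
 * recurring idiom in these drivers: sleep until the hardware or a status
 * bit wakes the caller, then map the wake-up cause onto an errno, checking
 * the more specific causes last so they take precedence. Sketch only;
 * transfer_done() is a hypothetical condition helper:
 */
retval = 0;
if (wait_event_interruptible(board->wait, transfer_done(board)))
	retval = -ERESTARTSYS;		/* interrupted by a signal */
if (test_bit(TIMO_NUM, &board->status))
	retval = -ETIMEDOUT;		/* board timeout fired */
if (test_and_clear_bit(DEV_CLEAR_BN, &nec_priv->state))
	retval = -EINTR;		/* device clear aborted the transfer */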
-static int fmh_gpib_fifo_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int fmh_gpib_fifo_write(struct gpib_board *board, uint8_t *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
struct fmh_priv *e_priv = board->private_data;
@@ -678,8 +665,6 @@ static int fmh_gpib_fifo_write(gpib_board_t *board, uint8_t *buffer, size_t leng
clear_bit(DEV_CLEAR_BN, &nec_priv->state); // XXX FIXME
-// printk("%s: entering while loop\n", __FUNCTION__);
-
while (remainder > 0) {
size_t num_bytes;
int last_pass;
@@ -708,11 +693,11 @@ static int fmh_gpib_fifo_write(gpib_board_t *board, uint8_t *buffer, size_t leng
if (need_resched())
schedule();
}
-// printk("%s: bytes send=%i\n", __FUNCTION__, (int)(length - remainder));
+
return retval;
}
-static int fmh_gpib_dma_read(gpib_board_t *board, uint8_t *buffer,
+static int fmh_gpib_dma_read(struct gpib_board *board, uint8_t *buffer,
size_t length, int *end, size_t *bytes_read)
{
struct fmh_priv *e_priv = board->private_data;
@@ -725,10 +710,6 @@ static int fmh_gpib_dma_read(gpib_board_t *board, uint8_t *buffer,
struct dma_async_tx_descriptor *tx_desc;
dma_cookie_t dma_cookie;
- // printk("%s: enter, bus_address=0x%x, length=%i\n", __FUNCTION__,
- //(unsigned)bus_address,
-// (int)length);
-
*bytes_read = 0;
*end = 0;
if (length == 0)
@@ -737,7 +718,7 @@ static int fmh_gpib_dma_read(gpib_board_t *board, uint8_t *buffer,
bus_address = dma_map_single(board->dev, e_priv->dma_buffer,
length, DMA_FROM_DEVICE);
if (dma_mapping_error(board->dev, bus_address))
- pr_err("dma mapping error in dma read!");
+ dev_err(board->gpib_dev, "dma mapping error in dma read!");
/* program dma controller */
retval = fmh_gpib_config_dma(board, 0);
@@ -749,7 +730,7 @@ static int fmh_gpib_dma_read(gpib_board_t *board, uint8_t *buffer,
length, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tx_desc) {
- pr_err("fmh_gpib_gpib: failed to allocate dma transmit descriptor\n");
+ dev_err(board->gpib_dev, "failed to allocate dma transmit descriptor\n");
dma_unmap_single(board->dev, bus_address, length, DMA_FROM_DEVICE);
return -EIO;
}
@@ -769,7 +750,7 @@ static int fmh_gpib_dma_read(gpib_board_t *board, uint8_t *buffer,
set_bit(DMA_READ_IN_PROGRESS_BN, &nec_priv->state);
spin_unlock_irqrestore(&board->spinlock, flags);
-// printk("waiting for data transfer.\n");
+
// wait for data to transfer
wait_retval = wait_event_interruptible(board->wait,
test_bit(DMA_READ_IN_PROGRESS_BN, &nec_priv->state)
@@ -777,10 +758,9 @@ static int fmh_gpib_dma_read(gpib_board_t *board, uint8_t *buffer,
test_bit(RECEIVED_END_BN, &nec_priv->state) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
test_bit(TIMO_NUM, &board->status));
- if (wait_retval) {
- pr_warn("fmh_gpib: dma read wait interrupted\n");
+ if (wait_retval)
retval = -ERESTARTSYS;
- }
+
if (test_bit(TIMO_NUM, &board->status))
retval = -ETIMEDOUT;
if (test_bit(DEV_CLEAR_BN, &nec_priv->state))
@@ -825,13 +805,11 @@ static int fmh_gpib_dma_read(gpib_board_t *board, uint8_t *buffer,
*end = 1;
}
spin_unlock_irqrestore(&board->spinlock, flags);
-// printk("\tbytes_read=%i, residue=%i, end=%i, retval=%i, wait_retval=%i\n",
-// *bytes_read, residue, *end, retval, wait_retval);
return retval;
}
-static void fmh_gpib_release_rfd_holdoff(gpib_board_t *board, struct fmh_priv *e_priv)
+static void fmh_gpib_release_rfd_holdoff(struct gpib_board *board, struct fmh_priv *e_priv)
{
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
unsigned int ext_status_1;
@@ -868,7 +846,7 @@ static void fmh_gpib_release_rfd_holdoff(gpib_board_t *board, struct fmh_priv *e
spin_unlock_irqrestore(&board->spinlock, flags);
}
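/*
 * Aside: an RFD ("ready for data") holdoff is the listener-side flow
 * control of the IEEE-488 handshake: the interface keeps NRFD asserted
 * after accepting a byte, so the talker cannot source the next one until
 * the driver has drained its buffers; releasing the holdoff, as above,
 * lets the handshake resume.
 */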
-static int fmh_gpib_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int fmh_gpib_accel_read(struct gpib_board *board, uint8_t *buffer, size_t length,
int *end, size_t *bytes_read)
{
struct fmh_priv *e_priv = board->private_data;
@@ -918,17 +896,13 @@ static int fmh_gpib_accel_read(gpib_board_t *board, uint8_t *buffer, size_t leng
/* Read a chunk of data whose length is within the limits of the hardware's
* xfer counter. Called in a loop from fmh_gpib_fifo_read().
*/
-static int fmh_gpib_fifo_read_countable(gpib_board_t *board, uint8_t *buffer,
+static int fmh_gpib_fifo_read_countable(struct gpib_board *board, uint8_t *buffer,
size_t length, int *end, size_t *bytes_read)
{
struct fmh_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
int retval = 0;
- // printk("%s: enter, bus_address=0x%x, length=%i\n", __FUNCTION__,
- // (unsigned)bus_address,
-// (int)length);
-
*bytes_read = 0;
*end = 0;
if (length == 0)
@@ -977,13 +951,10 @@ cleanup:
*end = 1;
}
-// printk("\tbytes_read=%i, end=%i, retval=%i, wait_retval=%i\n",
-// *bytes_read, *end, retval, wait_retval);
-
return retval;
}
-static int fmh_gpib_fifo_read(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int fmh_gpib_fifo_read(struct gpib_board *board, uint8_t *buffer, size_t length,
int *end, size_t *bytes_read)
{
struct fmh_priv *e_priv = board->private_data;
@@ -1152,7 +1123,7 @@ static gpib_interface_t fmh_gpib_pci_unaccel_interface = {
.return_to_local = fmh_gpib_return_to_local,
};
-irqreturn_t fmh_gpib_internal_interrupt(gpib_board_t *board)
+irqreturn_t fmh_gpib_internal_interrupt(struct gpib_board *board)
{
unsigned int status0, status1, status2, ext_status_1, fifo_status;
struct fmh_priv *priv = board->private_data;
@@ -1242,7 +1213,7 @@ irqreturn_t fmh_gpib_internal_interrupt(gpib_board_t *board)
irqreturn_t fmh_gpib_interrupt(int irq, void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
unsigned long flags;
irqreturn_t retval;
@@ -1252,7 +1223,7 @@ irqreturn_t fmh_gpib_interrupt(int irq, void *arg)
return retval;
}
-static int fmh_gpib_allocate_private(gpib_board_t *board)
+static int fmh_gpib_allocate_private(struct gpib_board *board)
{
struct fmh_priv *priv;
@@ -1269,7 +1240,7 @@ static int fmh_gpib_allocate_private(gpib_board_t *board)
return 0;
}
-static void fmh_gpib_generic_detach(gpib_board_t *board)
+static void fmh_gpib_generic_detach(struct gpib_board *board)
{
if (board->private_data) {
struct fmh_priv *e_priv = board->private_data;
@@ -1283,7 +1254,7 @@ static void fmh_gpib_generic_detach(gpib_board_t *board)
}
// generic part of attach functions
-static int fmh_gpib_generic_attach(gpib_board_t *board)
+static int fmh_gpib_generic_attach(struct gpib_board *board)
{
struct fmh_priv *e_priv;
struct nec7210_priv *nec_priv;
@@ -1303,7 +1274,7 @@ static int fmh_gpib_generic_attach(gpib_board_t *board)
return 0;
}
-static int fmh_gpib_config_dma(gpib_board_t *board, int output)
+static int fmh_gpib_config_dma(struct gpib_board *board, int output)
{
struct fmh_priv *e_priv = board->private_data;
struct dma_slave_config config;
@@ -1333,7 +1304,7 @@ static int fmh_gpib_config_dma(gpib_board_t *board, int output)
return dmaengine_slave_config(e_priv->dma_channel, &config);
}
-static int fmh_gpib_init(struct fmh_priv *e_priv, gpib_board_t *board, int handshake_mode)
+static int fmh_gpib_init(struct fmh_priv *e_priv, struct gpib_board *board, int handshake_mode)
{
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
unsigned long flags;
@@ -1376,11 +1347,11 @@ static int fmh_gpib_device_match(struct device *dev, const void *data)
if (config->serial_number)
return 0;
- dev_notice(dev, "matched: %s\n", of_node_full_name(dev_of_node((dev))));
+ dev_dbg(dev, "matched: %s\n", of_node_full_name(dev_of_node((dev))));
return 1;
}
-static int fmh_gpib_attach_impl(gpib_board_t *board, const gpib_board_config_t *config,
+static int fmh_gpib_attach_impl(struct gpib_board *board, const gpib_board_config_t *config,
unsigned int handshake_mode, int acquire_dma)
{
struct fmh_priv *e_priv;
@@ -1393,7 +1364,7 @@ static int fmh_gpib_attach_impl(gpib_board_t *board, const gpib_board_config_t *
board->dev = driver_find_device(&fmh_gpib_platform_driver.driver,
NULL, (const void *)config, &fmh_gpib_device_match);
if (!board->dev) {
- pr_err("No matching fmh_gpib_core device was found, attach failed.");
+ dev_err(board->gpib_dev, "No matching fmh_gpib_core device was found, attach failed.");
return -ENODEV;
}
// currently only used to mark the device as already attached
@@ -1409,7 +1380,7 @@ static int fmh_gpib_attach_impl(gpib_board_t *board, const gpib_board_config_t *
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gpib_control_status");
if (!res) {
- dev_err(board->dev, "Unable to locate mmio resource for cb7210 gpib\n");
+ dev_err(board->dev, "Unable to locate mmio resource\n");
return -ENODEV;
}
@@ -1422,13 +1393,13 @@ static int fmh_gpib_attach_impl(gpib_board_t *board, const gpib_board_config_t *
e_priv->gpib_iomem_res = res;
nec_priv->mmiobase = ioremap(e_priv->gpib_iomem_res->start,
- resource_size(e_priv->gpib_iomem_res));
+ resource_size(e_priv->gpib_iomem_res));
if (!nec_priv->mmiobase) {
- dev_err(board->dev, "Could not map I/O memory for gpib\n");
+ dev_err(board->dev, "Could not map I/O memory\n");
return -ENOMEM;
}
- dev_info(board->dev, "iobase %pr remapped to %p\n",
- e_priv->gpib_iomem_res, nec_priv->mmiobase);
+ dev_dbg(board->dev, "iobase %pr remapped to %p\n",
+ e_priv->gpib_iomem_res, nec_priv->mmiobase);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma_fifos");
if (!res) {
@@ -1448,14 +1419,13 @@ static int fmh_gpib_attach_impl(gpib_board_t *board, const gpib_board_config_t *
dev_err(board->dev, "Could not map I/O memory for fifos\n");
return -ENOMEM;
}
- dev_info(board->dev, "dma fifos 0x%lx remapped to %p, length=%ld\n",
- (unsigned long)e_priv->dma_port_res->start, e_priv->fifo_base,
- (unsigned long)resource_size(e_priv->dma_port_res));
+ dev_dbg(board->dev, "dma fifos 0x%lx remapped to %p, length=%ld\n",
+ (unsigned long)e_priv->dma_port_res->start, e_priv->fifo_base,
+ (unsigned long)resource_size(e_priv->dma_port_res));
irq = platform_get_irq(pdev, 0);
- pr_info("gpib: irq %d\n", irq);
if (irq < 0) {
- dev_err(board->dev, "fmh_gpib_gpib: request for IRQ failed\n");
+ dev_err(board->dev, "request for IRQ failed\n");
return -EBUSY;
}
retval = request_irq(irq, fmh_gpib_interrupt, IRQF_SHARED, pdev->name, board);
@@ -1484,17 +1454,17 @@ static int fmh_gpib_attach_impl(gpib_board_t *board, const gpib_board_config_t *
return fmh_gpib_init(e_priv, board, handshake_mode);
}
-int fmh_gpib_attach_holdoff_all(gpib_board_t *board, const gpib_board_config_t *config)
+int fmh_gpib_attach_holdoff_all(struct gpib_board *board, const gpib_board_config_t *config)
{
return fmh_gpib_attach_impl(board, config, HR_HLDA, 0);
}
-int fmh_gpib_attach_holdoff_end(gpib_board_t *board, const gpib_board_config_t *config)
+int fmh_gpib_attach_holdoff_end(struct gpib_board *board, const gpib_board_config_t *config)
{
return fmh_gpib_attach_impl(board, config, HR_HLDE, 1);
}
-void fmh_gpib_detach(gpib_board_t *board)
+void fmh_gpib_detach(struct gpib_board *board)
{
struct fmh_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -1527,7 +1497,7 @@ void fmh_gpib_detach(gpib_board_t *board)
fmh_gpib_generic_detach(board);
}
-static int fmh_gpib_pci_attach_impl(gpib_board_t *board, const gpib_board_config_t *config,
+static int fmh_gpib_pci_attach_impl(struct gpib_board *board, const gpib_board_config_t *config,
unsigned int handshake_mode)
{
struct fmh_priv *e_priv;
@@ -1546,7 +1516,7 @@ static int fmh_gpib_pci_attach_impl(gpib_board_t *board, const gpib_board_config
pci_device = gpib_pci_get_device(config, BOGUS_PCI_VENDOR_ID_FLUKE,
BOGUS_PCI_DEVICE_ID_FLUKE_BLADERUNNER, NULL);
if (!pci_device) {
- pr_err("No matching fmh_gpib_core pci device was found, attach failed.");
+ dev_err(board->gpib_dev, "No matching fmh_gpib_core pci device was found, attach failed.");
return -ENODEV;
}
board->dev = &pci_device->dev;
@@ -1563,34 +1533,32 @@ static int fmh_gpib_pci_attach_impl(gpib_board_t *board, const gpib_board_config
return -EIO;
}
e_priv->gpib_iomem_res = &pci_device->resource[gpib_control_status_pci_resource_index];
- e_priv->dma_port_res = &pci_device->resource[gpib_fifo_pci_resource_index];
+ e_priv->dma_port_res = &pci_device->resource[gpib_fifo_pci_resource_index];
nec_priv->mmiobase = ioremap(pci_resource_start(pci_device,
- gpib_control_status_pci_resource_index),
- pci_resource_len(pci_device,
- gpib_control_status_pci_resource_index));
- dev_info(board->dev, "base address for gpib control/status registers remapped to 0x%p\n",
- nec_priv->mmiobase);
+ gpib_control_status_pci_resource_index),
+ pci_resource_len(pci_device,
+ gpib_control_status_pci_resource_index));
+ dev_dbg(board->dev, "base address for gpib control/status registers remapped to 0x%p\n",
+ nec_priv->mmiobase);
if (e_priv->dma_port_res->flags & IORESOURCE_MEM) {
e_priv->fifo_base = ioremap(pci_resource_start(pci_device,
gpib_fifo_pci_resource_index),
pci_resource_len(pci_device,
gpib_fifo_pci_resource_index));
- dev_info(board->dev, "base address for gpib fifo registers remapped to 0x%p\n",
- e_priv->fifo_base);
+ dev_dbg(board->dev, "base address for gpib fifo registers remapped to 0x%p\n",
+ e_priv->fifo_base);
} else {
e_priv->fifo_base = NULL;
- dev_info(board->dev, "hardware has no gpib fifo registers.\n");
+ dev_dbg(board->dev, "hardware has no gpib fifo registers.\n");
}
if (pci_device->irq) {
retval = request_irq(pci_device->irq, fmh_gpib_interrupt, IRQF_SHARED,
KBUILD_MODNAME, board);
if (retval) {
- dev_err(board->dev,
- "cannot register interrupt handler err=%d\n",
- retval);
+ dev_err(board->dev, "cannot register interrupt handler err=%d\n", retval);
return retval;
}
}
@@ -1602,12 +1570,12 @@ static int fmh_gpib_pci_attach_impl(gpib_board_t *board, const gpib_board_config
return fmh_gpib_init(e_priv, board, handshake_mode);
}
-int fmh_gpib_pci_attach_holdoff_all(gpib_board_t *board, const gpib_board_config_t *config)
+int fmh_gpib_pci_attach_holdoff_all(struct gpib_board *board, const gpib_board_config_t *config)
{
return fmh_gpib_pci_attach_impl(board, config, HR_HLDA);
}
-int fmh_gpib_pci_attach_holdoff_end(gpib_board_t *board, const gpib_board_config_t *config)
+int fmh_gpib_pci_attach_holdoff_end(struct gpib_board *board, const gpib_board_config_t *config)
{
int retval;
struct fmh_priv *e_priv;
@@ -1615,13 +1583,13 @@ int fmh_gpib_pci_attach_holdoff_end(gpib_board_t *board, const gpib_board_config
retval = fmh_gpib_pci_attach_impl(board, config, HR_HLDE);
e_priv = board->private_data;
if (retval == 0 && e_priv && e_priv->supports_fifo_interrupts == 0) {
- pr_err("fmh_gpib: your fmh_gpib_core does not appear to support fifo interrupts. Try the fmh_gpib_pci_unaccel board type instead.");
+ dev_err(board->gpib_dev, "your fmh_gpib_core does not appear to support fifo interrupts. Try the fmh_gpib_pci_unaccel board type instead.");
return -EIO;
}
return retval;
}
-void fmh_gpib_pci_detach(gpib_board_t *board)
+void fmh_gpib_pci_detach(struct gpib_board *board)
{
struct fmh_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -1662,7 +1630,7 @@ MODULE_DEVICE_TABLE(of, fmh_gpib_of_match);
static struct platform_driver fmh_gpib_platform_driver = {
.driver = {
- .name = "fmh_gpib",
+ .name = DRV_NAME,
.owner = THIS_MODULE,
.of_match_table = fmh_gpib_of_match,
},
@@ -1681,7 +1649,7 @@ static const struct pci_device_id fmh_gpib_pci_match[] = {
MODULE_DEVICE_TABLE(pci, fmh_gpib_pci_match);
static struct pci_driver fmh_gpib_pci_driver = {
- .name = "fmh_gpib",
+ .name = DRV_NAME,
.id_table = fmh_gpib_pci_match,
.probe = &fmh_gpib_pci_probe
};
@@ -1692,37 +1660,37 @@ static int __init fmh_gpib_init_module(void)
result = platform_driver_register(&fmh_gpib_platform_driver);
if (result) {
- pr_err("fmh_gpib: platform_driver_register failed: error = %d\n", result);
+ pr_err("platform_driver_register failed: error = %d\n", result);
return result;
}
result = pci_register_driver(&fmh_gpib_pci_driver);
if (result) {
- pr_err("fmh_gpib: pci_register_driver failed: error = %d\n", result);
+ pr_err("pci_register_driver failed: error = %d\n", result);
goto err_pci_driver;
}
result = gpib_register_driver(&fmh_gpib_unaccel_interface, THIS_MODULE);
if (result) {
- pr_err("fmh_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_unaccel;
}
result = gpib_register_driver(&fmh_gpib_interface, THIS_MODULE);
if (result) {
- pr_err("fmh_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_interface;
}
result = gpib_register_driver(&fmh_gpib_pci_unaccel_interface, THIS_MODULE);
if (result) {
- pr_err("fmh_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_pci_unaccel;
}
result = gpib_register_driver(&fmh_gpib_pci_interface, THIS_MODULE);
if (result) {
- pr_err("fmh_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_pci;
}
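/*
 * Aside: each registration step above pairs with a goto label that unwinds
 * everything registered before it. The label bodies fall outside this
 * hunk; inferred from the goto ordering, the unwind ladder would look
 * roughly like this (assumed shape, not quoted from the patch):
 *
 *   err_pci:
 *           gpib_unregister_driver(&fmh_gpib_pci_unaccel_interface);
 *   err_pci_unaccel:
 *           gpib_unregister_driver(&fmh_gpib_interface);
 *   err_interface:
 *           gpib_unregister_driver(&fmh_gpib_unaccel_interface);
 *   err_unaccel:
 *           pci_unregister_driver(&fmh_gpib_pci_driver);
 *   err_pci_driver:
 *           platform_driver_unregister(&fmh_gpib_platform_driver);
 *           return result;
 */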
diff --git a/drivers/staging/gpib/gpio/gpib_bitbang.c b/drivers/staging/gpib/gpio/gpib_bitbang.c
index 828c99ea613f..86bdd381472a 100644
--- a/drivers/staging/gpib/gpio/gpib_bitbang.c
+++ b/drivers/staging/gpib/gpio/gpib_bitbang.c
@@ -25,6 +25,8 @@
* device support (non master operation)
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
#define NAME KBUILD_MODNAME
#define ENABLE_IRQ(IRQ, TYPE) irq_set_irq_type(IRQ, TYPE)
@@ -41,7 +43,7 @@
*/
#define dbg_printk(level, frm, ...) \
do { if (debug >= (level)) \
- pr_info("%s:%s - " frm, NAME, __func__, ## __VA_ARGS__); } \
+ dev_dbg(board->gpib_dev, frm, ## __VA_ARGS__); } \
while (0)
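/*
 * Aside: defining pr_fmt()/dev_fmt() once at the top of the file is what
 * lets the rest of this series drop hand-written "fmh_gpib:"/"gpib:"
 * prefixes from individual messages. pr_err() expands through pr_fmt(),
 * so every pr_*/dev_* call picks the prefix up automatically:
 *
 *   #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 *   pr_err("can't get IRQ\n");
 *   // expands to printk(KERN_ERR KBUILD_MODNAME ": " "can't get IRQ\n")
 *   // and prints, e.g.:  gpib_bitbang: can't get IRQ
 */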
#define LINVAL gpiod_get_value(DAV), \
@@ -316,13 +318,14 @@ struct bb_priv {
};
static inline long usec_diff(struct timespec64 *a, struct timespec64 *b);
-static void bb_buffer_print(unsigned char *buffer, size_t length, int cmd, int eoi);
+static void bb_buffer_print(struct gpib_board *board, unsigned char *buffer, size_t length,
+ int cmd, int eoi);
static void set_data_lines(u8 byte);
static u8 get_data_lines(void);
static void set_data_lines_input(void);
static void set_data_lines_output(void);
static inline int check_for_eos(struct bb_priv *priv, uint8_t byte);
-static void set_atn(struct bb_priv *priv, int atn_asserted);
+static void set_atn(struct gpib_board *board, int atn_asserted);
static inline void SET_DIR_WRITE(struct bb_priv *priv);
static inline void SET_DIR_READ(struct bb_priv *priv);
@@ -334,11 +337,7 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB helper functions for bitbanging I/O");
/**** global variables ****/
-#ifdef CONFIG_GPIB_DEBUG
-static int debug = 1;
-#else
static int debug;
-#endif
module_param(debug, int, 0644);
static char printable(char x)
@@ -354,7 +353,7 @@ static char printable(char x)
* *
***************************************************************************/
-static int bb_read(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int bb_read(struct gpib_board *board, uint8_t *buffer, size_t length,
int *end, size_t *bytes_read)
{
struct bb_priv *priv = board->private_data;
@@ -426,7 +425,7 @@ read_end:
static irqreturn_t bb_DAV_interrupt(int irq, void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct bb_priv *priv = board->private_data;
int val;
unsigned long flags;
@@ -492,7 +491,7 @@ dav_exit:
* *
***************************************************************************/
-static int bb_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int bb_write(struct gpib_board *board, uint8_t *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
unsigned long flags;
@@ -508,7 +507,7 @@ static int bb_write(gpib_board_t *board, uint8_t *buffer, size_t length,
board, mutex_is_locked(&board->user_mutex), length);
if (debug > 1)
- bb_buffer_print(buffer, length, priv->cmd, send_eoi);
+ bb_buffer_print(board, buffer, length, priv->cmd, send_eoi);
priv->count = 0;
priv->phase = 300;
@@ -550,7 +549,6 @@ static int bb_write(gpib_board_t *board, uint8_t *buffer, size_t length,
dbg_printk(1, "timeout after %zu/%zu at %d " LINFMT " eoi: %d\n",
priv->w_cnt, length, priv->phase, LINVAL, send_eoi);
} else {
- // dbg_printk(1,"written %zu\n", priv->w_cnt);
retval = priv->w_cnt;
}
} else {
@@ -582,7 +580,7 @@ write_end:
static irqreturn_t bb_NRFD_interrupt(int irq, void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct bb_priv *priv = board->private_data;
unsigned long flags;
int nrfd;
@@ -655,7 +653,7 @@ nrfd_exit:
static irqreturn_t bb_NDAC_interrupt(int irq, void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct bb_priv *priv = board->private_data;
unsigned long flags;
int ndac;
@@ -716,7 +714,7 @@ ndac_exit:
static irqreturn_t bb_SRQ_interrupt(int irq, void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
int val = gpiod_get_value(SRQ);
@@ -730,7 +728,7 @@ static irqreturn_t bb_SRQ_interrupt(int irq, void *arg)
return IRQ_HANDLED;
}
-static int bb_command(gpib_board_t *board, uint8_t *buffer,
+static int bb_command(struct gpib_board *board, uint8_t *buffer,
size_t length, size_t *bytes_written)
{
size_t ret;
@@ -811,7 +809,8 @@ static char *cmd_string[32] = {
"CFE" // 0x1f
};
-static void bb_buffer_print(unsigned char *buffer, size_t length, int cmd, int eoi)
+static void bb_buffer_print(struct gpib_board *board, unsigned char *buffer, size_t length,
+ int cmd, int eoi)
{
int i;
@@ -843,11 +842,13 @@ static void bb_buffer_print(unsigned char *buffer, size_t length, int cmd, int e
* STATUS Management *
* *
***************************************************************************/
-static void set_atn(struct bb_priv *priv, int atn_asserted)
+static void set_atn(struct gpib_board *board, int atn_asserted)
{
+ struct bb_priv *priv = board->private_data;
+
if (priv->listener_state != listener_idle &&
priv->talker_state != talker_idle) {
- dbg_printk(0, "listener/talker state machine conflict\n");
+ dev_err(board->gpib_dev, "listener/talker state machine conflict\n");
}
if (atn_asserted) {
if (priv->listener_state == listener_active)
@@ -866,22 +867,22 @@ static void set_atn(struct bb_priv *priv, int atn_asserted)
priv->atn_asserted = atn_asserted;
}
-static int bb_take_control(gpib_board_t *board, int synchronous)
+static int bb_take_control(struct gpib_board *board, int synchronous)
{
dbg_printk(2, "%d\n", synchronous);
- set_atn(board->private_data, 1);
+ set_atn(board, 1);
set_bit(CIC_NUM, &board->status);
return 0;
}
-static int bb_go_to_standby(gpib_board_t *board)
+static int bb_go_to_standby(struct gpib_board *board)
{
dbg_printk(2, "\n");
- set_atn(board->private_data, 0);
+ set_atn(board, 0);
return 0;
}
-static void bb_request_system_control(gpib_board_t *board, int request_control)
+static void bb_request_system_control(struct gpib_board *board, int request_control)
{
dbg_printk(2, "%d\n", request_control);
if (request_control) {
@@ -893,7 +894,7 @@ static void bb_request_system_control(gpib_board_t *board, int request_control)
}
}
-static void bb_interface_clear(gpib_board_t *board, int assert)
+static void bb_interface_clear(struct gpib_board *board, int assert)
{
struct bb_priv *priv = board->private_data;
@@ -907,7 +908,7 @@ static void bb_interface_clear(gpib_board_t *board, int assert)
}
}
-static void bb_remote_enable(gpib_board_t *board, int enable)
+static void bb_remote_enable(struct gpib_board *board, int enable)
{
dbg_printk(2, "%d\n", enable);
if (enable) {
@@ -919,7 +920,7 @@ static void bb_remote_enable(gpib_board_t *board, int enable)
}
}
-static int bb_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+static int bb_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
{
struct bb_priv *priv = board->private_data;
@@ -932,7 +933,7 @@ static int bb_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bi
return 0;
}
-static void bb_disable_eos(gpib_board_t *board)
+static void bb_disable_eos(struct gpib_board *board)
{
struct bb_priv *priv = board->private_data;
@@ -940,7 +941,7 @@ static void bb_disable_eos(gpib_board_t *board)
priv->eos_flags &= ~REOS;
}
-static unsigned int bb_update_status(gpib_board_t *board, unsigned int clear_mask)
+static unsigned int bb_update_status(struct gpib_board *board, unsigned int clear_mask)
{
struct bb_priv *priv = board->private_data;
@@ -971,14 +972,14 @@ static unsigned int bb_update_status(gpib_board_t *board, unsigned int clear_mas
return board->status;
}
-static int bb_primary_address(gpib_board_t *board, unsigned int address)
+static int bb_primary_address(struct gpib_board *board, unsigned int address)
{
dbg_printk(2, "%d\n", address);
board->pad = address;
return 0;
}
-static int bb_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+static int bb_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
dbg_printk(2, "%d %d\n", address, enable);
if (enable)
@@ -986,33 +987,29 @@ static int bb_secondary_address(gpib_board_t *board, unsigned int address, int e
return 0;
}
-static int bb_parallel_poll(gpib_board_t *board, uint8_t *result)
+static int bb_parallel_poll(struct gpib_board *board, uint8_t *result)
{
- dbg_printk(1, "%s\n", "not implemented");
- return -EPERM;
+ return -ENOENT;
}
-static void bb_parallel_poll_configure(gpib_board_t *board, uint8_t config)
+static void bb_parallel_poll_configure(struct gpib_board *board, uint8_t config)
{
- dbg_printk(1, "%s\n", "not implemented");
}
-static void bb_parallel_poll_response(gpib_board_t *board, int ist)
+static void bb_parallel_poll_response(struct gpib_board *board, int ist)
{
}
-static void bb_serial_poll_response(gpib_board_t *board, uint8_t status)
+static void bb_serial_poll_response(struct gpib_board *board, uint8_t status)
{
- dbg_printk(1, "%s\n", "not implemented");
}
-static uint8_t bb_serial_poll_status(gpib_board_t *board)
+static uint8_t bb_serial_poll_status(struct gpib_board *board)
{
- dbg_printk(1, "%s\n", "not implemented");
- return 0; // -ENOSYS;
+ return 0; // -ENOENT;
}
-static unsigned int bb_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+static int bb_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
struct bb_priv *priv = board->private_data;
@@ -1028,33 +1025,30 @@ static unsigned int bb_t1_delay(gpib_board_t *board, unsigned int nano_sec)
return priv->t1_delay;
}
-static void bb_return_to_local(gpib_board_t *board)
+static void bb_return_to_local(struct gpib_board *board)
{
- dbg_printk(1, "%s\n", "not implemented");
}
-static int bb_line_status(const gpib_board_t *board)
+static int bb_line_status(const struct gpib_board *board)
{
- int line_status = ValidALL;
-
-// dbg_printk(1,"\n");
+ int line_status = VALID_ALL;
if (gpiod_get_value(REN) == 0)
- line_status |= BusREN;
+ line_status |= BUS_REN;
if (gpiod_get_value(IFC) == 0)
- line_status |= BusIFC;
+ line_status |= BUS_IFC;
if (gpiod_get_value(NDAC) == 0)
- line_status |= BusNDAC;
+ line_status |= BUS_NDAC;
if (gpiod_get_value(NRFD) == 0)
- line_status |= BusNRFD;
+ line_status |= BUS_NRFD;
if (gpiod_get_value(DAV) == 0)
- line_status |= BusDAV;
+ line_status |= BUS_DAV;
if (gpiod_get_value(EOI) == 0)
- line_status |= BusEOI;
+ line_status |= BUS_EOI;
if (gpiod_get_value(_ATN) == 0)
- line_status |= BusATN;
+ line_status |= BUS_ATN;
if (gpiod_get_value(SRQ) == 0)
- line_status |= BusSRQ;
+ line_status |= BUS_SRQ;
dbg_printk(2, "status lines: %4x\n", line_status);
@@ -1067,7 +1061,7 @@ static int bb_line_status(const gpib_board_t *board)
* *
***************************************************************************/
-static int allocate_private(gpib_board_t *board)
+static int allocate_private(struct gpib_board *board)
{
board->private_data = kzalloc(sizeof(struct bb_priv), GFP_KERNEL);
if (!board->private_data)
@@ -1075,13 +1069,13 @@ static int allocate_private(gpib_board_t *board)
return 0;
}
-static void free_private(gpib_board_t *board)
+static void free_private(struct gpib_board *board)
{
kfree(board->private_data);
board->private_data = NULL;
}
-static int bb_get_irq(gpib_board_t *board, char *name,
+static int bb_get_irq(struct gpib_board *board, char *name,
struct gpio_desc *gpio, int *irq,
irq_handler_t handler, irq_handler_t thread_fn, unsigned long flags)
{
@@ -1091,11 +1085,11 @@ static int bb_get_irq(gpib_board_t *board, char *name,
*irq = gpiod_to_irq(gpio);
dbg_printk(2, "IRQ %s: %d\n", name, *irq);
if (*irq < 0) {
- dbg_printk(0, "gpib: can't get IRQ for %s\n", name);
+ dev_err(board->gpib_dev, "can't get IRQ for %s\n", name);
return -1;
}
if (request_threaded_irq(*irq, handler, thread_fn, flags, name, board)) {
- dbg_printk(0, "gpib: can't request IRQ for %s %d\n", name, *irq);
+ dev_err(board->gpib_dev, "can't request IRQ for %s %d\n", name, *irq);
*irq = 0;
return -1;
}
@@ -1103,7 +1097,7 @@ static int bb_get_irq(gpib_board_t *board, char *name,
return 0;
}
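/*
 * Aside: bb_get_irq() above maps a GPIO descriptor to its Linux IRQ and
 * installs a (possibly threaded) handler. The core calls it relies on,
 * sketched with hypothetical handler and name arguments:
 *
 *   irq = gpiod_to_irq(gpio);             // negative errno if unmappable
 *   ret = request_threaded_irq(irq, hard_handler, thread_fn,
 *                              IRQF_TRIGGER_FALLING, "my-gpib-line",
 *                              board);
 *   // pair with free_irq(irq, board) on teardown, as bb_free_irq() does
 */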
-static void bb_free_irq(gpib_board_t *board, int *irq, char *name)
+static void bb_free_irq(struct gpib_board *board, int *irq, char *name)
{
if (*irq) {
free_irq(*irq, board);
@@ -1124,7 +1118,7 @@ static void release_gpios(void)
}
}
-static int allocate_gpios(gpib_board_t *board)
+static int allocate_gpios(struct gpib_board *board)
{
int j, retval = 0;
bool error = false;
@@ -1163,8 +1157,8 @@ try_again:
gpiod_add_lookup_table(lookup_table);
goto try_again;
}
- dbg_printk(0, "Unable to obtain gpio descriptor for pin %d error %ld\n",
- gpios_vector[j], PTR_ERR(desc));
+ dev_err(board->gpib_dev, "Unable to obtain gpio descriptor for pin %d error %ld\n",
+ gpios_vector[j], PTR_ERR(desc));
error = true;
break;
}
@@ -1182,7 +1176,7 @@ try_again:
return retval;
}
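/*
 * Aside: the try_again retry above works by registering a board-specific
 * GPIO lookup table and calling gpiod_get() a second time. A minimal
 * lookup table, with a hypothetical chip label and pin number:
 */
static struct gpiod_lookup_table example_lookup = {
	.dev_id = NULL,			/* match any consumer device */
	.table = {
		/* chip label, hw pin, con_id, index, flags */
		GPIO_LOOKUP_IDX("pinctrl-bcm2835", 17, NULL, 0,
				GPIO_ACTIVE_HIGH),
		{ }
	},
};
/* gpiod_add_lookup_table(&example_lookup); then retry gpiod_get() */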
-static void bb_detach(gpib_board_t *board)
+static void bb_detach(struct gpib_board *board)
{
struct bb_priv *priv = board->private_data;
@@ -1212,7 +1206,7 @@ static void bb_detach(gpib_board_t *board)
free_private(board);
}
-static int bb_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int bb_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct bb_priv *priv;
int retval = 0;
@@ -1253,7 +1247,7 @@ static int bb_attach(gpib_board_t *board, const gpib_board_config_t *config)
gpios_vector[&(DC) - &all_descriptors[0]] = -1;
gpios_vector[&(ACT_LED) - &all_descriptors[0]] = -1;
} else {
- dbg_printk(0, "Unrecognized pin mapping.\n");
+ dev_err(board->gpib_dev, "Unrecognized pin map %s\n", pin_map);
goto bb_attach_fail;
}
dbg_printk(0, "Using pin map \"%s\" %s\n", pin_map, (sn7516x) ?
@@ -1344,19 +1338,15 @@ static int __init bb_init_module(void)
int result = gpib_register_driver(&bb_interface, THIS_MODULE);
if (result) {
- pr_err("gpib_bitbang: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
return result;
}
- dbg_printk(0, "module loaded with pin map \"%s\"%s\n",
- pin_map, (sn7516x_used) ? " and SN7516x driver support" : "");
return 0;
}
static void __exit bb_exit_module(void)
{
- dbg_printk(0, "module unloaded!");
-
gpib_unregister_driver(&bb_interface);
}
diff --git a/drivers/staging/gpib/hp_82335/hp82335.c b/drivers/staging/gpib/hp_82335/hp82335.c
index 700d1ba029d2..fd23b1cb80f9 100644
--- a/drivers/staging/gpib/hp_82335/hp82335.c
+++ b/drivers/staging/gpib/hp_82335/hp82335.c
@@ -8,6 +8,10 @@
* implement recovery from bus errors (if necessary)
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define DRV_NAME KBUILD_MODNAME
+
#include "hp82335.h"
#include <linux/io.h>
#include <linux/ioport.h>
@@ -20,153 +24,155 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB driver for HP 82335 interface cards");
-static int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config);
-
-static void hp82335_detach(gpib_board_t *board);
+static int hp82335_attach(struct gpib_board *board, const gpib_board_config_t *config);
+static void hp82335_detach(struct gpib_board *board);
+static irqreturn_t hp82335_interrupt(int irq, void *arg);
// wrappers for interface functions
-int hp82335_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read)
+static int hp82335_read(struct gpib_board *board, uint8_t *buffer, size_t length,
+ int *end, size_t *bytes_read)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_read(board, &priv->tms9914_priv, buffer, length, end, bytes_read);
}
-int hp82335_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written)
+static int hp82335_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
+ size_t *bytes_written)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_write(board, &priv->tms9914_priv, buffer, length, send_eoi, bytes_written);
}
-int hp82335_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written)
+static int hp82335_command(struct gpib_board *board, uint8_t *buffer, size_t length,
+ size_t *bytes_written)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_command(board, &priv->tms9914_priv, buffer, length, bytes_written);
}
-int hp82335_take_control(gpib_board_t *board, int synchronous)
+static int hp82335_take_control(struct gpib_board *board, int synchronous)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_take_control(board, &priv->tms9914_priv, synchronous);
}
-int hp82335_go_to_standby(gpib_board_t *board)
+static int hp82335_go_to_standby(struct gpib_board *board)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_go_to_standby(board, &priv->tms9914_priv);
}
-void hp82335_request_system_control(gpib_board_t *board, int request_control)
+static void hp82335_request_system_control(struct gpib_board *board, int request_control)
{
struct hp82335_priv *priv = board->private_data;
tms9914_request_system_control(board, &priv->tms9914_priv, request_control);
}
-void hp82335_interface_clear(gpib_board_t *board, int assert)
+static void hp82335_interface_clear(struct gpib_board *board, int assert)
{
struct hp82335_priv *priv = board->private_data;
tms9914_interface_clear(board, &priv->tms9914_priv, assert);
}
-void hp82335_remote_enable(gpib_board_t *board, int enable)
+static void hp82335_remote_enable(struct gpib_board *board, int enable)
{
struct hp82335_priv *priv = board->private_data;
tms9914_remote_enable(board, &priv->tms9914_priv, enable);
}
-int hp82335_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+static int hp82335_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_enable_eos(board, &priv->tms9914_priv, eos_byte, compare_8_bits);
}
-void hp82335_disable_eos(gpib_board_t *board)
+static void hp82335_disable_eos(struct gpib_board *board)
{
struct hp82335_priv *priv = board->private_data;
tms9914_disable_eos(board, &priv->tms9914_priv);
}
-unsigned int hp82335_update_status(gpib_board_t *board, unsigned int clear_mask)
+static unsigned int hp82335_update_status(struct gpib_board *board, unsigned int clear_mask)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_update_status(board, &priv->tms9914_priv, clear_mask);
}
-int hp82335_primary_address(gpib_board_t *board, unsigned int address)
+static int hp82335_primary_address(struct gpib_board *board, unsigned int address)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_primary_address(board, &priv->tms9914_priv, address);
}
-int hp82335_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+static int hp82335_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_secondary_address(board, &priv->tms9914_priv, address, enable);
}
-int hp82335_parallel_poll(gpib_board_t *board, uint8_t *result)
+static int hp82335_parallel_poll(struct gpib_board *board, uint8_t *result)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_parallel_poll(board, &priv->tms9914_priv, result);
}
-void hp82335_parallel_poll_configure(gpib_board_t *board, uint8_t config)
+static void hp82335_parallel_poll_configure(struct gpib_board *board, uint8_t config)
{
struct hp82335_priv *priv = board->private_data;
tms9914_parallel_poll_configure(board, &priv->tms9914_priv, config);
}
-void hp82335_parallel_poll_response(gpib_board_t *board, int ist)
+static void hp82335_parallel_poll_response(struct gpib_board *board, int ist)
{
struct hp82335_priv *priv = board->private_data;
tms9914_parallel_poll_response(board, &priv->tms9914_priv, ist);
}
-void hp82335_serial_poll_response(gpib_board_t *board, uint8_t status)
+static void hp82335_serial_poll_response(struct gpib_board *board, uint8_t status)
{
struct hp82335_priv *priv = board->private_data;
tms9914_serial_poll_response(board, &priv->tms9914_priv, status);
}
-static uint8_t hp82335_serial_poll_status(gpib_board_t *board)
+static uint8_t hp82335_serial_poll_status(struct gpib_board *board)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_serial_poll_status(board, &priv->tms9914_priv);
}
-static int hp82335_line_status(const gpib_board_t *board)
+static int hp82335_line_status(const struct gpib_board *board)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_line_status(board, &priv->tms9914_priv);
}
-static unsigned int hp82335_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+static int hp82335_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_t1_delay(board, &priv->tms9914_priv, nano_sec);
}
-void hp82335_return_to_local(gpib_board_t *board)
+static void hp82335_return_to_local(struct gpib_board *board)
{
struct hp82335_priv *priv = board->private_data;
@@ -201,7 +207,7 @@ static gpib_interface_t hp82335_interface = {
.return_to_local = hp82335_return_to_local,
};
-int hp82335_allocate_private(gpib_board_t *board)
+static int hp82335_allocate_private(struct gpib_board *board)
{
board->private_data = kzalloc(sizeof(struct hp82335_priv), GFP_KERNEL);
if (!board->private_data)
@@ -209,7 +215,7 @@ int hp82335_allocate_private(gpib_board_t *board)
return 0;
}
-void hp82335_free_private(gpib_board_t *board)
+static void hp82335_free_private(struct gpib_board *board)
{
kfree(board->private_data);
board->private_data = NULL;
@@ -237,7 +243,7 @@ static void hp82335_clear_interrupt(struct hp82335_priv *hp_priv)
writeb(0, tms_priv->mmiobase + HPREG_INTR_CLEAR);
}
-int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int hp82335_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct hp82335_priv *hp_priv;
struct tms9914_priv *tms_priv;
@@ -272,26 +278,23 @@ int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
case 0xfc000:
break;
default:
- pr_err("hp82335: invalid base io address 0x%u\n", config->ibbase);
+ dev_err(board->gpib_dev, "invalid base io address 0x%x\n", config->ibbase);
return -EINVAL;
}
if (!request_mem_region(upper_iomem_base, hp82335_upper_iomem_size, "hp82335")) {
- pr_err("hp82335: failed to allocate io memory region 0x%lx-0x%lx\n",
- upper_iomem_base, upper_iomem_base + hp82335_upper_iomem_size - 1);
+ dev_err(board->gpib_dev, "failed to allocate io memory region 0x%lx-0x%lx\n",
+ upper_iomem_base, upper_iomem_base + hp82335_upper_iomem_size - 1);
return -EBUSY;
}
hp_priv->raw_iobase = upper_iomem_base;
tms_priv->mmiobase = ioremap(upper_iomem_base, hp82335_upper_iomem_size);
- pr_info("hp82335: upper half of 82335 iomem region 0x%lx remapped to 0x%p\n",
- hp_priv->raw_iobase, tms_priv->mmiobase);
- retval = request_irq(config->ibirq, hp82335_interrupt, 0, "hp82335", board);
+ retval = request_irq(config->ibirq, hp82335_interrupt, 0, DRV_NAME, board);
if (retval) {
- pr_err("hp82335: can't request IRQ %d\n", config->ibirq);
+ dev_err(board->gpib_dev, "can't request IRQ %d\n", config->ibirq);
return retval;
}
hp_priv->irq = config->ibirq;
- pr_info("hp82335: IRQ %d\n", config->ibirq);
tms9914_board_reset(tms_priv);
@@ -304,7 +307,7 @@ int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
return 0;
}
-void hp82335_detach(gpib_board_t *board)
+static void hp82335_detach(struct gpib_board *board)
{
struct hp82335_priv *hp_priv = board->private_data;
struct tms9914_priv *tms_priv;
@@ -329,7 +332,7 @@ static int __init hp82335_init_module(void)
int result = gpib_register_driver(&hp82335_interface, THIS_MODULE);
if (result) {
- pr_err("hp82335: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
return result;
}
@@ -348,10 +351,10 @@ module_exit(hp82335_exit_module);
* GPIB interrupt service routines
*/
-irqreturn_t hp82335_interrupt(int irq, void *arg)
+static irqreturn_t hp82335_interrupt(int irq, void *arg)
{
int status1, status2;
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct hp82335_priv *priv = board->private_data;
unsigned long flags;
irqreturn_t retval;
diff --git a/drivers/staging/gpib/hp_82335/hp82335.h b/drivers/staging/gpib/hp_82335/hp82335.h
index 4b185d7c5188..0c252a712ec9 100644
--- a/drivers/staging/gpib/hp_82335/hp82335.h
+++ b/drivers/staging/gpib/hp_82335/hp82335.h
@@ -17,36 +17,6 @@ struct hp82335_priv {
unsigned long raw_iobase;
};
-// interface functions
-int hp82335_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read);
-int hp82335_write(gpib_board_t *board, uint8_t *buffer, size_t length,
- int send_eoi, size_t *bytes_written);
-int hp82335_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written);
-int hp82335_take_control(gpib_board_t *board, int synchronous);
-int hp82335_go_to_standby(gpib_board_t *board);
-void hp82335_request_system_control(gpib_board_t *board, int request_control);
-void hp82335_interface_clear(gpib_board_t *board, int assert);
-void hp82335_remote_enable(gpib_board_t *board, int enable);
-int hp82335_enable_eos(gpib_board_t *board, uint8_t eos_byte, int
- compare_8_bits);
-void hp82335_disable_eos(gpib_board_t *board);
-unsigned int hp82335_update_status(gpib_board_t *board, unsigned int clear_mask);
-int hp82335_primary_address(gpib_board_t *board, unsigned int address);
-int hp82335_secondary_address(gpib_board_t *board, unsigned int address, int
- enable);
-int hp82335_parallel_poll(gpib_board_t *board, uint8_t *result);
-void hp82335_parallel_poll_configure(gpib_board_t *board, uint8_t config);
-void hp82335_parallel_poll_response(gpib_board_t *board, int ist);
-void hp82335_serial_poll_response(gpib_board_t *board, uint8_t status);
-void hp82335_return_to_local(gpib_board_t *board);
-
-// interrupt service routines
-irqreturn_t hp82335_interrupt(int irq, void *arg);
-
-// utility functions
-int hp82335_allocate_private(gpib_board_t *board);
-void hp82335_free_private(gpib_board_t *board);
-
// size of io memory region used
static const int hp82335_rom_size = 0x2000;
static const int hp82335_upper_iomem_size = 0x2000;
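/*
 * Aside: with the extern declarations above removed, every hp82335
 * interface function becomes a file-local wrapper that resolves the
 * private data and delegates to the tms9914 core. The shape of each
 * wrapper, using a hypothetical tms9914_example_op() for illustration:
 */
static int hp82335_example_op(struct gpib_board *board)
{
	struct hp82335_priv *priv = board->private_data;

	return tms9914_example_op(board, &priv->tms9914_priv);
}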
diff --git a/drivers/staging/gpib/hp_82341/hp_82341.c b/drivers/staging/gpib/hp_82341/hp_82341.c
index 0ddae295912f..f52e673dc869 100644
--- a/drivers/staging/gpib/hp_82341/hp_82341.c
+++ b/drivers/staging/gpib/hp_82341/hp_82341.c
@@ -6,6 +6,10 @@
* copyright : (C) 2002, 2005 by Frank Mori Hess *
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define DRV_NAME KBUILD_MODNAME
+
#include "hp_82341.h"
#include <linux/delay.h>
#include <linux/ioport.h>
@@ -16,9 +20,17 @@
#include <linux/isapnp.h>
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("GPIB driver for hp 82341a/b/c/d boards");
+
+static unsigned short read_and_clear_event_status(struct gpib_board *board);
+static void set_transfer_counter(struct hp_82341_priv *hp_priv, int count);
+static int read_transfer_counter(struct hp_82341_priv *hp_priv);
+static int hp_82341_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
+ size_t *bytes_written);
+static irqreturn_t hp_82341_interrupt(int irq, void *arg);
-int hp_82341_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
- size_t *bytes_read)
+static int hp_82341_accel_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
+ size_t *bytes_read)
{
struct hp_82341_priv *hp_priv = board->private_data;
struct tms9914_priv *tms_priv = &hp_priv->tms9914_priv;
@@ -50,7 +62,7 @@ int hp_82341_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int
retval = tms9914_read(board, tms_priv, buffer, 1, end, &num_bytes);
*bytes_read += num_bytes;
if (retval < 0)
- pr_err("tms9914_read failed retval=%i\n", retval);
+ dev_err(board->gpib_dev, "tms9914_read failed retval=%i\n", retval);
if (retval < 0 || *end)
return retval;
++buffer;
@@ -86,7 +98,6 @@ int hp_82341_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int
test_bit(DEV_CLEAR_BN, &tms_priv->state) ||
test_bit(TIMO_NUM, &board->status));
if (retval) {
- pr_warn("%s: read wait interrupted\n", __func__);
retval = -ERESTARTSYS;
break;
}
@@ -111,12 +122,10 @@ int hp_82341_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int
tms_priv->holdoff_active = 1;
}
if (test_bit(TIMO_NUM, &board->status)) {
- pr_debug("%s: minor %i: read timed out\n", __FILE__, board->minor);
retval = -ETIMEDOUT;
break;
}
if (test_bit(DEV_CLEAR_BN, &tms_priv->state)) {
- pr_warn("%s: device clear interrupted read\n", __FILE__);
retval = -EINTR;
break;
}
@@ -138,7 +147,7 @@ int hp_82341_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int
return 0;
}
-static int restart_write_fifo(gpib_board_t *board, struct hp_82341_priv *hp_priv)
+static int restart_write_fifo(struct gpib_board *board, struct hp_82341_priv *hp_priv)
{
struct tms9914_priv *tms_priv = &hp_priv->tms9914_priv;
@@ -149,7 +158,7 @@ static int restart_write_fifo(gpib_board_t *board, struct hp_82341_priv *hp_priv
//restart doesn't work if data holdoff is in effect
status = tms9914_line_status(board, tms_priv);
- if ((status & BusNRFD) == 0) {
+ if ((status & BUS_NRFD) == 0) {
outb(RESTART_STREAM_BIT, hp_priv->iobase[0] + STREAM_STATUS_REG);
return 0;
}
@@ -163,8 +172,8 @@ static int restart_write_fifo(gpib_board_t *board, struct hp_82341_priv *hp_priv
return 0;
}
-int hp_82341_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length,
- int send_eoi, size_t *bytes_written)
+static int hp_82341_accel_write(struct gpib_board *board, uint8_t *buffer, size_t length,
+ int send_eoi, size_t *bytes_written)
{
struct hp_82341_priv *hp_priv = board->private_data;
struct tms9914_priv *tms_priv = &hp_priv->tms9914_priv;
@@ -204,7 +213,7 @@ int hp_82341_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length,
outb(ENABLE_TI_BUFFER_BIT, hp_priv->iobase[3] + BUFFER_CONTROL_REG);
retval = restart_write_fifo(board, hp_priv);
if (retval < 0) {
- pr_err("hp82341: failed to restart write stream\n");
+ dev_err(board->gpib_dev, "failed to restart write stream\n");
break;
}
retval = wait_event_interruptible(board->wait,
@@ -216,17 +225,14 @@ int hp_82341_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length,
outb(0, hp_priv->iobase[3] + BUFFER_CONTROL_REG);
*bytes_written += block_size - read_transfer_counter(hp_priv);
if (retval) {
- pr_warn("%s: write wait interrupted\n", __FILE__);
retval = -ERESTARTSYS;
break;
}
if (test_bit(TIMO_NUM, &board->status)) {
- pr_debug("%s: minor %i: write timed out\n", __FILE__, board->minor);
retval = -ETIMEDOUT;
break;
}
if (test_bit(DEV_CLEAR_BN, &tms_priv->state)) {
- pr_warn("%s: device clear interrupted write\n", __FILE__);
retval = -EINTR;
break;
}
@@ -244,48 +250,50 @@ int hp_82341_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length,
return 0;
}
-static int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config);
+static int hp_82341_attach(struct gpib_board *board, const gpib_board_config_t *config);
-static void hp_82341_detach(gpib_board_t *board);
+static void hp_82341_detach(struct gpib_board *board);
// wrappers for interface functions
-int hp_82341_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read)
+static int hp_82341_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
+ size_t *bytes_read)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_read(board, &priv->tms9914_priv, buffer, length, end, bytes_read);
}
-int hp_82341_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written)
+static int hp_82341_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
+ size_t *bytes_written)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_write(board, &priv->tms9914_priv, buffer, length, send_eoi, bytes_written);
}
-int hp_82341_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written)
+static int hp_82341_command(struct gpib_board *board, uint8_t *buffer, size_t length,
+ size_t *bytes_written)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_command(board, &priv->tms9914_priv, buffer, length, bytes_written);
}
-int hp_82341_take_control(gpib_board_t *board, int synchronous)
+static int hp_82341_take_control(struct gpib_board *board, int synchronous)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_take_control(board, &priv->tms9914_priv, synchronous);
}
-int hp_82341_go_to_standby(gpib_board_t *board)
+static int hp_82341_go_to_standby(struct gpib_board *board)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_go_to_standby(board, &priv->tms9914_priv);
}
-void hp_82341_request_system_control(gpib_board_t *board, int request_control)
+static void hp_82341_request_system_control(struct gpib_board *board, int request_control)
{
struct hp_82341_priv *priv = board->private_data;
@@ -297,105 +305,105 @@ void hp_82341_request_system_control(gpib_board_t *board, int request_control)
tms9914_request_system_control(board, &priv->tms9914_priv, request_control);
}
-void hp_82341_interface_clear(gpib_board_t *board, int assert)
+static void hp_82341_interface_clear(struct gpib_board *board, int assert)
{
struct hp_82341_priv *priv = board->private_data;
tms9914_interface_clear(board, &priv->tms9914_priv, assert);
}
-void hp_82341_remote_enable(gpib_board_t *board, int enable)
+static void hp_82341_remote_enable(struct gpib_board *board, int enable)
{
struct hp_82341_priv *priv = board->private_data;
tms9914_remote_enable(board, &priv->tms9914_priv, enable);
}
-int hp_82341_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+static int hp_82341_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_enable_eos(board, &priv->tms9914_priv, eos_byte, compare_8_bits);
}
-void hp_82341_disable_eos(gpib_board_t *board)
+static void hp_82341_disable_eos(struct gpib_board *board)
{
struct hp_82341_priv *priv = board->private_data;
tms9914_disable_eos(board, &priv->tms9914_priv);
}
-unsigned int hp_82341_update_status(gpib_board_t *board, unsigned int clear_mask)
+static unsigned int hp_82341_update_status(struct gpib_board *board, unsigned int clear_mask)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_update_status(board, &priv->tms9914_priv, clear_mask);
}
-int hp_82341_primary_address(gpib_board_t *board, unsigned int address)
+static int hp_82341_primary_address(struct gpib_board *board, unsigned int address)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_primary_address(board, &priv->tms9914_priv, address);
}
-int hp_82341_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+static int hp_82341_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_secondary_address(board, &priv->tms9914_priv, address, enable);
}
-int hp_82341_parallel_poll(gpib_board_t *board, uint8_t *result)
+static int hp_82341_parallel_poll(struct gpib_board *board, uint8_t *result)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_parallel_poll(board, &priv->tms9914_priv, result);
}
-void hp_82341_parallel_poll_configure(gpib_board_t *board, uint8_t config)
+static void hp_82341_parallel_poll_configure(struct gpib_board *board, uint8_t config)
{
struct hp_82341_priv *priv = board->private_data;
tms9914_parallel_poll_configure(board, &priv->tms9914_priv, config);
}
-void hp_82341_parallel_poll_response(gpib_board_t *board, int ist)
+static void hp_82341_parallel_poll_response(struct gpib_board *board, int ist)
{
struct hp_82341_priv *priv = board->private_data;
tms9914_parallel_poll_response(board, &priv->tms9914_priv, ist);
}
-void hp_82341_serial_poll_response(gpib_board_t *board, uint8_t status)
+static void hp_82341_serial_poll_response(struct gpib_board *board, uint8_t status)
{
struct hp_82341_priv *priv = board->private_data;
tms9914_serial_poll_response(board, &priv->tms9914_priv, status);
}
-static uint8_t hp_82341_serial_poll_status(gpib_board_t *board)
+static uint8_t hp_82341_serial_poll_status(struct gpib_board *board)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_serial_poll_status(board, &priv->tms9914_priv);
}
-static int hp_82341_line_status(const gpib_board_t *board)
+static int hp_82341_line_status(const struct gpib_board *board)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_line_status(board, &priv->tms9914_priv);
}
-static unsigned int hp_82341_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+static int hp_82341_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_t1_delay(board, &priv->tms9914_priv, nano_sec);
}
-void hp_82341_return_to_local(gpib_board_t *board)
+static void hp_82341_return_to_local(struct gpib_board *board)
{
struct hp_82341_priv *priv = board->private_data;
@@ -457,7 +465,7 @@ static gpib_interface_t hp_82341_interface = {
.return_to_local = hp_82341_return_to_local,
};
-int hp_82341_allocate_private(gpib_board_t *board)
+static int hp_82341_allocate_private(struct gpib_board *board)
{
board->private_data = kzalloc(sizeof(struct hp_82341_priv), GFP_KERNEL);
if (!board->private_data)
@@ -465,7 +473,7 @@ int hp_82341_allocate_private(gpib_board_t *board)
return 0;
}
-void hp_82341_free_private(gpib_board_t *board)
+static void hp_82341_free_private(struct gpib_board *board)
{
kfree(board->private_data);
board->private_data = NULL;
@@ -486,21 +494,21 @@ static int hp_82341_find_isapnp_board(struct pnp_dev **dev)
*dev = pnp_find_dev(NULL, ISAPNP_VENDOR('H', 'W', 'P'),
ISAPNP_FUNCTION(0x1411), NULL);
if (!*dev || !(*dev)->card) {
- pr_err("hp_82341: failed to find isapnp board\n");
+ pr_err("failed to find isapnp board\n");
return -ENODEV;
}
if (pnp_device_attach(*dev) < 0) {
- pr_err("hp_82341: board already active, skipping\n");
+ pr_err("board already active, skipping\n");
return -EBUSY;
}
if (pnp_activate_dev(*dev) < 0) {
pnp_device_detach(*dev);
- pr_err("hp_82341: failed to activate() atgpib/tnt, aborting\n");
+ pr_err("failed to activate(), aborting\n");
return -EAGAIN;
}
if (!pnp_port_valid(*dev, 0) || !pnp_irq_valid(*dev, 0)) {
pnp_device_detach(*dev);
- pr_err("hp_82341: invalid port or irq for atgpib/tnt, aborting\n");
+ pr_err("invalid port or irq, aborting\n");
return -ENOMEM;
}
return 0;
@@ -521,7 +529,7 @@ static int xilinx_ready(struct hp_82341_priv *hp_priv)
else
return 0;
default:
- pr_err("hp_82341: %s: bug! unknown hw_version\n", __func__);
+ pr_err("bug! unknown hw_version\n");
break;
}
return 0;
@@ -541,7 +549,7 @@ static int xilinx_done(struct hp_82341_priv *hp_priv)
else
return 0;
default:
- pr_err("hp_82341: %s: bug! unknown hw_version\n", __func__);
+ pr_err("bug! unknown hw_version\n");
break;
}
return 0;
@@ -562,7 +570,7 @@ static int irq_valid(struct hp_82341_priv *hp_priv, int irq)
case 15:
return 1;
default:
- pr_err("hp_82341: invalid irq=%i for 82341C, irq must be 3, 5, 7, 9, 10, 11, 12, or 15.\n",
+ pr_err("invalid irq=%i for 82341C, irq must be 3, 5, 7, 9, 10, 11, 12, or 15.\n",
irq);
return 0;
}
@@ -570,7 +578,7 @@ static int irq_valid(struct hp_82341_priv *hp_priv, int irq)
case HW_VERSION_82341D:
return 1;
default:
- pr_err("hp_82341: %s: bug! unknown hw_version\n", __func__);
+ pr_err("bug! unknown hw_version\n");
break;
}
return 0;
@@ -592,7 +600,7 @@ static int hp_82341_load_firmware_array(struct hp_82341_priv *hp_priv,
usleep_range(10, 15);
}
if (j == timeout) {
- pr_err("hp_82341: timed out waiting for Xilinx ready.\n");
+ pr_err("timed out waiting for Xilinx ready.\n");
return -ETIMEDOUT;
}
outb(firmware_data[i], hp_priv->iobase[0] + XILINX_DATA_REG);
@@ -605,7 +613,7 @@ static int hp_82341_load_firmware_array(struct hp_82341_priv *hp_priv,
usleep_range(10, 15);
}
if (j == timeout) {
- pr_err("hp_82341: timed out waiting for Xilinx done.\n");
+ pr_err("timed out waiting for Xilinx done.\n");
return -ETIMEDOUT;
}
return 0;
@@ -616,27 +624,27 @@ static int hp_82341_load_firmware(struct hp_82341_priv *hp_priv, const gpib_boar
if (config->init_data_length == 0) {
if (xilinx_done(hp_priv))
return 0;
- pr_err("hp_82341: board needs be initialized with firmware upload.\n"
+ pr_err("board needs be initialized with firmware upload.\n"
"\tUse the --init-data option of gpib_config.\n");
return -EINVAL;
}
switch (hp_priv->hw_version) {
case HW_VERSION_82341C:
if (config->init_data_length != hp_82341c_firmware_length) {
- pr_err("hp_82341: bad firmware length=%i for 82341c (expected %i).\n",
+ pr_err("bad firmware length=%i for 82341c (expected %i).\n",
config->init_data_length, hp_82341c_firmware_length);
return -EINVAL;
}
break;
case HW_VERSION_82341D:
if (config->init_data_length != hp_82341d_firmware_length) {
- pr_err("hp_82341: bad firmware length=%i for 82341d (expected %i).\n",
+ pr_err("bad firmware length=%i for 82341d (expected %i).\n",
config->init_data_length, hp_82341d_firmware_length);
return -EINVAL;
}
break;
default:
- pr_err("hp_82341: %s: bug! unknown hw_version\n", __func__);
+ pr_err("bug! unknown hw_version\n");
break;
}
return hp_82341_load_firmware_array(hp_priv, config->init_data, config->init_data_length);
@@ -678,7 +686,7 @@ static int clear_xilinx(struct hp_82341_priv *hp_priv)
return 0;
}
-int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int hp_82341_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct hp_82341_priv *hp_priv;
struct tms9914_priv *tms_priv;
@@ -714,13 +722,12 @@ int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config)
hp_priv->hw_version = HW_VERSION_82341C;
hp_priv->io_region_offset = 0x400;
}
- pr_info("hp_82341: base io 0x%u\n", iobase);
for (i = 0; i < hp_82341_num_io_regions; ++i) {
start_addr = iobase + i * hp_priv->io_region_offset;
- if (!request_region(start_addr, hp_82341_region_iosize, "hp_82341")) {
- pr_err("hp_82341: failed to allocate io ports 0x%lx-0x%lx\n",
- start_addr,
- start_addr + hp_82341_region_iosize - 1);
+ if (!request_region(start_addr, hp_82341_region_iosize, DRV_NAME)) {
+ dev_err(board->gpib_dev, "failed to allocate io ports 0x%x-0x%x\n",
+ start_addr,
+ start_addr + hp_82341_region_iosize - 1);
return -EIO;
}
hp_priv->iobase[i] = start_addr;
@@ -730,7 +737,7 @@ int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config)
retval = isapnp_cfg_begin(hp_priv->pnp_dev->card->number,
hp_priv->pnp_dev->number);
if (retval < 0) {
- pr_err("hp_82341: isapnp_cfg_begin returned error\n");
+ dev_err(board->gpib_dev, "isapnp_cfg_begin returned error\n");
return retval;
}
isapnp_write_byte(PIO_DIRECTION_REG, HP_82341D_XILINX_READY_BIT |
@@ -746,12 +753,11 @@ int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config)
return retval;
if (irq_valid(hp_priv, irq) == 0)
return -EINVAL;
- if (request_irq(irq, hp_82341_interrupt, 0, "hp_82341", board)) {
- pr_err("hp_82341: failed to allocate IRQ %d\n", irq);
+ if (request_irq(irq, hp_82341_interrupt, 0, DRV_NAME, board)) {
+ dev_err(board->gpib_dev, "failed to allocate IRQ %d\n", irq);
return -EIO;
}
hp_priv->irq = irq;
- pr_info("hp_82341: IRQ %d\n", irq);
hp_priv->config_control_bits &= ~IRQ_SELECT_MASK;
hp_priv->config_control_bits |= IRQ_SELECT_BITS(irq);
outb(hp_priv->config_control_bits, hp_priv->iobase[0] + CONFIG_CONTROL_STATUS_REG);
@@ -768,13 +774,11 @@ int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config)
hp_priv->iobase[0] + EVENT_STATUS_REG);
tms9914_online(board, tms_priv);
- pr_info("hp_82341: board id %x %x %x %x\n", inb(hp_priv->iobase[1] + ID0_REG),
- inb(hp_priv->iobase[1] + ID1_REG), inb(hp_priv->iobase[2] + ID2_REG),
- inb(hp_priv->iobase[2] + ID3_REG));
+
return 0;
}
-void hp_82341_detach(gpib_board_t *board)
+static void hp_82341_detach(struct gpib_board *board)
{
struct hp_82341_priv *hp_priv = board->private_data;
struct tms9914_priv *tms_priv;
@@ -799,11 +803,14 @@ void hp_82341_detach(gpib_board_t *board)
hp_82341_free_private(board);
}
+#if 0
+/* unused, will be needed when the driver is turned into a pnp_driver */
static const struct pnp_device_id hp_82341_pnp_table[] = {
{.id = "HWP1411"},
{.id = ""}
};
MODULE_DEVICE_TABLE(pnp, hp_82341_pnp_table);
+#endif
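
Per the comment above, the ID table is parked until the driver is converted to a proper pnp_driver. A hedged sketch of what that registration could look like (hp_82341_pnp_probe() and hp_82341_pnp_remove() are hypothetical names; struct pnp_driver and pnp_register_driver() are the stock <linux/pnp.h> interfaces):

static int hp_82341_pnp_probe(struct pnp_dev *dev,
			      const struct pnp_device_id *dev_id)
{
	/* the io/irq discovery now done in hp_82341_attach() would move here */
	return 0;
}

static void hp_82341_pnp_remove(struct pnp_dev *dev)
{
	/* mirror of probe: release the io regions, free the irq */
}

static struct pnp_driver hp_82341_pnp_driver = {
	.name		= DRV_NAME,
	.id_table	= hp_82341_pnp_table,
	.probe		= hp_82341_pnp_probe,
	.remove		= hp_82341_pnp_remove,
};

/* module init would then call pnp_register_driver(&hp_82341_pnp_driver); */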
static int __init hp_82341_init_module(void)
{
@@ -811,13 +818,13 @@ static int __init hp_82341_init_module(void)
ret = gpib_register_driver(&hp_82341_unaccel_interface, THIS_MODULE);
if (ret) {
- pr_err("hp_82341: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
return ret;
}
ret = gpib_register_driver(&hp_82341_interface, THIS_MODULE);
if (ret) {
- pr_err("hp_82341: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
gpib_unregister_driver(&hp_82341_unaccel_interface);
return ret;
}
@@ -837,7 +844,7 @@ module_exit(hp_82341_exit_module);
/*
* GPIB interrupt service routines
*/
-unsigned short read_and_clear_event_status(gpib_board_t *board)
+static unsigned short read_and_clear_event_status(struct gpib_board *board)
{
struct hp_82341_priv *hp_priv = board->private_data;
unsigned long flags;
@@ -850,10 +857,10 @@ unsigned short read_and_clear_event_status(gpib_board_t *board)
return status;
}
-irqreturn_t hp_82341_interrupt(int irq, void *arg)
+static irqreturn_t hp_82341_interrupt(int irq, void *arg)
{
int status1, status2;
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct hp_82341_priv *hp_priv = board->private_data;
struct tms9914_priv *tms_priv = &hp_priv->tms9914_priv;
unsigned long flags;
@@ -862,7 +869,6 @@ irqreturn_t hp_82341_interrupt(int irq, void *arg)
spin_lock_irqsave(&board->spinlock, flags);
event_status = inb(hp_priv->iobase[0] + EVENT_STATUS_REG);
-// printk("hp_82341: interrupt event_status=0x%x\n", event_status);
if (event_status & INTERRUPT_PENDING_EVENT_BIT)
retval = IRQ_HANDLED;
//write-clear status bits
@@ -877,15 +883,12 @@ irqreturn_t hp_82341_interrupt(int irq, void *arg)
status1 = read_byte(tms_priv, ISR0);
status2 = read_byte(tms_priv, ISR1);
tms9914_interrupt_have_status(board, tms_priv, status1, status2);
-/* printk("hp_82341: interrupt status1=0x%x status2=0x%x\n",
- * status1, status2);
- */
}
spin_unlock_irqrestore(&board->spinlock, flags);
return retval;
}
-int read_transfer_counter(struct hp_82341_priv *hp_priv)
+static int read_transfer_counter(struct hp_82341_priv *hp_priv)
{
int lo, mid, value;
@@ -896,7 +899,7 @@ int read_transfer_counter(struct hp_82341_priv *hp_priv)
return value;
}
-void set_transfer_counter(struct hp_82341_priv *hp_priv, int count)
+static void set_transfer_counter(struct hp_82341_priv *hp_priv, int count)
{
int complement = -count;
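
The counter registers hold the transfer length in two's complement, so the hardware can simply count up toward zero. A worked example (values illustrative, assuming a 16-bit counter split across the low and mid registers read back in read_transfer_counter() above):

/*
 * For a full 0xffe-byte FIFO transfer, complement = -0xffe, which is
 * 0xf002 in 16-bit two's complement: low byte 0x02, mid byte 0xf0.
 * read_transfer_counter() reverses the encoding, so the write path's
 * "block_size - read_transfer_counter(hp_priv)" yields the number of
 * bytes actually moved.
 */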
diff --git a/drivers/staging/gpib/hp_82341/hp_82341.h b/drivers/staging/gpib/hp_82341/hp_82341.h
index 0065ebd9747c..370a3d4576eb 100644
--- a/drivers/staging/gpib/hp_82341/hp_82341.h
+++ b/drivers/staging/gpib/hp_82341/hp_82341.h
@@ -26,42 +26,6 @@ struct hp_82341_priv {
enum hp_82341_hardware_version hw_version;
};
-
-// interface functions
-int hp_82341_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
- size_t *bytes_read);
-int hp_82341_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written);
-int hp_82341_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
- size_t *bytes_read);
-int hp_82341_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written);
-int hp_82341_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written);
-int hp_82341_take_control(gpib_board_t *board, int synchronous);
-int hp_82341_go_to_standby(gpib_board_t *board);
-void hp_82341_request_system_control(gpib_board_t *board, int request_control);
-void hp_82341_interface_clear(gpib_board_t *board, int assert);
-void hp_82341_remote_enable(gpib_board_t *board, int enable);
-int hp_82341_enable_eos(gpib_board_t *board, uint8_t eos_byte, int
- compare_8_bits);
-void hp_82341_disable_eos(gpib_board_t *board);
-unsigned int hp_82341_update_status(gpib_board_t *board, unsigned int clear_mask);
-int hp_82341_primary_address(gpib_board_t *board, unsigned int address);
-int hp_82341_secondary_address(gpib_board_t *board, unsigned int address, int
- enable);
-int hp_82341_parallel_poll(gpib_board_t *board, uint8_t *result);
-void hp_82341_parallel_poll_configure(gpib_board_t *board, uint8_t config);
-void hp_82341_parallel_poll_response(gpib_board_t *board, int ist);
-void hp_82341_serial_poll_response(gpib_board_t *board, uint8_t status);
-void hp_82341_return_to_local(gpib_board_t *board);
-
-// interrupt service routines
-irqreturn_t hp_82341_interrupt(int irq, void *arg);
-
-// utility functions
-int hp_82341_allocate_private(gpib_board_t *board);
-void hp_82341_free_private(gpib_board_t *board);
-
static const int hp_82341_region_iosize = 0x8;
static const int hp_82341_num_io_regions = 4;
static const int hp_82341_fifo_size = 0xffe;
@@ -199,7 +163,3 @@ enum hp_82341d_pnp_pio_bits {
HP_82341D_LEGACY_MODE_BIT = 0x4,
HP_82341D_NOT_PROG_BIT = 0x8, // clear to reinitialize xilinx
};
-
-unsigned short read_and_clear_event_status(gpib_board_t *board);
-int read_transfer_counter(struct hp_82341_priv *hp_priv);
-void set_transfer_counter(struct hp_82341_priv *hp_priv, int count);
diff --git a/drivers/staging/gpib/include/gpibP.h b/drivers/staging/gpib/include/gpibP.h
index d35fdd391f7e..0c71a038e444 100644
--- a/drivers/staging/gpib/include/gpibP.h
+++ b/drivers/staging/gpib/include/gpibP.h
@@ -26,13 +26,13 @@ struct pci_dev *gpib_pci_get_subsys(const gpib_board_config_t *config, unsigned
unsigned int device_id, unsigned int ss_vendor,
unsigned int ss_device, struct pci_dev *from);
unsigned int num_gpib_events(const gpib_event_queue_t *queue);
-int push_gpib_event(gpib_board_t *board, short event_type);
-int pop_gpib_event(gpib_board_t *board, gpib_event_queue_t *queue, short *event_type);
-int gpib_request_pseudo_irq(gpib_board_t *board, irqreturn_t (*handler)(int, void *));
-void gpib_free_pseudo_irq(gpib_board_t *board);
+int push_gpib_event(struct gpib_board *board, short event_type);
+int pop_gpib_event(struct gpib_board *board, gpib_event_queue_t *queue, short *event_type);
+int gpib_request_pseudo_irq(struct gpib_board *board, irqreturn_t (*handler)(int, void *));
+void gpib_free_pseudo_irq(struct gpib_board *board);
int gpib_match_device_path(struct device *dev, const char *device_path_in);
-extern gpib_board_t board_array[GPIB_MAX_NUM_BOARDS];
+extern struct gpib_board board_array[GPIB_MAX_NUM_BOARDS];
extern struct list_head registered_drivers;
diff --git a/drivers/staging/gpib/include/gpib_proto.h b/drivers/staging/gpib/include/gpib_proto.h
index 1499f954210b..2c7dfc02f517 100644
--- a/drivers/staging/gpib/include/gpib_proto.h
+++ b/drivers/staging/gpib/include/gpib_proto.h
@@ -10,11 +10,11 @@ int ibclose(struct inode *inode, struct file *file);
long ibioctl(struct file *filep, unsigned int cmd, unsigned long arg);
int osInit(void);
void osReset(void);
-void os_start_timer(gpib_board_t *board, unsigned int usec_timeout);
-void os_remove_timer(gpib_board_t *board);
+void os_start_timer(struct gpib_board *board, unsigned int usec_timeout);
+void os_remove_timer(struct gpib_board *board);
void osSendEOI(void);
-void init_gpib_board(gpib_board_t *board);
+void init_gpib_board(struct gpib_board *board);
static inline unsigned long usec_to_jiffies(unsigned int usec)
{
unsigned long usec_per_jiffy = 1000000 / HZ;
@@ -22,35 +22,35 @@ static inline unsigned long usec_to_jiffies(unsigned int usec)
return 1 + (usec + usec_per_jiffy - 1) / usec_per_jiffy;
};
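
usec_to_jiffies() rounds the timeout up to whole jiffies and then adds one more, so the timer can never expire before the requested interval. A quick worked example (HZ = 250 is assumed purely for illustration):

/*
 * HZ == 250  =>  usec_per_jiffy == 1000000 / 250 == 4000
 * usec_to_jiffies(10000) == 1 + (10000 + 3999) / 4000 == 1 + 3 == 4
 * i.e. a 10 ms timeout becomes 4 jiffies rather than risking an early
 * expiry with a timer that starts partway through the current jiffy.
 */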
-int serial_poll_all(gpib_board_t *board, unsigned int usec_timeout);
+int serial_poll_all(struct gpib_board *board, unsigned int usec_timeout);
void init_gpib_descriptor(gpib_descriptor_t *desc);
-int dvrsp(gpib_board_t *board, unsigned int pad, int sad,
+int dvrsp(struct gpib_board *board, unsigned int pad, int sad,
unsigned int usec_timeout, uint8_t *result);
-int ibAPWait(gpib_board_t *board, int pad);
-int ibAPrsp(gpib_board_t *board, int padsad, char *spb);
-void ibAPE(gpib_board_t *board, int pad, int v);
-int ibcac(gpib_board_t *board, int sync, int fallback_to_async);
-int ibcmd(gpib_board_t *board, uint8_t *buf, size_t length, size_t *bytes_written);
-int ibgts(gpib_board_t *board);
-int ibonline(gpib_board_t *board);
-int iboffline(gpib_board_t *board);
-int iblines(const gpib_board_t *board, short *lines);
-int ibrd(gpib_board_t *board, uint8_t *buf, size_t length, int *end_flag, size_t *bytes_read);
-int ibrpp(gpib_board_t *board, uint8_t *buf);
-int ibrsv2(gpib_board_t *board, uint8_t status_byte, int new_reason_for_service);
-void ibrsc(gpib_board_t *board, int request_control);
-int ibsic(gpib_board_t *board, unsigned int usec_duration);
-int ibsre(gpib_board_t *board, int enable);
-int ibpad(gpib_board_t *board, unsigned int addr);
-int ibsad(gpib_board_t *board, int addr);
-int ibeos(gpib_board_t *board, int eos, int eosflags);
-int ibwait(gpib_board_t *board, int wait_mask, int clear_mask, int set_mask,
+int ibAPWait(struct gpib_board *board, int pad);
+int ibAPrsp(struct gpib_board *board, int padsad, char *spb);
+void ibAPE(struct gpib_board *board, int pad, int v);
+int ibcac(struct gpib_board *board, int sync, int fallback_to_async);
+int ibcmd(struct gpib_board *board, uint8_t *buf, size_t length, size_t *bytes_written);
+int ibgts(struct gpib_board *board);
+int ibonline(struct gpib_board *board);
+int iboffline(struct gpib_board *board);
+int iblines(const struct gpib_board *board, short *lines);
+int ibrd(struct gpib_board *board, uint8_t *buf, size_t length, int *end_flag, size_t *bytes_read);
+int ibrpp(struct gpib_board *board, uint8_t *buf);
+int ibrsv2(struct gpib_board *board, uint8_t status_byte, int new_reason_for_service);
+void ibrsc(struct gpib_board *board, int request_control);
+int ibsic(struct gpib_board *board, unsigned int usec_duration);
+int ibsre(struct gpib_board *board, int enable);
+int ibpad(struct gpib_board *board, unsigned int addr);
+int ibsad(struct gpib_board *board, int addr);
+int ibeos(struct gpib_board *board, int eos, int eosflags);
+int ibwait(struct gpib_board *board, int wait_mask, int clear_mask, int set_mask,
int *status, unsigned long usec_timeout, gpib_descriptor_t *desc);
-int ibwrt(gpib_board_t *board, uint8_t *buf, size_t cnt, int send_eoi, size_t *bytes_written);
-int ibstatus(gpib_board_t *board);
-int general_ibstatus(gpib_board_t *board, const gpib_status_queue_t *device,
+int ibwrt(struct gpib_board *board, uint8_t *buf, size_t cnt, int send_eoi, size_t *bytes_written);
+int ibstatus(struct gpib_board *board);
+int general_ibstatus(struct gpib_board *board, const gpib_status_queue_t *device,
int clear_mask, int set_mask, gpib_descriptor_t *desc);
-int io_timed_out(gpib_board_t *board);
-int ibppc(gpib_board_t *board, uint8_t configuration);
+int io_timed_out(struct gpib_board *board);
+int ibppc(struct gpib_board *board, uint8_t configuration);
#endif /* GPIB_PROTO_INCLUDED */
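
Taken together, these prototypes form the controller-side core API. A hedged sketch of how a caller might chain them for a simple device read (example_device_read() is hypothetical; the addressing command bytes and error handling are elided for brevity):

static int example_device_read(struct gpib_board *board, uint8_t *buf,
			       size_t len, size_t *nread)
{
	int end_flag;

	ibcac(board, 1, 0);	/* take control (assert ATN) synchronously */
	/* ibcmd() would send the talk/listen addressing bytes here */
	ibgts(board);		/* go to standby so the talker can source data */
	return ibrd(board, buf, len, &end_flag, nread);
}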
diff --git a/drivers/staging/gpib/include/gpib_types.h b/drivers/staging/gpib/include/gpib_types.h
index b41781a55a60..2d9b9be683f8 100644
--- a/drivers/staging/gpib/include/gpib_types.h
+++ b/drivers/staging/gpib/include/gpib_types.h
@@ -23,7 +23,7 @@
#include <linux/interrupt.h>
typedef struct gpib_interface_struct gpib_interface_t;
-typedef struct gpib_board_struct gpib_board_t;
+struct gpib_board;
/* config parameters that are only used by driver attach functions */
typedef struct {
@@ -55,9 +55,9 @@ struct gpib_interface_struct {
/* name of board */
char *name;
/* attach() initializes board and allocates resources */
- int (*attach)(gpib_board_t *board, const gpib_board_config_t *config);
+ int (*attach)(struct gpib_board *board, const gpib_board_config_t *config);
/* detach() shuts down board and frees resources */
- void (*detach)(gpib_board_t *board);
+ void (*detach)(struct gpib_board *board);
/* read() should read at most 'length' bytes from the bus into
* 'buffer'. It should return when it fills the buffer or
* encounters an END (EOI and/or EOS if appropriate). It should set 'end'
@@ -68,19 +68,19 @@ struct gpib_interface_struct {
* return indicates error.
* nbytes returns number of bytes read
*/
- int (*read)(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
+ int (*read)(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
size_t *bytes_read);
/* write() should write 'length' bytes from buffer to the bus.
* If the boolean value send_eoi is nonzero, then EOI should
* be sent along with the last byte. Returns zero on success or a
* negative value on error; the count is reported through 'bytes_written'.
*/
- int (*write)(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
+ int (*write)(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
size_t *bytes_written);
/* command() writes the command bytes in 'buffer' to the bus
* Returns zero on success or negative value on error.
*/
- int (*command)(gpib_board_t *board, uint8_t *buffer, size_t length,
+ int (*command)(struct gpib_board *board, uint8_t *buffer, size_t length,
size_t *bytes_written);
/* Take control (assert ATN). If 'asyncronous' is nonzero, take
* control asynchronously (assert ATN immediately without waiting
@@ -88,54 +88,54 @@ struct gpib_interface_struct {
* until board becomes controller in charge. Returns zero on success,
* nonzero on error.
*/
- int (*take_control)(gpib_board_t *board, int asyncronous);
+ int (*take_control)(struct gpib_board *board, int asyncronous);
/* De-assert ATN. Returns zero on success, nonzero on error.
*/
- int (*go_to_standby)(gpib_board_t *board);
+ int (*go_to_standby)(struct gpib_board *board);
/* request/release control of the IFC and REN lines (system controller) */
- void (*request_system_control)(gpib_board_t *board, int request_control);
+ void (*request_system_control)(struct gpib_board *board, int request_control);
/* Asserts or de-asserts 'interface clear' (IFC) depending on
* boolean value of 'assert'
*/
- void (*interface_clear)(gpib_board_t *board, int assert);
+ void (*interface_clear)(struct gpib_board *board, int assert);
/* Sends remote enable command if 'enable' is nonzero, disables remote mode
* if 'enable' is zero
*/
- void (*remote_enable)(gpib_board_t *board, int enable);
+ void (*remote_enable)(struct gpib_board *board, int enable);
/* enable END for reads, when byte 'eos' is received. If
* 'compare_8_bits' is nonzero, then all 8 bits are compared
* with the eos bytes. Otherwise only the 7 least significant
* bits are compared.
*/
- int (*enable_eos)(gpib_board_t *board, uint8_t eos, int compare_8_bits);
+ int (*enable_eos)(struct gpib_board *board, uint8_t eos, int compare_8_bits);
/* disable END on eos byte (END on EOI only)*/
- void (*disable_eos)(gpib_board_t *board);
+ void (*disable_eos)(struct gpib_board *board);
/* configure parallel poll */
- void (*parallel_poll_configure)(gpib_board_t *board, uint8_t configuration);
+ void (*parallel_poll_configure)(struct gpib_board *board, uint8_t configuration);
/* conduct parallel poll */
- int (*parallel_poll)(gpib_board_t *board, uint8_t *result);
+ int (*parallel_poll)(struct gpib_board *board, uint8_t *result);
/* set/clear ist (individual status bit) */
- void (*parallel_poll_response)(gpib_board_t *board, int ist);
+ void (*parallel_poll_response)(struct gpib_board *board, int ist);
/* select local parallel poll configuration mode PP2 versus remote PP1 */
- void (*local_parallel_poll_mode)(gpib_board_t *board, int local);
+ void (*local_parallel_poll_mode)(struct gpib_board *board, int local);
/* Returns current status of the bus lines. Should be set to
* NULL if your board does not have the ability to query the
* state of the bus lines.
*/
- int (*line_status)(const gpib_board_t *board);
+ int (*line_status)(const struct gpib_board *board);
/* updates and returns the board's current status.
* The meaning of the bits are specified in gpib_user.h
* in the IBSTA section. The driver does not need to
* worry about setting the CMPL, END, TIMO, or ERR bits.
*/
- unsigned int (*update_status)(gpib_board_t *board, unsigned int clear_mask);
+ unsigned int (*update_status)(struct gpib_board *board, unsigned int clear_mask);
/* Sets primary address 0-30 for gpib interface card.
*/
- int (*primary_address)(gpib_board_t *board, unsigned int address);
+ int (*primary_address)(struct gpib_board *board, unsigned int address);
/* Sets and enables, or disables secondary address 0-30
* for gpib interface card.
*/
- int (*secondary_address)(gpib_board_t *board, unsigned int address,
+ int (*secondary_address)(struct gpib_board *board, unsigned int address,
int enable);
/* Sets the byte the board should send in response to a serial poll.
* This function should also start or stop requests for service via
@@ -149,7 +149,7 @@ struct gpib_interface_struct {
* by IEEE 488.2 section 11.3.3.4.3 "Allowed Coupled Control of
* STB, reqt, and reqf".
*/
- void (*serial_poll_response)(gpib_board_t *board, uint8_t status_byte);
+ void (*serial_poll_response)(struct gpib_board *board, uint8_t status_byte);
/* Sets the byte the board should send in response to a serial poll.
* This function should also request service via IEEE 488.2 reqt/reqf
* based on MSS (bit 6 of the status_byte) and new_reason_for_service.
@@ -164,15 +164,15 @@ struct gpib_interface_struct {
* If this method is left NULL by the driver, then the user library
* function ibrsv2 will not work.
*/
- void (*serial_poll_response2)(gpib_board_t *board, uint8_t status_byte,
+ void (*serial_poll_response2)(struct gpib_board *board, uint8_t status_byte,
int new_reason_for_service);
/* returns the byte the board will send in response to a serial poll.
*/
- uint8_t (*serial_poll_status)(gpib_board_t *board);
+ uint8_t (*serial_poll_status)(struct gpib_board *board);
/* adjust T1 delay */
- unsigned int (*t1_delay)(gpib_board_t *board, unsigned int nano_sec);
+ int (*t1_delay)(struct gpib_board *board, unsigned int nano_sec);
/* go to local mode */
- void (*return_to_local)(gpib_board_t *board);
+ void (*return_to_local)(struct gpib_board *board);
/* board does not support 7 bit eos comparisons */
unsigned no_7_bit_eos : 1;
/* skip check for listeners before trying to send command bytes */
@@ -198,7 +198,7 @@ static inline void init_event_queue(gpib_event_queue_t *queue)
struct gpib_pseudo_irq {
struct timer_list timer;
irqreturn_t (*handler)(int irq, void *arg);
- gpib_board_t *board;
+ struct gpib_board *board;
atomic_t active;
};
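
The pseudo-irq is a timer that masquerades as an interrupt handler, letting boards without a usable IRQ line poll their status registers. A usage sketch under that assumption (example_poll is a hypothetical handler; the struct above stores the board pointer that the timer presumably hands back as 'arg'):

static irqreturn_t example_poll(int irq, void *arg)
{
	struct gpib_board *board = arg;

	/* read the chip's status registers and wake board->wait,
	 * exactly as a hardware ISR would
	 */
	return IRQ_HANDLED;
}

/* attach():  gpib_request_pseudo_irq(board, example_poll);  */
/* detach():  gpib_free_pseudo_irq(board);                   */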
@@ -216,11 +216,11 @@ typedef struct gpib_interface_list_struct {
struct module *module;
} gpib_interface_list_t;
-/* One gpib_board_t is allocated for each physical board in the computer.
+/* One struct gpib_board is allocated for each physical board in the computer.
* It provides storage for variables local to each board, and interface
* functions for performing operations on the board
*/
-struct gpib_board_struct {
+struct gpib_board {
/* functions used by this board */
gpib_interface_t *interface;
/* Pointer to module whose use count we should increment when
diff --git a/drivers/staging/gpib/include/nec7210.h b/drivers/staging/gpib/include/nec7210.h
index ca998c4a84bf..069896456230 100644
--- a/drivers/staging/gpib/include/nec7210.h
+++ b/drivers/staging/gpib/include/nec7210.h
@@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
/***************************************************************************
* copyright : (C) 2002 by Frank Mori Hess
@@ -78,48 +78,48 @@ enum {
};
// interface functions
-int nec7210_read(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buffer,
+int nec7210_read(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
size_t length, int *end, size_t *bytes_read);
-int nec7210_write(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buffer,
+int nec7210_write(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
size_t length, int send_eoi, size_t *bytes_written);
-int nec7210_command(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buffer,
+int nec7210_command(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
size_t length, size_t *bytes_written);
-int nec7210_take_control(gpib_board_t *board, struct nec7210_priv *priv, int syncronous);
-int nec7210_go_to_standby(gpib_board_t *board, struct nec7210_priv *priv);
-void nec7210_request_system_control(gpib_board_t *board,
+int nec7210_take_control(struct gpib_board *board, struct nec7210_priv *priv, int syncronous);
+int nec7210_go_to_standby(struct gpib_board *board, struct nec7210_priv *priv);
+void nec7210_request_system_control(struct gpib_board *board,
struct nec7210_priv *priv, int request_control);
-void nec7210_interface_clear(gpib_board_t *board, struct nec7210_priv *priv, int assert);
-void nec7210_remote_enable(gpib_board_t *board, struct nec7210_priv *priv, int enable);
-int nec7210_enable_eos(gpib_board_t *board, struct nec7210_priv *priv, uint8_t eos_bytes,
+void nec7210_interface_clear(struct gpib_board *board, struct nec7210_priv *priv, int assert);
+void nec7210_remote_enable(struct gpib_board *board, struct nec7210_priv *priv, int enable);
+int nec7210_enable_eos(struct gpib_board *board, struct nec7210_priv *priv, uint8_t eos_bytes,
int compare_8_bits);
-void nec7210_disable_eos(gpib_board_t *board, struct nec7210_priv *priv);
-unsigned int nec7210_update_status(gpib_board_t *board, struct nec7210_priv *priv,
+void nec7210_disable_eos(struct gpib_board *board, struct nec7210_priv *priv);
+unsigned int nec7210_update_status(struct gpib_board *board, struct nec7210_priv *priv,
unsigned int clear_mask);
-unsigned int nec7210_update_status_nolock(gpib_board_t *board, struct nec7210_priv *priv);
-int nec7210_primary_address(const gpib_board_t *board,
+unsigned int nec7210_update_status_nolock(struct gpib_board *board, struct nec7210_priv *priv);
+int nec7210_primary_address(const struct gpib_board *board,
struct nec7210_priv *priv, unsigned int address);
-int nec7210_secondary_address(const gpib_board_t *board, struct nec7210_priv *priv,
+int nec7210_secondary_address(const struct gpib_board *board, struct nec7210_priv *priv,
unsigned int address, int enable);
-int nec7210_parallel_poll(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *result);
-void nec7210_serial_poll_response(gpib_board_t *board, struct nec7210_priv *priv, uint8_t status);
-void nec7210_parallel_poll_configure(gpib_board_t *board,
+int nec7210_parallel_poll(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *result);
+void nec7210_serial_poll_response(struct gpib_board *board, struct nec7210_priv *priv, uint8_t status);
+void nec7210_parallel_poll_configure(struct gpib_board *board,
struct nec7210_priv *priv, unsigned int configuration);
-void nec7210_parallel_poll_response(gpib_board_t *board,
+void nec7210_parallel_poll_response(struct gpib_board *board,
struct nec7210_priv *priv, int ist);
-uint8_t nec7210_serial_poll_status(gpib_board_t *board,
+uint8_t nec7210_serial_poll_status(struct gpib_board *board,
struct nec7210_priv *priv);
-unsigned int nec7210_t1_delay(gpib_board_t *board,
- struct nec7210_priv *priv, unsigned int nano_sec);
-void nec7210_return_to_local(const gpib_board_t *board, struct nec7210_priv *priv);
+int nec7210_t1_delay(struct gpib_board *board,
+ struct nec7210_priv *priv, unsigned int nano_sec);
+void nec7210_return_to_local(const struct gpib_board *board, struct nec7210_priv *priv);
// utility functions
-void nec7210_board_reset(struct nec7210_priv *priv, const gpib_board_t *board);
-void nec7210_board_online(struct nec7210_priv *priv, const gpib_board_t *board);
+void nec7210_board_reset(struct nec7210_priv *priv, const struct gpib_board *board);
+void nec7210_board_online(struct nec7210_priv *priv, const struct gpib_board *board);
unsigned int nec7210_set_reg_bits(struct nec7210_priv *priv, unsigned int reg,
unsigned int mask, unsigned int bits);
-void nec7210_set_handshake_mode(gpib_board_t *board, struct nec7210_priv *priv, int mode);
-void nec7210_release_rfd_holdoff(gpib_board_t *board, struct nec7210_priv *priv);
-uint8_t nec7210_read_data_in(gpib_board_t *board, struct nec7210_priv *priv, int *end);
+void nec7210_set_handshake_mode(struct gpib_board *board, struct nec7210_priv *priv, int mode);
+void nec7210_release_rfd_holdoff(struct gpib_board *board, struct nec7210_priv *priv);
+uint8_t nec7210_read_data_in(struct gpib_board *board, struct nec7210_priv *priv, int *end);
// wrappers for io functions
uint8_t nec7210_ioport_read_byte(struct nec7210_priv *priv, unsigned int register_num);
@@ -134,8 +134,8 @@ void nec7210_locking_iomem_write_byte(struct nec7210_priv *priv, uint8_t data,
unsigned int register_num);
// interrupt service routine
-irqreturn_t nec7210_interrupt(gpib_board_t *board, struct nec7210_priv *priv);
-irqreturn_t nec7210_interrupt_have_status(gpib_board_t *board,
+irqreturn_t nec7210_interrupt(struct gpib_board *board, struct nec7210_priv *priv);
+irqreturn_t nec7210_interrupt_have_status(struct gpib_board *board,
struct nec7210_priv *priv, int status1, int status2);
#endif //_NEC7210_H
diff --git a/drivers/staging/gpib/include/tms9914.h b/drivers/staging/gpib/include/tms9914.h
index d8c8d1c9b131..424c95ad85c6 100644
--- a/drivers/staging/gpib/include/tms9914.h
+++ b/drivers/staging/gpib/include/tms9914.h
@@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
/***************************************************************************
* copyright : (C) 2002 by Frank Mori Hess
@@ -79,47 +79,47 @@ enum {
};
// interface functions
-int tms9914_read(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *buffer,
+int tms9914_read(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer,
size_t length, int *end, size_t *bytes_read);
-int tms9914_write(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *buffer,
+int tms9914_write(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer,
size_t length, int send_eoi, size_t *bytes_written);
-int tms9914_command(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *buffer,
+int tms9914_command(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer,
size_t length, size_t *bytes_written);
-int tms9914_take_control(gpib_board_t *board, struct tms9914_priv *priv, int syncronous);
+int tms9914_take_control(struct gpib_board *board, struct tms9914_priv *priv, int syncronous);
/* alternate version of tms9914_take_control which works around buggy tcs
* implementation.
*/
-int tms9914_take_control_workaround(gpib_board_t *board, struct tms9914_priv *priv,
+int tms9914_take_control_workaround(struct gpib_board *board, struct tms9914_priv *priv,
int syncronous);
-int tms9914_go_to_standby(gpib_board_t *board, struct tms9914_priv *priv);
-void tms9914_request_system_control(gpib_board_t *board, struct tms9914_priv *priv,
+int tms9914_go_to_standby(struct gpib_board *board, struct tms9914_priv *priv);
+void tms9914_request_system_control(struct gpib_board *board, struct tms9914_priv *priv,
int request_control);
-void tms9914_interface_clear(gpib_board_t *board, struct tms9914_priv *priv, int assert);
-void tms9914_remote_enable(gpib_board_t *board, struct tms9914_priv *priv, int enable);
-int tms9914_enable_eos(gpib_board_t *board, struct tms9914_priv *priv, uint8_t eos_bytes,
+void tms9914_interface_clear(struct gpib_board *board, struct tms9914_priv *priv, int assert);
+void tms9914_remote_enable(struct gpib_board *board, struct tms9914_priv *priv, int enable);
+int tms9914_enable_eos(struct gpib_board *board, struct tms9914_priv *priv, uint8_t eos_bytes,
int compare_8_bits);
-void tms9914_disable_eos(gpib_board_t *board, struct tms9914_priv *priv);
-unsigned int tms9914_update_status(gpib_board_t *board, struct tms9914_priv *priv,
+void tms9914_disable_eos(struct gpib_board *board, struct tms9914_priv *priv);
+unsigned int tms9914_update_status(struct gpib_board *board, struct tms9914_priv *priv,
unsigned int clear_mask);
-int tms9914_primary_address(gpib_board_t *board,
+int tms9914_primary_address(struct gpib_board *board,
struct tms9914_priv *priv, unsigned int address);
-int tms9914_secondary_address(gpib_board_t *board, struct tms9914_priv *priv,
+int tms9914_secondary_address(struct gpib_board *board, struct tms9914_priv *priv,
unsigned int address, int enable);
-int tms9914_parallel_poll(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *result);
-void tms9914_parallel_poll_configure(gpib_board_t *board,
+int tms9914_parallel_poll(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *result);
+void tms9914_parallel_poll_configure(struct gpib_board *board,
struct tms9914_priv *priv, uint8_t config);
-void tms9914_parallel_poll_response(gpib_board_t *board,
+void tms9914_parallel_poll_response(struct gpib_board *board,
struct tms9914_priv *priv, int ist);
-void tms9914_serial_poll_response(gpib_board_t *board, struct tms9914_priv *priv, uint8_t status);
-uint8_t tms9914_serial_poll_status(gpib_board_t *board, struct tms9914_priv *priv);
-int tms9914_line_status(const gpib_board_t *board, struct tms9914_priv *priv);
-unsigned int tms9914_t1_delay(gpib_board_t *board, struct tms9914_priv *priv,
+void tms9914_serial_poll_response(struct gpib_board *board, struct tms9914_priv *priv, uint8_t status);
+uint8_t tms9914_serial_poll_status(struct gpib_board *board, struct tms9914_priv *priv);
+int tms9914_line_status(const struct gpib_board *board, struct tms9914_priv *priv);
+unsigned int tms9914_t1_delay(struct gpib_board *board, struct tms9914_priv *priv,
unsigned int nano_sec);
-void tms9914_return_to_local(const gpib_board_t *board, struct tms9914_priv *priv);
+void tms9914_return_to_local(const struct gpib_board *board, struct tms9914_priv *priv);
// utility functions
void tms9914_board_reset(struct tms9914_priv *priv);
-void tms9914_online(gpib_board_t *board, struct tms9914_priv *priv);
+void tms9914_online(struct gpib_board *board, struct tms9914_priv *priv);
void tms9914_release_holdoff(struct tms9914_priv *priv);
void tms9914_set_holdoff_mode(struct tms9914_priv *priv, enum tms9914_holdoff_mode mode);
@@ -130,8 +130,8 @@ uint8_t tms9914_iomem_read_byte(struct tms9914_priv *priv, unsigned int register
void tms9914_iomem_write_byte(struct tms9914_priv *priv, uint8_t data, unsigned int register_num);
// interrupt service routine
-irqreturn_t tms9914_interrupt(gpib_board_t *board, struct tms9914_priv *priv);
-irqreturn_t tms9914_interrupt_have_status(gpib_board_t *board, struct tms9914_priv *priv,
+irqreturn_t tms9914_interrupt(struct gpib_board *board, struct tms9914_priv *priv);
+irqreturn_t tms9914_interrupt_have_status(struct gpib_board *board, struct tms9914_priv *priv,
int status1, int status2);
// tms9914 has 8 registers
diff --git a/drivers/staging/gpib/ines/Makefile b/drivers/staging/gpib/ines/Makefile
index 6b6e480fd811..88241f15ecea 100644
--- a/drivers/staging/gpib/ines/Makefile
+++ b/drivers/staging/gpib/ines/Makefile
@@ -1,4 +1,3 @@
-ccflags-$(CONFIG_GPIB_PCMCIA) := -DGPIB_PCMCIA
obj-$(CONFIG_GPIB_INES) += ines_gpib.o
diff --git a/drivers/staging/gpib/ines/ines.h b/drivers/staging/gpib/ines/ines.h
index 3918737fa21a..ff27f055a0ff 100644
--- a/drivers/staging/gpib/ines/ines.h
+++ b/drivers/staging/gpib/ines/ines.h
@@ -36,41 +36,41 @@ struct ines_priv {
};
// interface functions
-int ines_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read);
-int ines_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+int ines_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read);
+int ines_write(struct gpib_board *board, uint8_t *buffer, size_t length,
int send_eoi, size_t *bytes_written);
-int ines_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length,
+int ines_accel_read(struct gpib_board *board, uint8_t *buffer, size_t length,
int *end, size_t *bytes_read);
-int ines_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+int ines_accel_write(struct gpib_board *board, uint8_t *buffer, size_t length,
int send_eoi, size_t *bytes_written);
-int ines_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written);
-int ines_take_control(gpib_board_t *board, int synchronous);
-int ines_go_to_standby(gpib_board_t *board);
-void ines_request_system_control(gpib_board_t *board, int request_control);
-void ines_interface_clear(gpib_board_t *board, int assert);
-void ines_remote_enable(gpib_board_t *board, int enable);
-int ines_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits);
-void ines_disable_eos(gpib_board_t *board);
-unsigned int ines_update_status(gpib_board_t *board, unsigned int clear_mask);
-int ines_primary_address(gpib_board_t *board, unsigned int address);
-int ines_secondary_address(gpib_board_t *board, unsigned int address, int enable);
-int ines_parallel_poll(gpib_board_t *board, uint8_t *result);
-void ines_parallel_poll_configure(gpib_board_t *board, uint8_t config);
-void ines_parallel_poll_response(gpib_board_t *board, int ist);
-void ines_serial_poll_response(gpib_board_t *board, uint8_t status);
-uint8_t ines_serial_poll_status(gpib_board_t *board);
-int ines_line_status(const gpib_board_t *board);
-unsigned int ines_t1_delay(gpib_board_t *board, unsigned int nano_sec);
-void ines_return_to_local(gpib_board_t *board);
+int ines_command(struct gpib_board *board, uint8_t *buffer, size_t length, size_t *bytes_written);
+int ines_take_control(struct gpib_board *board, int synchronous);
+int ines_go_to_standby(struct gpib_board *board);
+void ines_request_system_control(struct gpib_board *board, int request_control);
+void ines_interface_clear(struct gpib_board *board, int assert);
+void ines_remote_enable(struct gpib_board *board, int enable);
+int ines_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits);
+void ines_disable_eos(struct gpib_board *board);
+unsigned int ines_update_status(struct gpib_board *board, unsigned int clear_mask);
+int ines_primary_address(struct gpib_board *board, unsigned int address);
+int ines_secondary_address(struct gpib_board *board, unsigned int address, int enable);
+int ines_parallel_poll(struct gpib_board *board, uint8_t *result);
+void ines_parallel_poll_configure(struct gpib_board *board, uint8_t config);
+void ines_parallel_poll_response(struct gpib_board *board, int ist);
+void ines_serial_poll_response(struct gpib_board *board, uint8_t status);
+uint8_t ines_serial_poll_status(struct gpib_board *board);
+int ines_line_status(const struct gpib_board *board);
+int ines_t1_delay(struct gpib_board *board, unsigned int nano_sec);
+void ines_return_to_local(struct gpib_board *board);
// interrupt service routines
irqreturn_t ines_pci_interrupt(int irq, void *arg);
-irqreturn_t ines_interrupt(gpib_board_t *board);
+irqreturn_t ines_interrupt(struct gpib_board *board);
// utility functions
-void ines_free_private(gpib_board_t *board);
-int ines_generic_attach(gpib_board_t *board);
-void ines_online(struct ines_priv *priv, const gpib_board_t *board, int use_accel);
+void ines_free_private(struct gpib_board *board);
+int ines_generic_attach(struct gpib_board *board);
+void ines_online(struct ines_priv *priv, const struct gpib_board *board, int use_accel);
void ines_set_xfer_counter(struct ines_priv *priv, unsigned int count);
/* inb/outb wrappers */
diff --git a/drivers/staging/gpib/ines/ines_gpib.c b/drivers/staging/gpib/ines/ines_gpib.c
index 22a05a287bce..d93eb05dab90 100644
--- a/drivers/staging/gpib/ines/ines_gpib.c
+++ b/drivers/staging/gpib/ines/ines_gpib.c
@@ -5,6 +5,10 @@
* (C) 2002 by Frank Mori Hess
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define DRV_NAME KBUILD_MODNAME
+
#include "ines.h"
#include <linux/pci.h>
@@ -21,34 +25,32 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB driver for Ines iGPIB 72010");
-int ines_line_status(const gpib_board_t *board)
+int ines_line_status(const struct gpib_board *board)
{
- int status = ValidALL;
+ int status = VALID_ALL;
int bcm_bits;
struct ines_priv *ines_priv;
- struct nec7210_priv *nec_priv;
ines_priv = board->private_data;
- nec_priv = &ines_priv->nec7210_priv;
bcm_bits = ines_inb(ines_priv, BUS_CONTROL_MONITOR);
if (bcm_bits & BCM_REN_BIT)
- status |= BusREN;
+ status |= BUS_REN;
if (bcm_bits & BCM_IFC_BIT)
- status |= BusIFC;
+ status |= BUS_IFC;
if (bcm_bits & BCM_SRQ_BIT)
- status |= BusSRQ;
+ status |= BUS_SRQ;
if (bcm_bits & BCM_EOI_BIT)
- status |= BusEOI;
+ status |= BUS_EOI;
if (bcm_bits & BCM_NRFD_BIT)
- status |= BusNRFD;
+ status |= BUS_NRFD;
if (bcm_bits & BCM_NDAC_BIT)
- status |= BusNDAC;
+ status |= BUS_NDAC;
if (bcm_bits & BCM_DAV_BIT)
- status |= BusDAV;
+ status |= BUS_DAV;
if (bcm_bits & BCM_ATN_BIT)
- status |= BusATN;
+ status |= BUS_ATN;
return status;
}
@@ -56,14 +58,14 @@ int ines_line_status(const gpib_board_t *board)
void ines_set_xfer_counter(struct ines_priv *priv, unsigned int count)
{
if (count > 0xffff) {
- pr_err("ines: bug! tried to set xfer counter > 0xffff\n");
+ pr_err("bug! tried to set xfer counter > 0xffff\n");
return;
}
ines_outb(priv, (count >> 8) & 0xff, XFER_COUNT_UPPER);
ines_outb(priv, count & 0xff, XFER_COUNT_LOWER);
}
-unsigned int ines_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+int ines_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
struct ines_priv *ines_priv = board->private_data;
struct nec7210_priv *nec_priv = &ines_priv->nec7210_priv;
@@ -93,7 +95,7 @@ static inline unsigned short num_in_fifo_bytes(struct ines_priv *ines_priv)
return ines_inb(ines_priv, IN_FIFO_COUNT);
}
-static ssize_t pio_read(gpib_board_t *board, struct ines_priv *ines_priv, uint8_t *buffer,
+static ssize_t pio_read(struct gpib_board *board, struct ines_priv *ines_priv, uint8_t *buffer,
size_t length, size_t *nbytes)
{
ssize_t retval = 0;
@@ -106,21 +108,18 @@ static ssize_t pio_read(gpib_board_t *board, struct ines_priv *ines_priv, uint8_
num_in_fifo_bytes(ines_priv) ||
test_bit(RECEIVED_END_BN, &nec_priv->state) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
- pr_warn("gpib: pio read wait interrupted\n");
+ test_bit(TIMO_NUM, &board->status)))
return -ERESTARTSYS;
- }
+
if (test_bit(TIMO_NUM, &board->status))
return -ETIMEDOUT;
if (test_bit(DEV_CLEAR_BN, &nec_priv->state))
return -EINTR;
num_fifo_bytes = num_in_fifo_bytes(ines_priv);
- if (num_fifo_bytes + *nbytes > length) {
- pr_warn("ines: counter allowed %li extra byte(s)\n",
- (long)(num_fifo_bytes - (length - *nbytes)));
+ if (num_fifo_bytes + *nbytes > length)
num_fifo_bytes = length - *nbytes;
- }
+
for (i = 0; i < num_fifo_bytes; i++)
buffer[(*nbytes)++] = read_byte(nec_priv, DIR);
if (test_bit(RECEIVED_END_BN, &nec_priv->state) &&
@@ -134,7 +133,7 @@ static ssize_t pio_read(gpib_board_t *board, struct ines_priv *ines_priv, uint8_
return retval;
}
-int ines_accel_read(gpib_board_t *board, uint8_t *buffer,
+int ines_accel_read(struct gpib_board *board, uint8_t *buffer,
size_t length, int *end, size_t *bytes_read)
{
ssize_t retval = 0;
@@ -191,7 +190,7 @@ static inline unsigned short num_out_fifo_bytes(struct ines_priv *ines_priv)
return ines_inb(ines_priv, OUT_FIFO_COUNT);
}
-static int ines_write_wait(gpib_board_t *board, struct ines_priv *ines_priv,
+static int ines_write_wait(struct gpib_board *board, struct ines_priv *ines_priv,
unsigned int fifo_threshold)
{
struct nec7210_priv *nec_priv = &ines_priv->nec7210_priv;
@@ -201,10 +200,9 @@ static int ines_write_wait(gpib_board_t *board, struct ines_priv *ines_priv,
num_out_fifo_bytes(ines_priv) < fifo_threshold ||
test_bit(BUS_ERROR_BN, &nec_priv->state) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "gpib write interrupted\n");
+ test_bit(TIMO_NUM, &board->status)))
return -ERESTARTSYS;
- }
+
if (test_bit(BUS_ERROR_BN, &nec_priv->state))
return -EIO;
if (test_bit(DEV_CLEAR_BN, &nec_priv->state))
@@ -215,7 +213,7 @@ static int ines_write_wait(gpib_board_t *board, struct ines_priv *ines_priv,
return 0;
}
-int ines_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+int ines_accel_write(struct gpib_board *board, uint8_t *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
size_t count = 0;
@@ -268,7 +266,7 @@ int ines_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length,
irqreturn_t ines_pci_interrupt(int irq, void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct ines_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -283,7 +281,7 @@ irqreturn_t ines_pci_interrupt(int irq, void *arg)
return ines_interrupt(board);
}
-irqreturn_t ines_interrupt(gpib_board_t *board)
+irqreturn_t ines_interrupt(struct gpib_board *board)
{
struct ines_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -301,7 +299,7 @@ irqreturn_t ines_interrupt(gpib_board_t *board)
wake++;
}
if (isr3_bits & FIFO_ERROR_BIT)
- pr_err("ines gpib: fifo error\n");
+ dev_err(board->gpib_dev, "fifo error\n");
if (isr3_bits & XFER_COUNT_BIT)
wake++;
@@ -315,12 +313,12 @@ irqreturn_t ines_interrupt(gpib_board_t *board)
return IRQ_HANDLED;
}
-static int ines_pci_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static int ines_pci_accel_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static int ines_isa_attach(gpib_board_t *board, const gpib_board_config_t *config);
+static int ines_pci_attach(struct gpib_board *board, const gpib_board_config_t *config);
+static int ines_pci_accel_attach(struct gpib_board *board, const gpib_board_config_t *config);
+static int ines_isa_attach(struct gpib_board *board, const gpib_board_config_t *config);
-static void ines_pci_detach(gpib_board_t *board);
-static void ines_isa_detach(gpib_board_t *board);
+static void ines_pci_detach(struct gpib_board *board);
+static void ines_isa_detach(struct gpib_board *board);
enum ines_pci_vendor_ids {
PCI_VENDOR_ID_INES_QUICKLOGIC = 0x16da
@@ -395,7 +393,8 @@ static struct ines_pci_id pci_ids[] = {
static const int num_pci_chips = ARRAY_SIZE(pci_ids);
// wrappers for interface functions
-int ines_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read)
+int ines_read(struct gpib_board *board, uint8_t *buffer, size_t length,
+ int *end, size_t *bytes_read)
{
struct ines_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -413,7 +412,7 @@ int ines_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, siz
return retval;
}
-int ines_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
+int ines_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
size_t *bytes_written)
{
struct ines_priv *priv = board->private_data;
@@ -421,119 +420,119 @@ int ines_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi
return nec7210_write(board, &priv->nec7210_priv, buffer, length, send_eoi, bytes_written);
}
-int ines_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written)
+int ines_command(struct gpib_board *board, uint8_t *buffer, size_t length, size_t *bytes_written)
{
struct ines_priv *priv = board->private_data;
return nec7210_command(board, &priv->nec7210_priv, buffer, length, bytes_written);
}
-int ines_take_control(gpib_board_t *board, int synchronous)
+int ines_take_control(struct gpib_board *board, int synchronous)
{
struct ines_priv *priv = board->private_data;
return nec7210_take_control(board, &priv->nec7210_priv, synchronous);
}
-int ines_go_to_standby(gpib_board_t *board)
+int ines_go_to_standby(struct gpib_board *board)
{
struct ines_priv *priv = board->private_data;
return nec7210_go_to_standby(board, &priv->nec7210_priv);
}
-void ines_request_system_control(gpib_board_t *board, int request_control)
+void ines_request_system_control(struct gpib_board *board, int request_control)
{
struct ines_priv *priv = board->private_data;
nec7210_request_system_control(board, &priv->nec7210_priv, request_control);
}
-void ines_interface_clear(gpib_board_t *board, int assert)
+void ines_interface_clear(struct gpib_board *board, int assert)
{
struct ines_priv *priv = board->private_data;
nec7210_interface_clear(board, &priv->nec7210_priv, assert);
}
-void ines_remote_enable(gpib_board_t *board, int enable)
+void ines_remote_enable(struct gpib_board *board, int enable)
{
struct ines_priv *priv = board->private_data;
nec7210_remote_enable(board, &priv->nec7210_priv, enable);
}
-int ines_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+int ines_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
{
struct ines_priv *priv = board->private_data;
return nec7210_enable_eos(board, &priv->nec7210_priv, eos_byte, compare_8_bits);
}
-void ines_disable_eos(gpib_board_t *board)
+void ines_disable_eos(struct gpib_board *board)
{
struct ines_priv *priv = board->private_data;
nec7210_disable_eos(board, &priv->nec7210_priv);
}
-unsigned int ines_update_status(gpib_board_t *board, unsigned int clear_mask)
+unsigned int ines_update_status(struct gpib_board *board, unsigned int clear_mask)
{
struct ines_priv *priv = board->private_data;
return nec7210_update_status(board, &priv->nec7210_priv, clear_mask);
}
-int ines_primary_address(gpib_board_t *board, unsigned int address)
+int ines_primary_address(struct gpib_board *board, unsigned int address)
{
struct ines_priv *priv = board->private_data;
return nec7210_primary_address(board, &priv->nec7210_priv, address);
}
-int ines_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+int ines_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
struct ines_priv *priv = board->private_data;
return nec7210_secondary_address(board, &priv->nec7210_priv, address, enable);
}
-int ines_parallel_poll(gpib_board_t *board, uint8_t *result)
+int ines_parallel_poll(struct gpib_board *board, uint8_t *result)
{
struct ines_priv *priv = board->private_data;
return nec7210_parallel_poll(board, &priv->nec7210_priv, result);
}
-void ines_parallel_poll_configure(gpib_board_t *board, uint8_t config)
+void ines_parallel_poll_configure(struct gpib_board *board, uint8_t config)
{
struct ines_priv *priv = board->private_data;
nec7210_parallel_poll_configure(board, &priv->nec7210_priv, config);
}
-void ines_parallel_poll_response(gpib_board_t *board, int ist)
+void ines_parallel_poll_response(struct gpib_board *board, int ist)
{
struct ines_priv *priv = board->private_data;
nec7210_parallel_poll_response(board, &priv->nec7210_priv, ist);
}
-void ines_serial_poll_response(gpib_board_t *board, uint8_t status)
+void ines_serial_poll_response(struct gpib_board *board, uint8_t status)
{
struct ines_priv *priv = board->private_data;
nec7210_serial_poll_response(board, &priv->nec7210_priv, status);
}
-uint8_t ines_serial_poll_status(gpib_board_t *board)
+uint8_t ines_serial_poll_status(struct gpib_board *board)
{
struct ines_priv *priv = board->private_data;
return nec7210_serial_poll_status(board, &priv->nec7210_priv);
}
-void ines_return_to_local(gpib_board_t *board)
+void ines_return_to_local(struct gpib_board *board)
{
struct ines_priv *priv = board->private_data;
@@ -652,7 +651,7 @@ static gpib_interface_t ines_isa_interface = {
.return_to_local = ines_return_to_local,
};
-static int ines_allocate_private(gpib_board_t *board)
+static int ines_allocate_private(struct gpib_board *board)
{
struct ines_priv *priv;
@@ -665,13 +664,13 @@ static int ines_allocate_private(gpib_board_t *board)
return 0;
}
-void ines_free_private(gpib_board_t *board)
+void ines_free_private(struct gpib_board *board)
{
kfree(board->private_data);
board->private_data = NULL;
}
-int ines_generic_attach(gpib_board_t *board)
+int ines_generic_attach(struct gpib_board *board)
{
struct ines_priv *ines_priv;
struct nec7210_priv *nec_priv;
@@ -691,7 +690,7 @@ int ines_generic_attach(gpib_board_t *board)
return 0;
}
-void ines_online(struct ines_priv *ines_priv, const gpib_board_t *board, int use_accel)
+void ines_online(struct ines_priv *ines_priv, const struct gpib_board *board, int use_accel)
{
struct nec7210_priv *nec_priv = &ines_priv->nec7210_priv;
@@ -725,7 +724,7 @@ void ines_online(struct ines_priv *ines_priv, const gpib_board_t *board, int use
nec7210_set_reg_bits(nec_priv, IMR1, HR_DOIE | HR_DIIE, 0);
}
-static int ines_common_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int ines_common_pci_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct ines_priv *ines_priv;
struct nec7210_priv *nec_priv;
@@ -769,16 +768,16 @@ static int ines_common_pci_attach(gpib_board_t *board, const gpib_board_config_t
} while (1);
}
if (!ines_priv->pci_device) {
- pr_err("gpib: could not find ines PCI board\n");
+ dev_err(board->gpib_dev, "could not find ines PCI board\n");
return -1;
}
if (pci_enable_device(ines_priv->pci_device)) {
- pr_err("error enabling pci device\n");
+ dev_err(board->gpib_dev, "error enabling pci device\n");
return -1;
}
- if (pci_request_regions(ines_priv->pci_device, "ines-gpib"))
+ if (pci_request_regions(ines_priv->pci_device, DRV_NAME))
return -1;
nec_priv->iobase = pci_resource_start(ines_priv->pci_device,
found_id.gpib_region);
@@ -797,7 +796,7 @@ static int ines_common_pci_attach(gpib_board_t *board, const gpib_board_config_t
case PCI_CHIP_QUICKLOGIC5030:
break;
default:
- pr_err("gpib: unspecified chip type? (bug)\n");
+ dev_err(board->gpib_dev, "unspecified chip type? (bug)\n");
nec_priv->iobase = 0;
pci_release_regions(ines_priv->pci_device);
return -1;
@@ -813,8 +812,8 @@ static int ines_common_pci_attach(gpib_board_t *board, const gpib_board_config_t
#endif
isr_flags |= IRQF_SHARED;
if (request_irq(ines_priv->pci_device->irq, ines_pci_interrupt, isr_flags,
- "pci-gpib", board)) {
- pr_err("gpib: can't request IRQ %d\n", ines_priv->pci_device->irq);
+ DRV_NAME, board)) {
+ dev_err(board->gpib_dev, "can't request IRQ %d\n", ines_priv->pci_device->irq);
return -1;
}
ines_priv->irq = ines_priv->pci_device->irq;
@@ -846,14 +845,14 @@ static int ines_common_pci_attach(gpib_board_t *board, const gpib_board_config_t
case PCI_CHIP_QUICKLOGIC5030:
break;
default:
- pr_err("gpib: unspecified chip type? (bug)\n");
+ dev_err(board->gpib_dev, "unspecified chip type? (bug)\n");
return -1;
}
return 0;
}
-int ines_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
+int ines_pci_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct ines_priv *ines_priv;
int retval;
@@ -868,7 +867,7 @@ int ines_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
return 0;
}
-int ines_pci_accel_attach(gpib_board_t *board, const gpib_board_config_t *config)
+int ines_pci_accel_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct ines_priv *ines_priv;
int retval;
@@ -885,7 +884,7 @@ int ines_pci_accel_attach(gpib_board_t *board, const gpib_board_config_t *config
static const int ines_isa_iosize = 0x20;
-int ines_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
+int ines_isa_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct ines_priv *ines_priv;
struct nec7210_priv *nec_priv;
@@ -899,15 +898,16 @@ int ines_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
ines_priv = board->private_data;
nec_priv = &ines_priv->nec7210_priv;
- if (!request_region(config->ibbase, ines_isa_iosize, "ines_gpib")) {
- pr_err("ines_gpib: ioports at 0x%x already in use\n", config->ibbase);
- return -1;
+ if (!request_region(config->ibbase, ines_isa_iosize, DRV_NAME)) {
+ dev_err(board->gpib_dev, "ioports at 0x%x already in use\n",
+ config->ibbase);
+ return -EBUSY;
}
nec_priv->iobase = config->ibbase;
nec_priv->offset = 1;
nec7210_board_reset(nec_priv, board);
- if (request_irq(config->ibirq, ines_pci_interrupt, isr_flags, "ines_gpib", board)) {
- pr_err("ines_gpib: failed to allocate IRQ %d\n", config->ibirq);
+ if (request_irq(config->ibirq, ines_pci_interrupt, isr_flags, DRV_NAME, board)) {
+ dev_err(board->gpib_dev, "failed to allocate IRQ %d\n", config->ibirq);
return -1;
}
ines_priv->irq = config->ibirq;
@@ -915,7 +915,7 @@ int ines_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
return 0;
}
-void ines_pci_detach(gpib_board_t *board)
+void ines_pci_detach(struct gpib_board *board)
{
struct ines_priv *ines_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -949,7 +949,7 @@ void ines_pci_detach(gpib_board_t *board)
ines_free_private(board);
}
-void ines_isa_detach(gpib_board_t *board)
+void ines_isa_detach(struct gpib_board *board)
{
struct ines_priv *ines_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -977,7 +977,7 @@ static struct pci_driver ines_pci_driver = {
.probe = &ines_pci_probe
};
-#ifdef GPIB_PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
#include <linux/kernel.h>
#include <linux/ptrace.h>
@@ -988,13 +988,6 @@ static struct pci_driver ines_pci_driver = {
#include <pcmcia/ds.h>
#include <pcmcia/cisreg.h>
-#ifdef PCMCIA_DEBUG
-static int pc_debug = PCMCIA_DEBUG;
-#define DEBUG(n, args...) do {if (pc_debug > (n)) pr_debug(args)} while (0)
-#else
-#define DEBUG(args...)
-#endif
-
static const int ines_pcmcia_iosize = 0x20;
/* The event() function is this driver's Card Services event handler.
@@ -1007,11 +1000,11 @@ static const int ines_pcmcia_iosize = 0x20;
static int ines_gpib_config(struct pcmcia_device *link);
static void ines_gpib_release(struct pcmcia_device *link);
-static int ines_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static int ines_pcmcia_accel_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static void ines_pcmcia_detach(gpib_board_t *board);
+static int ines_pcmcia_attach(struct gpib_board *board, const gpib_board_config_t *config);
+static int ines_pcmcia_accel_attach(struct gpib_board *board, const gpib_board_config_t *config);
+static void ines_pcmcia_detach(struct gpib_board *board);
static irqreturn_t ines_pcmcia_interrupt(int irq, void *arg);
-static int ines_common_pcmcia_attach(gpib_board_t *board);
+static int ines_common_pcmcia_attach(struct gpib_board *board);
/*
* A linked list of "instances" of the gpib device. Each actual
* PCMCIA card corresponds to one device instance, and is described
@@ -1043,7 +1036,7 @@ static struct pcmcia_device *curr_dev;
struct local_info {
struct pcmcia_device *p_dev;
- gpib_board_t *dev;
+ struct gpib_board *dev;
u_short manfid;
u_short cardid;
};
@@ -1063,8 +1056,6 @@ static int ines_gpib_probe(struct pcmcia_device *link)
// int ret, i;
- DEBUG(0, "%s(0x%p)\n", __func__ link);
-
/* Allocate space for private device-specific data */
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
@@ -1096,9 +1087,7 @@ static int ines_gpib_probe(struct pcmcia_device *link)
static void ines_gpib_remove(struct pcmcia_device *link)
{
struct local_info *info = link->priv;
- //struct gpib_board_t *dev = info->dev;
-
- DEBUG(0, "%s(0x%p)\n", __func__, link);
+ //struct gpib_board *dev = info->dev;
if (info->dev)
ines_pcmcia_detach(info->dev);
@@ -1125,7 +1114,6 @@ static int ines_gpib_config(struct pcmcia_device *link)
void __iomem *virt;
dev = link->priv;
- DEBUG(0, "%s(0x%p)\n", __func__, link);
retval = pcmcia_loop_config(link, &ines_gpib_config_iteration, NULL);
if (retval) {
@@ -1134,8 +1122,8 @@ static int ines_gpib_config(struct pcmcia_device *link)
return -ENODEV;
}
- pr_debug("ines_cs: manufacturer: 0x%x card: 0x%x\n",
- link->manf_id, link->card_id);
+ dev_dbg(&link->dev, "ines_cs: manufacturer: 0x%x card: 0x%x\n",
+ link->manf_id, link->card_id);
/* for the ines card we have to setup the configuration registers in
* attribute memory here
@@ -1167,7 +1155,6 @@ static int ines_gpib_config(struct pcmcia_device *link)
ines_gpib_release(link);
return -ENODEV;
}
- pr_info("ines gpib device loaded\n");
return 0;
} /* gpib_config */
@@ -1179,18 +1166,16 @@ static int ines_gpib_config(struct pcmcia_device *link)
static void ines_gpib_release(struct pcmcia_device *link)
{
- DEBUG(0, "%s(0x%p)\n", __func__, link);
pcmcia_disable_device(link);
} /* gpib_release */
static int ines_gpib_suspend(struct pcmcia_device *link)
{
//struct local_info *info = link->priv;
- //struct gpib_board_t *dev = info->dev;
- DEBUG(0, "%s(0x%p)\n", __func__, link);
+ //struct gpib_board *dev = info->dev;
if (link->open)
- pr_err("Device still open ???\n");
+ dev_err(&link->dev, "Device still open\n");
//netif_device_detach(dev);
return 0;
@@ -1199,12 +1184,10 @@ static int ines_gpib_suspend(struct pcmcia_device *link)
static int ines_gpib_resume(struct pcmcia_device *link)
{
//struct local_info_t *info = link->priv;
- //struct gpib_board_t *dev = info->dev;
- DEBUG(0, "%s(0x%p)\n", __func__, link);
+ //struct gpib_board *dev = info->dev;
/*if (link->open) {
* ni_gpib_probe(dev); / really?
- * printk("Gpib resumed ???\n");
* //netif_device_attach(dev);
*}
*/
@@ -1229,7 +1212,6 @@ static struct pcmcia_driver ines_gpib_cs_driver = {
void ines_pcmcia_cleanup_module(void)
{
- DEBUG(0, "ines_cs: unloading\n");
pcmcia_unregister_driver(&ines_gpib_cs_driver);
}
@@ -1319,19 +1301,19 @@ static gpib_interface_t ines_pcmcia_interface = {
irqreturn_t ines_pcmcia_interrupt(int irq, void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
return ines_interrupt(board);
}
-int ines_common_pcmcia_attach(gpib_board_t *board)
+int ines_common_pcmcia_attach(struct gpib_board *board)
{
struct ines_priv *ines_priv;
struct nec7210_priv *nec_priv;
int retval;
if (!curr_dev) {
- pr_err("no ines pcmcia cards found\n");
+ dev_err(board->gpib_dev, "no ines pcmcia cards found\n");
return -1;
}
@@ -1343,9 +1325,9 @@ int ines_common_pcmcia_attach(gpib_board_t *board)
nec_priv = &ines_priv->nec7210_priv;
if (!request_region(curr_dev->resource[0]->start,
- resource_size(curr_dev->resource[0]), "ines_gpib")) {
- pr_err("ines_gpib: ioports at 0x%lx already in use\n",
- (unsigned long)(curr_dev->resource[0]->start));
+ resource_size(curr_dev->resource[0]), DRV_NAME)) {
+ dev_err(board->gpib_dev, "ioports at 0x%lx already in use\n",
+ (unsigned long)(curr_dev->resource[0]->start));
return -1;
}
@@ -1355,7 +1337,7 @@ int ines_common_pcmcia_attach(gpib_board_t *board)
if (request_irq(curr_dev->irq, ines_pcmcia_interrupt, IRQF_SHARED,
"pcmcia-gpib", board)) {
- pr_err("gpib: can't request IRQ %d\n", curr_dev->irq);
+ dev_err(board->gpib_dev, "can't request IRQ %d\n", curr_dev->irq);
return -1;
}
ines_priv->irq = curr_dev->irq;
@@ -1363,7 +1345,7 @@ int ines_common_pcmcia_attach(gpib_board_t *board)
return 0;
}
-int ines_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
+int ines_pcmcia_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct ines_priv *ines_priv;
int retval;
@@ -1378,7 +1360,7 @@ int ines_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
return 0;
}
-int ines_pcmcia_accel_attach(gpib_board_t *board, const gpib_board_config_t *config)
+int ines_pcmcia_accel_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct ines_priv *ines_priv;
int retval;
@@ -1393,7 +1375,7 @@ int ines_pcmcia_accel_attach(gpib_board_t *board, const gpib_board_config_t *con
return 0;
}
-void ines_pcmcia_detach(gpib_board_t *board)
+void ines_pcmcia_detach(struct gpib_board *board)
{
struct ines_priv *ines_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -1410,7 +1392,7 @@ void ines_pcmcia_detach(gpib_board_t *board)
ines_free_private(board);
}
-#endif /* GPIB_PCMCIA */
+#endif /* CONFIG_GPIB_PCMCIA */
static int __init ines_init_module(void)
{
@@ -1418,63 +1400,63 @@ static int __init ines_init_module(void)
ret = pci_register_driver(&ines_pci_driver);
if (ret) {
- pr_err("ines_gpib: pci_register_driver failed: error = %d\n", ret);
+ pr_err("pci_register_driver failed: error = %d\n", ret);
return ret;
}
ret = gpib_register_driver(&ines_pci_interface, THIS_MODULE);
if (ret) {
- pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pci;
}
ret = gpib_register_driver(&ines_pci_unaccel_interface, THIS_MODULE);
if (ret) {
- pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pci_unaccel;
}
ret = gpib_register_driver(&ines_pci_accel_interface, THIS_MODULE);
if (ret) {
- pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pci_accel;
}
ret = gpib_register_driver(&ines_isa_interface, THIS_MODULE);
if (ret) {
- pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_isa;
}
-#ifdef GPIB_PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
ret = gpib_register_driver(&ines_pcmcia_interface, THIS_MODULE);
if (ret) {
- pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pcmcia;
}
ret = gpib_register_driver(&ines_pcmcia_unaccel_interface, THIS_MODULE);
if (ret) {
- pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pcmcia_unaccel;
}
ret = gpib_register_driver(&ines_pcmcia_accel_interface, THIS_MODULE);
if (ret) {
- pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pcmcia_accel;
}
ret = pcmcia_register_driver(&ines_gpib_cs_driver);
if (ret) {
- pr_err("ines_gpib: pcmcia_register_driver failed: error = %d\n", ret);
+ pr_err("pcmcia_register_driver failed: error = %d\n", ret);
goto err_pcmcia_driver;
}
#endif
return 0;
-#ifdef GPIB_PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
err_pcmcia_driver:
gpib_unregister_driver(&ines_pcmcia_accel_interface);
err_pcmcia_accel:
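The registration chain in ines_init_module() unwinds in strict reverse order through its error labels. As a reference, a minimal standalone sketch of the same goto-unwind idiom (the function and helper names here are hypothetical, not part of the driver):

static int __init example_init(void)
{
	int ret;

	ret = register_first();
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = register_second();
	if (ret)
		goto err_first;		/* undo in reverse order */

	return 0;

err_first:
	unregister_first();
	return ret;
}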
diff --git a/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c b/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
index 85322af62c23..faf96e9cc4a1 100644
--- a/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
+++ b/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
@@ -8,6 +8,10 @@
* copyright : (C) 2011 Marcello Carla' *
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define NAME KBUILD_MODNAME
+
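With pr_fmt()/dev_fmt() defined this way, every pr_*() and dev_*() call in the file is automatically prefixed with the module name, which is why the explicit NAME/__func__ prefixes are dropped from the individual messages in the hunks below. A small sketch of the effect (hypothetical message):

	pr_err("can't find device for minor %d\n", 3);
	/* logs: "lpvo_usb_gpib: can't find device for minor 3" */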
/* base module includes */
#include <linux/module.h>
@@ -31,8 +35,6 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB driver for LPVO usb devices");
-#define NAME "lpvo_usb_gpib"
-
/*
* Table of devices that work with this driver.
*
@@ -55,10 +57,11 @@ MODULE_DEVICE_TABLE(usb, skel_table);
/*
* *** Diagnostics and Debug ***
- *
+ * To enable the diagnostic and debug messages, either compile with DEBUG set
+ * or enable them at run time via the dynamic debug mechanism.
* The module parameter "debug" controls the sending of debug messages to
- * syslog. By default it is set to 0 or 1 according to GPIB_CONFIG_KERNEL_DEBUG.
- * debug = 0: only register/deregister messages are generated
+ * syslog. By default it is set to 0.
+ * debug = 0: only attach/detach messages are sent
* 1: every action is logged
* 2: extended logging; each single exchanged byte is documented
* (about twice the log volume of [1])
@@ -70,11 +73,15 @@ MODULE_DEVICE_TABLE(usb, skel_table);
static int debug;
module_param(debug, int, 0644);
-#define DIA_LOG(level, format, ...) \
+#define DIA_LOG(level, format, ...) \
do { if (debug >= (level)) \
- pr_alert("%s:%s - " format, NAME, __func__, ## __VA_ARGS__); } \
+ dev_dbg(board->gpib_dev, format, ## __VA_ARGS__); } \
while (0)
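Note that the reworked DIA_LOG() expands to dev_dbg() on board->gpib_dev, so it can only appear where a local variable named 'board' is in scope, and its output is gated both by the debug parameter and by dynamic debug. A minimal sketch of a call site (the function itself is hypothetical):

static void example_log(struct gpib_board *board)
{
	/* relies on the implicit 'board' referenced by the macro body */
	DIA_LOG(1, "timeout is %d usec\n", board->usec_timeout);
}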
+#define WQT wait_queue_entry_t
+#define WQH head
+#define WQE entry
+
/* standard and extended command sets of the usb-gpib adapter */
#define USB_GPIB_ON "\nIB\n"
@@ -135,7 +142,7 @@ struct char_buf { /* used by one_char() routine */
};
struct usb_gpib_priv { /* private data to the device */
- u8 eos; /* eos character */
+ u8 eos; /* eos character */
short eos_flags; /* eos mode */
int timeout; /* current value for timeout */
void *dev; /* the usb device private data structure */
@@ -143,42 +150,23 @@ struct usb_gpib_priv { /* private data to the device */
#define GPIB_DEV (((struct usb_gpib_priv *)board->private_data)->dev)
-#define SHOW_STATUS(board) { \
- DIA_LOG(2, "# - board %p\n", board); \
- DIA_LOG(2, "# - buffer_length %d\n", board->buffer_length); \
- DIA_LOG(2, "# - status %lx\n", board->status); \
- DIA_LOG(2, "# - use_count %d\n", board->use_count); \
- DIA_LOG(2, "# - pad %x\n", board->pad); \
- DIA_LOG(2, "# - sad %x\n", board->sad); \
- DIA_LOG(2, "# - timeout %d\n", board->usec_timeout); \
- DIA_LOG(2, "# - ppc %d\n", board->parallel_poll_configuration); \
- DIA_LOG(2, "# - t1delay %d\n", board->t1_nano_sec); \
- DIA_LOG(2, "# - online %d\n", board->online); \
- DIA_LOG(2, "# - autopoll %d\n", board->autospollers); \
- DIA_LOG(2, "# - autopoll task %p\n", board->autospoll_task); \
- DIA_LOG(2, "# - minor %d\n", board->minor); \
- DIA_LOG(2, "# - master %d\n", board->master); \
- DIA_LOG(2, "# - list %d\n", board->ist); \
- }
-/*
- * n = 0;
- * list_for_each (l, &board->device_list) n++;
- * TTY_LOG ("%s:%s - devices in list %d\n", a, b, n);
- */
-
-/*
- * TTY_LOG - write a message to the current work terminal (if any)
- */
-
-#define TTY_LOG(format, ...) { \
- char buf[128]; \
- struct tty_struct *tty = get_current_tty(); \
- if (tty) { \
- snprintf(buf, 128, format, __VA_ARGS__); \
- tty->driver->ops->write(tty, buf, strlen(buf)); \
- tty->driver->ops->write(tty, "\r", 1); \
- } \
- }
+static void show_status(struct gpib_board *board)
+{
+ DIA_LOG(2, "# - buffer_length %d\n", board->buffer_length);
+ DIA_LOG(2, "# - status %lx\n", board->status);
+ DIA_LOG(2, "# - use_count %d\n", board->use_count);
+ DIA_LOG(2, "# - pad %x\n", board->pad);
+ DIA_LOG(2, "# - sad %x\n", board->sad);
+ DIA_LOG(2, "# - timeout %d\n", board->usec_timeout);
+ DIA_LOG(2, "# - ppc %d\n", board->parallel_poll_configuration);
+ DIA_LOG(2, "# - t1delay %d\n", board->t1_nano_sec);
+ DIA_LOG(2, "# - online %d\n", board->online);
+ DIA_LOG(2, "# - autopoll %d\n", board->autospollers);
+ DIA_LOG(2, "# - autopoll task %p\n", board->autospoll_task);
+ DIA_LOG(2, "# - minor %d\n", board->minor);
+ DIA_LOG(2, "# - master %d\n", board->master);
+ DIA_LOG(2, "# - list %d\n", board->ist);
+}
/*
* GLOBAL VARIABLES: required for
@@ -200,8 +188,8 @@ static struct mutex minors_lock; /* operations on usb_minors are to be prote
struct usb_skel;
static ssize_t skel_do_write(struct usb_skel *, const char *, size_t);
static ssize_t skel_do_read(struct usb_skel *, char *, size_t);
-static int skel_do_open(gpib_board_t *, int);
-static int skel_do_release(gpib_board_t *);
+static int skel_do_open(struct gpib_board *, int);
+static int skel_do_release(struct gpib_board *);
/*
* usec_diff : take difference in MICROsec between two 'timespec'
@@ -229,27 +217,7 @@ static inline int usec_diff(struct timespec64 *a, struct timespec64 *b)
static int write_loop(void *dev, char *msg, int leng)
{
-// int nchar = 0, val;
-
-// do {
-
return skel_do_write(dev, msg, leng);
-
-// if (val < 1) {
-// printk (KERN_ALERT "%s:%s - write error: %d %d/%d\n",
-// NAME, __func__, val, nchar, leng);
-// return -EIO;
-// }
-// nchar +=val;
-// } while (nchar < leng);
-// return leng;
-}
-
-static char printable(char x)
-{
- if (x < 32 || x > 126)
- return ' ';
- return x;
}
/**
@@ -257,15 +225,15 @@ static char printable(char x)
*
* @board: the gpib_board_struct data area for this gpib interface
* @msg: the byte sequence.
- * @leng the byte sequence length; can be given as zero and is
+ * @leng: the byte sequence length; can be given as zero and is
* computed automatically, but if 'msg' contains a zero byte,
* it has to be given explicitly.
*/
-static int send_command(gpib_board_t *board, char *msg, int leng)
+static int send_command(struct gpib_board *board, char *msg, int leng)
{
char buffer[64];
- int nchar, j;
+ int nchar;
int retval;
struct timespec64 before, after;
@@ -280,17 +248,10 @@ static int send_command(gpib_board_t *board, char *msg, int leng)
nchar = skel_do_read(GPIB_DEV, buffer, 64);
if (nchar < 0) {
- DIA_LOG(0, " return from read: %d\n", nchar);
+ dev_err(board->gpib_dev, " return from read: %d\n", nchar);
return nchar;
} else if (nchar != 1) {
- for (j = 0 ; j < leng ; j++) {
- DIA_LOG(0, " Irregular reply to command: %d %x %c\n",
- j, msg[j], printable(msg[j]));
- }
- for (j = 0 ; j < nchar ; j++) {
- DIA_LOG(0, " Irregular command reply: %d %x %c\n",
- j, buffer[j] & 0xff, printable(buffer[j]));
- }
+ dev_err(board->gpib_dev, " Irregular reply to command: %s\n", msg);
return -EIO;
}
ktime_get_real_ts64 (&after);
@@ -310,7 +271,7 @@ static int send_command(gpib_board_t *board, char *msg, int leng)
*
*/
-static int set_control_line(gpib_board_t *board, int line, int value)
+static int set_control_line(struct gpib_board *board, int line, int value)
{
char msg[] = USB_GPIB_SET_LINES;
int retval;
@@ -337,11 +298,11 @@ static int set_control_line(gpib_board_t *board, int line, int value)
/*
* one_char() - read one single byte from input buffer
*
- * @board: the gpib_board_struct data area for this gpib interface
- * @char_buf: the routine private data structure
+ * @board: the gpib_board_struct data area for this gpib interface
+ * @char_buf: the routine private data structure
*/
-static int one_char(gpib_board_t *board, struct char_buf *b)
+static int one_char(struct gpib_board *board, struct char_buf *b)
{
struct timespec64 before, after;
@@ -360,13 +321,7 @@ static int one_char(gpib_board_t *board, struct char_buf *b)
if (b->nchar > 0) {
DIA_LOG(2, "--> %x\n", b->inbuf[b->last - b->nchar]);
return b->inbuf[b->last - b->nchar--];
- } else if (b->nchar == 0) {
- dev_alert(board->gpib_dev, "%s:%s - read returned EOF\n", NAME, __func__);
- return -EIO;
}
- dev_alert(board->gpib_dev, "%s:%s - read error %d\n", NAME, __func__, b->nchar);
- TTY_LOG("\n *** %s *** Read Error - %s\n", NAME,
- "Reset the adapter with 'gpib_config'\n");
return -EIO;
}
@@ -381,7 +336,7 @@ static int one_char(gpib_board_t *board, struct char_buf *b)
* not supported.
*/
-static void set_timeout(gpib_board_t *board)
+static void set_timeout(struct gpib_board *board)
{
int n, val;
char command[sizeof(USB_GPIB_TTMO) + 6];
@@ -406,12 +361,10 @@ static void set_timeout(gpib_board_t *board)
val = send_command(board, command, 0);
}
- if (val != ACK) {
- dev_alert(board->gpib_dev, "%s:%s - error in timeout set: <%s>\n",
- NAME, __func__, command);
- } else {
+ if (val != ACK)
+ dev_err(board->gpib_dev, "error in timeout set: <%s>\n", command);
+ else
data->timeout = board->usec_timeout;
- }
}
/*
@@ -431,7 +384,7 @@ static void set_timeout(gpib_board_t *board)
* detach() will be called. Always.
*/
-static int usb_gpib_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int usb_gpib_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
int retval, j;
u32 base = config->ibbase;
@@ -451,8 +404,6 @@ static int usb_gpib_attach(gpib_board_t *board, const gpib_board_config_t *confi
if (config->device_path) {
/* if config->device_path given, try that first */
- dev_alert(board->gpib_dev, "%s:%s - Looking for device_path: %s\n",
- NAME, __func__, config->device_path);
for (j = 0 ; j < MAX_DEV ; j++) {
if ((assigned_usb_minors & 1 << j) == 0)
continue;
@@ -487,8 +438,7 @@ static int usb_gpib_attach(gpib_board_t *board, const gpib_board_config_t *confi
mutex_unlock(&minors_lock);
if (j == MAX_DEV) {
- dev_alert(board->gpib_dev, "%s:%s - Requested device is not registered.\n",
- NAME, __func__);
+ dev_err(board->gpib_dev, "Requested device is not registered.\n");
return -EIO;
}
@@ -501,13 +451,13 @@ static int usb_gpib_attach(gpib_board_t *board, const gpib_board_config_t *confi
DIA_LOG(1, "Skel open: %d\n", retval);
if (retval) {
- TTY_LOG("%s:%s - skel open failed.\n", NAME, __func__);
+ dev_err(board->gpib_dev, "skel open failed.\n");
kfree(board->private_data);
board->private_data = NULL;
return -ENODEV;
}
- SHOW_STATUS(board);
+ show_status(board);
retval = send_command(board, USB_GPIB_ON, 0);
DIA_LOG(1, "USB_GPIB_ON returns %x\n", retval);
@@ -541,8 +491,8 @@ static int usb_gpib_attach(gpib_board_t *board, const gpib_board_config_t *confi
if (retval != ACK)
return -EIO;
- SHOW_STATUS(board);
- TTY_LOG("Module '%s' has been sucesfully configured\n", NAME);
+ show_status(board);
+ DIA_LOG(0, "attached\n");
return 0;
}
@@ -553,13 +503,13 @@ static int usb_gpib_attach(gpib_board_t *board, const gpib_board_config_t *confi
*
*/
-static void usb_gpib_detach(gpib_board_t *board)
+static void usb_gpib_detach(struct gpib_board *board)
{
int retval;
- SHOW_STATUS(board);
+ show_status(board);
- DIA_LOG(0, "detaching %p\n", board);
+ DIA_LOG(0, "detaching\n");
if (board->private_data) {
if (GPIB_DEV) {
@@ -573,15 +523,14 @@ static void usb_gpib_detach(gpib_board_t *board)
board->private_data = NULL;
}
- DIA_LOG(0, "done %p\n", board);
- TTY_LOG("Module '%s' has been detached\n", NAME);
+ DIA_LOG(0, "detached\n");
}
/*
* Other functions follow in alphabetical order
*/
/* command */
-static int usb_gpib_command(gpib_board_t *board,
+static int usb_gpib_command(struct gpib_board *board,
u8 *buffer,
size_t length,
size_t *bytes_written)
@@ -614,7 +563,7 @@ static int usb_gpib_command(gpib_board_t *board,
* Cannot do nothing here, but remember for future use.
*/
-static void usb_gpib_disable_eos(gpib_board_t *board)
+static void usb_gpib_disable_eos(struct gpib_board *board)
{
((struct usb_gpib_priv *)board->private_data)->eos_flags &= ~REOS;
DIA_LOG(1, "done: %x\n",
@@ -630,7 +579,7 @@ static void usb_gpib_disable_eos(gpib_board_t *board)
*
*/
-static int usb_gpib_enable_eos(gpib_board_t *board,
+static int usb_gpib_enable_eos(struct gpib_board *board,
u8 eos_byte,
int compare_8_bits)
{
@@ -650,7 +599,7 @@ static int usb_gpib_enable_eos(gpib_board_t *board,
* @board: the gpib_board data area for this gpib interface
*/
-static int usb_gpib_go_to_standby(gpib_board_t *board)
+static int usb_gpib_go_to_standby(struct gpib_board *board)
{
int retval = set_control_line(board, IB_BUS_ATN, 0);
@@ -665,14 +614,14 @@ static int usb_gpib_go_to_standby(gpib_board_t *board)
* usb_gpib_interface_clear() - Assert or de-assert IFC
*
* @board: the gpib_board data area for this gpib interface
- * assert: 1: assert IFC; 0: de-assert IFC
+ * @assert: 1: assert IFC; 0: de-assert IFC
*
* Currently on the assert request we issue the lpvo IBZ
* command that cycles IFC low for 100 usec, then we ignore
* the de-assert request.
*/
-static void usb_gpib_interface_clear(gpib_board_t *board, int assert)
+static void usb_gpib_interface_clear(struct gpib_board *board, int assert)
{
int retval = 0;
@@ -688,21 +637,16 @@ static void usb_gpib_interface_clear(gpib_board_t *board, int assert)
}
/**
- * line_status() - Read the status of the bus lines.
+ * usb_gpib_line_status() - Read the status of the bus lines.
*
* @board: the gpib_board data area for this gpib interface
*
* We can read all lines.
*/
-
-#define WQT wait_queue_entry_t
-#define WQH head
-#define WQE entry
-
-static int usb_gpib_line_status(const gpib_board_t *board)
+static int usb_gpib_line_status(const struct gpib_board *board)
{
int buffer;
- int line_status = ValidALL; /* all lines will be read */
+ int line_status = VALID_ALL; /* all lines will be read */
struct list_head *p, *q;
WQT *item;
unsigned long flags;
@@ -730,30 +674,29 @@ static int usb_gpib_line_status(const gpib_board_t *board)
msleep(sleep);
}
- buffer = send_command((gpib_board_t *)board, USB_GPIB_STATUS, 0);
+ buffer = send_command((struct gpib_board *)board, USB_GPIB_STATUS, 0);
if (buffer < 0) {
- dev_alert(board->gpib_dev, "%s:%s - line status read failed with %d\n",
- NAME, __func__, buffer);
+ dev_err(board->gpib_dev, "line status read failed with %d\n", buffer);
return -1;
}
if ((buffer & 0x01) == 0)
- line_status |= BusREN;
+ line_status |= BUS_REN;
if ((buffer & 0x02) == 0)
- line_status |= BusIFC;
+ line_status |= BUS_IFC;
if ((buffer & 0x04) == 0)
- line_status |= BusNDAC;
+ line_status |= BUS_NDAC;
if ((buffer & 0x08) == 0)
- line_status |= BusNRFD;
+ line_status |= BUS_NRFD;
if ((buffer & 0x10) == 0)
- line_status |= BusDAV;
+ line_status |= BUS_DAV;
if ((buffer & 0x20) == 0)
- line_status |= BusEOI;
+ line_status |= BUS_EOI;
if ((buffer & 0x40) == 0)
- line_status |= BusATN;
+ line_status |= BUS_ATN;
if ((buffer & 0x80) == 0)
- line_status |= BusSRQ;
+ line_status |= BUS_SRQ;
DIA_LOG(1, "done with %x %x\n", buffer, line_status);
@@ -762,7 +705,7 @@ static int usb_gpib_line_status(const gpib_board_t *board)
/* parallel_poll */
-static int usb_gpib_parallel_poll(gpib_board_t *board, uint8_t *result)
+static int usb_gpib_parallel_poll(struct gpib_board *board, uint8_t *result)
{
/* request parallel poll asserting ATN | EOI;
* we suppose ATN already asserted
@@ -773,27 +716,23 @@ static int usb_gpib_parallel_poll(gpib_board_t *board, uint8_t *result)
DIA_LOG(1, "enter %p\n", board);
retval = set_control_line(board, IB_BUS_EOI, 1);
- if (retval != ACK) {
- dev_alert(board->gpib_dev, "%s:%s - assert EOI failed\n", NAME, __func__);
+ if (retval != ACK)
return -EIO;
- }
*result = send_command(board, USB_GPIB_READ_DATA, 0);
DIA_LOG(1, "done with %x\n", *result);
retval = set_control_line(board, IB_BUS_EOI, 0);
- if (retval != 0x06) {
- dev_alert(board->gpib_dev, "%s:%s - unassert EOI failed\n", NAME, __func__);
+ if (retval != 0x06)
return -EIO;
- }
return 0;
}
/* read */
-static int usb_gpib_read(gpib_board_t *board,
+static int usb_gpib_read(struct gpib_board *board,
u8 *buffer,
size_t length,
int *end,
@@ -866,8 +805,7 @@ static int usb_gpib_read(gpib_board_t *board,
goto read_return;
if (one_char(board, &b) != DLE || one_char(board, &b) != STX) {
- dev_alert(board->gpib_dev, "%s:%s - wrong <DLE><STX> sequence\n",
- NAME, __func__);
+ dev_err(board->gpib_dev, "wrong <DLE><STX> sequence\n");
retval = -EIO;
goto read_return;
}
@@ -907,15 +845,12 @@ static int usb_gpib_read(gpib_board_t *board,
retval = 0;
goto read_return;
} else {
- dev_alert(board->gpib_dev, "%s:%s - %s %x\n",
- NAME, __func__,
- "Wrong end of message", c);
+ dev_err(board->gpib_dev, "wrong end of message %x", c);
retval = -ETIME;
goto read_return;
}
} else {
- dev_alert(board->gpib_dev, "%s:%s - %s\n", NAME, __func__,
- "lone <DLE> in stream");
+ dev_err(board->gpib_dev, "lone <DLE> in stream");
retval = -EIO;
goto read_return;
}
@@ -934,8 +869,7 @@ static int usb_gpib_read(gpib_board_t *board,
c = one_char(board, &b);
if (c == ACK) {
if (MAX_READ_EXCESS - read_count > 1)
- dev_alert(board->gpib_dev, "%s:%s - %s\n", NAME, __func__,
- "small buffer - maybe some data lost");
+ dev_dbg(board->gpib_dev, "small buffer - maybe some data lost");
retval = 0;
goto read_return;
}
@@ -943,15 +877,13 @@ static int usb_gpib_read(gpib_board_t *board,
}
}
- dev_alert(board->gpib_dev, "%s:%s - no input end - GPIB board in odd state\n",
- NAME, __func__);
+ dev_err(board->gpib_dev, "no input end - board in odd state\n");
retval = -EIO;
read_return:
kfree(b.inbuf);
- DIA_LOG(1, "done with byte/status: %d %x %d\n",
- (int)*bytes_read, retval, *end);
+ DIA_LOG(1, "done with byte/status: %d %x %d\n", (int)*bytes_read, retval, *end);
if (retval == 0 || retval == -ETIME) {
if (send_command(board, USB_GPIB_UNTALK, sizeof(USB_GPIB_UNTALK)) == 0x06)
@@ -964,21 +896,20 @@ read_return:
/* remote_enable */
-static void usb_gpib_remote_enable(gpib_board_t *board, int enable)
+static void usb_gpib_remote_enable(struct gpib_board *board, int enable)
{
int retval;
retval = set_control_line(board, IB_BUS_REN, enable ? 1 : 0);
if (retval != ACK)
- dev_alert(board->gpib_dev, "%s:%s - could not set REN line: %x\n",
- NAME, __func__, retval);
+ dev_err(board->gpib_dev, "could not set REN line: %x\n", retval);
DIA_LOG(1, "done with %x\n", retval);
}
/* request_system_control */
-static void usb_gpib_request_system_control(gpib_board_t *board,
+static void usb_gpib_request_system_control(struct gpib_board *board,
int request_control)
{
if (request_control)
@@ -992,7 +923,7 @@ static void usb_gpib_request_system_control(gpib_board_t *board,
/* take_control */
/* beware: the sync flag is ignored; what is its real meaning? */
-static int usb_gpib_take_control(gpib_board_t *board, int sync)
+static int usb_gpib_take_control(struct gpib_board *board, int sync)
{
int retval;
@@ -1007,7 +938,7 @@ static int usb_gpib_take_control(gpib_board_t *board, int sync)
/* update_status */
-static unsigned int usb_gpib_update_status(gpib_board_t *board,
+static unsigned int usb_gpib_update_status(struct gpib_board *board,
unsigned int clear_mask)
{
/* There is nothing we can do here, I guess */
@@ -1022,7 +953,7 @@ static unsigned int usb_gpib_update_status(gpib_board_t *board,
/* write */
/* beware: DLE characters are not escaped - can only send ASCII data */
-static int usb_gpib_write(gpib_board_t *board,
+static int usb_gpib_write(struct gpib_board *board,
u8 *buffer,
size_t length,
int send_eoi,
@@ -1053,9 +984,8 @@ static int usb_gpib_write(gpib_board_t *board,
*bytes_written = length;
- if (send_command(board, USB_GPIB_UNLISTEN, sizeof(USB_GPIB_UNLISTEN))
- != 0x06)
- return -EPIPE;
+ if (send_command(board, USB_GPIB_UNLISTEN, sizeof(USB_GPIB_UNLISTEN)) != 0x06)
+ return -EPIPE;
return length;
}
@@ -1066,64 +996,56 @@ static int usb_gpib_write(gpib_board_t *board,
/* parallel_poll configure */
-static void usb_gpib_parallel_poll_configure(gpib_board_t *board,
+static void usb_gpib_parallel_poll_configure(struct gpib_board *board,
uint8_t configuration)
{
- dev_alert(board->gpib_dev, "%s:%s - currently a NOP\n", NAME, __func__);
}
/* parallel_poll_response */
-static void usb_gpib_parallel_poll_response(gpib_board_t *board, int ist)
+static void usb_gpib_parallel_poll_response(struct gpib_board *board, int ist)
{
- dev_alert(board->gpib_dev, "%s:%s - currently a NOP\n", NAME, __func__);
}
/* primary_address */
-static int usb_gpib_primary_address(gpib_board_t *board, unsigned int address)
+static int usb_gpib_primary_address(struct gpib_board *board, unsigned int address)
{
- dev_alert(board->gpib_dev, "%s:%s - currently a NOP\n", NAME, __func__);
return 0;
}
/* return_to_local */
-static void usb_gpib_return_to_local(gpib_board_t *board)
+static void usb_gpib_return_to_local(struct gpib_board *board)
{
- dev_alert(board->gpib_dev, "%s:%s - currently a NOP\n", NAME, __func__);
}
/* secondary_address */
-static int usb_gpib_secondary_address(gpib_board_t *board,
+static int usb_gpib_secondary_address(struct gpib_board *board,
unsigned int address,
int enable)
{
- dev_alert(board->gpib_dev, "%s:%s - currently a NOP\n", NAME, __func__);
return 0;
}
/* serial_poll_response */
-static void usb_gpib_serial_poll_response(gpib_board_t *board, uint8_t status)
+static void usb_gpib_serial_poll_response(struct gpib_board *board, uint8_t status)
{
- dev_alert(board->gpib_dev, "%s:%s - currently a NOP\n", NAME, __func__);
}
/* serial_poll_status */
-static uint8_t usb_gpib_serial_poll_status(gpib_board_t *board)
+static uint8_t usb_gpib_serial_poll_status(struct gpib_board *board)
{
- dev_alert(board->gpib_dev, "%s:%s - currently a NOP\n", NAME, __func__);
return 0;
}
/* t1_delay */
-static unsigned int usb_gpib_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+static int usb_gpib_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
- dev_alert(board->gpib_dev, "%s:%s - currently a NOP\n", NAME, __func__);
return 0;
}
@@ -1181,7 +1103,7 @@ static int usb_gpib_init_module(struct usb_interface *interface)
if (!assigned_usb_minors) {
rv = gpib_register_driver(&usb_gpib_interface, THIS_MODULE);
if (rv) {
- pr_err("lpvo_usb_gpib: gpib_register_driver failed: error = %d\n", rv);
+ pr_err("gpib_register_driver failed: error = %d\n", rv);
goto exit;
}
} else {
@@ -1191,8 +1113,8 @@ static int usb_gpib_init_module(struct usb_interface *interface)
for (j = 0 ; j < MAX_DEV ; j++) {
if (usb_minors[j] == interface->minor && assigned_usb_minors & 1 << j) {
- pr_alert("%s:%s - CODE BUG: USB minor %d registered at %d.\n",
- NAME, __func__, interface->minor, j);
+ pr_err("CODE BUG: USB minor %d registered at %d.\n",
+ interface->minor, j);
rv = -1;
goto exit;
}
@@ -1207,13 +1129,11 @@ static int usb_gpib_init_module(struct usb_interface *interface)
usb_minors[j] = interface->minor;
lpvo_usb_interfaces[j] = interface;
assigned_usb_minors |= mask;
- DIA_LOG(0, "usb minor %d registered at %d\n", interface->minor, j);
rv = 0;
goto exit;
}
}
- pr_alert("%s:%s - No slot available for interface %p minor %d\n",
- NAME, __func__, interface, interface->minor);
+ pr_err("No slot available for interface %p minor %d\n", interface, interface->minor);
rv = -1;
exit:
@@ -1235,7 +1155,7 @@ static void usb_gpib_exit_module(int minor)
goto exit;
}
}
- pr_alert("%s:%s - CODE BUG: USB minor %d not found.\n", NAME, __func__, minor);
+ pr_err("CODE BUG: USB minor %d not found.\n", minor);
exit:
mutex_unlock(&minors_lock);
@@ -1267,7 +1187,7 @@ static int write_latency_timer(struct usb_device *udev)
LATENCY_TIMER, LATENCY_CHANNEL,
NULL, 0, WDR_TIMEOUT);
if (rv < 0)
- pr_alert("Unable to write latency timer: %i\n", rv);
+ dev_err(&udev->dev, "Unable to write latency timer: %i\n", rv);
return rv;
}
@@ -1363,18 +1283,15 @@ static void skel_delete(struct kref *kref)
* skel_do_open() - to be called by usb_gpib_attach
*/
-static int skel_do_open(gpib_board_t *board, int subminor)
+static int skel_do_open(struct gpib_board *board, int subminor)
{
struct usb_skel *dev;
struct usb_interface *interface;
int retval = 0;
- DIA_LOG(0, "Required minor: %d\n", subminor);
-
interface = usb_find_interface(&skel_driver, subminor);
if (!interface) {
- pr_err("%s - error, can't find device for minor %d\n",
- __func__, subminor);
+ dev_err(board->gpib_dev, "can't find device for minor %d\n", subminor);
retval = -ENODEV;
goto exit;
}
@@ -1403,7 +1320,7 @@ exit:
* skel_do_release() - to be called by usb_gpib_detach
*/
-static int skel_do_release(gpib_board_t *board)
+static int skel_do_release(struct gpib_board *board)
{
struct usb_skel *dev;
@@ -1439,9 +1356,8 @@ static void skel_read_bulk_callback(struct urb *urb)
if (!(urb->status == -ENOENT ||
urb->status == -ECONNRESET ||
urb->status == -ESHUTDOWN))
- dev_err(&dev->interface->dev,
- "%s - nonzero read bulk status received: %d\n",
- __func__, urb->status);
+ dev_err(&dev->interface->dev, "nonzero read bulk status received: %d\n",
+ urb->status);
dev->errors = urb->status;
} else {
@@ -1478,9 +1394,7 @@ static int skel_do_read_io(struct usb_skel *dev, size_t count)
/* do it */
rv = usb_submit_urb(dev->bulk_in_urb, GFP_KERNEL);
if (rv < 0) {
- dev_err(&dev->interface->dev,
- "%s - failed submitting read urb, error %d\n",
- __func__, rv);
+ dev_err(&dev->interface->dev, "failed submitting read urb, error %d\n", rv);
rv = (rv == -ENOMEM) ? rv : -EIO;
spin_lock_irq(&dev->err_lock);
dev->ongoing_read = 0;
@@ -1504,14 +1418,10 @@ static ssize_t skel_do_read(struct usb_skel *dev, char *buffer, size_t count)
if (!dev->bulk_in_urb || !count)
return 0;
- DIA_LOG(1, "enter for %zu.\n", count);
-
restart: /* added to comply with ftdi timeout technique */
/* no concurrent readers */
- DIA_LOG(2, "restart with %zd %zd.\n", dev->bulk_in_filled, dev->bulk_in_copied);
-
rv = mutex_lock_interruptible(&dev->io_mutex);
if (rv < 0)
return rv;
@@ -1527,8 +1437,6 @@ retry:
ongoing_io = dev->ongoing_read;
spin_unlock_irq(&dev->err_lock);
- DIA_LOG(2, "retry with %d.\n", ongoing_io);
-
if (ongoing_io) {
// /* nonblocking IO shall not wait */
// /* no file, no O_NONBLOCK; maybe provide when from user space */
@@ -1569,8 +1477,6 @@ retry:
// size_t chunk = min(available, count); /* compute chunk later */
size_t chunk;
- DIA_LOG(2, "we have data: %zu %zu.\n", dev->bulk_in_filled, dev->bulk_in_copied);
-
if (!available) {
/*
* all data has been used
@@ -1596,12 +1502,6 @@ retry:
*/
if (dev->bulk_in_copied) {
- int j;
-
- for (j = 0 ; j < dev->bulk_in_filled ; j++) {
- pr_alert("copy -> %x %zu %x\n",
- j, dev->bulk_in_copied, dev->bulk_in_buffer[j]);
- }
chunk = min(available, count);
memcpy(buffer, dev->bulk_in_buffer + dev->bulk_in_copied, chunk);
rv = chunk;
@@ -1613,7 +1513,7 @@ retry:
/* account for two bytes to be discarded */
chunk = min(available, count + 2);
if (chunk < 2) {
- pr_alert("BAD READ - chunk: %zu\n", chunk);
+ dev_err(&dev->udev->dev, "BAD READ - chunk: %zu\n", chunk);
rv = -EIO;
goto exit;
}
@@ -1633,8 +1533,6 @@ retry:
// if (available < count)
// skel_do_read_io(dev, dev->bulk_in_size);
} else {
- DIA_LOG(1, "no data - start read - copied: %zd.\n", dev->bulk_in_copied);
-
/* no data in the buffer */
rv = skel_do_read_io(dev, dev->bulk_in_size);
if (rv < 0)
@@ -1645,10 +1543,10 @@ retry:
exit:
mutex_unlock(&dev->io_mutex);
if (rv == 2)
- goto restart; /* ftdi chip returns two status bytes after a latency anyhow */
- DIA_LOG(1, "exit with %d.\n", rv);
+ goto restart; /* ftdi chip returns two status bytes after a latency anyhow */
+
if (rv > 0)
- return rv - 2; /* account for 2 discarded bytes in a valid buffer */
+ return rv - 2; /* account for 2 discarded bytes in a valid buffer */
return rv;
}
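Both the rv == 2 restart and the final rv - 2 account for the FTDI bridge framing: every bulk-in transfer begins with two modem/line-status bytes ahead of any payload. A sketch of the buffer layout assumed here:

	/*
	 * bulk_in_buffer: [status0][status1][payload ...]
	 * a transfer of exactly two bytes carries no payload,
	 * so rv == 2 means "nothing yet, restart the read"
	 */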
@@ -1669,8 +1567,7 @@ static void skel_write_bulk_callback(struct urb *urb)
urb->status == -ECONNRESET ||
urb->status == -ESHUTDOWN))
dev_err(&dev->interface->dev,
- "%s - nonzero write bulk status received: %d\n",
- __func__, urb->status);
+ "nonzero write bulk status received: %d\n", urb->status);
spin_lock_irqsave(&dev->err_lock, flags);
dev->errors = urb->status;
@@ -1763,9 +1660,7 @@ static ssize_t skel_do_write(struct usb_skel *dev, const char *buffer, size_t co
retval = usb_submit_urb(urb, GFP_KERNEL);
mutex_unlock(&dev->io_mutex);
if (retval) {
- dev_err(&dev->interface->dev,
- "%s - failed submitting write urb, error %d\n",
- __func__, retval);
+ dev_err(&dev->interface->dev, "failed submitting write urb, error %d\n", retval);
goto error_unanchor;
}
@@ -1831,8 +1726,7 @@ static int skel_open(struct inode *inode, struct file *file)
interface = usb_find_interface(&skel_driver, subminor);
if (!interface) {
- pr_err("%s - error, can't find device for minor %d\n",
- __func__, subminor);
+ pr_err("can't find device for minor %d\n", subminor);
retval = -ENODEV;
goto exit;
}
@@ -1895,8 +1789,6 @@ static ssize_t skel_read(struct file *file, char __user *buffer, size_t count,
rv = skel_do_read(dev, buf, count);
- pr_alert("%s - return with %zu\n", __func__, rv);
-
if (rv > 0) {
if (copy_to_user(buffer, buf, rv)) {
kfree(buf);
@@ -2015,8 +1907,8 @@ static int skel_probe(struct usb_interface *interface,
/* let the world know */
device_path = kobject_get_path(&dev->udev->dev.kobj, GFP_KERNEL);
- pr_alert("%s:%s - New lpvo_usb_device -> bus: %d dev: %d path: %s\n", NAME, __func__,
- dev->udev->bus->busnum, dev->udev->devnum, device_path);
+ dev_dbg(&interface->dev, "New lpvo_usb_device -> bus: %d dev: %d path: %s\n",
+ dev->udev->bus->busnum, dev->udev->devnum, device_path);
kfree(device_path);
#if USER_DEVICE
@@ -2029,14 +1921,9 @@ static int skel_probe(struct usb_interface *interface,
usb_set_intfdata(interface, NULL);
goto error;
}
-
- /* let the user know what node this device is now attached to */
- dev_info(&interface->dev,
- "lpvo_usb_gpib device now attached to lpvo_raw%d",
- interface->minor);
#endif
- write_latency_timer(dev->udev); /* adjust the latency timer */
+ write_latency_timer(dev->udev); /* adjust the latency timer */
usb_gpib_init_module(interface); /* last, init the lpvo for this minor */
@@ -2073,8 +1960,6 @@ static void skel_disconnect(struct usb_interface *interface)
/* decrement our usage count */
kref_put(&dev->kref, skel_delete);
-
- dev_info(&interface->dev, "USB lpvo_raw #%d now disconnected", minor);
}
static void skel_draw_down(struct usb_skel *dev)
diff --git a/drivers/staging/gpib/nec7210/nec7210.c b/drivers/staging/gpib/nec7210/nec7210.c
index c9a837fad96e..846c0a3fa1dc 100644
--- a/drivers/staging/gpib/nec7210/nec7210.c
+++ b/drivers/staging/gpib/nec7210/nec7210.c
@@ -4,6 +4,8 @@
* copyright : (C) 2001, 2002 by Frank Mori Hess
***************************************************************************/
+#define dev_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "board.h"
#include <linux/ioport.h>
#include <linux/sched.h>
@@ -21,7 +23,7 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB library code for NEC uPD7210");
-int nec7210_enable_eos(gpib_board_t *board, struct nec7210_priv *priv, uint8_t eos_byte,
+int nec7210_enable_eos(struct gpib_board *board, struct nec7210_priv *priv, uint8_t eos_byte,
int compare_8_bits)
{
write_byte(priv, eos_byte, EOSR);
@@ -35,14 +37,14 @@ int nec7210_enable_eos(gpib_board_t *board, struct nec7210_priv *priv, uint8_t e
}
EXPORT_SYMBOL(nec7210_enable_eos);
-void nec7210_disable_eos(gpib_board_t *board, struct nec7210_priv *priv)
+void nec7210_disable_eos(struct gpib_board *board, struct nec7210_priv *priv)
{
priv->auxa_bits &= ~HR_REOS;
write_byte(priv, priv->auxa_bits, AUXMR);
}
EXPORT_SYMBOL(nec7210_disable_eos);
-int nec7210_parallel_poll(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *result)
+int nec7210_parallel_poll(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *result)
{
int ret;
@@ -62,14 +64,14 @@ int nec7210_parallel_poll(gpib_board_t *board, struct nec7210_priv *priv, uint8_
}
EXPORT_SYMBOL(nec7210_parallel_poll);
-void nec7210_parallel_poll_configure(gpib_board_t *board,
+void nec7210_parallel_poll_configure(struct gpib_board *board,
struct nec7210_priv *priv, unsigned int configuration)
{
write_byte(priv, PPR | configuration, AUXMR);
}
EXPORT_SYMBOL(nec7210_parallel_poll_configure);
-void nec7210_parallel_poll_response(gpib_board_t *board, struct nec7210_priv *priv, int ist)
+void nec7210_parallel_poll_response(struct gpib_board *board, struct nec7210_priv *priv, int ist)
{
if (ist)
write_byte(priv, AUX_SPPF, AUXMR);
@@ -83,7 +85,8 @@ EXPORT_SYMBOL(nec7210_parallel_poll_response);
* the 488.2 capability (for example with NI chips), or we need to implement the
* 488.2 set srv state machine in the driver (if that is even viable).
*/
-void nec7210_serial_poll_response(gpib_board_t *board, struct nec7210_priv *priv, uint8_t status)
+void nec7210_serial_poll_response(struct gpib_board *board,
+ struct nec7210_priv *priv, uint8_t status)
{
unsigned long flags;
@@ -100,13 +103,13 @@ void nec7210_serial_poll_response(gpib_board_t *board, struct nec7210_priv *priv
}
EXPORT_SYMBOL(nec7210_serial_poll_response);
-uint8_t nec7210_serial_poll_status(gpib_board_t *board, struct nec7210_priv *priv)
+uint8_t nec7210_serial_poll_status(struct gpib_board *board, struct nec7210_priv *priv)
{
return read_byte(priv, SPSR);
}
EXPORT_SYMBOL(nec7210_serial_poll_status);
-int nec7210_primary_address(const gpib_board_t *board, struct nec7210_priv *priv,
+int nec7210_primary_address(const struct gpib_board *board, struct nec7210_priv *priv,
unsigned int address)
{
// put primary address in address0
@@ -115,7 +118,7 @@ int nec7210_primary_address(const gpib_board_t *board, struct nec7210_priv *priv
}
EXPORT_SYMBOL(nec7210_primary_address);
-int nec7210_secondary_address(const gpib_board_t *board, struct nec7210_priv *priv,
+int nec7210_secondary_address(const struct gpib_board *board, struct nec7210_priv *priv,
unsigned int address, int enable)
{
if (enable) {
@@ -164,7 +167,7 @@ static void update_listener_state(struct nec7210_priv *priv, unsigned int addres
}
}
-unsigned int nec7210_update_status_nolock(gpib_board_t *board, struct nec7210_priv *priv)
+unsigned int nec7210_update_status_nolock(struct gpib_board *board, struct nec7210_priv *priv)
{
int address_status_bits;
u8 spoll_status;
@@ -198,7 +201,6 @@ unsigned int nec7210_update_status_nolock(gpib_board_t *board, struct nec7210_pr
priv->srq_pending = 0;
set_bit(SPOLL_NUM, &board->status);
}
-// dev_dbg(board->gpib_dev, "status 0x%x, state 0x%x\n", board->status, priv->state);
/* we rely on the interrupt handler to set the
* rest of the status bits
@@ -208,7 +210,7 @@ unsigned int nec7210_update_status_nolock(gpib_board_t *board, struct nec7210_pr
}
EXPORT_SYMBOL(nec7210_update_status_nolock);
-unsigned int nec7210_update_status(gpib_board_t *board, struct nec7210_priv *priv,
+unsigned int nec7210_update_status(struct gpib_board *board, struct nec7210_priv *priv,
unsigned int clear_mask)
{
unsigned long flags;
@@ -233,7 +235,7 @@ unsigned int nec7210_set_reg_bits(struct nec7210_priv *priv, unsigned int reg,
}
EXPORT_SYMBOL(nec7210_set_reg_bits);
-void nec7210_set_handshake_mode(gpib_board_t *board, struct nec7210_priv *priv, int mode)
+void nec7210_set_handshake_mode(struct gpib_board *board, struct nec7210_priv *priv, int mode)
{
unsigned long flags;
@@ -249,7 +251,7 @@ void nec7210_set_handshake_mode(gpib_board_t *board, struct nec7210_priv *priv,
}
EXPORT_SYMBOL(nec7210_set_handshake_mode);
-uint8_t nec7210_read_data_in(gpib_board_t *board, struct nec7210_priv *priv, int *end)
+uint8_t nec7210_read_data_in(struct gpib_board *board, struct nec7210_priv *priv, int *end)
{
unsigned long flags;
u8 data;
@@ -267,7 +269,7 @@ uint8_t nec7210_read_data_in(gpib_board_t *board, struct nec7210_priv *priv, int
}
EXPORT_SYMBOL(nec7210_read_data_in);
-int nec7210_take_control(gpib_board_t *board, struct nec7210_priv *priv, int syncronous)
+int nec7210_take_control(struct gpib_board *board, struct nec7210_priv *priv, int syncronous)
{
int i;
const int timeout = 100;
@@ -294,7 +296,7 @@ int nec7210_take_control(gpib_board_t *board, struct nec7210_priv *priv, int syn
}
EXPORT_SYMBOL(nec7210_take_control);
-int nec7210_go_to_standby(gpib_board_t *board, struct nec7210_priv *priv)
+int nec7210_go_to_standby(struct gpib_board *board, struct nec7210_priv *priv)
{
int i;
const int timeout = 1000;
@@ -319,10 +321,8 @@ int nec7210_go_to_standby(gpib_board_t *board, struct nec7210_priv *priv)
if (adsr_bits & HR_NATN)
break;
}
- if (i == HZ) {
- pr_err("nec7210: error waiting for NATN\n");
+ if (i == HZ)
return -ETIMEDOUT;
- }
}
clear_bit(COMMAND_READY_BN, &priv->state);
@@ -330,7 +330,7 @@ int nec7210_go_to_standby(gpib_board_t *board, struct nec7210_priv *priv)
}
EXPORT_SYMBOL(nec7210_go_to_standby);
-void nec7210_request_system_control(gpib_board_t *board, struct nec7210_priv *priv,
+void nec7210_request_system_control(struct gpib_board *board, struct nec7210_priv *priv,
int request_control)
{
if (request_control == 0) {
@@ -341,7 +341,7 @@ void nec7210_request_system_control(gpib_board_t *board, struct nec7210_priv *pr
}
EXPORT_SYMBOL(nec7210_request_system_control);
-void nec7210_interface_clear(gpib_board_t *board, struct nec7210_priv *priv, int assert)
+void nec7210_interface_clear(struct gpib_board *board, struct nec7210_priv *priv, int assert)
{
if (assert)
write_byte(priv, AUX_SIFC, AUXMR);
@@ -350,7 +350,7 @@ void nec7210_interface_clear(gpib_board_t *board, struct nec7210_priv *priv, int
}
EXPORT_SYMBOL(nec7210_interface_clear);
-void nec7210_remote_enable(gpib_board_t *board, struct nec7210_priv *priv, int enable)
+void nec7210_remote_enable(struct gpib_board *board, struct nec7210_priv *priv, int enable)
{
if (enable)
write_byte(priv, AUX_SREN, AUXMR);
@@ -359,7 +359,7 @@ void nec7210_remote_enable(gpib_board_t *board, struct nec7210_priv *priv, int e
}
EXPORT_SYMBOL(nec7210_remote_enable);
-void nec7210_release_rfd_holdoff(gpib_board_t *board, struct nec7210_priv *priv)
+void nec7210_release_rfd_holdoff(struct gpib_board *board, struct nec7210_priv *priv)
{
unsigned long flags;
@@ -373,8 +373,8 @@ void nec7210_release_rfd_holdoff(gpib_board_t *board, struct nec7210_priv *priv)
}
EXPORT_SYMBOL(nec7210_release_rfd_holdoff);
-unsigned int nec7210_t1_delay(gpib_board_t *board, struct nec7210_priv *priv,
- unsigned int nano_sec)
+int nec7210_t1_delay(struct gpib_board *board, struct nec7210_priv *priv,
+ unsigned int nano_sec)
{
unsigned int retval;
@@ -391,13 +391,13 @@ unsigned int nec7210_t1_delay(gpib_board_t *board, struct nec7210_priv *priv,
}
EXPORT_SYMBOL(nec7210_t1_delay);
-void nec7210_return_to_local(const gpib_board_t *board, struct nec7210_priv *priv)
+void nec7210_return_to_local(const struct gpib_board *board, struct nec7210_priv *priv)
{
write_byte(priv, AUX_RTL, AUXMR);
}
EXPORT_SYMBOL(nec7210_return_to_local);
-static inline short nec7210_atn_has_changed(gpib_board_t *board, struct nec7210_priv *priv)
+static inline short nec7210_atn_has_changed(struct gpib_board *board, struct nec7210_priv *priv)
{
short address_status_bits = read_byte(priv, ADSR);
@@ -415,7 +415,7 @@ static inline short nec7210_atn_has_changed(gpib_board_t *board, struct nec7210_
return -1;
}
-int nec7210_command(gpib_board_t *board, struct nec7210_priv *priv, uint8_t
+int nec7210_command(struct gpib_board *board, struct nec7210_priv *priv, uint8_t
*buffer, size_t length, size_t *bytes_written)
{
int retval = 0;
@@ -430,17 +430,14 @@ int nec7210_command(gpib_board_t *board, struct nec7210_priv *priv, uint8_t
test_bit(COMMAND_READY_BN, &priv->state) ||
test_bit(BUS_ERROR_BN, &priv->state) ||
test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "gpib command wait interrupted\n");
+ dev_dbg(board->gpib_dev, "command wait interrupted\n");
retval = -ERESTARTSYS;
break;
}
if (test_bit(TIMO_NUM, &board->status))
break;
- if (test_and_clear_bit(BUS_ERROR_BN, &priv->state)) {
- pr_err("nec7210: bus error on command byte\n");
+ if (test_and_clear_bit(BUS_ERROR_BN, &priv->state))
break;
- }
-
spin_lock_irqsave(&board->spinlock, flags);
clear_bit(COMMAND_READY_BN, &priv->state);
write_byte(priv, buffer[*bytes_written], CDOR);
@@ -454,24 +451,20 @@ int nec7210_command(gpib_board_t *board, struct nec7210_priv *priv, uint8_t
// wait for last byte to get sent
if (wait_event_interruptible(board->wait, test_bit(COMMAND_READY_BN, &priv->state) ||
test_bit(BUS_ERROR_BN, &priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "gpib command wait interrupted\n");
+ test_bit(TIMO_NUM, &board->status)))
retval = -ERESTARTSYS;
- }
- if (test_bit(TIMO_NUM, &board->status)) {
- dev_dbg(board->gpib_dev, "gpib command timed out\n");
+
+ if (test_bit(TIMO_NUM, &board->status))
retval = -ETIMEDOUT;
- }
- if (test_and_clear_bit(BUS_ERROR_BN, &priv->state)) {
- pr_err("nec7210: bus error on command byte\n");
+
+ if (test_and_clear_bit(BUS_ERROR_BN, &priv->state))
retval = -EIO;
- }
return retval;
}
EXPORT_SYMBOL(nec7210_command);
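The loop above follows the driver's standard wait pattern: sleep until one of several state bits fires, then map the winning bit to an errno. A condensed sketch of the idiom with the same bit names (the function itself accumulates retval instead of returning early):

	if (wait_event_interruptible(board->wait,
				     test_bit(COMMAND_READY_BN, &priv->state) ||
				     test_bit(BUS_ERROR_BN, &priv->state) ||
				     test_bit(TIMO_NUM, &board->status)))
		return -ERESTARTSYS;	/* interrupted by a signal */
	if (test_bit(TIMO_NUM, &board->status))
		return -ETIMEDOUT;	/* board timeout fired */
	if (test_and_clear_bit(BUS_ERROR_BN, &priv->state))
		return -EIO;		/* bus error on the command byte */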
-static int pio_read(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buffer,
+static int pio_read(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
size_t length, int *end, size_t *bytes_read)
{
ssize_t retval = 0;
@@ -484,7 +477,6 @@ static int pio_read(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buf
test_bit(READ_READY_BN, &priv->state) ||
test_bit(DEV_CLEAR_BN, &priv->state) ||
test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "nec7210: pio read wait interrupted\n");
retval = -ERESTARTSYS;
break;
}
@@ -503,12 +495,10 @@ static int pio_read(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buf
break;
}
if (test_bit(TIMO_NUM, &board->status)) {
- dev_dbg(board->gpib_dev, "interrupted by timeout\n");
retval = -ETIMEDOUT;
break;
}
if (test_bit(DEV_CLEAR_BN, &priv->state)) {
- dev_dbg(board->gpib_dev, "interrupted by device clear\n");
retval = -EINTR;
break;
}
@@ -523,7 +513,7 @@ static int pio_read(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buf
}
#ifdef NEC_DMA
-static ssize_t __dma_read(gpib_board_t *board, struct nec7210_priv *priv, size_t length)
+static ssize_t __dma_read(struct gpib_board *board, struct nec7210_priv *priv, size_t length)
{
ssize_t retval = 0;
size_t count = 0;
@@ -557,10 +547,9 @@ static ssize_t __dma_read(gpib_board_t *board, struct nec7210_priv *priv, size_t
if (wait_event_interruptible(board->wait,
test_bit(DMA_READ_IN_PROGRESS_BN, &priv->state) == 0 ||
test_bit(DEV_CLEAR_BN, &priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "nec7210: dma read wait interrupted\n");
+ test_bit(TIMO_NUM, &board->status)))
retval = -ERESTARTSYS;
- }
+
if (test_bit(TIMO_NUM, &board->status))
retval = -ETIMEDOUT;
if (test_bit(DEV_CLEAR_BN, &priv->state))
@@ -579,7 +568,7 @@ static ssize_t __dma_read(gpib_board_t *board, struct nec7210_priv *priv, size_t
return retval ? retval : count;
}
-static ssize_t dma_read(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buffer,
+static ssize_t dma_read(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
size_t length)
{
size_t remain = length;
@@ -606,7 +595,7 @@ static ssize_t dma_read(gpib_board_t *board, struct nec7210_priv *priv, uint8_t
}
#endif
-int nec7210_read(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buffer,
+int nec7210_read(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
size_t length, int *end, size_t *bytes_read)
{
ssize_t retval = 0;
@@ -627,7 +616,7 @@ int nec7210_read(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buffer
}
EXPORT_SYMBOL(nec7210_read);
-static int pio_write_wait(gpib_board_t *board, struct nec7210_priv *priv,
+static int pio_write_wait(struct gpib_board *board, struct nec7210_priv *priv,
short wake_on_lacs, short wake_on_atn, short wake_on_bus_error)
{
// wait until byte is ready to be sent
@@ -638,26 +627,22 @@ static int pio_write_wait(gpib_board_t *board, struct nec7210_priv *priv,
(wake_on_bus_error && test_bit(BUS_ERROR_BN, &priv->state)) ||
(wake_on_lacs && test_bit(LACS_NUM, &board->status)) ||
(wake_on_atn && test_bit(ATN_NUM, &board->status)) ||
- test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "gpib write interrupted\n");
+ test_bit(TIMO_NUM, &board->status)))
return -ERESTARTSYS;
- }
- if (test_bit(TIMO_NUM, &board->status)) {
- dev_dbg(board->gpib_dev, "nec7210: write timed out\n");
+
+ if (test_bit(TIMO_NUM, &board->status))
return -ETIMEDOUT;
- }
- if (test_bit(DEV_CLEAR_BN, &priv->state)) {
- dev_dbg(board->gpib_dev, "nec7210: write interrupted by clear\n");
+
+ if (test_bit(DEV_CLEAR_BN, &priv->state))
return -EINTR;
- }
- if (wake_on_bus_error && test_and_clear_bit(BUS_ERROR_BN, &priv->state)) {
- dev_dbg(board->gpib_dev, "nec7210: bus error on write\n");
+
+ if (wake_on_bus_error && test_and_clear_bit(BUS_ERROR_BN, &priv->state))
return -EIO;
- }
+
return 0;
}
-static int pio_write(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buffer,
+static int pio_write(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
size_t length, size_t *bytes_written)
{
size_t last_count = 0;
@@ -677,7 +662,6 @@ static int pio_write(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *bu
if (retval == -EIO) {
/* resend last byte on bus error */
*bytes_written = last_count;
- dev_dbg(board->gpib_dev, "resending %c\n", buffer[*bytes_written]);
/* we can get unrecoverable bus errors,
* so give up after a while
*/
@@ -701,7 +685,7 @@ static int pio_write(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *bu
}
#ifdef NEC_DMA
-static ssize_t __dma_write(gpib_board_t *board, struct nec7210_priv *priv, dma_addr_t address,
+static ssize_t __dma_write(struct gpib_board *board, struct nec7210_priv *priv, dma_addr_t address,
size_t length)
{
unsigned long flags, dma_irq_flags;
@@ -733,10 +717,9 @@ static ssize_t __dma_write(gpib_board_t *board, struct nec7210_priv *priv, dma_a
test_bit(DMA_WRITE_IN_PROGRESS_BN, &priv->state) == 0 ||
test_bit(BUS_ERROR_BN, &priv->state) ||
test_bit(DEV_CLEAR_BN, &priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "gpib write interrupted!\n");
+ test_bit(TIMO_NUM, &board->status)))
retval = -ERESTARTSYS;
- }
+
if (test_bit(TIMO_NUM, &board->status))
retval = -ETIMEDOUT;
if (test_and_clear_bit(DEV_CLEAR_BN, &priv->state))
@@ -759,7 +742,7 @@ static ssize_t __dma_write(gpib_board_t *board, struct nec7210_priv *priv, dma_a
return retval ? retval : length;
}
-static ssize_t dma_write(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buffer,
+static ssize_t dma_write(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
size_t length)
{
size_t remain = length;
@@ -783,8 +766,9 @@ static ssize_t dma_write(gpib_board_t *board, struct nec7210_priv *priv, uint8_t
return length - remain;
}
#endif
-int nec7210_write(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buffer, size_t length,
- int send_eoi, size_t *bytes_written)
+int nec7210_write(struct gpib_board *board, struct nec7210_priv *priv,
+ uint8_t *buffer, size_t length, int send_eoi,
+ size_t *bytes_written)
{
int retval = 0;
@@ -845,7 +829,7 @@ EXPORT_SYMBOL(nec7210_write);
/*
* interrupt service routine
*/
-irqreturn_t nec7210_interrupt(gpib_board_t *board, struct nec7210_priv *priv)
+irqreturn_t nec7210_interrupt(struct gpib_board *board, struct nec7210_priv *priv)
{
int status1, status2;
@@ -857,7 +841,7 @@ irqreturn_t nec7210_interrupt(gpib_board_t *board, struct nec7210_priv *priv)
}
EXPORT_SYMBOL(nec7210_interrupt);
-irqreturn_t nec7210_interrupt_have_status(gpib_board_t *board,
+irqreturn_t nec7210_interrupt_have_status(struct gpib_board *board,
struct nec7210_priv *priv, int status1, int status2)
{
#ifdef NEC_DMA
@@ -937,13 +921,8 @@ irqreturn_t nec7210_interrupt_have_status(gpib_board_t *board,
set_bit(COMMAND_READY_BN, &priv->state);
// command pass through received
- if (status1 & HR_CPT) {
- unsigned int command;
-
- command = read_byte(priv, CPTR) & gpib_command_mask;
+ if (status1 & HR_CPT)
write_byte(priv, AUX_NVAL, AUXMR);
-// printk("gpib: command pass through 0x%x\n", command);
- }
if (status1 & HR_ERR)
set_bit(BUS_ERROR_BN, &priv->state);
@@ -980,7 +959,7 @@ irqreturn_t nec7210_interrupt_have_status(gpib_board_t *board,
}
EXPORT_SYMBOL(nec7210_interrupt_have_status);
-void nec7210_board_reset(struct nec7210_priv *priv, const gpib_board_t *board)
+void nec7210_board_reset(struct nec7210_priv *priv, const struct gpib_board *board)
{
/* 7210 chip reset */
write_byte(priv, AUX_CR, AUXMR);
@@ -1014,7 +993,7 @@ void nec7210_board_reset(struct nec7210_priv *priv, const gpib_board_t *board)
}
EXPORT_SYMBOL(nec7210_board_reset);
-void nec7210_board_online(struct nec7210_priv *priv, const gpib_board_t *board)
+void nec7210_board_online(struct nec7210_priv *priv, const struct gpib_board *board)
{
/* set GPIB address */
nec7210_primary_address(board, priv, board->pad);
diff --git a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c b/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
index d0656dc520f5..14f7049a8e5e 100644
--- a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
+++ b/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
@@ -5,6 +5,10 @@
* copyright : (C) 2004 by Frank Mori Hess
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define DRV_NAME KBUILD_MODNAME
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -20,7 +24,7 @@ MODULE_DESCRIPTION("GPIB driver for National Instruments USB devices");
static struct usb_interface *ni_usb_driver_interfaces[MAX_NUM_NI_USB_INTERFACES];
static int ni_usb_parse_status_block(const u8 *buffer, struct ni_usb_status_block *status);
-static int ni_usb_set_interrupt_monitor(gpib_board_t *board, unsigned int monitored_bits);
+static int ni_usb_set_interrupt_monitor(struct gpib_board *board, unsigned int monitored_bits);
static void ni_usb_stop(struct ni_usb_priv *ni_priv);
static DEFINE_MUTEX(ni_usb_hotplug_lock);
@@ -75,7 +79,7 @@ static unsigned short ni_usb_timeout_code(unsigned int usec)
*/
else if (usec <= 1000000000)
return 0x02;
- pr_err("%s: bug? usec is greater than 1e9\n", __func__);
+ pr_err("bug? usec is greater than 1e9\n");
return 0xf0;
}
@@ -83,8 +87,6 @@ static void ni_usb_bulk_complete(struct urb *urb)
{
struct ni_usb_urb_ctx *context = urb->context;
-// printk("debug: %s: status=0x%x, error_count=%i, actual_length=%i\n", __func__,
-// urb->status, urb->error_count, urb->actual_length);
complete(&context->complete);
}
@@ -137,8 +139,8 @@ static int ni_usb_nonblocking_send_bulk_msg(struct ni_usb_priv *ni_priv, void *d
del_timer_sync(&ni_priv->bulk_timer);
usb_free_urb(ni_priv->bulk_urb);
ni_priv->bulk_urb = NULL;
- dev_err(&usb_dev->dev, "%s: failed to submit bulk out urb, retval=%i\n",
- __func__, retval);
+ dev_err(&usb_dev->dev, "failed to submit bulk out urb, retval=%i\n",
+ retval);
mutex_unlock(&ni_priv->bulk_transfer_lock);
return retval;
}
@@ -146,7 +148,7 @@ static int ni_usb_nonblocking_send_bulk_msg(struct ni_usb_priv *ni_priv, void *d
wait_for_completion(&context->complete); // wait for ni_usb_bulk_complete
if (context->timed_out) {
usb_kill_urb(ni_priv->bulk_urb);
- dev_err(&usb_dev->dev, "%s: killed urb due to timeout\n", __func__);
+ dev_err(&usb_dev->dev, "killed urb due to timeout\n");
retval = -ETIMEDOUT;
} else {
retval = ni_priv->bulk_urb->status;
@@ -218,14 +220,12 @@ static int ni_usb_nonblocking_receive_bulk_msg(struct ni_usb_priv *ni_priv,
if (timeout_msecs)
mod_timer(&ni_priv->bulk_timer, jiffies + msecs_to_jiffies(timeout_msecs));
- //printk("%s: submitting urb\n", __func__);
retval = usb_submit_urb(ni_priv->bulk_urb, GFP_KERNEL);
if (retval) {
del_timer_sync(&ni_priv->bulk_timer);
usb_free_urb(ni_priv->bulk_urb);
ni_priv->bulk_urb = NULL;
- dev_err(&usb_dev->dev, "%s: failed to submit bulk out urb, retval=%i\n",
- __func__, retval);
+ dev_err(&usb_dev->dev, "failed to submit bulk in urb, retval=%i\n", retval);
mutex_unlock(&ni_priv->bulk_transfer_lock);
return retval;
}
@@ -250,7 +250,7 @@ static int ni_usb_nonblocking_receive_bulk_msg(struct ni_usb_priv *ni_priv,
}
if (context->timed_out) {
usb_kill_urb(ni_priv->bulk_urb);
- dev_err(&usb_dev->dev, "%s: killed urb due to timeout\n", __func__);
+ dev_err(&usb_dev->dev, "killed urb due to timeout\n");
retval = -ETIMEDOUT;
} else {
if (ni_priv->bulk_urb->status)
@@ -310,7 +310,7 @@ static int ni_usb_receive_control_msg(struct ni_usb_priv *ni_priv, __u8 request,
return retval;
}
-static void ni_usb_soft_update_status(gpib_board_t *board, unsigned int ni_usb_ibsta,
+static void ni_usb_soft_update_status(struct gpib_board *board, unsigned int ni_usb_ibsta,
unsigned int clear_mask)
{
static const unsigned int ni_usb_ibsta_mask = SRQI | ATN | CIC | REM | LACS | TACS | LOK;
@@ -330,14 +330,14 @@ static void ni_usb_soft_update_status(gpib_board_t *board, unsigned int ni_usb_i
ni_priv->monitored_ibsta_bits &= ~ni_usb_ibsta;
need_monitoring_bits &= ~ni_priv->monitored_ibsta_bits; /* mm - monitored set */
spin_unlock_irqrestore(&board->spinlock, flags);
- dev_dbg(&usb_dev->dev, "%s: need_monitoring_bits=0x%x\n", __func__, need_monitoring_bits);
+ dev_dbg(&usb_dev->dev, "need_monitoring_bits=0x%x\n", need_monitoring_bits);
if (need_monitoring_bits & ~ni_usb_ibsta)
ni_usb_set_interrupt_monitor(board, ni_usb_ibsta_monitor_mask);
else if (need_monitoring_bits & ni_usb_ibsta)
wake_up_interruptible(&board->wait);
- dev_dbg(&usb_dev->dev, "%s: ni_usb_ibsta=0x%x\n", __func__, ni_usb_ibsta);
+ dev_dbg(&usb_dev->dev, "ibsta=0x%x\n", ni_usb_ibsta);
}
static int ni_usb_parse_status_block(const u8 *buffer, struct ni_usb_status_block *status)
@@ -371,7 +371,7 @@ static int ni_usb_parse_register_read_block(const u8 *raw_data, unsigned int *re
int k;
if (raw_data[i++] != NIUSB_REGISTER_READ_DATA_START_ID) {
- pr_err("%s: parse error: wrong start id\n", __func__);
+ pr_err("parse error: wrong start id\n");
unexpected = 1;
}
for (k = 0; k < results_per_chunk && j < num_results; ++k)
@@ -380,18 +380,18 @@ static int ni_usb_parse_register_read_block(const u8 *raw_data, unsigned int *re
while (i % 4)
i++;
if (raw_data[i++] != NIUSB_REGISTER_READ_DATA_END_ID) {
- pr_err("%s: parse error: wrong end id\n", __func__);
+ pr_err("parse error: wrong end id\n");
unexpected = 1;
}
if (raw_data[i++] % results_per_chunk != num_results % results_per_chunk) {
- pr_err("%s: parse error: wrong count=%i for NIUSB_REGISTER_READ_DATA_END\n",
- __func__, (int)raw_data[i - 1]);
+ pr_err("parse error: wrong count=%i for NIUSB_REGISTER_READ_DATA_END\n",
+ (int)raw_data[i - 1]);
unexpected = 1;
}
while (i % 4) {
if (raw_data[i++] != 0) {
- pr_err("%s: unexpected data: raw_data[%i]=0x%x, expected 0\n",
- __func__, i - 1, (int)raw_data[i - 1]);
+ pr_err("unexpected data: raw_data[%i]=0x%x, expected 0\n",
+ i - 1, (int)raw_data[i - 1]);
unexpected = 1;
}
}
@@ -408,9 +408,8 @@ static int ni_usb_parse_termination_block(const u8 *buffer)
buffer[i++] != 0x0 ||
buffer[i++] != 0x0 ||
buffer[i++] != 0x0) {
- pr_err("%s: received unexpected termination block\n", __func__);
- pr_err(" expected: 0x%x 0x%x 0x%x 0x%x\n",
- NIUSB_TERM_ID, 0x0, 0x0, 0x0);
+ pr_err("received unexpected termination block\n");
+ pr_err(" expected: 0x%x 0x%x 0x%x 0x%x\n", NIUSB_TERM_ID, 0x0, 0x0, 0x0);
pr_err(" received: 0x%x 0x%x 0x%x 0x%x\n",
buffer[i - 4], buffer[i - 3], buffer[i - 2], buffer[i - 1]);
}
@@ -427,7 +426,6 @@ static int parse_board_ibrd_readback(const u8 *raw_data, struct ni_usb_status_bl
int i = 0;
int j = 0;
int k;
- unsigned int adr1_bits;
int num_data_blocks = 0;
struct ni_usb_status_block register_write_status;
int unexpected = 0;
@@ -438,12 +436,12 @@ static int parse_board_ibrd_readback(const u8 *raw_data, struct ni_usb_status_bl
} else if (raw_data[i] == NIUSB_IBRD_EXTENDED_DATA_ID) {
data_block_length = ibrd_extended_data_block_length;
if (raw_data[++i] != 0) {
- pr_err("%s: unexpected data: raw_data[%i]=0x%x, expected 0\n",
- __func__, i, (int)raw_data[i]);
+ pr_err("unexpected data: raw_data[%i]=0x%x, expected 0\n",
+ i, (int)raw_data[i]);
unexpected = 1;
}
} else {
- pr_err("%s: logic bug!\n", __func__);
+ pr_err("Unexpected NIUSB_IBRD ID\n");
return -EINVAL;
}
++i;
@@ -457,10 +455,10 @@ static int parse_board_ibrd_readback(const u8 *raw_data, struct ni_usb_status_bl
}
i += ni_usb_parse_status_block(&raw_data[i], status);
if (status->id != NIUSB_IBRD_STATUS_ID) {
- pr_err("%s: bug: status->id=%i, != ibrd_status_id\n", __func__, status->id);
+ pr_err("bug: status->id=%i, != ibrd_status_id\n", status->id);
return -EIO;
}
- adr1_bits = raw_data[i++];
+ i++;
if (num_data_blocks) {
*actual_bytes_read = (num_data_blocks - 1) * data_block_length + raw_data[i++];
} else {
@@ -468,29 +466,28 @@ static int parse_board_ibrd_readback(const u8 *raw_data, struct ni_usb_status_bl
*actual_bytes_read = 0;
}
if (*actual_bytes_read > j)
- pr_err("%s: bug: discarded data. actual_bytes_read=%i, j=%i\n",
- __func__, *actual_bytes_read, j);
+ pr_err("bug: discarded data. actual_bytes_read=%i, j=%i\n", *actual_bytes_read, j);
for (k = 0; k < 2; k++)
if (raw_data[i++] != 0) {
- pr_err("%s: unexpected data: raw_data[%i]=0x%x, expected 0\n",
- __func__, i - 1, (int)raw_data[i - 1]);
+ pr_err("unexpected data: raw_data[%i]=0x%x, expected 0\n",
+ i - 1, (int)raw_data[i - 1]);
unexpected = 1;
}
i += ni_usb_parse_status_block(&raw_data[i], &register_write_status);
if (register_write_status.id != NIUSB_REG_WRITE_ID) {
- pr_err("%s: unexpected data: register write status id=0x%x, expected 0x%x\n",
- __func__, register_write_status.id, NIUSB_REG_WRITE_ID);
+ pr_err("unexpected data: register write status id=0x%x, expected 0x%x\n",
+ register_write_status.id, NIUSB_REG_WRITE_ID);
unexpected = 1;
}
if (raw_data[i++] != 2) {
- pr_err("%s: unexpected data: register write count=%i, expected 2\n",
- __func__, (int)raw_data[i - 1]);
+ pr_err("unexpected data: register write count=%i, expected 2\n",
+ (int)raw_data[i - 1]);
unexpected = 1;
}
for (k = 0; k < 3; k++)
if (raw_data[i++] != 0) {
- pr_err("%s: unexpected data: raw_data[%i]=0x%x, expected 0\n",
- __func__, i - 1, (int)raw_data[i - 1]);
+ pr_err("unexpected data: raw_data[%i]=0x%x, expected 0\n",
+ i - 1, (int)raw_data[i - 1]);
unexpected = 1;
}
i += ni_usb_parse_termination_block(&raw_data[i]);
@@ -530,18 +527,14 @@ static int ni_usb_write_registers(struct ni_usb_priv *ni_priv,
out_data_length = num_writes * bytes_per_write + 0x10;
out_data = kmalloc(out_data_length, GFP_KERNEL);
- if (!out_data) {
- dev_err(&usb_dev->dev, "%s: kmalloc failed\n", __func__);
+ if (!out_data)
return -ENOMEM;
- }
i += ni_usb_bulk_register_write_header(&out_data[i], num_writes);
for (j = 0; j < num_writes; j++)
i += ni_usb_bulk_register_write(&out_data[i], writes[j]);
while (i % 4)
out_data[i++] = 0x00;
i += ni_usb_bulk_termination(&out_data[i]);
- if (i > out_data_length)
- dev_err(&usb_dev->dev, "%s: bug! buffer overrun\n", __func__);
mutex_lock(&ni_priv->addressed_transfer_lock);
@@ -549,22 +542,21 @@ static int ni_usb_write_registers(struct ni_usb_priv *ni_priv,
kfree(out_data);
if (retval) {
mutex_unlock(&ni_priv->addressed_transfer_lock);
- dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
- __func__, retval, bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
+ retval, bytes_written, i);
return retval;
}
in_data = kmalloc(in_data_length, GFP_KERNEL);
if (!in_data) {
mutex_unlock(&ni_priv->addressed_transfer_lock);
- dev_err(&usb_dev->dev, "%s: kmalloc failed\n", __func__);
return -ENOMEM;
}
retval = ni_usb_receive_bulk_msg(ni_priv, in_data, in_data_length, &bytes_read, 1000, 0);
if (retval || bytes_read != 16) {
mutex_unlock(&ni_priv->addressed_transfer_lock);
- dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n",
- __func__, retval, bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n",
+ retval, bytes_read);
ni_usb_dump_raw_block(in_data, bytes_read);
kfree(in_data);
return retval;
@@ -576,18 +568,16 @@ static int ni_usb_write_registers(struct ni_usb_priv *ni_priv,
//FIXME parse extra 09 status bits and termination
kfree(in_data);
if (status.id != NIUSB_REG_WRITE_ID) {
- dev_err(&usb_dev->dev, "%s: parse error, id=0x%x != NIUSB_REG_WRITE_ID\n",
- __func__, status.id);
+ dev_err(&usb_dev->dev, "parse error, id=0x%x != NIUSB_REG_WRITE_ID\n", status.id);
return -EIO;
}
if (status.error_code) {
- dev_err(&usb_dev->dev, "%s: nonzero error code 0x%x\n",
- __func__, status.error_code);
+ dev_err(&usb_dev->dev, "nonzero error code 0x%x\n", status.error_code);
return -EIO;
}
if (reg_writes_completed != num_writes) {
- dev_err(&usb_dev->dev, "%s: reg_writes_completed=%i, num_writes=%i\n",
- __func__, reg_writes_completed, num_writes);
+ dev_err(&usb_dev->dev, "reg_writes_completed=%i, num_writes=%i\n",
+ reg_writes_completed, num_writes);
return -EIO;
}
if (ibsta)
@@ -596,12 +586,12 @@ static int ni_usb_write_registers(struct ni_usb_priv *ni_priv,
}
// interface functions
-static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int ni_usb_read(struct gpib_board *board, uint8_t *buffer, size_t length,
int *end, size_t *bytes_read)
{
int retval, parse_retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
u8 *out_data, *in_data;
static const int out_data_length = 0x20;
int in_data_length;
@@ -614,10 +604,11 @@ static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length,
struct ni_usb_register reg;
*bytes_read = 0;
- if (length > max_read_length) {
- length = max_read_length;
- dev_err(&usb_dev->dev, "%s: read length too long\n", __func__);
- }
+ if (!ni_priv->bus_interface)
+ return -ENODEV;
+ if (length > max_read_length)
+ return -EINVAL;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
out_data = kmalloc(out_data_length, GFP_KERNEL);
if (!out_data)
return -ENOMEM;
@@ -649,8 +640,8 @@ static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length,
if (retval || usb_bytes_written != i) {
if (retval == 0)
retval = -EIO;
- dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, usb_bytes_written=%i, i=%i\n",
- __func__, retval, usb_bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, usb_bytes_written=%i, i=%i\n",
+ retval, usb_bytes_written, i);
mutex_unlock(&ni_priv->addressed_transfer_lock);
return retval;
}
@@ -668,8 +659,8 @@ static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length,
if (retval == -ERESTARTSYS) {
} else if (retval) {
- dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, usb_bytes_read=%i\n",
- __func__, retval, usb_bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, usb_bytes_read=%i\n",
+ retval, usb_bytes_read);
kfree(in_data);
return retval;
}
@@ -677,14 +668,14 @@ static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length,
if (parse_retval != usb_bytes_read) {
if (parse_retval >= 0)
parse_retval = -EIO;
- dev_err(&usb_dev->dev, "%s: retval=%i usb_bytes_read=%i\n",
- __func__, parse_retval, usb_bytes_read);
+ dev_err(&usb_dev->dev, "retval=%i usb_bytes_read=%i\n",
+ parse_retval, usb_bytes_read);
kfree(in_data);
return parse_retval;
}
if (actual_length != length - status.count) {
- dev_err(&usb_dev->dev, "%s: actual_length=%i expected=%li\n",
- __func__, actual_length, (long)(length - status.count));
+ dev_err(&usb_dev->dev, "actual_length=%i expected=%li\n",
+ actual_length, (long)(length - status.count));
ni_usb_dump_raw_block(in_data, usb_bytes_read);
}
kfree(in_data);
@@ -699,7 +690,7 @@ static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length,
break;
case NIUSB_ATN_STATE_ERROR:
retval = -EIO;
- dev_err(&usb_dev->dev, "%s: read when ATN set\n", __func__);
+ dev_err(&usb_dev->dev, "read when ATN set\n");
break;
case NIUSB_ADDRESSING_ERROR:
retval = -EIO;
@@ -708,12 +699,11 @@ static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length,
retval = -ETIMEDOUT;
break;
case NIUSB_EOSMODE_ERROR:
- dev_err(&usb_dev->dev, "%s: driver bug, we should have been able to avoid NIUSB_EOSMODE_ERROR.\n",
- __func__);
+ dev_err(&usb_dev->dev, "driver bug, we should have been able to avoid NIUSB_EOSMODE_ERROR.\n");
retval = -EINVAL;
break;
default:
- dev_err(&usb_dev->dev, "%s: unknown error code=%i\n", __func__, status.error_code);
+ dev_err(&usb_dev->dev, "unknown error code=%i\n", status.error_code);
retval = -EIO;
break;
}
@@ -726,12 +716,12 @@ static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length,
return retval;
}
-static int ni_usb_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int ni_usb_write(struct gpib_board *board, uint8_t *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
u8 *out_data, *in_data;
int out_data_length;
static const int in_data_length = 0x10;
@@ -741,12 +731,11 @@ static int ni_usb_write(gpib_board_t *board, uint8_t *buffer, size_t length,
struct ni_usb_status_block status;
static const int max_write_length = 0xffff;
- *bytes_written = 0;
- if (length > max_write_length) {
- length = max_write_length;
- send_eoi = 0;
- dev_err(&usb_dev->dev, "%s: write length too long\n", __func__);
- }
+ if (!ni_priv->bus_interface)
+ return -ENODEV;
+ if (length > max_write_length)
+ return -EINVAL;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
out_data_length = length + 0x10;
out_data = kmalloc(out_data_length, GFP_KERNEL);
if (!out_data)
@@ -777,8 +766,8 @@ static int ni_usb_write(gpib_board_t *board, uint8_t *buffer, size_t length,
kfree(out_data);
if (retval || usb_bytes_written != i) {
mutex_unlock(&ni_priv->addressed_transfer_lock);
- dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, usb_bytes_written=%i, i=%i\n",
- __func__, retval, usb_bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, usb_bytes_written=%i, i=%i\n",
+ retval, usb_bytes_written, i);
return retval;
}
@@ -793,8 +782,8 @@ static int ni_usb_write(gpib_board_t *board, uint8_t *buffer, size_t length,
mutex_unlock(&ni_priv->addressed_transfer_lock);
if ((retval && retval != -ERESTARTSYS) || usb_bytes_read != 12) {
- dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, usb_bytes_read=%i\n",
- __func__, retval, usb_bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, usb_bytes_read=%i\n",
+ retval, usb_bytes_read);
kfree(in_data);
return retval;
}
@@ -810,8 +799,8 @@ static int ni_usb_write(gpib_board_t *board, uint8_t *buffer, size_t length,
*/
break;
case NIUSB_ADDRESSING_ERROR:
- dev_err(&usb_dev->dev, "%s: Addressing error retval %d error code=%i\n",
- __func__, retval, status.error_code);
+ dev_err(&usb_dev->dev, "Addressing error retval %d error code=%i\n",
+ retval, status.error_code);
retval = -ENXIO;
break;
case NIUSB_NO_LISTENER_ERROR:
@@ -821,8 +810,7 @@ static int ni_usb_write(gpib_board_t *board, uint8_t *buffer, size_t length,
retval = -ETIMEDOUT;
break;
default:
- dev_err(&usb_dev->dev, "%s: unknown error code=%i\n",
- __func__, status.error_code);
+ dev_err(&usb_dev->dev, "unknown error code=%i\n", status.error_code);
retval = -EPIPE;
break;
}
@@ -831,12 +819,12 @@ static int ni_usb_write(gpib_board_t *board, uint8_t *buffer, size_t length,
return retval;
}
-static int ni_usb_command_chunk(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int ni_usb_command_chunk(struct gpib_board *board, uint8_t *buffer, size_t length,
size_t *command_bytes_written)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
u8 *out_data, *in_data;
int out_data_length;
static const int in_data_length = 0x10;
@@ -848,8 +836,11 @@ static int ni_usb_command_chunk(gpib_board_t *board, uint8_t *buffer, size_t len
static const int max_command_length = 0x10;
*command_bytes_written = 0;
+ if (!ni_priv->bus_interface)
+ return -ENODEV;
if (length > max_command_length)
length = max_command_length;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
out_data_length = length + 0x10;
out_data = kmalloc(out_data_length, GFP_KERNEL);
if (!out_data)
@@ -873,8 +864,8 @@ static int ni_usb_command_chunk(gpib_board_t *board, uint8_t *buffer, size_t len
kfree(out_data);
if (retval || bytes_written != i) {
mutex_unlock(&ni_priv->addressed_transfer_lock);
- dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
- __func__, retval, bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
+ retval, bytes_written, i);
return retval;
}
@@ -890,8 +881,8 @@ static int ni_usb_command_chunk(gpib_board_t *board, uint8_t *buffer, size_t len
mutex_unlock(&ni_priv->addressed_transfer_lock);
if ((retval && retval != -ERESTARTSYS) || bytes_read != 12) {
- dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n",
- __func__, retval, bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n",
+ retval, bytes_read);
kfree(in_data);
return retval;
}
@@ -909,19 +900,19 @@ static int ni_usb_command_chunk(gpib_board_t *board, uint8_t *buffer, size_t len
case NIUSB_NO_BUS_ERROR:
return -ENOTCONN;
case NIUSB_EOSMODE_ERROR:
- dev_err(&usb_dev->dev, "%s: got eosmode error. Driver bug?\n", __func__);
+ dev_err(&usb_dev->dev, "got eosmode error. Driver bug?\n");
return -EIO;
case NIUSB_TIMEOUT_ERROR:
return -ETIMEDOUT;
default:
- dev_err(&usb_dev->dev, "%s: unknown error code=%i\n", __func__, status.error_code);
+ dev_err(&usb_dev->dev, "unknown error code=%i\n", status.error_code);
return -EIO;
}
ni_usb_soft_update_status(board, status.ibsta, 0);
return 0;
}
-static int ni_usb_command(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int ni_usb_command(struct gpib_board *board, uint8_t *buffer, size_t length,
size_t *bytes_written)
{
size_t count;
@@ -938,11 +929,11 @@ static int ni_usb_command(gpib_board_t *board, uint8_t *buffer, size_t length,
return 0;
}
-static int ni_usb_take_control(gpib_board_t *board, int synchronous)
+static int ni_usb_take_control(struct gpib_board *board, int synchronous)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
u8 *out_data, *in_data;
static const int out_data_length = 0x10;
static const int in_data_length = 0x10;
@@ -950,6 +941,9 @@ static int ni_usb_take_control(gpib_board_t *board, int synchronous)
int i = 0;
struct ni_usb_status_block status;
+ if (!ni_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
out_data = kmalloc(out_data_length, GFP_KERNEL);
if (!out_data)
return -ENOMEM;
@@ -968,15 +962,14 @@ static int ni_usb_take_control(gpib_board_t *board, int synchronous)
kfree(out_data);
if (retval || bytes_written != i) {
mutex_unlock(&ni_priv->addressed_transfer_lock);
- dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
- __func__, retval, bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
+ retval, bytes_written, i);
return retval;
}
in_data = kmalloc(in_data_length, GFP_KERNEL);
if (!in_data) {
mutex_unlock(&ni_priv->addressed_transfer_lock);
- dev_err(&usb_dev->dev, "%s: kmalloc failed\n", __func__);
return -ENOMEM;
}
retval = ni_usb_receive_bulk_msg(ni_priv, in_data, in_data_length, &bytes_read, 1000, 1);
@@ -986,8 +979,8 @@ static int ni_usb_take_control(gpib_board_t *board, int synchronous)
if ((retval && retval != -ERESTARTSYS) || bytes_read != 12) {
if (retval == 0)
retval = -EIO;
- dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n",
- __func__, retval, bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n",
+ retval, bytes_read);
kfree(in_data);
return retval;
}
@@ -997,11 +990,11 @@ static int ni_usb_take_control(gpib_board_t *board, int synchronous)
return retval;
}
-static int ni_usb_go_to_standby(gpib_board_t *board)
+static int ni_usb_go_to_standby(struct gpib_board *board)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
u8 *out_data, *in_data;
static const int out_data_length = 0x10;
static const int in_data_length = 0x20;
@@ -1009,6 +1002,9 @@ static int ni_usb_go_to_standby(gpib_board_t *board)
int i = 0;
struct ni_usb_status_block status;
+ if (!ni_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
out_data = kmalloc(out_data_length, GFP_KERNEL);
if (!out_data)
return -ENOMEM;
@@ -1025,15 +1021,14 @@ static int ni_usb_go_to_standby(gpib_board_t *board)
kfree(out_data);
if (retval || bytes_written != i) {
mutex_unlock(&ni_priv->addressed_transfer_lock);
- dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
- __func__, retval, bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
+ retval, bytes_written, i);
return retval;
}
in_data = kmalloc(in_data_length, GFP_KERNEL);
if (!in_data) {
mutex_unlock(&ni_priv->addressed_transfer_lock);
- dev_err(&usb_dev->dev, "%s: kmalloc failed\n", __func__);
return -ENOMEM;
}
retval = ni_usb_receive_bulk_msg(ni_priv, in_data, in_data_length, &bytes_read, 1000, 0);
@@ -1041,29 +1036,31 @@ static int ni_usb_go_to_standby(gpib_board_t *board)
mutex_unlock(&ni_priv->addressed_transfer_lock);
if (retval || bytes_read != 12) {
- dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n",
- __func__, retval, bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n",
+ retval, bytes_read);
kfree(in_data);
return retval;
}
ni_usb_parse_status_block(in_data, &status);
kfree(in_data);
if (status.id != NIUSB_IBGTS_ID)
- dev_err(&usb_dev->dev, "%s: bug: status.id 0x%x != INUSB_IBGTS_ID\n",
- __func__, status.id);
+ dev_err(&usb_dev->dev, "bug: status.id 0x%x != INUSB_IBGTS_ID\n", status.id);
ni_usb_soft_update_status(board, status.ibsta, 0);
return 0;
}
-static void ni_usb_request_system_control(gpib_board_t *board, int request_control)
+static void ni_usb_request_system_control(struct gpib_board *board, int request_control)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
int i = 0;
struct ni_usb_register writes[4];
unsigned int ibsta;
+ if (!ni_priv->bus_interface)
+ return; // -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
if (request_control) {
writes[i].device = NIUSB_SUBDEV_TNT4882;
writes[i].address = CMDR;
@@ -1093,7 +1090,7 @@ static void ni_usb_request_system_control(gpib_board_t *board, int request_contr
}
retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
return; // retval;
}
if (!request_control)
@@ -1103,11 +1100,11 @@ static void ni_usb_request_system_control(gpib_board_t *board, int request_contr
}
//FIXME maybe the interface should have a "pulse interface clear" function that can return an error?
-static void ni_usb_interface_clear(gpib_board_t *board, int assert)
+static void ni_usb_interface_clear(struct gpib_board *board, int assert)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
u8 *out_data, *in_data;
static const int out_data_length = 0x10;
static const int in_data_length = 0x10;
@@ -1115,14 +1112,15 @@ static void ni_usb_interface_clear(gpib_board_t *board, int assert)
int i = 0;
struct ni_usb_status_block status;
- // FIXME: we are going to pulse when assert is true, and ignore otherwise
+ if (!ni_priv->bus_interface)
+ return; // -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+// FIXME: we are going to pulse when assert is true, and ignore otherwise
if (assert == 0)
return;
out_data = kmalloc(out_data_length, GFP_KERNEL);
- if (!out_data) {
- dev_err(&usb_dev->dev, "%s: kmalloc failed\n", __func__);
+ if (!out_data)
return;
- }
out_data[i++] = NIUSB_IBSIC_ID;
out_data[i++] = 0x0;
out_data[i++] = 0x0;
@@ -1131,8 +1129,8 @@ static void ni_usb_interface_clear(gpib_board_t *board, int assert)
retval = ni_usb_send_bulk_msg(ni_priv, out_data, i, &bytes_written, 1000);
kfree(out_data);
if (retval || bytes_written != i) {
- dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
- __func__, retval, bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
+ retval, bytes_written, i);
return;
}
in_data = kmalloc(in_data_length, GFP_KERNEL);
@@ -1141,8 +1139,8 @@ static void ni_usb_interface_clear(gpib_board_t *board, int assert)
retval = ni_usb_receive_bulk_msg(ni_priv, in_data, in_data_length, &bytes_read, 1000, 0);
if (retval || bytes_read != 12) {
- dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n",
- __func__, retval, bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n",
+ retval, bytes_read);
kfree(in_data);
return;
}
@@ -1151,14 +1149,17 @@ static void ni_usb_interface_clear(gpib_board_t *board, int assert)
ni_usb_soft_update_status(board, status.ibsta, 0);
}
-static void ni_usb_remote_enable(gpib_board_t *board, int enable)
+static void ni_usb_remote_enable(struct gpib_board *board, int enable)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
struct ni_usb_register reg;
unsigned int ibsta;
+ if (!ni_priv->bus_interface)
+ return; // -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
reg.device = NIUSB_SUBDEV_TNT4882;
reg.address = nec7210_to_tnt4882_offset(AUXMR);
if (enable)
@@ -1167,7 +1168,7 @@ static void ni_usb_remote_enable(gpib_board_t *board, int enable)
reg.value = AUX_CREN;
retval = ni_usb_write_registers(ni_priv, &reg, 1, &ibsta);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
return; //retval;
}
ni_priv->ren_state = enable;
@@ -1175,7 +1176,7 @@ static void ni_usb_remote_enable(gpib_board_t *board, int enable)
return;// 0;
}
-static int ni_usb_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+static int ni_usb_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
{
struct ni_usb_priv *ni_priv = board->private_data;
@@ -1188,7 +1189,7 @@ static int ni_usb_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_
return 0;
}
-static void ni_usb_disable_eos(gpib_board_t *board)
+static void ni_usb_disable_eos(struct gpib_board *board)
{
struct ni_usb_priv *ni_priv = board->private_data;
/* adapter gets unhappy if you don't zero all the bits
@@ -1198,16 +1199,18 @@ static void ni_usb_disable_eos(gpib_board_t *board)
ni_priv->eos_char = 0;
}
-static unsigned int ni_usb_update_status(gpib_board_t *board, unsigned int clear_mask)
+static unsigned int ni_usb_update_status(struct gpib_board *board, unsigned int clear_mask)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
static const int buffer_length = 8;
u8 *buffer;
struct ni_usb_status_block status;
- //printk("%s: receive control pipe is %i\n", __func__, pipe);
+ if (!ni_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
buffer = kmalloc(buffer_length, GFP_KERNEL);
if (!buffer)
return board->status;
@@ -1216,7 +1219,7 @@ static unsigned int ni_usb_update_status(gpib_board_t *board, unsigned int clear
USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x200, 0x0, buffer, buffer_length, 1000);
if (retval != buffer_length) {
- dev_err(&usb_dev->dev, "%s: usb_control_msg returned %i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "usb_control_msg returned %i\n", retval);
kfree(buffer);
return board->status;
}
@@ -1235,7 +1238,6 @@ static void ni_usb_stop(struct ni_usb_priv *ni_priv)
u8 *buffer;
struct ni_usb_status_block status;
- //printk("%s: receive control pipe is %i\n", __func__, pipe);
buffer = kmalloc(buffer_length, GFP_KERNEL);
if (!buffer)
return;
@@ -1244,7 +1246,7 @@ static void ni_usb_stop(struct ni_usb_priv *ni_priv)
USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0, 0x0, buffer, buffer_length, 1000);
if (retval != buffer_length) {
- dev_err(&usb_dev->dev, "%s: usb_control_msg returned %i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "usb_control_msg returned %i\n", retval);
kfree(buffer);
return;
}
@@ -1252,15 +1254,18 @@ static void ni_usb_stop(struct ni_usb_priv *ni_priv)
kfree(buffer);
}
-static int ni_usb_primary_address(gpib_board_t *board, unsigned int address)
+static int ni_usb_primary_address(struct gpib_board *board, unsigned int address)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
int i = 0;
struct ni_usb_register writes[2];
unsigned int ibsta;
+ if (!ni_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
writes[i].device = NIUSB_SUBDEV_TNT4882;
writes[i].address = nec7210_to_tnt4882_offset(ADR);
writes[i].value = address;
@@ -1271,7 +1276,7 @@ static int ni_usb_primary_address(gpib_board_t *board, unsigned int address)
i++;
retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
return retval;
}
ni_usb_soft_update_status(board, ibsta, 0);
@@ -1307,30 +1312,33 @@ static int ni_usb_write_sad(struct ni_usb_register *writes, int address, int ena
return i;
}
-static int ni_usb_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+static int ni_usb_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
int i = 0;
struct ni_usb_register writes[3];
unsigned int ibsta;
+ if (!ni_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
i += ni_usb_write_sad(writes, address, enable);
retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
return retval;
}
ni_usb_soft_update_status(board, ibsta, 0);
return 0;
}
-static int ni_usb_parallel_poll(gpib_board_t *board, uint8_t *result)
+static int ni_usb_parallel_poll(struct gpib_board *board, uint8_t *result)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
u8 *out_data, *in_data;
static const int out_data_length = 0x10;
static const int in_data_length = 0x20;
@@ -1339,6 +1347,9 @@ static int ni_usb_parallel_poll(gpib_board_t *board, uint8_t *result)
int j = 0;
struct ni_usb_status_block status;
+ if (!ni_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
out_data = kmalloc(out_data_length, GFP_KERNEL);
if (!out_data)
return -ENOMEM;
@@ -1353,8 +1364,8 @@ static int ni_usb_parallel_poll(gpib_board_t *board, uint8_t *result)
kfree(out_data);
if (retval || bytes_written != i) {
- dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
- __func__, retval, bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
+ retval, bytes_written, i);
return retval;
}
in_data = kmalloc(in_data_length, GFP_KERNEL);
@@ -1366,8 +1377,8 @@ static int ni_usb_parallel_poll(gpib_board_t *board, uint8_t *result)
&bytes_read, 1000, 1);
if (retval && retval != -ERESTARTSYS) {
- dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n",
- __func__, retval, bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n",
+ retval, bytes_read);
kfree(in_data);
return retval;
}
@@ -1378,37 +1389,43 @@ static int ni_usb_parallel_poll(gpib_board_t *board, uint8_t *result)
return retval;
}
-static void ni_usb_parallel_poll_configure(gpib_board_t *board, uint8_t config)
+static void ni_usb_parallel_poll_configure(struct gpib_board *board, uint8_t config)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
int i = 0;
struct ni_usb_register writes[1];
unsigned int ibsta;
+ if (!ni_priv->bus_interface)
+ return; // -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
writes[i].device = NIUSB_SUBDEV_TNT4882;
writes[i].address = nec7210_to_tnt4882_offset(AUXMR);
writes[i].value = PPR | config;
i++;
retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
return;// retval;
}
ni_usb_soft_update_status(board, ibsta, 0);
return;// 0;
}
-static void ni_usb_parallel_poll_response(gpib_board_t *board, int ist)
+static void ni_usb_parallel_poll_response(struct gpib_board *board, int ist)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
int i = 0;
struct ni_usb_register writes[1];
unsigned int ibsta;
+ if (!ni_priv->bus_interface)
+ return; // -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
writes[i].device = NIUSB_SUBDEV_TNT4882;
writes[i].address = nec7210_to_tnt4882_offset(AUXMR);
if (ist)
@@ -1418,76 +1435,85 @@ static void ni_usb_parallel_poll_response(gpib_board_t *board, int ist)
i++;
retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
return;// retval;
}
ni_usb_soft_update_status(board, ibsta, 0);
return;// 0;
}
-static void ni_usb_serial_poll_response(gpib_board_t *board, u8 status)
+static void ni_usb_serial_poll_response(struct gpib_board *board, u8 status)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
int i = 0;
struct ni_usb_register writes[1];
unsigned int ibsta;
+ if (!ni_priv->bus_interface)
+ return; // -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
writes[i].device = NIUSB_SUBDEV_TNT4882;
writes[i].address = nec7210_to_tnt4882_offset(SPMR);
writes[i].value = status;
i++;
retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
return;// retval;
}
ni_usb_soft_update_status(board, ibsta, 0);
return;// 0;
}
-static uint8_t ni_usb_serial_poll_status(gpib_board_t *board)
+static uint8_t ni_usb_serial_poll_status(struct gpib_board *board)
{
return 0;
}
-static void ni_usb_return_to_local(gpib_board_t *board)
+static void ni_usb_return_to_local(struct gpib_board *board)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
int i = 0;
struct ni_usb_register writes[1];
unsigned int ibsta;
+ if (!ni_priv->bus_interface)
+ return; // -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
writes[i].device = NIUSB_SUBDEV_TNT4882;
writes[i].address = nec7210_to_tnt4882_offset(AUXMR);
writes[i].value = AUX_RTL;
i++;
retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
return;// retval;
}
ni_usb_soft_update_status(board, ibsta, 0);
return;// 0;
}
-static int ni_usb_line_status(const gpib_board_t *board)
+static int ni_usb_line_status(const struct gpib_board *board)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
u8 *out_data, *in_data;
static const int out_data_length = 0x20;
static const int in_data_length = 0x20;
int bytes_written = 0, bytes_read = 0;
int i = 0;
unsigned int bsr_bits;
- int line_status = ValidALL;
+ int line_status = VALID_ALL;
// NI windows driver reads 0xd(HSSEL), 0xc (ARD0), 0x1f (BSR)
+ if (!ni_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
out_data = kmalloc(out_data_length, GFP_KERNEL);
if (!out_data)
return -ENOMEM;
@@ -1509,15 +1535,14 @@ static int ni_usb_line_status(const gpib_board_t *board)
if (retval || bytes_written != i) {
mutex_unlock(&ni_priv->addressed_transfer_lock);
if (retval != -EAGAIN)
- dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
- __func__, retval, bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
+ retval, bytes_written, i);
return retval;
}
in_data = kmalloc(in_data_length, GFP_KERNEL);
if (!in_data) {
mutex_unlock(&ni_priv->addressed_transfer_lock);
- dev_err(&usb_dev->dev, "%s: kmalloc failed\n", __func__);
return -ENOMEM;
}
retval = ni_usb_nonblocking_receive_bulk_msg(ni_priv, in_data, in_data_length,
@@ -1527,8 +1552,8 @@ static int ni_usb_line_status(const gpib_board_t *board)
if (retval) {
if (retval != -EAGAIN)
- dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n",
- __func__, retval, bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n",
+ retval, bytes_read);
kfree(in_data);
return retval;
}
@@ -1536,21 +1561,21 @@ static int ni_usb_line_status(const gpib_board_t *board)
ni_usb_parse_register_read_block(in_data, &bsr_bits, 1);
kfree(in_data);
if (bsr_bits & BCSR_REN_BIT)
- line_status |= BusREN;
+ line_status |= BUS_REN;
if (bsr_bits & BCSR_IFC_BIT)
- line_status |= BusIFC;
+ line_status |= BUS_IFC;
if (bsr_bits & BCSR_SRQ_BIT)
- line_status |= BusSRQ;
+ line_status |= BUS_SRQ;
if (bsr_bits & BCSR_EOI_BIT)
- line_status |= BusEOI;
+ line_status |= BUS_EOI;
if (bsr_bits & BCSR_NRFD_BIT)
- line_status |= BusNRFD;
+ line_status |= BUS_NRFD;
if (bsr_bits & BCSR_NDAC_BIT)
- line_status |= BusNDAC;
+ line_status |= BUS_NDAC;
if (bsr_bits & BCSR_DAV_BIT)
- line_status |= BusDAV;
+ line_status |= BUS_DAV;
if (bsr_bits & BCSR_ATN_BIT)
- line_status |= BusATN;
+ line_status |= BUS_ATN;
return line_status;
}
@@ -1591,28 +1616,31 @@ static int ni_usb_setup_t1_delay(struct ni_usb_register *reg, unsigned int nano_
return i;
}
-static unsigned int ni_usb_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+static int ni_usb_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
struct ni_usb_register writes[3];
unsigned int ibsta;
unsigned int actual_ns;
int i;
+ if (!ni_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
i = ni_usb_setup_t1_delay(writes, nano_sec, &actual_ns);
retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval);
- return -1; //FIXME should change return type to int for error reporting
+ dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
+ return retval;
}
board->t1_nano_sec = actual_ns;
ni_usb_soft_update_status(board, ibsta, 0);
return actual_ns;
}
-static int ni_usb_allocate_private(gpib_board_t *board)
+static int ni_usb_allocate_private(struct gpib_board *board)
{
struct ni_usb_priv *ni_priv;
@@ -1635,7 +1663,7 @@ static void ni_usb_free_private(struct ni_usb_priv *ni_priv)
}
#define NUM_INIT_WRITES 26
-static int ni_usb_setup_init(gpib_board_t *board, struct ni_usb_register *writes)
+static int ni_usb_setup_init(struct gpib_board *board, struct ni_usb_register *writes)
{
struct ni_usb_priv *ni_priv = board->private_data;
struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
@@ -1736,13 +1764,13 @@ static int ni_usb_setup_init(gpib_board_t *board, struct ni_usb_register *writes
writes[i].value = AUX_CPPF;
i++;
if (i > NUM_INIT_WRITES) {
- dev_err(&usb_dev->dev, "%s: bug!, buffer overrun, i=%i\n", __func__, i);
+ dev_err(&usb_dev->dev, "bug!, buffer overrun, i=%i\n", i);
return 0;
}
return i;
}
-static int ni_usb_init(gpib_board_t *board)
+static int ni_usb_init(struct gpib_board *board)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
@@ -1762,7 +1790,7 @@ static int ni_usb_init(gpib_board_t *board)
return -EFAULT;
kfree(writes);
if (retval) {
- dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
return retval;
}
ni_usb_soft_update_status(board, ibsta, 0);
@@ -1771,16 +1799,13 @@ static int ni_usb_init(gpib_board_t *board)
static void ni_usb_interrupt_complete(struct urb *urb)
{
- gpib_board_t *board = urb->context;
+ struct gpib_board *board = urb->context;
struct ni_usb_priv *ni_priv = board->private_data;
struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
int retval;
struct ni_usb_status_block status;
unsigned long flags;
-// printk("debug: %s: status=0x%x, error_count=%i, actual_length=%i\n", __func__,
-// urb->status, urb->error_count, urb->actual_length);
-
switch (urb->status) {
/* success */
case 0:
@@ -1793,26 +1818,24 @@ static void ni_usb_interrupt_complete(struct urb *urb)
default: /* other error, resubmit */
retval = usb_submit_urb(ni_priv->interrupt_urb, GFP_ATOMIC);
if (retval)
- dev_err(&usb_dev->dev, "%s: failed to resubmit interrupt urb\n", __func__);
+ dev_err(&usb_dev->dev, "failed to resubmit interrupt urb\n");
return;
}
ni_usb_parse_status_block(urb->transfer_buffer, &status);
-// printk("debug: ibsta=0x%x\n", status.ibsta);
spin_lock_irqsave(&board->spinlock, flags);
ni_priv->monitored_ibsta_bits &= ~status.ibsta;
-// printk("debug: monitored_ibsta_bits=0x%x\n", ni_priv->monitored_ibsta_bits);
spin_unlock_irqrestore(&board->spinlock, flags);
wake_up_interruptible(&board->wait);
retval = usb_submit_urb(ni_priv->interrupt_urb, GFP_ATOMIC);
if (retval)
- dev_err(&usb_dev->dev, "%s: failed to resubmit interrupt urb\n", __func__);
+ dev_err(&usb_dev->dev, "failed to resubmit interrupt urb\n");
}
-static int ni_usb_set_interrupt_monitor(gpib_board_t *board, unsigned int monitored_bits)
+static int ni_usb_set_interrupt_monitor(struct gpib_board *board, unsigned int monitored_bits)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
@@ -1821,22 +1844,20 @@ static int ni_usb_set_interrupt_monitor(gpib_board_t *board, unsigned int monito
u8 *buffer;
struct ni_usb_status_block status;
unsigned long flags;
- //printk("%s: receive control pipe is %i\n", __func__, pipe);
+
buffer = kmalloc(buffer_length, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
spin_lock_irqsave(&board->spinlock, flags);
ni_priv->monitored_ibsta_bits = ni_usb_ibsta_monitor_mask & monitored_bits;
-// dev_err(&usb_dev->dev, "debug: %s: monitored_ibsta_bits=0x%x\n",
-// __func__, ni_priv->monitored_ibsta_bits);
spin_unlock_irqrestore(&board->spinlock, flags);
retval = ni_usb_receive_control_msg(ni_priv, NI_USB_WAIT_REQUEST, USB_DIR_IN |
USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x300, ni_usb_ibsta_monitor_mask & monitored_bits,
buffer, buffer_length, 1000);
if (retval != buffer_length) {
- dev_err(&usb_dev->dev, "%s: usb_control_msg returned %i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "usb_control_msg returned %i\n", retval);
kfree(buffer);
return -1;
}
@@ -1845,7 +1866,7 @@ static int ni_usb_set_interrupt_monitor(gpib_board_t *board, unsigned int monito
return 0;
}
-static int ni_usb_setup_urbs(gpib_board_t *board)
+static int ni_usb_setup_urbs(struct gpib_board *board)
{
struct ni_usb_priv *ni_priv = board->private_data;
struct usb_device *usb_dev;
@@ -1872,8 +1893,7 @@ static int ni_usb_setup_urbs(gpib_board_t *board)
retval = usb_submit_urb(ni_priv->interrupt_urb, GFP_KERNEL);
mutex_unlock(&ni_priv->interrupt_transfer_lock);
if (retval) {
- dev_err(&usb_dev->dev, "%s: failed to submit first interrupt urb, retval=%i\n",
- __func__, retval);
+ dev_err(&usb_dev->dev, "failed to submit first interrupt urb, retval=%i\n", retval);
return retval;
}
return 0;
@@ -1904,7 +1924,6 @@ static int ni_usb_b_read_serial_number(struct ni_usb_priv *ni_priv)
int j;
unsigned int serial_number;
-// printk("%s: %s\n", __func__);
in_data = kmalloc(in_data_length, GFP_KERNEL);
if (!in_data)
return -ENOMEM;
@@ -1924,20 +1943,19 @@ static int ni_usb_b_read_serial_number(struct ni_usb_priv *ni_priv)
i += ni_usb_bulk_termination(&out_data[i]);
retval = ni_usb_send_bulk_msg(ni_priv, out_data, out_data_length, &bytes_written, 1000);
if (retval) {
- dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%li\n",
- __func__,
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%li\n",
retval, bytes_written, (long)out_data_length);
goto serial_out;
}
retval = ni_usb_receive_bulk_msg(ni_priv, in_data, in_data_length, &bytes_read, 1000, 0);
if (retval) {
- dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n",
- __func__, retval, bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n",
+ retval, bytes_read);
ni_usb_dump_raw_block(in_data, bytes_read);
goto serial_out;
}
if (ARRAY_SIZE(results) < num_reads) {
- dev_err(&usb_dev->dev, "Setup bug\n");
+ dev_err(&usb_dev->dev, "serial number eetup bug\n");
retval = -EINVAL;
goto serial_out;
}
@@ -1945,7 +1963,7 @@ static int ni_usb_b_read_serial_number(struct ni_usb_priv *ni_priv)
serial_number = 0;
for (j = 0; j < num_reads; ++j)
serial_number |= (results[j] & 0xff) << (8 * j);
- dev_info(&usb_dev->dev, "%s: board serial number is 0x%x\n", __func__, serial_number);
+ dev_dbg(&usb_dev->dev, "board serial number is 0x%x\n", serial_number);
retval = 0;
serial_out:
kfree(in_data);
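
The loop above packs the serial number little-endian, one bulk-read result per byte: byte j lands in bits 8*j..8*j+7. The same packing in isolation (values made up):

#include <stdio.h>

int main(void)
{
	unsigned int results[] = { 0x78, 0x56, 0x34, 0x12 };	/* hypothetical reads */
	unsigned int serial_number = 0;
	int j;

	for (j = 0; j < 4; ++j)
		serial_number |= (results[j] & 0xff) << (8 * j);
	printf("board serial number is 0x%x\n", serial_number);	/* 0x12345678 */
	return 0;
}
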
@@ -1973,22 +1991,22 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv)
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0, 0x0, buffer, buffer_size, 1000);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: usb_control_msg request 0x%x returned %i\n",
- __func__, NI_USB_SERIAL_NUMBER_REQUEST, retval);
+ dev_err(&usb_dev->dev, "usb_control_msg request 0x%x returned %i\n",
+ NI_USB_SERIAL_NUMBER_REQUEST, retval);
goto ready_out;
}
j = 0;
if (buffer[j] != NI_USB_SERIAL_NUMBER_REQUEST) {
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x%x\n",
- __func__, j, (int)buffer[j], NI_USB_SERIAL_NUMBER_REQUEST);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x%x\n",
+ j, (int)buffer[j], NI_USB_SERIAL_NUMBER_REQUEST);
unexpected = 1;
}
if (unexpected)
ni_usb_dump_raw_block(buffer, retval);
// NI-USB-HS+ pads the serial with 0x0 to make 16 bytes
if (retval != 5 && retval != 16) {
- dev_err(&usb_dev->dev, "%s: received unexpected number of bytes = %i, expected 5 or 16\n",
- __func__, retval);
+ dev_err(&usb_dev->dev, "received unexpected number of bytes = %i, expected 5 or 16\n",
+ retval);
ni_usb_dump_raw_block(buffer, retval);
}
serial_number = 0;
@@ -1996,7 +2014,7 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv)
serial_number |= (buffer[++j] << 8);
serial_number |= (buffer[++j] << 16);
serial_number |= (buffer[++j] << 24);
- dev_info(&usb_dev->dev, "%s: board serial number is 0x%x\n", __func__, serial_number);
+ dev_dbg(&usb_dev->dev, "board serial number is 0x%x\n", serial_number);
for (i = 0; i < timeout; ++i) {
int ready = 0;
@@ -2004,26 +2022,26 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv)
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0, 0x0, buffer, buffer_size, 100);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: usb_control_msg request 0x%x returned %i\n",
- __func__, NI_USB_POLL_READY_REQUEST, retval);
+ dev_err(&usb_dev->dev, "usb_control_msg request 0x%x returned %i\n",
+ NI_USB_POLL_READY_REQUEST, retval);
goto ready_out;
}
j = 0;
unexpected = 0;
if (buffer[j] != NI_USB_POLL_READY_REQUEST) { // [0]
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x%x\n",
- __func__, j, (int)buffer[j], NI_USB_POLL_READY_REQUEST);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x%x\n",
+ j, (int)buffer[j], NI_USB_POLL_READY_REQUEST);
unexpected = 1;
}
++j;
if (buffer[j] != 0x1 && buffer[j] != 0x0) { // [1] HS+ sends 0x0
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x1 or 0x0\n",
- __func__, j, (int)buffer[j]);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x1 or 0x0\n",
+ j, (int)buffer[j]);
unexpected = 1;
}
if (buffer[++j] != 0x0) { // [2]
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x%x\n",
- __func__, j, (int)buffer[j], 0x0);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x%x\n",
+ j, (int)buffer[j], 0x0);
unexpected = 1;
}
++j;
@@ -2031,22 +2049,22 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv)
// NI-USB-HS+ sends 0x0
if (buffer[j] != 0x1 && buffer[j] != 0x8 && buffer[j] != 0x7 && buffer[j] != 0x0) {
// [3]
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x0, 0x1, 0x7 or 0x8\n",
- __func__, j, (int)buffer[j]);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x0, 0x1, 0x7 or 0x8\n",
+ j, (int)buffer[j]);
unexpected = 1;
}
++j;
// NI-USB-HS+ sends 0 here
if (buffer[j] != 0x30 && buffer[j] != 0x0) { // [4]
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x0 or 0x30\n",
- __func__, j, (int)buffer[j]);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x0 or 0x30\n",
+ j, (int)buffer[j]);
unexpected = 1;
}
++j;
// MC usb-488 (and sometimes NI-USB-HS?) and NI-USB-HS+ sends 0x0 here
if (buffer[j] != 0x1 && buffer[j] != 0x0) { // [5]
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x1 or 0x0\n",
- __func__, j, (int)buffer[j]);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x1 or 0x0\n",
+ j, (int)buffer[j]);
unexpected = 1;
}
if (buffer[++j] != 0x0) { // [6]
@@ -2054,8 +2072,8 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv)
// NI-USB-HS+ sends 0xf here
if (buffer[j] != 0x2 && buffer[j] != 0xe && buffer[j] != 0xf &&
buffer[j] != 0x16) {
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x2, 0xe, 0xf or 0x16\n",
- __func__, j, (int)buffer[j]);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x2, 0xe, 0xf or 0x16\n",
+ j, (int)buffer[j]);
unexpected = 1;
}
}
@@ -2064,30 +2082,30 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv)
// MC usb-488 sends 0x5 here; MC usb-488A sends 0x6 here
if (buffer[j] != 0x3 && buffer[j] != 0x5 && buffer[j] != 0x6 &&
buffer[j] != 0x8) {
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x3 or 0x5, 0x6 or 0x08\n",
- __func__, j, (int)buffer[j]);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x3 or 0x5, 0x6 or 0x08\n",
+ j, (int)buffer[j]);
unexpected = 1;
}
}
++j;
if (buffer[j] != 0x0 && buffer[j] != 0x2) { // [8] MC usb-488 sends 0x2 here
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x0 or 0x2\n",
- __func__, j, (int)buffer[j]);
+ dev_err(&usb_dev->dev, " unexpected data: buffer[%i]=0x%x, expected 0x0 or 0x2\n",
+ j, (int)buffer[j]);
unexpected = 1;
}
++j;
// MC usb-488A and NI-USB-HS sends 0x3 here; NI-USB-HS+ sends 0x30 here
if (buffer[j] != 0x0 && buffer[j] != 0x3 && buffer[j] != 0x30) { // [9]
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x0, 0x3 or 0x30\n",
- __func__, j, (int)buffer[j]);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x0, 0x3 or 0x30\n",
+ j, (int)buffer[j]);
unexpected = 1;
}
if (buffer[++j] != 0x0) {
ready = 1;
if (buffer[j] != 0x96 && buffer[j] != 0x7 && buffer[j] != 0x6e) {
// [10] MC usb-488 sends 0x7 here
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x96, 0x07 or 0x6e\n",
- __func__, j, (int)buffer[j]);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x96, 0x07 or 0x6e\n",
+ j, (int)buffer[j]);
unexpected = 1;
}
}
@@ -2097,7 +2115,6 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv)
break;
retval = msleep_interruptible(msec_sleep_duration);
if (retval) {
- dev_err(&usb_dev->dev, "ni_usb_gpib: msleep interrupted\n");
retval = -ERESTARTSYS;
goto ready_out;
}
@@ -2106,7 +2123,7 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv)
ready_out:
kfree(buffer);
- dev_dbg(&usb_dev->dev, "%s: exit retval=%d\n", __func__, retval);
+ dev_dbg(&usb_dev->dev, "exit retval=%d\n", retval);
return retval;
}
@@ -2134,14 +2151,14 @@ static int ni_usb_hs_plus_extra_init(struct ni_usb_priv *ni_priv)
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0, 0x0, buffer, transfer_size, 1000);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: usb_control_msg request 0x%x returned %i\n",
- __func__, NI_USB_HS_PLUS_0x48_REQUEST, retval);
+ dev_err(&usb_dev->dev, "usb_control_msg request 0x%x returned %i\n",
+ NI_USB_HS_PLUS_0x48_REQUEST, retval);
break;
}
// expected response data: 48 f3 30 00 00 00 00 00 00 00 00 00 00 00 00 00
if (buffer[0] != NI_USB_HS_PLUS_0x48_REQUEST)
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[0]=0x%x, expected 0x%x\n",
- __func__, (int)buffer[0], NI_USB_HS_PLUS_0x48_REQUEST);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[0]=0x%x, expected 0x%x\n",
+ (int)buffer[0], NI_USB_HS_PLUS_0x48_REQUEST);
transfer_size = 2;
@@ -2149,14 +2166,14 @@ static int ni_usb_hs_plus_extra_init(struct ni_usb_priv *ni_priv)
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x1, 0x0, buffer, transfer_size, 1000);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: usb_control_msg request 0x%x returned %i\n",
- __func__, NI_USB_HS_PLUS_LED_REQUEST, retval);
+ dev_err(&usb_dev->dev, "usb_control_msg request 0x%x returned %i\n",
+ NI_USB_HS_PLUS_LED_REQUEST, retval);
break;
}
// expected response data: 4b 00
if (buffer[0] != NI_USB_HS_PLUS_LED_REQUEST)
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[0]=0x%x, expected 0x%x\n",
- __func__, (int)buffer[0], NI_USB_HS_PLUS_LED_REQUEST);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[0]=0x%x, expected 0x%x\n",
+ (int)buffer[0], NI_USB_HS_PLUS_LED_REQUEST);
transfer_size = 9;
@@ -2165,15 +2182,14 @@ static int ni_usb_hs_plus_extra_init(struct ni_usb_priv *ni_priv)
USB_RECIP_INTERFACE,
0x0, 0x1, buffer, transfer_size, 1000);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: usb_control_msg request 0x%x returned %i\n",
- __func__, NI_USB_HS_PLUS_0xf8_REQUEST, retval);
+ dev_err(&usb_dev->dev, "usb_control_msg request 0x%x returned %i\n",
+ NI_USB_HS_PLUS_0xf8_REQUEST, retval);
break;
}
// expected response data: f8 01 00 00 00 01 00 00 00
if (buffer[0] != NI_USB_HS_PLUS_0xf8_REQUEST)
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[0]=0x%x, expected 0x%x\n",
- __func__, (int)buffer[0], NI_USB_HS_PLUS_0xf8_REQUEST);
-
+ dev_err(&usb_dev->dev, "unexpected data: buffer[0]=0x%x, expected 0x%x\n",
+ (int)buffer[0], NI_USB_HS_PLUS_0xf8_REQUEST);
} while (0);
// cleanup
@@ -2189,10 +2205,10 @@ static inline int ni_usb_device_match(struct usb_interface *interface,
return 1;
}
-static int ni_usb_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int ni_usb_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
int retval;
- int i;
+ int i, index;
struct ni_usb_priv *ni_priv;
int product_id;
struct usb_device *usb_dev;
@@ -2211,19 +2227,17 @@ static int ni_usb_attach(gpib_board_t *board, const gpib_board_config_t *config)
ni_priv->bus_interface = ni_usb_driver_interfaces[i];
usb_set_intfdata(ni_usb_driver_interfaces[i], board);
usb_dev = interface_to_usbdev(ni_priv->bus_interface);
- dev_info(&usb_dev->dev,
- "bus %d dev num %d attached to gpib minor %d, NI usb interface %i\n",
- usb_dev->bus->busnum, usb_dev->devnum, board->minor, i);
+ index = i;
break;
}
}
if (i == MAX_NUM_NI_USB_INTERFACES) {
mutex_unlock(&ni_usb_hotplug_lock);
- pr_err("No supported NI usb gpib adapters found, have you loaded its firmware?\n");
+ dev_err(board->gpib_dev, "No supported adapters found, have you loaded its firmware?\n");
return -ENODEV;
}
if (usb_reset_configuration(interface_to_usbdev(ni_priv->bus_interface)))
- dev_err(&usb_dev->dev, "ni_usb_gpib: usb_reset_configuration() failed.\n");
+ dev_err(&usb_dev->dev, "usb_reset_configuration() failed.\n");
product_id = le16_to_cpu(usb_dev->descriptor.idProduct);
ni_priv->product_id = product_id;
@@ -2296,7 +2310,9 @@ static int ni_usb_attach(gpib_board_t *board, const gpib_board_config_t *config)
}
mutex_unlock(&ni_usb_hotplug_lock);
- dev_info(&usb_dev->dev, "%s: attached\n", __func__);
+ dev_info(&usb_dev->dev,
+ "bus %d dev num %d attached to gpib%d, intf %i\n",
+ usb_dev->bus->busnum, usb_dev->devnum, board->minor, index);
return retval;
}
@@ -2304,33 +2320,25 @@ static int ni_usb_shutdown_hardware(struct ni_usb_priv *ni_priv)
{
struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
int retval;
- int i = 0;
struct ni_usb_register writes[2];
static const int writes_length = ARRAY_SIZE(writes);
unsigned int ibsta;
-// printk("%s: %s\n", __func__);
- writes[i].device = NIUSB_SUBDEV_TNT4882;
- writes[i].address = nec7210_to_tnt4882_offset(AUXMR);
- writes[i].value = AUX_CR;
- i++;
- writes[i].device = NIUSB_SUBDEV_UNKNOWN3;
- writes[i].address = 0x10;
- writes[i].value = 0x0;
- i++;
- if (i > writes_length) {
- dev_err(&usb_dev->dev, "%s: bug!, buffer overrun, i=%i\n", __func__, i);
- return -EINVAL;
- }
- retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta);
+ writes[0].device = NIUSB_SUBDEV_TNT4882;
+ writes[0].address = nec7210_to_tnt4882_offset(AUXMR);
+ writes[0].value = AUX_CR;
+ writes[1].device = NIUSB_SUBDEV_UNKNOWN3;
+ writes[1].address = 0x10;
+ writes[1].value = 0x0;
+ retval = ni_usb_write_registers(ni_priv, writes, writes_length, &ibsta);
if (retval) {
- dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
return retval;
}
return 0;
}
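
Using fixed writes[0]/writes[1] indices instead of a running counter lets ARRAY_SIZE() supply the count and deletes a runtime overrun check that can now never fire. The idiom reduced to a standalone sketch (struct and register values invented):

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct reg_write { unsigned int address, value; };

int main(void)
{
	struct reg_write writes[2];
	unsigned int i;

	writes[0].address = 0x00; writes[0].value = 0x1;
	writes[1].address = 0x10; writes[1].value = 0x0;
	for (i = 0; i < ARRAY_SIZE(writes); i++)	/* count tracks the array */
		printf("write 0x%x to 0x%x\n", writes[i].value, writes[i].address);
	return 0;
}
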
-static void ni_usb_detach(gpib_board_t *board)
+static void ni_usb_detach(struct gpib_board *board)
{
struct ni_usb_priv *ni_priv;
@@ -2413,7 +2421,7 @@ static int ni_usb_driver_probe(struct usb_interface *interface, const struct usb
if (i == MAX_NUM_NI_USB_INTERFACES) {
usb_put_dev(usb_dev);
mutex_unlock(&ni_usb_hotplug_lock);
- dev_err(&usb_dev->dev, "%s: ni_usb_driver_interfaces[] full\n", __func__);
+ dev_err(&usb_dev->dev, "ni_usb_driver_interfaces[] full\n");
return -1;
}
path = kmalloc(path_length, GFP_KERNEL);
@@ -2423,7 +2431,7 @@ static int ni_usb_driver_probe(struct usb_interface *interface, const struct usb
return -ENOMEM;
}
usb_make_path(usb_dev, path, path_length);
- dev_info(&usb_dev->dev, "ni_usb_gpib: probe succeeded for path: %s\n", path);
+ dev_info(&usb_dev->dev, "probe succeeded for path: %s\n", path);
kfree(path);
mutex_unlock(&ni_usb_hotplug_lock);
return 0;
@@ -2437,7 +2445,7 @@ static void ni_usb_driver_disconnect(struct usb_interface *interface)
mutex_lock(&ni_usb_hotplug_lock);
for (i = 0; i < MAX_NUM_NI_USB_INTERFACES; i++) {
if (ni_usb_driver_interfaces[i] == interface) {
- gpib_board_t *board = usb_get_intfdata(interface);
+ struct gpib_board *board = usb_get_intfdata(interface);
if (board) {
struct ni_usb_priv *ni_priv = board->private_data;
@@ -2458,8 +2466,7 @@ static void ni_usb_driver_disconnect(struct usb_interface *interface)
}
}
if (i == MAX_NUM_NI_USB_INTERFACES)
- dev_err(&usb_dev->dev, "%s: unable to find interface in ni_usb_driver_interfaces[]? bug?\n",
- __func__);
+ dev_err(&usb_dev->dev, "unable to find interface bug?\n");
usb_put_dev(usb_dev);
mutex_unlock(&ni_usb_hotplug_lock);
}
@@ -2467,7 +2474,7 @@ static void ni_usb_driver_disconnect(struct usb_interface *interface)
static int ni_usb_driver_suspend(struct usb_interface *interface, pm_message_t message)
{
struct usb_device *usb_dev = interface_to_usbdev(interface);
- gpib_board_t *board;
+ struct gpib_board *board;
int i, retval;
mutex_lock(&ni_usb_hotplug_lock);
@@ -2498,9 +2505,9 @@ static int ni_usb_driver_suspend(struct usb_interface *interface, pm_message_t m
ni_usb_cleanup_urbs(ni_priv);
mutex_unlock(&ni_priv->interrupt_transfer_lock);
}
- dev_info(&usb_dev->dev,
- "bus %d dev num %d gpib minor %d, ni usb interface %i suspended\n",
- usb_dev->bus->busnum, usb_dev->devnum, board->minor, i);
+ dev_dbg(&usb_dev->dev,
+ "bus %d dev num %d gpib%d, interface %i suspended\n",
+ usb_dev->bus->busnum, usb_dev->devnum, board->minor, i);
}
mutex_unlock(&ni_usb_hotplug_lock);
@@ -2511,7 +2518,7 @@ static int ni_usb_driver_resume(struct usb_interface *interface)
{
struct usb_device *usb_dev = interface_to_usbdev(interface);
- gpib_board_t *board;
+ struct gpib_board *board;
int i, retval;
mutex_lock(&ni_usb_hotplug_lock);
@@ -2535,15 +2542,15 @@ static int ni_usb_driver_resume(struct usb_interface *interface)
mutex_lock(&ni_priv->interrupt_transfer_lock);
retval = usb_submit_urb(ni_priv->interrupt_urb, GFP_KERNEL);
if (retval) {
- dev_err(&usb_dev->dev, "%s: failed to resubmit interrupt urb, retval=%i\n",
- __func__, retval);
+ dev_err(&usb_dev->dev, "resume failed to resubmit interrupt urb, retval=%i\n",
+ retval);
mutex_unlock(&ni_priv->interrupt_transfer_lock);
mutex_unlock(&ni_usb_hotplug_lock);
return retval;
}
mutex_unlock(&ni_priv->interrupt_transfer_lock);
} else {
- dev_err(&usb_dev->dev, "%s: bug! int urb not set up\n", __func__);
+ dev_err(&usb_dev->dev, "bug! resume int urb not set up\n");
mutex_unlock(&ni_usb_hotplug_lock);
return -EINVAL;
}
@@ -2600,9 +2607,9 @@ static int ni_usb_driver_resume(struct usb_interface *interface)
if (ni_priv->ren_state)
ni_usb_remote_enable(board, 1);
- dev_info(&usb_dev->dev,
- "bus %d dev num %d gpib minor %d, ni usb interface %i resumed\n",
- usb_dev->bus->busnum, usb_dev->devnum, board->minor, i);
+ dev_dbg(&usb_dev->dev,
+ "bus %d dev num %d gpib%d, interface %i resumed\n",
+ usb_dev->bus->busnum, usb_dev->devnum, board->minor, i);
}
mutex_unlock(&ni_usb_hotplug_lock);
@@ -2610,7 +2617,7 @@ static int ni_usb_driver_resume(struct usb_interface *interface)
}
static struct usb_driver ni_usb_bus_driver = {
- .name = "ni_usb_gpib",
+ .name = DRV_NAME,
.probe = ni_usb_driver_probe,
.disconnect = ni_usb_driver_disconnect,
.suspend = ni_usb_driver_suspend,
@@ -2623,19 +2630,18 @@ static int __init ni_usb_init_module(void)
int i;
int ret;
- pr_info("ni_usb_gpib driver loading\n");
for (i = 0; i < MAX_NUM_NI_USB_INTERFACES; i++)
ni_usb_driver_interfaces[i] = NULL;
ret = usb_register(&ni_usb_bus_driver);
if (ret) {
- pr_err("ni_usb_gpib: usb_register failed: error = %d\n", ret);
+ pr_err("usb_register failed: error = %d\n", ret);
return ret;
}
ret = gpib_register_driver(&ni_usb_gpib_interface, THIS_MODULE);
if (ret) {
- pr_err("ni_usb_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
return ret;
}
@@ -2644,7 +2650,6 @@ static int __init ni_usb_init_module(void)
static void __exit ni_usb_exit_module(void)
{
- pr_info("ni_usb_gpib driver unloading\n");
gpib_unregister_driver(&ni_usb_gpib_interface);
usb_deregister(&ni_usb_bus_driver);
}
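
One detail the init path above leaves as-is: if gpib_register_driver() fails, usb_register() is not unwound before returning. Whether that leak matters here depends on the GPIB core, but the conventional shape is a goto ladder; a sketch reusing this file's symbols (untested, assumes the surrounding declarations):

static int __init example_init(void)
{
	int ret;

	ret = usb_register(&ni_usb_bus_driver);
	if (ret)
		return ret;

	ret = gpib_register_driver(&ni_usb_gpib_interface, THIS_MODULE);
	if (ret)
		goto err_usb;		/* undo the earlier registration */

	return 0;

err_usb:
	usb_deregister(&ni_usb_bus_driver);
	return ret;
}
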
diff --git a/drivers/staging/gpib/pc2/pc2_gpib.c b/drivers/staging/gpib/pc2/pc2_gpib.c
index c0b07cb63d9a..96d3c09f2273 100644
--- a/drivers/staging/gpib/pc2/pc2_gpib.c
+++ b/drivers/staging/gpib/pc2/pc2_gpib.c
@@ -4,6 +4,9 @@
* copyright : (C) 2001, 2002 by Frank Mori Hess
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/module.h>
@@ -49,22 +52,13 @@ static inline unsigned int CLEAR_INTR_REG(unsigned int irq)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB driver for PC2/PC2a and compatible devices");
-static int pc2_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static int pc2a_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static int pc2a_cb7210_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static int pc2_2a_attach(gpib_board_t *board, const gpib_board_config_t *config);
-
-static void pc2_detach(gpib_board_t *board);
-static void pc2a_detach(gpib_board_t *board);
-static void pc2_2a_detach(gpib_board_t *board);
-
/*
* GPIB interrupt service routines
*/
irqreturn_t pc2_interrupt(int irq, void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct pc2_priv *priv = board->private_data;
unsigned long flags;
irqreturn_t retval;
@@ -77,7 +71,7 @@ irqreturn_t pc2_interrupt(int irq, void *arg)
irqreturn_t pc2a_interrupt(int irq, void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct pc2_priv *priv = board->private_data;
int status1, status2;
unsigned long flags;
@@ -96,7 +90,7 @@ irqreturn_t pc2a_interrupt(int irq, void *arg)
}
// wrappers for interface functions
-static int pc2_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
+static int pc2_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
size_t *bytes_read)
{
struct pc2_priv *priv = board->private_data;
@@ -104,7 +98,7 @@ static int pc2_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *en
return nec7210_read(board, &priv->nec7210_priv, buffer, length, end, bytes_read);
}
-static int pc2_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
+static int pc2_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
size_t *bytes_written)
{
struct pc2_priv *priv = board->private_data;
@@ -112,245 +106,133 @@ static int pc2_write(gpib_board_t *board, uint8_t *buffer, size_t length, int se
return nec7210_write(board, &priv->nec7210_priv, buffer, length, send_eoi, bytes_written);
}
-static int pc2_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written)
+static int pc2_command(struct gpib_board *board, uint8_t *buffer, size_t length, size_t *bytes_written)
{
struct pc2_priv *priv = board->private_data;
return nec7210_command(board, &priv->nec7210_priv, buffer, length, bytes_written);
}
-static int pc2_take_control(gpib_board_t *board, int synchronous)
+static int pc2_take_control(struct gpib_board *board, int synchronous)
{
struct pc2_priv *priv = board->private_data;
return nec7210_take_control(board, &priv->nec7210_priv, synchronous);
}
-static int pc2_go_to_standby(gpib_board_t *board)
+static int pc2_go_to_standby(struct gpib_board *board)
{
struct pc2_priv *priv = board->private_data;
return nec7210_go_to_standby(board, &priv->nec7210_priv);
}
-static void pc2_request_system_control(gpib_board_t *board, int request_control)
+static void pc2_request_system_control(struct gpib_board *board, int request_control)
{
struct pc2_priv *priv = board->private_data;
nec7210_request_system_control(board, &priv->nec7210_priv, request_control);
}
-static void pc2_interface_clear(gpib_board_t *board, int assert)
+static void pc2_interface_clear(struct gpib_board *board, int assert)
{
struct pc2_priv *priv = board->private_data;
nec7210_interface_clear(board, &priv->nec7210_priv, assert);
}
-static void pc2_remote_enable(gpib_board_t *board, int enable)
+static void pc2_remote_enable(struct gpib_board *board, int enable)
{
struct pc2_priv *priv = board->private_data;
nec7210_remote_enable(board, &priv->nec7210_priv, enable);
}
-static int pc2_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+static int pc2_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
{
struct pc2_priv *priv = board->private_data;
return nec7210_enable_eos(board, &priv->nec7210_priv, eos_byte, compare_8_bits);
}
-static void pc2_disable_eos(gpib_board_t *board)
+static void pc2_disable_eos(struct gpib_board *board)
{
struct pc2_priv *priv = board->private_data;
nec7210_disable_eos(board, &priv->nec7210_priv);
}
-static unsigned int pc2_update_status(gpib_board_t *board, unsigned int clear_mask)
+static unsigned int pc2_update_status(struct gpib_board *board, unsigned int clear_mask)
{
struct pc2_priv *priv = board->private_data;
return nec7210_update_status(board, &priv->nec7210_priv, clear_mask);
}
-static int pc2_primary_address(gpib_board_t *board, unsigned int address)
+static int pc2_primary_address(struct gpib_board *board, unsigned int address)
{
struct pc2_priv *priv = board->private_data;
return nec7210_primary_address(board, &priv->nec7210_priv, address);
}
-static int pc2_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+static int pc2_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
struct pc2_priv *priv = board->private_data;
return nec7210_secondary_address(board, &priv->nec7210_priv, address, enable);
}
-static int pc2_parallel_poll(gpib_board_t *board, uint8_t *result)
+static int pc2_parallel_poll(struct gpib_board *board, uint8_t *result)
{
struct pc2_priv *priv = board->private_data;
return nec7210_parallel_poll(board, &priv->nec7210_priv, result);
}
-static void pc2_parallel_poll_configure(gpib_board_t *board, uint8_t config)
+static void pc2_parallel_poll_configure(struct gpib_board *board, uint8_t config)
{
struct pc2_priv *priv = board->private_data;
nec7210_parallel_poll_configure(board, &priv->nec7210_priv, config);
}
-static void pc2_parallel_poll_response(gpib_board_t *board, int ist)
+static void pc2_parallel_poll_response(struct gpib_board *board, int ist)
{
struct pc2_priv *priv = board->private_data;
nec7210_parallel_poll_response(board, &priv->nec7210_priv, ist);
}
-static void pc2_serial_poll_response(gpib_board_t *board, uint8_t status)
+static void pc2_serial_poll_response(struct gpib_board *board, uint8_t status)
{
struct pc2_priv *priv = board->private_data;
nec7210_serial_poll_response(board, &priv->nec7210_priv, status);
}
-static uint8_t pc2_serial_poll_status(gpib_board_t *board)
+static uint8_t pc2_serial_poll_status(struct gpib_board *board)
{
struct pc2_priv *priv = board->private_data;
return nec7210_serial_poll_status(board, &priv->nec7210_priv);
}
-static unsigned int pc2_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+static int pc2_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
struct pc2_priv *priv = board->private_data;
return nec7210_t1_delay(board, &priv->nec7210_priv, nano_sec);
}
-static void pc2_return_to_local(gpib_board_t *board)
+static void pc2_return_to_local(struct gpib_board *board)
{
struct pc2_priv *priv = board->private_data;
nec7210_return_to_local(board, &priv->nec7210_priv);
}
-static gpib_interface_t pc2_interface = {
- .name = "pcII",
- .attach = pc2_attach,
- .detach = pc2_detach,
- .read = pc2_read,
- .write = pc2_write,
- .command = pc2_command,
- .take_control = pc2_take_control,
- .go_to_standby = pc2_go_to_standby,
- .request_system_control = pc2_request_system_control,
- .interface_clear = pc2_interface_clear,
- .remote_enable = pc2_remote_enable,
- .enable_eos = pc2_enable_eos,
- .disable_eos = pc2_disable_eos,
- .parallel_poll = pc2_parallel_poll,
- .parallel_poll_configure = pc2_parallel_poll_configure,
- .parallel_poll_response = pc2_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = NULL,
- .update_status = pc2_update_status,
- .primary_address = pc2_primary_address,
- .secondary_address = pc2_secondary_address,
- .serial_poll_response = pc2_serial_poll_response,
- .serial_poll_status = pc2_serial_poll_status,
- .t1_delay = pc2_t1_delay,
- .return_to_local = pc2_return_to_local,
-};
-
-static gpib_interface_t pc2a_interface = {
- .name = "pcIIa",
- .attach = pc2a_attach,
- .detach = pc2a_detach,
- .read = pc2_read,
- .write = pc2_write,
- .command = pc2_command,
- .take_control = pc2_take_control,
- .go_to_standby = pc2_go_to_standby,
- .request_system_control = pc2_request_system_control,
- .interface_clear = pc2_interface_clear,
- .remote_enable = pc2_remote_enable,
- .enable_eos = pc2_enable_eos,
- .disable_eos = pc2_disable_eos,
- .parallel_poll = pc2_parallel_poll,
- .parallel_poll_configure = pc2_parallel_poll_configure,
- .parallel_poll_response = pc2_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = NULL,
- .update_status = pc2_update_status,
- .primary_address = pc2_primary_address,
- .secondary_address = pc2_secondary_address,
- .serial_poll_response = pc2_serial_poll_response,
- .serial_poll_status = pc2_serial_poll_status,
- .t1_delay = pc2_t1_delay,
- .return_to_local = pc2_return_to_local,
-};
-
-static gpib_interface_t pc2a_cb7210_interface = {
- .name = "pcIIa_cb7210",
- .attach = pc2a_cb7210_attach,
- .detach = pc2a_detach,
- .read = pc2_read,
- .write = pc2_write,
- .command = pc2_command,
- .take_control = pc2_take_control,
- .go_to_standby = pc2_go_to_standby,
- .request_system_control = pc2_request_system_control,
- .interface_clear = pc2_interface_clear,
- .remote_enable = pc2_remote_enable,
- .enable_eos = pc2_enable_eos,
- .disable_eos = pc2_disable_eos,
- .parallel_poll = pc2_parallel_poll,
- .parallel_poll_configure = pc2_parallel_poll_configure,
- .parallel_poll_response = pc2_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = NULL, //XXX
- .update_status = pc2_update_status,
- .primary_address = pc2_primary_address,
- .secondary_address = pc2_secondary_address,
- .serial_poll_response = pc2_serial_poll_response,
- .serial_poll_status = pc2_serial_poll_status,
- .t1_delay = pc2_t1_delay,
- .return_to_local = pc2_return_to_local,
-};
-
-static gpib_interface_t pc2_2a_interface = {
- .name = "pcII_IIa",
- .attach = pc2_2a_attach,
- .detach = pc2_2a_detach,
- .read = pc2_read,
- .write = pc2_write,
- .command = pc2_command,
- .take_control = pc2_take_control,
- .go_to_standby = pc2_go_to_standby,
- .request_system_control = pc2_request_system_control,
- .interface_clear = pc2_interface_clear,
- .remote_enable = pc2_remote_enable,
- .enable_eos = pc2_enable_eos,
- .disable_eos = pc2_disable_eos,
- .parallel_poll = pc2_parallel_poll,
- .parallel_poll_configure = pc2_parallel_poll_configure,
- .parallel_poll_response = pc2_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = NULL,
- .update_status = pc2_update_status,
- .primary_address = pc2_primary_address,
- .secondary_address = pc2_secondary_address,
- .serial_poll_response = pc2_serial_poll_response,
- .serial_poll_status = pc2_serial_poll_status,
- .t1_delay = pc2_t1_delay,
- .return_to_local = pc2_return_to_local,
-};
-
-static int allocate_private(gpib_board_t *board)
+static int allocate_private(struct gpib_board *board)
{
struct pc2_priv *priv;
@@ -363,13 +245,13 @@ static int allocate_private(gpib_board_t *board)
return 0;
}
-static void free_private(gpib_board_t *board)
+static void free_private(struct gpib_board *board)
{
kfree(board->private_data);
board->private_data = NULL;
}
-static int pc2_generic_attach(gpib_board_t *board, const gpib_board_config_t *config,
+static int pc2_generic_attach(struct gpib_board *board, const gpib_board_config_t *config,
enum nec7210_chipset chipset)
{
struct pc2_priv *pc2_priv;
@@ -389,7 +271,8 @@ static int pc2_generic_attach(gpib_board_t *board, const gpib_board_config_t *co
* is adapted to use isa_register_driver.
*/
if (config->ibdma)
- pr_err("DMA disabled for pc2 gpib, driver needs to be adapted to use isa_register_driver to get a struct device*");
+ // driver needs to be adapted to use isa_register_driver to get a struct device*
+ dev_err(board->gpib_dev, "DMA disabled for pc2 gpib");
#else
if (config->ibdma) {
nec_priv->dma_buffer_length = 0x1000;
@@ -401,7 +284,7 @@ static int pc2_generic_attach(gpib_board_t *board, const gpib_board_config_t *co
// request isa dma channel
if (request_dma(config->ibdma, "pc2")) {
- pr_err("gpib: can't request DMA %d\n", config->ibdma);
+ dev_err(board->gpib_dev, "can't request DMA %d\n", config->ibdma);
return -1;
}
nec_priv->dma_channel = config->ibdma;
@@ -411,7 +294,7 @@ static int pc2_generic_attach(gpib_board_t *board, const gpib_board_config_t *co
return 0;
}
-int pc2_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int pc2_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
int isr_flags = 0;
struct pc2_priv *pc2_priv;
@@ -427,8 +310,8 @@ int pc2_attach(gpib_board_t *board, const gpib_board_config_t *config)
nec_priv->offset = pc2_reg_offset;
if (!request_region(config->ibbase, pc2_iosize, "pc2")) {
- pr_err("gpib: ioports are already in use\n");
- return -1;
+ dev_err(board->gpib_dev, "ioports are already in use\n");
+ return -EBUSY;
}
nec_priv->iobase = config->ibbase;
@@ -437,14 +320,14 @@ int pc2_attach(gpib_board_t *board, const gpib_board_config_t *config)
// install interrupt handler
if (config->ibirq) {
if (request_irq(config->ibirq, pc2_interrupt, isr_flags, "pc2", board)) {
- pr_err("gpib: can't request IRQ %d\n", config->ibirq);
- return -1;
+ dev_err(board->gpib_dev, "can't request IRQ %d\n", config->ibirq);
+ return -EBUSY;
}
}
pc2_priv->irq = config->ibirq;
/* poll so we can detect assertion of ATN */
if (gpib_request_pseudo_irq(board, pc2_interrupt)) {
- pr_err("pc2_gpib: failed to allocate pseudo_irq\n");
+ dev_err(board->gpib_dev, "failed to allocate pseudo_irq\n");
return -1;
}
/* set internal counter register for 8 MHz input clock */
@@ -455,7 +338,7 @@ int pc2_attach(gpib_board_t *board, const gpib_board_config_t *config)
return 0;
}
-void pc2_detach(gpib_board_t *board)
+static void pc2_detach(struct gpib_board *board)
{
struct pc2_priv *pc2_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -482,7 +365,7 @@ void pc2_detach(gpib_board_t *board)
free_private(board);
}
-static int pc2a_common_attach(gpib_board_t *board, const gpib_board_config_t *config,
+static int pc2a_common_attach(struct gpib_board *board, const gpib_board_config_t *config,
unsigned int num_registers, enum nec7210_chipset chipset)
{
unsigned int i, j;
@@ -505,18 +388,19 @@ static int pc2a_common_attach(gpib_board_t *board, const gpib_board_config_t *co
case 0x62e1:
break;
default:
- pr_err("PCIIa base range invalid, must be one of 0x[0246]2e1, but is 0x%d\n",
- config->ibbase);
+ dev_err(board->gpib_dev, "PCIIa base range invalid, must be one of 0x[0246]2e1, but is 0x%x\n",
+ config->ibbase);
return -1;
}
if (config->ibirq) {
if (config->ibirq < 2 || config->ibirq > 7) {
- pr_err("pc2_gpib: illegal interrupt level %i\n", config->ibirq);
+ dev_err(board->gpib_dev, "illegal interrupt level %i\n",
+ config->ibirq);
return -1;
}
} else {
- pr_err("pc2_gpib: interrupt disabled, using polling mode (slow)\n");
+ dev_err(board->gpib_dev, "interrupt disabled, using polling mode (slow)\n");
}
#ifdef CHECK_IOPORTS
unsigned int err = 0;
@@ -528,36 +412,36 @@ static int pc2a_common_attach(gpib_board_t *board, const gpib_board_config_t *co
if (config->ibirq && check_region(pc2a_clear_intr_iobase + config->ibirq, 1))
err++;
if (err) {
- pr_err("gpib: ioports are already in use");
- return -1;
+ dev_err(board->gpib_dev, "ioports are already in use");
+ return -EBUSY;
}
#endif
for (i = 0; i < num_registers; i++) {
if (!request_region(config->ibbase +
i * pc2a_reg_offset, 1, "pc2a")) {
- pr_err("gpib: ioports are already in use");
+ dev_err(board->gpib_dev, "ioports are already in use");
for (j = 0; j < i; j++)
release_region(config->ibbase +
j * pc2a_reg_offset, 1);
- return -1;
+ return -EBUSY;
}
}
nec_priv->iobase = config->ibbase;
if (config->ibirq) {
if (!request_region(pc2a_clear_intr_iobase + config->ibirq, 1, "pc2a")) {
- pr_err("gpib: ioports are already in use");
+ dev_err(board->gpib_dev, "ioports are already in use");
return -1;
}
pc2_priv->clear_intr_addr = pc2a_clear_intr_iobase + config->ibirq;
if (request_irq(config->ibirq, pc2a_interrupt, 0, "pc2a", board)) {
- pr_err("gpib: can't request IRQ %d\n", config->ibirq);
- return -1;
+ dev_err(board->gpib_dev, "can't request IRQ %d\n", config->ibirq);
+ return -EBUSY;
}
}
pc2_priv->irq = config->ibirq;
/* poll so we can detect assertion of ATN */
if (gpib_request_pseudo_irq(board, pc2_interrupt)) {
- pr_err("pc2_gpib: failed to allocate pseudo_irq\n");
+ dev_err(board->gpib_dev, "failed to allocate pseudo_irq\n");
return -1;
}
@@ -575,22 +459,22 @@ static int pc2a_common_attach(gpib_board_t *board, const gpib_board_config_t *co
return 0;
}
-int pc2a_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int pc2a_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
return pc2a_common_attach(board, config, pc2a_iosize, NEC7210);
}
-int pc2a_cb7210_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int pc2a_cb7210_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
return pc2a_common_attach(board, config, pc2a_iosize, CB7210);
}
-int pc2_2a_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int pc2_2a_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
return pc2a_common_attach(board, config, pc2_2a_iosize, NAT4882);
}
-static void pc2a_common_detach(gpib_board_t *board, unsigned int num_registers)
+static void pc2a_common_detach(struct gpib_board *board, unsigned int num_registers)
{
int i;
struct pc2_priv *pc2_priv = board->private_data;
@@ -623,41 +507,153 @@ static void pc2a_common_detach(gpib_board_t *board, unsigned int num_registers)
free_private(board);
}
-void pc2a_detach(gpib_board_t *board)
+static void pc2a_detach(struct gpib_board *board)
{
pc2a_common_detach(board, pc2a_iosize);
}
-void pc2_2a_detach(gpib_board_t *board)
+static void pc2_2a_detach(struct gpib_board *board)
{
pc2a_common_detach(board, pc2_2a_iosize);
}
+static gpib_interface_t pc2_interface = {
+ .name = "pcII",
+ .attach = pc2_attach,
+ .detach = pc2_detach,
+ .read = pc2_read,
+ .write = pc2_write,
+ .command = pc2_command,
+ .take_control = pc2_take_control,
+ .go_to_standby = pc2_go_to_standby,
+ .request_system_control = pc2_request_system_control,
+ .interface_clear = pc2_interface_clear,
+ .remote_enable = pc2_remote_enable,
+ .enable_eos = pc2_enable_eos,
+ .disable_eos = pc2_disable_eos,
+ .parallel_poll = pc2_parallel_poll,
+ .parallel_poll_configure = pc2_parallel_poll_configure,
+ .parallel_poll_response = pc2_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL,
+ .update_status = pc2_update_status,
+ .primary_address = pc2_primary_address,
+ .secondary_address = pc2_secondary_address,
+ .serial_poll_response = pc2_serial_poll_response,
+ .serial_poll_status = pc2_serial_poll_status,
+ .t1_delay = pc2_t1_delay,
+ .return_to_local = pc2_return_to_local,
+};
+
+static gpib_interface_t pc2a_interface = {
+ .name = "pcIIa",
+ .attach = pc2a_attach,
+ .detach = pc2a_detach,
+ .read = pc2_read,
+ .write = pc2_write,
+ .command = pc2_command,
+ .take_control = pc2_take_control,
+ .go_to_standby = pc2_go_to_standby,
+ .request_system_control = pc2_request_system_control,
+ .interface_clear = pc2_interface_clear,
+ .remote_enable = pc2_remote_enable,
+ .enable_eos = pc2_enable_eos,
+ .disable_eos = pc2_disable_eos,
+ .parallel_poll = pc2_parallel_poll,
+ .parallel_poll_configure = pc2_parallel_poll_configure,
+ .parallel_poll_response = pc2_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL,
+ .update_status = pc2_update_status,
+ .primary_address = pc2_primary_address,
+ .secondary_address = pc2_secondary_address,
+ .serial_poll_response = pc2_serial_poll_response,
+ .serial_poll_status = pc2_serial_poll_status,
+ .t1_delay = pc2_t1_delay,
+ .return_to_local = pc2_return_to_local,
+};
+
+static gpib_interface_t pc2a_cb7210_interface = {
+ .name = "pcIIa_cb7210",
+ .attach = pc2a_cb7210_attach,
+ .detach = pc2a_detach,
+ .read = pc2_read,
+ .write = pc2_write,
+ .command = pc2_command,
+ .take_control = pc2_take_control,
+ .go_to_standby = pc2_go_to_standby,
+ .request_system_control = pc2_request_system_control,
+ .interface_clear = pc2_interface_clear,
+ .remote_enable = pc2_remote_enable,
+ .enable_eos = pc2_enable_eos,
+ .disable_eos = pc2_disable_eos,
+ .parallel_poll = pc2_parallel_poll,
+ .parallel_poll_configure = pc2_parallel_poll_configure,
+ .parallel_poll_response = pc2_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL, //XXX
+ .update_status = pc2_update_status,
+ .primary_address = pc2_primary_address,
+ .secondary_address = pc2_secondary_address,
+ .serial_poll_response = pc2_serial_poll_response,
+ .serial_poll_status = pc2_serial_poll_status,
+ .t1_delay = pc2_t1_delay,
+ .return_to_local = pc2_return_to_local,
+};
+
+static gpib_interface_t pc2_2a_interface = {
+ .name = "pcII_IIa",
+ .attach = pc2_2a_attach,
+ .detach = pc2_2a_detach,
+ .read = pc2_read,
+ .write = pc2_write,
+ .command = pc2_command,
+ .take_control = pc2_take_control,
+ .go_to_standby = pc2_go_to_standby,
+ .request_system_control = pc2_request_system_control,
+ .interface_clear = pc2_interface_clear,
+ .remote_enable = pc2_remote_enable,
+ .enable_eos = pc2_enable_eos,
+ .disable_eos = pc2_disable_eos,
+ .parallel_poll = pc2_parallel_poll,
+ .parallel_poll_configure = pc2_parallel_poll_configure,
+ .parallel_poll_response = pc2_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL,
+ .update_status = pc2_update_status,
+ .primary_address = pc2_primary_address,
+ .secondary_address = pc2_secondary_address,
+ .serial_poll_response = pc2_serial_poll_response,
+ .serial_poll_status = pc2_serial_poll_status,
+ .t1_delay = pc2_t1_delay,
+ .return_to_local = pc2_return_to_local,
+};
+
static int __init pc2_init_module(void)
{
int ret;
ret = gpib_register_driver(&pc2_interface, THIS_MODULE);
if (ret) {
- pr_err("pc2_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
return ret;
}
ret = gpib_register_driver(&pc2a_interface, THIS_MODULE);
if (ret) {
- pr_err("pc2_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pc2a;
}
ret = gpib_register_driver(&pc2a_cb7210_interface, THIS_MODULE);
if (ret) {
- pr_err("pc2_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_cb7210;
}
ret = gpib_register_driver(&pc2_2a_interface, THIS_MODULE);
if (ret) {
- pr_err("pc2_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pc2_2a;
}
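
The pr_fmt/dev_fmt defines added at the top of these files are what make dropping the hand-written "pc2_gpib:"-style prefixes safe: every pr_*() and dev_*() call in the translation unit gets the module name prepended at compile time. Minimal sketch:

/* pr_fmt must be defined before the printk headers are included;
 * KBUILD_MODNAME expands to this object's module name.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define dev_fmt pr_fmt

#include <linux/module.h>
#include <linux/printk.h>

static int __init fmt_demo_init(void)
{
	pr_err("gpib_register_driver failed: error = %d\n", -19);
	/* logs: "<modname>: gpib_register_driver failed: error = -19" */
	return 0;
}
module_init(fmt_demo_init);
MODULE_LICENSE("GPL");
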
diff --git a/drivers/staging/gpib/tms9914/tms9914.c b/drivers/staging/gpib/tms9914/tms9914.c
index ec8e1d4d762f..2abda9d7dfcb 100644
--- a/drivers/staging/gpib/tms9914/tms9914.c
+++ b/drivers/staging/gpib/tms9914/tms9914.c
@@ -4,6 +4,9 @@
* copyright : (C) 2001, 2002 by Frank Mori Hess
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/module.h>
@@ -24,9 +27,9 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB library for tms9914");
-static unsigned int update_status_nolock(gpib_board_t *board, struct tms9914_priv *priv);
+static unsigned int update_status_nolock(struct gpib_board *board, struct tms9914_priv *priv);
-int tms9914_take_control(gpib_board_t *board, struct tms9914_priv *priv, int synchronous)
+int tms9914_take_control(struct gpib_board *board, struct tms9914_priv *priv, int synchronous)
{
int i;
const int timeout = 100;
@@ -63,7 +66,7 @@ EXPORT_SYMBOL_GPL(tms9914_take_control);
* The rest of the tms9914 based drivers still use tms9914_take_control
* directly (which does issue tcs).
*/
-int tms9914_take_control_workaround(gpib_board_t *board, struct tms9914_priv *priv, int synchronous)
+int tms9914_take_control_workaround(struct gpib_board *board, struct tms9914_priv *priv, int synchronous)
{
if (synchronous)
return -ETIMEDOUT;
@@ -71,7 +74,7 @@ int tms9914_take_control_workaround(gpib_board_t *board, struct tms9914_priv *pr
}
EXPORT_SYMBOL_GPL(tms9914_take_control_workaround);
-int tms9914_go_to_standby(gpib_board_t *board, struct tms9914_priv *priv)
+int tms9914_go_to_standby(struct gpib_board *board, struct tms9914_priv *priv)
{
int i;
const int timeout = 1000;
@@ -83,10 +86,8 @@ int tms9914_go_to_standby(gpib_board_t *board, struct tms9914_priv *priv)
break;
udelay(1);
}
- if (i == timeout) {
- pr_err("error waiting for NATN\n");
+ if (i == timeout)
return -ETIMEDOUT;
- }
clear_bit(COMMAND_READY_BN, &priv->state);
@@ -94,7 +95,7 @@ int tms9914_go_to_standby(gpib_board_t *board, struct tms9914_priv *priv)
}
EXPORT_SYMBOL_GPL(tms9914_go_to_standby);
-void tms9914_interface_clear(gpib_board_t *board, struct tms9914_priv *priv, int assert)
+void tms9914_interface_clear(struct gpib_board *board, struct tms9914_priv *priv, int assert)
{
if (assert) {
write_byte(priv, AUX_SIC | AUX_CS, AUXCR);
@@ -106,7 +107,7 @@ void tms9914_interface_clear(gpib_board_t *board, struct tms9914_priv *priv, int
}
EXPORT_SYMBOL_GPL(tms9914_interface_clear);
-void tms9914_remote_enable(gpib_board_t *board, struct tms9914_priv *priv, int enable)
+void tms9914_remote_enable(struct gpib_board *board, struct tms9914_priv *priv, int enable)
{
if (enable)
write_byte(priv, AUX_SRE | AUX_CS, AUXCR);
@@ -115,7 +116,7 @@ void tms9914_remote_enable(gpib_board_t *board, struct tms9914_priv *priv, int e
}
EXPORT_SYMBOL_GPL(tms9914_remote_enable);
-void tms9914_request_system_control(gpib_board_t *board, struct tms9914_priv *priv,
+void tms9914_request_system_control(struct gpib_board *board, struct tms9914_priv *priv,
int request_control)
{
if (request_control) {
@@ -127,7 +128,7 @@ void tms9914_request_system_control(gpib_board_t *board, struct tms9914_priv *pr
}
EXPORT_SYMBOL_GPL(tms9914_request_system_control);
-unsigned int tms9914_t1_delay(gpib_board_t *board, struct tms9914_priv *priv,
+unsigned int tms9914_t1_delay(struct gpib_board *board, struct tms9914_priv *priv,
unsigned int nano_sec)
{
static const int clock_period = 200; // assuming 5Mhz input clock
@@ -153,7 +154,7 @@ unsigned int tms9914_t1_delay(gpib_board_t *board, struct tms9914_priv *priv,
}
EXPORT_SYMBOL_GPL(tms9914_t1_delay);
-void tms9914_return_to_local(const gpib_board_t *board, struct tms9914_priv *priv)
+void tms9914_return_to_local(const struct gpib_board *board, struct tms9914_priv *priv)
{
write_byte(priv, AUX_RTL, AUXCR);
}
@@ -175,7 +176,7 @@ void tms9914_set_holdoff_mode(struct tms9914_priv *priv, enum tms9914_holdoff_mo
write_byte(priv, AUX_HLDA | AUX_CS, AUXCR);
break;
default:
- pr_err("%s: bug! bad holdoff mode %i\n", __func__, mode);
+ pr_err("bug! bad holdoff mode %i\n", mode);
break;
}
priv->holdoff_mode = mode;
@@ -191,7 +192,7 @@ void tms9914_release_holdoff(struct tms9914_priv *priv)
}
EXPORT_SYMBOL_GPL(tms9914_release_holdoff);
-int tms9914_enable_eos(gpib_board_t *board, struct tms9914_priv *priv, uint8_t eos_byte,
+int tms9914_enable_eos(struct gpib_board *board, struct tms9914_priv *priv, uint8_t eos_byte,
int compare_8_bits)
{
priv->eos = eos_byte;
@@ -202,13 +203,13 @@ int tms9914_enable_eos(gpib_board_t *board, struct tms9914_priv *priv, uint8_t e
}
EXPORT_SYMBOL(tms9914_enable_eos);
-void tms9914_disable_eos(gpib_board_t *board, struct tms9914_priv *priv)
+void tms9914_disable_eos(struct gpib_board *board, struct tms9914_priv *priv)
{
priv->eos_flags &= ~REOS;
}
EXPORT_SYMBOL(tms9914_disable_eos);
-int tms9914_parallel_poll(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *result)
+int tms9914_parallel_poll(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *result)
{
// execute parallel poll
write_byte(priv, AUX_CS | AUX_RPP, AUXCR);
@@ -233,7 +234,7 @@ static void set_ppoll_reg(struct tms9914_priv *priv, int enable,
}
}
-void tms9914_parallel_poll_configure(gpib_board_t *board,
+void tms9914_parallel_poll_configure(struct gpib_board *board,
struct tms9914_priv *priv, uint8_t config)
{
priv->ppoll_enable = (config & PPC_DISABLE) == 0;
@@ -243,14 +244,14 @@ void tms9914_parallel_poll_configure(gpib_board_t *board,
}
EXPORT_SYMBOL(tms9914_parallel_poll_configure);
-void tms9914_parallel_poll_response(gpib_board_t *board,
+void tms9914_parallel_poll_response(struct gpib_board *board,
struct tms9914_priv *priv, int ist)
{
set_ppoll_reg(priv, priv->ppoll_enable, priv->ppoll_line, priv->ppoll_sense, ist);
}
EXPORT_SYMBOL(tms9914_parallel_poll_response);
-void tms9914_serial_poll_response(gpib_board_t *board, struct tms9914_priv *priv, uint8_t status)
+void tms9914_serial_poll_response(struct gpib_board *board, struct tms9914_priv *priv, uint8_t status)
{
unsigned long flags;
@@ -265,7 +266,7 @@ void tms9914_serial_poll_response(gpib_board_t *board, struct tms9914_priv *priv
}
EXPORT_SYMBOL(tms9914_serial_poll_response);
-uint8_t tms9914_serial_poll_status(gpib_board_t *board, struct tms9914_priv *priv)
+uint8_t tms9914_serial_poll_status(struct gpib_board *board, struct tms9914_priv *priv)
{
u8 status;
unsigned long flags;
@@ -278,7 +279,7 @@ uint8_t tms9914_serial_poll_status(gpib_board_t *board, struct tms9914_priv *pri
}
EXPORT_SYMBOL(tms9914_serial_poll_status);
-int tms9914_primary_address(gpib_board_t *board, struct tms9914_priv *priv, unsigned int address)
+int tms9914_primary_address(struct gpib_board *board, struct tms9914_priv *priv, unsigned int address)
{
// put primary address in address0
write_byte(priv, address & ADDRESS_MASK, ADR);
@@ -286,7 +287,7 @@ int tms9914_primary_address(gpib_board_t *board, struct tms9914_priv *priv, unsi
}
EXPORT_SYMBOL(tms9914_primary_address);
-int tms9914_secondary_address(gpib_board_t *board, struct tms9914_priv *priv,
+int tms9914_secondary_address(struct gpib_board *board, struct tms9914_priv *priv,
unsigned int address, int enable)
{
if (enable)
@@ -299,7 +300,7 @@ int tms9914_secondary_address(gpib_board_t *board, struct tms9914_priv *priv,
}
EXPORT_SYMBOL(tms9914_secondary_address);
-unsigned int tms9914_update_status(gpib_board_t *board, struct tms9914_priv *priv,
+unsigned int tms9914_update_status(struct gpib_board *board, struct tms9914_priv *priv,
unsigned int clear_mask)
{
unsigned long flags;
@@ -341,7 +342,7 @@ static void update_listener_state(struct tms9914_priv *priv, unsigned int addres
}
}
-static unsigned int update_status_nolock(gpib_board_t *board, struct tms9914_priv *priv)
+static unsigned int update_status_nolock(struct gpib_board *board, struct tms9914_priv *priv)
{
int address_status;
int bsr_bits;
@@ -387,29 +388,29 @@ static unsigned int update_status_nolock(gpib_board_t *board, struct tms9914_pri
return board->status;
}
-int tms9914_line_status(const gpib_board_t *board, struct tms9914_priv *priv)
+int tms9914_line_status(const struct gpib_board *board, struct tms9914_priv *priv)
{
int bsr_bits;
- int status = ValidALL;
+ int status = VALID_ALL;
bsr_bits = read_byte(priv, BSR);
if (bsr_bits & BSR_REN_BIT)
- status |= BusREN;
+ status |= BUS_REN;
if (bsr_bits & BSR_IFC_BIT)
- status |= BusIFC;
+ status |= BUS_IFC;
if (bsr_bits & BSR_SRQ_BIT)
- status |= BusSRQ;
+ status |= BUS_SRQ;
if (bsr_bits & BSR_EOI_BIT)
- status |= BusEOI;
+ status |= BUS_EOI;
if (bsr_bits & BSR_NRFD_BIT)
- status |= BusNRFD;
+ status |= BUS_NRFD;
if (bsr_bits & BSR_NDAC_BIT)
- status |= BusNDAC;
+ status |= BUS_NDAC;
if (bsr_bits & BSR_DAV_BIT)
- status |= BusDAV;
+ status |= BUS_DAV;
if (bsr_bits & BSR_ATN_BIT)
- status |= BusATN;
+ status |= BUS_ATN;
return status;
}
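
tms9914_line_status() is a bit-for-bit translation from the raw BSR register to the renamed BUS_* flags. Once an if-chain like this grows, a table-driven mapping is a common alternative; a standalone sketch with invented bit values:

#include <stdio.h>

/* Illustrative bit values only - not the tms9914's real layout. */
#define BSR_REN_BIT 0x01
#define BSR_IFC_BIT 0x02
#define VALID_ALL   0x0100
#define BUS_REN     0x0200
#define BUS_IFC     0x0400

static const struct { int bsr_bit, bus_bit; } line_map[] = {
	{ BSR_REN_BIT, BUS_REN },
	{ BSR_IFC_BIT, BUS_IFC },
};

static int line_status(int bsr_bits)
{
	int status = VALID_ALL;
	unsigned int i;

	for (i = 0; i < sizeof(line_map) / sizeof(line_map[0]); i++)
		if (bsr_bits & line_map[i].bsr_bit)
			status |= line_map[i].bus_bit;
	return status;
}

int main(void)
{
	printf("0x%x\n", line_status(BSR_REN_BIT));	/* VALID_ALL | BUS_REN */
	return 0;
}
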
@@ -432,15 +433,14 @@ static int check_for_eos(struct tms9914_priv *priv, uint8_t byte)
return 0;
}
-static int wait_for_read_byte(gpib_board_t *board, struct tms9914_priv *priv)
+static int wait_for_read_byte(struct gpib_board *board, struct tms9914_priv *priv)
{
if (wait_event_interruptible(board->wait,
test_bit(READ_READY_BN, &priv->state) ||
test_bit(DEV_CLEAR_BN, &priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
- pr_debug("gpib: pio read wait interrupted\n");
+ test_bit(TIMO_NUM, &board->status)))
return -ERESTARTSYS;
- }
+
if (test_bit(TIMO_NUM, &board->status))
return -ETIMEDOUT;
@@ -449,7 +449,7 @@ static int wait_for_read_byte(gpib_board_t *board, struct tms9914_priv *priv)
return 0;
}
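
wait_for_read_byte() above, like pio_write_wait() and tms9914_command() below, follows one pattern: a nonzero return from wait_event_interruptible() means a signal arrived first, so -ERESTARTSYS comes back without logging, and the timeout/device-clear conditions are re-tested explicitly afterwards. Condensed sketch (bit names from the driver; the device-clear error code is illustrative):

static int example_wait(struct gpib_board *board, struct tms9914_priv *priv)
{
	if (wait_event_interruptible(board->wait,
				     test_bit(READ_READY_BN, &priv->state) ||
				     test_bit(DEV_CLEAR_BN, &priv->state) ||
				     test_bit(TIMO_NUM, &board->status)))
		return -ERESTARTSYS;	/* a signal won the race */
	if (test_bit(TIMO_NUM, &board->status))
		return -ETIMEDOUT;	/* board-level timeout fired */
	if (test_bit(DEV_CLEAR_BN, &priv->state))
		return -EINTR;		/* device clear aborted the transfer */
	return 0;
}
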
-static inline uint8_t tms9914_read_data_in(gpib_board_t *board, struct tms9914_priv *priv, int *end)
+static inline uint8_t tms9914_read_data_in(struct gpib_board *board, struct tms9914_priv *priv, int *end)
{
unsigned long flags;
u8 data;
@@ -472,7 +472,7 @@ static inline uint8_t tms9914_read_data_in(gpib_board_t *board, struct tms9914_p
case TMS9914_HOLDOFF_NONE:
break;
default:
- pr_err("%s: bug! bad holdoff mode %i\n", __func__, priv->holdoff_mode);
+ dev_err(board->gpib_dev, "bug! bad holdoff mode %i\n", priv->holdoff_mode);
break;
}
spin_unlock_irqrestore(&board->spinlock, flags);
@@ -480,7 +480,7 @@ static inline uint8_t tms9914_read_data_in(gpib_board_t *board, struct tms9914_p
return data;
}
-static int pio_read(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *buffer,
+static int pio_read(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer,
size_t length, int *end, size_t *bytes_read)
{
ssize_t retval = 0;
@@ -501,7 +501,7 @@ static int pio_read(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *buf
return retval;
}
-int tms9914_read(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *buffer,
+int tms9914_read(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer,
size_t length, int *end, size_t *bytes_read)
{
ssize_t retval = 0;
@@ -541,17 +541,16 @@ int tms9914_read(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *buffer
}
EXPORT_SYMBOL(tms9914_read);
-static int pio_write_wait(gpib_board_t *board, struct tms9914_priv *priv)
+static int pio_write_wait(struct gpib_board *board, struct tms9914_priv *priv)
{
// wait until next byte is ready to be sent
if (wait_event_interruptible(board->wait,
test_bit(WRITE_READY_BN, &priv->state) ||
test_bit(BUS_ERROR_BN, &priv->state) ||
test_bit(DEV_CLEAR_BN, &priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "gpib write interrupted!\n");
+ test_bit(TIMO_NUM, &board->status)))
return -ERESTARTSYS;
- }
+
if (test_bit(TIMO_NUM, &board->status))
return -ETIMEDOUT;
if (test_bit(BUS_ERROR_BN, &priv->state))
@@ -562,7 +561,7 @@ static int pio_write_wait(gpib_board_t *board, struct tms9914_priv *priv)
return 0;
}
-static int pio_write(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *buffer,
+static int pio_write(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer,
size_t length, size_t *bytes_written)
{
ssize_t retval = 0;
@@ -586,7 +585,7 @@ static int pio_write(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *bu
return length;
}
-int tms9914_write(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *buffer, size_t length,
+int tms9914_write(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
ssize_t retval = 0;
@@ -621,7 +620,7 @@ int tms9914_write(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *buffe
}
EXPORT_SYMBOL(tms9914_write);
-static void check_my_address_state(gpib_board_t *board, struct tms9914_priv *priv, int cmd_byte)
+static void check_my_address_state(struct gpib_board *board, struct tms9914_priv *priv, int cmd_byte)
{
if (cmd_byte == MLA(board->pad)) {
priv->primary_listen_addressed = 1;
@@ -656,7 +655,7 @@ static void check_my_address_state(gpib_board_t *board, struct tms9914_priv *pri
}
}
-int tms9914_command(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *buffer,
+int tms9914_command(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer,
size_t length, size_t *bytes_written)
{
int retval = 0;
@@ -667,10 +666,8 @@ int tms9914_command(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *bu
if (wait_event_interruptible(board->wait,
test_bit(COMMAND_READY_BN,
&priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
- pr_debug("gpib command wait interrupted\n");
+ test_bit(TIMO_NUM, &board->status)))
break;
- }
if (test_bit(TIMO_NUM, &board->status))
break;
@@ -695,7 +692,7 @@ int tms9914_command(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *bu
}
EXPORT_SYMBOL(tms9914_command);
-irqreturn_t tms9914_interrupt(gpib_board_t *board, struct tms9914_priv *priv)
+irqreturn_t tms9914_interrupt(struct gpib_board *board, struct tms9914_priv *priv)
{
int status0, status1;
@@ -706,7 +703,7 @@ irqreturn_t tms9914_interrupt(gpib_board_t *board, struct tms9914_priv *priv)
}
EXPORT_SYMBOL(tms9914_interrupt);
-irqreturn_t tms9914_interrupt_have_status(gpib_board_t *board, struct tms9914_priv *priv,
+irqreturn_t tms9914_interrupt_have_status(struct gpib_board *board, struct tms9914_priv *priv,
int status0, int status1)
{
// record reception of END
@@ -761,8 +758,6 @@ irqreturn_t tms9914_interrupt_have_status(gpib_board_t *board, struct tms9914_pr
write_byte(priv, AUX_INVAL, AUXCR);
}
} else {
- // printk("tms9914: unrecognized gpib command pass thru 0x%x\n",
- // command_byte);
// clear dac holdoff
write_byte(priv, AUX_INVAL, AUXCR);
}
@@ -799,7 +794,7 @@ irqreturn_t tms9914_interrupt_have_status(gpib_board_t *board, struct tms9914_pr
// check for being addressed with secondary addressing
if (status1 & HR_APT) {
if (board->sad < 0)
- pr_err("tms9914: bug, APT interrupt without secondary addressing?\n");
+ dev_err(board->gpib_dev, "bug, APT interrupt without secondary addressing?\n");
if ((read_byte(priv, CPTR) & gpib_command_mask) == MSA(board->sad))
write_byte(priv, AUX_VAL, AUXCR);
else
@@ -807,8 +802,8 @@ irqreturn_t tms9914_interrupt_have_status(gpib_board_t *board, struct tms9914_pr
}
if ((status0 & priv->imr0_bits) || (status1 & priv->imr1_bits)) {
-// dev_dbg(board->gpib_dev, "isr0 0x%x, imr0 0x%x, isr1 0x%x, imr1 0x%x\n",
-// status0, priv->imr0_bits, status1, priv->imr1_bits);
+ dev_dbg(board->gpib_dev, "isr0 0x%x, imr0 0x%x, isr1 0x%x, imr1 0x%x\n",
+ status0, priv->imr0_bits, status1, priv->imr1_bits);
update_status_nolock(board, priv);
wake_up_interruptible(&board->wait);
}
@@ -842,7 +837,7 @@ void tms9914_board_reset(struct tms9914_priv *priv)
}
EXPORT_SYMBOL_GPL(tms9914_board_reset);
-void tms9914_online(gpib_board_t *board, struct tms9914_priv *priv)
+void tms9914_online(struct gpib_board *board, struct tms9914_priv *priv)
{
/* set GPIB address */
tms9914_primary_address(board, priv, board->pad);
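tms9914_interrupt() and tms9914_interrupt_have_status() are exported for the board drivers built on this library; a hedged sketch of the usual wiring (example_priv and example_irq are illustrative names, not taken from this series):

struct example_priv {
	struct tms9914_priv tms9914_priv;
	/* board-specific members follow */
};

static irqreturn_t example_irq(int irq, void *arg)
{
	struct gpib_board *board = arg;
	struct example_priv *priv = board->private_data;

	/* reads ISR0/ISR1 and forwards them to
	 * tms9914_interrupt_have_status()
	 */
	return tms9914_interrupt(board, &priv->tms9914_priv);
}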
diff --git a/drivers/staging/gpib/tnt4882/Makefile b/drivers/staging/gpib/tnt4882/Makefile
index a3c3fb96d5ed..fa1687ad0d1b 100644
--- a/drivers/staging/gpib/tnt4882/Makefile
+++ b/drivers/staging/gpib/tnt4882/Makefile
@@ -1,4 +1,3 @@
-ccflags-$(CONFIG_GPIB_PCMCIA) := -DGPIB_PCMCIA
obj-$(CONFIG_GPIB_NI_PCI_ISA) += tnt4882.o
tnt4882-objs := tnt4882_gpib.o mite.o
diff --git a/drivers/staging/gpib/tnt4882/mite.c b/drivers/staging/gpib/tnt4882/mite.c
index ea64dde46bcb..847b96f411bd 100644
--- a/drivers/staging/gpib/tnt4882/mite.c
+++ b/drivers/staging/gpib/tnt4882/mite.c
@@ -88,7 +88,6 @@ int mite_setup(struct mite_struct *mite)
pr_err("mite: failed to remap mite io memory address.\n");
return -ENOMEM;
}
- pr_info("mite: 0x%08lx mapped to %p\n", mite->mite_phys_addr, mite->mite_io_addr);
addr = pci_resource_start(mite->pcidev, 1);
mite->daq_phys_addr = addr;
mite->daq_io_addr = ioremap(mite->daq_phys_addr, pci_resource_len(mite->pcidev, 1));
@@ -96,7 +95,6 @@ int mite_setup(struct mite_struct *mite)
pr_err("mite: failed to remap daq io memory address.\n");
return -ENOMEM;
}
- pr_info("mite: daq: 0x%08lx mapped to %p\n", mite->daq_phys_addr, mite->daq_io_addr);
writel(mite->daq_phys_addr | WENAB, mite->mite_io_addr + MITE_IODWBSR);
mite->used = 1;
return 0;
@@ -133,18 +131,3 @@ void mite_unsetup(struct mite_struct *mite)
}
mite->used = 0;
}
-
-void mite_list_devices(void)
-{
- struct mite_struct *mite, *next;
-
- pr_info("Available NI PCI device IDs:");
- if (mite_devices)
- for (mite = mite_devices; mite; mite = next) {
- next = mite->next;
- pr_info(" 0x%04x", mite_device_id(mite));
- if (mite->used)
- pr_info("(used)");
- }
- pr_info("\n");
-}
diff --git a/drivers/staging/gpib/tnt4882/tnt4882_gpib.c b/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
index b39ab2abe495..c35b084b6fd0 100644
--- a/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
+++ b/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
@@ -5,6 +5,10 @@
* copyright : (C) 2001, 2002 by Frank Mori Hess
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define DRV_NAME KBUILD_MODNAME
+
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/module.h>
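The pr_fmt/dev_fmt defines above make every pr_*() and dev_*() call in this file prefix itself with the module name, which is why the hand-written "tnt4882:"/"tnt4882_gpib:" prefixes disappear from the messages below. A stand-alone sketch of the mechanism, approximated in user space (the fprintf-based pr_err is only a stand-in for the kernel's printk plumbing):

#include <stdio.h>

#define KBUILD_MODNAME "tnt4882"
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	int result = -19;

	pr_err("pci_register_driver failed: error = %d\n", result);
	/* prints: tnt4882: pci_register_driver failed: error = -19 */
	return 0;
}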
@@ -47,49 +51,7 @@ struct tnt4882_priv {
unsigned short auxg_bits; // bits written to auxiliary register G
};
-// interface functions
-static int tnt4882_read(gpib_board_t *board, uint8_t *buffer, size_t length,
- int *end, size_t *bytes_read);
-static int tnt4882_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length,
- int *end, size_t *bytes_read);
-static int tnt4882_write(gpib_board_t *board, uint8_t *buffer, size_t length,
- int send_eoi, size_t *bytes_written);
-static int tnt4882_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length,
- int send_eoi, size_t *bytes_written);
-static int tnt4882_command(gpib_board_t *board, uint8_t *buffer, size_t length,
- size_t *bytes_written);
-static int tnt4882_command_unaccel(gpib_board_t *board, uint8_t *buffer,
- size_t length, size_t *bytes_written);
-static int tnt4882_take_control(gpib_board_t *board, int synchronous);
-static int tnt4882_go_to_standby(gpib_board_t *board);
-static void tnt4882_request_system_control(gpib_board_t *board, int request_control);
-static void tnt4882_interface_clear(gpib_board_t *board, int assert);
-static void tnt4882_remote_enable(gpib_board_t *board, int enable);
-static int tnt4882_enable_eos(gpib_board_t *board, uint8_t eos_byte, int
- compare_8_bits);
-static void tnt4882_disable_eos(gpib_board_t *board);
-static unsigned int tnt4882_update_status(gpib_board_t *board, unsigned int clear_mask);
-static int tnt4882_primary_address(gpib_board_t *board, unsigned int address);
-static int tnt4882_secondary_address(gpib_board_t *board, unsigned int address,
- int enable);
-static int tnt4882_parallel_poll(gpib_board_t *board, uint8_t *result);
-static void tnt4882_parallel_poll_configure(gpib_board_t *board, uint8_t config);
-static void tnt4882_parallel_poll_response(gpib_board_t *board, int ist);
-static void tnt4882_serial_poll_response(gpib_board_t *board, uint8_t status);
-static uint8_t tnt4882_serial_poll_status(gpib_board_t *board);
-static int tnt4882_line_status(const gpib_board_t *board);
-static unsigned int tnt4882_t1_delay(gpib_board_t *board, unsigned int nano_sec);
-static void tnt4882_return_to_local(gpib_board_t *board);
-
-// interrupt service routines
-static irqreturn_t tnt4882_internal_interrupt(gpib_board_t *board);
-static irqreturn_t tnt4882_interrupt(int irq, void *arg);
-
-// utility functions
-static int tnt4882_allocate_private(gpib_board_t *board);
-static void tnt4882_free_private(gpib_board_t *board);
-static void tnt4882_init(struct tnt4882_priv *tnt_priv, const gpib_board_t *board);
-static void tnt4882_board_reset(struct tnt4882_priv *tnt_priv, gpib_board_t *board);
+static irqreturn_t tnt4882_internal_interrupt(struct gpib_board *board);
// register offset for nec7210 compatible registers
static const int atgpib_reg_offset = 2;
@@ -139,7 +101,6 @@ static inline unsigned short tnt_readb(struct tnt4882_priv *priv, unsigned long
retval = 0;
break;
default:
- pr_err("tnt4882: bug! unsupported ni_chipset\n");
retval = 0;
break;
}
@@ -174,7 +135,6 @@ static inline void tnt_writeb(struct tnt4882_priv *priv, unsigned short value, u
case NEC7210:
break;
default:
- pr_err("tnt4882: bug! unsupported ni_chipset\n");
break;
}
break;
@@ -188,9 +148,9 @@ static inline void tnt_writeb(struct tnt4882_priv *priv, unsigned short value, u
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB driver for National Instruments boards using tnt4882 or compatible chips");
-int tnt4882_line_status(const gpib_board_t *board)
+static int tnt4882_line_status(const struct gpib_board *board)
{
- int status = ValidALL;
+ int status = VALID_ALL;
int bcsr_bits;
struct tnt4882_priv *tnt_priv;
@@ -199,26 +159,26 @@ int tnt4882_line_status(const gpib_board_t *board)
bcsr_bits = tnt_readb(tnt_priv, BSR);
if (bcsr_bits & BCSR_REN_BIT)
- status |= BusREN;
+ status |= BUS_REN;
if (bcsr_bits & BCSR_IFC_BIT)
- status |= BusIFC;
+ status |= BUS_IFC;
if (bcsr_bits & BCSR_SRQ_BIT)
- status |= BusSRQ;
+ status |= BUS_SRQ;
if (bcsr_bits & BCSR_EOI_BIT)
- status |= BusEOI;
+ status |= BUS_EOI;
if (bcsr_bits & BCSR_NRFD_BIT)
- status |= BusNRFD;
+ status |= BUS_NRFD;
if (bcsr_bits & BCSR_NDAC_BIT)
- status |= BusNDAC;
+ status |= BUS_NDAC;
if (bcsr_bits & BCSR_DAV_BIT)
- status |= BusDAV;
+ status |= BUS_DAV;
if (bcsr_bits & BCSR_ATN_BIT)
- status |= BusATN;
+ status |= BUS_ATN;
return status;
}
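With the bus-line constants renamed from CamelCase (BusREN, BusSRQ, ...) to BUS_*, consumers of the line_status() mask read naturally; a hedged sketch of a hypothetical caller (only the flag names come from this series, the helper itself is illustrative):

static void example_log_lines(struct gpib_board *board)
{
	int lines = tnt4882_line_status(board);

	if (lines & BUS_SRQ)
		dev_dbg(board->gpib_dev, "SRQ asserted\n");
	if ((lines & BUS_NRFD) || (lines & BUS_NDAC))
		dev_dbg(board->gpib_dev, "acceptor handshake in progress\n");
}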
-unsigned int tnt4882_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+static int tnt4882_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
struct tnt4882_priv *tnt_priv = board->private_data;
struct nec7210_priv *nec_priv = &tnt_priv->nec7210_priv;
@@ -291,7 +251,7 @@ static int drain_fifo_words(struct tnt4882_priv *tnt_priv, uint8_t *buffer, int
return count;
}
-static void tnt4882_release_holdoff(gpib_board_t *board, struct tnt4882_priv *tnt_priv)
+static void tnt4882_release_holdoff(struct gpib_board *board, struct tnt4882_priv *tnt_priv)
{
struct nec7210_priv *nec_priv = &tnt_priv->nec7210_priv;
unsigned short sasr_bits;
@@ -314,8 +274,8 @@ static void tnt4882_release_holdoff(gpib_board_t *board, struct tnt4882_priv *tn
}
}
-int tnt4882_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
- size_t *bytes_read)
+static int tnt4882_accel_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
+ size_t *bytes_read)
{
size_t count = 0;
ssize_t retval = 0;
@@ -368,22 +328,18 @@ int tnt4882_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
test_bit(ADR_CHANGE_BN, &nec_priv->state) ||
test_bit(TIMO_NUM, &board->status))) {
- pr_err("tnt4882: read interrupted\n");
retval = -ERESTARTSYS;
break;
}
if (test_bit(TIMO_NUM, &board->status)) {
- //pr_info("tnt4882: minor %i read timed out\n", board->minor);
retval = -ETIMEDOUT;
break;
}
if (test_bit(DEV_CLEAR_BN, &nec_priv->state)) {
- pr_err("tnt4882: device clear interrupted read\n");
retval = -EINTR;
break;
}
if (test_bit(ADR_CHANGE_BN, &nec_priv->state)) {
- pr_err("tnt4882: address change interrupted read\n");
retval = -EINTR;
break;
}
@@ -410,20 +366,14 @@ int tnt4882_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
test_bit(ADR_CHANGE_BN, &nec_priv->state) ||
test_bit(TIMO_NUM, &board->status))) {
- pr_err("tnt4882: read interrupted\n");
retval = -ERESTARTSYS;
}
if (test_bit(TIMO_NUM, &board->status))
- //pr_info("tnt4882: read timed out\n");
retval = -ETIMEDOUT;
- if (test_bit(DEV_CLEAR_BN, &nec_priv->state)) {
- pr_err("tnt4882: device clear interrupted read\n");
+ if (test_bit(DEV_CLEAR_BN, &nec_priv->state))
retval = -EINTR;
- }
- if (test_bit(ADR_CHANGE_BN, &nec_priv->state)) {
- pr_err("tnt4882: address change interrupted read\n");
+ if (test_bit(ADR_CHANGE_BN, &nec_priv->state))
retval = -EINTR;
- }
count += drain_fifo_words(tnt_priv, &buffer[count], length - count);
if (fifo_byte_available(tnt_priv) && count < length)
buffer[count++] = tnt_readb(tnt_priv, FIFOB);
@@ -476,7 +426,7 @@ static unsigned int tnt_transfer_count(struct tnt4882_priv *tnt_priv)
return -count;
};
-static int write_wait(gpib_board_t *board, struct tnt4882_priv *tnt_priv,
+static int write_wait(struct gpib_board *board, struct tnt4882_priv *tnt_priv,
int wait_for_done, int send_commands)
{
struct nec7210_priv *nec_priv = &tnt_priv->nec7210_priv;
@@ -486,26 +436,19 @@ static int write_wait(gpib_board_t *board, struct tnt4882_priv *tnt_priv,
fifo_xfer_done(tnt_priv) ||
test_bit(BUS_ERROR_BN, &nec_priv->state) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "gpib write interrupted\n");
+ test_bit(TIMO_NUM, &board->status)))
return -ERESTARTSYS;
- }
- if (test_bit(TIMO_NUM, &board->status)) {
- pr_info("tnt4882: write timed out\n");
+
+ if (test_bit(TIMO_NUM, &board->status))
return -ETIMEDOUT;
- }
- if (test_and_clear_bit(BUS_ERROR_BN, &nec_priv->state)) {
- pr_err("tnt4882: write bus error\n");
+ if (test_and_clear_bit(BUS_ERROR_BN, &nec_priv->state))
return (send_commands) ? -ENOTCONN : -ECOMM;
- }
- if (test_bit(DEV_CLEAR_BN, &nec_priv->state)) {
- pr_err("tnt4882: device clear interrupted write\n");
+ if (test_bit(DEV_CLEAR_BN, &nec_priv->state))
return -EINTR;
- }
return 0;
}
-static int generic_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int generic_write(struct gpib_board *board, uint8_t *buffer, size_t length,
int send_eoi, int send_commands, size_t *bytes_written)
{
size_t count = 0;
@@ -596,18 +539,19 @@ static int generic_write(gpib_board_t *board, uint8_t *buffer, size_t length,
return retval;
}
-int tnt4882_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written)
+static int tnt4882_accel_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
+ size_t *bytes_written)
{
return generic_write(board, buffer, length, send_eoi, 0, bytes_written);
}
-int tnt4882_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written)
+static int tnt4882_command(struct gpib_board *board, uint8_t *buffer, size_t length,
+ size_t *bytes_written)
{
return generic_write(board, buffer, length, 0, 1, bytes_written);
}
-irqreturn_t tnt4882_internal_interrupt(gpib_board_t *board)
+static irqreturn_t tnt4882_internal_interrupt(struct gpib_board *board)
{
struct tnt4882_priv *priv = board->private_data;
int isr0_bits, isr3_bits, imr3_bits;
@@ -633,7 +577,7 @@ irqreturn_t tnt4882_internal_interrupt(gpib_board_t *board)
if (isr3_bits & HR_DONE)
priv->imr3_bits &= ~HR_DONE;
if (isr3_bits & (HR_INTR | HR_TLCI)) {
- dev_dbg(board->gpib_dev, "tnt4882: minor %i isr0 0x%x imr0 0x%x isr3 0x%x imr3 0x%x\n",
+ dev_dbg(board->gpib_dev, "minor %i isr0 0x%x imr0 0x%x isr3 0x%x imr3 0x%x\n",
board->minor, isr0_bits, priv->imr0_bits, isr3_bits, imr3_bits);
tnt_writeb(priv, priv->imr3_bits, IMR3);
wake_up_interruptible(&board->wait);
@@ -642,28 +586,14 @@ irqreturn_t tnt4882_internal_interrupt(gpib_board_t *board)
return IRQ_HANDLED;
}
-irqreturn_t tnt4882_interrupt(int irq, void *arg)
+static irqreturn_t tnt4882_interrupt(int irq, void *arg)
{
return tnt4882_internal_interrupt(arg);
}
-static int ni_tnt_isa_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static int ni_nat4882_isa_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static int ni_nec_isa_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static int ni_pci_attach(gpib_board_t *board, const gpib_board_config_t *config);
-
-static void ni_isa_detach(gpib_board_t *board);
-static void ni_pci_detach(gpib_board_t *board);
-
-#ifdef GPIB_PCMCIA
-static int ni_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static void ni_pcmcia_detach(gpib_board_t *board);
-static int init_ni_gpib_cs(void);
-static void __exit exit_ni_gpib_cs(void);
-#endif
-
// wrappers for interface functions
-int tnt4882_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read)
+static int tnt4882_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
+ size_t *bytes_read)
{
struct tnt4882_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -682,37 +612,37 @@ int tnt4882_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
return retval;
}
-int tnt4882_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written)
+static int tnt4882_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
+ size_t *bytes_written)
{
struct tnt4882_priv *priv = board->private_data;
return nec7210_write(board, &priv->nec7210_priv, buffer, length, send_eoi, bytes_written);
}
-int tnt4882_command_unaccel(gpib_board_t *board, uint8_t *buffer,
- size_t length, size_t *bytes_written)
+static int tnt4882_command_unaccel(struct gpib_board *board, uint8_t *buffer,
+ size_t length, size_t *bytes_written)
{
struct tnt4882_priv *priv = board->private_data;
return nec7210_command(board, &priv->nec7210_priv, buffer, length, bytes_written);
}
-int tnt4882_take_control(gpib_board_t *board, int synchronous)
+static int tnt4882_take_control(struct gpib_board *board, int synchronous)
{
struct tnt4882_priv *priv = board->private_data;
return nec7210_take_control(board, &priv->nec7210_priv, synchronous);
}
-int tnt4882_go_to_standby(gpib_board_t *board)
+static int tnt4882_go_to_standby(struct gpib_board *board)
{
struct tnt4882_priv *priv = board->private_data;
return nec7210_go_to_standby(board, &priv->nec7210_priv);
}
-void tnt4882_request_system_control(gpib_board_t *board, int request_control)
+static void tnt4882_request_system_control(struct gpib_board *board, int request_control)
{
struct tnt4882_priv *priv = board->private_data;
@@ -727,44 +657,43 @@ void tnt4882_request_system_control(gpib_board_t *board, int request_control)
}
}
-void tnt4882_interface_clear(gpib_board_t *board, int assert)
+static void tnt4882_interface_clear(struct gpib_board *board, int assert)
{
struct tnt4882_priv *priv = board->private_data;
nec7210_interface_clear(board, &priv->nec7210_priv, assert);
}
-void tnt4882_remote_enable(gpib_board_t *board, int enable)
+static void tnt4882_remote_enable(struct gpib_board *board, int enable)
{
struct tnt4882_priv *priv = board->private_data;
nec7210_remote_enable(board, &priv->nec7210_priv, enable);
}
-int tnt4882_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+static int tnt4882_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
{
struct tnt4882_priv *priv = board->private_data;
return nec7210_enable_eos(board, &priv->nec7210_priv, eos_byte, compare_8_bits);
}
-void tnt4882_disable_eos(gpib_board_t *board)
+static void tnt4882_disable_eos(struct gpib_board *board)
{
struct tnt4882_priv *priv = board->private_data;
nec7210_disable_eos(board, &priv->nec7210_priv);
}
-unsigned int tnt4882_update_status(gpib_board_t *board, unsigned int clear_mask)
+static unsigned int tnt4882_update_status(struct gpib_board *board, unsigned int clear_mask)
{
unsigned long flags;
u8 line_status;
- unsigned int retval;
struct tnt4882_priv *priv = board->private_data;
spin_lock_irqsave(&board->spinlock, flags);
board->status &= ~clear_mask;
- retval = nec7210_update_status_nolock(board, &priv->nec7210_priv);
+ nec7210_update_status_nolock(board, &priv->nec7210_priv);
/* set / clear SRQ state since it is not cleared by interrupt */
line_status = tnt_readb(priv, BSR);
if (line_status & BCSR_SRQ_BIT)
@@ -775,22 +704,21 @@ unsigned int tnt4882_update_status(gpib_board_t *board, unsigned int clear_mask)
return board->status;
}
-int tnt4882_primary_address(gpib_board_t *board, unsigned int address)
+static int tnt4882_primary_address(struct gpib_board *board, unsigned int address)
{
struct tnt4882_priv *priv = board->private_data;
return nec7210_primary_address(board, &priv->nec7210_priv, address);
}
-int tnt4882_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+static int tnt4882_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
struct tnt4882_priv *priv = board->private_data;
return nec7210_secondary_address(board, &priv->nec7210_priv, address, enable);
}
-int tnt4882_parallel_poll(gpib_board_t *board, uint8_t *result)
-
+static int tnt4882_parallel_poll(struct gpib_board *board, uint8_t *result)
{
struct tnt4882_priv *tnt_priv = board->private_data;
@@ -807,7 +735,7 @@ int tnt4882_parallel_poll(gpib_board_t *board, uint8_t *result)
}
}
-void tnt4882_parallel_poll_configure(gpib_board_t *board, uint8_t config)
+static void tnt4882_parallel_poll_configure(struct gpib_board *board, uint8_t config)
{
struct tnt4882_priv *priv = board->private_data;
@@ -825,7 +753,7 @@ void tnt4882_parallel_poll_configure(gpib_board_t *board, uint8_t config)
}
}
-void tnt4882_parallel_poll_response(gpib_board_t *board, int ist)
+static void tnt4882_parallel_poll_response(struct gpib_board *board, int ist)
{
struct tnt4882_priv *priv = board->private_data;
@@ -835,14 +763,14 @@ void tnt4882_parallel_poll_response(gpib_board_t *board, int ist)
/* this is just used by the old nec7210 isa interfaces, the newer
* boards use tnt4882_serial_poll_response2
*/
-void tnt4882_serial_poll_response(gpib_board_t *board, uint8_t status)
+static void tnt4882_serial_poll_response(struct gpib_board *board, uint8_t status)
{
struct tnt4882_priv *priv = board->private_data;
nec7210_serial_poll_response(board, &priv->nec7210_priv, status);
}
-static void tnt4882_serial_poll_response2(gpib_board_t *board, uint8_t status,
+static void tnt4882_serial_poll_response2(struct gpib_board *board, uint8_t status,
int new_reason_for_service)
{
struct tnt4882_priv *priv = board->private_data;
@@ -876,303 +804,21 @@ static void tnt4882_serial_poll_response2(gpib_board_t *board, uint8_t status,
spin_unlock_irqrestore(&board->spinlock, flags);
}
-uint8_t tnt4882_serial_poll_status(gpib_board_t *board)
+static uint8_t tnt4882_serial_poll_status(struct gpib_board *board)
{
struct tnt4882_priv *priv = board->private_data;
return nec7210_serial_poll_status(board, &priv->nec7210_priv);
}
-void tnt4882_return_to_local(gpib_board_t *board)
+static void tnt4882_return_to_local(struct gpib_board *board)
{
struct tnt4882_priv *priv = board->private_data;
nec7210_return_to_local(board, &priv->nec7210_priv);
}
-static gpib_interface_t ni_pci_interface = {
- .name = "ni_pci",
- .attach = ni_pci_attach,
- .detach = ni_pci_detach,
- .read = tnt4882_accel_read,
- .write = tnt4882_accel_write,
- .command = tnt4882_command,
- .take_control = tnt4882_take_control,
- .go_to_standby = tnt4882_go_to_standby,
- .request_system_control = tnt4882_request_system_control,
- .interface_clear = tnt4882_interface_clear,
- .remote_enable = tnt4882_remote_enable,
- .enable_eos = tnt4882_enable_eos,
- .disable_eos = tnt4882_disable_eos,
- .parallel_poll = tnt4882_parallel_poll,
- .parallel_poll_configure = tnt4882_parallel_poll_configure,
- .parallel_poll_response = tnt4882_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = tnt4882_line_status,
- .update_status = tnt4882_update_status,
- .primary_address = tnt4882_primary_address,
- .secondary_address = tnt4882_secondary_address,
- .serial_poll_response2 = tnt4882_serial_poll_response2,
- .serial_poll_status = tnt4882_serial_poll_status,
- .t1_delay = tnt4882_t1_delay,
- .return_to_local = tnt4882_return_to_local,
-};
-
-static gpib_interface_t ni_pci_accel_interface = {
- .name = "ni_pci_accel",
- .attach = ni_pci_attach,
- .detach = ni_pci_detach,
- .read = tnt4882_accel_read,
- .write = tnt4882_accel_write,
- .command = tnt4882_command,
- .take_control = tnt4882_take_control,
- .go_to_standby = tnt4882_go_to_standby,
- .request_system_control = tnt4882_request_system_control,
- .interface_clear = tnt4882_interface_clear,
- .remote_enable = tnt4882_remote_enable,
- .enable_eos = tnt4882_enable_eos,
- .disable_eos = tnt4882_disable_eos,
- .parallel_poll = tnt4882_parallel_poll,
- .parallel_poll_configure = tnt4882_parallel_poll_configure,
- .parallel_poll_response = tnt4882_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = tnt4882_line_status,
- .update_status = tnt4882_update_status,
- .primary_address = tnt4882_primary_address,
- .secondary_address = tnt4882_secondary_address,
- .serial_poll_response2 = tnt4882_serial_poll_response2,
- .serial_poll_status = tnt4882_serial_poll_status,
- .t1_delay = tnt4882_t1_delay,
- .return_to_local = tnt4882_return_to_local,
-};
-
-static gpib_interface_t ni_isa_interface = {
- .name = "ni_isa",
- .attach = ni_tnt_isa_attach,
- .detach = ni_isa_detach,
- .read = tnt4882_accel_read,
- .write = tnt4882_accel_write,
- .command = tnt4882_command,
- .take_control = tnt4882_take_control,
- .go_to_standby = tnt4882_go_to_standby,
- .request_system_control = tnt4882_request_system_control,
- .interface_clear = tnt4882_interface_clear,
- .remote_enable = tnt4882_remote_enable,
- .enable_eos = tnt4882_enable_eos,
- .disable_eos = tnt4882_disable_eos,
- .parallel_poll = tnt4882_parallel_poll,
- .parallel_poll_configure = tnt4882_parallel_poll_configure,
- .parallel_poll_response = tnt4882_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = tnt4882_line_status,
- .update_status = tnt4882_update_status,
- .primary_address = tnt4882_primary_address,
- .secondary_address = tnt4882_secondary_address,
- .serial_poll_response2 = tnt4882_serial_poll_response2,
- .serial_poll_status = tnt4882_serial_poll_status,
- .t1_delay = tnt4882_t1_delay,
- .return_to_local = tnt4882_return_to_local,
-};
-
-static gpib_interface_t ni_nat4882_isa_interface = {
- .name = "ni_nat4882_isa",
- .attach = ni_nat4882_isa_attach,
- .detach = ni_isa_detach,
- .read = tnt4882_read,
- .write = tnt4882_write,
- .command = tnt4882_command_unaccel,
- .take_control = tnt4882_take_control,
- .go_to_standby = tnt4882_go_to_standby,
- .request_system_control = tnt4882_request_system_control,
- .interface_clear = tnt4882_interface_clear,
- .remote_enable = tnt4882_remote_enable,
- .enable_eos = tnt4882_enable_eos,
- .disable_eos = tnt4882_disable_eos,
- .parallel_poll = tnt4882_parallel_poll,
- .parallel_poll_configure = tnt4882_parallel_poll_configure,
- .parallel_poll_response = tnt4882_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = tnt4882_line_status,
- .update_status = tnt4882_update_status,
- .primary_address = tnt4882_primary_address,
- .secondary_address = tnt4882_secondary_address,
- .serial_poll_response2 = tnt4882_serial_poll_response2,
- .serial_poll_status = tnt4882_serial_poll_status,
- .t1_delay = tnt4882_t1_delay,
- .return_to_local = tnt4882_return_to_local,
-};
-
-static gpib_interface_t ni_nec_isa_interface = {
- .name = "ni_nec_isa",
- .attach = ni_nec_isa_attach,
- .detach = ni_isa_detach,
- .read = tnt4882_read,
- .write = tnt4882_write,
- .command = tnt4882_command_unaccel,
- .take_control = tnt4882_take_control,
- .go_to_standby = tnt4882_go_to_standby,
- .request_system_control = tnt4882_request_system_control,
- .interface_clear = tnt4882_interface_clear,
- .remote_enable = tnt4882_remote_enable,
- .enable_eos = tnt4882_enable_eos,
- .disable_eos = tnt4882_disable_eos,
- .parallel_poll = tnt4882_parallel_poll,
- .parallel_poll_configure = tnt4882_parallel_poll_configure,
- .parallel_poll_response = tnt4882_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = NULL,
- .update_status = tnt4882_update_status,
- .primary_address = tnt4882_primary_address,
- .secondary_address = tnt4882_secondary_address,
- .serial_poll_response = tnt4882_serial_poll_response,
- .serial_poll_status = tnt4882_serial_poll_status,
- .t1_delay = tnt4882_t1_delay,
- .return_to_local = tnt4882_return_to_local,
-};
-
-static gpib_interface_t ni_isa_accel_interface = {
- .name = "ni_isa_accel",
- .attach = ni_tnt_isa_attach,
- .detach = ni_isa_detach,
- .read = tnt4882_accel_read,
- .write = tnt4882_accel_write,
- .command = tnt4882_command,
- .take_control = tnt4882_take_control,
- .go_to_standby = tnt4882_go_to_standby,
- .request_system_control = tnt4882_request_system_control,
- .interface_clear = tnt4882_interface_clear,
- .remote_enable = tnt4882_remote_enable,
- .enable_eos = tnt4882_enable_eos,
- .disable_eos = tnt4882_disable_eos,
- .parallel_poll = tnt4882_parallel_poll,
- .parallel_poll_configure = tnt4882_parallel_poll_configure,
- .parallel_poll_response = tnt4882_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = tnt4882_line_status,
- .update_status = tnt4882_update_status,
- .primary_address = tnt4882_primary_address,
- .secondary_address = tnt4882_secondary_address,
- .serial_poll_response2 = tnt4882_serial_poll_response2,
- .serial_poll_status = tnt4882_serial_poll_status,
- .t1_delay = tnt4882_t1_delay,
- .return_to_local = tnt4882_return_to_local,
-};
-
-static gpib_interface_t ni_nat4882_isa_accel_interface = {
- .name = "ni_nat4882_isa_accel",
- .attach = ni_nat4882_isa_attach,
- .detach = ni_isa_detach,
- .read = tnt4882_accel_read,
- .write = tnt4882_accel_write,
- .command = tnt4882_command_unaccel,
- .take_control = tnt4882_take_control,
- .go_to_standby = tnt4882_go_to_standby,
- .request_system_control = tnt4882_request_system_control,
- .interface_clear = tnt4882_interface_clear,
- .remote_enable = tnt4882_remote_enable,
- .enable_eos = tnt4882_enable_eos,
- .disable_eos = tnt4882_disable_eos,
- .parallel_poll = tnt4882_parallel_poll,
- .parallel_poll_configure = tnt4882_parallel_poll_configure,
- .parallel_poll_response = tnt4882_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = tnt4882_line_status,
- .update_status = tnt4882_update_status,
- .primary_address = tnt4882_primary_address,
- .secondary_address = tnt4882_secondary_address,
- .serial_poll_response2 = tnt4882_serial_poll_response2,
- .serial_poll_status = tnt4882_serial_poll_status,
- .t1_delay = tnt4882_t1_delay,
- .return_to_local = tnt4882_return_to_local,
-};
-
-static gpib_interface_t ni_nec_isa_accel_interface = {
- .name = "ni_nec_isa_accel",
- .attach = ni_nec_isa_attach,
- .detach = ni_isa_detach,
- .read = tnt4882_accel_read,
- .write = tnt4882_accel_write,
- .command = tnt4882_command_unaccel,
- .take_control = tnt4882_take_control,
- .go_to_standby = tnt4882_go_to_standby,
- .request_system_control = tnt4882_request_system_control,
- .interface_clear = tnt4882_interface_clear,
- .remote_enable = tnt4882_remote_enable,
- .enable_eos = tnt4882_enable_eos,
- .disable_eos = tnt4882_disable_eos,
- .parallel_poll = tnt4882_parallel_poll,
- .parallel_poll_configure = tnt4882_parallel_poll_configure,
- .parallel_poll_response = tnt4882_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = NULL,
- .update_status = tnt4882_update_status,
- .primary_address = tnt4882_primary_address,
- .secondary_address = tnt4882_secondary_address,
- .serial_poll_response = tnt4882_serial_poll_response,
- .serial_poll_status = tnt4882_serial_poll_status,
- .t1_delay = tnt4882_t1_delay,
- .return_to_local = tnt4882_return_to_local,
-};
-
-#ifdef GPIB_PCMCIA
-static gpib_interface_t ni_pcmcia_interface = {
- .name = "ni_pcmcia",
- .attach = ni_pcmcia_attach,
- .detach = ni_pcmcia_detach,
- .read = tnt4882_accel_read,
- .write = tnt4882_accel_write,
- .command = tnt4882_command,
- .take_control = tnt4882_take_control,
- .go_to_standby = tnt4882_go_to_standby,
- .request_system_control = tnt4882_request_system_control,
- .interface_clear = tnt4882_interface_clear,
- .remote_enable = tnt4882_remote_enable,
- .enable_eos = tnt4882_enable_eos,
- .disable_eos = tnt4882_disable_eos,
- .parallel_poll = tnt4882_parallel_poll,
- .parallel_poll_configure = tnt4882_parallel_poll_configure,
- .parallel_poll_response = tnt4882_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = tnt4882_line_status,
- .update_status = tnt4882_update_status,
- .primary_address = tnt4882_primary_address,
- .secondary_address = tnt4882_secondary_address,
- .serial_poll_response = tnt4882_serial_poll_response,
- .serial_poll_status = tnt4882_serial_poll_status,
- .t1_delay = tnt4882_t1_delay,
- .return_to_local = tnt4882_return_to_local,
-};
-
-static gpib_interface_t ni_pcmcia_accel_interface = {
- .name = "ni_pcmcia_accel",
- .attach = ni_pcmcia_attach,
- .detach = ni_pcmcia_detach,
- .read = tnt4882_accel_read,
- .write = tnt4882_accel_write,
- .command = tnt4882_command,
- .take_control = tnt4882_take_control,
- .go_to_standby = tnt4882_go_to_standby,
- .request_system_control = tnt4882_request_system_control,
- .interface_clear = tnt4882_interface_clear,
- .remote_enable = tnt4882_remote_enable,
- .enable_eos = tnt4882_enable_eos,
- .disable_eos = tnt4882_disable_eos,
- .parallel_poll = tnt4882_parallel_poll,
- .parallel_poll_configure = tnt4882_parallel_poll_configure,
- .parallel_poll_response = tnt4882_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = tnt4882_line_status,
- .update_status = tnt4882_update_status,
- .primary_address = tnt4882_primary_address,
- .secondary_address = tnt4882_secondary_address,
- .serial_poll_response = tnt4882_serial_poll_response,
- .serial_poll_status = tnt4882_serial_poll_status,
- .t1_delay = tnt4882_t1_delay,
- .return_to_local = tnt4882_return_to_local,
-};
-#endif
-
-void tnt4882_board_reset(struct tnt4882_priv *tnt_priv, gpib_board_t *board)
+static void tnt4882_board_reset(struct tnt4882_priv *tnt_priv, struct gpib_board *board)
{
struct nec7210_priv *nec_priv = &tnt_priv->nec7210_priv;
@@ -1185,7 +831,7 @@ void tnt4882_board_reset(struct tnt4882_priv *tnt_priv, gpib_board_t *board)
nec7210_board_reset(nec_priv, board);
}
-int tnt4882_allocate_private(gpib_board_t *board)
+static int tnt4882_allocate_private(struct gpib_board *board)
{
struct tnt4882_priv *tnt_priv;
@@ -1198,13 +844,13 @@ int tnt4882_allocate_private(gpib_board_t *board)
return 0;
}
-void tnt4882_free_private(gpib_board_t *board)
+static void tnt4882_free_private(struct gpib_board *board)
{
kfree(board->private_data);
board->private_data = NULL;
}
-void tnt4882_init(struct tnt4882_priv *tnt_priv, const gpib_board_t *board)
+static void tnt4882_init(struct tnt4882_priv *tnt_priv, const struct gpib_board *board)
{
struct nec7210_priv *nec_priv = &tnt_priv->nec7210_priv;
@@ -1252,7 +898,7 @@ void tnt4882_init(struct tnt4882_priv *tnt_priv, const gpib_board_t *board)
tnt_writeb(tnt_priv, tnt_priv->imr0_bits, IMR0);
}
-int ni_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int ni_pci_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct tnt4882_priv *tnt_priv;
struct nec7210_priv *nec_priv;
@@ -1271,10 +917,8 @@ int ni_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
nec_priv->write_byte = nec7210_locking_iomem_write_byte;
nec_priv->offset = atgpib_reg_offset;
- if (!mite_devices) {
- pr_err("no National Instruments PCI boards found\n");
- return -1;
- }
+ if (!mite_devices)
+ return -ENODEV;
for (mite = mite_devices; mite; mite = mite->next) {
short found_board;
@@ -1305,37 +949,32 @@ int ni_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
if (found_board)
break;
}
- if (!mite) {
- pr_err("no NI PCI-GPIB boards found\n");
- return -1;
- }
+ if (!mite)
+ return -ENODEV;
+
tnt_priv->mite = mite;
retval = mite_setup(tnt_priv->mite);
- if (retval < 0) {
- pr_err("tnt4882: error setting up mite.\n");
+ if (retval < 0)
return retval;
- }
nec_priv->mmiobase = tnt_priv->mite->daq_io_addr;
// get irq
- if (request_irq(mite_irq(tnt_priv->mite), tnt4882_interrupt, isr_flags,
- "ni-pci-gpib", board)) {
- pr_err("gpib: can't request IRQ %d\n", mite_irq(tnt_priv->mite));
- return -1;
+ retval = request_irq(mite_irq(tnt_priv->mite), tnt4882_interrupt, isr_flags, "ni-pci-gpib",
+ board);
+ if (retval) {
+ dev_err(board->gpib_dev, "failed to obtain pci irq %d\n", mite_irq(tnt_priv->mite));
+ return retval;
}
tnt_priv->irq = mite_irq(tnt_priv->mite);
- pr_info("tnt4882: irq %i\n", tnt_priv->irq);
// TNT5004 detection
switch (tnt_readb(tnt_priv, CSR) & 0xf0) {
case 0x30:
nec_priv->type = TNT4882;
- pr_info("tnt4882: TNT4882 chipset detected\n");
break;
case 0x40:
nec_priv->type = TNT5004;
- pr_info("tnt4882: TNT5004 chipset detected\n");
break;
}
tnt4882_init(tnt_priv, board);
@@ -1343,7 +982,7 @@ int ni_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
return 0;
}
-void ni_pci_detach(gpib_board_t *board)
+static void ni_pci_detach(struct gpib_board *board)
{
struct tnt4882_priv *tnt_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -1365,28 +1004,22 @@ static int ni_isapnp_find(struct pnp_dev **dev)
{
*dev = pnp_find_dev(NULL, ISAPNP_VENDOR_ID_NI,
ISAPNP_FUNCTION(ISAPNP_ID_NI_ATGPIB_TNT), NULL);
- if (!*dev || !(*dev)->card) {
- pr_err("tnt4882: failed to find isapnp board\n");
+ if (!*dev || !(*dev)->card)
return -ENODEV;
- }
- if (pnp_device_attach(*dev) < 0) {
- pr_err("tnt4882: atgpib/tnt board already active, skipping\n");
+ if (pnp_device_attach(*dev) < 0)
return -EBUSY;
- }
if (pnp_activate_dev(*dev) < 0) {
pnp_device_detach(*dev);
- pr_err("tnt4882: failed to activate() atgpib/tnt, aborting\n");
return -EAGAIN;
}
if (!pnp_port_valid(*dev, 0) || !pnp_irq_valid(*dev, 0)) {
pnp_device_detach(*dev);
- pr_err("tnt4882: invalid port or irq for atgpib/tnt, aborting\n");
- return -ENOMEM;
+ return -EINVAL;
}
return 0;
}
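ni_isapnp_find() now reports distinct failure modes instead of -1/-ENOMEM: -ENODEV when no card is found, -EBUSY when the device is already attached, -EAGAIN when activation fails and -EINVAL for invalid port/irq resources. A hedged sketch of a caller consuming the result (illustrative only; the real caller is ni_isa_attach_common() below):

static int example_pnp_resources(unsigned long *iobase, int *irq)
{
	struct pnp_dev *dev;
	int retval = ni_isapnp_find(&dev);

	if (retval)
		return retval;	/* -ENODEV, -EBUSY, -EAGAIN or -EINVAL */

	/* port 0 and irq 0 were validated by the helper */
	*iobase = pnp_port_start(dev, 0);
	*irq = pnp_irq(dev, 0);
	return 0;
}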
-static int ni_isa_attach_common(gpib_board_t *board, const gpib_board_config_t *config,
+static int ni_isa_attach_common(struct gpib_board *board, const gpib_board_config_t *config,
enum nec7210_chipset chipset)
{
struct tnt4882_priv *tnt_priv;
@@ -1394,6 +1027,7 @@ static int ni_isa_attach_common(gpib_board_t *board, const gpib_board_config_t *
int isr_flags = 0;
u32 iobase;
int irq;
+ int retval;
board->status = 0;
@@ -1409,7 +1043,6 @@ static int ni_isa_attach_common(gpib_board_t *board, const gpib_board_config_t *
// look for plug-n-play board
if (config->ibbase == 0) {
struct pnp_dev *dev;
- int retval;
retval = ni_isapnp_find(&dev);
if (retval < 0)
@@ -1422,18 +1055,18 @@ static int ni_isa_attach_common(gpib_board_t *board, const gpib_board_config_t *
irq = config->ibirq;
}
// allocate ioports
- if (!request_region(iobase, atgpib_iosize, "atgpib")) {
- pr_err("tnt4882: failed to allocate ioports\n");
- return -1;
- }
+ if (!request_region(iobase, atgpib_iosize, "atgpib"))
+ return -EBUSY;
+
nec_priv->mmiobase = ioport_map(iobase, atgpib_iosize);
if (!nec_priv->mmiobase)
- return -1;
+ return -ENOMEM;
// get irq
- if (request_irq(irq, tnt4882_interrupt, isr_flags, "atgpib", board)) {
- pr_err("gpib: can't request IRQ %d\n", irq);
- return -1;
+ retval = request_irq(irq, tnt4882_interrupt, isr_flags, "atgpib", board);
+ if (retval) {
+ dev_err(board->gpib_dev, "failed to request ISA irq %d\n", irq);
+ return retval;
}
tnt_priv->irq = irq;
@@ -1442,22 +1075,22 @@ static int ni_isa_attach_common(gpib_board_t *board, const gpib_board_config_t *
return 0;
}
-int ni_tnt_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int ni_tnt_isa_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
return ni_isa_attach_common(board, config, TNT4882);
}
-int ni_nat4882_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int ni_nat4882_isa_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
return ni_isa_attach_common(board, config, NAT4882);
}
-int ni_nec_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int ni_nec_isa_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
return ni_isa_attach_common(board, config, NEC7210);
}
-void ni_isa_detach(gpib_board_t *board)
+static void ni_isa_detach(struct gpib_board *board)
{
struct tnt4882_priv *tnt_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -1483,6 +1116,230 @@ static int tnt4882_pci_probe(struct pci_dev *dev, const struct pci_device_id *id
return 0;
}
+static gpib_interface_t ni_pci_interface = {
+ .name = "ni_pci",
+ .attach = ni_pci_attach,
+ .detach = ni_pci_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
+};
+
+static gpib_interface_t ni_pci_accel_interface = {
+ .name = "ni_pci_accel",
+ .attach = ni_pci_attach,
+ .detach = ni_pci_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
+};
+
+static gpib_interface_t ni_isa_interface = {
+ .name = "ni_isa",
+ .attach = ni_tnt_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
+};
+
+static gpib_interface_t ni_nat4882_isa_interface = {
+ .name = "ni_nat4882_isa",
+ .attach = ni_nat4882_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_read,
+ .write = tnt4882_write,
+ .command = tnt4882_command_unaccel,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
+};
+
+static gpib_interface_t ni_nec_isa_interface = {
+ .name = "ni_nec_isa",
+ .attach = ni_nec_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_read,
+ .write = tnt4882_write,
+ .command = tnt4882_command_unaccel,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response = tnt4882_serial_poll_response,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
+};
+
+static gpib_interface_t ni_isa_accel_interface = {
+ .name = "ni_isa_accel",
+ .attach = ni_tnt_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
+};
+
+static gpib_interface_t ni_nat4882_isa_accel_interface = {
+ .name = "ni_nat4882_isa_accel",
+ .attach = ni_nat4882_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command_unaccel,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
+};
+
+static gpib_interface_t ni_nec_isa_accel_interface = {
+ .name = "ni_nec_isa_accel",
+ .attach = ni_nec_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command_unaccel,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response = tnt4882_serial_poll_response,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
+};
+
static const struct pci_device_id tnt4882_pci_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_NATINST, PCI_DEVICE_ID_NI_GPIB)},
{PCI_DEVICE(PCI_VENDOR_ID_NATINST, PCI_DEVICE_ID_NI_GPIB_PLUS)},
@@ -1499,16 +1356,26 @@ static const struct pci_device_id tnt4882_pci_table[] = {
MODULE_DEVICE_TABLE(pci, tnt4882_pci_table);
static struct pci_driver tnt4882_pci_driver = {
- .name = "tnt4882",
+ .name = DRV_NAME,
.id_table = tnt4882_pci_table,
.probe = &tnt4882_pci_probe
};
+#if 0
+/* unused, will be needed when the driver is turned into a pnp_driver */
static const struct pnp_device_id tnt4882_pnp_table[] = {
{.id = "NICC601"},
{.id = ""}
};
MODULE_DEVICE_TABLE(pnp, tnt4882_pnp_table);
+#endif
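The #if 0 keeps the id table around for the pnp_driver conversion the comment anticipates; a hedged sketch of the shape such a driver would take (the probe callback is a hypothetical placeholder, only the struct fields are standard pnp API):

static int tnt4882_pnp_probe(struct pnp_dev *dev,
			     const struct pnp_device_id *id)
{
	return -ENODEV;	/* placeholder until the conversion lands */
}

static struct pnp_driver tnt4882_pnp_driver = {
	.name = DRV_NAME,
	.id_table = tnt4882_pnp_table,
	.probe = tnt4882_pnp_probe,
};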
+
+#ifdef CONFIG_GPIB_PCMCIA
+static gpib_interface_t ni_pcmcia_interface;
+static gpib_interface_t ni_pcmcia_accel_interface;
+static int __init init_ni_gpib_cs(void);
+static void __exit exit_ni_gpib_cs(void);
+#endif
static int __init tnt4882_init_module(void)
{
@@ -1516,84 +1383,83 @@ static int __init tnt4882_init_module(void)
result = pci_register_driver(&tnt4882_pci_driver);
if (result) {
- pr_err("tnt4882_gpib: pci_register_driver failed: error = %d\n", result);
+ pr_err("pci_register_driver failed: error = %d\n", result);
return result;
}
result = gpib_register_driver(&ni_isa_interface, THIS_MODULE);
if (result) {
- pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_isa;
}
result = gpib_register_driver(&ni_isa_accel_interface, THIS_MODULE);
if (result) {
- pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_isa_accel;
}
result = gpib_register_driver(&ni_nat4882_isa_interface, THIS_MODULE);
if (result) {
- pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_nat4882_isa;
}
result = gpib_register_driver(&ni_nat4882_isa_accel_interface, THIS_MODULE);
if (result) {
- pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_nat4882_isa_accel;
}
result = gpib_register_driver(&ni_nec_isa_interface, THIS_MODULE);
if (result) {
- pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_nec_isa;
}
result = gpib_register_driver(&ni_nec_isa_accel_interface, THIS_MODULE);
if (result) {
- pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_nec_isa_accel;
}
result = gpib_register_driver(&ni_pci_interface, THIS_MODULE);
if (result) {
- pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_pci;
}
result = gpib_register_driver(&ni_pci_accel_interface, THIS_MODULE);
if (result) {
- pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_pci_accel;
}
-#ifdef GPIB_PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
result = gpib_register_driver(&ni_pcmcia_interface, THIS_MODULE);
if (result) {
- pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_pcmcia;
}
result = gpib_register_driver(&ni_pcmcia_accel_interface, THIS_MODULE);
if (result) {
- pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_pcmcia_accel;
}
result = init_ni_gpib_cs();
if (result) {
- pr_err("tnt4882_gpib: pcmcia_register_driver failed: error = %d\n", result);
+ pr_err("pcmcia_register_driver failed: error = %d\n", result);
goto err_pcmcia_driver;
}
#endif
mite_init();
- mite_list_devices();
return 0;
-#ifdef GPIB_PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
err_pcmcia_driver:
gpib_unregister_driver(&ni_pcmcia_accel_interface);
err_pcmcia_accel:
@@ -1631,7 +1497,7 @@ static void __exit tnt4882_exit_module(void)
gpib_unregister_driver(&ni_nec_isa_accel_interface);
gpib_unregister_driver(&ni_pci_interface);
gpib_unregister_driver(&ni_pci_accel_interface);
-#ifdef GPIB_PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
gpib_unregister_driver(&ni_pcmcia_interface);
gpib_unregister_driver(&ni_pcmcia_accel_interface);
exit_ni_gpib_cs();
@@ -1642,7 +1508,7 @@ static void __exit tnt4882_exit_module(void)
pci_unregister_driver(&tnt4882_pci_driver);
}
-#ifdef GPIB_PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
#include <linux/kernel.h>
#include <linux/moduleparam.h>
@@ -1655,29 +1521,9 @@ static void __exit tnt4882_exit_module(void)
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
-/*
- * All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
- * you do not define PCMCIA_DEBUG at all, all the debug code will be
- * left out. If you compile with PCMCIA_DEBUG=0, the debug code will
- * be present but disabled -- but it can then be enabled for specific
- * modules at load time with a 'pc_debug=#' option to insmod.
- */
-#define PCMCIA_DEBUG 1
-#ifdef PCMCIA_DEBUG
-static int pc_debug = PCMCIA_DEBUG;
-module_param(pc_debug, int, 0);
-#define DEBUG(n, args...) \
- do {if (pc_debug > (n)) \
- pr_debug(args); } \
- while (0)
-#else
-#define DEBUG(args...)
-#endif
-
static int ni_gpib_config(struct pcmcia_device *link);
static void ni_gpib_release(struct pcmcia_device *link);
-static int ni_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static void ni_pcmcia_detach(gpib_board_t *board);
+static void ni_pcmcia_detach(struct gpib_board *board);
/*
* A linked list of "instances" of the dummy device. Each actual
@@ -1696,7 +1542,7 @@ static struct pcmcia_device *curr_dev;
struct local_info_t {
struct pcmcia_device *p_dev;
- gpib_board_t *dev;
+ struct gpib_board *dev;
int stop;
struct bus_operations *bus;
};
@@ -1710,9 +1556,7 @@ struct local_info_t {
static int ni_gpib_probe(struct pcmcia_device *link)
{
struct local_info_t *info;
- //struct gpib_board_t *dev;
-
- DEBUG(0, "%s(0x%p)\n", __func__, link);
+ //struct gpib_board *dev;
/* Allocate space for private device-specific data */
info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -1745,9 +1589,7 @@ static int ni_gpib_probe(struct pcmcia_device *link)
static void ni_gpib_remove(struct pcmcia_device *link)
{
struct local_info_t *info = link->priv;
- //struct gpib_board_t *dev = info->dev;
-
- DEBUG(0, "%s(%p)\n", __func__, link);
+ //struct gpib_board *dev = info->dev;
if (info->dev)
ni_pcmcia_detach(info->dev);
@@ -1776,11 +1618,9 @@ static int ni_gpib_config_iteration(struct pcmcia_device *link, void *priv_data)
static int ni_gpib_config(struct pcmcia_device *link)
{
//struct local_info_t *info = link->priv;
- //gpib_board_t *dev = info->dev;
+ //struct gpib_board *dev = info->dev;
int last_ret;
- DEBUG(0, "%s(0x%p)\n", __func__, link);
-
last_ret = pcmcia_loop_config(link, &ni_gpib_config_iteration, NULL);
if (last_ret) {
dev_warn(&link->dev, "no configuration found\n");
@@ -1803,18 +1643,16 @@ static int ni_gpib_config(struct pcmcia_device *link)
*/
static void ni_gpib_release(struct pcmcia_device *link)
{
- DEBUG(0, "%s(0x%p)\n", __func__, link);
pcmcia_disable_device(link);
} /* ni_gpib_release */
static int ni_gpib_suspend(struct pcmcia_device *link)
{
//struct local_info_t *info = link->priv;
- //struct gpib_board_t *dev = info->dev;
- DEBUG(0, "%s(0x%p)\n", __func__, link);
+ //struct gpib_board *dev = info->dev;
if (link->open)
- pr_err("Device still open ???\n");
+ dev_warn(&link->dev, "Device still open\n");
//netif_device_detach(dev);
return 0;
@@ -1823,12 +1661,10 @@ static int ni_gpib_suspend(struct pcmcia_device *link)
static int ni_gpib_resume(struct pcmcia_device *link)
{
//struct local_info_t *info = link->priv;
- //struct gpib_board_t *dev = info->dev;
- DEBUG(0, "%s(0x%p)\n", __func__, link);
+ //struct gpib_board *dev = info->dev;
/*if (link->open) {
* ni_gpib_probe(dev); / really?
- * printk("Gpib resumed ???\n");
* //netif_device_attach(dev);
*}
*/
@@ -1854,32 +1690,28 @@ static struct pcmcia_driver ni_gpib_cs_driver = {
.resume = ni_gpib_resume,
};
-int __init init_ni_gpib_cs(void)
+static int __init init_ni_gpib_cs(void)
{
return pcmcia_register_driver(&ni_gpib_cs_driver);
}
-void __exit exit_ni_gpib_cs(void)
+static void __exit exit_ni_gpib_cs(void)
{
- DEBUG(0, "ni_gpib_cs: unloading\n");
pcmcia_unregister_driver(&ni_gpib_cs_driver);
}
static const int pcmcia_gpib_iosize = 32;
-int ni_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int ni_pcmcia_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct local_info_t *info;
struct tnt4882_priv *tnt_priv;
struct nec7210_priv *nec_priv;
int isr_flags = IRQF_SHARED;
+ int retval;
- DEBUG(0, "%s(0x%p)\n", __func__, board);
-
- if (!curr_dev) {
- pr_err("gpib: no NI PCMCIA board found\n");
- return -1;
- }
+ if (!curr_dev)
+ return -ENODEV;
info = curr_dev->priv;
info->dev = board;
@@ -1888,6 +1720,7 @@ int ni_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
if (tnt4882_allocate_private(board))
return -ENOMEM;
+
tnt_priv = board->private_data;
nec_priv = &tnt_priv->nec7210_priv;
nec_priv->type = TNT4882;
@@ -1895,23 +1728,20 @@ int ni_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
nec_priv->write_byte = nec7210_locking_ioport_write_byte;
nec_priv->offset = atgpib_reg_offset;
- DEBUG(0, "ioport1 window attributes: 0x%lx\n", curr_dev->resource[0]->flags);
if (!request_region(curr_dev->resource[0]->start, resource_size(curr_dev->resource[0]),
- "tnt4882")) {
- pr_err("gpib: ioports starting at 0x%lx are already in use\n",
- (unsigned long)curr_dev->resource[0]->start);
- return -EIO;
- }
+ DRV_NAME))
+ return -ENOMEM;
nec_priv->mmiobase = ioport_map(curr_dev->resource[0]->start,
resource_size(curr_dev->resource[0]));
if (!nec_priv->mmiobase)
- return -1;
+ return -ENOMEM;
// get irq
- if (request_irq(curr_dev->irq, tnt4882_interrupt, isr_flags, "tnt4882", board)) {
- pr_err("gpib: can't request IRQ %d\n", curr_dev->irq);
- return -1;
+ retval = request_irq(curr_dev->irq, tnt4882_interrupt, isr_flags, DRV_NAME, board);
+ if (retval) {
+ dev_err(board->gpib_dev, "failed to obtain PCMCIA irq %d\n", curr_dev->irq);
+ return retval;
}
tnt_priv->irq = curr_dev->irq;
@@ -1920,13 +1750,11 @@ int ni_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
return 0;
}
-void ni_pcmcia_detach(gpib_board_t *board)
+static void ni_pcmcia_detach(struct gpib_board *board)
{
struct tnt4882_priv *tnt_priv = board->private_data;
struct nec7210_priv *nec_priv;
- DEBUG(0, "%s(0x%p)\n", __func__, board);
-
if (tnt_priv) {
nec_priv = &tnt_priv->nec7210_priv;
if (tnt_priv->irq)
@@ -1941,7 +1769,63 @@ void ni_pcmcia_detach(gpib_board_t *board)
tnt4882_free_private(board);
}
-#endif // GPIB_PCMCIA
+static gpib_interface_t ni_pcmcia_interface = {
+ .name = "ni_pcmcia",
+ .attach = ni_pcmcia_attach,
+ .detach = ni_pcmcia_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response = tnt4882_serial_poll_response,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
+};
+
+static gpib_interface_t ni_pcmcia_accel_interface = {
+ .name = "ni_pcmcia_accel",
+ .attach = ni_pcmcia_attach,
+ .detach = ni_pcmcia_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response = tnt4882_serial_poll_response,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
+};
+
+#endif // CONFIG_GPIB_PCMCIA
module_init(tnt4882_init_module);
module_exit(tnt4882_exit_module);
diff --git a/drivers/staging/gpib/uapi/gpib_user.h b/drivers/staging/gpib/uapi/gpib_user.h
index 0896a55a758f..5ff4588686fd 100644
--- a/drivers/staging/gpib/uapi/gpib_user.h
+++ b/drivers/staging/gpib/uapi/gpib_user.h
@@ -106,26 +106,15 @@ enum eos_flags {
/* GPIB Bus Control Lines bit vector */
enum bus_control_line {
- ValidDAV = 0x01,
- ValidNDAC = 0x02,
- ValidNRFD = 0x04,
- ValidIFC = 0x08,
- ValidREN = 0x10,
- ValidSRQ = 0x20,
- ValidATN = 0x40,
- ValidEOI = 0x80,
- ValidALL = 0xff,
- BusDAV = 0x0100, /* DAV line status bit */
- BusNDAC = 0x0200, /* NDAC line status bit */
- BusNRFD = 0x0400, /* NRFD line status bit */
- BusIFC = 0x0800, /* IFC line status bit */
- BusREN = 0x1000, /* REN line status bit */
- BusSRQ = 0x2000, /* SRQ line status bit */
- BusATN = 0x4000, /* ATN line status bit */
- BusEOI = 0x8000 /* EOI line status bit */
-};
-
-enum old_bus_control_line {
+ VALID_DAV = 0x01,
+ VALID_NDAC = 0x02,
+ VALID_NRFD = 0x04,
+ VALID_IFC = 0x08,
+ VALID_REN = 0x10,
+ VALID_SRQ = 0x20,
+ VALID_ATN = 0x40,
+ VALID_EOI = 0x80,
+ VALID_ALL = 0xff,
BUS_DAV = 0x0100, /* DAV line status bit */
BUS_NDAC = 0x0200, /* NDAC line status bit */
BUS_NRFD = 0x0400, /* NRFD line status bit */
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index 8eab94cb06fa..308ed1ca9947 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -948,7 +948,8 @@ static int gb_tty_init(void)
{
int retval = 0;
- gb_tty_driver = tty_alloc_driver(GB_NUM_MINORS, 0);
+ gb_tty_driver = tty_alloc_driver(GB_NUM_MINORS, TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(gb_tty_driver)) {
pr_err("Can not allocate tty driver\n");
retval = -ENOMEM;
@@ -961,7 +962,6 @@ static int gb_tty_init(void)
gb_tty_driver->minor_start = 0;
gb_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
gb_tty_driver->subtype = SERIAL_TYPE_NORMAL;
- gb_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
gb_tty_driver->init_termios = tty_std_termios;
gb_tty_driver->init_termios.c_cflag = B9600 | CS8 |
CREAD | HUPCL | CLOCAL;
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index 3318997a7009..cee51f64bc4b 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -16,16 +16,4 @@ config ADIS16203
To compile this driver as a module, say M here: the module will be
called adis16203.
-config ADIS16240
- tristate "Analog Devices ADIS16240 Programmable Impact Sensor and Recorder"
- depends on SPI
- select IIO_ADIS_LIB
- select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
- help
- Say Y here to build support for Analog Devices adis16240 programmable
- impact Sensor and recorder.
-
- To compile this driver as a module, say M here: the module will be
- called adis16240.
-
endmenu
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index 094cc9be35bd..acac7bc9b9c0 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -4,4 +4,3 @@
#
obj-$(CONFIG_ADIS16203) += adis16203.o
-obj-$(CONFIG_ADIS16240) += adis16240.o
diff --git a/drivers/staging/iio/accel/adis16240.c b/drivers/staging/iio/accel/adis16240.c
deleted file mode 100644
index 3be3eaf5d9d4..000000000000
--- a/drivers/staging/iio/accel/adis16240.c
+++ /dev/null
@@ -1,443 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * ADIS16240 Programmable Impact Sensor and Recorder driver
- *
- * Copyright 2010 Analog Devices Inc.
- */
-
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/sysfs.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/imu/adis.h>
-
-#define ADIS16240_STARTUP_DELAY 220 /* ms */
-
-/* Flash memory write count */
-#define ADIS16240_FLASH_CNT 0x00
-
-/* Output, power supply */
-#define ADIS16240_SUPPLY_OUT 0x02
-
-/* Output, x-axis accelerometer */
-#define ADIS16240_XACCL_OUT 0x04
-
-/* Output, y-axis accelerometer */
-#define ADIS16240_YACCL_OUT 0x06
-
-/* Output, z-axis accelerometer */
-#define ADIS16240_ZACCL_OUT 0x08
-
-/* Output, auxiliary ADC input */
-#define ADIS16240_AUX_ADC 0x0A
-
-/* Output, temperature */
-#define ADIS16240_TEMP_OUT 0x0C
-
-/* Output, x-axis acceleration peak */
-#define ADIS16240_XPEAK_OUT 0x0E
-
-/* Output, y-axis acceleration peak */
-#define ADIS16240_YPEAK_OUT 0x10
-
-/* Output, z-axis acceleration peak */
-#define ADIS16240_ZPEAK_OUT 0x12
-
-/* Output, sum-of-squares acceleration peak */
-#define ADIS16240_XYZPEAK_OUT 0x14
-
-/* Output, Capture Buffer 1, X and Y acceleration */
-#define ADIS16240_CAPT_BUF1 0x16
-
-/* Output, Capture Buffer 2, Z acceleration */
-#define ADIS16240_CAPT_BUF2 0x18
-
-/* Diagnostic, error flags */
-#define ADIS16240_DIAG_STAT 0x1A
-
-/* Diagnostic, event counter */
-#define ADIS16240_EVNT_CNTR 0x1C
-
-/* Diagnostic, check sum value from firmware test */
-#define ADIS16240_CHK_SUM 0x1E
-
-/* Calibration, x-axis acceleration offset adjustment */
-#define ADIS16240_XACCL_OFF 0x20
-
-/* Calibration, y-axis acceleration offset adjustment */
-#define ADIS16240_YACCL_OFF 0x22
-
-/* Calibration, z-axis acceleration offset adjustment */
-#define ADIS16240_ZACCL_OFF 0x24
-
-/* Clock, hour and minute */
-#define ADIS16240_CLK_TIME 0x2E
-
-/* Clock, month and day */
-#define ADIS16240_CLK_DATE 0x30
-
-/* Clock, year */
-#define ADIS16240_CLK_YEAR 0x32
-
-/* Wake-up setting, hour and minute */
-#define ADIS16240_WAKE_TIME 0x34
-
-/* Wake-up setting, month and day */
-#define ADIS16240_WAKE_DATE 0x36
-
-/* Alarm 1 amplitude threshold */
-#define ADIS16240_ALM_MAG1 0x38
-
-/* Alarm 2 amplitude threshold */
-#define ADIS16240_ALM_MAG2 0x3A
-
-/* Alarm control */
-#define ADIS16240_ALM_CTRL 0x3C
-
-/* Capture, external trigger control */
-#define ADIS16240_XTRIG_CTRL 0x3E
-
-/* Capture, address pointer */
-#define ADIS16240_CAPT_PNTR 0x40
-
-/* Capture, configuration and control */
-#define ADIS16240_CAPT_CTRL 0x42
-
-/* General-purpose digital input/output control */
-#define ADIS16240_GPIO_CTRL 0x44
-
-/* Miscellaneous control */
-#define ADIS16240_MSC_CTRL 0x46
-
-/* Internal sample period (rate) control */
-#define ADIS16240_SMPL_PRD 0x48
-
-/* System command */
-#define ADIS16240_GLOB_CMD 0x4A
-
-/* MSC_CTRL */
-
-/* Enables sum-of-squares output (XYZPEAK_OUT) */
-#define ADIS16240_MSC_CTRL_XYZPEAK_OUT_EN BIT(15)
-
-/* Enables peak tracking output (XPEAK_OUT, YPEAK_OUT, and ZPEAK_OUT) */
-#define ADIS16240_MSC_CTRL_X_Y_ZPEAK_OUT_EN BIT(14)
-
-/* Self-test enable: 1 = apply electrostatic force, 0 = disabled */
-#define ADIS16240_MSC_CTRL_SELF_TEST_EN BIT(8)
-
-/* Data-ready enable: 1 = enabled, 0 = disabled */
-#define ADIS16240_MSC_CTRL_DATA_RDY_EN BIT(2)
-
-/* Data-ready polarity: 1 = active high, 0 = active low */
-#define ADIS16240_MSC_CTRL_ACTIVE_HIGH BIT(1)
-
-/* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
-#define ADIS16240_MSC_CTRL_DATA_RDY_DIO2 BIT(0)
-
-/* DIAG_STAT */
-
-/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16240_DIAG_STAT_ALARM2 BIT(9)
-
-/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16240_DIAG_STAT_ALARM1 BIT(8)
-
-/* Capture buffer full: 1 = capture buffer is full */
-#define ADIS16240_DIAG_STAT_CPT_BUF_FUL BIT(7)
-
-/* Flash test, checksum flag: 1 = mismatch, 0 = match */
-#define ADIS16240_DIAG_STAT_CHKSUM BIT(6)
-
-/* Power-on, self-test flag: 1 = failure, 0 = pass */
-#define ADIS16240_DIAG_STAT_PWRON_FAIL_BIT 5
-
-/* Power-on self-test: 1 = in-progress, 0 = complete */
-#define ADIS16240_DIAG_STAT_PWRON_BUSY BIT(4)
-
-/* SPI communications failure */
-#define ADIS16240_DIAG_STAT_SPI_FAIL_BIT 3
-
-/* Flash update failure */
-#define ADIS16240_DIAG_STAT_FLASH_UPT_BIT 2
-
-/* Power supply above 3.625 V */
-#define ADIS16240_DIAG_STAT_POWER_HIGH_BIT 1
-
- /* Power supply below 2.225 V */
-#define ADIS16240_DIAG_STAT_POWER_LOW_BIT 0
-
-/* GLOB_CMD */
-
-#define ADIS16240_GLOB_CMD_RESUME BIT(8)
-#define ADIS16240_GLOB_CMD_SW_RESET BIT(7)
-#define ADIS16240_GLOB_CMD_STANDBY BIT(2)
-
-#define ADIS16240_ERROR_ACTIVE BIT(14)
-
-/* At the moment triggers are only used for ring buffer
- * filling. This may change!
- */
-
-enum adis16240_scan {
- ADIS16240_SCAN_ACC_X,
- ADIS16240_SCAN_ACC_Y,
- ADIS16240_SCAN_ACC_Z,
- ADIS16240_SCAN_SUPPLY,
- ADIS16240_SCAN_AUX_ADC,
- ADIS16240_SCAN_TEMP,
-};
-
-static ssize_t adis16240_spi_read_signed(struct device *dev,
- struct device_attribute *attr,
- char *buf,
- unsigned int bits)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct adis *st = iio_priv(indio_dev);
- int ret;
- s16 val = 0;
- unsigned int shift = 16 - bits;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = adis_read_reg_16(st,
- this_attr->address, (u16 *)&val);
- if (ret)
- return ret;
-
- if (val & ADIS16240_ERROR_ACTIVE)
- adis_check_status(st);
-
- val = (s16)(val << shift) >> shift;
- return sprintf(buf, "%d\n", val);
-}
-
-static ssize_t adis16240_read_12bit_signed(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return adis16240_spi_read_signed(dev, attr, buf, 12);
-}
-
-static IIO_DEVICE_ATTR(in_accel_xyz_squared_peak_raw, 0444,
- adis16240_read_12bit_signed, NULL,
- ADIS16240_XYZPEAK_OUT);
-
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("4096");
-
-static const u8 adis16240_addresses[][2] = {
- [ADIS16240_SCAN_ACC_X] = { ADIS16240_XACCL_OFF, ADIS16240_XPEAK_OUT },
- [ADIS16240_SCAN_ACC_Y] = { ADIS16240_YACCL_OFF, ADIS16240_YPEAK_OUT },
- [ADIS16240_SCAN_ACC_Z] = { ADIS16240_ZACCL_OFF, ADIS16240_ZPEAK_OUT },
-};
-
-static int adis16240_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int ret;
- u8 addr;
- s16 val16;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- return adis_single_conversion(indio_dev, chan,
- ADIS16240_ERROR_ACTIVE, val);
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (chan->channel == 0) {
- *val = 4;
- *val2 = 880000; /* 4.88 mV */
- return IIO_VAL_INT_PLUS_MICRO;
- }
- return -EINVAL;
- case IIO_TEMP:
- *val = 244; /* 0.244 C */
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_ACCEL:
- *val = 0;
- *val2 = IIO_G_TO_M_S_2(51400); /* 51.4 mg */
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
- break;
- case IIO_CHAN_INFO_PEAK_SCALE:
- *val = 0;
- *val2 = IIO_G_TO_M_S_2(51400); /* 51.4 mg */
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_CHAN_INFO_OFFSET:
- *val = 25000 / 244 - 0x133; /* 25 C = 0x133 */
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_CALIBBIAS:
- addr = adis16240_addresses[chan->scan_index][0];
- ret = adis_read_reg_16(st, addr, &val16);
- if (ret)
- return ret;
- *val = sign_extend32(val16, 9);
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_PEAK:
- addr = adis16240_addresses[chan->scan_index][1];
- ret = adis_read_reg_16(st, addr, &val16);
- if (ret)
- return ret;
- *val = sign_extend32(val16, 9);
- return IIO_VAL_INT;
- }
- return -EINVAL;
-}
-
-static int adis16240_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- u8 addr;
-
- switch (mask) {
- case IIO_CHAN_INFO_CALIBBIAS:
- addr = adis16240_addresses[chan->scan_index][0];
- return adis_write_reg_16(st, addr, val & GENMASK(9, 0));
- }
- return -EINVAL;
-}
-
-static const struct iio_chan_spec adis16240_channels[] = {
- ADIS_SUPPLY_CHAN(ADIS16240_SUPPLY_OUT, ADIS16240_SCAN_SUPPLY, 0, 10),
- ADIS_AUX_ADC_CHAN(ADIS16240_AUX_ADC, ADIS16240_SCAN_AUX_ADC, 0, 10),
- ADIS_ACCEL_CHAN(X, ADIS16240_XACCL_OUT, ADIS16240_SCAN_ACC_X,
- BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
- 0, 10),
- ADIS_ACCEL_CHAN(Y, ADIS16240_YACCL_OUT, ADIS16240_SCAN_ACC_Y,
- BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
- 0, 10),
- ADIS_ACCEL_CHAN(Z, ADIS16240_ZACCL_OUT, ADIS16240_SCAN_ACC_Z,
- BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
- 0, 10),
- ADIS_TEMP_CHAN(ADIS16240_TEMP_OUT, ADIS16240_SCAN_TEMP, 0, 10),
- IIO_CHAN_SOFT_TIMESTAMP(6)
-};
-
-static struct attribute *adis16240_attributes[] = {
- &iio_dev_attr_in_accel_xyz_squared_peak_raw.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group adis16240_attribute_group = {
- .attrs = adis16240_attributes,
-};
-
-static const struct iio_info adis16240_info = {
- .attrs = &adis16240_attribute_group,
- .read_raw = adis16240_read_raw,
- .write_raw = adis16240_write_raw,
- .update_scan_mode = adis_update_scan_mode,
-};
-
-static const char * const adis16240_status_error_msgs[] = {
- [ADIS16240_DIAG_STAT_PWRON_FAIL_BIT] = "Power on, self-test failed",
- [ADIS16240_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
- [ADIS16240_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
- [ADIS16240_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
- [ADIS16240_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 2.225V",
-};
-
-static const struct adis_timeout adis16240_timeouts = {
- .reset_ms = ADIS16240_STARTUP_DELAY,
- .sw_reset_ms = ADIS16240_STARTUP_DELAY,
- .self_test_ms = ADIS16240_STARTUP_DELAY,
-};
-
-static const struct adis_data adis16240_data = {
- .write_delay = 35,
- .read_delay = 35,
- .msc_ctrl_reg = ADIS16240_MSC_CTRL,
- .glob_cmd_reg = ADIS16240_GLOB_CMD,
- .diag_stat_reg = ADIS16240_DIAG_STAT,
-
- .self_test_mask = ADIS16240_MSC_CTRL_SELF_TEST_EN,
- .self_test_reg = ADIS16240_MSC_CTRL,
- .self_test_no_autoclear = true,
- .timeouts = &adis16240_timeouts,
-
- .status_error_msgs = adis16240_status_error_msgs,
- .status_error_mask = BIT(ADIS16240_DIAG_STAT_PWRON_FAIL_BIT) |
- BIT(ADIS16240_DIAG_STAT_SPI_FAIL_BIT) |
- BIT(ADIS16240_DIAG_STAT_FLASH_UPT_BIT) |
- BIT(ADIS16240_DIAG_STAT_POWER_HIGH_BIT) |
- BIT(ADIS16240_DIAG_STAT_POWER_LOW_BIT),
-};
-
-static int adis16240_probe(struct spi_device *spi)
-{
- int ret;
- struct adis *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->info = &adis16240_info;
- indio_dev->channels = adis16240_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16240_channels);
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- spi->mode = SPI_MODE_3;
- ret = spi_setup(spi);
- if (ret) {
- dev_err(&spi->dev, "spi_setup failed!\n");
- return ret;
- }
-
- ret = adis_init(st, indio_dev, spi, &adis16240_data);
- if (ret)
- return ret;
- ret = devm_adis_setup_buffer_and_trigger(st, indio_dev, NULL);
- if (ret)
- return ret;
-
- /* Get the device into a sane initial state */
- ret = __adis_initial_startup(st);
- if (ret)
- return ret;
-
- return devm_iio_device_register(&spi->dev, indio_dev);
-}
-
-static const struct of_device_id adis16240_of_match[] = {
- { .compatible = "adi,adis16240" },
- { },
-};
-MODULE_DEVICE_TABLE(of, adis16240_of_match);
-
-static struct spi_driver adis16240_driver = {
- .driver = {
- .name = "adis16240",
- .of_match_table = adis16240_of_match,
- },
- .probe = adis16240_probe,
-};
-module_spi_driver(adis16240_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices Programmable Impact Sensor and Recorder");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:adis16240");
-MODULE_IMPORT_NS("IIO_ADISLIB");
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index 140ee4f9c137..db42810c7664 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -74,8 +74,6 @@
/**
* struct ad9832_state - driver instance specific data
* @spi: spi_device
- * @avdd: supply regulator for the analog section
- * @dvdd: supply regulator for the digital section
* @mclk: external master clock
* @ctrl_fp: cached frequency/phase control word
* @ctrl_ss: cached sync/selsrc control word
@@ -94,8 +92,6 @@
struct ad9832_state {
struct spi_device *spi;
- struct regulator *avdd;
- struct regulator *dvdd;
struct clk *mclk;
unsigned short ctrl_fp;
unsigned short ctrl_ss;
@@ -297,11 +293,6 @@ static const struct iio_info ad9832_info = {
.attrs = &ad9832_attribute_group,
};
-static void ad9832_reg_disable(void *reg)
-{
- regulator_disable(reg);
-}
-
static int ad9832_probe(struct spi_device *spi)
{
struct ad9832_platform_data *pdata = dev_get_platdata(&spi->dev);
@@ -320,33 +311,13 @@ static int ad9832_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
- st->avdd = devm_regulator_get(&spi->dev, "avdd");
- if (IS_ERR(st->avdd))
- return PTR_ERR(st->avdd);
-
- ret = regulator_enable(st->avdd);
- if (ret) {
- dev_err(&spi->dev, "Failed to enable specified AVDD supply\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(&spi->dev, ad9832_reg_disable, st->avdd);
+ ret = devm_regulator_get_enable(&spi->dev, "avdd");
if (ret)
- return ret;
-
- st->dvdd = devm_regulator_get(&spi->dev, "dvdd");
- if (IS_ERR(st->dvdd))
- return PTR_ERR(st->dvdd);
+ return dev_err_probe(&spi->dev, ret, "Failed to enable specified AVDD supply\n");
- ret = regulator_enable(st->dvdd);
- if (ret) {
- dev_err(&spi->dev, "Failed to enable specified DVDD supply\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(&spi->dev, ad9832_reg_disable, st->dvdd);
+ ret = devm_regulator_get_enable(&spi->dev, "dvdd");
if (ret)
- return ret;
+ return dev_err_probe(&spi->dev, ret, "Failed to enable specified DVDD supply\n");
st->mclk = devm_clk_get_enabled(&spi->dev, "mclk");
if (IS_ERR(st->mclk))
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 6e99e008c5f4..50413da2aa65 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -387,33 +387,15 @@ static const struct iio_info ad9833_info = {
.attrs = &ad9833_attribute_group,
};
-static void ad9834_disable_reg(void *data)
-{
- struct regulator *reg = data;
-
- regulator_disable(reg);
-}
-
static int ad9834_probe(struct spi_device *spi)
{
struct ad9834_state *st;
struct iio_dev *indio_dev;
- struct regulator *reg;
int ret;
- reg = devm_regulator_get(&spi->dev, "avdd");
- if (IS_ERR(reg))
- return PTR_ERR(reg);
-
- ret = regulator_enable(reg);
- if (ret) {
- dev_err(&spi->dev, "Failed to enable specified AVDD supply\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(&spi->dev, ad9834_disable_reg, reg);
+ ret = devm_regulator_get_enable(&spi->dev, "avdd");
if (ret)
- return ret;
+ return dev_err_probe(&spi->dev, ret, "Failed to enable specified AVDD supply\n");
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev) {
diff --git a/drivers/staging/rtl8723bs/Kconfig b/drivers/staging/rtl8723bs/Kconfig
index 8d48c61961a6..353e6ee2c145 100644
--- a/drivers/staging/rtl8723bs/Kconfig
+++ b/drivers/staging/rtl8723bs/Kconfig
@@ -4,6 +4,7 @@ config RTL8723BS
depends on WLAN && MMC && CFG80211
depends on m
select CRYPTO
+ select CRYPTO_LIB_AES
select CRYPTO_LIB_ARC4
help
This option enables support for RTL8723BS SDIO drivers, such as
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index a6dc88dd4ba1..50022bb5911e 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -324,7 +324,7 @@ void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
{
unsigned char sta_band = 0, shortGIrate = false;
unsigned int tx_ra_bitmap = 0;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex
*pcur_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
@@ -372,9 +372,9 @@ void update_bmc_sta(struct adapter *padapter)
unsigned char network_type;
int supportRateNum = 0;
unsigned int tx_ra_bitmap = 0;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex
*pcur_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
struct sta_info *psta = rtw_get_bcmc_stainfo(padapter);
@@ -451,9 +451,9 @@ void update_bmc_sta(struct adapter *padapter)
void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
{
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;
struct ht_priv *phtpriv_sta = &psta->htpriv;
u8 cur_ldpc_cap = 0, cur_stbc_cap = 0, cur_beamform_cap = 0;
@@ -563,10 +563,10 @@ void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
static void update_ap_info(struct adapter *padapter, struct sta_info *psta)
{
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex
*pnetwork = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;
psta->wireless_mode = pmlmeext->cur_wireless_mode;
@@ -609,7 +609,7 @@ static void update_hw_ht_param(struct adapter *padapter)
unsigned char max_AMPDU_len;
unsigned char min_MPDU_spacing;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
/* handle A-MPDU parameter field
*
@@ -645,13 +645,13 @@ void start_bss_network(struct adapter *padapter)
u32 acparm;
int ie_len;
struct registry_priv *pregpriv = &padapter->registrypriv;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- struct security_priv *psecuritypriv = &(padapter->securitypriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
struct wlan_bssid_ex
*pnetwork = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- struct wlan_bssid_ex *pnetwork_mlmeext = &(pmlmeinfo->network);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *pnetwork_mlmeext = &pmlmeinfo->network;
struct HT_info_element *pht_info = NULL;
u8 cbw40_enable = 0;
@@ -823,7 +823,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
u8 WMM_PARA_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
struct registry_priv *pregistrypriv = &padapter->registrypriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex
*pbss_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
u8 *ie = pbss_network->ies;
@@ -845,7 +845,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
pbss_network->rssi = 0;
- memcpy(pbss_network->mac_address, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pbss_network->mac_address, myid(&padapter->eeprompriv), ETH_ALEN);
/* beacon interval */
p = rtw_get_beacon_interval_from_ie(ie);/* ie + 8; 8: TimeStamp, 2: Beacon Interval 2:Capability */
@@ -1186,7 +1186,7 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
if ((NUM_ACL - 1) < pacl_list->num)
return (-1);
- spin_lock_bh(&(pacl_node_q->lock));
+ spin_lock_bh(&pacl_node_q->lock);
phead = get_list_head(pacl_node_q);
list_for_each(plist, phead) {
@@ -1200,12 +1200,12 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
}
}
- spin_unlock_bh(&(pacl_node_q->lock));
+ spin_unlock_bh(&pacl_node_q->lock);
if (added)
return ret;
- spin_lock_bh(&(pacl_node_q->lock));
+ spin_lock_bh(&pacl_node_q->lock);
for (i = 0; i < NUM_ACL; i++) {
paclnode = &pacl_list->aclnode[i];
@@ -1225,7 +1225,7 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
}
}
- spin_unlock_bh(&(pacl_node_q->lock));
+ spin_unlock_bh(&pacl_node_q->lock);
return ret;
}
@@ -1238,7 +1238,7 @@ void rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
struct __queue *pacl_node_q = &pacl_list->acl_node_q;
- spin_lock_bh(&(pacl_node_q->lock));
+ spin_lock_bh(&pacl_node_q->lock);
phead = get_list_head(pacl_node_q);
list_for_each_safe(plist, tmp, phead) {
@@ -1258,7 +1258,7 @@ void rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
}
}
- spin_unlock_bh(&(pacl_node_q->lock));
+ spin_unlock_bh(&pacl_node_q->lock);
}
@@ -1308,7 +1308,7 @@ static int rtw_ap_set_key(
u8 keylen;
struct cmd_obj *pcmd;
struct setkey_parm *psetkeyparm;
- struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
int res = _SUCCESS;
pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
@@ -1345,7 +1345,7 @@ static int rtw_ap_set_key(
keylen = 16;
}
- memcpy(&(psetkeyparm->key[0]), key, keylen);
+ memcpy(&psetkeyparm->key[0], key, keylen);
pcmd->cmdcode = _SetKey_CMD_;
pcmd->parmbuf = (u8 *)psetkeyparm;
@@ -1397,10 +1397,10 @@ static void update_bcn_fixed_ie(struct adapter *padapter)
static void update_bcn_erpinfo_ie(struct adapter *padapter)
{
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
unsigned char *p, *ie = pnetwork->ies;
u32 len = 0;
@@ -1461,10 +1461,10 @@ static void update_bcn_wps_ie(struct adapter *padapter)
u8 *pbackup_remainder_ie = NULL;
uint wps_ielen = 0, wps_offset, remainder_ielen;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
unsigned char *ie = pnetwork->ies;
u32 ielen = pnetwork->ie_length;
@@ -1537,8 +1537,8 @@ void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
if (!padapter)
return;
- pmlmepriv = &(padapter->mlmepriv);
- pmlmeext = &(padapter->mlmeextpriv);
+ pmlmepriv = &padapter->mlmepriv;
+ pmlmeext = &padapter->mlmeextpriv;
/* pmlmeinfo = &(pmlmeext->mlmext_info); */
if (!pmlmeext->bstart_bss)
@@ -1619,7 +1619,7 @@ static int rtw_ht_operation_update(struct adapter *padapter)
{
u16 cur_op_mode, new_op_mode;
int op_mode_changes = 0;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;
if (pmlmepriv->htpriv.ht_option)
@@ -1703,8 +1703,8 @@ void associated_clients_update(struct adapter *padapter, u8 updated)
void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
{
u8 beacon_updated = false;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
if (!(psta->flags & WLAN_STA_SHORT_PREAMBLE)) {
if (!psta->no_short_preamble_set) {
@@ -1823,8 +1823,8 @@ void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
u8 bss_cap_update_on_sta_leave(struct adapter *padapter, struct sta_info *psta)
{
u8 beacon_updated = false;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
if (!psta)
return beacon_updated;
@@ -1932,7 +1932,7 @@ void rtw_sta_flush(struct adapter *padapter)
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
if ((pmlmeinfo->state & 0x03) != WIFI_FW_AP_STATE)
@@ -1962,7 +1962,7 @@ void rtw_sta_flush(struct adapter *padapter)
void sta_info_update(struct adapter *padapter, struct sta_info *psta)
{
int flags = psta->flags;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
/* update wmm cap. */
if (WLAN_STA_WME & flags)
@@ -1991,7 +1991,7 @@ void sta_info_update(struct adapter *padapter, struct sta_info *psta)
void ap_sta_info_defer_update(struct adapter *padapter, struct sta_info *psta)
{
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
if (psta->state & _FW_LINKED) {
pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;
@@ -2006,7 +2006,7 @@ void rtw_ap_restore_network(struct adapter *padapter)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct sta_priv *pstapriv = &padapter->stapriv;
struct sta_info *psta;
- struct security_priv *psecuritypriv = &(padapter->securitypriv);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
struct list_head *phead, *plist;
u8 chk_alive_num = 0;
char chk_alive_list[NUM_STA];
@@ -2072,7 +2072,7 @@ void rtw_ap_restore_network(struct adapter *padapter)
void start_ap_mode(struct adapter *padapter)
{
int i;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
@@ -2109,7 +2109,7 @@ void start_ap_mode(struct adapter *padapter)
pmlmepriv->p2p_probe_resp_ie = NULL;
/* for ACL */
- INIT_LIST_HEAD(&(pacl_list->acl_node_q.queue));
+ INIT_LIST_HEAD(&pacl_list->acl_node_q.queue);
pacl_list->num = 0;
pacl_list->mode = 0;
for (i = 0; i < NUM_ACL; i++) {
@@ -2124,7 +2124,7 @@ void stop_ap_mode(struct adapter *padapter)
struct rtw_wlan_acl_node *paclnode;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
struct __queue *pacl_node_q = &pacl_list->acl_node_q;
@@ -2142,7 +2142,7 @@ void stop_ap_mode(struct adapter *padapter)
padapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;
/* for ACL */
- spin_lock_bh(&(pacl_node_q->lock));
+ spin_lock_bh(&pacl_node_q->lock);
phead = get_list_head(pacl_node_q);
list_for_each_safe(plist, tmp, phead) {
paclnode = list_entry(plist, struct rtw_wlan_acl_node, list);
@@ -2155,7 +2155,7 @@ void stop_ap_mode(struct adapter *padapter)
pacl_list->num--;
}
}
- spin_unlock_bh(&(pacl_node_q->lock));
+ spin_unlock_bh(&pacl_node_q->lock);
rtw_sta_flush(padapter);
diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c
index b41ec89932af..1213a91cffff 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com.c
@@ -884,6 +884,9 @@ static u32 Array_kfreemap[] = {
0xfc, 0x0,
};
+#define REG_RF_BB_GAIN_OFFSET 0x7f
+//#define RF_GAIN_OFFSET_MASK 0xfffff
+
void rtw_bb_rf_gain_offset(struct adapter *padapter)
{
u8 value = padapter->eeprompriv.EEPROMRFGainOffset;
diff --git a/drivers/staging/rtl8723bs/include/osdep_intf.h b/drivers/staging/rtl8723bs/include/osdep_intf.h
index 73199be78139..83a25598e962 100644
--- a/drivers/staging/rtl8723bs/include/osdep_intf.h
+++ b/drivers/staging/rtl8723bs/include/osdep_intf.h
@@ -8,33 +8,6 @@
#ifndef __OSDEP_INTF_H_
#define __OSDEP_INTF_H_
-
-struct intf_priv {
-
- u8 *intf_dev;
- u32 max_iosz; /* USB2.0: 128, USB1.1: 64, SDIO:64 */
- u32 max_xmitsz; /* USB2.0: unlimited, SDIO:512 */
- u32 max_recvsz; /* USB2.0: unlimited, SDIO:512 */
-
- volatile u8 *io_rwmem;
- volatile u8 *allocated_io_rwmem;
- u32 io_wsz; /* unit: 4bytes */
- u32 io_rsz;/* unit: 4bytes */
- u8 intf_status;
-
- void (*_bus_io)(u8 *priv);
-
-/*
-Under Sync. IRP (SDIO/USB)
-A protection mechanism is necessary for the io_rwmem(read/write protocol)
-
-Under Async. IRP (SDIO/USB)
-The protection mechanism is through the pending queue.
-*/
-
- struct mutex ioctl_mutex;
-};
-
struct dvobj_priv *devobj_init(void);
void devobj_deinit(struct dvobj_priv *pdvobj);
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_hal.h b/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
index e6d6e9de5474..a4a14474c35d 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
@@ -15,7 +15,6 @@
#include "rtl8723b_recv.h"
#include "rtl8723b_xmit.h"
#include "rtl8723b_cmd.h"
-#include "rtw_mp.h"
#include "hal_pwr_seq.h"
#include "Hal8192CPhyReg.h"
#include "hal_phy_cfg.h"
diff --git a/drivers/staging/rtl8723bs/include/rtw_io.h b/drivers/staging/rtl8723bs/include/rtw_io.h
index 0ee87be6dc4f..adf1de4d7924 100644
--- a/drivers/staging/rtl8723bs/include/rtw_io.h
+++ b/drivers/staging/rtl8723bs/include/rtw_io.h
@@ -8,16 +8,7 @@
#ifndef _RTW_IO_H_
#define _RTW_IO_H_
-/*
- For prompt mode accessing, caller shall free io_req
- Otherwise, io_handler will free io_req
-*/
-
-/* below is for the intf_option bit definition... */
-
-struct intf_priv;
struct intf_hdl;
-struct io_queue;
struct _io_ops {
u8 (*_read8)(struct intf_hdl *pintfhdl, u32 addr);
@@ -36,8 +27,6 @@ struct _io_ops {
void (*_read_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
void (*_write_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
- void (*_sync_irp_protocol_rw)(struct io_queue *pio_q);
-
u32 (*_read_interrupt)(struct intf_hdl *pintfhdl, u32 addr);
u32 (*_read_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
@@ -49,18 +38,6 @@ struct _io_ops {
void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
};
-struct io_req {
- struct list_head list;
- u32 addr;
- volatile u32 val;
- u32 command;
- u32 status;
- u8 *pbuf;
-
- void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt);
- u8 *cnxt;
-};
-
struct intf_hdl {
struct adapter *padapter;
struct dvobj_priv *pintf_dev;/* pointer to &(padapter->dvobjpriv); */
@@ -74,21 +51,6 @@ struct intf_hdl {
int rtw_inc_and_chk_continual_io_error(struct dvobj_priv *dvobj);
void rtw_reset_continual_io_error(struct dvobj_priv *dvobj);
-/*
-Below is the data structure used by _io_handler
-
-*/
-
-struct io_queue {
- spinlock_t lock;
- struct list_head free_ioreqs;
- struct list_head pending; /* The io_req list that will be served in the single protocol read/write. */
- struct list_head processing;
- u8 *free_ioreqs_buf; /* 4-byte aligned */
- u8 *pallocated_free_ioreqs_buf;
- struct intf_hdl intf;
-};
-
struct io_priv {
struct adapter *padapter;
@@ -97,20 +59,6 @@ struct io_priv {
};
-extern uint ioreq_flush(struct adapter *adapter, struct io_queue *ioqueue);
-extern void sync_ioreq_enqueue(struct io_req *preq, struct io_queue *ioqueue);
-extern uint sync_ioreq_flush(struct adapter *adapter, struct io_queue *ioqueue);
-
-
-extern uint free_ioreq(struct io_req *preq, struct io_queue *pio_queue);
-extern struct io_req *alloc_ioreq(struct io_queue *pio_q);
-
-extern uint register_intf_hdl(u8 *dev, struct intf_hdl *pintfhdl);
-extern void unregister_intf_hdl(struct intf_hdl *pintfhdl);
-
-extern void _rtw_attrib_read(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-extern void _rtw_attrib_write(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-
extern u8 rtw_read8(struct adapter *adapter, u32 addr);
extern u16 rtw_read16(struct adapter *adapter, u32 addr);
extern u32 rtw_read32(struct adapter *adapter, u32 addr);
@@ -121,46 +69,6 @@ extern int rtw_write32(struct adapter *adapter, u32 addr, u32 val);
extern u32 rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-extern void rtw_write_scsi(struct adapter *adapter, u32 cnt, u8 *pmem);
-
-/* ioreq */
-extern void ioreq_read8(struct adapter *adapter, u32 addr, u8 *pval);
-extern void ioreq_read16(struct adapter *adapter, u32 addr, u16 *pval);
-extern void ioreq_read32(struct adapter *adapter, u32 addr, u32 *pval);
-extern void ioreq_write8(struct adapter *adapter, u32 addr, u8 val);
-extern void ioreq_write16(struct adapter *adapter, u32 addr, u16 val);
-extern void ioreq_write32(struct adapter *adapter, u32 addr, u32 val);
-
-
-extern uint async_read8(struct adapter *adapter, u32 addr, u8 *pbuff,
- void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
-extern uint async_read16(struct adapter *adapter, u32 addr, u8 *pbuff,
- void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
-extern uint async_read32(struct adapter *adapter, u32 addr, u8 *pbuff,
- void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
-
-extern void async_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-extern void async_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-
-extern void async_write8(struct adapter *adapter, u32 addr, u8 val,
- void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
-extern void async_write16(struct adapter *adapter, u32 addr, u16 val,
- void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
-extern void async_write32(struct adapter *adapter, u32 addr, u32 val,
- void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
-
-extern void async_write_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-extern void async_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-
-
int rtw_init_io_priv(struct adapter *padapter, void (*set_intf_ops)(struct adapter *padapter, struct _io_ops *pops));
-
-extern uint alloc_io_queue(struct adapter *adapter);
-extern void free_io_queue(struct adapter *adapter);
-extern void async_bus_io(struct io_queue *pio_q);
-extern void bus_sync_io(struct io_queue *pio_q);
-extern u32 _ioreq2rwmem(struct io_queue *pio_q);
-extern void dev_power_down(struct adapter *Adapter, u8 bpwrup);
-
#endif /* _RTL8711_IO_H_ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_mp.h b/drivers/staging/rtl8723bs/include/rtw_mp.h
deleted file mode 100644
index 5a1cbd2ed851..000000000000
--- a/drivers/staging/rtl8723bs/include/rtw_mp.h
+++ /dev/null
@@ -1,341 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-#ifndef _RTW_MP_H_
-#define _RTW_MP_H_
-
-#define MAX_MP_XMITBUF_SZ 2048
-
-struct mp_xmit_frame {
- struct list_head list;
-
- struct pkt_attrib attrib;
-
- struct sk_buff *pkt;
-
- int frame_tag;
-
- struct adapter *padapter;
-
- uint mem[(MAX_MP_XMITBUF_SZ >> 2)];
-};
-
-struct mp_wiparam {
- u32 bcompleted;
- u32 act_type;
- u32 io_offset;
- u32 io_value;
-};
-
-struct mp_tx {
- u8 stop;
- u32 count, sended;
- u8 payload;
- struct pkt_attrib attrib;
- /* struct tx_desc desc; */
- /* u8 resvdtx[7]; */
- u8 desc[TXDESC_SIZE];
- u8 *pallocated_buf;
- u8 *buf;
- u32 buf_size, write_size;
- void *PktTxThread;
-};
-
-#define MP_MAX_LINES 1000
-#define MP_MAX_LINES_BYTES 256
-
-typedef void (*MPT_WORK_ITEM_HANDLER)(void *Adapter);
-struct mpt_context {
- /* Indicate if we have started Mass Production Test. */
- bool bMassProdTest;
-
- /* Indicate if the driver is unloading or unloaded. */
- bool bMptDrvUnload;
-
- struct timer_list MPh2c_timeout_timer;
-/* Event used to sync H2c for BT control */
-
- bool MptH2cRspEvent;
- bool MptBtC2hEvent;
- bool bMPh2c_timeout;
-
- /* 8190 PCI does not support NDIS_WORK_ITEM. */
- /* Work Item for Mass Production Test. */
- /* NDIS_WORK_ITEM MptWorkItem; */
-/* RT_WORK_ITEM MptWorkItem; */
- /* Event used to sync the case unloading driver and MptWorkItem is still in progress. */
-/* NDIS_EVENT MptWorkItemEvent; */
- /* To protect the following variables. */
-/* NDIS_SPIN_LOCK MptWorkItemSpinLock; */
- /* Indicate a MptWorkItem is scheduled and not yet finished. */
- bool bMptWorkItemInProgress;
- /* An instance which implements function and context of MptWorkItem. */
- MPT_WORK_ITEM_HANDLER CurrMptAct;
-
- /* 1 =Start, 0 =Stop from UI. */
- u32 MptTestStart;
- /* _TEST_MODE, defined in MPT_Req2.h */
- u32 MptTestItem;
- /* Variable needed in each implementation of CurrMptAct. */
- u32 MptActType; /* Type of action performed in CurrMptAct. */
- /* The Offset of IO operation is depend of MptActType. */
- u32 MptIoOffset;
- /* The Value of IO operation is depend of MptActType. */
- u32 MptIoValue;
- /* The RfPath of IO operation is depend of MptActType. */
- u32 MptRfPath;
-
- enum wireless_mode MptWirelessModeToSw; /* Wireless mode to switch. */
- u8 MptChannelToSw; /* Channel to switch. */
- u8 MptInitGainToSet; /* Initial gain to set. */
- u32 MptBandWidth; /* bandwidth to switch. */
- u32 MptRateIndex; /* rate index. */
- /* Register value kept for Single Carrier Tx test. */
- u8 btMpCckTxPower;
- /* Register value kept for Single Carrier Tx test. */
- u8 btMpOfdmTxPower;
- /* For MP Tx Power index */
- u8 TxPwrLevel[2]; /* rf-A, rf-B */
- u32 RegTxPwrLimit;
- /* Content of RCR Register for Mass Production Test. */
- u32 MptRCR;
- /* true if we only receive packets with specific pattern. */
- bool bMptFilterPattern;
- /* Rx OK count, statistics used in Mass Production Test. */
- u32 MptRxOkCnt;
- /* Rx CRC32 error count, statistics used in Mass Production Test. */
- u32 MptRxCrcErrCnt;
-
- bool bCckContTx; /* true if we are in CCK Continuous Tx test. */
- bool bOfdmContTx; /* true if we are in OFDM Continuous Tx test. */
- bool bStartContTx; /* true if we have start Continuous Tx test. */
- /* true if we are in Single Carrier Tx test. */
- bool bSingleCarrier;
- /* true if we are in Carrier Suppression Tx Test. */
- bool bCarrierSuppression;
- /* true if we are in Single Tone Tx test. */
- bool bSingleTone;
-
- /* ACK counter asked by K.Y.. */
- bool bMptEnableAckCounter;
- u32 MptAckCounter;
-
- /* SD3 Willis For 8192S to save 1T/2T RF table for ACUT Only fro ACUT delete later ~~~! */
- /* s8 BufOfLines[2][MAX_LINES_HWCONFIG_TXT][MAX_BYTES_LINE_HWCONFIG_TXT]; */
- /* s8 BufOfLines[2][MP_MAX_LINES][MP_MAX_LINES_BYTES]; */
- /* s32 RfReadLine[2]; */
-
- u8 APK_bound[2]; /* for APK path A/path B */
- bool bMptIndexEven;
-
- u8 backup0xc50;
- u8 backup0xc58;
- u8 backup0xc30;
- u8 backup0x52_RF_A;
- u8 backup0x52_RF_B;
-
- u32 backup0x58_RF_A;
- u32 backup0x58_RF_B;
-
- u8 h2cReqNum;
- u8 c2hBuf[32];
-
- u8 btInBuf[100];
- u32 mptOutLen;
- u8 mptOutBuf[100];
-
-};
-/* endif */
-
-/* define RTPRIV_IOCTL_MP (SIOCIWFIRSTPRIV + 0x17) */
-enum {
- WRITE_REG = 1,
- READ_REG,
- WRITE_RF,
- READ_RF,
- MP_START,
- MP_STOP,
- MP_RATE,
- MP_CHANNEL,
- MP_BANDWIDTH,
- MP_TXPOWER,
- MP_ANT_TX,
- MP_ANT_RX,
- MP_CTX,
- MP_QUERY,
- MP_ARX,
- MP_PSD,
- MP_PWRTRK,
- MP_THER,
- MP_IOCTL,
- EFUSE_GET,
- EFUSE_SET,
- MP_RESET_STATS,
- MP_DUMP,
- MP_PHYPARA,
- MP_SetRFPathSwh,
- MP_QueryDrvStats,
- MP_SetBT,
- CTA_TEST,
- MP_DISABLE_BT_COEXIST,
- MP_PwrCtlDM,
- MP_NULL,
- MP_GET_TXPOWER_INX,
-};
-
-struct mp_priv {
- struct adapter *papdater;
-
- /* Testing Flag */
- u32 mode;/* 0 for normal type packet, 1 for loopback packet (16bytes TXCMD) */
-
- u32 prev_fw_state;
-
- /* OID cmd handler */
- struct mp_wiparam workparam;
-/* u8 act_in_progress; */
-
- /* Tx Section */
- u8 TID;
- u32 tx_pktcount;
- u32 pktInterval;
- struct mp_tx tx;
-
- /* Rx Section */
- u32 rx_bssidpktcount;
- u32 rx_pktcount;
- u32 rx_pktcount_filter_out;
- u32 rx_crcerrpktcount;
- u32 rx_pktloss;
- bool rx_bindicatePkt;
- struct recv_stat rxstat;
-
- /* RF/BB relative */
- u8 channel;
- u8 bandwidth;
- u8 prime_channel_offset;
- u8 txpoweridx;
- u8 txpoweridx_b;
- u8 rateidx;
- u32 preamble;
-/* u8 modem; */
- u32 CrystalCap;
-/* u32 curr_crystalcap; */
-
- u16 antenna_tx;
- u16 antenna_rx;
-/* u8 curr_rfpath; */
-
- u8 check_mp_pkt;
-
- u8 bSetTxPower;
-/* uint ForcedDataRate; */
- u8 mp_dm;
- u8 mac_filter[ETH_ALEN];
- u8 bmac_filter;
-
- struct wlan_network mp_network;
- NDIS_802_11_MAC_ADDRESS network_macaddr;
-
- u8 *pallocated_mp_xmitframe_buf;
- u8 *pmp_xmtframe_buf;
- struct __queue free_mp_xmitqueue;
- u32 free_mp_xmitframe_cnt;
- bool bSetRxBssid;
- bool bTxBufCkFail;
-
- struct mpt_context MptCtx;
-
- u8 *TXradomBuffer;
-};
-
-/* Hardware Registers */
-extern u8 mpdatarate[NumRates];
-
-#define MAX_TX_PWR_INDEX_N_MODE 64 /* 0x3F */
-
-#define REG_RF_BB_GAIN_OFFSET 0x7f
-#define RF_GAIN_OFFSET_MASK 0xfffff
-
-/* */
-/* struct mp_xmit_frame *alloc_mp_xmitframe(struct mp_priv *pmp_priv); */
-/* int free_mp_xmitframe(struct xmit_priv *pxmitpriv, struct mp_xmit_frame *pmp_xmitframe); */
-
-s32 init_mp_priv(struct adapter *padapter);
-void free_mp_priv(struct mp_priv *pmp_priv);
-s32 MPT_InitializeAdapter(struct adapter *padapter, u8 Channel);
-void MPT_DeInitAdapter(struct adapter *padapter);
-s32 mp_start_test(struct adapter *padapter);
-void mp_stop_test(struct adapter *padapter);
-
-u32 _read_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 bitmask);
-void _write_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 bitmask, u32 val);
-
-u32 read_macreg(struct adapter *padapter, u32 addr, u32 sz);
-void write_macreg(struct adapter *padapter, u32 addr, u32 val, u32 sz);
-
-void SetChannel(struct adapter *padapter);
-void SetBandwidth(struct adapter *padapter);
-int SetTxPower(struct adapter *padapter);
-void SetAntennaPathPower(struct adapter *padapter);
-void SetDataRate(struct adapter *padapter);
-
-void SetAntenna(struct adapter *padapter);
-
-s32 SetThermalMeter(struct adapter *padapter, u8 target_ther);
-void GetThermalMeter(struct adapter *padapter, u8 *value);
-
-void SetContinuousTx(struct adapter *padapter, u8 bStart);
-void SetSingleCarrierTx(struct adapter *padapter, u8 bStart);
-void SetSingleToneTx(struct adapter *padapter, u8 bStart);
-void SetCarrierSuppressionTx(struct adapter *padapter, u8 bStart);
-void PhySetTxPowerLevel(struct adapter *padapter);
-
-void fill_txdesc_for_mp(struct adapter *padapter, u8 *ptxdesc);
-void SetPacketTx(struct adapter *padapter);
-void SetPacketRx(struct adapter *padapter, u8 bStartRx);
-
-void ResetPhyRxPktCount(struct adapter *padapter);
-u32 GetPhyRxPktReceived(struct adapter *padapter);
-u32 GetPhyRxPktCRC32Error(struct adapter *padapter);
-
-s32 SetPowerTracking(struct adapter *padapter, u8 enable);
-void GetPowerTracking(struct adapter *padapter, u8 *enable);
-
-u32 mp_query_psd(struct adapter *padapter, u8 *data);
-
-void Hal_SetAntenna(struct adapter *padapter);
-void Hal_SetBandwidth(struct adapter *padapter);
-
-void Hal_SetTxPower(struct adapter *padapter);
-void Hal_SetCarrierSuppressionTx(struct adapter *padapter, u8 bStart);
-void Hal_SetSingleToneTx(struct adapter *padapter, u8 bStart);
-void Hal_SetSingleCarrierTx(struct adapter *padapter, u8 bStart);
-void Hal_SetContinuousTx(struct adapter *padapter, u8 bStart);
-
-void Hal_SetDataRate(struct adapter *padapter);
-void Hal_SetChannel(struct adapter *padapter);
-void Hal_SetAntennaPathPower(struct adapter *padapter);
-s32 Hal_SetThermalMeter(struct adapter *padapter, u8 target_ther);
-s32 Hal_SetPowerTracking(struct adapter *padapter, u8 enable);
-void Hal_GetPowerTracking(struct adapter *padapter, u8 *enable);
-void Hal_GetThermalMeter(struct adapter *padapter, u8 *value);
-void Hal_mpt_SwitchRfSetting(struct adapter *padapter);
-void Hal_MPT_CCKTxPowerAdjust(struct adapter *Adapter, bool bInCH14);
-void Hal_MPT_CCKTxPowerAdjustbyIndex(struct adapter *padapter, bool beven);
-void Hal_SetCCKTxPower(struct adapter *padapter, u8 *TxPower);
-void Hal_SetOFDMTxPower(struct adapter *padapter, u8 *TxPower);
-void Hal_TriggerRFThermalMeter(struct adapter *padapter);
-u8 Hal_ReadRFThermalMeter(struct adapter *padapter);
-void Hal_SetCCKContinuousTx(struct adapter *padapter, u8 bStart);
-void Hal_SetOFDMContinuousTx(struct adapter *padapter, u8 bStart);
-void Hal_ProSetCrystalCap(struct adapter *padapter, u32 CrystalCapVal);
-void MP_PHY_SetRFPathSwitch(struct adapter *padapter, bool bMain);
-u32 mpt_ProQueryCalTxPower(struct adapter *padapter, u8 RfPath);
-void MPT_PwrCtlDM(struct adapter *padapter, u32 bstart);
-u8 MptToMgntRate(u32 MptRateIdx);
-
-#endif /* _RTW_MP_H_ */
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index 738a601c55bb..de48c3454ab3 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -724,8 +724,6 @@ u8 rtw_free_drv_sw(struct adapter *padapter)
rtw_free_mlme_priv(&padapter->mlmepriv);
- /* free_io_queue(padapter); */
-
_rtw_free_xmit_priv(&padapter->xmitpriv);
_rtw_free_sta_priv(&padapter->stapriv); /* will free bcmc_stainfo here */
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 02860d3ec365..025dae3756aa 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -228,8 +228,8 @@ int ddk750_init_hw(struct initchip_param *p_init_param)
reg = peek32(VGA_CONFIGURATION);
reg |= (VGA_CONFIGURATION_PLL | VGA_CONFIGURATION_MODE);
poke32(VGA_CONFIGURATION, reg);
+#ifdef CONFIG_X86
} else {
-#if defined(__i386__) || defined(__x86_64__)
/* set graphic mode via IO method */
outb_p(0x88, 0x3d4);
outb_p(0x06, 0x3d5);
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index deec33f63bcf..b839b50ac26a 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -658,8 +658,6 @@ static const struct vb2_ops bcm2835_mmal_video_qops = {
.buf_queue = buffer_queue,
.start_streaming = start_streaming,
.stop_streaming = stop_streaming,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
};
/* ------------------------------------------------------------------
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index a4e83e5d619b..5dbf8d53db09 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -97,13 +97,6 @@ struct vchiq_arm_state {
* tracked separately with the state.
*/
int peer_use_count;
-
- /*
- * Flag to indicate that the first vchiq connect has made it through.
- * This means that both sides should be fully ready, and we should
- * be able to suspend after this point.
- */
- int first_connect;
};
static int
@@ -271,7 +264,7 @@ static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state
return -ENXIO;
}
- dev_dbg(&pdev->dev, "arm: vchiq_init - done (slots %pK, phys %pad)\n",
+ dev_dbg(&pdev->dev, "arm: vchiq_init - done (slots %p, phys %pad)\n",
vchiq_slot_zero, &slot_phys);
mutex_init(&drv_mgmt->connected_mutex);
@@ -280,32 +273,23 @@ static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state
return 0;
}
-int
-vchiq_platform_init_state(struct vchiq_state *state)
+static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
{
- struct vchiq_arm_state *platform_state;
-
- platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
- if (!platform_state)
- return -ENOMEM;
-
- rwlock_init(&platform_state->susp_res_lock);
-
- init_completion(&platform_state->ka_evt);
- atomic_set(&platform_state->ka_use_count, 0);
- atomic_set(&platform_state->ka_use_ack_count, 0);
- atomic_set(&platform_state->ka_release_count, 0);
-
- platform_state->state = state;
-
- state->platform_state = (struct opaque_platform_state *)platform_state;
-
- return 0;
+ return (struct vchiq_arm_state *)state->platform_state;
}
-static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
+static void
+vchiq_platform_uninit(struct vchiq_drv_mgmt *mgmt)
{
- return (struct vchiq_arm_state *)state->platform_state;
+ struct vchiq_arm_state *arm_state;
+
+ kthread_stop(mgmt->state.sync_thread);
+ kthread_stop(mgmt->state.recycle_thread);
+ kthread_stop(mgmt->state.slot_handler_thread);
+
+ arm_state = vchiq_platform_get_arm_state(&mgmt->state);
+ if (!IS_ERR_OR_NULL(arm_state->ka_thread))
+ kthread_stop(arm_state->ka_thread);
}
void vchiq_dump_platform_state(struct seq_file *f)
@@ -368,7 +352,7 @@ void free_bulk_waiter(struct vchiq_instance *instance)
&instance->bulk_waiter_list, list) {
list_del(&waiter->list);
dev_dbg(instance->state->dev,
- "arm: bulk_waiter - cleaned up %pK for pid %d\n",
+ "arm: bulk_waiter - cleaned up %p for pid %d\n",
waiter, waiter->pid);
kfree(waiter);
}
@@ -622,7 +606,7 @@ vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handl
mutex_lock(&instance->bulk_waiter_list_mutex);
list_add(&waiter->list, &instance->bulk_waiter_list);
mutex_unlock(&instance->bulk_waiter_list_mutex);
- dev_dbg(instance->state->dev, "arm: saved bulk_waiter %pK for pid %d\n",
+ dev_dbg(instance->state->dev, "arm: saved bulk_waiter %p for pid %d\n",
waiter, current->pid);
}
@@ -998,6 +982,39 @@ exit:
}
int
+vchiq_platform_init_state(struct vchiq_state *state)
+{
+ struct vchiq_arm_state *platform_state;
+ char threadname[16];
+
+ platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
+ if (!platform_state)
+ return -ENOMEM;
+
+ snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
+ state->id);
+ platform_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
+ (void *)state, threadname);
+ if (IS_ERR(platform_state->ka_thread)) {
+ dev_err(state->dev, "couldn't create thread %s\n", threadname);
+ return PTR_ERR(platform_state->ka_thread);
+ }
+
+ rwlock_init(&platform_state->susp_res_lock);
+
+ init_completion(&platform_state->ka_evt);
+ atomic_set(&platform_state->ka_use_count, 0);
+ atomic_set(&platform_state->ka_use_ack_count, 0);
+ atomic_set(&platform_state->ka_release_count, 0);
+
+ platform_state->state = state;
+
+ state->platform_state = (struct opaque_platform_state *)platform_state;
+
+ return 0;
+}
+
+int
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
enum USE_TYPE_E use_type)
{
@@ -1312,37 +1329,19 @@ out:
return ret;
}
+void vchiq_platform_connected(struct vchiq_state *state)
+{
+ struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
+
+ wake_up_process(arm_state->ka_thread);
+}
+
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
enum vchiq_connstate oldstate,
enum vchiq_connstate newstate)
{
- struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
- char threadname[16];
-
dev_dbg(state->dev, "suspend: %d: %s->%s\n",
state->id, get_conn_state_name(oldstate), get_conn_state_name(newstate));
- if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
- return;
-
- write_lock_bh(&arm_state->susp_res_lock);
- if (arm_state->first_connect) {
- write_unlock_bh(&arm_state->susp_res_lock);
- return;
- }
-
- arm_state->first_connect = 1;
- write_unlock_bh(&arm_state->susp_res_lock);
- snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
- state->id);
- arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
- (void *)state,
- threadname);
- if (IS_ERR(arm_state->ka_thread)) {
- dev_err(state->dev, "suspend: Couldn't create thread %s\n",
- threadname);
- } else {
- wake_up_process(arm_state->ka_thread);
- }
}
static const struct of_device_id vchiq_of_match[] = {
@@ -1386,8 +1385,6 @@ static int vchiq_probe(struct platform_device *pdev)
return ret;
}
- vchiq_debugfs_init(&mgmt->state);
-
dev_dbg(&pdev->dev, "arm: platform initialised - version %d (min %d)\n",
VCHIQ_VERSION, VCHIQ_VERSION_MIN);
@@ -1398,9 +1395,12 @@ static int vchiq_probe(struct platform_device *pdev)
ret = vchiq_register_chrdev(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "arm: Failed to initialize vchiq cdev\n");
+ vchiq_platform_uninit(mgmt);
return ret;
}
+ vchiq_debugfs_init(&mgmt->state);
+
bcm2835_audio = vchiq_device_register(&pdev->dev, "bcm2835-audio");
bcm2835_camera = vchiq_device_register(&pdev->dev, "bcm2835-camera");
@@ -1410,19 +1410,12 @@ static int vchiq_probe(struct platform_device *pdev)
static void vchiq_remove(struct platform_device *pdev)
{
struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(&pdev->dev);
- struct vchiq_arm_state *arm_state;
vchiq_device_unregister(bcm2835_audio);
vchiq_device_unregister(bcm2835_camera);
vchiq_debugfs_deinit();
vchiq_deregister_chrdev();
-
- kthread_stop(mgmt->state.sync_thread);
- kthread_stop(mgmt->state.recycle_thread);
- kthread_stop(mgmt->state.slot_handler_thread);
-
- arm_state = vchiq_platform_get_arm_state(&mgmt->state);
- kthread_stop(arm_state->ka_thread);
+ vchiq_platform_uninit(mgmt);
}
static struct platform_driver vchiq_driver = {
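Summary of the lifecycle change above: the keepalive thread is now created
(stopped) in vchiq_platform_init_state(), woken on the first CONNECT via the
new vchiq_platform_connected() hook, and torn down in vchiq_platform_uninit()
together with the slot/recycle/sync threads. That makes the probe error path
symmetric; a condensed sketch of the new ordering in vchiq_probe(), taken
straight from the hunks above:

	ret = vchiq_register_chrdev(&pdev->dev);
	if (ret) {
		vchiq_platform_uninit(mgmt);	/* also stops ka_thread */
		return ret;
	}

	vchiq_debugfs_init(&mgmt->state);	/* only after the cdev exists */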
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 8d5795db4f39..e7b0c800a205 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -470,7 +470,7 @@ make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
cb_userdata = bulk->cb_userdata;
}
- dev_dbg(service->state->dev, "core: %d: callback:%d (%s, %pK, %pK %pK)\n",
+ dev_dbg(service->state->dev, "core: %d: callback:%d (%s, %p, %p %p)\n",
service->state->id, service->localport, reason_names[reason],
header, cb_data, cb_userdata);
status = service->base.callback(service->instance, reason, header, service->handle,
@@ -778,7 +778,7 @@ process_free_data_message(struct vchiq_state *state, u32 *service_found,
complete(&quota->quota_event);
} else if (count == 0) {
dev_err(state->dev,
- "core: service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)\n",
+ "core: service %d message_use_count=%d (header %p, msgid %x, header->msgid %x, header->size %x)\n",
port, quota->message_use_count, header, msgid,
header->msgid, header->size);
WARN(1, "invalid message use count\n");
@@ -799,11 +799,11 @@ process_free_data_message(struct vchiq_state *state, u32 *service_found,
* it has dropped below its quota
*/
complete(&quota->quota_event);
- dev_dbg(state->dev, "core: %d: pfq:%d %x@%pK - slot_use->%d\n",
+ dev_dbg(state->dev, "core: %d: pfq:%d %x@%p - slot_use->%d\n",
state->id, port, header->size, header, count - 1);
} else {
dev_err(state->dev,
- "core: service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)\n",
+ "core: service %d slot_use_count=%d (header %p, msgid %x, header->msgid %x, header->size %x)\n",
port, count, header, msgid, header->msgid, header->size);
WARN(1, "bad slot use count\n");
}
@@ -845,7 +845,7 @@ process_free_queue(struct vchiq_state *state, u32 *service_found,
*/
rmb();
- dev_dbg(state->dev, "core: %d: pfq %d=%pK %x %x\n",
+ dev_dbg(state->dev, "core: %d: pfq %d=%p %x %x\n",
state->id, slot_index, data, local->slot_queue_recycle,
slot_queue_available);
@@ -868,7 +868,7 @@ process_free_queue(struct vchiq_state *state, u32 *service_found,
pos += calc_stride(header->size);
if (pos > VCHIQ_SLOT_SIZE) {
dev_err(state->dev,
- "core: pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x\n",
+ "core: pfq - pos %x: header %p, msgid %x, header->msgid %x, header->size %x\n",
pos, header, msgid, header->msgid, header->size);
WARN(1, "invalid slot position\n");
}
@@ -1060,7 +1060,7 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
int tx_end_index;
int slot_use_count;
- dev_dbg(state->dev, "core: %d: qm %s@%pK,%zx (%d->%d)\n",
+ dev_dbg(state->dev, "core: %d: qm %s@%p,%zx (%d->%d)\n",
state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
@@ -1117,7 +1117,7 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
} else {
- dev_dbg(state->dev, "core: %d: qm %s@%pK,%zx (%d->%d)\n",
+ dev_dbg(state->dev, "core: %d: qm %s@%p,%zx (%d->%d)\n",
state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
if (size != 0) {
@@ -1204,7 +1204,7 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
state->id, oldmsgid);
}
- dev_dbg(state->dev, "sync: %d: qms %s@%pK,%x (%d->%d)\n",
+ dev_dbg(state->dev, "sync: %d: qms %s@%p,%x (%d->%d)\n",
state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
@@ -1539,7 +1539,7 @@ create_pagelist(struct vchiq_instance *instance, struct vchiq_bulk *bulk)
pagelist = dma_alloc_coherent(instance->state->dev, pagelist_size, &dma_addr,
GFP_KERNEL);
- dev_dbg(instance->state->dev, "arm: %pK\n", pagelist);
+ dev_dbg(instance->state->dev, "arm: %p\n", pagelist);
if (!pagelist)
return NULL;
@@ -1692,7 +1692,7 @@ free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagel
unsigned int num_pages = pagelistinfo->num_pages;
unsigned int cache_line_size;
- dev_dbg(instance->state->dev, "arm: %pK, %d\n", pagelistinfo->pagelist, actual);
+ dev_dbg(instance->state->dev, "arm: %p, %d\n", pagelistinfo->pagelist, actual);
drv_mgmt = dev_get_drvdata(instance->state->dev);
@@ -1849,7 +1849,7 @@ parse_open(struct vchiq_state *state, struct vchiq_header *header)
payload = (struct vchiq_open_payload *)header->data;
fourcc = payload->fourcc;
- dev_dbg(state->dev, "core: %d: prs OPEN@%pK (%d->'%p4cc')\n",
+ dev_dbg(state->dev, "core: %d: prs OPEN@%p (%d->'%p4cc')\n",
state->id, header, localport, &fourcc);
service = get_listening_service(state, fourcc);
@@ -1976,14 +1976,14 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
service = get_connected_service(state, remoteport);
if (service)
dev_warn(state->dev,
- "core: %d: prs %s@%pK (%d->%d) - found connected service %d\n",
+ "core: %d: prs %s@%p (%d->%d) - found connected service %d\n",
state->id, msg_type_str(type), header,
remoteport, localport, service->localport);
}
if (!service) {
dev_err(state->dev,
- "core: %d: prs %s@%pK (%d->%d) - invalid/closed service %d\n",
+ "core: %d: prs %s@%p (%d->%d) - invalid/closed service %d\n",
state->id, msg_type_str(type), header, remoteport,
localport, localport);
goto skip_message;
@@ -2003,7 +2003,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
if (((unsigned long)header & VCHIQ_SLOT_MASK) +
calc_stride(size) > VCHIQ_SLOT_SIZE) {
- dev_err(state->dev, "core: header %pK (msgid %x) - size %x too big for slot\n",
+ dev_err(state->dev, "core: header %p (msgid %x) - size %x too big for slot\n",
header, (unsigned int)msgid, (unsigned int)size);
WARN(1, "oversized for slot\n");
}
@@ -2022,7 +2022,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
service->peer_version = payload->version;
}
dev_dbg(state->dev,
- "core: %d: prs OPENACK@%pK,%x (%d->%d) v:%d\n",
+ "core: %d: prs OPENACK@%p,%x (%d->%d) v:%d\n",
state->id, header, size, remoteport, localport,
service->peer_version);
if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
@@ -2037,7 +2037,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
case VCHIQ_MSG_CLOSE:
WARN_ON(size); /* There should be no data */
- dev_dbg(state->dev, "core: %d: prs CLOSE@%pK (%d->%d)\n",
+ dev_dbg(state->dev, "core: %d: prs CLOSE@%p (%d->%d)\n",
state->id, header, remoteport, localport);
mark_service_closing_internal(service, 1);
@@ -2049,7 +2049,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
&service->base.fourcc, service->localport, service->remoteport);
break;
case VCHIQ_MSG_DATA:
- dev_dbg(state->dev, "core: %d: prs DATA@%pK,%x (%d->%d)\n",
+ dev_dbg(state->dev, "core: %d: prs DATA@%p,%x (%d->%d)\n",
state->id, header, size, remoteport, localport);
if ((service->remoteport == remoteport) &&
@@ -2069,7 +2069,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
}
break;
case VCHIQ_MSG_CONNECT:
- dev_dbg(state->dev, "core: %d: prs CONNECT@%pK\n",
+ dev_dbg(state->dev, "core: %d: prs CONNECT@%p\n",
state->id, header);
state->version_common = ((struct vchiq_slot_zero *)
state->slot_data)->version;
@@ -2102,7 +2102,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
if ((int)(queue->remote_insert -
queue->local_insert) >= 0) {
dev_err(state->dev,
- "core: %d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)\n",
+ "core: %d: prs %s@%p (%d->%d) unexpected (ri=%d,li=%d)\n",
state->id, msg_type_str(type), header, remoteport,
localport, queue->remote_insert, queue->local_insert);
mutex_unlock(&service->bulk_mutex);
@@ -2120,7 +2120,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
bulk->actual = *(int *)header->data;
queue->remote_insert++;
- dev_dbg(state->dev, "core: %d: prs %s@%pK (%d->%d) %x@%pad\n",
+ dev_dbg(state->dev, "core: %d: prs %s@%p (%d->%d) %x@%pad\n",
state->id, msg_type_str(type), header, remoteport,
localport, bulk->actual, &bulk->dma_addr);
@@ -2140,12 +2140,12 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
}
break;
case VCHIQ_MSG_PADDING:
- dev_dbg(state->dev, "core: %d: prs PADDING@%pK,%x\n",
+ dev_dbg(state->dev, "core: %d: prs PADDING@%p,%x\n",
state->id, header, size);
break;
case VCHIQ_MSG_PAUSE:
/* If initiated, signal the application thread */
- dev_dbg(state->dev, "core: %d: prs PAUSE@%pK,%x\n",
+ dev_dbg(state->dev, "core: %d: prs PAUSE@%p,%x\n",
state->id, header, size);
if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
dev_err(state->dev, "core: %d: PAUSE received in state PAUSED\n",
@@ -2162,7 +2162,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
break;
case VCHIQ_MSG_RESUME:
- dev_dbg(state->dev, "core: %d: prs RESUME@%pK,%x\n",
+ dev_dbg(state->dev, "core: %d: prs RESUME@%p,%x\n",
state->id, header, size);
/* Release the slot mutex */
mutex_unlock(&state->slot_mutex);
@@ -2179,7 +2179,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
break;
default:
- dev_err(state->dev, "core: %d: prs invalid msgid %x@%pK,%x\n",
+ dev_err(state->dev, "core: %d: prs invalid msgid %x@%p,%x\n",
state->id, msgid, header, size);
WARN(1, "invalid message\n");
break;
@@ -2400,7 +2400,7 @@ sync_func(void *v)
if (!service) {
dev_err(state->dev,
- "sync: %d: sf %s@%pK (%d->%d) - invalid/closed service %d\n",
+ "sync: %d: sf %s@%p (%d->%d) - invalid/closed service %d\n",
state->id, msg_type_str(type), header, remoteport,
localport, localport);
release_message_sync(state, header);
@@ -2422,7 +2422,7 @@ sync_func(void *v)
header->data;
service->peer_version = payload->version;
}
- dev_err(state->dev, "sync: %d: sf OPENACK@%pK,%x (%d->%d) v:%d\n",
+ dev_err(state->dev, "sync: %d: sf OPENACK@%p,%x (%d->%d) v:%d\n",
state->id, header, size, remoteport, localport,
service->peer_version);
if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
@@ -2435,7 +2435,7 @@ sync_func(void *v)
break;
case VCHIQ_MSG_DATA:
- dev_dbg(state->dev, "sync: %d: sf DATA@%pK,%x (%d->%d)\n",
+ dev_dbg(state->dev, "sync: %d: sf DATA@%p,%x (%d->%d)\n",
state->id, header, size, remoteport, localport);
if ((service->remoteport == remoteport) &&
@@ -2449,7 +2449,7 @@ sync_func(void *v)
break;
default:
- dev_err(state->dev, "sync: error: %d: sf unexpected msgid %x@%pK,%x\n",
+ dev_err(state->dev, "sync: error: %d: sf unexpected msgid %x@%p,%x\n",
state->id, msgid, header, size);
release_message_sync(state, header);
break;
@@ -2926,13 +2926,13 @@ release_service_messages(struct vchiq_service *service)
int port = VCHIQ_MSG_DSTPORT(msgid);
if ((port == service->localport) && (msgid & VCHIQ_MSGID_CLAIMED)) {
- dev_dbg(state->dev, "core: fsi - hdr %pK\n", header);
+ dev_dbg(state->dev, "core: fsi - hdr %p\n", header);
release_slot(state, slot_info, header, NULL);
}
pos += calc_stride(header->size);
if (pos > VCHIQ_SLOT_SIZE) {
dev_err(state->dev,
- "core: fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x\n",
+ "core: fsi - pos %x: header %p, msgid %x, header->msgid %x, header->size %x\n",
pos, header, msgid, header->msgid, header->size);
WARN(1, "invalid slot position\n");
}
@@ -3091,7 +3091,7 @@ vchiq_bulk_xfer_queue_msg_killable(struct vchiq_service *service,
*/
wmb();
- dev_dbg(state->dev, "core: %d: bt (%d->%d) %cx %x@%pad %pK\n",
+ dev_dbg(state->dev, "core: %d: bt (%d->%d) %cx %x@%pad %p\n",
state->id, service->localport, service->remoteport,
dir_char, bulk->size, &bulk->dma_addr, bulk->cb_data);
@@ -3343,6 +3343,7 @@ vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instanc
return -EAGAIN;
vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
+ vchiq_platform_connected(state);
complete(&state->connect);
}
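On the blanket %pK -> %p conversion in this file: since v4.15 a plain %p in
printk already prints a hashed value (the raw address only appears with the
no_hash_pointers boot parameter), so %pK buys nothing extra in
dev_dbg()/dev_err() paths and the simpler specifier is preferred. E.g.:

	dev_dbg(dev, "header %p\n", header);	/* logs a per-boot hash */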
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 9b4e766990a4..3b5c0618e567 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -575,6 +575,8 @@ int vchiq_send_remote_use(struct vchiq_state *state);
int vchiq_send_remote_use_active(struct vchiq_state *state);
+void vchiq_platform_connected(struct vchiq_state *state);
+
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
enum vchiq_connstate oldstate,
enum vchiq_connstate newstate);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
index 454f43416503..3b20ba5c7362 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
@@ -270,7 +270,7 @@ static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
}
} else {
dev_err(service->state->dev,
- "arm: header %pK: bufsize %x < size %x\n",
+ "arm: header %p: bufsize %x < size %x\n",
header, args->bufsize, header->size);
WARN(1, "invalid size\n");
ret = -EMSGSIZE;
@@ -328,7 +328,7 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
ret = -ESRCH;
goto out;
}
- dev_dbg(service->state->dev, "arm: found bulk_waiter %pK for pid %d\n",
+ dev_dbg(service->state->dev, "arm: found bulk_waiter %p for pid %d\n",
waiter, current->pid);
status = vchiq_bulk_xfer_waiting(instance, args->handle,
@@ -366,7 +366,7 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
mutex_lock(&instance->bulk_waiter_list_mutex);
list_add(&waiter->list, &instance->bulk_waiter_list);
mutex_unlock(&instance->bulk_waiter_list_mutex);
- dev_dbg(service->state->dev, "arm: saved bulk_waiter %pK for pid %d\n",
+ dev_dbg(service->state->dev, "arm: saved bulk_waiter %p for pid %d\n",
waiter, current->pid);
ret = put_user(mode_waiting, mode);
@@ -512,7 +512,7 @@ static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
/* This must be a VCHIQ-style service */
if (args->msgbufsize < msglen) {
dev_err(service->state->dev,
- "arm: header %pK: msgbufsize %x < msglen %x\n",
+ "arm: header %p: msgbufsize %x < msglen %x\n",
header, args->msgbufsize, msglen);
WARN(1, "invalid message size\n");
if (ret == 0)
@@ -588,7 +588,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
long ret = 0;
int i, rc;
- dev_dbg(instance->state->dev, "arm: instance %pK, cmd %s, arg %lx\n", instance,
+ dev_dbg(instance->state->dev, "arm: instance %p, cmd %s, arg %lx\n", instance,
((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) && (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
@@ -874,12 +874,12 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (!status && (ret < 0) && (ret != -EINTR) && (ret != -EWOULDBLOCK)) {
dev_dbg(instance->state->dev,
- "arm: ioctl instance %pK, cmd %s -> status %d, %ld\n",
+ "arm: ioctl instance %p, cmd %s -> status %d, %ld\n",
instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
} else {
dev_dbg(instance->state->dev,
- "arm: ioctl instance %pK, cmd %s -> status %d\n, %ld\n",
+ "arm: ioctl instance %p, cmd %s -> status %d\n, %ld\n",
instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
}
diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c
index 270982740fde..f46f2ddc174e 100644
--- a/drivers/thermal/broadcom/brcmstb_thermal.c
+++ b/drivers/thermal/broadcom/brcmstb_thermal.c
@@ -286,14 +286,20 @@ static int brcmstb_set_trips(struct thermal_zone_device *tz, int low, int high)
return 0;
}
-static const struct thermal_zone_device_ops brcmstb_16nm_of_ops = {
+static const struct thermal_zone_device_ops brcmstb_of_ops = {
.get_temp = brcmstb_get_temp,
};
+static const struct brcmstb_thermal_params brcmstb_8nm_params = {
+ .offset = 418670,
+ .mult = 509,
+ .of_ops = &brcmstb_of_ops,
+};
+
static const struct brcmstb_thermal_params brcmstb_16nm_params = {
.offset = 457829,
.mult = 557,
- .of_ops = &brcmstb_16nm_of_ops,
+ .of_ops = &brcmstb_of_ops,
};
static const struct thermal_zone_device_ops brcmstb_28nm_of_ops = {
@@ -308,6 +314,7 @@ static const struct brcmstb_thermal_params brcmstb_28nm_params = {
};
static const struct of_device_id brcmstb_thermal_id_table[] = {
+ { .compatible = "brcm,avs-tmon-bcm74110", .data = &brcmstb_8nm_params },
{ .compatible = "brcm,avs-tmon-bcm7216", .data = &brcmstb_16nm_params },
{ .compatible = "brcm,avs-tmon", .data = &brcmstb_28nm_params },
{},
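The new bcm74110 (8 nm) entry reuses the generic brcmstb_of_ops (the old
brcmstb_16nm_of_ops is renamed, not changed) and only supplies different
constants. A hedged sketch of the conversion those constants feed, assuming
the driver's usual linear mapping temp = offset - code * mult in
millidegrees:

	static int tmon_code_to_mC(u32 code, int offset, int mult)
	{
		return offset - (int)(code * mult);
	}

	/* e.g. tmon_code_to_mC(700, 418670, 509) == 62370, i.e. ~62.4 C */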
diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
index 07f7f3b7a2fb..088481d91e6e 100644
--- a/drivers/thermal/mediatek/lvts_thermal.c
+++ b/drivers/thermal/mediatek/lvts_thermal.c
@@ -65,12 +65,15 @@
#define LVTS_HW_FILTER 0x0
#define LVTS_TSSEL_CONF 0x13121110
#define LVTS_CALSCALE_CONF 0x300
-#define LVTS_MONINT_CONF 0x8300318C
-#define LVTS_MONINT_OFFSET_SENSOR0 0xC
-#define LVTS_MONINT_OFFSET_SENSOR1 0x180
-#define LVTS_MONINT_OFFSET_SENSOR2 0x3000
-#define LVTS_MONINT_OFFSET_SENSOR3 0x3000000
+#define LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR0 BIT(3)
+#define LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR1 BIT(8)
+#define LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR2 BIT(13)
+#define LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR3 BIT(25)
+#define LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR0 BIT(2)
+#define LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR1 BIT(7)
+#define LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR2 BIT(12)
+#define LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR3 BIT(24)
#define LVTS_INT_SENSOR0 0x0009001F
#define LVTS_INT_SENSOR1 0x001203E0
@@ -91,8 +94,6 @@
#define LVTS_MSR_READ_TIMEOUT_US 400
#define LVTS_MSR_READ_WAIT_US (LVTS_MSR_READ_TIMEOUT_US / 2)
-#define LVTS_HW_TSHUT_TEMP 105000
-
#define LVTS_MINIMUM_THRESHOLD 20000
static int golden_temp = LVTS_GOLDEN_TEMP_DEFAULT;
@@ -145,7 +146,6 @@ struct lvts_ctrl {
struct lvts_sensor sensors[LVTS_SENSOR_MAX];
const struct lvts_data *lvts_data;
u32 calibration[LVTS_SENSOR_MAX];
- u32 hw_tshut_raw_temp;
u8 valid_sensor_mask;
int mode;
void __iomem *base;
@@ -329,23 +329,41 @@ static int lvts_get_temp(struct thermal_zone_device *tz, int *temp)
static void lvts_update_irq_mask(struct lvts_ctrl *lvts_ctrl)
{
- static const u32 masks[] = {
- LVTS_MONINT_OFFSET_SENSOR0,
- LVTS_MONINT_OFFSET_SENSOR1,
- LVTS_MONINT_OFFSET_SENSOR2,
- LVTS_MONINT_OFFSET_SENSOR3,
+ static const u32 high_offset_inten_masks[] = {
+ LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR0,
+ LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR1,
+ LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR2,
+ LVTS_MONINT_OFFSET_HIGH_INTEN_SENSOR3,
+ };
+ static const u32 low_offset_inten_masks[] = {
+ LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR0,
+ LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR1,
+ LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR2,
+ LVTS_MONINT_OFFSET_LOW_INTEN_SENSOR3,
};
u32 value = 0;
int i;
value = readl(LVTS_MONINT(lvts_ctrl->base));
- for (i = 0; i < ARRAY_SIZE(masks); i++) {
+ lvts_for_each_valid_sensor(i, lvts_ctrl) {
if (lvts_ctrl->sensors[i].high_thresh == lvts_ctrl->high_thresh
- && lvts_ctrl->sensors[i].low_thresh == lvts_ctrl->low_thresh)
- value |= masks[i];
- else
- value &= ~masks[i];
+ && lvts_ctrl->sensors[i].low_thresh == lvts_ctrl->low_thresh) {
+ /*
+ * The minimum threshold needs to be configured in the
+ * OFFSETL register to get working interrupts, but we
+ * don't actually want to generate interrupts when
+ * crossing it.
+ */
+ if (lvts_ctrl->low_thresh == -INT_MAX) {
+ value &= ~low_offset_inten_masks[i];
+ value |= high_offset_inten_masks[i];
+ } else {
+ value |= low_offset_inten_masks[i] | high_offset_inten_masks[i];
+ }
+ } else {
+ value &= ~(low_offset_inten_masks[i] | high_offset_inten_masks[i]);
+ }
}
writel(value, LVTS_MONINT(lvts_ctrl->base));
@@ -837,14 +855,6 @@ static int lvts_ctrl_init(struct device *dev, struct lvts_domain *lvts_td,
*/
lvts_ctrl[i].mode = lvts_data->lvts_ctrl[i].mode;
- /*
- * The temperature to raw temperature must be done
- * after initializing the calibration.
- */
- lvts_ctrl[i].hw_tshut_raw_temp =
- lvts_temp_to_raw(LVTS_HW_TSHUT_TEMP,
- lvts_data->temp_factor);
-
lvts_ctrl[i].low_thresh = INT_MIN;
lvts_ctrl[i].high_thresh = INT_MIN;
}
@@ -860,6 +870,32 @@ static int lvts_ctrl_init(struct device *dev, struct lvts_domain *lvts_td,
return 0;
}
+static void lvts_ctrl_monitor_enable(struct device *dev, struct lvts_ctrl *lvts_ctrl, bool enable)
+{
+ /*
+ * Bitmaps to enable each sensor on filtered mode in the MONCTL0
+ * register.
+ */
+ static const u8 sensor_filt_bitmap[] = { BIT(0), BIT(1), BIT(2), BIT(3) };
+ u32 sensor_map = 0;
+ int i;
+
+ if (lvts_ctrl->mode != LVTS_MSR_FILTERED_MODE)
+ return;
+
+ if (enable) {
+ lvts_for_each_valid_sensor(i, lvts_ctrl)
+ sensor_map |= sensor_filt_bitmap[i];
+ }
+
+ /*
+ * Bits:
+ * 9: Single point access flow
+ * 0-3: Enable sensing point 0-3
+ */
+ writel(sensor_map | BIT(9), LVTS_MONCTL0(lvts_ctrl->base));
+}
+
/*
* At this point the configuration register is the only place in the
* driver where we write multiple values. Per hardware constraint,
@@ -893,7 +929,6 @@ static int lvts_irq_init(struct lvts_ctrl *lvts_ctrl)
* 10 : Selected sensor with bits 19-18
* 11 : Reserved
*/
- writel(BIT(16), LVTS_PROTCTL(lvts_ctrl->base));
/*
* LVTS_PROTTA : Stage 1 temperature threshold
@@ -906,8 +941,8 @@ static int lvts_irq_init(struct lvts_ctrl *lvts_ctrl)
*
* writel(0x0, LVTS_PROTTA(lvts_ctrl->base));
* writel(0x0, LVTS_PROTTB(lvts_ctrl->base));
+ * writel(0x0, LVTS_PROTTC(lvts_ctrl->base));
*/
- writel(lvts_ctrl->hw_tshut_raw_temp, LVTS_PROTTC(lvts_ctrl->base));
/*
* LVTS_MONINT : Interrupt configuration register
@@ -915,7 +950,7 @@ static int lvts_irq_init(struct lvts_ctrl *lvts_ctrl)
* The LVTS_MONINT register layout is the same as the LVTS_MONINTSTS
* register, except we set the bits to enable the interrupt.
*/
- writel(LVTS_MONINT_CONF, LVTS_MONINT(lvts_ctrl->base));
+ writel(0, LVTS_MONINT(lvts_ctrl->base));
return 0;
}
@@ -1381,8 +1416,11 @@ static int lvts_suspend(struct device *dev)
lvts_td = dev_get_drvdata(dev);
- for (i = 0; i < lvts_td->num_lvts_ctrl; i++)
+ for (i = 0; i < lvts_td->num_lvts_ctrl; i++) {
+ lvts_ctrl_monitor_enable(dev, &lvts_td->lvts_ctrl[i], false);
+ usleep_range(100, 200);
lvts_ctrl_set_enable(&lvts_td->lvts_ctrl[i], false);
+ }
clk_disable_unprepare(lvts_td->clk);
@@ -1400,8 +1438,11 @@ static int lvts_resume(struct device *dev)
if (ret)
return ret;
- for (i = 0; i < lvts_td->num_lvts_ctrl; i++)
+ for (i = 0; i < lvts_td->num_lvts_ctrl; i++) {
lvts_ctrl_set_enable(&lvts_td->lvts_ctrl[i], true);
+ usleep_range(100, 200);
+ lvts_ctrl_monitor_enable(dev, &lvts_td->lvts_ctrl[i], true);
+ }
return 0;
}
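Two things happen in the LVTS rework above: the MONINT interrupt-enable bits
are now tracked per sensor and per direction, and filtered-mode sensing is
paused before a controller is disabled across suspend/resume. For the first
part, a standalone sketch of the mask policy (bit positions taken from the
new defines; BIT() defined locally for a standalone build, matching
<linux/bits.h>):

	#define BIT(n) (1U << (n))

	static u32 monint_bits(int i, bool low_is_sentinel)
	{
		static const u32 hi[] = { BIT(3), BIT(8), BIT(13), BIT(25) };
		static const u32 lo[] = { BIT(2), BIT(7), BIT(12), BIT(24) };

		/*
		 * A low threshold of -INT_MAX is only programmed to satisfy
		 * the hardware; its interrupt stays masked so spurious
		 * low-crossing events are never raised.
		 */
		return low_is_sentinel ? hi[i] : (hi[i] | lo[i]);
	}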
diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
index c2d59cbfaea9..a81e7d6e865f 100644
--- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
@@ -360,7 +360,6 @@ static int qpnp_tm_probe(struct platform_device *pdev)
if (!chip)
return -ENOMEM;
- dev_set_drvdata(&pdev->dev, chip);
chip->dev = &pdev->dev;
mutex_init(&chip->lock);
diff --git a/drivers/thermal/qcom/tsens-v2.c b/drivers/thermal/qcom/tsens-v2.c
index 0cb7301eca6e..8d9698ea3ec4 100644
--- a/drivers/thermal/qcom/tsens-v2.c
+++ b/drivers/thermal/qcom/tsens-v2.c
@@ -4,13 +4,32 @@
* Copyright (c) 2018, Linaro Limited
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/nvmem-consumer.h>
#include <linux/regmap.h>
#include "tsens.h"
/* ----- SROT ------ */
#define SROT_HW_VER_OFF 0x0000
#define SROT_CTRL_OFF 0x0004
+#define SROT_MEASURE_PERIOD 0x0008
+#define SROT_Sn_CONVERSION 0x0060
+#define V2_SHIFT_DEFAULT 0x0003
+#define V2_SLOPE_DEFAULT 0x0cd0
+#define V2_CZERO_DEFAULT 0x016a
+#define ONE_PT_SLOPE 0x0cd0
+#define TWO_PT_SHIFTED_GAIN 921600
+#define ONE_PT_CZERO_CONST 94
+#define SW_RST_DEASSERT 0x0
+#define SW_RST_ASSERT 0x1
+#define MEASURE_PERIOD_2mSEC 0x1
+#define RESULT_FORMAT_TEMP 0x1
+#define TSENS_ENABLE 0x1
+#define SENSOR_CONVERSION(n) (((n) * 4) + SROT_Sn_CONVERSION)
+#define CONVERSION_SHIFT_MASK GENMASK(24, 23)
+#define CONVERSION_SLOPE_MASK GENMASK(22, 10)
+#define CONVERSION_CZERO_MASK GENMASK(9, 0)
/* ----- TM ------ */
#define TM_INT_EN_OFF 0x0004
@@ -50,6 +69,17 @@ static struct tsens_features ipq8074_feat = {
.trip_max_temp = 204000,
};
+static struct tsens_features ipq5332_feat = {
+ .ver_major = VER_2_X_NO_RPM,
+ .crit_int = 1,
+ .combo_int = 1,
+ .adc = 0,
+ .srot_split = 1,
+ .max_sensors = 16,
+ .trip_min_temp = 0,
+ .trip_max_temp = 204000,
+};
+
static const struct reg_field tsens_v2_regfields[MAX_REGFIELDS] = {
/* ----- SROT ------ */
/* VERSION */
@@ -59,6 +89,10 @@ static const struct reg_field tsens_v2_regfields[MAX_REGFIELDS] = {
/* CTRL_OFF */
[TSENS_EN] = REG_FIELD(SROT_CTRL_OFF, 0, 0),
[TSENS_SW_RST] = REG_FIELD(SROT_CTRL_OFF, 1, 1),
+ [SENSOR_EN] = REG_FIELD(SROT_CTRL_OFF, 3, 18),
+ [CODE_OR_TEMP] = REG_FIELD(SROT_CTRL_OFF, 21, 21),
+
+ [MAIN_MEASURE_PERIOD] = REG_FIELD(SROT_MEASURE_PERIOD, 0, 7),
/* ----- TM ------ */
/* INTERRUPT ENABLE */
@@ -104,6 +138,128 @@ static const struct reg_field tsens_v2_regfields[MAX_REGFIELDS] = {
[TRDY] = REG_FIELD(TM_TRDY_OFF, 0, 0),
};
+static int tsens_v2_calibrate_sensor(struct device *dev, struct tsens_sensor *sensor,
+ struct regmap *map, u32 mode, u32 base0, u32 base1)
+{
+ u32 shift = V2_SHIFT_DEFAULT;
+ u32 slope = V2_SLOPE_DEFAULT;
+ u32 czero = V2_CZERO_DEFAULT;
+ char name[20];
+ u32 val;
+ int ret;
+
+ /* Read offset value */
+ ret = snprintf(name, sizeof(name), "tsens_sens%d_off", sensor->hw_id);
+ if (ret < 0)
+ return ret;
+
+ ret = nvmem_cell_read_variable_le_u32(dev, name, &sensor->offset);
+ if (ret)
+ return ret;
+
+ /* Based on calib mode, program SHIFT, SLOPE and CZERO */
+ switch (mode) {
+ case TWO_PT_CALIB:
+ slope = (TWO_PT_SHIFTED_GAIN / (base1 - base0));
+
+ czero = (base0 + sensor->offset - ((base1 - base0) / 3));
+
+ break;
+ case ONE_PT_CALIB2:
+ czero = base0 + sensor->offset - ONE_PT_CZERO_CONST;
+
+ slope = ONE_PT_SLOPE;
+
+ break;
+ default:
+ dev_dbg(dev, "calibrationless mode\n");
+ }
+
+ val = FIELD_PREP(CONVERSION_SHIFT_MASK, shift) |
+ FIELD_PREP(CONVERSION_SLOPE_MASK, slope) |
+ FIELD_PREP(CONVERSION_CZERO_MASK, czero);
+
+ regmap_write(map, SENSOR_CONVERSION(sensor->hw_id), val);
+
+ return 0;
+}
+
+static int tsens_v2_calibration(struct tsens_priv *priv)
+{
+ struct device *dev = priv->dev;
+ u32 mode, base0, base1;
+ int i, ret;
+
+ if (priv->num_sensors > MAX_SENSORS)
+ return -EINVAL;
+
+ ret = nvmem_cell_read_variable_le_u32(priv->dev, "mode", &mode);
+ if (ret == -ENOENT)
+ dev_warn(priv->dev, "Calibration data not present in DT\n");
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(priv->dev, "calibration mode is %d\n", mode);
+
+ ret = nvmem_cell_read_variable_le_u32(priv->dev, "base0", &base0);
+ if (ret < 0)
+ return ret;
+
+ ret = nvmem_cell_read_variable_le_u32(priv->dev, "base1", &base1);
+ if (ret < 0)
+ return ret;
+
+ /* Calibrate each sensor */
+ for (i = 0; i < priv->num_sensors; i++) {
+ ret = tsens_v2_calibrate_sensor(dev, &priv->sensor[i], priv->srot_map,
+ mode, base0, base1);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init init_tsens_v2_no_rpm(struct tsens_priv *priv)
+{
+ struct device *dev = priv->dev;
+ int i, ret;
+ u32 val = 0;
+
+ ret = init_common(priv);
+ if (ret < 0)
+ return ret;
+
+ priv->rf[CODE_OR_TEMP] = devm_regmap_field_alloc(dev, priv->srot_map,
+ priv->fields[CODE_OR_TEMP]);
+ if (IS_ERR(priv->rf[CODE_OR_TEMP]))
+ return PTR_ERR(priv->rf[CODE_OR_TEMP]);
+
+ priv->rf[MAIN_MEASURE_PERIOD] = devm_regmap_field_alloc(dev, priv->srot_map,
+ priv->fields[MAIN_MEASURE_PERIOD]);
+ if (IS_ERR(priv->rf[MAIN_MEASURE_PERIOD]))
+ return PTR_ERR(priv->rf[MAIN_MEASURE_PERIOD]);
+
+ regmap_field_write(priv->rf[TSENS_SW_RST], SW_RST_ASSERT);
+
+ regmap_field_write(priv->rf[MAIN_MEASURE_PERIOD], MEASURE_PERIOD_2mSEC);
+
+ /* Enable available sensors */
+ for (i = 0; i < priv->num_sensors; i++)
+ val |= 1 << priv->sensor[i].hw_id;
+
+ regmap_field_write(priv->rf[SENSOR_EN], val);
+
+ /* Select temperature format, unit is deci-Celsius */
+ regmap_field_write(priv->rf[CODE_OR_TEMP], RESULT_FORMAT_TEMP);
+
+ regmap_field_write(priv->rf[TSENS_SW_RST], SW_RST_DEASSERT);
+
+ regmap_field_write(priv->rf[TSENS_EN], TSENS_ENABLE);
+
+ return 0;
+}
+
static const struct tsens_ops ops_generic_v2 = {
.init = init_common,
.get_temp = get_temp_tsens_valid,
@@ -122,6 +278,28 @@ struct tsens_plat_data data_ipq8074 = {
.fields = tsens_v2_regfields,
};
+static const struct tsens_ops ops_ipq5332 = {
+ .init = init_tsens_v2_no_rpm,
+ .get_temp = get_temp_tsens_valid,
+ .calibrate = tsens_v2_calibration,
+};
+
+const struct tsens_plat_data data_ipq5332 = {
+ .num_sensors = 5,
+ .ops = &ops_ipq5332,
+ .hw_ids = (unsigned int []){11, 12, 13, 14, 15},
+ .feat = &ipq5332_feat,
+ .fields = tsens_v2_regfields,
+};
+
+const struct tsens_plat_data data_ipq5424 = {
+ .num_sensors = 7,
+ .ops = &ops_ipq5332,
+ .hw_ids = (unsigned int []){9, 10, 11, 12, 13, 14, 15},
+ .feat = &ipq5332_feat,
+ .fields = tsens_v2_regfields,
+};
+
/* Kept around for backward compatibility with old msm8996.dtsi */
struct tsens_plat_data data_8996 = {
.num_sensors = 13,
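The calibration support added for the RPM-less v2 parts packs SHIFT, SLOPE
and CZERO into one conversion register per sensor. A worked sketch using the
constants from the patch (the fuse values base0/base1/offset below are made
up for illustration):

	/* TWO_PT_CALIB with base0 = 400, base1 = 600, offset = 6 */
	u32 shift = 0x3;			/* V2_SHIFT_DEFAULT */
	u32 slope = 921600 / (600 - 400);	/* TWO_PT_SHIFTED_GAIN -> 4608 */
	u32 czero = 400 + 6 - (600 - 400) / 3;	/* -> 340 */

	val = FIELD_PREP(GENMASK(24, 23), shift) |
	      FIELD_PREP(GENMASK(22, 10), slope) |
	      FIELD_PREP(GENMASK(9, 0), czero);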
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index 3aa3736181aa..1f5d4de017d9 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -975,7 +975,7 @@ int __init init_common(struct tsens_priv *priv)
ret = regmap_field_read(priv->rf[TSENS_EN], &enabled);
if (ret)
goto err_put_device;
- if (!enabled) {
+ if (!enabled && (tsens_version(priv) != VER_2_X_NO_RPM)) {
dev_err(dev, "%s: device not enabled\n", __func__);
ret = -ENODEV;
goto err_put_device;
@@ -1102,6 +1102,12 @@ static SIMPLE_DEV_PM_OPS(tsens_pm_ops, tsens_suspend, tsens_resume);
static const struct of_device_id tsens_table[] = {
{
+ .compatible = "qcom,ipq5332-tsens",
+ .data = &data_ipq5332,
+ }, {
+ .compatible = "qcom,ipq5424-tsens",
+ .data = &data_ipq5424,
+ }, {
.compatible = "qcom,ipq8064-tsens",
.data = &data_8960,
}, {
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index 7b36a0318fa6..336bc868fd7c 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -35,6 +35,7 @@ enum tsens_ver {
VER_0_1,
VER_1_X,
VER_2_X,
+ VER_2_X_NO_RPM,
};
enum tsens_irq_type {
@@ -168,6 +169,7 @@ enum regfield_ids {
TSENS_SW_RST,
SENSOR_EN,
CODE_OR_TEMP,
+ MAIN_MEASURE_PERIOD,
/* ----- TM ------ */
/* TRDY */
@@ -651,5 +653,6 @@ extern struct tsens_plat_data data_tsens_v1, data_8937, data_8976, data_8956;
/* TSENS v2 targets */
extern struct tsens_plat_data data_8996, data_ipq8074, data_tsens_v2;
+extern const struct tsens_plat_data data_ipq5332, data_ipq5424;
#endif /* __QCOM_TSENS_H__ */
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index 52e26be8c53d..01b58be0dcc6 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -18,6 +18,7 @@
#define SITES_MAX 16
#define TMR_DISABLE 0x0
#define TMR_ME 0x80000000
+#define TMR_CMD BIT(29)
#define TMR_ALPF 0x0c000000
#define TMR_ALPF_V2 0x03000000
#define TMTMIR_DEFAULT 0x0000000f
@@ -265,7 +266,6 @@ static void qoriq_tmu_action(void *p)
struct qoriq_tmu_data *data = p;
regmap_write(data->regmap, REGS_TMR, TMR_DISABLE);
- clk_disable_unprepare(data->clk);
}
static int qoriq_tmu_probe(struct platform_device *pdev)
@@ -296,38 +296,27 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
base = devm_platform_ioremap_resource(pdev, 0);
ret = PTR_ERR_OR_ZERO(base);
- if (ret) {
- dev_err(dev, "Failed to get memory region\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get memory region\n");
data->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
ret = PTR_ERR_OR_ZERO(data->regmap);
- if (ret) {
- dev_err(dev, "Failed to init regmap (%d)\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init regmap\n");
- data->clk = devm_clk_get_optional(dev, NULL);
+ data->clk = devm_clk_get_optional_enabled(dev, NULL);
if (IS_ERR(data->clk))
return PTR_ERR(data->clk);
- ret = clk_prepare_enable(data->clk);
- if (ret) {
- dev_err(dev, "Failed to enable clock\n");
- return ret;
- }
-
ret = devm_add_action_or_reset(dev, qoriq_tmu_action, data);
if (ret)
return ret;
/* version register offset at: 0xbf8 on both v1 and v2 */
ret = regmap_read(data->regmap, REGS_IPBRR(0), &ver);
- if (ret) {
- dev_err(&pdev->dev, "Failed to read IP block version\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read IP block version\n");
+
data->ver = (ver >> 8) & 0xff;
qoriq_tmu_init_device(data); /* TMU initialization */
@@ -337,10 +326,8 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
return ret;
ret = qoriq_tmu_register_tmu_zone(dev, data);
- if (ret < 0) {
- dev_err(dev, "Failed to register sensors\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to register sensors\n");
platform_set_drvdata(pdev, data);
@@ -356,6 +343,12 @@ static int qoriq_tmu_suspend(struct device *dev)
if (ret)
return ret;
+ if (data->ver > TMU_VER1) {
+ ret = regmap_set_bits(data->regmap, REGS_TMR, TMR_CMD);
+ if (ret)
+ return ret;
+ }
+
clk_disable_unprepare(data->clk);
return 0;
@@ -370,6 +363,12 @@ static int qoriq_tmu_resume(struct device *dev)
if (ret)
return ret;
+ if (data->ver > TMU_VER1) {
+ ret = regmap_clear_bits(data->regmap, REGS_TMR, TMR_CMD);
+ if (ret)
+ return ret;
+ }
+
/* Enable monitoring */
return regmap_update_bits(data->regmap, REGS_TMR, TMR_ME, TMR_ME);
}
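The qoriq conversion leans on two helpers worth spelling out:
devm_clk_get_optional_enabled() folds get + prepare_enable + automatic
disable/unprepare on detach into one call (hence the clock handling
disappearing from qoriq_tmu_action()), and dev_err_probe() logs the failure,
records the reason for -EPROBE_DEFER, and returns the error in a single
statement. An illustrative combination of the two (hypothetical error path,
not a hunk from this patch):

	data->clk = devm_clk_get_optional_enabled(dev, NULL);
	if (IS_ERR(data->clk))
		return dev_err_probe(dev, PTR_ERR(data->clk),
				     "Failed to get clock\n");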
diff --git a/drivers/thermal/renesas/rcar_gen3_thermal.c b/drivers/thermal/renesas/rcar_gen3_thermal.c
index 1ec169aeacfc..24a702ee4c1f 100644
--- a/drivers/thermal/renesas/rcar_gen3_thermal.c
+++ b/drivers/thermal/renesas/rcar_gen3_thermal.c
@@ -21,11 +21,11 @@
/* Register offsets */
#define REG_GEN3_IRQSTR 0x04
#define REG_GEN3_IRQMSK 0x08
-#define REG_GEN3_IRQCTL 0x0C
+#define REG_GEN3_IRQCTL 0x0c
#define REG_GEN3_IRQEN 0x10
#define REG_GEN3_IRQTEMP1 0x14
#define REG_GEN3_IRQTEMP2 0x18
-#define REG_GEN3_IRQTEMP3 0x1C
+#define REG_GEN3_IRQTEMP3 0x1c
#define REG_GEN3_THCTR 0x20
#define REG_GEN3_TEMP 0x28
#define REG_GEN3_THCODE1 0x50
@@ -38,9 +38,9 @@
#define REG_GEN4_THSFMON00 0x180
#define REG_GEN4_THSFMON01 0x184
#define REG_GEN4_THSFMON02 0x188
-#define REG_GEN4_THSFMON15 0x1BC
-#define REG_GEN4_THSFMON16 0x1C0
-#define REG_GEN4_THSFMON17 0x1C4
+#define REG_GEN4_THSFMON15 0x1bc
+#define REG_GEN4_THSFMON16 0x1c0
+#define REG_GEN4_THSFMON17 0x1c4
/* IRQ{STR,MSK,EN} bits */
#define IRQ_TEMP1 BIT(0)
@@ -57,21 +57,27 @@
/* THSCP bits */
#define THSCP_COR_PARA_VLD (BIT(15) | BIT(14))
-#define CTEMP_MASK 0xFFF
+#define CTEMP_MASK 0xfff
#define MCELSIUS(temp) ((temp) * 1000)
-#define GEN3_FUSE_MASK 0xFFF
-#define GEN4_FUSE_MASK 0xFFF
+#define GEN3_FUSE_MASK 0xfff
+#define GEN4_FUSE_MASK 0xfff
#define TSC_MAX_NUM 5
struct rcar_gen3_thermal_priv;
+struct rcar_gen3_thermal_fuse_info {
+ u32 ptat[3];
+ u32 thcode[3];
+ u32 mask;
+};
+
struct rcar_thermal_info {
int scale;
int adj_below;
int adj_above;
- void (*read_fuses)(struct rcar_gen3_thermal_priv *priv);
+ const struct rcar_gen3_thermal_fuse_info *fuses;
};
struct equation_set_coef {
@@ -253,59 +259,31 @@ static irqreturn_t rcar_gen3_thermal_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static void rcar_gen3_thermal_read_fuses_gen3(struct rcar_gen3_thermal_priv *priv)
+static void rcar_gen3_thermal_fetch_fuses(struct rcar_gen3_thermal_priv *priv)
{
- unsigned int i;
+ const struct rcar_gen3_thermal_fuse_info *fuses = priv->info->fuses;
/*
* Set the pseudo calibration points with fused values.
* PTAT is shared between all TSCs but only fused for the first
* TSC while THCODEs are fused for each TSC.
*/
- priv->ptat[0] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_PTAT1) &
- GEN3_FUSE_MASK;
- priv->ptat[1] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_PTAT2) &
- GEN3_FUSE_MASK;
- priv->ptat[2] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_PTAT3) &
- GEN3_FUSE_MASK;
-
- for (i = 0; i < priv->num_tscs; i++) {
+ priv->ptat[0] = rcar_gen3_thermal_read(priv->tscs[0], fuses->ptat[0])
+ & fuses->mask;
+ priv->ptat[1] = rcar_gen3_thermal_read(priv->tscs[0], fuses->ptat[1])
+ & fuses->mask;
+ priv->ptat[2] = rcar_gen3_thermal_read(priv->tscs[0], fuses->ptat[2])
+ & fuses->mask;
+
+ for (unsigned int i = 0; i < priv->num_tscs; i++) {
struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];
- tsc->thcode[0] = rcar_gen3_thermal_read(tsc, REG_GEN3_THCODE1) &
- GEN3_FUSE_MASK;
- tsc->thcode[1] = rcar_gen3_thermal_read(tsc, REG_GEN3_THCODE2) &
- GEN3_FUSE_MASK;
- tsc->thcode[2] = rcar_gen3_thermal_read(tsc, REG_GEN3_THCODE3) &
- GEN3_FUSE_MASK;
- }
-}
-
-static void rcar_gen3_thermal_read_fuses_gen4(struct rcar_gen3_thermal_priv *priv)
-{
- unsigned int i;
-
- /*
- * Set the pseudo calibration points with fused values.
- * PTAT is shared between all TSCs but only fused for the first
- * TSC while THCODEs are fused for each TSC.
- */
- priv->ptat[0] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN4_THSFMON16) &
- GEN4_FUSE_MASK;
- priv->ptat[1] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN4_THSFMON17) &
- GEN4_FUSE_MASK;
- priv->ptat[2] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN4_THSFMON15) &
- GEN4_FUSE_MASK;
-
- for (i = 0; i < priv->num_tscs; i++) {
- struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];
-
- tsc->thcode[0] = rcar_gen3_thermal_read(tsc, REG_GEN4_THSFMON01) &
- GEN4_FUSE_MASK;
- tsc->thcode[1] = rcar_gen3_thermal_read(tsc, REG_GEN4_THSFMON02) &
- GEN4_FUSE_MASK;
- tsc->thcode[2] = rcar_gen3_thermal_read(tsc, REG_GEN4_THSFMON00) &
- GEN4_FUSE_MASK;
+ tsc->thcode[0] = rcar_gen3_thermal_read(tsc, fuses->thcode[0])
+ & fuses->mask;
+ tsc->thcode[1] = rcar_gen3_thermal_read(tsc, fuses->thcode[1])
+ & fuses->mask;
+ tsc->thcode[2] = rcar_gen3_thermal_read(tsc, fuses->thcode[2])
+ & fuses->mask;
}
}
@@ -316,7 +294,7 @@ static bool rcar_gen3_thermal_read_fuses(struct rcar_gen3_thermal_priv *priv)
/* If fuses are not set, fallback to pseudo values. */
thscp = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_THSCP);
- if (!priv->info->read_fuses ||
+ if (!priv->info->fuses ||
(thscp & THSCP_COR_PARA_VLD) != THSCP_COR_PARA_VLD) {
/* Default THCODE values in case FUSEs are not set. */
static const int thcodes[TSC_MAX_NUM][3] = {
@@ -342,7 +320,8 @@ static bool rcar_gen3_thermal_read_fuses(struct rcar_gen3_thermal_priv *priv)
return false;
}
- priv->info->read_fuses(priv);
+ rcar_gen3_thermal_fetch_fuses(priv);
+
return true;
}
@@ -370,25 +349,37 @@ static void rcar_gen3_thermal_init(struct rcar_gen3_thermal_priv *priv,
usleep_range(1000, 2000);
}
+static const struct rcar_gen3_thermal_fuse_info rcar_gen3_thermal_fuse_info_gen3 = {
+ .ptat = { REG_GEN3_PTAT1, REG_GEN3_PTAT2, REG_GEN3_PTAT3 },
+ .thcode = { REG_GEN3_THCODE1, REG_GEN3_THCODE2, REG_GEN3_THCODE3 },
+ .mask = GEN3_FUSE_MASK,
+};
+
+static const struct rcar_gen3_thermal_fuse_info rcar_gen3_thermal_fuse_info_gen4 = {
+ .ptat = { REG_GEN4_THSFMON16, REG_GEN4_THSFMON17, REG_GEN4_THSFMON15 },
+ .thcode = { REG_GEN4_THSFMON01, REG_GEN4_THSFMON02, REG_GEN4_THSFMON00 },
+ .mask = GEN4_FUSE_MASK,
+};
+
static const struct rcar_thermal_info rcar_m3w_thermal_info = {
.scale = 157,
.adj_below = -41,
.adj_above = 116,
- .read_fuses = rcar_gen3_thermal_read_fuses_gen3,
+ .fuses = &rcar_gen3_thermal_fuse_info_gen3,
};
static const struct rcar_thermal_info rcar_gen3_thermal_info = {
.scale = 167,
.adj_below = -41,
.adj_above = 126,
- .read_fuses = rcar_gen3_thermal_read_fuses_gen3,
+ .fuses = &rcar_gen3_thermal_fuse_info_gen3,
};
static const struct rcar_thermal_info rcar_gen4_thermal_info = {
.scale = 167,
.adj_below = -41,
.adj_above = 126,
- .read_fuses = rcar_gen3_thermal_read_fuses_gen4,
+ .fuses = &rcar_gen3_thermal_fuse_info_gen4,
};
static const struct of_device_id rcar_gen3_thermal_dt_ids[] = {
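After the table-driven refactor above, enabling fuse reads on a future SoC
generation becomes a data-only change; the shared
rcar_gen3_thermal_fetch_fuses() walks the table. Hypothetical sketch (all
REG_GEN5_* names are invented for illustration):

	static const struct rcar_gen3_thermal_fuse_info fuse_info_gen5 = {
		.ptat   = { REG_GEN5_PTAT1, REG_GEN5_PTAT2, REG_GEN5_PTAT3 },
		.thcode = { REG_GEN5_THCODE1, REG_GEN5_THCODE2,
			    REG_GEN5_THCODE3 },
		.mask   = 0xfff,
	};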
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index f551df48eef9..a8ad85feb68f 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -386,6 +386,7 @@ static const struct tsadc_table rk3328_code_table[] = {
{296, -40000},
{304, -35000},
{313, -30000},
+ {322, -25000},
{331, -20000},
{340, -15000},
{349, -10000},
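The added {322, -25000} row fills the one missing 5 °C step in the rk3328
code table. A self-contained sketch of the lookup it feeds, assumed to
mirror the driver's linear interpolation between adjacent rows (temp in
millidegrees, ARRAY_SIZE() as in <linux/kernel.h>):

	struct tsadc_row { unsigned int code; int temp; };

	static const struct tsadc_row rows[] = {
		{313, -30000}, {322, -25000}, {331, -20000},
	};

	static int code_to_temp(unsigned int code)
	{
		for (unsigned int i = 0; i + 1 < ARRAY_SIZE(rows); i++)
			if (code >= rows[i].code && code <= rows[i + 1].code)
				return rows[i].temp +
				       (int)(code - rows[i].code) *
				       (rows[i + 1].temp - rows[i].temp) /
				       (int)(rows[i + 1].code - rows[i].code);
		return INT_MIN;	/* out of table range */
	}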
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index f96ca2710928..17ca5c082643 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -369,7 +369,8 @@ void thermal_governor_update_tz(struct thermal_zone_device *tz,
tz->governor->update_tz(tz, reason);
}
-static void thermal_zone_device_halt(struct thermal_zone_device *tz, bool shutdown)
+static void thermal_zone_device_halt(struct thermal_zone_device *tz,
+ enum hw_protection_action action)
{
/*
* poweroff_delay_ms must be a carefully profiled positive value.
@@ -380,21 +381,23 @@ static void thermal_zone_device_halt(struct thermal_zone_device *tz, bool shutdo
dev_emerg(&tz->device, "%s: critical temperature reached\n", tz->type);
- if (shutdown)
- hw_protection_shutdown(msg, poweroff_delay_ms);
- else
- hw_protection_reboot(msg, poweroff_delay_ms);
+ __hw_protection_trigger(msg, poweroff_delay_ms, action);
}
void thermal_zone_device_critical(struct thermal_zone_device *tz)
{
- thermal_zone_device_halt(tz, true);
+ thermal_zone_device_halt(tz, HWPROT_ACT_DEFAULT);
}
EXPORT_SYMBOL(thermal_zone_device_critical);
+void thermal_zone_device_critical_shutdown(struct thermal_zone_device *tz)
+{
+ thermal_zone_device_halt(tz, HWPROT_ACT_SHUTDOWN);
+}
+
void thermal_zone_device_critical_reboot(struct thermal_zone_device *tz)
{
- thermal_zone_device_halt(tz, false);
+ thermal_zone_device_halt(tz, HWPROT_ACT_REBOOT);
}
static void handle_critical_trips(struct thermal_zone_device *tz,
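The bool-to-enum switch above hangs off the generalized hw_protection API.
Assumed shape of that interface (names match the patch; the actual
declaration lives in <linux/reboot.h> and may differ in detail):

	enum hw_protection_action {
		HWPROT_ACT_DEFAULT,	/* honor the system-wide policy */
		HWPROT_ACT_SHUTDOWN,
		HWPROT_ACT_REBOOT,
	};

	void __hw_protection_trigger(const char *reason, int ms_until_forced,
				     enum hw_protection_action action);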
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 09866f0ce765..bdadd141aa24 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -262,6 +262,7 @@ int thermal_build_list_of_policies(char *buf);
void __thermal_zone_device_update(struct thermal_zone_device *tz,
enum thermal_notify_event event);
void thermal_zone_device_critical_reboot(struct thermal_zone_device *tz);
+void thermal_zone_device_critical_shutdown(struct thermal_zone_device *tz);
void thermal_governor_update_tz(struct thermal_zone_device *tz,
enum thermal_notify_event reason);
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index 8264d5c3bbb3..1a51a4d240ff 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -405,9 +405,12 @@ static struct thermal_zone_device *thermal_of_zone_register(struct device_node *
of_ops.should_bind = thermal_of_should_bind;
ret = of_property_read_string(np, "critical-action", &action);
- if (!ret)
- if (!of_ops.critical && !strcasecmp(action, "reboot"))
+ if (!ret && !of_ops.critical) {
+ if (!strcasecmp(action, "reboot"))
of_ops.critical = thermal_zone_device_critical_reboot;
+ else if (!strcasecmp(action, "shutdown"))
+ of_ops.critical = thermal_zone_device_critical_shutdown;
+ }
tz = thermal_zone_device_register_with_trips(np->name, trips, ntrips,
data, &of_ops, &tzp,
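With the hunk above, "shutdown" joins "reboot" as a recognized value of the
thermal zone's critical-action property. Illustrative devicetree fragment
(zone name invented):

	thermal-zones {
		cpu-thermal {
			critical-action = "shutdown";
		};
	};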
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index 1f25529fe05d..361fece3d818 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -93,9 +93,11 @@ static int tb_retimer_nvm_add(struct tb_retimer *rt)
if (ret)
goto err_nvm;
- ret = tb_nvm_add_non_active(nvm, nvm_write);
- if (ret)
- goto err_nvm;
+ if (!rt->no_nvm_upgrade) {
+ ret = tb_nvm_add_non_active(nvm, nvm_write);
+ if (ret)
+ goto err_nvm;
+ }
rt->nvm = nvm;
dev_dbg(&rt->dev, "NVM version %x.%x\n", nvm->major, nvm->minor);
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 390abcfe7188..8c527af98927 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -1305,12 +1305,16 @@ static void tb_scan_port(struct tb_port *port)
goto out_rpm_put;
}
- tb_retimer_scan(port, true);
-
sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
tb_downstream_route(port));
if (IS_ERR(sw)) {
/*
+ * Make the downstream retimers available even if there
+ * is no router connected.
+ */
+ tb_retimer_scan(port, true);
+
+ /*
* If there is an error accessing the connected switch
* it may be connected to another domain. Also we allow
* the other domain to be connected to a max depth switch.
@@ -1360,6 +1364,14 @@ static void tb_scan_port(struct tb_port *port)
tb_configure_link(port, upstream_port, sw);
/*
+ * Scan for downstream retimers. We only scan them after the
+ * router has been enumerated to avoid issues with certain
+ * pluggable devices that expect the host to enumerate them
+ * within a certain timeout.
+ */
+ tb_retimer_scan(port, true);
+
+ /*
* CL0s and CL1 are enabled and supported together.
* Silently ignore CLx enabling in case CLx is not supported.
*/
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 717b31d78728..76254ed3f47f 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -2229,19 +2229,15 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
"USB3 Down");
- if (!path) {
- tb_tunnel_put(tunnel);
- return NULL;
- }
+ if (!path)
+ goto err_free;
tb_usb3_init_path(path);
tunnel->paths[TB_USB3_PATH_DOWN] = path;
path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
"USB3 Up");
- if (!path) {
- tb_tunnel_put(tunnel);
- return NULL;
- }
+ if (!path)
+ goto err_free;
tb_usb3_init_path(path);
tunnel->paths[TB_USB3_PATH_UP] = path;
@@ -2258,6 +2254,10 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
}
return tunnel;
+
+err_free:
+ tb_tunnel_put(tunnel);
+ return NULL;
}
/**
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 7fb81bbaee60..149f3d53b760 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -210,7 +210,7 @@ config SERIAL_NONSTANDARD
config MOXA_INTELLIO
tristate "Moxa Intellio support"
- depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
+ depends on SERIAL_NONSTANDARD && PCI
select FW_LOADER
help
Say Y here if you have a Moxa Intellio multiport serial card.
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index ebaada8db929..1348e2214b81 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -43,15 +43,6 @@
#include <linux/ratelimit.h>
#include <asm/io.h>
-#include <linux/uaccess.h>
-
-#define MOXA 0x400
-#define MOXA_GET_IQUEUE (MOXA + 1) /* get input buffered count */
-#define MOXA_GET_OQUEUE (MOXA + 2) /* get output buffered count */
-#define MOXA_GETDATACOUNT (MOXA + 23)
-#define MOXA_GET_IOQUEUE (MOXA + 27)
-#define MOXA_FLUSH_QUEUE (MOXA + 28)
-#define MOXA_GETMSTATUS (MOXA + 65)
/*
* System Configuration
@@ -347,8 +338,6 @@
#define MX_PARMARK 0xA0
#define MX_PARSPACE 0x20
-#define MOXA_VERSION "6.0k"
-
#define MOXA_FW_HDRLEN 32
#define MOXAMAJOR 172
@@ -357,33 +346,21 @@
#define MAX_PORTS_PER_BOARD 32 /* Don't change this value */
#define MAX_PORTS (MAX_BOARDS * MAX_PORTS_PER_BOARD)
-#define MOXA_IS_320(brd) ((brd)->boardType == MOXA_BOARD_C320_ISA || \
- (brd)->boardType == MOXA_BOARD_C320_PCI)
-
-/*
- * Define the Moxa PCI vendor and device IDs.
- */
-#define MOXA_BUS_TYPE_ISA 0
-#define MOXA_BUS_TYPE_PCI 1
+#define MOXA_IS_320(brd) ((brd)->boardType == MOXA_BOARD_C320_PCI)
enum {
MOXA_BOARD_C218_PCI = 1,
- MOXA_BOARD_C218_ISA,
MOXA_BOARD_C320_PCI,
- MOXA_BOARD_C320_ISA,
MOXA_BOARD_CP204J,
};
static char *moxa_brdname[] =
{
"C218 Turbo PCI series",
- "C218 Turbo ISA series",
"C320 Turbo PCI series",
- "C320 Turbo ISA series",
"CP-204J series",
};
-#ifdef CONFIG_PCI
static const struct pci_device_id moxa_pcibrds[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_C218),
.driver_data = MOXA_BOARD_C218_PCI },
@@ -394,14 +371,12 @@ static const struct pci_device_id moxa_pcibrds[] = {
{ 0 }
};
MODULE_DEVICE_TABLE(pci, moxa_pcibrds);
-#endif /* CONFIG_PCI */
struct moxa_port;
static struct moxa_board_conf {
int boardType;
int numPorts;
- int busType;
unsigned int ready;
@@ -413,19 +388,6 @@ static struct moxa_board_conf {
void __iomem *intTable;
} moxa_boards[MAX_BOARDS];
-struct mxser_mstatus {
- tcflag_t cflag;
- int cts;
- int dsr;
- int ri;
- int dcd;
-};
-
-struct moxaq_str {
- int inq;
- int outq;
-};
-
struct moxa_port {
struct tty_port port;
struct moxa_board_conf *board;
@@ -440,12 +402,6 @@ struct moxa_port {
u8 lowChkFlag;
};
-struct mon_str {
- int tick;
- int rxcnt[MAX_PORTS];
- int txcnt[MAX_PORTS];
-};
-
/* statusflags */
#define TXSTOPPED 1
#define LOWWAIT 2
@@ -455,17 +411,11 @@ struct mon_str {
#define WAKEUP_CHARS 256
static int ttymajor = MOXAMAJOR;
-static struct mon_str moxaLog;
static unsigned int moxaFuncTout = HZ / 2;
static unsigned int moxaLowWaterChk;
static DEFINE_MUTEX(moxa_openlock);
static DEFINE_SPINLOCK(moxa_lock);
-static unsigned long baseaddr[MAX_BOARDS];
-static unsigned int type[MAX_BOARDS];
-static unsigned int numports[MAX_BOARDS];
-static struct tty_port moxa_service_port;
-
MODULE_AUTHOR("William Chen");
MODULE_DESCRIPTION("MOXA Intellio Family Multiport Board Device Driver");
MODULE_LICENSE("GPL");
@@ -473,13 +423,6 @@ MODULE_FIRMWARE("c218tunx.cod");
MODULE_FIRMWARE("cp204unx.cod");
MODULE_FIRMWARE("c320tunx.cod");
-module_param_array(type, uint, NULL, 0);
-MODULE_PARM_DESC(type, "card type: C218=2, C320=4");
-module_param_hw_array(baseaddr, ulong, ioport, NULL, 0);
-MODULE_PARM_DESC(baseaddr, "base address");
-module_param_array(numports, uint, NULL, 0);
-MODULE_PARM_DESC(numports, "numports (ignored for C218)");
-
module_param(ttymajor, int, 0);
/*
@@ -583,104 +526,6 @@ static void moxa_low_water_check(void __iomem *ofsAddr)
* TTY operations
*/
-static int moxa_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- struct moxa_port *ch = tty->driver_data;
- void __user *argp = (void __user *)arg;
- int status, ret = 0;
-
- if (tty->index == MAX_PORTS) {
- if (cmd != MOXA_GETDATACOUNT && cmd != MOXA_GET_IOQUEUE &&
- cmd != MOXA_GETMSTATUS)
- return -EINVAL;
- } else if (!ch)
- return -ENODEV;
-
- switch (cmd) {
- case MOXA_GETDATACOUNT:
- moxaLog.tick = jiffies;
- if (copy_to_user(argp, &moxaLog, sizeof(moxaLog)))
- ret = -EFAULT;
- break;
- case MOXA_FLUSH_QUEUE:
- MoxaPortFlushData(ch, arg);
- break;
- case MOXA_GET_IOQUEUE: {
- struct moxaq_str __user *argm = argp;
- struct moxaq_str tmp;
- struct moxa_port *p;
- unsigned int i, j;
-
- for (i = 0; i < MAX_BOARDS; i++) {
- p = moxa_boards[i].ports;
- for (j = 0; j < MAX_PORTS_PER_BOARD; j++, p++, argm++) {
- memset(&tmp, 0, sizeof(tmp));
- spin_lock_bh(&moxa_lock);
- if (moxa_boards[i].ready) {
- tmp.inq = MoxaPortRxQueue(p);
- tmp.outq = MoxaPortTxQueue(p);
- }
- spin_unlock_bh(&moxa_lock);
- if (copy_to_user(argm, &tmp, sizeof(tmp)))
- return -EFAULT;
- }
- }
- break;
- } case MOXA_GET_OQUEUE:
- status = MoxaPortTxQueue(ch);
- ret = put_user(status, (unsigned long __user *)argp);
- break;
- case MOXA_GET_IQUEUE:
- status = MoxaPortRxQueue(ch);
- ret = put_user(status, (unsigned long __user *)argp);
- break;
- case MOXA_GETMSTATUS: {
- struct mxser_mstatus __user *argm = argp;
- struct mxser_mstatus tmp;
- struct moxa_port *p;
- unsigned int i, j;
-
- for (i = 0; i < MAX_BOARDS; i++) {
- p = moxa_boards[i].ports;
- for (j = 0; j < MAX_PORTS_PER_BOARD; j++, p++, argm++) {
- struct tty_struct *ttyp;
- memset(&tmp, 0, sizeof(tmp));
- spin_lock_bh(&moxa_lock);
- if (!moxa_boards[i].ready) {
- spin_unlock_bh(&moxa_lock);
- goto copy;
- }
-
- status = MoxaPortLineStatus(p);
- spin_unlock_bh(&moxa_lock);
-
- if (status & 1)
- tmp.cts = 1;
- if (status & 2)
- tmp.dsr = 1;
- if (status & 4)
- tmp.dcd = 1;
-
- ttyp = tty_port_tty_get(&p->port);
- if (!ttyp)
- tmp.cflag = p->cflag;
- else
- tmp.cflag = ttyp->termios.c_cflag;
- tty_kref_put(ttyp);
-copy:
- if (copy_to_user(argm, &tmp, sizeof(tmp)))
- return -EFAULT;
- }
- }
- break;
- }
- default:
- ret = -ENOIOCTLCMD;
- }
- return ret;
-}
-
static int moxa_break_ctl(struct tty_struct *tty, int state)
{
struct moxa_port *port = tty->driver_data;
@@ -697,7 +542,6 @@ static const struct tty_operations moxa_ops = {
.write_room = moxa_write_room,
.flush_buffer = moxa_flush_buffer,
.chars_in_buffer = moxa_chars_in_buffer,
- .ioctl = moxa_ioctl,
.set_termios = moxa_set_termios,
.stop = moxa_stop,
.start = moxa_start,
@@ -725,7 +569,6 @@ static DEFINE_TIMER(moxaTimer, moxa_poll);
static int moxa_check_fw_model(struct moxa_board_conf *brd, u8 model)
{
switch (brd->boardType) {
- case MOXA_BOARD_C218_ISA:
case MOXA_BOARD_C218_PCI:
if (model != 1)
goto err;
@@ -769,7 +612,6 @@ static int moxa_load_bios(struct moxa_board_conf *brd, const u8 *buf,
msleep(2000);
switch (brd->boardType) {
- case MOXA_BOARD_C218_ISA:
case MOXA_BOARD_C218_PCI:
tmp = readw(baseAddr + C218_key);
if (tmp != C218_KeyCode)
@@ -833,7 +675,6 @@ static int moxa_real_load_code(struct moxa_board_conf *brd, const void *ptr,
switch (brd->boardType) {
case MOXA_BOARD_CP204J:
- case MOXA_BOARD_C218_ISA:
case MOXA_BOARD_C218_PCI:
key = C218_key;
loadbuf = C218_LoadBuf;
@@ -898,15 +739,9 @@ static int moxa_real_load_code(struct moxa_board_conf *brd, const void *ptr,
return -EIO;
if (MOXA_IS_320(brd)) {
- if (brd->busType == MOXA_BUS_TYPE_PCI) { /* ASIC board */
- writew(0x3800, baseAddr + TMS320_PORT1);
- writew(0x3900, baseAddr + TMS320_PORT2);
- writew(28499, baseAddr + TMS320_CLOCK);
- } else {
- writew(0x3200, baseAddr + TMS320_PORT1);
- writew(0x3400, baseAddr + TMS320_PORT2);
- writew(19999, baseAddr + TMS320_CLOCK);
- }
+ writew(0x3800, baseAddr + TMS320_PORT1);
+ writew(0x3900, baseAddr + TMS320_PORT2);
+ writew(28499, baseAddr + TMS320_CLOCK);
}
writew(1, baseAddr + Disable_IRQ);
writew(0, baseAddr + Magic_no);
@@ -957,7 +792,6 @@ static int moxa_load_code(struct moxa_board_conf *brd, const void *ptr,
return retval;
switch (brd->boardType) {
- case MOXA_BOARD_C218_ISA:
case MOXA_BOARD_C218_PCI:
case MOXA_BOARD_CP204J:
port = brd->ports;
@@ -1141,7 +975,6 @@ static int moxa_init_board(struct moxa_board_conf *brd, struct device *dev)
}
switch (brd->boardType) {
- case MOXA_BOARD_C218_ISA:
case MOXA_BOARD_C218_PCI:
file = "c218tunx.cod";
break;
@@ -1227,7 +1060,6 @@ static void moxa_board_deinit(struct moxa_board_conf *brd)
kfree(brd->ports);
}
-#ifdef CONFIG_PCI
static int moxa_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -1270,7 +1102,6 @@ static int moxa_pci_probe(struct pci_dev *pdev,
board->boardType = board_type;
switch (board_type) {
- case MOXA_BOARD_C218_ISA:
case MOXA_BOARD_C218_PCI:
board->numPorts = 8;
break;
@@ -1282,7 +1113,6 @@ static int moxa_pci_probe(struct pci_dev *pdev,
board->numPorts = 0;
break;
}
- board->busType = MOXA_BUS_TYPE_PCI;
retval = moxa_init_board(board, &pdev->dev);
if (retval)
@@ -1318,21 +1148,12 @@ static struct pci_driver moxa_pci_driver = {
.probe = moxa_pci_probe,
.remove = moxa_pci_remove
};
-#endif /* CONFIG_PCI */
static int __init moxa_init(void)
{
- unsigned int isabrds = 0;
int retval = 0;
- struct moxa_board_conf *brd = moxa_boards;
- unsigned int i;
-
- printk(KERN_INFO "MOXA Intellio family driver version %s\n",
- MOXA_VERSION);
- tty_port_init(&moxa_service_port);
-
- moxaDriver = tty_alloc_driver(MAX_PORTS + 1,
+ moxaDriver = tty_alloc_driver(MAX_PORTS,
TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(moxaDriver))
@@ -1348,8 +1169,6 @@ static int __init moxa_init(void)
moxaDriver->init_termios.c_ispeed = 9600;
moxaDriver->init_termios.c_ospeed = 9600;
tty_set_operations(moxaDriver, &moxa_ops);
- /* Having one more port only for ioctls is ugly */
- tty_port_link_device(&moxa_service_port, moxaDriver, MAX_PORTS);
if (tty_register_driver(moxaDriver)) {
printk(KERN_ERR "can't register MOXA Smartio tty driver!\n");
@@ -1357,64 +1176,16 @@ static int __init moxa_init(void)
return -1;
}
- /* Find the boards defined from module args. */
-
- for (i = 0; i < MAX_BOARDS; i++) {
- if (!baseaddr[i])
- break;
- if (type[i] == MOXA_BOARD_C218_ISA ||
- type[i] == MOXA_BOARD_C320_ISA) {
- pr_debug("Moxa board %2d: %s board(baseAddr=%lx)\n",
- isabrds + 1, moxa_brdname[type[i] - 1],
- baseaddr[i]);
- brd->boardType = type[i];
- brd->numPorts = type[i] == MOXA_BOARD_C218_ISA ? 8 :
- numports[i];
- brd->busType = MOXA_BUS_TYPE_ISA;
- brd->basemem = ioremap(baseaddr[i], 0x4000);
- if (!brd->basemem) {
- printk(KERN_ERR "MOXA: can't remap %lx\n",
- baseaddr[i]);
- continue;
- }
- if (moxa_init_board(brd, NULL)) {
- iounmap(brd->basemem);
- brd->basemem = NULL;
- continue;
- }
-
- printk(KERN_INFO "MOXA isa board found at 0x%.8lx and "
- "ready (%u ports, firmware loaded)\n",
- baseaddr[i], brd->numPorts);
-
- brd++;
- isabrds++;
- }
- }
-
-#ifdef CONFIG_PCI
retval = pci_register_driver(&moxa_pci_driver);
- if (retval) {
+ if (retval)
printk(KERN_ERR "Can't register MOXA pci driver!\n");
- if (isabrds)
- retval = 0;
- }
-#endif
return retval;
}
static void __exit moxa_exit(void)
{
- unsigned int i;
-
-#ifdef CONFIG_PCI
pci_unregister_driver(&moxa_pci_driver);
-#endif
-
- for (i = 0; i < MAX_BOARDS; i++) /* ISA boards */
- if (moxa_boards[i].ready)
- moxa_board_deinit(&moxa_boards[i]);
del_timer_sync(&moxaTimer);
@@ -1457,9 +1228,6 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
int port;
port = tty->index;
- if (port == MAX_PORTS) {
- return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
- }
if (mutex_lock_interruptible(&moxa_openlock))
return -ERESTARTSYS;
brd = &moxa_boards[port / MAX_PORTS_PER_BOARD];
@@ -2182,7 +1950,6 @@ static ssize_t MoxaPortWriteData(struct tty_struct *tty, const u8 *buffer,
c = (head > tail) ? (head - tail - 1) : (head - tail + tx_mask);
if (c > len)
c = len;
- moxaLog.txcnt[port->port.tty->index] += c;
total = c;
if (spage == epage) {
bufhead = readw(ofsAddr + Ofs_txb);
@@ -2224,7 +1991,6 @@ static ssize_t MoxaPortWriteData(struct tty_struct *tty, const u8 *buffer,
static int MoxaPortReadData(struct moxa_port *port)
{
- struct tty_struct *tty = port->port.tty;
void __iomem *baseAddr, *ofsAddr, *ofs;
u8 *dst;
unsigned int count, len, total;
@@ -2243,7 +2009,6 @@ static int MoxaPortReadData(struct moxa_port *port)
return 0;
total = count;
- moxaLog.rxcnt[tty->index] += total;
if (spage == epage) {
bufhead = readw(ofsAddr + Ofs_rxb);
writew(spage, baseAddr + Control_reg);
@@ -2331,8 +2096,6 @@ static int moxa_get_serial_info(struct tty_struct *tty,
{
struct moxa_port *info = tty->driver_data;
- if (tty->index == MAX_PORTS)
- return -EINVAL;
if (!info)
return -ENODEV;
mutex_lock(&info->port.mutex);
@@ -2352,8 +2115,6 @@ static int moxa_set_serial_info(struct tty_struct *tty,
struct moxa_port *info = tty->driver_data;
unsigned int close_delay;
- if (tty->index == MAX_PORTS)
- return -EINVAL;
if (!info)
return -ENODEV;
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 5e9ca4376d68..6af3f3a0b531 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -56,6 +56,8 @@
*/
#define WAKEUP_CHARS 256
+#define N_TTY_BUF_SIZE 4096
+
/*
* This defines the low- and high-watermarks for throttling and
* unthrottling the TTY driver. These watermarks are used for
@@ -79,14 +81,6 @@
#define ECHO_BLOCK 256
#define ECHO_DISCARD_WATERMARK N_TTY_BUF_SIZE - (ECHO_BLOCK + 32)
-
-#undef N_TTY_TRACE
-#ifdef N_TTY_TRACE
-# define n_tty_trace(f, args...) trace_printk(f, ##args)
-#else
-# define n_tty_trace(f, args...) no_printk(f, ##args)
-#endif
-
struct n_tty_data {
/* producer-published */
size_t read_head;
@@ -486,18 +480,13 @@ static int do_output_char(u8 c, struct tty_struct *tty, int space)
static int process_output(u8 c, struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
- int space, retval;
- mutex_lock(&ldata->output_lock);
-
- space = tty_write_room(tty);
- retval = do_output_char(c, tty, space);
+ guard(mutex)(&ldata->output_lock);
- mutex_unlock(&ldata->output_lock);
- if (retval < 0)
+ if (do_output_char(c, tty, tty_write_room(tty)) < 0)
return -1;
- else
- return 0;
+
+ return 0;
}
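
The conversion above uses the scope-based cleanup helpers from <linux/cleanup.h>: guard(mutex)(&lock) acquires the mutex and releases it automatically when the enclosing scope exits, which is why the rewritten function no longer needs an explicit mutex_unlock() on any return path. A minimal sketch of the pattern; demo_lock and demo_update are illustrative names, not part of this patch:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);

static int demo_update(int *val)
{
	guard(mutex)(&demo_lock);	/* released on every return path */

	if (*val < 0)
		return -EINVAL;		/* early return still unlocks */

	(*val)++;
	return 0;
}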
/**
@@ -522,17 +511,15 @@ static ssize_t process_output_block(struct tty_struct *tty,
const u8 *buf, unsigned int nr)
{
struct n_tty_data *ldata = tty->disc_data;
- int space;
- int i;
+ unsigned int space, i;
const u8 *cp;
- mutex_lock(&ldata->output_lock);
+ guard(mutex)(&ldata->output_lock);
space = tty_write_room(tty);
- if (space <= 0) {
- mutex_unlock(&ldata->output_lock);
- return space;
- }
+ if (space == 0)
+ return 0;
+
if (nr > space)
nr = space;
@@ -544,18 +531,18 @@ static ssize_t process_output_block(struct tty_struct *tty,
if (O_ONLRET(tty))
ldata->column = 0;
if (O_ONLCR(tty))
- goto break_out;
+ goto do_write;
ldata->canon_column = ldata->column;
break;
case '\r':
if (O_ONOCR(tty) && ldata->column == 0)
- goto break_out;
+ goto do_write;
if (O_OCRNL(tty))
- goto break_out;
+ goto do_write;
ldata->canon_column = ldata->column = 0;
break;
case '\t':
- goto break_out;
+ goto do_write;
case '\b':
if (ldata->column > 0)
ldata->column--;
@@ -563,18 +550,15 @@ static ssize_t process_output_block(struct tty_struct *tty,
default:
if (!iscntrl(c)) {
if (O_OLCUC(tty))
- goto break_out;
+ goto do_write;
if (!is_continuation(c, tty))
ldata->column++;
}
break;
}
}
-break_out:
- i = tty->ops->write(tty, buf, i);
-
- mutex_unlock(&ldata->output_lock);
- return i;
+do_write:
+ return tty->ops->write(tty, buf, i);
}
static int n_tty_process_echo_ops(struct tty_struct *tty, size_t *tail,
@@ -696,7 +680,7 @@ static int n_tty_process_echo_ops(struct tty_struct *tty, size_t *tail,
static size_t __process_echoes(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
- int space, old_space;
+ unsigned int space, old_space;
size_t tail;
u8 c;
@@ -2034,9 +2018,6 @@ static bool canon_copy_from_read_buf(const struct tty_struct *tty, u8 **kbp,
tail = MASK(ldata->read_tail);
size = min_t(size_t, tail + n, N_TTY_BUF_SIZE);
- n_tty_trace("%s: nr:%zu tail:%zu n:%zu size:%zu\n",
- __func__, *nr, tail, n, size);
-
eol = find_next_bit(ldata->read_flags, size, tail);
more = n - (size - tail);
if (eol == N_TTY_BUF_SIZE && more) {
@@ -2054,9 +2035,6 @@ static bool canon_copy_from_read_buf(const struct tty_struct *tty, u8 **kbp,
if (!found || read_buf(ldata, eol) != __DISABLED_CHAR)
n = c;
- n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n",
- __func__, eol, found, n, c, tail, more);
-
tty_copy(tty, *kbp, tail, n);
*kbp += n;
*nr -= n;
@@ -2133,6 +2111,66 @@ static int job_control(struct tty_struct *tty, struct file *file)
return __tty_check_change(tty, SIGTTIN);
}
+/*
+ * Continuation of a read started earlier: we still hold the
+ * atomic_read_lock and the termios_rwsem, and can just continue to
+ * copy data.
+ */
+static ssize_t n_tty_continue_cookie(struct tty_struct *tty, u8 *kbuf,
+ size_t nr, void **cookie)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+ u8 *kb = kbuf;
+
+ if (ldata->icanon && !L_EXTPROC(tty)) {
+ /*
+ * If we have filled the user buffer, see if we should skip an
+ * EOF character before releasing the lock and returning done.
+ */
+ if (!nr)
+ canon_skip_eof(ldata);
+ else if (canon_copy_from_read_buf(tty, &kb, &nr))
+ return kb - kbuf;
+ } else {
+ if (copy_from_read_buf(tty, &kb, &nr))
+ return kb - kbuf;
+ }
+
+ /* No more data - release locks and stop retries */
+ n_tty_kick_worker(tty);
+ n_tty_check_unthrottle(tty);
+ up_read(&tty->termios_rwsem);
+ mutex_unlock(&ldata->atomic_read_lock);
+ *cookie = NULL;
+
+ return kb - kbuf;
+}
+
+static int n_tty_wait_for_input(struct tty_struct *tty, struct file *file,
+ struct wait_queue_entry *wait, long *timeout)
+{
+ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+ return -EIO;
+ if (tty_hung_up_p(file))
+ return 0;
+ /*
+ * Abort readers for ttys which never actually get hung up.
+ * See __tty_hangup().
+ */
+ if (test_bit(TTY_HUPPING, &tty->flags))
+ return 0;
+ if (!*timeout)
+ return 0;
+ if (tty_io_nonblock(tty, file))
+ return -EAGAIN;
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
+ up_read(&tty->termios_rwsem);
+ *timeout = wait_woken(wait, TASK_INTERRUPTIBLE, *timeout);
+ down_read(&tty->termios_rwsem);
+
+ return 1;
+}
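
Factoring the continuation path out of n_tty_read() makes the cookie contract of tty_ldisc_ops::read() easier to see: a non-NULL *cookie on return means the ldisc still holds atomic_read_lock and termios_rwsem, and the tty core keeps calling back with the same cookie until the ldisc clears it and drops the locks. A sketch of that shape, assuming the contract just described (demo_read is an illustrative name):

static ssize_t demo_read(struct tty_struct *tty, struct file *file,
			 u8 *kbuf, size_t nr, void **cookie,
			 unsigned long offset)
{
	if (*cookie)	/* resuming: locks from the first pass still held */
		return n_tty_continue_cookie(tty, kbuf, nr, cookie);

	/* first pass: take the locks, copy whatever is available ... */

	*cookie = cookie;	/* arm retries; locks stay held */
	return nr;
}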
/**
* n_tty_read - read function for tty
@@ -2166,36 +2204,9 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, u8 *kbuf,
bool packet;
size_t old_tail;
- /*
- * Is this a continuation of a read started earler?
- *
- * If so, we still hold the atomic_read_lock and the
- * termios_rwsem, and can just continue to copy data.
- */
- if (*cookie) {
- if (ldata->icanon && !L_EXTPROC(tty)) {
- /*
- * If we have filled the user buffer, see
- * if we should skip an EOF character before
- * releasing the lock and returning done.
- */
- if (!nr)
- canon_skip_eof(ldata);
- else if (canon_copy_from_read_buf(tty, &kb, &nr))
- return kb - kbuf;
- } else {
- if (copy_from_read_buf(tty, &kb, &nr))
- return kb - kbuf;
- }
-
- /* No more data - release locks and stop retries */
- n_tty_kick_worker(tty);
- n_tty_check_unthrottle(tty);
- up_read(&tty->termios_rwsem);
- mutex_unlock(&ldata->atomic_read_lock);
- *cookie = NULL;
- return kb - kbuf;
- }
+ /* Is this a continuation of a read started earlier? */
+ if (*cookie)
+ return n_tty_continue_cookie(tty, kbuf, nr, cookie);
retval = job_control(tty, file);
if (retval < 0)
@@ -2250,34 +2261,12 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, u8 *kbuf,
tty_buffer_flush_work(tty->port);
down_read(&tty->termios_rwsem);
if (!input_available_p(tty, 0)) {
- if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
- retval = -EIO;
+ int ret = n_tty_wait_for_input(tty, file, &wait,
+ &timeout);
+ if (ret <= 0) {
+ retval = ret;
break;
}
- if (tty_hung_up_p(file))
- break;
- /*
- * Abort readers for ttys which never actually
- * get hung up. See __tty_hangup().
- */
- if (test_bit(TTY_HUPPING, &tty->flags))
- break;
- if (!timeout)
- break;
- if (tty_io_nonblock(tty, file)) {
- retval = -EAGAIN;
- break;
- }
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
- }
- up_read(&tty->termios_rwsem);
-
- timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
- timeout);
-
- down_read(&tty->termios_rwsem);
continue;
}
}
@@ -2292,21 +2281,8 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, u8 *kbuf,
nr--;
}
- /*
- * Copy data, and if there is more to be had
- * and we have nothing more to wait for, then
- * let's mark us for retries.
- *
- * NOTE! We return here with both the termios_sem
- * and atomic_read_lock still held, the retries
- * will release them when done.
- */
- if (copy_from_read_buf(tty, &kb, &nr) && kb - kbuf >= minimum) {
-more_to_be_read:
- remove_wait_queue(&tty->read_wait, &wait);
- *cookie = cookie;
- return kb - kbuf;
- }
+ if (copy_from_read_buf(tty, &kb, &nr) && kb - kbuf >= minimum)
+ goto more_to_be_read;
}
n_tty_check_unthrottle(tty);
@@ -2333,6 +2309,18 @@ more_to_be_read:
retval = kb - kbuf;
return retval;
+more_to_be_read:
+ /*
+ * There is more to be had and we have nothing more to wait for, so
+ * let's mark us for retries.
+ *
+ * NOTE! We return here with both the termios_sem and atomic_read_lock
+ * still held, the retries will release them when done.
+ */
+ remove_wait_queue(&tty->read_wait, &wait);
+ *cookie = cookie;
+
+ return kb - kbuf;
}
/**
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index ebf0bbc2cff2..eb2a2e58fe78 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -316,17 +316,6 @@ void serdev_device_write_flush(struct serdev_device *serdev)
}
EXPORT_SYMBOL_GPL(serdev_device_write_flush);
-int serdev_device_write_room(struct serdev_device *serdev)
-{
- struct serdev_controller *ctrl = serdev->ctrl;
-
- if (!ctrl || !ctrl->ops->write_room)
- return 0;
-
- return serdev->ctrl->ops->write_room(ctrl);
-}
-EXPORT_SYMBOL_GPL(serdev_device_write_room);
-
unsigned int serdev_device_set_baudrate(struct serdev_device *serdev, unsigned int speed)
{
struct serdev_controller *ctrl = serdev->ctrl;
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
index 3d7ae7fa5018..bab1b143b8a6 100644
--- a/drivers/tty/serdev/serdev-ttyport.c
+++ b/drivers/tty/serdev/serdev-ttyport.c
@@ -92,14 +92,6 @@ static void ttyport_write_flush(struct serdev_controller *ctrl)
tty_driver_flush_buffer(tty);
}
-static int ttyport_write_room(struct serdev_controller *ctrl)
-{
- struct serport *serport = serdev_controller_get_drvdata(ctrl);
- struct tty_struct *tty = serport->tty;
-
- return tty_write_room(tty);
-}
-
static int ttyport_open(struct serdev_controller *ctrl)
{
struct serport *serport = serdev_controller_get_drvdata(ctrl);
@@ -259,7 +251,6 @@ static int ttyport_break_ctl(struct serdev_controller *ctrl, unsigned int break_
static const struct serdev_controller_ops ctrl_ops = {
.write_buf = ttyport_write_buf,
.write_flush = ttyport_write_flush,
- .write_room = ttyport_write_room,
.open = ttyport_open,
.close = ttyport_close,
.set_flow_control = ttyport_set_flow_control,
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index f245a84f4a50..bdd26c9f34bd 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -162,7 +162,7 @@ void serial8250_tx_dma_flush(struct uart_8250_port *p)
*/
dma->tx_size = 0;
- dmaengine_terminate_async(dma->rxchan);
+ dmaengine_terminate_async(dma->txchan);
}
int serial8250_rx_dma(struct uart_8250_port *p)
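
The hunk above is a one-word bug fix: serial8250_tx_dma_flush() was terminating the RX channel while resetting TX state, and now terminates dma->txchan as intended. Note that dmaengine_terminate_async() does not wait for in-flight completion callbacks; per the dmaengine API, teardown paths that go on to free descriptors or release the channel should pair it with dmaengine_synchronize() from sleepable context, roughly:

	/* atomic context: request termination without blocking */
	dmaengine_terminate_async(dma->txchan);

	/* later, from sleepable context, before freeing resources */
	dmaengine_synchronize(dma->txchan);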
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 6afcf27db3b8..1902f29444a1 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -107,11 +107,23 @@ static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value)
return value;
}
+/*
+ * This function is being called as part of the uart_port::serial_out()
+ * routine. Hence, it must not call serial_port_out() or serial_out()
+ * against the modified registers here, i.e. LCR.
+ */
static void dw8250_force_idle(struct uart_port *p)
{
struct uart_8250_port *up = up_to_u8250p(p);
unsigned int lsr;
+ /*
+ * The following call currently performs serial_out()
+ * against the FCR register. Because FCR differs from LCR,
+ * there will be no infinite loop, but if that call ever gets
+ * modified, we might need a new custom version of it
+ * that avoids infinite recursion.
+ */
serial8250_clear_and_reinit_fifos(up);
/*
@@ -120,14 +132,19 @@ static void dw8250_force_idle(struct uart_port *p)
* enabled.
*/
if (up->fcr & UART_FCR_ENABLE_FIFO) {
- lsr = p->serial_in(p, UART_LSR);
+ lsr = serial_port_in(p, UART_LSR);
if (!(lsr & UART_LSR_DR))
return;
}
- (void)p->serial_in(p, UART_RX);
+ serial_port_in(p, UART_RX);
}
+/*
+ * This function is being called as part of the uart_port::serial_out()
+ * routine. Hence, it must not call serial_port_out() or serial_out()
+ * against the modified registers here, i.e. LCR.
+ */
static void dw8250_check_lcr(struct uart_port *p, int offset, int value)
{
struct dw8250_data *d = to_dw8250_data(p->private_data);
@@ -139,7 +156,7 @@ static void dw8250_check_lcr(struct uart_port *p, int offset, int value)
/* Make sure LCR write wasn't ignored */
while (tries--) {
- unsigned int lcr = p->serial_in(p, offset);
+ unsigned int lcr = serial_port_in(p, offset);
if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
return;
@@ -260,7 +277,7 @@ static int dw8250_handle_irq(struct uart_port *p)
{
struct uart_8250_port *up = up_to_u8250p(p);
struct dw8250_data *d = to_dw8250_data(p->private_data);
- unsigned int iir = p->serial_in(p, UART_IIR);
+ unsigned int iir = serial_port_in(p, UART_IIR);
bool rx_timeout = (iir & 0x3f) == UART_IIR_RX_TIMEOUT;
unsigned int quirks = d->pdata->quirks;
unsigned int status;
@@ -281,7 +298,7 @@ static int dw8250_handle_irq(struct uart_port *p)
status = serial_lsr_in(up);
if (!(status & (UART_LSR_DR | UART_LSR_BI)))
- (void) p->serial_in(p, UART_RX);
+ serial_port_in(p, UART_RX);
uart_port_unlock_irqrestore(p, flags);
}
@@ -303,7 +320,7 @@ static int dw8250_handle_irq(struct uart_port *p)
if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
/* Clear the USR */
- (void)p->serial_in(p, d->pdata->usr_reg);
+ serial_port_in(p, d->pdata->usr_reg);
return 1;
}
@@ -390,7 +407,7 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
static void dw8250_set_ldisc(struct uart_port *p, struct ktermios *termios)
{
struct uart_8250_port *up = up_to_u8250p(p);
- unsigned int mcr = p->serial_in(p, UART_MCR);
+ unsigned int mcr = serial_port_in(p, UART_MCR);
if (up->capabilities & UART_CAP_IRDA) {
if (termios->c_line == N_IRDA)
@@ -398,7 +415,7 @@ static void dw8250_set_ldisc(struct uart_port *p, struct ktermios *termios)
else
mcr &= ~DW_UART_MCR_SIRE;
- p->serial_out(p, UART_MCR, mcr);
+ serial_port_out(p, UART_MCR, mcr);
}
serial8250_do_set_ldisc(p, termios);
}
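
The recurring substitution in this file swaps direct ->serial_in()/->serial_out() calls for the uart_port accessors. If memory serves, the helpers in <linux/serial_core.h> are thin wrappers along these lines, so the change is behavior-neutral and simply keeps call sites uniform:

static inline u32 serial_port_in(struct uart_port *up, int offset)
{
	return up->serial_in(up, offset);
}

static inline void serial_port_out(struct uart_port *up, int offset, u32 value)
{
	up->serial_out(up, offset, value);
}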
@@ -421,6 +438,18 @@ static bool dw8250_idma_filter(struct dma_chan *chan, void *param)
return param == chan->device->dev;
}
+static void dw8250_setup_dma_filter(struct uart_port *p, struct dw8250_data *data)
+{
+ /* Platforms with iDMA 64-bit */
+ if (platform_get_resource_byname(to_platform_device(p->dev), IORESOURCE_MEM, "lpss_priv")) {
+ data->data.dma.rx_param = p->dev->parent;
+ data->data.dma.tx_param = p->dev->parent;
+ data->data.dma.fn = dw8250_idma_filter;
+ } else {
+ data->data.dma.fn = dw8250_fallback_dma_filter;
+ }
+}
+
static u32 dw8250_rzn1_get_dmacr_burst(int max_burst)
{
if (max_burst >= 8)
@@ -459,8 +488,8 @@ static void dw8250_prepare_rx_dma(struct uart_8250_port *p)
static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
{
- unsigned int quirks = data->pdata ? data->pdata->quirks : 0;
- u32 cpr_value = data->pdata ? data->pdata->cpr_value : 0;
+ unsigned int quirks = data->pdata->quirks;
+ u32 cpr_value = data->pdata->cpr_value;
if (quirks & DW_UART_QUIRK_CPR_VALUE)
data->data.cpr_value = cpr_value;
@@ -491,14 +520,6 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
p->serial_in = dw8250_serial_in32;
data->uart_16550_compatible = true;
}
-
- /* Platforms with iDMA 64-bit */
- if (platform_get_resource_byname(to_platform_device(p->dev),
- IORESOURCE_MEM, "lpss_priv")) {
- data->data.dma.rx_param = p->dev->parent;
- data->data.dma.tx_param = p->dev->parent;
- data->data.dma.fn = dw8250_idma_filter;
- }
}
static void dw8250_reset_control_assert(void *data)
@@ -520,7 +541,6 @@ static int dw8250_probe(struct platform_device *pdev)
return dev_err_probe(dev, -EINVAL, "no registers defined\n");
spin_lock_init(&p->lock);
- p->handle_irq = dw8250_handle_irq;
p->pm = dw8250_do_pm;
p->type = PORT_8250;
p->flags = UPF_FIXED_PORT;
@@ -532,13 +552,8 @@ static int dw8250_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
- data->data.dma.fn = dw8250_fallback_dma_filter;
- data->pdata = device_get_match_data(p->dev);
p->private_data = &data->data;
- data->uart_16550_compatible = device_property_read_bool(dev,
- "snps,uart-16550-compatible");
-
p->mapbase = regs->start;
p->mapsize = resource_size(regs);
@@ -626,11 +641,19 @@ static int dw8250_probe(struct platform_device *pdev)
if (err)
return err;
- dw8250_quirks(p, data);
+ data->uart_16550_compatible = device_property_read_bool(dev, "snps,uart-16550-compatible");
+
+ data->pdata = device_get_match_data(p->dev);
+ if (data->pdata)
+ dw8250_quirks(p, data);
/* If the Busy Functionality is not implemented, don't handle it */
if (data->uart_16550_compatible)
p->handle_irq = NULL;
+ else if (data->pdata)
+ p->handle_irq = dw8250_handle_irq;
+
+ dw8250_setup_dma_filter(p, data);
if (!data->skip_autocfg)
dw8250_setup_port(p);
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
index 1b7bd55619c6..649ae5c8304d 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -32,7 +32,7 @@ int fsl8250_handle_irq(struct uart_port *port)
uart_port_lock_irqsave(&up->port, &flags);
- iir = port->serial_in(port, UART_IIR);
+ iir = serial_port_in(port, UART_IIR);
if (iir & UART_IIR_NO_INT) {
uart_port_unlock_irqrestore(&up->port, flags);
return 0;
@@ -54,12 +54,12 @@ int fsl8250_handle_irq(struct uart_port *port)
if (unlikely((iir & UART_IIR_ID) == UART_IIR_RLSI &&
(up->lsr_saved_flags & UART_LSR_BI))) {
up->lsr_saved_flags &= ~UART_LSR_BI;
- port->serial_in(port, UART_RX);
+ serial_port_in(port, UART_RX);
uart_port_unlock_irqrestore(&up->port, flags);
return 1;
}
- lsr = orig_lsr = up->port.serial_in(&up->port, UART_LSR);
+ lsr = orig_lsr = serial_port_in(port, UART_LSR);
/* Process incoming characters first */
if ((lsr & (UART_LSR_DR | UART_LSR_BI)) &&
@@ -71,7 +71,7 @@ int fsl8250_handle_irq(struct uart_port *port)
if ((orig_lsr & UART_LSR_OE) && (up->overrun_backoff_time_ms > 0)) {
unsigned long delay;
- up->ier = port->serial_in(port, UART_IER);
+ up->ier = serial_port_in(port, UART_IER);
if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
port->ops->stop_rx(port);
} else {
diff --git a/drivers/tty/serial/8250/8250_ni.c b/drivers/tty/serial/8250/8250_ni.c
new file mode 100644
index 000000000000..b10a42d2ad63
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_ni.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NI 16550 UART Driver
+ *
+ * The National Instruments (NI) 16550 is a UART that is compatible with the
+ * TL16C550C and OX16C950B register interfaces, but has additional functions
+ * for RS-485 transceiver control. This driver implements support for the
+ * additional functionality on top of the standard serial8250 core.
+ *
+ * Copyright 2012-2023 National Instruments Corporation
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/clk.h>
+
+#include "8250.h"
+
+/* Extra bits in UART_ACR */
+#define NI16550_ACR_AUTO_DTR_EN BIT(4)
+
+/* TFS - TX FIFO Size */
+#define NI16550_TFS_OFFSET 0x0C
+/* RFS - RX FIFO Size */
+#define NI16550_RFS_OFFSET 0x0D
+
+/* PMR - Port Mode Register */
+#define NI16550_PMR_OFFSET 0x0E
+/* PMR[1:0] - Port Capabilities */
+#define NI16550_PMR_CAP_MASK GENMASK(1, 0)
+#define NI16550_PMR_NOT_IMPL FIELD_PREP(NI16550_PMR_CAP_MASK, 0) /* not implemented */
+#define NI16550_PMR_CAP_RS232 FIELD_PREP(NI16550_PMR_CAP_MASK, 1) /* RS-232 capable */
+#define NI16550_PMR_CAP_RS485 FIELD_PREP(NI16550_PMR_CAP_MASK, 2) /* RS-485 capable */
+#define NI16550_PMR_CAP_DUAL FIELD_PREP(NI16550_PMR_CAP_MASK, 3) /* dual-port */
+/* PMR[4] - Interface Mode */
+#define NI16550_PMR_MODE_MASK GENMASK(4, 4)
+#define NI16550_PMR_MODE_RS232 FIELD_PREP(NI16550_PMR_MODE_MASK, 0) /* currently 232 */
+#define NI16550_PMR_MODE_RS485 FIELD_PREP(NI16550_PMR_MODE_MASK, 1) /* currently 485 */
+
+/* PCR - Port Control Register */
+/*
+ * Wire Mode | Tx enabled? | Rx enabled?
+ * ---------------|----------------------|--------------------------
+ * PCR_RS422 | Always | Always
+ * PCR_ECHO_RS485 | When DTR asserted | Always
+ * PCR_DTR_RS485 | When DTR asserted | Disabled when TX enabled
+ * PCR_AUTO_RS485 | When data in TX FIFO | Disabled when TX enabled
+ */
+#define NI16550_PCR_OFFSET 0x0F
+#define NI16550_PCR_WIRE_MODE_MASK GENMASK(1, 0)
+#define NI16550_PCR_RS422 FIELD_PREP(NI16550_PCR_WIRE_MODE_MASK, 0)
+#define NI16550_PCR_ECHO_RS485 FIELD_PREP(NI16550_PCR_WIRE_MODE_MASK, 1)
+#define NI16550_PCR_DTR_RS485 FIELD_PREP(NI16550_PCR_WIRE_MODE_MASK, 2)
+#define NI16550_PCR_AUTO_RS485 FIELD_PREP(NI16550_PCR_WIRE_MODE_MASK, 3)
+#define NI16550_PCR_TXVR_ENABLE_BIT BIT(3)
+#define NI16550_PCR_RS485_TERMINATION_BIT BIT(6)
+
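
To make the wire-mode table above concrete, a hypothetical switch to DTR-controlled RS-485 would clear PCR[1:0] and set the new mode while preserving the remaining bits, in the same style as ni16550_rs485_config() below:

	u8 pcr = serial_in(up, NI16550_PCR_OFFSET);

	pcr &= ~NI16550_PCR_WIRE_MODE_MASK;	/* clear PCR[1:0] */
	pcr |= NI16550_PCR_DTR_RS485;		/* TX enabled while DTR asserted */
	serial_out(up, NI16550_PCR_OFFSET, pcr);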
+/* flags for ni16550_device_info */
+#define NI_HAS_PMR BIT(0)
+
+struct ni16550_device_info {
+ u32 uartclk;
+ u8 prescaler;
+ u8 flags;
+};
+
+struct ni16550_data {
+ int line;
+ struct clk *clk;
+};
+
+static int ni16550_enable_transceivers(struct uart_port *port)
+{
+ u8 pcr;
+
+ pcr = port->serial_in(port, NI16550_PCR_OFFSET);
+ pcr |= NI16550_PCR_TXVR_ENABLE_BIT;
+ dev_dbg(port->dev, "enable transceivers: write pcr: 0x%02x\n", pcr);
+ port->serial_out(port, NI16550_PCR_OFFSET, pcr);
+
+ return 0;
+}
+
+static int ni16550_disable_transceivers(struct uart_port *port)
+{
+ u8 pcr;
+
+ pcr = port->serial_in(port, NI16550_PCR_OFFSET);
+ pcr &= ~NI16550_PCR_TXVR_ENABLE_BIT;
+ dev_dbg(port->dev, "disable transceivers: write pcr: 0x%02x\n", pcr);
+ port->serial_out(port, NI16550_PCR_OFFSET, pcr);
+
+ return 0;
+}
+
+static int ni16550_rs485_config(struct uart_port *port,
+ struct ktermios *termios,
+ struct serial_rs485 *rs485)
+{
+ struct uart_8250_port *up = container_of(port, struct uart_8250_port, port);
+ u8 pcr;
+
+ pcr = serial_in(up, NI16550_PCR_OFFSET);
+ pcr &= ~NI16550_PCR_WIRE_MODE_MASK;
+
+ if ((rs485->flags & SER_RS485_MODE_RS422) ||
+ !(rs485->flags & SER_RS485_ENABLED)) {
+ /* RS-422 */
+ pcr |= NI16550_PCR_RS422;
+ up->acr &= ~NI16550_ACR_AUTO_DTR_EN;
+ } else {
+ /* RS-485 2-wire Auto */
+ pcr |= NI16550_PCR_AUTO_RS485;
+ up->acr |= NI16550_ACR_AUTO_DTR_EN;
+ }
+
+ dev_dbg(port->dev, "config rs485: write pcr: 0x%02x, acr: 0x%02x\n", pcr, up->acr);
+ serial_out(up, NI16550_PCR_OFFSET, pcr);
+ serial_icr_write(up, UART_ACR, up->acr);
+
+ return 0;
+}
+
+static bool is_pmr_rs232_mode(struct uart_8250_port *up)
+{
+ u8 pmr = serial_in(up, NI16550_PMR_OFFSET);
+ u8 pmr_mode = pmr & NI16550_PMR_MODE_MASK;
+ u8 pmr_cap = pmr & NI16550_PMR_CAP_MASK;
+
+ /*
+ * If the PMR is not implemented, then by default NI UARTs are
+ * connected to RS-485 transceivers
+ */
+ if (pmr_cap == NI16550_PMR_NOT_IMPL)
+ return false;
+
+ if (pmr_cap == NI16550_PMR_CAP_DUAL)
+ /*
+ * If the port is dual-mode capable, then read the mode bit
+ * to know the current mode
+ */
+ return pmr_mode == NI16550_PMR_MODE_RS232;
+ /*
+ * If it is not dual-mode capable, then decide based on the
+ * capability
+ */
+ return pmr_cap == NI16550_PMR_CAP_RS232;
+}
+
+static void ni16550_config_prescaler(struct uart_8250_port *up,
+ u8 prescaler)
+{
+ /*
+ * Page in the Enhanced Mode Registers
+ * Sets EFR[4] for Enhanced Mode.
+ */
+ u8 lcr_value;
+ u8 efr_value;
+
+ lcr_value = serial_in(up, UART_LCR);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+
+ efr_value = serial_in(up, UART_EFR);
+ efr_value |= UART_EFR_ECB;
+
+ serial_out(up, UART_EFR, efr_value);
+
+ /* Page out the Enhanced Mode Registers */
+ serial_out(up, UART_LCR, lcr_value);
+
+ /* Set prescaler to CPR register. */
+ serial_out(up, UART_SCR, UART_CPR);
+ serial_out(up, UART_ICR, prescaler);
+}
+
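
The tail of ni16550_config_prescaler() uses the 16C950 indexed-register idiom: the index of the target register (UART_CPR, the clock prescaler) is written to SCR, then the value is written through ICR. This matches the serial8250 core helper used elsewhere in this driver, which is essentially:

static void serial_icr_write(struct uart_8250_port *up, int offset, int value)
{
	serial_out(up, UART_SCR, offset);	/* select the indexed register */
	serial_out(up, UART_ICR, value);	/* write its value */
}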
+static const struct serial_rs485 ni16550_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_MODE_RS422 | SER_RS485_RTS_ON_SEND |
+ SER_RS485_RTS_AFTER_SEND,
+ /*
+ * delay_rts_* and RX_DURING_TX are not supported.
+ *
+ * RTS_{ON,AFTER}_SEND are supported, but ignored; the transceiver
+ * is connected in only one way and we don't need userspace to tell
+ * us, but want to retain compatibility with applications that do.
+ */
+};
+
+static void ni16550_rs485_setup(struct uart_port *port)
+{
+ port->rs485_config = ni16550_rs485_config;
+ port->rs485_supported = ni16550_rs485_supported;
+ /*
+ * The hardware comes up by default in 2-wire auto mode and we
+ * set the flags to represent that
+ */
+ port->rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
+}
+
+static int ni16550_port_startup(struct uart_port *port)
+{
+ int ret;
+
+ ret = serial8250_do_startup(port);
+ if (ret)
+ return ret;
+
+ return ni16550_enable_transceivers(port);
+}
+
+static void ni16550_port_shutdown(struct uart_port *port)
+{
+ ni16550_disable_transceivers(port);
+
+ serial8250_do_shutdown(port);
+}
+
+static int ni16550_get_regs(struct platform_device *pdev,
+ struct uart_port *port)
+{
+ struct resource *regs;
+
+ regs = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (regs) {
+ port->iotype = UPIO_PORT;
+ port->iobase = regs->start;
+
+ return 0;
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (regs) {
+ port->iotype = UPIO_MEM;
+ port->mapbase = regs->start;
+ port->mapsize = resource_size(regs);
+ port->flags |= UPF_IOREMAP;
+
+ port->membase = devm_ioremap(&pdev->dev, port->mapbase,
+ port->mapsize);
+ if (!port->membase)
+ return -ENOMEM;
+
+ return 0;
+ }
+
+ dev_err(&pdev->dev, "no registers defined\n");
+ return -EINVAL;
+}
+
+/*
+ * Very old implementations don't have the TFS or RFS registers
+ * defined, so we may read all-0s or all-1s. For such devices,
+ * assume a FIFO size of 128.
+ */
+static u8 ni16550_read_fifo_size(struct uart_8250_port *uart, int reg)
+{
+ u8 value = serial_in(uart, reg);
+
+ if (value == 0x00 || value == 0xFF)
+ return 128;
+
+ return value;
+}
+
+static void ni16550_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ up->mcr |= UART_MCR_CLKSEL;
+ serial8250_do_set_mctrl(port, mctrl);
+}
+
+static int ni16550_probe(struct platform_device *pdev)
+{
+ const struct ni16550_device_info *info;
+ struct device *dev = &pdev->dev;
+ struct uart_8250_port uart = {};
+ unsigned int txfifosz, rxfifosz;
+ unsigned int prescaler = 0;
+ struct ni16550_data *data;
+ const char *portmode;
+ bool rs232_property;
+ int ret;
+ int irq;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock_init(&uart.port.lock);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = ni16550_get_regs(pdev, &uart.port);
+ if (ret < 0)
+ return ret;
+
+ /* early setup so that serial_in()/serial_out() work */
+ serial8250_set_defaults(&uart);
+
+ info = device_get_match_data(dev);
+
+ uart.port.dev = dev;
+ uart.port.irq = irq;
+ uart.port.irqflags = IRQF_SHARED;
+ uart.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF
+ | UPF_FIXED_PORT | UPF_FIXED_TYPE;
+ uart.port.startup = ni16550_port_startup;
+ uart.port.shutdown = ni16550_port_shutdown;
+
+ /*
+ * The FIFO sizes of this hardware instantiation are held in registers.
+ */
+ txfifosz = ni16550_read_fifo_size(&uart, NI16550_TFS_OFFSET);
+ rxfifosz = ni16550_read_fifo_size(&uart, NI16550_RFS_OFFSET);
+
+ dev_dbg(dev, "NI 16550 has TX FIFO size %u, RX FIFO size %u\n",
+ txfifosz, rxfifosz);
+
+ uart.port.type = PORT_16550A;
+ uart.port.fifosize = txfifosz;
+ uart.tx_loadsz = txfifosz;
+ uart.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10;
+ uart.capabilities = UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR;
+
+ /*
+ * Declaration of the base clock frequency can come from one of:
+ * - static declaration in this driver (for older ACPI IDs)
+ * - a "clock-frequency" ACPI or device property
+ * - the clock bound to the device, via the clock framework
+ */
+ if (info->uartclk)
+ uart.port.uartclk = info->uartclk;
+ if (device_property_read_u32(dev, "clock-frequency",
+ &uart.port.uartclk)) {
+ data->clk = devm_clk_get_enabled(dev, NULL);
+ if (!IS_ERR(data->clk))
+ uart.port.uartclk = clk_get_rate(data->clk);
+ }
+
+ if (!uart.port.uartclk) {
+ dev_err(dev, "unable to determine clock frequency!\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ if (info->prescaler)
+ prescaler = info->prescaler;
+ device_property_read_u32(dev, "clock-prescaler", &prescaler);
+
+ if (prescaler != 0) {
+ uart.port.set_mctrl = ni16550_set_mctrl;
+ ni16550_config_prescaler(&uart, (u8)prescaler);
+ }
+
+ /*
+ * The determination of whether or not this is an RS-485 or RS-232 port
+ * can come from the PMR (if present), otherwise we're solely an RS-485
+ * port.
+ *
+ * This is a device-specific property, and there are old devices in the
+ * field using "transceiver" as an ACPI property, so we have to check
+ * for that as well.
+ */
+ if (!device_property_read_string(dev, "transceiver", &portmode)) {
+ rs232_property = strncmp(portmode, "RS-232", 6) == 0;
+
+ dev_dbg(dev, "port is in %s mode (via device property)\n",
+ rs232_property ? "RS-232" : "RS-485");
+ } else if (info->flags & NI_HAS_PMR) {
+ rs232_property = is_pmr_rs232_mode(&uart);
+
+ dev_dbg(dev, "port is in %s mode (via PMR)\n",
+ rs232_property ? "RS-232" : "RS-485");
+ } else {
+ rs232_property = false;
+
+ dev_dbg(dev, "port is fixed as RS-485\n");
+ }
+
+ if (!rs232_property) {
+ /*
+ * Neither the 'transceiver' property nor the PMR indicate
+ * that this is an RS-232 port, so it must be an RS-485 one.
+ */
+ ni16550_rs485_setup(&uart.port);
+ }
+
+ ret = serial8250_register_8250_port(&uart);
+ if (ret < 0)
+ goto err;
+ data->line = ret;
+
+ platform_set_drvdata(pdev, data);
+ return 0;
+
+err:
+ return ret;
+}
+
+static void ni16550_remove(struct platform_device *pdev)
+{
+ struct ni16550_data *data = platform_get_drvdata(pdev);
+
+ serial8250_unregister_port(data->line);
+}
+
+#ifdef CONFIG_ACPI
+/* NI 16550 RS-485 Interface */
+static const struct ni16550_device_info nic7750 = {
+ .uartclk = 33333333,
+};
+
+/* NI CVS-145x RS-485 Interface */
+static const struct ni16550_device_info nic7772 = {
+ .uartclk = 1843200,
+ .flags = NI_HAS_PMR,
+};
+
+/* NI cRIO-904x RS-485 Interface */
+static const struct ni16550_device_info nic792b = {
+ /* Sets the UART clock rate to 22.222 MHz with a 1.125 prescaler */
+ .uartclk = 22222222,
+ .prescaler = 0x09,
+};
+
+/* NI sbRIO 96x8 RS-232/485 Interfaces */
+static const struct ni16550_device_info nic7a69 = {
+ /* Sets the UART clock rate to 29.629 MHz with a 1.125 prescaler */
+ .uartclk = 29629629,
+ .prescaler = 0x09,
+};
+static const struct acpi_device_id ni16550_acpi_match[] = {
+ { "NIC7750", (kernel_ulong_t)&nic7750 },
+ { "NIC7772", (kernel_ulong_t)&nic7772 },
+ { "NIC792B", (kernel_ulong_t)&nic792b },
+ { "NIC7A69", (kernel_ulong_t)&nic7a69 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, ni16550_acpi_match);
+#endif
+
+static struct platform_driver ni16550_driver = {
+ .driver = {
+ .name = "ni16550",
+ .acpi_match_table = ACPI_PTR(ni16550_acpi_match),
+ },
+ .probe = ni16550_probe,
+ .remove = ni16550_remove,
+};
+
+module_platform_driver(ni16550_driver);
+
+MODULE_AUTHOR("Emerson Electric Co.");
+MODULE_DESCRIPTION("NI 16550 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index c2b75e3f106d..2a0ce11f405d 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -692,7 +692,7 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
/* Synchronize UART_IER access against the console. */
uart_port_lock(port);
- up->ier = port->serial_in(port, UART_IER);
+ up->ier = serial_port_in(port, UART_IER);
if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
port->ops->stop_rx(port);
} else {
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index df4d0d832e54..73c200127b08 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -2728,6 +2728,22 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.setup = pci_oxsemi_tornado_setup,
},
{
+ .vendor = PCI_VENDOR_ID_INTASHIELD,
+ .device = 0x4026,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTASHIELD,
+ .device = 0x4021,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
+ {
.vendor = PCI_VENDOR_ID_INTEL,
.device = 0x8811,
.subvendor = PCI_ANY_ID,
@@ -5253,6 +5269,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0BA2,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0BA3,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
/*
* Brainboxes UC-235/246
*/
@@ -5373,6 +5397,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_4_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0C42,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_4_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0C43,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_4_115200 },
/*
* Brainboxes UC-420
*/
@@ -5599,6 +5631,20 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_oxsemi_1_15625000 },
+ /*
+ * Brainboxes XC-235
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4026,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_1_15625000 },
+ /*
+ * Brainboxes XC-475
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4021,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_1_15625000 },
/*
* Perle PCI-RAS cards
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index c57f44882abb..8ac452cea36c 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1678,7 +1678,7 @@ static void serial8250_disable_ms(struct uart_port *port)
if (up->bugs & UART_BUG_NOMSR)
return;
- mctrl_gpio_disable_ms(up->gpios);
+ mctrl_gpio_disable_ms_no_sync(up->gpios);
up->ier &= ~UART_IER_MSI;
serial_port_out(port, UART_IER, up->ier);
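
serial8250_disable_ms() runs with the port lock held, so it must not sleep; the _no_sync variant used here disables the modem-status GPIO interrupts without synchronizing against running handlers (i.e. no synchronize_irq()). A sketch of the calling context this change assumes:

	uart_port_lock_irqsave(port, &flags);
	mctrl_gpio_disable_ms_no_sync(up->gpios);	/* no irq sync: safe under the lock */
	up->ier &= ~UART_IER_MSI;
	serial_port_out(port, UART_IER, up->ier);
	uart_port_unlock_irqrestore(port, flags);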
@@ -2406,28 +2406,26 @@ int serial8250_do_startup(struct uart_port *port)
* test if we receive TX irq. This way, we'll never enable
* UART_BUG_TXEN.
*/
- if (up->port.quirks & UPQ_NO_TXEN_TEST)
- goto dont_test_tx_en;
-
- /*
- * Do a quick test to see if we receive an interrupt when we enable
- * the TX irq.
- */
- serial_port_out(port, UART_IER, UART_IER_THRI);
- lsr = serial_port_in(port, UART_LSR);
- iir = serial_port_in(port, UART_IIR);
- serial_port_out(port, UART_IER, 0);
+ if (!(up->port.quirks & UPQ_NO_TXEN_TEST)) {
+ /*
+ * Do a quick test to see if we receive an interrupt when we
+ * enable the TX irq.
+ */
+ serial_port_out(port, UART_IER, UART_IER_THRI);
+ lsr = serial_port_in(port, UART_LSR);
+ iir = serial_port_in(port, UART_IIR);
+ serial_port_out(port, UART_IER, 0);
- if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) {
- if (!(up->bugs & UART_BUG_TXEN)) {
- up->bugs |= UART_BUG_TXEN;
- dev_dbg(port->dev, "enabling bad tx status workarounds\n");
+ if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) {
+ if (!(up->bugs & UART_BUG_TXEN)) {
+ up->bugs |= UART_BUG_TXEN;
+ dev_dbg(port->dev, "enabling bad tx status workarounds\n");
+ }
+ } else {
+ up->bugs &= ~UART_BUG_TXEN;
}
- } else {
- up->bugs &= ~UART_BUG_TXEN;
}
-dont_test_tx_en:
uart_port_unlock_irqrestore(port, flags);
/*
@@ -2968,7 +2966,6 @@ static int serial8250_request_std_resource(struct uart_8250_port *up)
{
unsigned int size = serial8250_port_size(up);
struct uart_port *port = &up->port;
- int ret = 0;
switch (port->iotype) {
case UPIO_AU:
@@ -2977,32 +2974,28 @@ static int serial8250_request_std_resource(struct uart_8250_port *up)
case UPIO_MEM32BE:
case UPIO_MEM16:
case UPIO_MEM:
- if (!port->mapbase) {
- ret = -EINVAL;
- break;
- }
+ if (!port->mapbase)
+ return -EINVAL;
- if (!request_mem_region(port->mapbase, size, "serial")) {
- ret = -EBUSY;
- break;
- }
+ if (!request_mem_region(port->mapbase, size, "serial"))
+ return -EBUSY;
if (port->flags & UPF_IOREMAP) {
port->membase = ioremap(port->mapbase, size);
if (!port->membase) {
release_mem_region(port->mapbase, size);
- ret = -ENOMEM;
+ return -ENOMEM;
}
}
- break;
-
+ return 0;
case UPIO_HUB6:
case UPIO_PORT:
if (!request_region(port->iobase, size, "serial"))
- ret = -EBUSY;
- break;
+ return -EBUSY;
+ return 0;
}
- return ret;
+
+ return 0;
}
static void serial8250_release_std_resource(struct uart_8250_port *up)
diff --git a/drivers/tty/serial/8250/8250_rsa.c b/drivers/tty/serial/8250/8250_rsa.c
index dfaa613e452d..82f2593b4c59 100644
--- a/drivers/tty/serial/8250/8250_rsa.c
+++ b/drivers/tty/serial/8250/8250_rsa.c
@@ -16,30 +16,27 @@ static unsigned int probe_rsa_count;
static int rsa8250_request_resource(struct uart_8250_port *up)
{
- unsigned long start = UART_RSA_BASE << up->port.regshift;
- unsigned int size = 8 << up->port.regshift;
struct uart_port *port = &up->port;
- int ret = -EINVAL;
+ unsigned long start = UART_RSA_BASE << port->regshift;
+ unsigned int size = 8 << port->regshift;
switch (port->iotype) {
case UPIO_HUB6:
case UPIO_PORT:
start += port->iobase;
- if (request_region(start, size, "serial-rsa"))
- ret = 0;
- else
- ret = -EBUSY;
- break;
+ if (!request_region(start, size, "serial-rsa"))
+ return -EBUSY;
+ return 0;
+ default:
+ return -EINVAL;
}
-
- return ret;
}
static void rsa8250_release_resource(struct uart_8250_port *up)
{
- unsigned long offset = UART_RSA_BASE << up->port.regshift;
- unsigned int size = 8 << up->port.regshift;
struct uart_port *port = &up->port;
+ unsigned long offset = UART_RSA_BASE << port->regshift;
+ unsigned int size = 8 << port->regshift;
switch (port->iotype) {
case UPIO_HUB6:
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 55d26d16df9b..bd3d636ff962 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -569,6 +569,19 @@ config SERIAL_8250_BCM7271
including DMA support and high accuracy BAUD rates, say
Y to this option. If unsure, say N.
+config SERIAL_8250_NI
+ tristate "NI 16550 based serial port"
+ depends on SERIAL_8250
+ depends on (X86 && ACPI) || COMPILE_TEST
+ help
+ This driver supports the integrated serial ports on National
+ Instruments (NI) controller hardware. This is required for all NI
+ controller models with onboard RS-485 or dual-mode RS-485/RS-232
+ ports.
+
+ To compile this driver as a module, choose M here: the module
+ will be called 8250_ni.
+
config SERIAL_OF_PLATFORM
tristate "Devicetree based probing for 8250 ports"
depends on SERIAL_8250 && OF
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index 1516de629b61..b04eeda03b23 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_SERIAL_8250_LPSS) += 8250_lpss.o
obj-$(CONFIG_SERIAL_8250_MEN_MCB) += 8250_men_mcb.o
obj-$(CONFIG_SERIAL_8250_MID) += 8250_mid.o
obj-$(CONFIG_SERIAL_8250_MT6577) += 8250_mtk.o
+obj-$(CONFIG_SERIAL_8250_NI) += 8250_ni.o
obj-$(CONFIG_SERIAL_OF_PLATFORM) += 8250_of.o
obj-$(CONFIG_SERIAL_8250_OMAP) += 8250_omap.o
obj-$(CONFIG_SERIAL_8250_PARISC) += 8250_parisc.o
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 976dae3bb1bb..79a8186d3361 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -179,25 +179,6 @@ config SERIAL_ATMEL_TTYAT
Say Y if you have an external 8250/16C550 UART. If unsure, say N.
-config SERIAL_KGDB_NMI
- bool "Serial console over KGDB NMI debugger port"
- depends on KGDB_SERIAL_CONSOLE
- help
- This special driver allows you to temporary use NMI debugger port
- as a normal console (assuming that the port is attached to KGDB).
-
- Unlike KDB's disable_nmi command, with this driver you are always
- able to go back to the debugger using KGDB escape sequence ($3#33).
- This is because this console driver processes the input in NMI
- context, and thus is able to intercept the magic sequence.
-
- Note that since the console interprets input and uses polling
- communication methods, for things like PPP you still must fully
- detach debugger port from the KGDB NMI (i.e. disable_nmi), and
- use raw console.
-
- If unsure, say N.
-
config SERIAL_MESON
tristate "Meson serial port support"
depends on ARCH_MESON || COMPILE_TEST
@@ -306,6 +287,29 @@ config SERIAL_TEGRA_TCU_CONSOLE
If unsure, say Y.
+config SERIAL_TEGRA_UTC
+ tristate "NVIDIA Tegra UART Trace Controller"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ select SERIAL_CORE
+ help
+ Support for the Tegra UTC (UART Trace Controller) client serial port.
+
+ UTC is a hardware-based serial port that multiplexes the data streams
+ of up to 16 UTC clients onto a single hardware serial port.
+
+config SERIAL_TEGRA_UTC_CONSOLE
+ bool "Support for console on a Tegra UTC serial port"
+ depends on SERIAL_TEGRA_UTC
+ select SERIAL_CORE_CONSOLE
+ default SERIAL_TEGRA_UTC
+ help
+ If you say Y here, it will be possible to use a Tegra UTC client as
+ the system console (the system console is the device which receives
+ all kernel messages and warnings and which allows logins in single
+ user mode).
+
+ If unsure, say Y.
+
config SERIAL_MAX3100
tristate "MAX3100/3110/3111/3222 support"
depends on SPI
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 6ff74f0a9530..d58d9f719889 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -86,6 +86,7 @@ obj-$(CONFIG_SERIAL_STM32) += stm32-usart.o
obj-$(CONFIG_SERIAL_SUNPLUS) += sunplus-uart.o
obj-$(CONFIG_SERIAL_TEGRA) += serial-tegra.o
obj-$(CONFIG_SERIAL_TEGRA_TCU) += tegra-tcu.o
+obj-$(CONFIG_SERIAL_TEGRA_UTC) += tegra-utc.o
obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o
obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
@@ -96,6 +97,5 @@ obj-$(CONFIG_SERIAL_ZS) += zs.o
# GPIOLIB helpers for modem control lines
obj-$(CONFIG_SERIAL_MCTRL_GPIO) += serial_mctrl_gpio.o
-obj-$(CONFIG_SERIAL_KGDB_NMI) += kgdb_nmi.o
obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
obj-$(CONFIG_SERIAL_NUVOTON_MA35D1) += ma35d1_serial.o
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 98f178bdbcbe..dc092204b472 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -272,6 +272,7 @@ struct uart_amba_port {
enum pl011_rs485_tx_state rs485_tx_state;
struct hrtimer trigger_start_tx;
struct hrtimer trigger_stop_tx;
+ bool console_line_ended;
#ifdef CONFIG_DMA_ENGINE
/* DMA stuff */
unsigned int dmacr; /* dma control reg */
@@ -2366,50 +2367,7 @@ static void pl011_console_putchar(struct uart_port *port, unsigned char ch)
while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
cpu_relax();
pl011_write(ch, uap, REG_DR);
-}
-
-static void
-pl011_console_write(struct console *co, const char *s, unsigned int count)
-{
- struct uart_amba_port *uap = amba_ports[co->index];
- unsigned int old_cr = 0, new_cr;
- unsigned long flags;
- int locked = 1;
-
- clk_enable(uap->clk);
-
- if (oops_in_progress)
- locked = uart_port_trylock_irqsave(&uap->port, &flags);
- else
- uart_port_lock_irqsave(&uap->port, &flags);
-
- /*
- * First save the CR then disable the interrupts
- */
- if (!uap->vendor->always_enabled) {
- old_cr = pl011_read(uap, REG_CR);
- new_cr = old_cr & ~UART011_CR_CTSEN;
- new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
- pl011_write(new_cr, uap, REG_CR);
- }
-
- uart_console_write(&uap->port, s, count, pl011_console_putchar);
-
- /*
- * Finally, wait for transmitter to become empty and restore the
- * TCR. Allow feature register bits to be inverted to work around
- * errata.
- */
- while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr)
- & uap->vendor->fr_busy)
- cpu_relax();
- if (!uap->vendor->always_enabled)
- pl011_write(old_cr, uap, REG_CR);
-
- if (locked)
- uart_port_unlock_irqrestore(&uap->port, flags);
-
- clk_disable(uap->clk);
+ uap->console_line_ended = (ch == '\n');
}
static void pl011_console_get_options(struct uart_amba_port *uap, int *baud,
@@ -2472,6 +2430,8 @@ static int pl011_console_setup(struct console *co, char *options)
if (ret)
return ret;
+ uap->console_line_ended = true;
+
if (dev_get_platdata(uap->port.dev)) {
struct amba_pl011_data *plat;
@@ -2555,14 +2515,105 @@ static int pl011_console_match(struct console *co, char *name, int idx,
return -ENODEV;
}
+static void
+pl011_console_write_atomic(struct console *co, struct nbcon_write_context *wctxt)
+{
+ struct uart_amba_port *uap = amba_ports[co->index];
+ unsigned int old_cr = 0;
+
+ if (!nbcon_enter_unsafe(wctxt))
+ return;
+
+ clk_enable(uap->clk);
+
+ if (!uap->vendor->always_enabled) {
+ old_cr = pl011_read(uap, REG_CR);
+ pl011_write((old_cr & ~UART011_CR_CTSEN) | (UART01x_CR_UARTEN | UART011_CR_TXE),
+ uap, REG_CR);
+ }
+
+ if (!uap->console_line_ended)
+ uart_console_write(&uap->port, "\n", 1, pl011_console_putchar);
+ uart_console_write(&uap->port, wctxt->outbuf, wctxt->len, pl011_console_putchar);
+
+ while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr) & uap->vendor->fr_busy)
+ cpu_relax();
+
+ if (!uap->vendor->always_enabled)
+ pl011_write(old_cr, uap, REG_CR);
+
+ clk_disable(uap->clk);
+
+ nbcon_exit_unsafe(wctxt);
+}
+
+static void
+pl011_console_write_thread(struct console *co, struct nbcon_write_context *wctxt)
+{
+ struct uart_amba_port *uap = amba_ports[co->index];
+ unsigned int old_cr = 0;
+
+ if (!nbcon_enter_unsafe(wctxt))
+ return;
+
+ clk_enable(uap->clk);
+
+ if (!uap->vendor->always_enabled) {
+ old_cr = pl011_read(uap, REG_CR);
+ pl011_write((old_cr & ~UART011_CR_CTSEN) | (UART01x_CR_UARTEN | UART011_CR_TXE),
+ uap, REG_CR);
+ }
+
+ if (nbcon_exit_unsafe(wctxt)) {
+ int i;
+ unsigned int len = READ_ONCE(wctxt->len);
+
+ for (i = 0; i < len; i++) {
+ if (!nbcon_enter_unsafe(wctxt))
+ break;
+ uart_console_write(&uap->port, wctxt->outbuf + i, 1, pl011_console_putchar);
+ if (!nbcon_exit_unsafe(wctxt))
+ break;
+ }
+ }
+
+ while (!nbcon_enter_unsafe(wctxt))
+ nbcon_reacquire_nobuf(wctxt);
+
+ while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr) & uap->vendor->fr_busy)
+ cpu_relax();
+
+ if (!uap->vendor->always_enabled)
+ pl011_write(old_cr, uap, REG_CR);
+
+ clk_disable(uap->clk);
+
+ nbcon_exit_unsafe(wctxt);
+}
+
+static void
+pl011_console_device_lock(struct console *co, unsigned long *flags)
+{
+ __uart_port_lock_irqsave(&amba_ports[co->index]->port, flags);
+}
+
+static void
+pl011_console_device_unlock(struct console *co, unsigned long flags)
+{
+ __uart_port_unlock_irqrestore(&amba_ports[co->index]->port, flags);
+}
+
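
The write path is now split per the NBCON console model: write_atomic() and write_thread() bracket all hardware access in unsafe sections, so a higher-priority context (a panic, for instance) can take the console over between characters but never mid register sequence, and the console_line_ended flag lets a takeover start on a fresh line. A minimal sketch of the per-character loop, assuming the nbcon API as used above (demo_putchar is an illustrative name):

static void demo_write_thread(struct console *co,
			      struct nbcon_write_context *wctxt)
{
	unsigned int i;

	for (i = 0; i < wctxt->len; i++) {
		if (!nbcon_enter_unsafe(wctxt))
			return;		/* ownership lost: back off */
		demo_putchar(co, wctxt->outbuf[i]);
		if (!nbcon_exit_unsafe(wctxt))
			return;
	}
}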
static struct uart_driver amba_reg;
static struct console amba_console = {
.name = "ttyAMA",
- .write = pl011_console_write,
.device = uart_console_device,
.setup = pl011_console_setup,
.match = pl011_console_match,
- .flags = CON_PRINTBUFFER | CON_ANYTIME,
+ .write_atomic = pl011_console_write_atomic,
+ .write_thread = pl011_console_write_thread,
+ .device_lock = pl011_console_device_lock,
+ .device_unlock = pl011_console_device_unlock,
+ .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_NBCON,
.index = -1,
.data = &amba_reg,
};
@@ -3000,7 +3051,7 @@ static const struct of_device_id sbsa_uart_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);
-static const struct acpi_device_id __maybe_unused sbsa_uart_acpi_match[] = {
+static const struct acpi_device_id sbsa_uart_acpi_match[] = {
{ "ARMH0011", 0 },
{ "ARMHB000", 0 },
{},
@@ -3013,8 +3064,8 @@ static struct platform_driver arm_sbsa_uart_platform_driver = {
.driver = {
.name = "sbsa-uart",
.pm = &pl011_dev_pm_ops,
- .of_match_table = of_match_ptr(sbsa_uart_of_match),
- .acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
+ .of_match_table = sbsa_uart_of_match,
+ .acpi_match_table = sbsa_uart_acpi_match,
.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
},
};
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index f44f9d20a974..8918fbd4bddd 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -700,7 +700,7 @@ static void atmel_disable_ms(struct uart_port *port)
atmel_port->ms_irq_enabled = false;
- mctrl_gpio_disable_ms(atmel_port->gpios);
+ mctrl_gpio_disable_ms_no_sync(atmel_port->gpios);
if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
idr |= ATMEL_US_CTSIC;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index c91b9d9818cd..4470966b826c 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -441,36 +441,36 @@ static unsigned int lpuart_get_baud_clk_rate(struct lpuart_port *sport)
static void lpuart_stop_tx(struct uart_port *port)
{
- unsigned char temp;
+ u8 cr2;
- temp = readb(port->membase + UARTCR2);
- temp &= ~(UARTCR2_TIE | UARTCR2_TCIE);
- writeb(temp, port->membase + UARTCR2);
+ cr2 = readb(port->membase + UARTCR2);
+ cr2 &= ~(UARTCR2_TIE | UARTCR2_TCIE);
+ writeb(cr2, port->membase + UARTCR2);
}
static void lpuart32_stop_tx(struct uart_port *port)
{
- unsigned long temp;
+ u32 ctrl;
- temp = lpuart32_read(port, UARTCTRL);
- temp &= ~(UARTCTRL_TIE | UARTCTRL_TCIE);
- lpuart32_write(port, temp, UARTCTRL);
+ ctrl = lpuart32_read(port, UARTCTRL);
+ ctrl &= ~(UARTCTRL_TIE | UARTCTRL_TCIE);
+ lpuart32_write(port, ctrl, UARTCTRL);
}
static void lpuart_stop_rx(struct uart_port *port)
{
- unsigned char temp;
+ u8 cr2;
- temp = readb(port->membase + UARTCR2);
- writeb(temp & ~UARTCR2_RE, port->membase + UARTCR2);
+ cr2 = readb(port->membase + UARTCR2);
+ writeb(cr2 & ~UARTCR2_RE, port->membase + UARTCR2);
}
static void lpuart32_stop_rx(struct uart_port *port)
{
- unsigned long temp;
+ u32 ctrl;
- temp = lpuart32_read(port, UARTCTRL);
- lpuart32_write(port, temp & ~UARTCTRL_RE, UARTCTRL);
+ ctrl = lpuart32_read(port, UARTCTRL);
+ lpuart32_write(port, ctrl & ~UARTCTRL_RE, UARTCTRL);
}
static void lpuart_dma_tx(struct lpuart_port *sport)
@@ -581,7 +581,7 @@ static int lpuart_dma_tx_request(struct uart_port *port)
ret = dmaengine_slave_config(sport->dma_tx_chan, &dma_tx_sconfig);
if (ret) {
- dev_err(sport->port.dev,
+ dev_err(port->dev,
"DMA slave config failed, err = %d\n", ret);
return ret;
}
@@ -599,7 +599,7 @@ static void lpuart_flush_buffer(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
struct dma_chan *chan = sport->dma_tx_chan;
- u32 val;
+ u32 fifo;
if (sport->lpuart_dma_tx_use) {
if (sport->dma_tx_in_progress) {
@@ -611,13 +611,13 @@ static void lpuart_flush_buffer(struct uart_port *port)
}
if (lpuart_is_32(sport)) {
- val = lpuart32_read(&sport->port, UARTFIFO);
- val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH;
- lpuart32_write(&sport->port, val, UARTFIFO);
+ fifo = lpuart32_read(port, UARTFIFO);
+ fifo |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH;
+ lpuart32_write(port, fifo, UARTFIFO);
} else {
- val = readb(sport->port.membase + UARTCFIFO);
- val |= UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH;
- writeb(val, sport->port.membase + UARTCFIFO);
+ fifo = readb(port->membase + UARTCFIFO);
+ fifo |= UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH;
+ writeb(fifo, port->membase + UARTCFIFO);
}
}
@@ -639,38 +639,36 @@ static void lpuart32_wait_bit_set(struct uart_port *port, unsigned int offset,
static int lpuart_poll_init(struct uart_port *port)
{
- struct lpuart_port *sport = container_of(port,
- struct lpuart_port, port);
unsigned long flags;
- unsigned char temp;
+ u8 fifo;
- sport->port.fifosize = 0;
+ port->fifosize = 0;
- uart_port_lock_irqsave(&sport->port, &flags);
+ uart_port_lock_irqsave(port, &flags);
/* Disable Rx & Tx */
- writeb(0, sport->port.membase + UARTCR2);
+ writeb(0, port->membase + UARTCR2);
- temp = readb(sport->port.membase + UARTPFIFO);
+ fifo = readb(port->membase + UARTPFIFO);
/* Enable Rx and Tx FIFO */
- writeb(temp | UARTPFIFO_RXFE | UARTPFIFO_TXFE,
- sport->port.membase + UARTPFIFO);
+ writeb(fifo | UARTPFIFO_RXFE | UARTPFIFO_TXFE,
+ port->membase + UARTPFIFO);
/* flush Tx and Rx FIFO */
writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
- sport->port.membase + UARTCFIFO);
+ port->membase + UARTCFIFO);
/* explicitly clear RDRF */
- if (readb(sport->port.membase + UARTSR1) & UARTSR1_RDRF) {
- readb(sport->port.membase + UARTDR);
- writeb(UARTSFIFO_RXUF, sport->port.membase + UARTSFIFO);
+ if (readb(port->membase + UARTSR1) & UARTSR1_RDRF) {
+ readb(port->membase + UARTDR);
+ writeb(UARTSFIFO_RXUF, port->membase + UARTSFIFO);
}
- writeb(0, sport->port.membase + UARTTWFIFO);
- writeb(1, sport->port.membase + UARTRWFIFO);
+ writeb(0, port->membase + UARTTWFIFO);
+ writeb(1, port->membase + UARTRWFIFO);
/* Enable Rx and Tx */
- writeb(UARTCR2_RE | UARTCR2_TE, sport->port.membase + UARTCR2);
- uart_port_unlock_irqrestore(&sport->port, flags);
+ writeb(UARTCR2_RE | UARTCR2_TE, port->membase + UARTCR2);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
@@ -693,33 +691,32 @@ static int lpuart_poll_get_char(struct uart_port *port)
static int lpuart32_poll_init(struct uart_port *port)
{
unsigned long flags;
- struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
- u32 temp;
+ u32 fifo;
- sport->port.fifosize = 0;
+ port->fifosize = 0;
- uart_port_lock_irqsave(&sport->port, &flags);
+ uart_port_lock_irqsave(port, &flags);
/* Disable Rx & Tx */
- lpuart32_write(&sport->port, 0, UARTCTRL);
+ lpuart32_write(port, 0, UARTCTRL);
- temp = lpuart32_read(&sport->port, UARTFIFO);
+ fifo = lpuart32_read(port, UARTFIFO);
/* Enable Rx and Tx FIFO */
- lpuart32_write(&sport->port, temp | UARTFIFO_RXFE | UARTFIFO_TXFE, UARTFIFO);
+ lpuart32_write(port, fifo | UARTFIFO_RXFE | UARTFIFO_TXFE, UARTFIFO);
/* flush Tx and Rx FIFO */
- lpuart32_write(&sport->port, UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH, UARTFIFO);
+ lpuart32_write(port, UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH, UARTFIFO);
/* explicitly clear RDRF */
- if (lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_RDRF) {
- lpuart32_read(&sport->port, UARTDATA);
- lpuart32_write(&sport->port, UARTFIFO_RXUF, UARTFIFO);
+ if (lpuart32_read(port, UARTSTAT) & UARTSTAT_RDRF) {
+ lpuart32_read(port, UARTDATA);
+ lpuart32_write(port, UARTFIFO_RXUF, UARTFIFO);
}
/* Enable Rx and Tx */
- lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL);
- uart_port_unlock_irqrestore(&sport->port, flags);
+ lpuart32_write(port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
@@ -752,7 +749,7 @@ static inline void lpuart_transmit_buffer(struct lpuart_port *sport)
static inline void lpuart32_transmit_buffer(struct lpuart_port *sport)
{
struct tty_port *tport = &sport->port.state->port;
- unsigned long txcnt;
+ u32 txcnt;
unsigned char c;
if (sport->port.x_char) {
@@ -789,10 +786,10 @@ static void lpuart_start_tx(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port,
struct lpuart_port, port);
- unsigned char temp;
+ u8 cr2;
- temp = readb(port->membase + UARTCR2);
- writeb(temp | UARTCR2_TIE, port->membase + UARTCR2);
+ cr2 = readb(port->membase + UARTCR2);
+ writeb(cr2 | UARTCR2_TIE, port->membase + UARTCR2);
if (sport->lpuart_dma_tx_use) {
if (!lpuart_stopped_or_empty(port))
@@ -806,14 +803,14 @@ static void lpuart_start_tx(struct uart_port *port)
static void lpuart32_start_tx(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
- unsigned long temp;
+ u32 ctrl;
if (sport->lpuart_dma_tx_use) {
if (!lpuart_stopped_or_empty(port))
lpuart_dma_tx(sport);
} else {
- temp = lpuart32_read(port, UARTCTRL);
- lpuart32_write(port, temp | UARTCTRL_TIE, UARTCTRL);
+ ctrl = lpuart32_read(port, UARTCTRL);
+ lpuart32_write(port, ctrl | UARTCTRL_TIE, UARTCTRL);
if (lpuart32_read(port, UARTSTAT) & UARTSTAT_TDRE)
lpuart32_transmit_buffer(sport);
@@ -839,8 +836,8 @@ static unsigned int lpuart_tx_empty(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port,
struct lpuart_port, port);
- unsigned char sr1 = readb(port->membase + UARTSR1);
- unsigned char sfifo = readb(port->membase + UARTSFIFO);
+ u8 sr1 = readb(port->membase + UARTSR1);
+ u8 sfifo = readb(port->membase + UARTSFIFO);
if (sport->dma_tx_in_progress)
return 0;
@@ -855,9 +852,9 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port,
struct lpuart_port, port);
- unsigned long stat = lpuart32_read(port, UARTSTAT);
- unsigned long sfifo = lpuart32_read(port, UARTFIFO);
- unsigned long ctrl = lpuart32_read(port, UARTCTRL);
+ u32 stat = lpuart32_read(port, UARTSTAT);
+ u32 sfifo = lpuart32_read(port, UARTFIFO);
+ u32 ctrl = lpuart32_read(port, UARTCTRL);
if (sport->dma_tx_in_progress)
return 0;
@@ -884,7 +881,7 @@ static void lpuart_rxint(struct lpuart_port *sport)
{
unsigned int flg, ignored = 0, overrun = 0;
struct tty_port *port = &sport->port.state->port;
- unsigned char rx, sr;
+ u8 rx, sr;
uart_port_lock(&sport->port);
@@ -961,7 +958,7 @@ static void lpuart32_rxint(struct lpuart_port *sport)
{
unsigned int flg, ignored = 0;
struct tty_port *port = &sport->port.state->port;
- unsigned long rx, sr;
+ u32 rx, sr;
bool is_break;
uart_port_lock(&sport->port);
@@ -1039,7 +1036,7 @@ out:
static irqreturn_t lpuart_int(int irq, void *dev_id)
{
struct lpuart_port *sport = dev_id;
- unsigned char sts;
+ u8 sts;
sts = readb(sport->port.membase + UARTSR1);
@@ -1113,7 +1110,7 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
int count, copied;
if (lpuart_is_32(sport)) {
- unsigned long sr = lpuart32_read(&sport->port, UARTSTAT);
+ u32 sr = lpuart32_read(&sport->port, UARTSTAT);
if (sr & (UARTSTAT_PE | UARTSTAT_FE)) {
/* Clear the error flags */
@@ -1125,10 +1122,10 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
sport->port.icount.frame++;
}
} else {
- unsigned char sr = readb(sport->port.membase + UARTSR1);
+ u8 sr = readb(sport->port.membase + UARTSR1);
if (sr & (UARTSR1_PE | UARTSR1_FE)) {
- unsigned char cr2;
+ u8 cr2;
/* Disable receiver during this operation... */
cr2 = readb(sport->port.membase + UARTCR2);
@@ -1279,7 +1276,7 @@ static void lpuart32_dma_idleint(struct lpuart_port *sport)
static irqreturn_t lpuart32_int(int irq, void *dev_id)
{
struct lpuart_port *sport = dev_id;
- unsigned long sts, rxcount;
+ u32 sts, rxcount;
sts = lpuart32_read(&sport->port, UARTSTAT);
rxcount = lpuart32_read(&sport->port, UARTWATER);
@@ -1411,12 +1408,12 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
dma_async_issue_pending(chan);
if (lpuart_is_32(sport)) {
- unsigned long temp = lpuart32_read(&sport->port, UARTBAUD);
+ u32 baud = lpuart32_read(&sport->port, UARTBAUD);
- lpuart32_write(&sport->port, temp | UARTBAUD_RDMAE, UARTBAUD);
+ lpuart32_write(&sport->port, baud | UARTBAUD_RDMAE, UARTBAUD);
if (sport->dma_idle_int) {
- unsigned long ctrl = lpuart32_read(&sport->port, UARTCTRL);
+ u32 ctrl = lpuart32_read(&sport->port, UARTCTRL);
lpuart32_write(&sport->port, ctrl | UARTCTRL_ILIE, UARTCTRL);
}
@@ -1449,12 +1446,9 @@ static void lpuart_dma_rx_free(struct uart_port *port)
static int lpuart_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
- struct lpuart_port *sport = container_of(port,
- struct lpuart_port, port);
-
- u8 modem = readb(sport->port.membase + UARTMODEM) &
+ u8 modem = readb(port->membase + UARTMODEM) &
~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE);
- writeb(modem, sport->port.membase + UARTMODEM);
+ writeb(modem, port->membase + UARTMODEM);
if (rs485->flags & SER_RS485_ENABLED) {
/* Enable auto RS-485 RTS mode */
@@ -1472,19 +1466,29 @@ static int lpuart_config_rs485(struct uart_port *port, struct ktermios *termios,
modem &= ~UARTMODEM_TXRTSPOL;
}
- writeb(modem, sport->port.membase + UARTMODEM);
+ writeb(modem, port->membase + UARTMODEM);
return 0;
}
static int lpuart32_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
- struct lpuart_port *sport = container_of(port,
- struct lpuart_port, port);
-
- unsigned long modem = lpuart32_read(&sport->port, UARTMODIR)
+ u32 modem = lpuart32_read(port, UARTMODIR)
& ~(UARTMODIR_TXRTSPOL | UARTMODIR_TXRTSE);
- lpuart32_write(&sport->port, modem, UARTMODIR);
+ u32 ctrl;
+
+ /* TXRTSE and TXRTSPOL can only be changed while the transmitter is disabled. */
+ ctrl = lpuart32_read(port, UARTCTRL);
+ if (ctrl & UARTCTRL_TE) {
+ /* wait for the transmit engine to complete */
+ lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TC);
+ lpuart32_write(port, ctrl & ~UARTCTRL_TE, UARTCTRL);
+
+ while (lpuart32_read(port, UARTCTRL) & UARTCTRL_TE)
+ cpu_relax();
+ }
+
+ lpuart32_write(port, modem, UARTMODIR);
if (rs485->flags & SER_RS485_ENABLED) {
/* Enable auto RS-485 RTS mode */
@@ -1502,17 +1506,21 @@ static int lpuart32_config_rs485(struct uart_port *port, struct ktermios *termio
modem &= ~UARTMODIR_TXRTSPOL;
}
- lpuart32_write(&sport->port, modem, UARTMODIR);
+ lpuart32_write(port, modem, UARTMODIR);
+
+ if (ctrl & UARTCTRL_TE)
+ lpuart32_write(port, ctrl, UARTCTRL);
+
return 0;
}
static unsigned int lpuart_get_mctrl(struct uart_port *port)
{
unsigned int mctrl = 0;
- u8 reg;
+ u8 cr1;
- reg = readb(port->membase + UARTCR1);
- if (reg & UARTCR1_LOOPS)
+ cr1 = readb(port->membase + UARTCR1);
+ if (cr1 & UARTCR1_LOOPS)
mctrl |= TIOCM_LOOP;
return mctrl;
@@ -1521,10 +1529,10 @@ static unsigned int lpuart_get_mctrl(struct uart_port *port)
static unsigned int lpuart32_get_mctrl(struct uart_port *port)
{
unsigned int mctrl = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
- u32 reg;
+ u32 ctrl;
- reg = lpuart32_read(port, UARTCTRL);
- if (reg & UARTCTRL_LOOPS)
+ ctrl = lpuart32_read(port, UARTCTRL);
+ if (ctrl & UARTCTRL_LOOPS)
mctrl |= TIOCM_LOOP;
return mctrl;
@@ -1532,49 +1540,49 @@ static unsigned int lpuart32_get_mctrl(struct uart_port *port)
static void lpuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- u8 reg;
+ u8 cr1;
- reg = readb(port->membase + UARTCR1);
+ cr1 = readb(port->membase + UARTCR1);
/* for internal loopback we need LOOPS=1 and RSRC=0 */
- reg &= ~(UARTCR1_LOOPS | UARTCR1_RSRC);
+ cr1 &= ~(UARTCR1_LOOPS | UARTCR1_RSRC);
if (mctrl & TIOCM_LOOP)
- reg |= UARTCR1_LOOPS;
+ cr1 |= UARTCR1_LOOPS;
- writeb(reg, port->membase + UARTCR1);
+ writeb(cr1, port->membase + UARTCR1);
}
static void lpuart32_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- u32 reg;
+ u32 ctrl;
- reg = lpuart32_read(port, UARTCTRL);
+ ctrl = lpuart32_read(port, UARTCTRL);
/* for internal loopback we need LOOPS=1 and RSRC=0 */
- reg &= ~(UARTCTRL_LOOPS | UARTCTRL_RSRC);
+ ctrl &= ~(UARTCTRL_LOOPS | UARTCTRL_RSRC);
if (mctrl & TIOCM_LOOP)
- reg |= UARTCTRL_LOOPS;
+ ctrl |= UARTCTRL_LOOPS;
- lpuart32_write(port, reg, UARTCTRL);
+ lpuart32_write(port, ctrl, UARTCTRL);
}
static void lpuart_break_ctl(struct uart_port *port, int break_state)
{
- unsigned char temp;
+ u8 cr2;
- temp = readb(port->membase + UARTCR2) & ~UARTCR2_SBK;
+ cr2 = readb(port->membase + UARTCR2) & ~UARTCR2_SBK;
if (break_state != 0)
- temp |= UARTCR2_SBK;
+ cr2 |= UARTCR2_SBK;
- writeb(temp, port->membase + UARTCR2);
+ writeb(cr2, port->membase + UARTCR2);
}
static void lpuart32_break_ctl(struct uart_port *port, int break_state)
{
- unsigned long temp;
+ u32 ctrl;
- temp = lpuart32_read(port, UARTCTRL);
+ ctrl = lpuart32_read(port, UARTCTRL);
/*
* LPUART IP now has two known bugs, one is CTS has higher priority than the
@@ -1591,23 +1599,22 @@ static void lpuart32_break_ctl(struct uart_port *port, int break_state)
* Disable the transmitter to prevent any data from being sent out
* during break, then invert the TX line to send break.
*/
- temp &= ~UARTCTRL_TE;
- lpuart32_write(port, temp, UARTCTRL);
- temp |= UARTCTRL_TXINV;
- lpuart32_write(port, temp, UARTCTRL);
+ ctrl &= ~UARTCTRL_TE;
+ lpuart32_write(port, ctrl, UARTCTRL);
+ ctrl |= UARTCTRL_TXINV;
+ lpuart32_write(port, ctrl, UARTCTRL);
} else {
/* Disable the TXINV to turn off break and re-enable transmitter. */
- temp &= ~UARTCTRL_TXINV;
- lpuart32_write(port, temp, UARTCTRL);
- temp |= UARTCTRL_TE;
- lpuart32_write(port, temp, UARTCTRL);
+ ctrl &= ~UARTCTRL_TXINV;
+ lpuart32_write(port, ctrl, UARTCTRL);
+ ctrl |= UARTCTRL_TE;
+ lpuart32_write(port, ctrl, UARTCTRL);
}
}
static void lpuart_setup_watermark(struct lpuart_port *sport)
{
- unsigned char val, cr2;
- unsigned char cr2_saved;
+ u8 fifo, cr2, cr2_saved;
cr2 = readb(sport->port.membase + UARTCR2);
cr2_saved = cr2;
@@ -1615,8 +1622,8 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
UARTCR2_RIE | UARTCR2_RE);
writeb(cr2, sport->port.membase + UARTCR2);
- val = readb(sport->port.membase + UARTPFIFO);
- writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE,
+ fifo = readb(sport->port.membase + UARTPFIFO);
+ writeb(fifo | UARTPFIFO_TXFE | UARTPFIFO_RXFE,
sport->port.membase + UARTPFIFO);
/* flush Tx and Rx FIFO */
@@ -1640,7 +1647,7 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
static void lpuart_setup_watermark_enable(struct lpuart_port *sport)
{
- unsigned char cr2;
+ u8 cr2;
lpuart_setup_watermark(sport);
@@ -1651,8 +1658,7 @@ static void lpuart_setup_watermark_enable(struct lpuart_port *sport)
static void lpuart32_setup_watermark(struct lpuart_port *sport)
{
- unsigned long val, ctrl;
- unsigned long ctrl_saved;
+ u32 val, ctrl, ctrl_saved;
ctrl = lpuart32_read(&sport->port, UARTCTRL);
ctrl_saved = ctrl;
@@ -1687,14 +1693,14 @@ static void lpuart32_setup_watermark(struct lpuart_port *sport)
static void lpuart32_setup_watermark_enable(struct lpuart_port *sport)
{
- u32 temp;
+ u32 ctrl;
lpuart32_setup_watermark(sport);
- temp = lpuart32_read(&sport->port, UARTCTRL);
- temp |= UARTCTRL_RE | UARTCTRL_TE;
- temp |= FIELD_PREP(UARTCTRL_IDLECFG, 0x7);
- lpuart32_write(&sport->port, temp, UARTCTRL);
+ ctrl = lpuart32_read(&sport->port, UARTCTRL);
+ ctrl |= UARTCTRL_RE | UARTCTRL_TE;
+ ctrl |= FIELD_PREP(UARTCTRL_IDLECFG, 0x7);
+ lpuart32_write(&sport->port, ctrl, UARTCTRL);
}
static void rx_dma_timer_init(struct lpuart_port *sport)
@@ -1761,7 +1767,7 @@ err:
static void lpuart_rx_dma_startup(struct lpuart_port *sport)
{
int ret;
- unsigned char cr3;
+ u8 cr3;
if (uart_console(&sport->port))
goto err;
@@ -1811,16 +1817,16 @@ static void lpuart_hw_setup(struct lpuart_port *sport)
static int lpuart_startup(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
- unsigned char temp;
+ u8 fifo;
/* determine FIFO size and enable FIFO mode */
- temp = readb(sport->port.membase + UARTPFIFO);
+ fifo = readb(port->membase + UARTPFIFO);
- sport->txfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_TXSIZE_OFF) &
+ sport->txfifo_size = UARTFIFO_DEPTH((fifo >> UARTPFIFO_TXSIZE_OFF) &
UARTPFIFO_FIFOSIZE_MASK);
- sport->port.fifosize = sport->txfifo_size;
+ port->fifosize = sport->txfifo_size;
- sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_RXSIZE_OFF) &
+ sport->rxfifo_size = UARTFIFO_DEPTH((fifo >> UARTPFIFO_RXSIZE_OFF) &
UARTPFIFO_FIFOSIZE_MASK);
lpuart_request_dma(sport);
@@ -1831,24 +1837,24 @@ static int lpuart_startup(struct uart_port *port)
static void lpuart32_hw_disable(struct lpuart_port *sport)
{
- unsigned long temp;
+ u32 ctrl;
- temp = lpuart32_read(&sport->port, UARTCTRL);
- temp &= ~(UARTCTRL_RIE | UARTCTRL_ILIE | UARTCTRL_RE |
+ ctrl = lpuart32_read(&sport->port, UARTCTRL);
+ ctrl &= ~(UARTCTRL_RIE | UARTCTRL_ILIE | UARTCTRL_RE |
UARTCTRL_TIE | UARTCTRL_TE);
- lpuart32_write(&sport->port, temp, UARTCTRL);
+ lpuart32_write(&sport->port, ctrl, UARTCTRL);
}
static void lpuart32_configure(struct lpuart_port *sport)
{
- unsigned long temp;
+ u32 ctrl;
- temp = lpuart32_read(&sport->port, UARTCTRL);
+ ctrl = lpuart32_read(&sport->port, UARTCTRL);
if (!sport->lpuart_dma_rx_use)
- temp |= UARTCTRL_RIE | UARTCTRL_ILIE;
+ ctrl |= UARTCTRL_RIE | UARTCTRL_ILIE;
if (!sport->lpuart_dma_tx_use)
- temp |= UARTCTRL_TIE;
- lpuart32_write(&sport->port, temp, UARTCTRL);
+ ctrl |= UARTCTRL_TIE;
+ lpuart32_write(&sport->port, ctrl, UARTCTRL);
}
static void lpuart32_hw_setup(struct lpuart_port *sport)
@@ -1871,16 +1877,16 @@ static void lpuart32_hw_setup(struct lpuart_port *sport)
static int lpuart32_startup(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
- unsigned long temp;
+ u32 fifo;
/* determine FIFO size */
- temp = lpuart32_read(&sport->port, UARTFIFO);
+ fifo = lpuart32_read(port, UARTFIFO);
- sport->txfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_TXSIZE_OFF) &
+ sport->txfifo_size = UARTFIFO_DEPTH((fifo >> UARTFIFO_TXSIZE_OFF) &
UARTFIFO_FIFOSIZE_MASK);
- sport->port.fifosize = sport->txfifo_size;
+ port->fifosize = sport->txfifo_size;
- sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_RXSIZE_OFF) &
+ sport->rxfifo_size = UARTFIFO_DEPTH((fifo >> UARTFIFO_RXSIZE_OFF) &
UARTFIFO_FIFOSIZE_MASK);
/*
@@ -1891,7 +1897,7 @@ static int lpuart32_startup(struct uart_port *port)
if (is_layerscape_lpuart(sport)) {
sport->rxfifo_size = 16;
sport->txfifo_size = 16;
- sport->port.fifosize = sport->txfifo_size;
+ port->fifosize = sport->txfifo_size;
}
lpuart_request_dma(sport);
@@ -1925,16 +1931,16 @@ static void lpuart_dma_shutdown(struct lpuart_port *sport)
static void lpuart_shutdown(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
- unsigned char temp;
+ u8 cr2;
unsigned long flags;
uart_port_lock_irqsave(port, &flags);
/* disable Rx/Tx and interrupts */
- temp = readb(port->membase + UARTCR2);
- temp &= ~(UARTCR2_TE | UARTCR2_RE |
+ cr2 = readb(port->membase + UARTCR2);
+ cr2 &= ~(UARTCR2_TE | UARTCR2_RE |
UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_RIE);
- writeb(temp, port->membase + UARTCR2);
+ writeb(cr2, port->membase + UARTCR2);
uart_port_unlock_irqrestore(port, flags);
@@ -1945,14 +1951,14 @@ static void lpuart32_shutdown(struct uart_port *port)
{
struct lpuart_port *sport =
container_of(port, struct lpuart_port, port);
- unsigned long temp;
+ u32 temp;
unsigned long flags;
uart_port_lock_irqsave(port, &flags);
/* clear status */
- temp = lpuart32_read(&sport->port, UARTSTAT);
- lpuart32_write(&sport->port, temp, UARTSTAT);
+ temp = lpuart32_read(port, UARTSTAT);
+ lpuart32_write(port, temp, UARTSTAT);
/* disable Rx/Tx DMA */
temp = lpuart32_read(port, UARTBAUD);
@@ -1981,17 +1987,17 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
unsigned long flags;
- unsigned char cr1, old_cr1, old_cr2, cr3, cr4, bdh, modem;
+ u8 cr1, old_cr1, old_cr2, cr3, cr4, bdh, modem;
unsigned int baud;
unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
unsigned int sbr, brfa;
- cr1 = old_cr1 = readb(sport->port.membase + UARTCR1);
- old_cr2 = readb(sport->port.membase + UARTCR2);
- cr3 = readb(sport->port.membase + UARTCR3);
- cr4 = readb(sport->port.membase + UARTCR4);
- bdh = readb(sport->port.membase + UARTBDH);
- modem = readb(sport->port.membase + UARTMODEM);
+ cr1 = old_cr1 = readb(port->membase + UARTCR1);
+ old_cr2 = readb(port->membase + UARTCR2);
+ cr3 = readb(port->membase + UARTCR3);
+ cr4 = readb(port->membase + UARTCR4);
+ bdh = readb(port->membase + UARTBDH);
+ modem = readb(port->membase + UARTMODEM);
/*
* only support CS8 and CS7, and for CS7 must enable PE.
* supported mode:
@@ -2023,7 +2029,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
* When auto RS-485 RTS mode is enabled,
* hardware flow control needs to be disabled.
*/
- if (sport->port.rs485.flags & SER_RS485_ENABLED)
+ if (port->rs485.flags & SER_RS485_ENABLED)
termios->c_cflag &= ~CRTSCTS;
if (termios->c_cflag & CRTSCTS)
@@ -2064,59 +2070,59 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
* Need to update the Ring buffer length according to the selected
* baud rate and restart Rx DMA path.
*
- * Since timer function acqures sport->port.lock, need to stop before
+ * Since the timer function acquires port->lock, it must be stopped before
* acquiring the same lock; otherwise del_timer_sync() can deadlock.
*/
if (old && sport->lpuart_dma_rx_use)
- lpuart_dma_rx_free(&sport->port);
+ lpuart_dma_rx_free(port);
- uart_port_lock_irqsave(&sport->port, &flags);
+ uart_port_lock_irqsave(port, &flags);
- sport->port.read_status_mask = 0;
+ port->read_status_mask = 0;
if (termios->c_iflag & INPCK)
- sport->port.read_status_mask |= UARTSR1_FE | UARTSR1_PE;
+ port->read_status_mask |= UARTSR1_FE | UARTSR1_PE;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
- sport->port.read_status_mask |= UARTSR1_FE;
+ port->read_status_mask |= UARTSR1_FE;
/* characters to ignore */
- sport->port.ignore_status_mask = 0;
+ port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
- sport->port.ignore_status_mask |= UARTSR1_PE;
+ port->ignore_status_mask |= UARTSR1_PE;
if (termios->c_iflag & IGNBRK) {
- sport->port.ignore_status_mask |= UARTSR1_FE;
+ port->ignore_status_mask |= UARTSR1_FE;
/*
* if we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
- sport->port.ignore_status_mask |= UARTSR1_OR;
+ port->ignore_status_mask |= UARTSR1_OR;
}
/* update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, baud);
/* wait for the transmit engine to complete */
- lpuart_wait_bit_set(&sport->port, UARTSR1, UARTSR1_TC);
+ lpuart_wait_bit_set(port, UARTSR1, UARTSR1_TC);
/* disable transmit and receive */
writeb(old_cr2 & ~(UARTCR2_TE | UARTCR2_RE),
- sport->port.membase + UARTCR2);
+ port->membase + UARTCR2);
- sbr = sport->port.uartclk / (16 * baud);
- brfa = ((sport->port.uartclk - (16 * sbr * baud)) * 2) / baud;
+ sbr = port->uartclk / (16 * baud);
+ brfa = ((port->uartclk - (16 * sbr * baud)) * 2) / baud;
bdh &= ~UARTBDH_SBR_MASK;
bdh |= (sbr >> 8) & 0x1F;
cr4 &= ~UARTCR4_BRFA_MASK;
brfa &= UARTCR4_BRFA_MASK;
- writeb(cr4 | brfa, sport->port.membase + UARTCR4);
- writeb(bdh, sport->port.membase + UARTBDH);
- writeb(sbr & 0xFF, sport->port.membase + UARTBDL);
- writeb(cr3, sport->port.membase + UARTCR3);
- writeb(cr1, sport->port.membase + UARTCR1);
- writeb(modem, sport->port.membase + UARTMODEM);
+ writeb(cr4 | brfa, port->membase + UARTCR4);
+ writeb(bdh, port->membase + UARTBDH);
+ writeb(sbr & 0xFF, port->membase + UARTBDL);
+ writeb(cr3, port->membase + UARTCR3);
+ writeb(cr1, port->membase + UARTCR1);
+ writeb(modem, port->membase + UARTMODEM);
/* restore control register */
- writeb(old_cr2, sport->port.membase + UARTCR2);
+ writeb(old_cr2, port->membase + UARTCR2);
if (old && sport->lpuart_dma_rx_use) {
if (!lpuart_start_rx_dma(sport))
@@ -2125,14 +2131,14 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
sport->lpuart_dma_rx_use = false;
}
- uart_port_unlock_irqrestore(&sport->port, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void __lpuart32_serial_setbrg(struct uart_port *port,
unsigned int baudrate, bool use_rx_dma,
bool use_tx_dma)
{
- u32 sbr, osr, baud_diff, tmp_osr, tmp_sbr, tmp_diff, tmp;
+ u32 sbr, osr, baud_diff, tmp_osr, tmp_sbr, tmp_diff, baud;
u32 clk = port->uartclk;
/*
@@ -2161,9 +2167,9 @@ static void __lpuart32_serial_setbrg(struct uart_port *port,
tmp_diff = clk / (tmp_osr * tmp_sbr) - baudrate;
/* select best values between sbr and sbr+1 */
- tmp = clk / (tmp_osr * (tmp_sbr + 1));
- if (tmp_diff > (baudrate - tmp)) {
- tmp_diff = baudrate - tmp;
+ baud = clk / (tmp_osr * (tmp_sbr + 1));
+ if (tmp_diff > (baudrate - baud)) {
+ tmp_diff = baudrate - baud;
tmp_sbr++;
}
@@ -2185,23 +2191,23 @@ static void __lpuart32_serial_setbrg(struct uart_port *port,
dev_warn(port->dev,
"unacceptable baud rate difference of more than 3%%\n");
- tmp = lpuart32_read(port, UARTBAUD);
+ baud = lpuart32_read(port, UARTBAUD);
if ((osr > 3) && (osr < 8))
- tmp |= UARTBAUD_BOTHEDGE;
+ baud |= UARTBAUD_BOTHEDGE;
- tmp &= ~(UARTBAUD_OSR_MASK << UARTBAUD_OSR_SHIFT);
- tmp |= ((osr-1) & UARTBAUD_OSR_MASK) << UARTBAUD_OSR_SHIFT;
+ baud &= ~(UARTBAUD_OSR_MASK << UARTBAUD_OSR_SHIFT);
+ baud |= ((osr-1) & UARTBAUD_OSR_MASK) << UARTBAUD_OSR_SHIFT;
- tmp &= ~UARTBAUD_SBR_MASK;
- tmp |= sbr & UARTBAUD_SBR_MASK;
+ baud &= ~UARTBAUD_SBR_MASK;
+ baud |= sbr & UARTBAUD_SBR_MASK;
if (!use_rx_dma)
- tmp &= ~UARTBAUD_RDMAE;
+ baud &= ~UARTBAUD_RDMAE;
if (!use_tx_dma)
- tmp &= ~UARTBAUD_TDMAE;
+ baud &= ~UARTBAUD_TDMAE;
- lpuart32_write(port, tmp, UARTBAUD);
+ lpuart32_write(port, baud, UARTBAUD);
}
static void lpuart32_serial_setbrg(struct lpuart_port *sport,
@@ -2219,13 +2225,13 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
unsigned long flags;
- unsigned long ctrl, old_ctrl, bd, modem;
+ u32 ctrl, old_ctrl, bd, modem;
unsigned int baud;
unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
- ctrl = old_ctrl = lpuart32_read(&sport->port, UARTCTRL);
- bd = lpuart32_read(&sport->port, UARTBAUD);
- modem = lpuart32_read(&sport->port, UARTMODIR);
+ ctrl = old_ctrl = lpuart32_read(port, UARTCTRL);
+ bd = lpuart32_read(port, UARTBAUD);
+ modem = lpuart32_read(port, UARTMODIR);
sport->is_cs7 = false;
/*
* only support CS8 and CS7
@@ -2259,7 +2265,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
* When auto RS-485 RTS mode is enabled,
* hardware flow control needs to be disabled.
*/
- if (sport->port.rs485.flags & SER_RS485_ENABLED)
+ if (port->rs485.flags & SER_RS485_ENABLED)
termios->c_cflag &= ~CRTSCTS;
if (termios->c_cflag & CRTSCTS)
@@ -2309,59 +2315,61 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
* Need to update the Ring buffer length according to the selected
* baud rate and restart Rx DMA path.
*
- * Since timer function acqures sport->port.lock, need to stop before
+ * Since the timer function acquires port->lock, it must be stopped before
* acquiring the same lock; otherwise del_timer_sync() can deadlock.
*/
if (old && sport->lpuart_dma_rx_use)
- lpuart_dma_rx_free(&sport->port);
+ lpuart_dma_rx_free(port);
- uart_port_lock_irqsave(&sport->port, &flags);
+ uart_port_lock_irqsave(port, &flags);
- sport->port.read_status_mask = 0;
+ port->read_status_mask = 0;
if (termios->c_iflag & INPCK)
- sport->port.read_status_mask |= UARTSTAT_FE | UARTSTAT_PE;
+ port->read_status_mask |= UARTSTAT_FE | UARTSTAT_PE;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
- sport->port.read_status_mask |= UARTSTAT_FE;
+ port->read_status_mask |= UARTSTAT_FE;
/* characters to ignore */
- sport->port.ignore_status_mask = 0;
+ port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
- sport->port.ignore_status_mask |= UARTSTAT_PE;
+ port->ignore_status_mask |= UARTSTAT_PE;
if (termios->c_iflag & IGNBRK) {
- sport->port.ignore_status_mask |= UARTSTAT_FE;
+ port->ignore_status_mask |= UARTSTAT_FE;
/*
* if we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
- sport->port.ignore_status_mask |= UARTSTAT_OR;
+ port->ignore_status_mask |= UARTSTAT_OR;
}
/* update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, baud);
/*
+ * disable CTS to ensure the transmit engine is not blocked by flow
+ * control while there is dirty data in the TX FIFO
+ */
+ lpuart32_write(port, modem & ~UARTMODIR_TXCTSE, UARTMODIR);
+
+ /*
* LPUART Transmission Complete Flag may never be set while queuing a break
* character, so skip waiting for transmission complete when UARTCTRL_SBK is
* asserted.
*/
- if (!(old_ctrl & UARTCTRL_SBK)) {
- lpuart32_write(&sport->port, 0, UARTMODIR);
- lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
- }
+ if (!(old_ctrl & UARTCTRL_SBK))
+ lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TC);
/* disable transmit and receive */
- lpuart32_write(&sport->port, old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE),
+ lpuart32_write(port, old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE),
UARTCTRL);
- lpuart32_write(&sport->port, bd, UARTBAUD);
+ lpuart32_write(port, bd, UARTBAUD);
lpuart32_serial_setbrg(sport, baud);
- /* disable CTS before enabling UARTCTRL_TE to avoid pending idle preamble */
- lpuart32_write(&sport->port, modem & ~UARTMODIR_TXCTSE, UARTMODIR);
/* restore control register */
- lpuart32_write(&sport->port, ctrl, UARTCTRL);
+ lpuart32_write(port, ctrl, UARTCTRL);
/* re-enable the CTS if needed */
- lpuart32_write(&sport->port, modem, UARTMODIR);
+ lpuart32_write(port, modem, UARTMODIR);
if ((ctrl & (UARTCTRL_PE | UARTCTRL_M)) == UARTCTRL_PE)
sport->is_cs7 = true;
@@ -2373,7 +2381,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
sport->lpuart_dma_rx_use = false;
}
- uart_port_unlock_irqrestore(&sport->port, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *lpuart_type(struct uart_port *port)
@@ -2486,7 +2494,7 @@ static void
lpuart_console_write(struct console *co, const char *s, unsigned int count)
{
struct lpuart_port *sport = lpuart_ports[co->index];
- unsigned char old_cr2, cr2;
+ u8 old_cr2, cr2;
unsigned long flags;
int locked = 1;
@@ -2516,7 +2524,7 @@ static void
lpuart32_console_write(struct console *co, const char *s, unsigned int count)
{
struct lpuart_port *sport = lpuart_ports[co->index];
- unsigned long old_cr, cr;
+ u32 old_cr, cr;
unsigned long flags;
int locked = 1;
@@ -2550,7 +2558,7 @@ static void __init
lpuart_console_get_options(struct lpuart_port *sport, int *baud,
int *parity, int *bits)
{
- unsigned char cr, bdh, bdl, brfa;
+ u8 cr, bdh, bdl, brfa;
unsigned int sbr, uartclk, baud_raw;
cr = readb(sport->port.membase + UARTCR2);
@@ -2599,7 +2607,7 @@ static void __init
lpuart32_console_get_options(struct lpuart_port *sport, int *baud,
int *parity, int *bits)
{
- unsigned long cr, bd;
+ u32 cr, bd;
unsigned int sbr, uartclk, baud_raw;
cr = lpuart32_read(&sport->port, UARTCTRL);
@@ -2805,13 +2813,13 @@ static int lpuart_global_reset(struct lpuart_port *sport)
{
struct uart_port *port = &sport->port;
void __iomem *global_addr;
- unsigned long ctrl, bd;
+ u32 ctrl, bd;
unsigned int val = 0;
int ret;
ret = clk_prepare_enable(sport->ipg_clk);
if (ret) {
- dev_err(sport->port.dev, "failed to enable uart ipg clk: %d\n", ret);
+ dev_err(port->dev, "failed to enable uart ipg clk: %d\n", ret);
return ret;
}
@@ -2822,10 +2830,10 @@ static int lpuart_global_reset(struct lpuart_port *sport)
*/
ctrl = lpuart32_read(port, UARTCTRL);
if (ctrl & UARTCTRL_TE) {
- bd = lpuart32_read(&sport->port, UARTBAUD);
+ bd = lpuart32_read(port, UARTBAUD);
if (read_poll_timeout(lpuart32_tx_empty, val, val, 1, 100000, false,
port)) {
- dev_warn(sport->port.dev,
+ dev_warn(port->dev,
"timeout waiting for transmit engine to complete\n");
clk_disable_unprepare(sport->ipg_clk);
return 0;
@@ -2954,7 +2962,7 @@ static int lpuart_probe(struct platform_device *pdev)
goto failed_attach_port;
ret = devm_request_irq(&pdev->dev, sport->port.irq, handler, 0,
- DRIVER_NAME, sport);
+ dev_name(&pdev->dev), sport);
if (ret)
goto failed_irq_request;
@@ -3011,7 +3019,7 @@ static int lpuart_runtime_resume(struct device *dev)
static void serial_lpuart_enable_wakeup(struct lpuart_port *sport, bool on)
{
- unsigned int val, baud;
+ u32 val, baud;
if (lpuart_is_32(sport)) {
val = lpuart32_read(&sport->port, UARTCTRL);
@@ -3076,7 +3084,7 @@ static int lpuart_suspend_noirq(struct device *dev)
static int lpuart_resume_noirq(struct device *dev)
{
struct lpuart_port *sport = dev_get_drvdata(dev);
- unsigned int val;
+ u32 stat;
pinctrl_pm_select_default_state(dev);
@@ -3085,8 +3093,8 @@ static int lpuart_resume_noirq(struct device *dev)
/* clear the wakeup flags */
if (lpuart_is_32(sport)) {
- val = lpuart32_read(&sport->port, UARTSTAT);
- lpuart32_write(&sport->port, val, UARTSTAT);
+ stat = lpuart32_read(&sport->port, UARTSTAT);
+ lpuart32_write(&sport->port, stat, UARTSTAT);
}
}
@@ -3096,7 +3104,8 @@ static int lpuart_resume_noirq(struct device *dev)
static int lpuart_suspend(struct device *dev)
{
struct lpuart_port *sport = dev_get_drvdata(dev);
- unsigned long temp, flags;
+ u32 temp;
+ unsigned long flags;
uart_suspend_port(&lpuart_reg, &sport->port);
@@ -3176,7 +3185,7 @@ static void lpuart_console_fixup(struct lpuart_port *sport)
* in VLLS mode, or restore console setting here.
*/
if (is_imx7ulp_lpuart(sport) && lpuart_uport_is_active(sport) &&
- console_suspend_enabled && uart_console(&sport->port)) {
+ console_suspend_enabled && uart_console(uport)) {
mutex_lock(&port->mutex);
memset(&termios, 0, sizeof(struct ktermios));
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index 29e42831df39..7fb995a8490e 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -1764,11 +1764,10 @@ static int icom_probe(struct pci_dev *dev,
goto probe_exit1;
}
- /* save off irq and request irq line */
- retval = request_irq(dev->irq, icom_interrupt, IRQF_SHARED, ICOM_DRIVER_NAME, (void *)icom_adapter);
- if (retval) {
- goto probe_exit2;
- }
+ /* save off irq and request irq line */
+ retval = request_irq(dev->irq, icom_interrupt, IRQF_SHARED, ICOM_DRIVER_NAME, icom_adapter);
+ if (retval)
+ goto probe_exit2;
retval = icom_load_ports(icom_adapter);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 9a1afe409b98..19c819705bf9 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1608,7 +1608,7 @@ static void imx_uart_shutdown(struct uart_port *port)
imx_uart_dma_exit(sport);
}
- mctrl_gpio_disable_ms(sport->gpios);
+ mctrl_gpio_disable_ms_sync(sport->gpios);
uart_port_lock_irqsave(&sport->port, &flags);
ucr2 = imx_uart_readl(sport, UCR2);
diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
deleted file mode 100644
index 2833708e369f..000000000000
--- a/drivers/tty/serial/kgdb_nmi.c
+++ /dev/null
@@ -1,280 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * KGDB NMI serial console
- *
- * Copyright 2010 Google, Inc.
- * Arve Hjønnevåg <arve@android.com>
- * Colin Cross <ccross@android.com>
- * Copyright 2012 Linaro Ltd.
- * Anton Vorontsov <anton.vorontsov@linaro.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/compiler.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/atomic.h>
-#include <linux/console.h>
-#include <linux/tty.h>
-#include <linux/tty_driver.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_core.h>
-#include <linux/interrupt.h>
-#include <linux/hrtimer.h>
-#include <linux/tick.h>
-#include <linux/kfifo.h>
-#include <linux/kgdb.h>
-#include <linux/kdb.h>
-
-static atomic_t kgdb_nmi_num_readers = ATOMIC_INIT(0);
-
-static int kgdb_nmi_console_setup(struct console *co, char *options)
-{
- arch_kgdb_ops.enable_nmi(1);
-
- /* The NMI console uses the dbg_io_ops to issue console messages. To
- * avoid duplicate messages during kdb sessions we must inform kdb's
- * I/O utilities that messages sent to the console will automatically
- * be displayed on the dbg_io.
- */
- dbg_io_ops->cons = co;
-
- return 0;
-}
-
-static void kgdb_nmi_console_write(struct console *co, const char *s, uint c)
-{
- int i;
-
- for (i = 0; i < c; i++)
- dbg_io_ops->write_char(s[i]);
-}
-
-static struct tty_driver *kgdb_nmi_tty_driver;
-
-static struct tty_driver *kgdb_nmi_console_device(struct console *co, int *idx)
-{
- *idx = co->index;
- return kgdb_nmi_tty_driver;
-}
-
-static struct console kgdb_nmi_console = {
- .name = "ttyNMI",
- .setup = kgdb_nmi_console_setup,
- .write = kgdb_nmi_console_write,
- .device = kgdb_nmi_console_device,
- .flags = CON_PRINTBUFFER | CON_ANYTIME,
- .index = -1,
-};
-
-/*
- * This is usually the maximum rate on debug ports. We make fifo large enough
- * to make copy-pasting to the terminal usable.
- */
-#define KGDB_NMI_BAUD 115200
-#define KGDB_NMI_FIFO_SIZE roundup_pow_of_two(KGDB_NMI_BAUD / 8 / HZ)
-
-struct kgdb_nmi_tty_priv {
- struct tty_port port;
- struct timer_list timer;
- STRUCT_KFIFO(char, KGDB_NMI_FIFO_SIZE) fifo;
-};
-
-static struct tty_port *kgdb_nmi_port;
-
-/*
- * The tasklet is cheap, it does not cause wakeups when reschedules itself,
- * instead it waits for the next tick.
- */
-static void kgdb_nmi_tty_receiver(struct timer_list *t)
-{
- struct kgdb_nmi_tty_priv *priv = from_timer(priv, t, timer);
- char ch;
-
- priv->timer.expires = jiffies + (HZ/100);
- add_timer(&priv->timer);
-
- if (likely(!atomic_read(&kgdb_nmi_num_readers) ||
- !kfifo_len(&priv->fifo)))
- return;
-
- while (kfifo_out(&priv->fifo, &ch, 1))
- tty_insert_flip_char(&priv->port, ch, TTY_NORMAL);
- tty_flip_buffer_push(&priv->port);
-}
-
-static int kgdb_nmi_tty_activate(struct tty_port *port, struct tty_struct *tty)
-{
- struct kgdb_nmi_tty_priv *priv =
- container_of(port, struct kgdb_nmi_tty_priv, port);
-
- kgdb_nmi_port = port;
- priv->timer.expires = jiffies + (HZ/100);
- add_timer(&priv->timer);
-
- return 0;
-}
-
-static void kgdb_nmi_tty_shutdown(struct tty_port *port)
-{
- struct kgdb_nmi_tty_priv *priv =
- container_of(port, struct kgdb_nmi_tty_priv, port);
-
- del_timer(&priv->timer);
- kgdb_nmi_port = NULL;
-}
-
-static const struct tty_port_operations kgdb_nmi_tty_port_ops = {
- .activate = kgdb_nmi_tty_activate,
- .shutdown = kgdb_nmi_tty_shutdown,
-};
-
-static int kgdb_nmi_tty_install(struct tty_driver *drv, struct tty_struct *tty)
-{
- struct kgdb_nmi_tty_priv *priv;
- int ret;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- INIT_KFIFO(priv->fifo);
- timer_setup(&priv->timer, kgdb_nmi_tty_receiver, 0);
- tty_port_init(&priv->port);
- priv->port.ops = &kgdb_nmi_tty_port_ops;
- tty->driver_data = priv;
-
- ret = tty_port_install(&priv->port, drv, tty);
- if (ret) {
- pr_err("%s: can't install tty port: %d\n", __func__, ret);
- goto err;
- }
- return 0;
-err:
- tty_port_destroy(&priv->port);
- kfree(priv);
- return ret;
-}
-
-static void kgdb_nmi_tty_cleanup(struct tty_struct *tty)
-{
- struct kgdb_nmi_tty_priv *priv = tty->driver_data;
-
- tty->driver_data = NULL;
- tty_port_destroy(&priv->port);
- kfree(priv);
-}
-
-static int kgdb_nmi_tty_open(struct tty_struct *tty, struct file *file)
-{
- struct kgdb_nmi_tty_priv *priv = tty->driver_data;
- unsigned int mode = file->f_flags & O_ACCMODE;
- int ret;
-
- ret = tty_port_open(&priv->port, tty, file);
- if (!ret && (mode == O_RDONLY || mode == O_RDWR))
- atomic_inc(&kgdb_nmi_num_readers);
-
- return ret;
-}
-
-static void kgdb_nmi_tty_close(struct tty_struct *tty, struct file *file)
-{
- struct kgdb_nmi_tty_priv *priv = tty->driver_data;
- unsigned int mode = file->f_flags & O_ACCMODE;
-
- if (mode == O_RDONLY || mode == O_RDWR)
- atomic_dec(&kgdb_nmi_num_readers);
-
- tty_port_close(&priv->port, tty, file);
-}
-
-static void kgdb_nmi_tty_hangup(struct tty_struct *tty)
-{
- struct kgdb_nmi_tty_priv *priv = tty->driver_data;
-
- tty_port_hangup(&priv->port);
-}
-
-static unsigned int kgdb_nmi_tty_write_room(struct tty_struct *tty)
-{
- /* Actually, we can handle any amount as we use polled writes. */
- return 2048;
-}
-
-static ssize_t kgdb_nmi_tty_write(struct tty_struct *tty, const u8 *buf,
- size_t c)
-{
- int i;
-
- for (i = 0; i < c; i++)
- dbg_io_ops->write_char(buf[i]);
- return c;
-}
-
-static const struct tty_operations kgdb_nmi_tty_ops = {
- .open = kgdb_nmi_tty_open,
- .close = kgdb_nmi_tty_close,
- .install = kgdb_nmi_tty_install,
- .cleanup = kgdb_nmi_tty_cleanup,
- .hangup = kgdb_nmi_tty_hangup,
- .write_room = kgdb_nmi_tty_write_room,
- .write = kgdb_nmi_tty_write,
-};
-
-int kgdb_register_nmi_console(void)
-{
- int ret;
-
- if (!arch_kgdb_ops.enable_nmi)
- return 0;
-
- kgdb_nmi_tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW);
- if (IS_ERR(kgdb_nmi_tty_driver)) {
- pr_err("%s: cannot allocate tty\n", __func__);
- return PTR_ERR(kgdb_nmi_tty_driver);
- }
- kgdb_nmi_tty_driver->driver_name = "ttyNMI";
- kgdb_nmi_tty_driver->name = "ttyNMI";
- kgdb_nmi_tty_driver->num = 1;
- kgdb_nmi_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
- kgdb_nmi_tty_driver->subtype = SERIAL_TYPE_NORMAL;
- kgdb_nmi_tty_driver->init_termios = tty_std_termios;
- tty_termios_encode_baud_rate(&kgdb_nmi_tty_driver->init_termios,
- KGDB_NMI_BAUD, KGDB_NMI_BAUD);
- tty_set_operations(kgdb_nmi_tty_driver, &kgdb_nmi_tty_ops);
-
- ret = tty_register_driver(kgdb_nmi_tty_driver);
- if (ret) {
- pr_err("%s: can't register tty driver: %d\n", __func__, ret);
- goto err_drv_reg;
- }
-
- register_console(&kgdb_nmi_console);
-
- return 0;
-err_drv_reg:
- tty_driver_kref_put(kgdb_nmi_tty_driver);
- return ret;
-}
-EXPORT_SYMBOL_GPL(kgdb_register_nmi_console);
-
-int kgdb_unregister_nmi_console(void)
-{
- int ret;
-
- if (!arch_kgdb_ops.enable_nmi)
- return 0;
- arch_kgdb_ops.enable_nmi(0);
-
- ret = unregister_console(&kgdb_nmi_console);
- if (ret)
- return ret;
-
- tty_unregister_driver(kgdb_nmi_tty_driver);
- tty_driver_kref_put(kgdb_nmi_tty_driver);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(kgdb_unregister_nmi_console);
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 58ea1e1391ce..85f6c5a76e0f 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -186,8 +186,6 @@ static void cleanup_kgdboc(void)
if (configured != 1)
return;
- if (kgdb_unregister_nmi_console())
- return;
kgdboc_unregister_kbd();
kgdb_unregister_io_module(&kgdboc_io_ops);
}
@@ -250,16 +248,10 @@ do_register:
if (err)
goto noconfig;
- err = kgdb_register_nmi_console();
- if (err)
- goto nmi_con_failed;
-
configured = 1;
return 0;
-nmi_con_failed:
- kgdb_unregister_io_module(&kgdboc_io_ops);
noconfig:
kgdboc_unregister_kbd();
configured = 0;
diff --git a/drivers/tty/serial/ma35d1_serial.c b/drivers/tty/serial/ma35d1_serial.c
index 8dcad52eedfd..285b0fe41a86 100644
--- a/drivers/tty/serial/ma35d1_serial.c
+++ b/drivers/tty/serial/ma35d1_serial.c
@@ -799,7 +799,7 @@ static struct platform_driver ma35d1serial_driver = {
.resume = ma35d1serial_resume,
.driver = {
.name = "ma35d1-uart",
- .of_match_table = of_match_ptr(ma35d1_serial_of_match),
+ .of_match_table = ma35d1_serial_of_match,
},
};
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 2204cc3e3b07..37eb701b0b46 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -1351,7 +1351,6 @@ static const struct uart_ops mpc52xx_uart_ops = {
.startup = mpc52xx_uart_startup,
.shutdown = mpc52xx_uart_shutdown,
.set_termios = mpc52xx_uart_set_termios,
-/* .pm = mpc52xx_uart_pm, Not supported yet */
.type = mpc52xx_uart_type,
.release_port = mpc52xx_uart_release_port,
.request_port = mpc52xx_uart_request_port,
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index c7cee5fee603..508e8c6f01d4 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1515,7 +1515,6 @@ static const struct uart_ops pch_uart_ops = {
.startup = pch_uart_startup,
.shutdown = pch_uart_shutdown,
.set_termios = pch_uart_set_termios,
-/* .pm = pch_uart_pm, Not supported yet */
.type = pch_uart_type,
.release_port = pch_uart_release_port,
.request_port = pch_uart_request_port,
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index d46650e578e5..88669972d9a0 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -895,8 +895,8 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
{
struct uart_port *uport = uart_port_check(state);
unsigned long new_port;
- unsigned int change_irq, change_port, closing_wait;
- unsigned int old_custom_divisor, close_delay;
+ unsigned int old_custom_divisor, close_delay, closing_wait;
+ bool change_irq, change_port;
upf_t old_flags, new_flags;
int retval;
@@ -2013,9 +2013,8 @@ static const char *uart_type(struct uart_port *port)
#ifdef CONFIG_PROC_FS
-static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
+static void uart_line_info(struct seq_file *m, struct uart_state *state)
{
- struct uart_state *state = drv->state + i;
struct tty_port *port = &state->port;
enum uart_pm_state pm_state;
struct uart_port *uport;
@@ -2100,7 +2099,7 @@ static int uart_proc_show(struct seq_file *m, void *v)
seq_printf(m, "serinfo:1.0 driver%s%s revision:%s\n", "", "", "");
for (i = 0; i < drv->nr; i++)
- uart_line_info(m, drv, i);
+ uart_line_info(m, drv->state + i);
return 0;
}
#endif
@@ -3156,7 +3155,6 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u
if (uport->cons && uport->dev)
of_console_check(uport->dev->of_node, uport->cons->name, uport->line);
- tty_port_link_device(port, drv->tty_driver, uport->line);
uart_configure_port(drv, state, uport);
port->console = uart_console(uport);
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index 8855688a5b6c..7b02c5ca4afd 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -217,7 +217,7 @@ static irqreturn_t mctrl_gpio_irq_handle(int irq, void *context)
*
* This will get the {cts,rts,...}-gpios from device tree if they are present
* and request them, set direction etc, and return an allocated structure.
- * `devm_*` functions are used, so there's no need to call mctrl_gpio_free().
+ * `devm_*` functions are used, so there's no need to free them explicitly.
* As this sets up the irq handling, make sure to not handle changes to the
* gpio input lines in your driver, too.
*/
@@ -268,32 +268,6 @@ struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx)
EXPORT_SYMBOL_GPL(mctrl_gpio_init);
/**
- * mctrl_gpio_free - explicitly free uart gpios
- * @dev: uart port's device
- * @gpios: gpios structure to be freed
- *
- * This will free the requested gpios in mctrl_gpio_init(). As `devm_*`
- * functions are used, there's generally no need to call this function.
- */
-void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios)
-{
- enum mctrl_gpio_idx i;
-
- if (gpios == NULL)
- return;
-
- for (i = 0; i < UART_GPIO_MAX; i++) {
- if (gpios->irq[i])
- devm_free_irq(gpios->port->dev, gpios->irq[i], gpios);
-
- if (gpios->gpio[i])
- devm_gpiod_put(dev, gpios->gpio[i]);
- }
- devm_kfree(dev, gpios);
-}
-EXPORT_SYMBOL_GPL(mctrl_gpio_free);
-
-/**
* mctrl_gpio_enable_ms - enable irqs and handling of changes to the ms lines
* @gpios: gpios to enable
*/
@@ -322,11 +296,7 @@ void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios)
}
EXPORT_SYMBOL_GPL(mctrl_gpio_enable_ms);
-/**
- * mctrl_gpio_disable_ms - disable irqs and handling of changes to the ms lines
- * @gpios: gpios to disable
- */
-void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
+static void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios, bool sync)
{
enum mctrl_gpio_idx i;
@@ -342,10 +312,34 @@ void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
if (!gpios->irq[i])
continue;
- disable_irq(gpios->irq[i]);
+ if (sync)
+ disable_irq(gpios->irq[i]);
+ else
+ disable_irq_nosync(gpios->irq[i]);
}
}
-EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms);
+
+/**
+ * mctrl_gpio_disable_ms_sync - disable irqs and handling of changes to the ms
+ * lines, and wait for any pending IRQ to be processed
+ * @gpios: gpios to disable
+ */
+void mctrl_gpio_disable_ms_sync(struct mctrl_gpios *gpios)
+{
+ mctrl_gpio_disable_ms(gpios, true);
+}
+EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms_sync);
+
+/**
+ * mctrl_gpio_disable_ms_no_sync - disable irqs and handling of changes to the
+ * ms lines, and return immediately
+ * @gpios: gpios to disable
+ */
+void mctrl_gpio_disable_ms_no_sync(struct mctrl_gpios *gpios)
+{
+ mctrl_gpio_disable_ms(gpios, false);
+}
+EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms_no_sync);
void mctrl_gpio_enable_irq_wake(struct mctrl_gpios *gpios)
{
diff --git a/drivers/tty/serial/serial_mctrl_gpio.h b/drivers/tty/serial/serial_mctrl_gpio.h
index fc76910fb105..961e4ba0c6f8 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.h
+++ b/drivers/tty/serial/serial_mctrl_gpio.h
@@ -59,7 +59,7 @@ struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
/*
* Request and set direction of modem control line GPIOs and set up irq
* handling.
- * devm_* functions are used, so there's no need to call mctrl_gpio_free().
+ * devm_* functions are used, so there's no need to free them explicitly.
* Returns a pointer to the allocated mctrl structure if ok, -ENOMEM on
* allocation error.
*/
@@ -67,7 +67,7 @@ struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx);
/*
* Request and set direction of modem control line GPIOs.
- * devm_* functions are used, so there's no need to call mctrl_gpio_free().
+ * devm_* functions are used, so there's no need to free them explicitly.
* Returns a pointer to the allocated mctrl structure if ok, -ENOMEM on
* allocation error.
*/
@@ -75,21 +75,21 @@ struct mctrl_gpios *mctrl_gpio_init_noauto(struct device *dev,
unsigned int idx);
/*
- * Free the mctrl_gpios structure.
- * Normally, this function will not be called, as the GPIOs will
- * be disposed of by the resource management code.
+ * Enable gpio interrupts to report status line changes.
*/
-void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios);
+void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios);
/*
- * Enable gpio interrupts to report status line changes.
+ * Disable gpio interrupts to report status line changes, and block until
+ * any corresponding IRQ is processed
*/
-void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios);
+void mctrl_gpio_disable_ms_sync(struct mctrl_gpios *gpios);
/*
- * Disable gpio interrupts to report status line changes.
+ * Disable gpio interrupts to report status line changes, and return
+ * immediately
*/
-void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios);
+void mctrl_gpio_disable_ms_no_sync(struct mctrl_gpios *gpios);
/*
* Enable gpio wakeup interrupts to enable wake up source.
@@ -139,16 +139,15 @@ struct mctrl_gpios *mctrl_gpio_init_noauto(struct device *dev, unsigned int idx)
return NULL;
}
-static inline
-void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios)
+static inline void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios)
{
}
-static inline void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios)
+static inline void mctrl_gpio_disable_ms_sync(struct mctrl_gpios *gpios)
{
}
-static inline void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
+static inline void mctrl_gpio_disable_ms_no_sync(struct mctrl_gpios *gpios)
{
}
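The two entry points split the old mctrl_gpio_disable_ms() along the disable_irq()/disable_irq_nosync() line. A hedged usage sketch (the callers match the imx and atmel hunks earlier in this series; the deadlock rationale is the usual one for disable_irq() under a lock the handler also takes):

	/* Process context, port lock not held (e.g. a shutdown path):
	 * wait until any in-flight ms-line IRQ handler has finished. */
	mctrl_gpio_disable_ms_sync(sport->gpios);

	/* Called with the port lock held, which the IRQ handler also
	 * acquires, so waiting could deadlock: disable and return. */
	mctrl_gpio_disable_ms_no_sync(sport->gpios);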
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index b72c3bc19bfa..1c8480d0338e 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -104,6 +104,20 @@ struct plat_sci_reg {
u8 offset, size;
};
+struct sci_suspend_regs {
+ u16 scdl;
+ u16 sccks;
+ u16 scsmr;
+ u16 scscr;
+ u16 scfcr;
+ u16 scsptr;
+ u16 hssrr;
+ u16 scpcr;
+ u16 scpdr;
+ u8 scbrr;
+ u8 semr;
+};
+
struct sci_port_params {
const struct plat_sci_reg regs[SCIx_NR_REGS];
unsigned int fifosize;
@@ -134,6 +148,8 @@ struct sci_port {
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
+ struct reset_control *rstc;
+
#ifdef CONFIG_SERIAL_SH_SCI_DMA
struct dma_chan *chan_tx_saved;
struct dma_chan *chan_rx_saved;
@@ -153,6 +169,7 @@ struct sci_port {
int rx_trigger;
struct timer_list rx_fifo_timer;
int rx_fifo_timeout;
+ struct sci_suspend_regs suspend_regs;
u16 hscif_tot;
bool has_rtscts;
@@ -2297,7 +2314,7 @@ static void sci_shutdown(struct uart_port *port)
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
s->autorts = false;
- mctrl_gpio_disable_ms(to_sci_port(port)->gpios);
+ mctrl_gpio_disable_ms_sync(to_sci_port(port)->gpios);
uart_port_lock_irqsave(port, &flags);
sci_stop_rx(port);
@@ -3373,6 +3390,7 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
}
sp = &sci_ports[id];
+ sp->rstc = rstc;
*dev_id = id;
p->type = SCI_OF_TYPE(data);
@@ -3545,13 +3563,77 @@ static int sci_probe(struct platform_device *dev)
return 0;
}
+static void sci_console_save(struct sci_port *s)
+{
+ struct sci_suspend_regs *regs = &s->suspend_regs;
+ struct uart_port *port = &s->port;
+
+ if (sci_getreg(port, SCDL)->size)
+ regs->scdl = sci_serial_in(port, SCDL);
+ if (sci_getreg(port, SCCKS)->size)
+ regs->sccks = sci_serial_in(port, SCCKS);
+ if (sci_getreg(port, SCSMR)->size)
+ regs->scsmr = sci_serial_in(port, SCSMR);
+ if (sci_getreg(port, SCSCR)->size)
+ regs->scscr = sci_serial_in(port, SCSCR);
+ if (sci_getreg(port, SCFCR)->size)
+ regs->scfcr = sci_serial_in(port, SCFCR);
+ if (sci_getreg(port, SCSPTR)->size)
+ regs->scsptr = sci_serial_in(port, SCSPTR);
+ if (sci_getreg(port, SCBRR)->size)
+ regs->scbrr = sci_serial_in(port, SCBRR);
+ if (sci_getreg(port, HSSRR)->size)
+ regs->hssrr = sci_serial_in(port, HSSRR);
+ if (sci_getreg(port, SCPCR)->size)
+ regs->scpcr = sci_serial_in(port, SCPCR);
+ if (sci_getreg(port, SCPDR)->size)
+ regs->scpdr = sci_serial_in(port, SCPDR);
+ if (sci_getreg(port, SEMR)->size)
+ regs->semr = sci_serial_in(port, SEMR);
+}
+
+static void sci_console_restore(struct sci_port *s)
+{
+ struct sci_suspend_regs *regs = &s->suspend_regs;
+ struct uart_port *port = &s->port;
+
+ if (sci_getreg(port, SCDL)->size)
+ sci_serial_out(port, SCDL, regs->scdl);
+ if (sci_getreg(port, SCCKS)->size)
+ sci_serial_out(port, SCCKS, regs->sccks);
+ if (sci_getreg(port, SCSMR)->size)
+ sci_serial_out(port, SCSMR, regs->scsmr);
+ if (sci_getreg(port, SCSCR)->size)
+ sci_serial_out(port, SCSCR, regs->scscr);
+ if (sci_getreg(port, SCFCR)->size)
+ sci_serial_out(port, SCFCR, regs->scfcr);
+ if (sci_getreg(port, SCSPTR)->size)
+ sci_serial_out(port, SCSPTR, regs->scsptr);
+ if (sci_getreg(port, SCBRR)->size)
+ sci_serial_out(port, SCBRR, regs->scbrr);
+ if (sci_getreg(port, HSSRR)->size)
+ sci_serial_out(port, HSSRR, regs->hssrr);
+ if (sci_getreg(port, SCPCR)->size)
+ sci_serial_out(port, SCPCR, regs->scpcr);
+ if (sci_getreg(port, SCPDR)->size)
+ sci_serial_out(port, SCPDR, regs->scpdr);
+ if (sci_getreg(port, SEMR)->size)
+ sci_serial_out(port, SEMR, regs->semr);
+}
+
static __maybe_unused int sci_suspend(struct device *dev)
{
struct sci_port *sport = dev_get_drvdata(dev);
- if (sport)
+ if (sport) {
uart_suspend_port(&sci_uart_driver, &sport->port);
+ if (!console_suspend_enabled && uart_console(&sport->port))
+ sci_console_save(sport);
+ else
+ return reset_control_assert(sport->rstc);
+ }
+
return 0;
}
@@ -3559,8 +3641,18 @@ static __maybe_unused int sci_resume(struct device *dev)
{
struct sci_port *sport = dev_get_drvdata(dev);
- if (sport)
+ if (sport) {
+ if (!console_suspend_enabled && uart_console(&sport->port)) {
+ sci_console_restore(sport);
+ } else {
+ int ret = reset_control_deassert(sport->rstc);
+
+ if (ret)
+ return ret;
+ }
+
uart_resume_port(&sci_uart_driver, &sport->port);
+ }
return 0;
}
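Both branches key on the same predicate: with "no_console_suspend" on the kernel command line, console_suspend_enabled is false and a serial console must keep working across suspend, so its registers are saved and restored by hand rather than asserting the module reset (which would lose them). The test, written out as a hypothetical helper for clarity (not part of the patch):

static bool sci_console_stays_live(struct sci_port *sport)
{
	/* Hypothetical helper: true when the port is a console that must
	 * survive suspend with its register state intact. */
	return !console_suspend_enabled && uart_console(&sport->port);
}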
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 1ec5d8c3aef8..ad06b760cfca 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -944,7 +944,7 @@ static void stm32_usart_enable_ms(struct uart_port *port)
static void stm32_usart_disable_ms(struct uart_port *port)
{
- mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
+ mctrl_gpio_disable_ms_sync(to_stm32_port(port)->gpios);
}
/* Transmit stop */
@@ -965,10 +965,8 @@ static void stm32_usart_start_tx(struct uart_port *port)
{
struct tty_port *tport = &port->state->port;
- if (kfifo_is_empty(&tport->xmit_fifo) && !port->x_char) {
- stm32_usart_rs485_rts_disable(port);
+ if (kfifo_is_empty(&tport->xmit_fifo) && !port->x_char)
return;
- }
stm32_usart_rs485_rts_enable(port);
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 7f0fef07e141..383141fe7ba0 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -151,16 +151,6 @@ static void serial_out(struct uart_sunsu_port *up, int offset, int value)
}
/*
- * We used to support using pause I/O for certain machines. We
- * haven't supported this for a while, but just in case it's badly
- * needed for certain old 386 machines, I've left these #define's
- * in....
- */
-#define serial_inp(up, offset) serial_in(up, offset)
-#define serial_outp(up, offset, value) serial_out(up, offset, value)
-
-
-/*
* For the 16C950
*/
static void serial_icr_write(struct uart_sunsu_port *up, int offset, int value)
@@ -169,20 +159,6 @@ static void serial_icr_write(struct uart_sunsu_port *up, int offset, int value)
serial_out(up, UART_ICR, value);
}
-#if 0 /* Unused currently */
-static unsigned int serial_icr_read(struct uart_sunsu_port *up, int offset)
-{
- unsigned int value;
-
- serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD);
- serial_out(up, UART_SCR, offset);
- value = serial_in(up, UART_ICR);
- serial_icr_write(up, UART_ACR, up->acr);
-
- return value;
-}
-#endif
-
#ifdef CONFIG_SERIAL_8250_RSA
/*
* Attempts to turn on the RSA FIFO. Returns zero on failure.
@@ -193,12 +169,12 @@ static int __enable_rsa(struct uart_sunsu_port *up)
unsigned char mode;
int result;
- mode = serial_inp(up, UART_RSA_MSR);
+ mode = serial_in(up, UART_RSA_MSR);
result = mode & UART_RSA_MSR_FIFO;
if (!result) {
- serial_outp(up, UART_RSA_MSR, mode | UART_RSA_MSR_FIFO);
- mode = serial_inp(up, UART_RSA_MSR);
+ serial_out(up, UART_RSA_MSR, mode | UART_RSA_MSR_FIFO);
+ mode = serial_in(up, UART_RSA_MSR);
result = mode & UART_RSA_MSR_FIFO;
}
@@ -217,7 +193,7 @@ static void enable_rsa(struct uart_sunsu_port *up)
uart_port_unlock_irq(&up->port);
}
if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
- serial_outp(up, UART_RSA_FRR, 0);
+ serial_out(up, UART_RSA_FRR, 0);
}
}
@@ -236,12 +212,12 @@ static void disable_rsa(struct uart_sunsu_port *up)
up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
uart_port_lock_irq(&up->port);
- mode = serial_inp(up, UART_RSA_MSR);
+ mode = serial_in(up, UART_RSA_MSR);
result = !(mode & UART_RSA_MSR_FIFO);
if (!result) {
- serial_outp(up, UART_RSA_MSR, mode & ~UART_RSA_MSR_FIFO);
- mode = serial_inp(up, UART_RSA_MSR);
+ serial_out(up, UART_RSA_MSR, mode & ~UART_RSA_MSR_FIFO);
+ mode = serial_in(up, UART_RSA_MSR);
result = !(mode & UART_RSA_MSR_FIFO);
}
@@ -326,7 +302,7 @@ receive_chars(struct uart_sunsu_port *up, unsigned char *status)
int saw_console_brk = 0;
do {
- ch = serial_inp(up, UART_RX);
+ ch = serial_in(up, UART_RX);
flag = TTY_NORMAL;
up->port.icount.rx++;
@@ -387,7 +363,7 @@ receive_chars(struct uart_sunsu_port *up, unsigned char *status)
*/
tty_insert_flip_char(port, 0, TTY_OVERRUN);
ignore_char:
- *status = serial_inp(up, UART_LSR);
+ *status = serial_in(up, UART_LSR);
} while ((*status & UART_LSR_DR) && (max_count-- > 0));
if (saw_console_brk)
@@ -401,7 +377,7 @@ static void transmit_chars(struct uart_sunsu_port *up)
int count;
if (up->port.x_char) {
- serial_outp(up, UART_TX, up->port.x_char);
+ serial_out(up, UART_TX, up->port.x_char);
up->port.icount.tx++;
up->port.x_char = 0;
return;
@@ -460,7 +436,7 @@ static irqreturn_t sunsu_serial_interrupt(int irq, void *dev_id)
uart_port_lock_irqsave(&up->port, &flags);
do {
- status = serial_inp(up, UART_LSR);
+ status = serial_in(up, UART_LSR);
if (status & UART_LSR_DR)
receive_chars(up, &status);
check_modem_status(up);
@@ -498,7 +474,7 @@ static void sunsu_change_mouse_baud(struct uart_sunsu_port *up)
static void receive_kbd_ms_chars(struct uart_sunsu_port *up, int is_break)
{
do {
- unsigned char ch = serial_inp(up, UART_RX);
+ unsigned char ch = serial_in(up, UART_RX);
/* Stop-A is handled by drivers/char/keyboard.c now. */
if (up->su_type == SU_PORT_KBD) {
@@ -530,7 +506,7 @@ static irqreturn_t sunsu_kbd_ms_interrupt(int irq, void *dev_id)
struct uart_sunsu_port *up = dev_id;
if (!(serial_in(up, UART_IIR) & UART_IIR_NO_INT)) {
- unsigned char status = serial_inp(up, UART_LSR);
+ unsigned char status = serial_in(up, UART_LSR);
if ((status & UART_LSR_DR) || (status & UART_LSR_BI))
receive_kbd_ms_chars(up, (status & UART_LSR_BI) != 0);
@@ -619,14 +595,14 @@ static int sunsu_startup(struct uart_port *port)
if (up->port.type == PORT_16C950) {
/* Wake up and initialize UART */
up->acr = 0;
- serial_outp(up, UART_LCR, 0xBF);
- serial_outp(up, UART_EFR, UART_EFR_ECB);
- serial_outp(up, UART_IER, 0);
- serial_outp(up, UART_LCR, 0);
+ serial_out(up, UART_LCR, 0xBF);
+ serial_out(up, UART_EFR, UART_EFR_ECB);
+ serial_out(up, UART_IER, 0);
+ serial_out(up, UART_LCR, 0);
serial_icr_write(up, UART_CSR, 0); /* Reset the UART */
- serial_outp(up, UART_LCR, 0xBF);
- serial_outp(up, UART_EFR, UART_EFR_ECB);
- serial_outp(up, UART_LCR, 0);
+ serial_out(up, UART_LCR, 0xBF);
+ serial_out(up, UART_EFR, UART_EFR_ECB);
+ serial_out(up, UART_LCR, 0);
}
#ifdef CONFIG_SERIAL_8250_RSA
@@ -642,19 +618,19 @@ static int sunsu_startup(struct uart_port *port)
* (they will be reenabled in set_termios())
*/
if (uart_config[up->port.type].flags & UART_CLEAR_FIFO) {
- serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
- serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
- serial_outp(up, UART_FCR, 0);
+ serial_out(up, UART_FCR, 0);
}
/*
* Clear the interrupt registers.
*/
- (void) serial_inp(up, UART_LSR);
- (void) serial_inp(up, UART_RX);
- (void) serial_inp(up, UART_IIR);
- (void) serial_inp(up, UART_MSR);
+ (void) serial_in(up, UART_LSR);
+ (void) serial_in(up, UART_RX);
+ (void) serial_in(up, UART_IIR);
+ (void) serial_in(up, UART_MSR);
/*
* At this point, there's no way the LSR could still be 0xff;
@@ -662,7 +638,7 @@ static int sunsu_startup(struct uart_port *port)
* here.
*/
if (!(up->port.flags & UPF_BUGGY_UART) &&
- (serial_inp(up, UART_LSR) == 0xff)) {
+ (serial_in(up, UART_LSR) == 0xff)) {
printk("ttyS%d: LSR safety check engaged!\n", up->port.line);
return -ENODEV;
}
@@ -682,7 +658,7 @@ static int sunsu_startup(struct uart_port *port)
/*
* Now, initialize the UART
*/
- serial_outp(up, UART_LCR, UART_LCR_WLEN8);
+ serial_out(up, UART_LCR, UART_LCR_WLEN8);
uart_port_lock_irqsave(&up->port, &flags);
@@ -697,7 +673,7 @@ static int sunsu_startup(struct uart_port *port)
* anyway, so we don't enable them here.
*/
up->ier = UART_IER_RLSI | UART_IER_RDI;
- serial_outp(up, UART_IER, up->ier);
+ serial_out(up, UART_IER, up->ier);
if (up->port.flags & UPF_FOURPORT) {
unsigned int icp;
@@ -712,10 +688,10 @@ static int sunsu_startup(struct uart_port *port)
/*
* And clear the interrupt registers again for luck.
*/
- (void) serial_inp(up, UART_LSR);
- (void) serial_inp(up, UART_RX);
- (void) serial_inp(up, UART_IIR);
- (void) serial_inp(up, UART_MSR);
+ (void) serial_in(up, UART_LSR);
+ (void) serial_in(up, UART_RX);
+ (void) serial_in(up, UART_IIR);
+ (void) serial_in(up, UART_MSR);
return 0;
}
@@ -730,7 +706,7 @@ static void sunsu_shutdown(struct uart_port *port)
* Disable interrupts from this port
*/
up->ier = 0;
- serial_outp(up, UART_IER, 0);
+ serial_out(up, UART_IER, 0);
uart_port_lock_irqsave(&up->port, &flags);
if (up->port.flags & UPF_FOURPORT) {
@@ -746,11 +722,11 @@ static void sunsu_shutdown(struct uart_port *port)
/*
* Disable break condition and FIFOs
*/
- serial_out(up, UART_LCR, serial_inp(up, UART_LCR) & ~UART_LCR_SBC);
- serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+ serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR |
UART_FCR_CLEAR_XMIT);
- serial_outp(up, UART_FCR, 0);
+ serial_out(up, UART_FCR, 0);
#ifdef CONFIG_SERIAL_8250_RSA
/*
@@ -872,22 +848,22 @@ sunsu_change_speed(struct uart_port *port, unsigned int cflag,
serial_out(up, UART_IER, up->ier);
if (uart_config[up->port.type].flags & UART_STARTECH) {
- serial_outp(up, UART_LCR, 0xBF);
- serial_outp(up, UART_EFR, cflag & CRTSCTS ? UART_EFR_CTS :0);
+ serial_out(up, UART_LCR, 0xBF);
+ serial_out(up, UART_EFR, cflag & CRTSCTS ? UART_EFR_CTS :0);
}
- serial_outp(up, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */
- serial_outp(up, UART_DLL, quot & 0xff); /* LS of divisor */
- serial_outp(up, UART_DLM, quot >> 8); /* MS of divisor */
+ serial_out(up, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */
+ serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
+ serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
if (up->port.type == PORT_16750)
- serial_outp(up, UART_FCR, fcr); /* set fcr */
- serial_outp(up, UART_LCR, cval); /* reset DLAB */
+ serial_out(up, UART_FCR, fcr); /* set fcr */
+ serial_out(up, UART_LCR, cval); /* reset DLAB */
up->lcr = cval; /* Save LCR */
if (up->port.type != PORT_16750) {
if (fcr & UART_FCR_ENABLE_FIFO) {
/* emulated UARTs (Lucent Venus 167x) need two steps */
- serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
}
- serial_outp(up, UART_FCR, fcr); /* set fcr */
+ serial_out(up, UART_FCR, fcr); /* set fcr */
}
up->cflag = cflag;
@@ -1051,18 +1027,18 @@ static void sunsu_autoconfig(struct uart_sunsu_port *up)
* 0x80 is a non-existent port, which should be safe since
* include/asm/io.h also makes this assumption.
*/
- scratch = serial_inp(up, UART_IER);
- serial_outp(up, UART_IER, 0);
+ scratch = serial_in(up, UART_IER);
+ serial_out(up, UART_IER, 0);
#ifdef __i386__
outb(0xff, 0x080);
#endif
- scratch2 = serial_inp(up, UART_IER);
- serial_outp(up, UART_IER, 0x0f);
+ scratch2 = serial_in(up, UART_IER);
+ serial_out(up, UART_IER, 0x0f);
#ifdef __i386__
outb(0, 0x080);
#endif
- scratch3 = serial_inp(up, UART_IER);
- serial_outp(up, UART_IER, scratch);
+ scratch3 = serial_in(up, UART_IER);
+ serial_out(up, UART_IER, scratch);
if (scratch2 != 0 || scratch3 != 0x0F)
goto out; /* We failed; there's nothing here */
}
@@ -1080,16 +1056,16 @@ static void sunsu_autoconfig(struct uart_sunsu_port *up)
* that conflicts with COM 1-4 --- we hope!
*/
if (!(up->port.flags & UPF_SKIP_TEST)) {
- serial_outp(up, UART_MCR, UART_MCR_LOOP | 0x0A);
- status1 = serial_inp(up, UART_MSR) & 0xF0;
- serial_outp(up, UART_MCR, save_mcr);
+ serial_out(up, UART_MCR, UART_MCR_LOOP | 0x0A);
+ status1 = serial_in(up, UART_MSR) & 0xF0;
+ serial_out(up, UART_MCR, save_mcr);
if (status1 != 0x90)
goto out; /* We failed loopback test */
}
- serial_outp(up, UART_LCR, 0xBF); /* set up for StarTech test */
- serial_outp(up, UART_EFR, 0); /* EFR is the same as FCR */
- serial_outp(up, UART_LCR, 0);
- serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_out(up, UART_LCR, 0xBF); /* set up for StarTech test */
+ serial_out(up, UART_EFR, 0); /* EFR is the same as FCR */
+ serial_out(up, UART_LCR, 0);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
scratch = serial_in(up, UART_IIR) >> 6;
switch (scratch) {
case 0:
@@ -1107,19 +1083,19 @@ static void sunsu_autoconfig(struct uart_sunsu_port *up)
}
if (up->port.type == PORT_16550A) {
/* Check for Startech UARTs */
- serial_outp(up, UART_LCR, UART_LCR_DLAB);
+ serial_out(up, UART_LCR, UART_LCR_DLAB);
if (serial_in(up, UART_EFR) == 0) {
up->port.type = PORT_16650;
} else {
- serial_outp(up, UART_LCR, 0xBF);
+ serial_out(up, UART_LCR, 0xBF);
if (serial_in(up, UART_EFR) == 0)
up->port.type = PORT_16650V2;
}
}
if (up->port.type == PORT_16550A) {
/* Check for TI 16750 */
- serial_outp(up, UART_LCR, save_lcr | UART_LCR_DLAB);
- serial_outp(up, UART_FCR,
+ serial_out(up, UART_LCR, save_lcr | UART_LCR_DLAB);
+ serial_out(up, UART_FCR,
UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
scratch = serial_in(up, UART_IIR) >> 5;
if (scratch == 7) {
@@ -1129,24 +1105,24 @@ static void sunsu_autoconfig(struct uart_sunsu_port *up)
* mode if the UART_FCR7_64BYTE bit was set
* while UART_LCR_DLAB was latched.
*/
- serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
- serial_outp(up, UART_LCR, 0);
- serial_outp(up, UART_FCR,
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_out(up, UART_LCR, 0);
+ serial_out(up, UART_FCR,
UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
scratch = serial_in(up, UART_IIR) >> 5;
if (scratch == 6)
up->port.type = PORT_16750;
}
- serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
}
- serial_outp(up, UART_LCR, save_lcr);
+ serial_out(up, UART_LCR, save_lcr);
if (up->port.type == PORT_16450) {
scratch = serial_in(up, UART_SCR);
- serial_outp(up, UART_SCR, 0xa5);
+ serial_out(up, UART_SCR, 0xa5);
status1 = serial_in(up, UART_SCR);
- serial_outp(up, UART_SCR, 0x5a);
+ serial_out(up, UART_SCR, 0x5a);
status2 = serial_in(up, UART_SCR);
- serial_outp(up, UART_SCR, scratch);
+ serial_out(up, UART_SCR, scratch);
if ((status1 != 0xa5) || (status2 != 0x5a))
up->port.type = PORT_8250;
@@ -1163,15 +1139,15 @@ static void sunsu_autoconfig(struct uart_sunsu_port *up)
*/
#ifdef CONFIG_SERIAL_8250_RSA
if (up->port.type == PORT_RSA)
- serial_outp(up, UART_RSA_FRR, 0);
+ serial_out(up, UART_RSA_FRR, 0);
#endif
- serial_outp(up, UART_MCR, save_mcr);
- serial_outp(up, UART_FCR, (UART_FCR_ENABLE_FIFO |
+ serial_out(up, UART_MCR, save_mcr);
+ serial_out(up, UART_FCR, (UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR |
UART_FCR_CLEAR_XMIT));
- serial_outp(up, UART_FCR, 0);
+ serial_out(up, UART_FCR, 0);
(void)serial_in(up, UART_RX);
- serial_outp(up, UART_IER, 0);
+ serial_out(up, UART_IER, 0);
out:
uart_port_unlock_irqrestore(&up->port, flags);
diff --git a/drivers/tty/serial/tegra-utc.c b/drivers/tty/serial/tegra-utc.c
new file mode 100644
index 000000000000..39b14fe813c9
--- /dev/null
+++ b/drivers/tty/serial/tegra-utc.c
@@ -0,0 +1,625 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// NVIDIA Tegra UTC (UART Trace Controller) driver.
+
+#include <linux/bits.h>
+#include <linux/console.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/kfifo.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+#include <linux/platform_device.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/types.h>
+
+#define TEGRA_UTC_ENABLE 0x000
+#define TEGRA_UTC_ENABLE_CLIENT_ENABLE BIT(0)
+
+#define TEGRA_UTC_FIFO_THRESHOLD 0x008
+
+#define TEGRA_UTC_COMMAND 0x00c
+#define TEGRA_UTC_COMMAND_RESET BIT(0)
+#define TEGRA_UTC_COMMAND_FLUSH BIT(1)
+
+#define TEGRA_UTC_DATA 0x020
+
+#define TEGRA_UTC_FIFO_STATUS 0x100
+#define TEGRA_UTC_FIFO_EMPTY BIT(0)
+#define TEGRA_UTC_FIFO_FULL BIT(1)
+#define TEGRA_UTC_FIFO_REQ BIT(2)
+#define TEGRA_UTC_FIFO_OVERFLOW BIT(3)
+#define TEGRA_UTC_FIFO_TIMEOUT BIT(4)
+
+#define TEGRA_UTC_FIFO_OCCUPANCY 0x104
+
+#define TEGRA_UTC_INTR_STATUS 0x108
+#define TEGRA_UTC_INTR_SET 0x10c
+#define TEGRA_UTC_INTR_MASK 0x110
+#define TEGRA_UTC_INTR_CLEAR 0x114
+#define TEGRA_UTC_INTR_EMPTY BIT(0)
+#define TEGRA_UTC_INTR_FULL BIT(1)
+#define TEGRA_UTC_INTR_REQ BIT(2)
+#define TEGRA_UTC_INTR_OVERFLOW BIT(3)
+#define TEGRA_UTC_INTR_TIMEOUT BIT(4)
+
+#define TEGRA_UTC_UART_NR 16
+
+#define TEGRA_UTC_INTR_COMMON (TEGRA_UTC_INTR_REQ | TEGRA_UTC_INTR_FULL | TEGRA_UTC_INTR_EMPTY)
+
+struct tegra_utc_port {
+#if IS_ENABLED(CONFIG_SERIAL_TEGRA_UTC_CONSOLE)
+ struct console console;
+#endif
+ struct uart_port port;
+
+ void __iomem *rx_base;
+ void __iomem *tx_base;
+
+ u32 tx_irqmask;
+ u32 rx_irqmask;
+
+ unsigned int fifosize;
+ u32 tx_threshold;
+ u32 rx_threshold;
+};
+
+static u32 tegra_utc_rx_readl(struct tegra_utc_port *tup, unsigned int offset)
+{
+ void __iomem *addr = tup->rx_base + offset;
+
+ return readl_relaxed(addr);
+}
+
+static void tegra_utc_rx_writel(struct tegra_utc_port *tup, u32 val, unsigned int offset)
+{
+ void __iomem *addr = tup->rx_base + offset;
+
+ writel_relaxed(val, addr);
+}
+
+static u32 tegra_utc_tx_readl(struct tegra_utc_port *tup, unsigned int offset)
+{
+ void __iomem *addr = tup->tx_base + offset;
+
+ return readl_relaxed(addr);
+}
+
+static void tegra_utc_tx_writel(struct tegra_utc_port *tup, u32 val, unsigned int offset)
+{
+ void __iomem *addr = tup->tx_base + offset;
+
+ writel_relaxed(val, addr);
+}
+
+static void tegra_utc_enable_tx_irq(struct tegra_utc_port *tup)
+{
+ tup->tx_irqmask = TEGRA_UTC_INTR_REQ;
+
+ tegra_utc_tx_writel(tup, tup->tx_irqmask, TEGRA_UTC_INTR_MASK);
+ tegra_utc_tx_writel(tup, tup->tx_irqmask, TEGRA_UTC_INTR_SET);
+}
+
+static void tegra_utc_disable_tx_irq(struct tegra_utc_port *tup)
+{
+ tup->tx_irqmask = 0x0;
+
+ tegra_utc_tx_writel(tup, tup->tx_irqmask, TEGRA_UTC_INTR_MASK);
+ tegra_utc_tx_writel(tup, tup->tx_irqmask, TEGRA_UTC_INTR_SET);
+}
+
+static void tegra_utc_stop_tx(struct uart_port *port)
+{
+ struct tegra_utc_port *tup = container_of(port, struct tegra_utc_port, port);
+
+ tegra_utc_disable_tx_irq(tup);
+}
+
+static void tegra_utc_init_tx(struct tegra_utc_port *tup)
+{
+ /* Disable TX. */
+ tegra_utc_tx_writel(tup, 0x0, TEGRA_UTC_ENABLE);
+
+ /* Update the FIFO Threshold. */
+ tegra_utc_tx_writel(tup, tup->tx_threshold, TEGRA_UTC_FIFO_THRESHOLD);
+
+ /* Clear and mask all the interrupts. */
+ tegra_utc_tx_writel(tup, TEGRA_UTC_INTR_COMMON, TEGRA_UTC_INTR_CLEAR);
+ tegra_utc_disable_tx_irq(tup);
+
+ /* Enable TX. */
+ tegra_utc_tx_writel(tup, TEGRA_UTC_ENABLE_CLIENT_ENABLE, TEGRA_UTC_ENABLE);
+}
+
+static void tegra_utc_init_rx(struct tegra_utc_port *tup)
+{
+ tup->rx_irqmask = TEGRA_UTC_INTR_REQ | TEGRA_UTC_INTR_TIMEOUT;
+
+ tegra_utc_rx_writel(tup, TEGRA_UTC_COMMAND_RESET, TEGRA_UTC_COMMAND);
+ tegra_utc_rx_writel(tup, tup->rx_threshold, TEGRA_UTC_FIFO_THRESHOLD);
+
+ /* Clear all the pending interrupts. */
+ tegra_utc_rx_writel(tup, TEGRA_UTC_INTR_TIMEOUT | TEGRA_UTC_INTR_OVERFLOW |
+ TEGRA_UTC_INTR_COMMON, TEGRA_UTC_INTR_CLEAR);
+ tegra_utc_rx_writel(tup, tup->rx_irqmask, TEGRA_UTC_INTR_MASK);
+ tegra_utc_rx_writel(tup, tup->rx_irqmask, TEGRA_UTC_INTR_SET);
+
+ /* Enable RX. */
+ tegra_utc_rx_writel(tup, TEGRA_UTC_ENABLE_CLIENT_ENABLE, TEGRA_UTC_ENABLE);
+}
+
+static bool tegra_utc_tx_chars(struct tegra_utc_port *tup)
+{
+ struct uart_port *port = &tup->port;
+ unsigned int pending;
+ u8 c;
+
+ pending = uart_port_tx(port, c,
+ !(tegra_utc_tx_readl(tup, TEGRA_UTC_FIFO_STATUS) & TEGRA_UTC_FIFO_FULL),
+ tegra_utc_tx_writel(tup, c, TEGRA_UTC_DATA));
+
+ return pending;
+}
+
+static void tegra_utc_rx_chars(struct tegra_utc_port *tup)
+{
+ struct tty_port *port = &tup->port.state->port;
+ unsigned int max_chars = 256;
+ u32 status;
+ int sysrq;
+ u32 ch;
+
+ while (max_chars--) {
+ status = tegra_utc_rx_readl(tup, TEGRA_UTC_FIFO_STATUS);
+ if (status & TEGRA_UTC_FIFO_EMPTY)
+ break;
+
+ ch = tegra_utc_rx_readl(tup, TEGRA_UTC_DATA);
+ tup->port.icount.rx++;
+
+ if (status & TEGRA_UTC_FIFO_OVERFLOW)
+ tup->port.icount.overrun++;
+
+ uart_port_unlock(&tup->port);
+ sysrq = uart_handle_sysrq_char(&tup->port, ch);
+ uart_port_lock(&tup->port);
+
+ if (!sysrq)
+ tty_insert_flip_char(port, ch, TTY_NORMAL);
+ }
+
+ tty_flip_buffer_push(port);
+}
+
+static irqreturn_t tegra_utc_isr(int irq, void *dev_id)
+{
+ struct tegra_utc_port *tup = dev_id;
+ unsigned int handled = 0;
+ u32 status;
+
+ uart_port_lock(&tup->port);
+
+ /* Process RX_REQ and RX_TIMEOUT interrupts. */
+ do {
+ status = tegra_utc_rx_readl(tup, TEGRA_UTC_INTR_STATUS) & tup->rx_irqmask;
+ if (status) {
+ tegra_utc_rx_writel(tup, tup->rx_irqmask, TEGRA_UTC_INTR_CLEAR);
+ tegra_utc_rx_chars(tup);
+ handled = 1;
+ }
+ } while (status);
+
+ /* Process TX_REQ interrupt. */
+ do {
+ status = tegra_utc_tx_readl(tup, TEGRA_UTC_INTR_STATUS) & tup->tx_irqmask;
+ if (status) {
+ tegra_utc_tx_writel(tup, tup->tx_irqmask, TEGRA_UTC_INTR_CLEAR);
+ tegra_utc_tx_chars(tup);
+ handled = 1;
+ }
+ } while (status);
+
+ uart_port_unlock(&tup->port);
+
+ return IRQ_RETVAL(handled);
+}
+
+static unsigned int tegra_utc_tx_empty(struct uart_port *port)
+{
+ struct tegra_utc_port *tup = container_of(port, struct tegra_utc_port, port);
+
+ return tegra_utc_tx_readl(tup, TEGRA_UTC_FIFO_OCCUPANCY) ? 0 : TIOCSER_TEMT;
+}
+
+static void tegra_utc_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+}
+
+static unsigned int tegra_utc_get_mctrl(struct uart_port *port)
+{
+ return 0;
+}
+
+static void tegra_utc_start_tx(struct uart_port *port)
+{
+ struct tegra_utc_port *tup = container_of(port, struct tegra_utc_port, port);
+
+ if (tegra_utc_tx_chars(tup))
+ tegra_utc_enable_tx_irq(tup);
+}
+
+static void tegra_utc_stop_rx(struct uart_port *port)
+{
+ struct tegra_utc_port *tup = container_of(port, struct tegra_utc_port, port);
+
+ tup->rx_irqmask = 0x0;
+ tegra_utc_rx_writel(tup, tup->rx_irqmask, TEGRA_UTC_INTR_MASK);
+ tegra_utc_rx_writel(tup, tup->rx_irqmask, TEGRA_UTC_INTR_SET);
+}
+
+static void tegra_utc_hw_init(struct tegra_utc_port *tup)
+{
+ tegra_utc_init_tx(tup);
+ tegra_utc_init_rx(tup);
+}
+
+static int tegra_utc_startup(struct uart_port *port)
+{
+ struct tegra_utc_port *tup = container_of(port, struct tegra_utc_port, port);
+ int ret;
+
+ tegra_utc_hw_init(tup);
+
+ /* Interrupt is dedicated to this UTC client. */
+ ret = request_irq(port->irq, tegra_utc_isr, 0, dev_name(port->dev), tup);
+ if (ret < 0)
+ dev_err(port->dev, "failed to register interrupt handler\n");
+
+ return ret;
+}
+
+static void tegra_utc_shutdown(struct uart_port *port)
+{
+ struct tegra_utc_port *tup = container_of(port, struct tegra_utc_port, port);
+
+ tegra_utc_rx_writel(tup, 0x0, TEGRA_UTC_ENABLE);
+ free_irq(port->irq, tup);
+}
+
+static void tegra_utc_set_termios(struct uart_port *port, struct ktermios *termios,
+ const struct ktermios *old)
+{
+ /* The Tegra UTC client supports only the 8-N-1 configuration without HW flow control. */
+ termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
+ termios->c_cflag &= ~(CMSPAR | CRTSCTS);
+ termios->c_cflag |= CS8 | CLOCAL;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+
+static int tegra_utc_poll_init(struct uart_port *port)
+{
+ struct tegra_utc_port *tup = container_of(port, struct tegra_utc_port, port);
+
+ tegra_utc_hw_init(tup);
+ return 0;
+}
+
+static int tegra_utc_get_poll_char(struct uart_port *port)
+{
+ struct tegra_utc_port *tup = container_of(port, struct tegra_utc_port, port);
+
+ if (tegra_utc_rx_readl(tup, TEGRA_UTC_FIFO_STATUS) & TEGRA_UTC_FIFO_EMPTY)
+ return NO_POLL_CHAR;
+
+ return tegra_utc_rx_readl(tup, TEGRA_UTC_DATA);
+}
+
+static void tegra_utc_put_poll_char(struct uart_port *port, unsigned char ch)
+{
+ struct tegra_utc_port *tup = container_of(port, struct tegra_utc_port, port);
+ u32 val;
+
+ read_poll_timeout_atomic(tegra_utc_tx_readl, val, !(val & TEGRA_UTC_FIFO_FULL),
+ 0, USEC_PER_SEC, false, tup, TEGRA_UTC_FIFO_STATUS);
+
+ tegra_utc_tx_writel(tup, ch, TEGRA_UTC_DATA);
+}
+
+#endif
+
+static const struct uart_ops tegra_utc_uart_ops = {
+ .tx_empty = tegra_utc_tx_empty,
+ .set_mctrl = tegra_utc_set_mctrl,
+ .get_mctrl = tegra_utc_get_mctrl,
+ .stop_tx = tegra_utc_stop_tx,
+ .start_tx = tegra_utc_start_tx,
+ .stop_rx = tegra_utc_stop_rx,
+ .startup = tegra_utc_startup,
+ .shutdown = tegra_utc_shutdown,
+ .set_termios = tegra_utc_set_termios,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_init = tegra_utc_poll_init,
+ .poll_get_char = tegra_utc_get_poll_char,
+ .poll_put_char = tegra_utc_put_poll_char,
+#endif
+};
+
+#if IS_ENABLED(CONFIG_SERIAL_TEGRA_UTC_CONSOLE)
+#define TEGRA_UTC_DEFAULT_FIFO_THRESHOLD 4
+#define TEGRA_UTC_EARLYCON_MAX_BURST_SIZE 128
+
+static void tegra_utc_putc(struct uart_port *port, unsigned char c)
+{
+ writel(c, port->membase + TEGRA_UTC_DATA);
+}
+
+static void tegra_utc_early_write(struct console *con, const char *s, unsigned int n)
+{
+ struct earlycon_device *dev = con->data;
+
+ while (n) {
+ u32 burst_size = TEGRA_UTC_EARLYCON_MAX_BURST_SIZE;
+
+ burst_size -= readl(dev->port.membase + TEGRA_UTC_FIFO_OCCUPANCY);
+ if (n < burst_size)
+ burst_size = n;
+
+ uart_console_write(&dev->port, s, burst_size, tegra_utc_putc);
+
+ n -= burst_size;
+ s += burst_size;
+ }
+}
+
+static int __init tegra_utc_early_console_setup(struct earlycon_device *device, const char *opt)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+
+ /* Configure TX */
+ writel(TEGRA_UTC_COMMAND_FLUSH | TEGRA_UTC_COMMAND_RESET,
+ device->port.membase + TEGRA_UTC_COMMAND);
+ writel(TEGRA_UTC_DEFAULT_FIFO_THRESHOLD, device->port.membase + TEGRA_UTC_FIFO_THRESHOLD);
+
+ /* Clear and mask all the interrupts. */
+ writel(TEGRA_UTC_INTR_COMMON, device->port.membase + TEGRA_UTC_INTR_CLEAR);
+
+ writel(0x0, device->port.membase + TEGRA_UTC_INTR_MASK);
+ writel(0x0, device->port.membase + TEGRA_UTC_INTR_SET);
+
+ /* Enable TX. */
+ writel(TEGRA_UTC_ENABLE_CLIENT_ENABLE, device->port.membase + TEGRA_UTC_ENABLE);
+
+ device->con->write = tegra_utc_early_write;
+
+ return 0;
+}
+OF_EARLYCON_DECLARE(tegra_utc, "nvidia,tegra264-utc", tegra_utc_early_console_setup);
+
+static void tegra_utc_console_putchar(struct uart_port *port, unsigned char ch)
+{
+ struct tegra_utc_port *tup = container_of(port, struct tegra_utc_port, port);
+
+ tegra_utc_tx_writel(tup, ch, TEGRA_UTC_DATA);
+}
+
+static void tegra_utc_console_write_atomic(struct console *cons, struct nbcon_write_context *wctxt)
+{
+ struct tegra_utc_port *tup = container_of(cons, struct tegra_utc_port, console);
+ unsigned int len;
+ char *outbuf;
+
+ if (!nbcon_enter_unsafe(wctxt))
+ return;
+
+ outbuf = wctxt->outbuf;
+ len = wctxt->len;
+
+ while (len) {
+ u32 burst_size = tup->fifosize;
+
+ burst_size -= tegra_utc_tx_readl(tup, TEGRA_UTC_FIFO_OCCUPANCY);
+ if (len < burst_size)
+ burst_size = len;
+
+ uart_console_write(&tup->port, outbuf, burst_size, tegra_utc_console_putchar);
+
+ outbuf += burst_size;
+ len -= burst_size;
+ }
+
+ nbcon_exit_unsafe(wctxt);
+}
+
+static void tegra_utc_console_write_thread(struct console *cons, struct nbcon_write_context *wctxt)
+{
+ struct tegra_utc_port *tup = container_of(cons, struct tegra_utc_port, console);
+ unsigned int len = READ_ONCE(wctxt->len);
+ unsigned int i;
+ u32 val;
+
+ for (i = 0; i < len; i++) {
+ if (!nbcon_enter_unsafe(wctxt))
+ break;
+
+ read_poll_timeout_atomic(tegra_utc_tx_readl, val, !(val & TEGRA_UTC_FIFO_FULL),
+ 0, USEC_PER_SEC, false, tup, TEGRA_UTC_FIFO_STATUS);
+ uart_console_write(&tup->port, wctxt->outbuf + i, 1, tegra_utc_console_putchar);
+
+ if (!nbcon_exit_unsafe(wctxt))
+ break;
+ }
+}
+
+static void tegra_utc_console_device_lock(struct console *cons, unsigned long *flags)
+{
+ struct tegra_utc_port *tup = container_of(cons, struct tegra_utc_port, console);
+ struct uart_port *port = &tup->port;
+
+ __uart_port_lock_irqsave(port, flags);
+}
+
+static void tegra_utc_console_device_unlock(struct console *cons, unsigned long flags)
+{
+ struct tegra_utc_port *tup = container_of(cons, struct tegra_utc_port, console);
+ struct uart_port *port = &tup->port;
+
+ __uart_port_unlock_irqrestore(port, flags);
+}
+
+static int tegra_utc_console_setup(struct console *cons, char *options)
+{
+ struct tegra_utc_port *tup = container_of(cons, struct tegra_utc_port, console);
+
+ tegra_utc_init_tx(tup);
+
+ return 0;
+}
+#endif
+
+static struct uart_driver tegra_utc_driver = {
+ .driver_name = "tegra-utc",
+ .dev_name = "ttyUTC",
+ .nr = TEGRA_UTC_UART_NR,
+};
+
+static int tegra_utc_setup_port(struct device *dev, struct tegra_utc_port *tup)
+{
+ tup->port.dev = dev;
+ tup->port.fifosize = tup->fifosize;
+ tup->port.flags = UPF_BOOT_AUTOCONF;
+ tup->port.iotype = UPIO_MEM;
+ tup->port.ops = &tegra_utc_uart_ops;
+ tup->port.type = PORT_TEGRA_TCU;
+ tup->port.private_data = tup;
+
+#if IS_ENABLED(CONFIG_SERIAL_TEGRA_UTC_CONSOLE)
+ strscpy(tup->console.name, "ttyUTC", sizeof(tup->console.name));
+ tup->console.write_atomic = tegra_utc_console_write_atomic;
+ tup->console.write_thread = tegra_utc_console_write_thread;
+ tup->console.device_lock = tegra_utc_console_device_lock;
+ tup->console.device_unlock = tegra_utc_console_device_unlock;
+ tup->console.device = uart_console_device;
+ tup->console.setup = tegra_utc_console_setup;
+ tup->console.flags = CON_PRINTBUFFER | CON_NBCON;
+ tup->console.data = &tegra_utc_driver;
+#endif
+
+ return uart_read_port_properties(&tup->port);
+}
+
+static int tegra_utc_register_port(struct tegra_utc_port *tup)
+{
+ int ret;
+
+ ret = uart_add_one_port(&tegra_utc_driver, &tup->port);
+ if (ret)
+ return ret;
+
+#if IS_ENABLED(CONFIG_SERIAL_TEGRA_UTC_CONSOLE)
+ register_console(&tup->console);
+#endif
+
+ return 0;
+}
+
+static int tegra_utc_probe(struct platform_device *pdev)
+{
+ const unsigned int *soc_fifosize;
+ struct device *dev = &pdev->dev;
+ struct tegra_utc_port *tup;
+ int ret;
+
+ tup = devm_kzalloc(dev, sizeof(*tup), GFP_KERNEL);
+ if (!tup)
+ return -ENOMEM;
+
+ ret = device_property_read_u32(dev, "tx-threshold", &tup->tx_threshold);
+ if (ret)
+ return dev_err_probe(dev, ret, "missing %s property\n", "tx-threshold");
+
+ ret = device_property_read_u32(dev, "rx-threshold", &tup->rx_threshold);
+ if (ret)
+ return dev_err_probe(dev, ret, "missing %s property\n", "rx-threshold");
+
+ soc_fifosize = device_get_match_data(dev);
+ tup->fifosize = *soc_fifosize;
+
+ tup->tx_base = devm_platform_ioremap_resource_byname(pdev, "tx");
+ if (IS_ERR(tup->tx_base))
+ return PTR_ERR(tup->tx_base);
+
+ tup->rx_base = devm_platform_ioremap_resource_byname(pdev, "rx");
+ if (IS_ERR(tup->rx_base))
+ return PTR_ERR(tup->rx_base);
+
+ ret = tegra_utc_setup_port(dev, tup);
+ if (ret)
+ dev_err_probe(dev, ret, "failed to setup uart port\n");
+
+ platform_set_drvdata(pdev, tup);
+
+ return tegra_utc_register_port(tup);
+}
+
+static void tegra_utc_remove(struct platform_device *pdev)
+{
+ struct tegra_utc_port *tup = platform_get_drvdata(pdev);
+
+#if IS_ENABLED(CONFIG_SERIAL_TEGRA_UTC_CONSOLE)
+ unregister_console(&tup->console);
+#endif
+ uart_remove_one_port(&tegra_utc_driver, &tup->port);
+}
+
+static const unsigned int tegra264_utc_soc = 128;
+
+static const struct of_device_id tegra_utc_of_match[] = {
+ { .compatible = "nvidia,tegra264-utc", .data = &tegra264_utc_soc },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tegra_utc_of_match);
+
+static struct platform_driver tegra_utc_platform_driver = {
+ .probe = tegra_utc_probe,
+ .remove = tegra_utc_remove,
+ .driver = {
+ .name = "tegra-utc",
+ .of_match_table = tegra_utc_of_match,
+ },
+};
+
+static int __init tegra_utc_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&tegra_utc_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&tegra_utc_platform_driver);
+ if (ret)
+ uart_unregister_driver(&tegra_utc_driver);
+
+ return ret;
+}
+module_init(tegra_utc_init);
+
+static void __exit tegra_utc_exit(void)
+{
+ platform_driver_unregister(&tegra_utc_platform_driver);
+ uart_unregister_driver(&tegra_utc_driver);
+}
+module_exit(tegra_utc_exit);
+
+MODULE_AUTHOR("Kartik Rajput <kkartik@nvidia.com>");
+MODULE_DESCRIPTION("Tegra UART Trace Controller");
+MODULE_LICENSE("GPL");
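
Both tegra_utc_early_write() and tegra_utc_console_write_atomic() above size each burst as the FIFO capacity minus the current TEGRA_UTC_FIFO_OCCUPANCY reading, clamped to the bytes remaining. A hedged, self-contained sketch of that loop, with a stand-in occupancy function instead of real MMIO (the real driver re-reads occupancy each iteration and relies on the FIFO draining in hardware):

#include <stdio.h>
#include <string.h>

#define FIFO_SIZE 128 /* matches the tegra264 match data above */

static unsigned int fifo_occupancy(void) { return 100; } /* pretend 100 bytes queued */
static void fifo_put(char c) { putchar(c); }

static void burst_write(const char *s, unsigned int n)
{
	while (n) {
		unsigned int burst = FIFO_SIZE - fifo_occupancy(); /* free space */

		if (n < burst)
			burst = n;
		for (unsigned int i = 0; i < burst; i++)
			fifo_put(s[i]);
		s += burst;
		n -= burst;
	}
}

int main(void)
{
	const char *msg = "hello from the trace console\n";

	burst_write(msg, (unsigned int)strlen(msg));
	return 0;
}
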
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index 1d81eeefb068..75542333c54a 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -12,12 +12,14 @@
#include <linux/tty.h>
#include "tty.h"
+#define TTY_AUDIT_BUF_SIZE 4096
+
struct tty_audit_buf {
struct mutex mutex; /* Protects all data below */
dev_t dev; /* The TTY which the data is from */
bool icanon;
size_t valid;
- u8 *data; /* Allocated size N_TTY_BUF_SIZE */
+ u8 *data; /* Allocated size TTY_AUDIT_BUF_SIZE */
};
static struct tty_audit_buf *tty_audit_buf_ref(void)
@@ -37,7 +39,7 @@ static struct tty_audit_buf *tty_audit_buf_alloc(void)
if (!buf)
goto err;
- buf->data = kmalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
+ buf->data = kmalloc(TTY_AUDIT_BUF_SIZE, GFP_KERNEL);
if (!buf->data)
goto err_buf;
@@ -235,14 +237,14 @@ void tty_audit_add_data(const struct tty_struct *tty, const void *data,
do {
size_t run;
- run = N_TTY_BUF_SIZE - buf->valid;
+ run = TTY_AUDIT_BUF_SIZE - buf->valid;
if (run > size)
run = size;
memcpy(buf->data + buf->valid, data, run);
buf->valid += run;
data += run;
size -= run;
- if (buf->valid == N_TTY_BUF_SIZE)
+ if (buf->valid == TTY_AUDIT_BUF_SIZE)
tty_audit_buf_push(buf);
} while (size != 0);
mutex_unlock(&buf->mutex);
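
tty_audit_add_data() above accumulates input into a fixed TTY_AUDIT_BUF_SIZE buffer in runs, pushing the buffer every time it fills. A small user-space sketch of the same accumulate-and-flush loop; push() is a hypothetical stand-in for tty_audit_buf_push():

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 4096

static char buf[BUF_SIZE];
static size_t valid;

static void push(void)
{
	printf("flushing %zu bytes\n", valid);
	valid = 0;
}

static void add_data(const char *data, size_t size)
{
	do {
		size_t run = BUF_SIZE - valid; /* room left in the buffer */

		if (run > size)
			run = size;
		memcpy(buf + valid, data, run);
		valid += run;
		data += run;
		size -= run;
		if (valid == BUF_SIZE) /* buffer full: flush it now */
			push();
	} while (size != 0);
}

int main(void)
{
	char sample[10000] = { 0 };

	add_data(sample, sizeof(sample)); /* flushes twice, keeps 1808 bytes pending */
	return 0;
}
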
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 449dbd216460..ca9b7d7bad2b 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -3329,10 +3329,12 @@ EXPORT_SYMBOL(tty_unregister_device);
* __tty_alloc_driver - allocate tty driver
* @lines: count of lines this driver can handle at most
* @owner: module which is responsible for this driver
- * @flags: some of %TTY_DRIVER_ flags, will be set in driver->flags
+ * @flags: some of enum tty_driver_flag, will be set in driver->flags
*
- * This should not be called directly, some of the provided macros should be
- * used instead. Use IS_ERR() and friends on @retval.
+ * This should not be called directly, tty_alloc_driver() should be used
+ * instead.
+ *
+ * Returns: struct tty_driver or a PTR-encoded error (use IS_ERR() and friends).
*/
struct tty_driver *__tty_alloc_driver(unsigned int lines, struct module *owner,
unsigned long flags)
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index 3be428c16260..4e18031a5ca3 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -367,23 +367,6 @@ int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
}
/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-int ldsem_down_write_trylock(struct ld_semaphore *sem)
-{
- long count = atomic_long_read(&sem->count);
-
- while ((count & LDSEM_ACTIVE_MASK) == 0) {
- if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_WRITE_BIAS)) {
- rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
- lock_acquired(&sem->dep_map, _RET_IP_);
- return 1;
- }
- }
- return 0;
-}
-
-/*
* release a read lock
*/
void ldsem_up_read(struct ld_semaphore *sem)
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index fd1beb10bba7..694aa1457739 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -3468,7 +3468,7 @@ __must_hold(&cdns->lock)
return 0;
}
-static int cdns3_gadget_resume(struct cdns *cdns, bool hibernated)
+static int cdns3_gadget_resume(struct cdns *cdns, bool lost_power)
{
struct cdns3_device *priv_dev = cdns->gadget_dev;
@@ -3476,7 +3476,7 @@ static int cdns3_gadget_resume(struct cdns *cdns, bool hibernated)
return 0;
cdns3_gadget_config(priv_dev);
- if (hibernated)
+ if (lost_power)
writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf);
return 0;
diff --git a/drivers/usb/cdns3/cdns3-ti.c b/drivers/usb/cdns3/cdns3-ti.c
index 040bb91e9c01..302ebf6d8e53 100644
--- a/drivers/usb/cdns3/cdns3-ti.c
+++ b/drivers/usb/cdns3/cdns3-ti.c
@@ -58,6 +58,7 @@ struct cdns_ti {
unsigned vbus_divider:1;
struct clk *usb2_refclk;
struct clk *lpm_clk;
+ int usb2_refclk_rate_code;
};
static const int cdns_ti_rate_table[] = { /* in KHZ */
@@ -98,15 +99,50 @@ static const struct of_dev_auxdata cdns_ti_auxdata[] = {
{},
};
+static void cdns_ti_reset_and_init_hw(struct cdns_ti *data)
+{
+ u32 reg;
+
+ /* assert RESET */
+ reg = cdns_ti_readl(data, USBSS_W1);
+ reg &= ~USBSS_W1_PWRUP_RST;
+ cdns_ti_writel(data, USBSS_W1, reg);
+
+ /* set static config */
+ reg = cdns_ti_readl(data, USBSS_STATIC_CONFIG);
+ reg &= ~USBSS1_STATIC_PLL_REF_SEL_MASK;
+ reg |= data->usb2_refclk_rate_code << USBSS1_STATIC_PLL_REF_SEL_SHIFT;
+
+ reg &= ~USBSS1_STATIC_VBUS_SEL_MASK;
+ if (data->vbus_divider)
+ reg |= 1 << USBSS1_STATIC_VBUS_SEL_SHIFT;
+
+ cdns_ti_writel(data, USBSS_STATIC_CONFIG, reg);
+ reg = cdns_ti_readl(data, USBSS_STATIC_CONFIG);
+
+ /* set USB2_ONLY mode if requested */
+ reg = cdns_ti_readl(data, USBSS_W1);
+ if (data->usb2_only)
+ reg |= USBSS_W1_USB2_ONLY;
+
+ /* set default modestrap */
+ reg |= USBSS_W1_MODESTRAP_SEL;
+ reg &= ~USBSS_W1_MODESTRAP_MASK;
+ reg |= USBSS_MODESTRAP_MODE_NONE << USBSS_W1_MODESTRAP_SHIFT;
+ cdns_ti_writel(data, USBSS_W1, reg);
+
+ /* de-assert RESET */
+ reg |= USBSS_W1_PWRUP_RST;
+ cdns_ti_writel(data, USBSS_W1, reg);
+}
+
static int cdns_ti_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = pdev->dev.of_node;
struct cdns_ti *data;
- int error;
- u32 reg;
- int rate_code, i;
unsigned long rate;
+ int error, i;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -146,7 +182,17 @@ static int cdns_ti_probe(struct platform_device *pdev)
return -EINVAL;
}
- rate_code = i;
+ data->usb2_refclk_rate_code = i;
+ data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
+ data->usb2_only = device_property_read_bool(dev, "ti,usb2-only");
+
+ /*
+ * The call below to pm_runtime_get_sync() MIGHT reset the hardware if it
+ * detects it as uninitialised. We want to enforce a reset at probe,
+ * and so do it manually here. This means the first runtime_resume()
+ * will be a no-op.
+ */
+ cdns_ti_reset_and_init_hw(data);
pm_runtime_enable(dev);
error = pm_runtime_get_sync(dev);
@@ -155,40 +201,6 @@ static int cdns_ti_probe(struct platform_device *pdev)
goto err;
}
- /* assert RESET */
- reg = cdns_ti_readl(data, USBSS_W1);
- reg &= ~USBSS_W1_PWRUP_RST;
- cdns_ti_writel(data, USBSS_W1, reg);
-
- /* set static config */
- reg = cdns_ti_readl(data, USBSS_STATIC_CONFIG);
- reg &= ~USBSS1_STATIC_PLL_REF_SEL_MASK;
- reg |= rate_code << USBSS1_STATIC_PLL_REF_SEL_SHIFT;
-
- reg &= ~USBSS1_STATIC_VBUS_SEL_MASK;
- data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
- if (data->vbus_divider)
- reg |= 1 << USBSS1_STATIC_VBUS_SEL_SHIFT;
-
- cdns_ti_writel(data, USBSS_STATIC_CONFIG, reg);
- reg = cdns_ti_readl(data, USBSS_STATIC_CONFIG);
-
- /* set USB2_ONLY mode if requested */
- reg = cdns_ti_readl(data, USBSS_W1);
- data->usb2_only = device_property_read_bool(dev, "ti,usb2-only");
- if (data->usb2_only)
- reg |= USBSS_W1_USB2_ONLY;
-
- /* set default modestrap */
- reg |= USBSS_W1_MODESTRAP_SEL;
- reg &= ~USBSS_W1_MODESTRAP_MASK;
- reg |= USBSS_MODESTRAP_MODE_NONE << USBSS_W1_MODESTRAP_SHIFT;
- cdns_ti_writel(data, USBSS_W1, reg);
-
- /* de-assert RESET */
- reg |= USBSS_W1_PWRUP_RST;
- cdns_ti_writel(data, USBSS_W1, reg);
-
error = of_platform_populate(node, NULL, cdns_ti_auxdata, dev);
if (error) {
dev_err(dev, "failed to create children: %d\n", error);
@@ -224,6 +236,24 @@ static void cdns_ti_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
}
+static int cdns_ti_runtime_resume(struct device *dev)
+{
+ const u32 mask = USBSS_W1_PWRUP_RST | USBSS_W1_MODESTRAP_SEL;
+ struct cdns_ti *data = dev_get_drvdata(dev);
+ u32 w1;
+
+ w1 = cdns_ti_readl(data, USBSS_W1);
+ if ((w1 & mask) != mask)
+ cdns_ti_reset_and_init_hw(data);
+
+ return 0;
+}
+
+static const struct dev_pm_ops cdns_ti_pm_ops = {
+ RUNTIME_PM_OPS(NULL, cdns_ti_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+};
+
static const struct of_device_id cdns_ti_of_match[] = {
{ .compatible = "ti,j721e-usb", },
{ .compatible = "ti,am64-usb", },
@@ -237,6 +267,7 @@ static struct platform_driver cdns_ti_driver = {
.driver = {
.name = "cdns3-ti",
.of_match_table = cdns_ti_of_match,
+ .pm = pm_ptr(&cdns_ti_pm_ops),
},
};
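
The cdns3-ti rework above initialises the controller manually at probe and has cdns_ti_runtime_resume() re-run cdns_ti_reset_and_init_hw() only when the expected USBSS_W1 bits have been lost, so the first runtime resume is a no-op. A hedged sketch of that check, with made-up bit positions:

#include <stdint.h>
#include <stdio.h>

#define PWRUP_RST	(1u << 0) /* stand-ins for the USBSS_W1 bits */
#define MODESTRAP_SEL	(1u << 4)

static uint32_t w1_reg = PWRUP_RST | MODESTRAP_SEL; /* as left by probe */

static void reset_and_init_hw(void)
{
	puts("re-initialising controller");
	w1_reg = PWRUP_RST | MODESTRAP_SEL;
}

static void runtime_resume(void)
{
	const uint32_t mask = PWRUP_RST | MODESTRAP_SEL;

	if ((w1_reg & mask) != mask) /* context lost: hardware needs re-init */
		reset_and_init_hw();
	else
		puts("still configured, nothing to do");
}

int main(void)
{
	runtime_resume(); /* first resume after probe: no-op */
	w1_reg = 0;	  /* simulate a power-down */
	runtime_resume(); /* now the re-init runs */
	return 0;
}
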
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
index 97edf767ecee..87f310841735 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.c
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -1974,7 +1974,7 @@ static int cdnsp_gadget_suspend(struct cdns *cdns, bool do_wakeup)
return 0;
}
-static int cdnsp_gadget_resume(struct cdns *cdns, bool hibernated)
+static int cdnsp_gadget_resume(struct cdns *cdns, bool lost_power)
{
struct cdnsp_device *pdev = cdns->gadget_dev;
enum usb_device_speed max_speed;
diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
index 98980a23e1c2..1243a5cea91b 100644
--- a/drivers/usb/cdns3/core.c
+++ b/drivers/usb/cdns3/core.c
@@ -524,11 +524,12 @@ EXPORT_SYMBOL_GPL(cdns_suspend);
int cdns_resume(struct cdns *cdns)
{
+ bool power_lost = cdns_power_is_lost(cdns);
enum usb_role real_role;
bool role_changed = false;
int ret = 0;
- if (cdns_power_is_lost(cdns)) {
+ if (power_lost) {
if (!cdns->role_sw) {
real_role = cdns_hw_role_state_machine(cdns);
if (real_role != cdns->role) {
@@ -551,7 +552,7 @@ int cdns_resume(struct cdns *cdns)
}
if (cdns->roles[cdns->role]->resume)
- cdns->roles[cdns->role]->resume(cdns, cdns_power_is_lost(cdns));
+ cdns->roles[cdns->role]->resume(cdns, power_lost);
return 0;
}
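
cdns_resume() above now evaluates cdns_power_is_lost() once and passes the cached result to the role driver's resume callback, so the role fix-up and the callback cannot see different answers. A tiny illustrative sketch; the flip-flopping predicate is contrived to show how two separate reads could disagree:

#include <stdbool.h>
#include <stdio.h>

static int reads;
static bool power_is_lost(void) { return ++reads == 1; } /* changes between calls */

static void role_resume(bool lost_power)
{
	printf("role resume, lost_power=%d\n", lost_power);
}

static void resume(void)
{
	bool power_lost = power_is_lost(); /* read once, use twice */

	if (power_lost)
		puts("restoring role state");
	role_resume(power_lost); /* same answer as the branch above */
}

int main(void)
{
	resume();
	return 0;
}
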
diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h
index 57d47348dc19..921cccf1ca9d 100644
--- a/drivers/usb/cdns3/core.h
+++ b/drivers/usb/cdns3/core.h
@@ -30,7 +30,7 @@ struct cdns_role_driver {
int (*start)(struct cdns *cdns);
void (*stop)(struct cdns *cdns);
int (*suspend)(struct cdns *cdns, bool do_wakeup);
- int (*resume)(struct cdns *cdns, bool hibernated);
+ int (*resume)(struct cdns *cdns, bool lost_power);
const char *name;
#define CDNS_ROLE_STATE_INACTIVE 0
#define CDNS_ROLE_STATE_ACTIVE 1
diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c
index 7ba760ee62e3..f0df114c2b53 100644
--- a/drivers/usb/cdns3/host.c
+++ b/drivers/usb/cdns3/host.c
@@ -138,6 +138,16 @@ static void cdns_host_exit(struct cdns *cdns)
cdns_drd_host_off(cdns);
}
+static int cdns_host_resume(struct cdns *cdns, bool power_lost)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(cdns->host_dev);
+ struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
+
+ priv->power_lost = power_lost;
+
+ return 0;
+}
+
int cdns_host_init(struct cdns *cdns)
{
struct cdns_role_driver *rdrv;
@@ -148,6 +158,7 @@ int cdns_host_init(struct cdns *cdns)
rdrv->start = __cdns_host_init;
rdrv->stop = cdns_host_exit;
+ rdrv->resume = cdns_host_resume;
rdrv->state = CDNS_ROLE_STATE_INACTIVE;
rdrv->name = "host";
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 1394881fde5f..6243d8005f5d 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -440,7 +440,7 @@ static int usbmisc_imx6q_init(struct imx_usbmisc_data *data)
else if (data->oc_pol_configured)
reg &= ~MX6_BM_OVER_CUR_POLARITY;
}
- /* If the polarity is not set keep it as setup by the bootlader */
+ /* If the polarity is not set, keep it as set up by the bootloader */
if (data->pwr_pol == 1)
reg |= MX6_BM_PWR_POLARITY;
writel(reg, usbmisc->base + data->index * 4);
@@ -645,7 +645,7 @@ static int usbmisc_imx7d_init(struct imx_usbmisc_data *data)
else if (data->oc_pol_configured)
reg &= ~MX6_BM_OVER_CUR_POLARITY;
}
- /* If the polarity is not set keep it as setup by the bootlader */
+ /* If the polarity is not set, keep it as set up by the bootloader */
if (data->pwr_pol == 1)
reg |= MX6_BM_PWR_POLARITY;
writel(reg, usbmisc->base);
@@ -939,7 +939,7 @@ static int usbmisc_imx7ulp_init(struct imx_usbmisc_data *data)
else if (data->oc_pol_configured)
reg &= ~MX6_BM_OVER_CUR_POLARITY;
}
- /* If the polarity is not set keep it as setup by the bootlader */
+ /* If the polarity is not set, keep it as set up by the bootloader */
if (data->pwr_pol == 1)
reg |= MX6_BM_PWR_POLARITY;
@@ -1185,7 +1185,7 @@ int imx_usbmisc_suspend(struct imx_usbmisc_data *data, bool wakeup)
if (usbmisc->ops->hsic_set_clk && data->hsic)
ret = usbmisc->ops->hsic_set_clk(data, false);
if (ret) {
- dev_err(data->dev, "set_wakeup failed, ret=%d\n", ret);
+ dev_err(data->dev, "hsic_set_clk failed, ret=%d\n", ret);
return ret;
}
@@ -1224,7 +1224,7 @@ int imx_usbmisc_resume(struct imx_usbmisc_data *data, bool wakeup)
if (usbmisc->ops->hsic_set_clk && data->hsic)
ret = usbmisc->ops->hsic_set_clk(data, true);
if (ret) {
- dev_err(data->dev, "set_wakeup failed, ret=%d\n", ret);
+ dev_err(data->dev, "hsic_set_clk failed, ret=%d\n", ret);
goto hsic_set_clk_fail;
}
diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c
index aa710b50791b..1e36be2a28fd 100644
--- a/drivers/usb/common/usb-conn-gpio.c
+++ b/drivers/usb/common/usb-conn-gpio.c
@@ -158,7 +158,7 @@ static int usb_conn_psy_register(struct usb_conn_info *info)
struct device *dev = info->dev;
struct power_supply_desc *desc = &info->desc;
struct power_supply_config cfg = {
- .of_node = dev->of_node,
+ .fwnode = dev_fwnode(dev),
};
desc->name = "usb-charger";
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index f7bf8d1de3ad..13bd4ec4ea5f 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -64,6 +64,37 @@ static void usb_parse_ssp_isoc_endpoint_companion(struct device *ddev,
memcpy(&ep->ssp_isoc_ep_comp, desc, USB_DT_SSP_ISOC_EP_COMP_SIZE);
}
+static void usb_parse_eusb2_isoc_endpoint_companion(struct device *ddev,
+ int cfgno, int inum, int asnum, struct usb_host_endpoint *ep,
+ unsigned char *buffer, int size)
+{
+ struct usb_eusb2_isoc_ep_comp_descriptor *desc;
+ struct usb_descriptor_header *h;
+
+ /*
+ * The eUSB2 isochronous endpoint companion descriptor for this endpoint
+ * shall be declared before the next endpoint or interface descriptor.
+ */
+ while (size >= USB_DT_EUSB2_ISOC_EP_COMP_SIZE) {
+ h = (struct usb_descriptor_header *)buffer;
+
+ if (h->bDescriptorType == USB_DT_EUSB2_ISOC_ENDPOINT_COMP) {
+ desc = (struct usb_eusb2_isoc_ep_comp_descriptor *)buffer;
+ ep->eusb2_isoc_ep_comp = *desc;
+ return;
+ }
+ if (h->bDescriptorType == USB_DT_ENDPOINT ||
+ h->bDescriptorType == USB_DT_INTERFACE)
+ break;
+
+ buffer += h->bLength;
+ size -= h->bLength;
+ }
+
+ dev_notice(ddev, "No eUSB2 isoc ep %d companion for config %d interface %d altsetting %d\n",
+ ep->desc.bEndpointAddress, cfgno, inum, asnum);
+}
+
static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
int inum, int asnum, struct usb_host_endpoint *ep,
unsigned char *buffer, int size)
@@ -258,8 +289,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
int n, i, j, retval;
unsigned int maxp;
const unsigned short *maxpacket_maxes;
+ u16 bcdUSB;
d = (struct usb_endpoint_descriptor *) buffer;
+ bcdUSB = le16_to_cpu(udev->descriptor.bcdUSB);
buffer += d->bLength;
size -= d->bLength;
@@ -409,15 +442,17 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
/*
* Validate the wMaxPacketSize field.
- * Some devices have isochronous endpoints in altsetting 0;
- * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0
- * (see the end of section 5.6.3), so don't warn about them.
+ * eUSB2 devices (see USB 2.0 Double Isochronous IN ECN 9.6.6 Endpoint)
+ * and devices with isochronous endpoints in altsetting 0 (see USB 2.0
+ * end of section 5.6.3) have wMaxPacketSize = 0.
+ * So don't warn about those.
*/
maxp = le16_to_cpu(endpoint->desc.wMaxPacketSize);
- if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
+
+ if (maxp == 0 && bcdUSB != 0x0220 &&
+ !(usb_endpoint_xfer_isoc(d) && asnum == 0))
dev_notice(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
cfgno, inum, asnum, d->bEndpointAddress);
- }
/* Find the highest legal maxpacket size for this endpoint */
i = 0; /* additional transactions per microframe */
@@ -465,6 +500,12 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
maxp);
}
+ /* Parse a possible eUSB2 periodic endpoint companion descriptor */
+ if (bcdUSB == 0x0220 && d->wMaxPacketSize == 0 &&
+ (usb_endpoint_xfer_isoc(d) || usb_endpoint_xfer_int(d)))
+ usb_parse_eusb2_isoc_endpoint_companion(ddev, cfgno, inum, asnum,
+ endpoint, buffer, size);
+
/* Parse a possible SuperSpeed endpoint companion descriptor */
if (udev->speed >= USB_SPEED_SUPER)
usb_parse_ss_endpoint_companion(ddev, cfgno,
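
usb_parse_eusb2_isoc_endpoint_companion() above scans forward through the remaining configuration bytes one descriptor header at a time, returning on the companion descriptor and giving up at the next endpoint or interface descriptor. A hedged user-space sketch of that walk; the type values here are stand-ins, not the real USB_DT_* constants:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct desc_header { uint8_t bLength; uint8_t bDescriptorType; };

#define DT_INTERFACE	0x04
#define DT_ENDPOINT	0x05
#define DT_COMPANION	0x30 /* stand-in for the eUSB2 companion type */

static const struct desc_header *find_companion(const uint8_t *buf, int size)
{
	while (size >= (int)sizeof(struct desc_header)) {
		const struct desc_header *h = (const struct desc_header *)buf;

		if (!h->bLength)
			break; /* malformed descriptor: avoid looping forever */
		if (h->bDescriptorType == DT_COMPANION)
			return h;
		if (h->bDescriptorType == DT_ENDPOINT ||
		    h->bDescriptorType == DT_INTERFACE)
			break; /* scanned past this endpoint's descriptors */
		buf += h->bLength;
		size -= h->bLength;
	}
	return NULL;
}

int main(void)
{
	const uint8_t descs[] = { 2, 0x25, 2, DT_COMPANION, 2, DT_ENDPOINT };

	printf("companion %sfound\n",
	       find_companion(descs, (int)sizeof(descs)) ? "" : "not ");
	return 0;
}
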
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index a75cf1f6d741..46026b331267 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1609,7 +1609,7 @@ int usb_hcd_unlink_urb (struct urb *urb, int status)
if (retval == 0)
retval = -EINPROGRESS;
else if (retval != -EIDRM && retval != -EBUSY)
- dev_dbg(&udev->dev, "hcd_unlink_urb %pK fail %d\n",
+ dev_dbg(&udev->dev, "hcd_unlink_urb %p fail %d\n",
urb, retval);
usb_put_dev(udev);
}
@@ -1786,7 +1786,7 @@ rescan:
/* kick hcd */
unlink1(hcd, urb, -ESHUTDOWN);
dev_dbg (hcd->self.controller,
- "shutdown urb %pK ep%d%s-%s\n",
+ "shutdown urb %p ep%d%s-%s\n",
urb, usb_endpoint_num(&ep->desc),
is_in ? "in" : "out",
usb_ep_type_string(usb_endpoint_type(&ep->desc)));
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index dcba4281ea48..8c7f9cc785bb 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4708,8 +4708,6 @@ void usb_ep0_reinit(struct usb_device *udev)
}
EXPORT_SYMBOL_GPL(usb_ep0_reinit);
-#define usb_sndaddr0pipe() (PIPE_CONTROL << 30)
-
static int hub_set_address(struct usb_device *udev, int devnum)
{
int retval;
@@ -4733,7 +4731,7 @@ static int hub_set_address(struct usb_device *udev, int devnum)
if (hcd->driver->address_device)
retval = hcd->driver->address_device(hcd, udev, timeout_ms);
else
- retval = usb_control_msg(udev, usb_sndaddr0pipe(),
+ retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_SET_ADDRESS, 0, devnum, 0,
NULL, 0, timeout_ms);
if (retval == 0) {
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 7576920e2d5a..5e52a35486af 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -376,7 +376,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
if (!urb || !urb->complete)
return -EINVAL;
if (urb->hcpriv) {
- WARN_ONCE(1, "URB %pK submitted while active\n", urb);
+ WARN_ONCE(1, "URB %p submitted while active\n", urb);
return -EBUSY;
}
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 9919ab725d54..c3d24312db0f 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -43,6 +43,7 @@ int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
/* Backup global regs */
gr = &hsotg->gr_backup;
+ gr->gintsts = dwc2_readl(hsotg, GINTSTS);
gr->gotgctl = dwc2_readl(hsotg, GOTGCTL);
gr->gintmsk = dwc2_readl(hsotg, GINTMSK);
gr->gahbcfg = dwc2_readl(hsotg, GAHBCFG);
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 2bd74f3033ed..34127b890b2a 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -667,6 +667,7 @@ struct dwc2_hw_params {
/**
* struct dwc2_gregs_backup - Holds global registers state before
* entering partial power down
+ * @gintsts: Backup of GINTSTS register
* @gotgctl: Backup of GOTGCTL register
* @gintmsk: Backup of GINTMSK register
* @gahbcfg: Backup of GAHBCFG register
@@ -683,6 +684,7 @@ struct dwc2_hw_params {
* @valid: True if registers values backuped.
*/
struct dwc2_gregs_backup {
+ u32 gintsts;
u32 gotgctl;
u32 gintmsk;
u32 gahbcfg;
@@ -1127,6 +1129,9 @@ struct dwc2_hsotg {
#define DWC2_FS_IOT_ID 0x55310000
#define DWC2_HS_IOT_ID 0x55320000
+#define DWC2_RESTORE_DCTL BIT(0)
+#define DWC2_RESTORE_DCFG BIT(1)
+
#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
union dwc2_hcd_internal_flags {
u32 d32;
@@ -1420,7 +1425,7 @@ int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode);
#define dwc2_is_device_connected(hsotg) (hsotg->connected)
#define dwc2_is_device_enabled(hsotg) (hsotg->enabled)
int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg);
-int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup);
+int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, unsigned int flags);
int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg);
int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg,
int rem_wakeup, int reset);
@@ -1435,6 +1440,9 @@ int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg);
int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg);
void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg);
void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg);
+int dwc2_gadget_backup_critical_registers(struct dwc2_hsotg *hsotg);
+int dwc2_gadget_restore_critical_registers(struct dwc2_hsotg *hsotg,
+ unsigned int flags);
static inline void dwc2_clear_fifo_map(struct dwc2_hsotg *hsotg)
{ hsotg->fifo_map = 0; }
#else
@@ -1459,7 +1467,7 @@ static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg,
static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg,
- int remote_wakeup)
+ unsigned int flags)
{ return 0; }
static inline int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg)
{ return 0; }
@@ -1482,6 +1490,11 @@ static inline int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg)
{ return 0; }
static inline void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg) {}
static inline void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg) {}
+static inline int dwc2_gadget_backup_critical_registers(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline int dwc2_gadget_restore_critical_registers(struct dwc2_hsotg *hsotg,
+ unsigned int flags)
+{ return 0; }
static inline void dwc2_clear_fifo_map(struct dwc2_hsotg *hsotg) {}
#endif
@@ -1505,6 +1518,8 @@ int dwc2_host_exit_partial_power_down(struct dwc2_hsotg *hsotg,
void dwc2_host_enter_clock_gating(struct dwc2_hsotg *hsotg);
void dwc2_host_exit_clock_gating(struct dwc2_hsotg *hsotg, int rem_wakeup);
bool dwc2_host_can_poweroff_phy(struct dwc2_hsotg *dwc2);
+int dwc2_host_backup_critical_registers(struct dwc2_hsotg *hsotg);
+int dwc2_host_restore_critical_registers(struct dwc2_hsotg *hsotg);
static inline void dwc2_host_schedule_phy_reset(struct dwc2_hsotg *hsotg)
{ schedule_work(&hsotg->phy_reset_work); }
#else
@@ -1544,6 +1559,10 @@ static inline void dwc2_host_exit_clock_gating(struct dwc2_hsotg *hsotg,
int rem_wakeup) {}
static inline bool dwc2_host_can_poweroff_phy(struct dwc2_hsotg *dwc2)
{ return false; }
+static inline int dwc2_host_backup_critical_registers(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline int dwc2_host_restore_critical_registers(struct dwc2_hsotg *hsotg)
+{ return 0; }
static inline void dwc2_host_schedule_phy_reset(struct dwc2_hsotg *hsotg) {}
#endif
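
The DWC2_RESTORE_DCTL/DWC2_RESTORE_DCFG flags introduced above let each exit path state precisely which register groups dwc2_restore_device_registers() should write back, replacing the overloaded remote_wakeup argument. A minimal sketch of the flag scheme, with illustrative output in place of register writes:

#include <stdio.h>

#define RESTORE_DCTL	(1u << 0) /* mirrors DWC2_RESTORE_DCTL */
#define RESTORE_DCFG	(1u << 1) /* mirrors DWC2_RESTORE_DCFG */

static void restore_device_registers(unsigned int flags)
{
	if (flags & RESTORE_DCFG)
		puts("restoring DCFG");
	if (flags & RESTORE_DCTL)
		puts("restoring DCTL");
	puts("restoring the unconditionally restored registers");
}

int main(void)
{
	/* partial power down exit restores both ... */
	restore_device_registers(RESTORE_DCTL | RESTORE_DCFG);
	/* ... while a remote-wakeup hibernation exit restores neither */
	restore_device_registers(0);
	return 0;
}
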
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index bd4c788f03bc..300ea4969f0c 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -5204,11 +5204,11 @@ int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
* if controller power were disabled.
*
* @hsotg: Programming view of the DWC_otg controller
- * @remote_wakeup: Indicates whether resume is initiated by Device or Host.
+ * @flags: Defines which registers should be restored.
*
* Return: 0 if successful, negative error code otherwise
*/
-int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup)
+int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, unsigned int flags)
{
struct dwc2_dregs_backup *dr;
int i;
@@ -5224,7 +5224,10 @@ int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup)
}
dr->valid = false;
- if (!remote_wakeup)
+ if (flags & DWC2_RESTORE_DCFG)
+ dwc2_writel(hsotg, dr->dcfg, DCFG);
+
+ if (flags & DWC2_RESTORE_DCTL)
dwc2_writel(hsotg, dr->dctl, DCTL);
dwc2_writel(hsotg, dr->daintmsk, DAINTMSK);
@@ -5310,6 +5313,49 @@ void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "GREFCLK=0x%08x\n", dwc2_readl(hsotg, GREFCLK));
}
+int dwc2_gadget_backup_critical_registers(struct dwc2_hsotg *hsotg)
+{
+ int ret;
+
+ /* Backup all registers */
+ ret = dwc2_backup_global_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to backup global registers\n",
+ __func__);
+ return ret;
+ }
+
+ ret = dwc2_backup_device_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to backup device registers\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+int dwc2_gadget_restore_critical_registers(struct dwc2_hsotg *hsotg,
+ unsigned int flags)
+{
+ int ret;
+
+ ret = dwc2_restore_global_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to restore registers\n",
+ __func__);
+ return ret;
+ }
+ ret = dwc2_restore_device_registers(hsotg, flags);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to restore device registers\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
/**
* dwc2_gadget_enter_hibernation() - Put controller in Hibernation.
*
@@ -5327,18 +5373,9 @@ int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg)
/* Change to L2(suspend) state */
hsotg->lx_state = DWC2_L2;
dev_dbg(hsotg->dev, "Start of hibernation completed\n");
- ret = dwc2_backup_global_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to backup global registers\n",
- __func__);
- return ret;
- }
- ret = dwc2_backup_device_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to backup device registers\n",
- __func__);
+ ret = dwc2_gadget_backup_critical_registers(hsotg);
+ if (ret)
return ret;
- }
gpwrdn = GPWRDN_PWRDNRSTN;
udelay(10);
@@ -5415,6 +5452,7 @@ int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg,
u32 gpwrdn;
u32 dctl;
int ret = 0;
+ unsigned int flags = 0;
struct dwc2_gregs_backup *gr;
struct dwc2_dregs_backup *dr;
@@ -5477,6 +5515,7 @@ int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg,
dctl = dwc2_readl(hsotg, DCTL);
dctl |= DCTL_PWRONPRGDONE;
dwc2_writel(hsotg, dctl, DCTL);
+ flags |= DWC2_RESTORE_DCTL;
}
/* Wait for interrupts which must be cleared */
mdelay(2);
@@ -5484,20 +5523,9 @@ int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg,
dwc2_writel(hsotg, 0xffffffff, GINTSTS);
/* Restore global registers */
- ret = dwc2_restore_global_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to restore registers\n",
- __func__);
- return ret;
- }
-
- /* Restore device registers */
- ret = dwc2_restore_device_registers(hsotg, rem_wakeup);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to restore device registers\n",
- __func__);
+ ret = dwc2_gadget_restore_critical_registers(hsotg, flags);
+ if (ret)
return ret;
- }
if (rem_wakeup) {
mdelay(10);
@@ -5531,19 +5559,9 @@ int dwc2_gadget_enter_partial_power_down(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "Entering device partial power down started.\n");
/* Backup all registers */
- ret = dwc2_backup_global_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to backup global registers\n",
- __func__);
- return ret;
- }
-
- ret = dwc2_backup_device_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to backup device registers\n",
- __func__);
+ ret = dwc2_gadget_backup_critical_registers(hsotg);
+ if (ret)
return ret;
- }
/*
* Clear any pending interrupts since dwc2 will not be able to
@@ -5590,11 +5608,8 @@ int dwc2_gadget_exit_partial_power_down(struct dwc2_hsotg *hsotg,
{
u32 pcgcctl;
u32 dctl;
- struct dwc2_dregs_backup *dr;
int ret = 0;
- dr = &hsotg->dr_backup;
-
dev_dbg(hsotg->dev, "Exiting device partial Power Down started.\n");
pcgcctl = dwc2_readl(hsotg, PCGCTL);
@@ -5611,21 +5626,10 @@ int dwc2_gadget_exit_partial_power_down(struct dwc2_hsotg *hsotg,
udelay(100);
if (restore) {
- ret = dwc2_restore_global_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to restore registers\n",
- __func__);
- return ret;
- }
- /* Restore DCFG */
- dwc2_writel(hsotg, dr->dcfg, DCFG);
-
- ret = dwc2_restore_device_registers(hsotg, 0);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to restore device registers\n",
- __func__);
+ ret = dwc2_gadget_restore_critical_registers(hsotg, DWC2_RESTORE_DCTL |
+ DWC2_RESTORE_DCFG);
+ if (ret)
return ret;
- }
}
/* Set the Power-On Programming done bit */
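The remote-wakeup integer has become a restore bitmask, letting each exit path state exactly which device registers to rewrite. A sketch of how the callers above compose it; the DWC2_RESTORE_* values are assumptions, since their definitions sit outside this excerpt:

/* Assumed flag values - the real definitions live in the driver headers */
#define DWC2_RESTORE_DCFG	BIT(0)
#define DWC2_RESTORE_DCTL	BIT(1)

/* Hibernation exit: DCTL is rewritten only when power-on programming was
 * signalled (the old "!remote_wakeup" case); DCFG is left alone */
unsigned int flags = 0;
if (!rem_wakeup)
	flags |= DWC2_RESTORE_DCTL;
ret = dwc2_gadget_restore_critical_registers(hsotg, flags);

/* Partial power down exit: both registers were lost, so restore both */
ret = dwc2_gadget_restore_critical_registers(hsotg,
					     DWC2_RESTORE_DCFG | DWC2_RESTORE_DCTL);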
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 8c3941ecaaf5..869245238d6c 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -5474,6 +5474,49 @@ int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
return 0;
}
+int dwc2_host_backup_critical_registers(struct dwc2_hsotg *hsotg)
+{
+ int ret;
+
+ /* Backup all registers */
+ ret = dwc2_backup_global_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to backup global registers\n",
+ __func__);
+ return ret;
+ }
+
+ ret = dwc2_backup_host_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to backup host registers\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+int dwc2_host_restore_critical_registers(struct dwc2_hsotg *hsotg)
+{
+ int ret;
+
+ ret = dwc2_restore_global_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to restore registers\n",
+ __func__);
+ return ret;
+ }
+
+ ret = dwc2_restore_host_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to restore host registers\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
/**
* dwc2_host_enter_hibernation() - Put controller in Hibernation.
*
@@ -5489,18 +5532,9 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
u32 gpwrdn;
dev_dbg(hsotg->dev, "Preparing host for hibernation\n");
- ret = dwc2_backup_global_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to backup global registers\n",
- __func__);
- return ret;
- }
- ret = dwc2_backup_host_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to backup host registers\n",
- __func__);
+ ret = dwc2_host_backup_critical_registers(hsotg);
+ if (ret)
return ret;
- }
/* Enter USB Suspend Mode */
hprt0 = dwc2_readl(hsotg, HPRT0);
@@ -5694,20 +5728,9 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
dwc2_writel(hsotg, 0xffffffff, GINTSTS);
/* Restore global registers */
- ret = dwc2_restore_global_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to restore registers\n",
- __func__);
+ ret = dwc2_host_restore_critical_registers(hsotg);
+ if (ret)
return ret;
- }
-
- /* Restore host registers */
- ret = dwc2_restore_host_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to restore host registers\n",
- __func__);
- return ret;
- }
if (rem_wakeup) {
dwc2_hcd_rem_wakeup(hsotg);
@@ -5774,19 +5797,9 @@ int dwc2_host_enter_partial_power_down(struct dwc2_hsotg *hsotg)
dev_warn(hsotg->dev, "Suspend wasn't generated\n");
/* Backup all registers */
- ret = dwc2_backup_global_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to backup global registers\n",
- __func__);
- return ret;
- }
-
- ret = dwc2_backup_host_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to backup host registers\n",
- __func__);
+ ret = dwc2_host_backup_critical_registers(hsotg);
+ if (ret)
return ret;
- }
/*
* Clear any pending interrupts since dwc2 will not be able to
@@ -5855,19 +5868,9 @@ int dwc2_host_exit_partial_power_down(struct dwc2_hsotg *hsotg,
udelay(100);
if (restore) {
- ret = dwc2_restore_global_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to restore registers\n",
- __func__);
- return ret;
- }
-
- ret = dwc2_restore_host_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to restore host registers\n",
- __func__);
+ ret = dwc2_host_restore_critical_registers(hsotg);
+ if (ret)
return ret;
- }
}
/* Drive resume signaling and exit suspend mode on the port. */
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 91c80a92d9b8..12b4dc07d08a 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -685,6 +685,14 @@ static int __maybe_unused dwc2_suspend(struct device *dev)
regulator_disable(dwc2->usb33d);
}
+ if (is_device_mode)
+ ret = dwc2_gadget_backup_critical_registers(dwc2);
+ else
+ ret = dwc2_host_backup_critical_registers(dwc2);
+
+ if (ret)
+ return ret;
+
if (dwc2->ll_hw_enabled &&
(is_device_mode || dwc2_host_can_poweroff_phy(dwc2))) {
ret = __dwc2_lowlevel_hw_disable(dwc2);
@@ -694,6 +702,24 @@ static int __maybe_unused dwc2_suspend(struct device *dev)
return ret;
}
+static int dwc2_restore_critical_registers(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_gregs_backup *gr;
+
+ gr = &hsotg->gr_backup;
+
+ if (!gr->valid) {
+ dev_err(hsotg->dev, "No valid register backup, failed to restore\n");
+ return -EINVAL;
+ }
+
+ if (gr->gintsts & GINTSTS_CURMODE_HOST)
+ return dwc2_host_restore_critical_registers(hsotg);
+
+ return dwc2_gadget_restore_critical_registers(hsotg, DWC2_RESTORE_DCTL |
+ DWC2_RESTORE_DCFG);
+}
+
static int __maybe_unused dwc2_resume(struct device *dev)
{
struct dwc2_hsotg *dwc2 = dev_get_drvdata(dev);
@@ -706,6 +732,18 @@ static int __maybe_unused dwc2_resume(struct device *dev)
}
dwc2->phy_off_for_suspend = false;
+ /*
+ * During suspend it's possible that the power domain for the
+ * DWC2 controller is disabled and all register values get lost.
+ * If GUSBCFG reads back as uninitialized, the register contents
+ * were lost and must be restored.
+ */
+ if (!(dwc2_readl(dwc2, GUSBCFG) & GUSBCFG_TOUTCAL_MASK)) {
+ ret = dwc2_restore_critical_registers(dwc2);
+ if (ret)
+ return ret;
+ }
+
if (dwc2->params.activate_stm_id_vb_detection) {
unsigned long flags;
u32 ggpio, gotgctl;
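The resume path treats GUSBCFG as a canary: TOUTCAL is programmed non-zero during core init, so reading the field back as zero implies the power domain was cut and the backup taken in dwc2_suspend() must be replayed. A condensed sketch of the detect-and-dispatch logic above (gr_backup.gintsts holds the mode that was live when the backup was taken):

static int dwc2_maybe_restore(struct dwc2_hsotg *dwc2)
{
	/* non-zero TOUTCAL: the register file survived suspend */
	if (dwc2_readl(dwc2, GUSBCFG) & GUSBCFG_TOUTCAL_MASK)
		return 0;

	if (dwc2->gr_backup.gintsts & GINTSTS_CURMODE_HOST)
		return dwc2_host_restore_critical_registers(dwc2);

	return dwc2_gadget_restore_critical_registers(dwc2,
			DWC2_RESTORE_DCTL | DWC2_RESTORE_DCFG);
}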
diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
index c158364bc03e..9db8f3ca493d 100644
--- a/drivers/usb/dwc3/dwc3-am62.c
+++ b/drivers/usb/dwc3/dwc3-am62.c
@@ -153,11 +153,11 @@ static int phy_syscon_pll_refclk(struct dwc3_am62 *am62)
{
struct device *dev = am62->dev;
struct device_node *node = dev->of_node;
- struct of_phandle_args args;
struct regmap *syscon;
int ret;
- syscon = syscon_regmap_lookup_by_phandle(node, "ti,syscon-phy-pll-refclk");
+ syscon = syscon_regmap_lookup_by_phandle_args(node, "ti,syscon-phy-pll-refclk",
+ 1, &am62->offset);
if (IS_ERR(syscon)) {
dev_err(dev, "unable to get ti,syscon-phy-pll-refclk regmap\n");
return PTR_ERR(syscon);
@@ -165,14 +165,6 @@ static int phy_syscon_pll_refclk(struct dwc3_am62 *am62)
am62->syscon = syscon;
- ret = of_parse_phandle_with_fixed_args(node, "ti,syscon-phy-pll-refclk", 1,
- 0, &args);
- if (ret)
- return ret;
-
- of_node_put(args.np);
- am62->offset = args.args[0];
-
/* Core voltage. PHY_CORE_VOLTAGE bit Recommended to be 0 always */
ret = regmap_update_bits(am62->syscon, am62->offset, PHY_CORE_VOLTAGE_MASK, 0);
if (ret) {
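syscon_regmap_lookup_by_phandle_args() folds the old two-step dance - look up the regmap, then re-parse the phandle for its fixed argument - into one call, with the trailing cells returned through the out array. Roughly, for a property like "ti,syscon-phy-pll-refclk = <&scm_conf OFFSET>" (sketch):

unsigned int offset;
struct regmap *syscon;

syscon = syscon_regmap_lookup_by_phandle_args(node,
					      "ti,syscon-phy-pll-refclk",
					      1,	/* one argument cell */
					      &offset);	/* receives OFFSET */
if (IS_ERR(syscon))
	return PTR_ERR(syscon);
/* no of_node_put() needed here, which is why that line disappears above */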
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index f5d963fae9e0..de686b9e6404 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -163,6 +163,12 @@ static const struct dwc3_exynos_driverdata exynos7_drvdata = {
.suspend_clk_idx = 1,
};
+static const struct dwc3_exynos_driverdata exynos7870_drvdata = {
+ .clk_names = { "bus_early", "ref", "ctrl" },
+ .num_clks = 3,
+ .suspend_clk_idx = -1,
+};
+
static const struct dwc3_exynos_driverdata exynos850_drvdata = {
.clk_names = { "bus_early", "ref" },
.num_clks = 2,
@@ -186,6 +192,9 @@ static const struct of_device_id exynos_dwc3_match[] = {
.compatible = "samsung,exynos7-dwusb3",
.data = &exynos7_drvdata,
}, {
+ .compatible = "samsung,exynos7870-dwusb3",
+ .data = &exynos7870_drvdata,
+ }, {
.compatible = "samsung,exynos850-dwusb3",
.data = &exynos850_drvdata,
}, {
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 052852f80146..54a4ee2b90b7 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -148,11 +148,21 @@ static const struct property_entry dwc3_pci_intel_byt_properties[] = {
{}
};
+/*
+ * Intel Merrifield SoC uses these endpoints for tracing, and they cannot
+ * be reallocated while in use because the sideband flow-control signals
+ * are hardwired to certain endpoints:
+ * - 1 High BW Bulk IN (IN#1) (RTIT)
+ * - 1 1KB BW Bulk IN (IN#8) + 1 1KB BW Bulk OUT (Run Control) (OUT#8)
+ */
+static const u8 dwc3_pci_mrfld_reserved_endpoints[] = { 3, 16, 17 };
+
static const struct property_entry dwc3_pci_mrfld_properties[] = {
PROPERTY_ENTRY_STRING("dr_mode", "otg"),
PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+ PROPERTY_ENTRY_U8_ARRAY("snps,reserved-endpoints", dwc3_pci_mrfld_reserved_endpoints),
PROPERTY_ENTRY_BOOL("snps,usb2-gadget-lpm-disable"),
PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{}
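The reserved values decode with dwc3's flat endpoint indexing, where the index is the endpoint number doubled, plus one for IN. A worked check against the comment above (the helper name is illustrative):

/* index = number * 2 + (IN ? 1 : 0):
 *   IN#1  -> 1 * 2 + 1 = 3	(RTIT trace)
 *   OUT#8 -> 8 * 2 + 0 = 16	(Run Control OUT)
 *   IN#8  -> 8 * 2 + 1 = 17	(Run Control IN)
 * which reproduces the { 3, 16, 17 } table above. */
static inline u8 dwc3_flat_ep_index(u8 number, bool is_in)
{
	return number * 2 + (is_in ? 1 : 0);
}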
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index ef7c43008946..5d513decaacd 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -225,7 +225,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
dwc3_data->syscfg_reg_off = res->start;
- dev_vdbg(dev, "glue-logic addr 0x%pK, syscfg-reg offset 0x%x\n",
+ dev_vdbg(dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n",
dwc3_data->glue_base, dwc3_data->syscfg_reg_off);
struct device_node *child __free(device_node) = of_get_compatible_child(node,
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 89a4dc8ebf94..47e73c4ed62d 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -547,6 +547,7 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
int dwc3_gadget_start_config(struct dwc3 *dwc, unsigned int resource_index)
{
struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3_ep *dep;
u32 cmd;
int i;
int ret;
@@ -563,8 +564,13 @@ int dwc3_gadget_start_config(struct dwc3 *dwc, unsigned int resource_index)
return ret;
/* Reset resource allocation flags */
- for (i = resource_index; i < dwc->num_eps && dwc->eps[i]; i++)
- dwc->eps[i]->flags &= ~DWC3_EP_RESOURCE_ALLOCATED;
+ for (i = resource_index; i < dwc->num_eps; i++) {
+ dep = dwc->eps[i];
+ if (!dep)
+ continue;
+
+ dep->flags &= ~DWC3_EP_RESOURCE_ALLOCATED;
+ }
return 0;
}
@@ -751,9 +757,11 @@ void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc)
dwc->last_fifo_depth = fifo_depth;
/* Clear existing TXFIFO for all IN eps except ep0 */
- for (num = 3; num < min_t(int, dwc->num_eps, DWC3_ENDPOINTS_NUM);
- num += 2) {
+ for (num = 3; num < min_t(int, dwc->num_eps, DWC3_ENDPOINTS_NUM); num += 2) {
dep = dwc->eps[num];
+ if (!dep)
+ continue;
+
/* Don't change TXFRAMNUM on usb31 version */
size = DWC3_IP_IS(DWC3) ? 0 :
dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1)) &
@@ -1971,12 +1979,12 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
return -ESHUTDOWN;
}
- if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
+ if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
&req->request, req->dep->name))
return -EINVAL;
if (WARN(req->status < DWC3_REQUEST_STATUS_COMPLETED,
- "%s: request %pK already in flight\n",
+ "%s: request %p already in flight\n",
dep->name, &req->request))
return -EINVAL;
@@ -2165,7 +2173,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
}
}
- dev_err(dwc->dev, "request %pK was not queued to %s\n",
+ dev_err(dwc->dev, "request %p was not queued to %s\n",
request, ep->name);
ret = -EINVAL;
out:
@@ -3429,14 +3437,53 @@ static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
return 0;
}
+static int dwc3_gadget_get_reserved_endpoints(struct dwc3 *dwc, const char *propname,
+ u8 *eps, u8 num)
+{
+ u8 count;
+ int ret;
+
+ if (!device_property_present(dwc->dev, propname))
+ return 0;
+
+ ret = device_property_count_u8(dwc->dev, propname);
+ if (ret < 0)
+ return ret;
+ count = ret;
+
+ ret = device_property_read_u8_array(dwc->dev, propname, eps, min(num, count));
+ if (ret)
+ return ret;
+
+ return count;
+}
+
static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total)
{
+ const char *propname = "snps,reserved-endpoints";
u8 epnum;
+ u8 reserved_eps[DWC3_ENDPOINTS_NUM];
+ u8 count;
+ u8 num;
+ int ret;
INIT_LIST_HEAD(&dwc->gadget->ep_list);
+ ret = dwc3_gadget_get_reserved_endpoints(dwc, propname,
+ reserved_eps, ARRAY_SIZE(reserved_eps));
+ if (ret < 0) {
+ dev_err(dwc->dev, "failed to read %s\n", propname);
+ return ret;
+ }
+ count = ret;
+
for (epnum = 0; epnum < total; epnum++) {
- int ret;
+ for (num = 0; num < count; num++) {
+ if (epnum == reserved_eps[num])
+ break;
+ }
+ if (num < count)
+ continue;
ret = dwc3_gadget_init_endpoint(dwc, epnum);
if (ret)
@@ -3703,6 +3750,8 @@ out:
for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
dep = dwc->eps[i];
+ if (!dep)
+ continue;
if (!(dep->flags & DWC3_EP_ENABLED))
continue;
@@ -3852,6 +3901,10 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
u8 epnum = event->endpoint_number;
dep = dwc->eps[epnum];
+ if (!dep) {
+ dev_warn(dwc->dev, "spurious event, endpoint %u is not allocated\n", epnum);
+ return;
+ }
if (!(dep->flags & DWC3_EP_ENABLED)) {
if ((epnum > 1) && !(dep->flags & DWC3_EP_TRANSFER_STARTED))
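Because reserved endpoints are skipped at init, dwc->eps[] may now contain NULL holes, which is why every walk over the array in this file grew a guard. The pattern, as a sketch:

for (i = 0; i < dwc->num_eps; i++) {
	struct dwc3_ep *dep = dwc->eps[i];

	if (!dep)	/* hole left by "snps,reserved-endpoints" */
		continue;

	/* ... only now is it safe to touch dep ... */
}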
diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
index 5eaeae3e2441..9a1bbd79ff5a 100644
--- a/drivers/usb/gadget/function/uvc_queue.c
+++ b/drivers/usb/gadget/function/uvc_queue.c
@@ -122,8 +122,6 @@ static const struct vb2_ops uvc_queue_qops = {
.queue_setup = uvc_queue_setup,
.buf_prepare = uvc_buffer_prepare,
.buf_queue = uvc_buffer_queue,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
};
int uvcg_queue_init(struct uvc_video_queue *queue, struct device *dev, enum v4l2_buf_type type,
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/dev.c b/drivers/usb/gadget/udc/aspeed-vhub/dev.c
index 573109ca5b79..a09f72772e6e 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/dev.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/dev.c
@@ -548,6 +548,9 @@ int ast_vhub_init_dev(struct ast_vhub *vhub, unsigned int idx)
d->vhub = vhub;
d->index = idx;
d->name = devm_kasprintf(parent, GFP_KERNEL, "port%d", idx+1);
+ if (!d->name)
+ return -ENOMEM;
+
d->regs = vhub->regs + 0x100 + 0x10 * idx;
ast_vhub_init_ep0(vhub, &d->ep0, d);
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index 0881fdd1823e..dcf31a592f5d 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -1946,6 +1946,12 @@ max3421_remove(struct spi_device *spi)
usb_put_hcd(hcd);
}
+static const struct spi_device_id max3421_spi_ids[] = {
+ { "max3421" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, max3421_spi_ids);
+
static const struct of_device_id max3421_of_match_table[] = {
{ .compatible = "maxim,max3421", },
{},
@@ -1955,6 +1961,7 @@ MODULE_DEVICE_TABLE(of, max3421_of_match_table);
static struct spi_driver max3421_driver = {
.probe = max3421_probe,
.remove = max3421_remove,
+ .id_table = max3421_spi_ids,
.driver = {
.name = "max3421-hcd",
.of_match_table = max3421_of_match_table,
diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c
index 8a7d46dae62c..02396c8721dc 100644
--- a/drivers/usb/host/xhci-histb.c
+++ b/drivers/usb/host/xhci-histb.c
@@ -355,7 +355,7 @@ static int __maybe_unused xhci_histb_resume(struct device *dev)
if (!device_may_wakeup(dev))
xhci_histb_host_enable(histb);
- return xhci_resume(xhci, PMSG_RESUME);
+ return xhci_resume(xhci, false, false);
}
static const struct dev_pm_ops xhci_histb_pm_ops = {
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index fdf0c1008225..d698095fc88d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1953,7 +1953,6 @@ no_bw:
xhci->interrupters = NULL;
xhci->page_size = 0;
- xhci->page_shift = 0;
xhci->usb2_rhub.bus_state.bus_suspended = 0;
xhci->usb3_rhub.bus_state.bus_suspended = 0;
}
@@ -2372,6 +2371,22 @@ xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
}
EXPORT_SYMBOL_GPL(xhci_create_secondary_interrupter);
+static void xhci_hcd_page_size(struct xhci_hcd *xhci)
+{
+ u32 page_size;
+
+ page_size = readl(&xhci->op_regs->page_size) & XHCI_PAGE_SIZE_MASK;
+ if (!is_power_of_2(page_size)) {
+ xhci_warn(xhci, "Invalid page size register = 0x%x\n", page_size);
+ /* Fall back to 4K page size, since that's common */
+ page_size = 1;
+ }
+
+ xhci->page_size = page_size << 12;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "HCD page size set to %iK",
+ xhci->page_size >> 10);
+}
+
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
struct xhci_interrupter *ir;
@@ -2379,7 +2394,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
dma_addr_t dma;
unsigned int val, val2;
u64 val_64;
- u32 page_size, temp;
+ u32 temp;
int i;
INIT_LIST_HEAD(&xhci->cmd_list);
@@ -2388,20 +2403,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
init_completion(&xhci->cmd_ring_stop_completion);
- page_size = readl(&xhci->op_regs->page_size);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Supported page size register = 0x%x", page_size);
- i = ffs(page_size);
- if (i < 16)
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Supported page size of %iK", (1 << (i+12)) / 1024);
- else
- xhci_warn(xhci, "WARN: no supported page size\n");
- /* Use 4K pages, since that's common and the minimum the HC supports */
- xhci->page_shift = 12;
- xhci->page_size = 1 << xhci->page_shift;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "HCD page size set to %iK", xhci->page_size / 1024);
+ xhci_hcd_page_size(xhci);
/*
* Program the Number of Device Slots Enabled field in the CONFIG
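The PAGE SIZE capability register is one-hot: bit n set means the controller supports pages of 2^(n+12) bytes. The new helper insists on exactly one bit and shifts the raw value straight into a byte count; a worked pass through the arithmetic, using the constants from the hunk:

/* reg = 0x0001 -> 1 << 12 = 4096  (4K, the common case)
 * reg = 0x0008 -> 8 << 12 = 32768 (32K, i.e. 2^(3+12))
 * anything that is not a power of two is treated as broken and 4K assumed */
u32 page_size = readl(&xhci->op_regs->page_size) & XHCI_PAGE_SIZE_MASK;

if (!is_power_of_2(page_size))
	page_size = 1;			/* fall back to 4K */

xhci->page_size = page_size << 12;	/* bytes */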
diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
index 87f1597a0e5a..257e4d79971f 100644
--- a/drivers/usb/host/xhci-mvebu.c
+++ b/drivers/usb/host/xhci-mvebu.c
@@ -73,13 +73,3 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
return 0;
}
-
-int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
-{
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-
- /* Without reset on resume, the HC won't work at all */
- xhci->quirks |= XHCI_RESET_ON_RESUME;
-
- return 0;
-}
diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h
index 3be021793cc8..9d26e22c4842 100644
--- a/drivers/usb/host/xhci-mvebu.h
+++ b/drivers/usb/host/xhci-mvebu.h
@@ -12,16 +12,10 @@ struct usb_hcd;
#if IS_ENABLED(CONFIG_USB_XHCI_MVEBU)
int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd);
-int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd);
#else
static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
{
return 0;
}
-
-static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
-{
- return 0;
-}
#endif
#endif /* __LINUX_XHCI_MVEBU_H */
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 54460d11f7ee..0c481cbc8f08 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -807,8 +807,10 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
static int xhci_pci_resume(struct usb_hcd *hcd, pm_message_t msg)
{
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+ bool power_lost = msg.event == PM_EVENT_RESTORE;
+ bool is_auto_resume = msg.event == PM_EVENT_AUTO_RESUME;
reset_control_reset(xhci->reset);
@@ -839,7 +841,7 @@ static int xhci_pci_resume(struct usb_hcd *hcd, pm_message_t msg)
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
xhci_pme_quirk(hcd);
- return xhci_resume(xhci, msg);
+ return xhci_resume(xhci, power_lost, is_auto_resume);
}
static int xhci_pci_poweroff_late(struct usb_hcd *hcd, bool do_wakeup)
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index d85ffa9ffaa7..3155e3a842da 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -106,7 +106,7 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = {
};
static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = {
- .init_quirk = xhci_mvebu_a3700_init_quirk,
+ .quirks = XHCI_RESET_ON_RESUME,
};
static const struct xhci_plat_priv xhci_plat_brcm = {
@@ -479,9 +479,10 @@ static int xhci_plat_suspend(struct device *dev)
return 0;
}
-static int xhci_plat_resume_common(struct device *dev, struct pm_message pmsg)
+static int xhci_plat_resume_common(struct device *dev, bool power_lost)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int ret;
@@ -501,7 +502,7 @@ static int xhci_plat_resume_common(struct device *dev, struct pm_message pmsg)
if (ret)
goto disable_clks;
- ret = xhci_resume(xhci, pmsg);
+ ret = xhci_resume(xhci, power_lost || priv->power_lost, false);
if (ret)
goto disable_clks;
@@ -522,12 +523,12 @@ disable_clks:
static int xhci_plat_resume(struct device *dev)
{
- return xhci_plat_resume_common(dev, PMSG_RESUME);
+ return xhci_plat_resume_common(dev, false);
}
static int xhci_plat_restore(struct device *dev)
{
- return xhci_plat_resume_common(dev, PMSG_RESTORE);
+ return xhci_plat_resume_common(dev, true);
}
static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev)
@@ -548,7 +549,7 @@ static int __maybe_unused xhci_plat_runtime_resume(struct device *dev)
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- return xhci_resume(xhci, PMSG_AUTO_RESUME);
+ return xhci_resume(xhci, false, true);
}
const struct dev_pm_ops xhci_plat_pm_ops = {
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 6475130eac4b..fe4f95e690fa 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -15,6 +15,7 @@ struct usb_hcd;
struct xhci_plat_priv {
const char *firmware_name;
unsigned long long quirks;
+ bool power_lost;
void (*plat_start)(struct usb_hcd *);
int (*init_quirk)(struct usb_hcd *);
int (*suspend_quirk)(struct usb_hcd *);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 965bffce301e..5d64c297721c 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -204,6 +204,50 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
}
/*
+ * If enqueue points at a link TRB, follow links until an ordinary TRB is reached.
+ * Toggle the cycle bit of passed link TRBs and optionally chain them.
+ */
+static void inc_enq_past_link(struct xhci_hcd *xhci, struct xhci_ring *ring, u32 chain)
+{
+ unsigned int link_trb_count = 0;
+
+ while (trb_is_link(ring->enqueue)) {
+
+ /*
+ * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
+ * set, but other sections talk about dealing with the chain bit set. This was
+ * fixed in the 0.96 specification errata, but we have to assume that all 0.95
+ * xHCI hardware can't handle the chain bit being cleared on a link TRB.
+ *
+ * On 0.95 and some 0.96 HCs the chain bit is set once at segment initialization
+ * and never changed here. On all others, modify it as requested by the caller.
+ */
+ if (!xhci_link_chain_quirk(xhci, ring->type)) {
+ ring->enqueue->link.control &= cpu_to_le32(~TRB_CHAIN);
+ ring->enqueue->link.control |= cpu_to_le32(chain);
+ }
+
+ /* Give this link TRB to the hardware */
+ wmb();
+ ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
+
+ /* Toggle the cycle bit after the last ring segment. */
+ if (link_trb_toggles_cycle(ring->enqueue))
+ ring->cycle_state ^= 1;
+
+ ring->enq_seg = ring->enq_seg->next;
+ ring->enqueue = ring->enq_seg->trbs;
+
+ trace_xhci_inc_enq(ring);
+
+ if (link_trb_count++ > ring->num_segs) {
+ xhci_warn(xhci, "Link TRB loop at enqueue\n");
+ break;
+ }
+ }
+}
+
+/*
* See Cycle bit rules. SW is the consumer for the event ring only.
*
* If we've just enqueued a TRB that is in the middle of a TD (meaning the
@@ -211,11 +255,6 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
* If we've enqueued the last TRB in a TD, make sure the following link TRBs
* have their chain bit cleared (so that each Link TRB is a separate TD).
*
- * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
- * set, but other sections talk about dealing with the chain bit set. This was
- * fixed in the 0.96 specification errata, but we have to assume that all 0.95
- * xHCI hardware can't handle the chain bit being cleared on a link TRB.
- *
* @more_trbs_coming: Will you enqueue more TRBs before calling
* prepare_transfer()?
*/
@@ -223,8 +262,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
bool more_trbs_coming)
{
u32 chain;
- union xhci_trb *next;
- unsigned int link_trb_count = 0;
chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
@@ -233,48 +270,67 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
return;
}
- next = ++(ring->enqueue);
+ ring->enqueue++;
- /* Update the dequeue pointer further if that was a link TRB */
- while (trb_is_link(next)) {
+ /*
+ * If we are in the middle of a TD or the caller plans to enqueue more
+ * TDs as one transfer (e.g. control), traverse any link TRBs right now.
+ * Otherwise, enqueue can stay on a link until the next prepare_ring().
+ * This avoids enqueue entering deq_seg and simplifies ring expansion.
+ */
+ if (trb_is_link(ring->enqueue) && (chain || more_trbs_coming))
+ inc_enq_past_link(xhci, ring, chain);
+}
- /*
- * If the caller doesn't plan on enqueueing more TDs before
- * ringing the doorbell, then we don't want to give the link TRB
- * to the hardware just yet. We'll give the link TRB back in
- * prepare_ring() just before we enqueue the TD at the top of
- * the ring.
- */
- if (!chain && !more_trbs_coming)
- break;
+/*
+ * If the suspect DMA address is a TRB in this TD, this function returns that
+ * TRB's segment. Otherwise it returns 0.
+ */
+static struct xhci_segment *trb_in_td(struct xhci_td *td, dma_addr_t suspect_dma)
+{
+ dma_addr_t start_dma;
+ dma_addr_t end_seg_dma;
+ dma_addr_t end_trb_dma;
+ struct xhci_segment *cur_seg;
- /* If we're not dealing with 0.95 hardware or isoc rings on
- * AMD 0.96 host, carry over the chain bit of the previous TRB
- * (which may mean the chain bit is cleared).
- */
- if (!xhci_link_chain_quirk(xhci, ring->type)) {
- next->link.control &= cpu_to_le32(~TRB_CHAIN);
- next->link.control |= cpu_to_le32(chain);
- }
- /* Give this link TRB to the hardware */
- wmb();
- next->link.control ^= cpu_to_le32(TRB_CYCLE);
+ start_dma = xhci_trb_virt_to_dma(td->start_seg, td->start_trb);
+ cur_seg = td->start_seg;
- /* Toggle the cycle bit after the last ring segment. */
- if (link_trb_toggles_cycle(next))
- ring->cycle_state ^= 1;
+ do {
+ if (start_dma == 0)
+ return NULL;
+ /* We may get an event for a Link TRB in the middle of a TD */
+ end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
+ &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
+ /* If the end TRB isn't in this segment, this is set to 0 */
+ end_trb_dma = xhci_trb_virt_to_dma(cur_seg, td->end_trb);
- ring->enq_seg = ring->enq_seg->next;
- ring->enqueue = ring->enq_seg->trbs;
- next = ring->enqueue;
+ if (end_trb_dma > 0) {
+ /* The end TRB is in this segment, so suspect should be here */
+ if (start_dma <= end_trb_dma) {
+ if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
+ return cur_seg;
+ } else {
+ /* Case for one segment with a TD wrapped around to the top */
+ if ((suspect_dma >= start_dma &&
+ suspect_dma <= end_seg_dma) ||
+ (suspect_dma >= cur_seg->dma &&
+ suspect_dma <= end_trb_dma))
+ return cur_seg;
+ }
+ return NULL;
+ }
+ /* Might still be somewhere in this segment */
+ if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
+ return cur_seg;
- trace_xhci_inc_enq(ring);
+ cur_seg = cur_seg->next;
+ start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
+ } while (cur_seg != td->start_seg);
- if (link_trb_count++ > ring->num_segs) {
- xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
- break;
- }
- }
+ return NULL;
}
/*
@@ -505,8 +561,8 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
* pointer command pending because the device can choose to start any
* stream once the endpoint is on the HW schedule.
*/
- if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
- (ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT))
+ if (ep_state & (EP_STOP_CMD_PENDING | SET_DEQ_PENDING | EP_HALTED |
+ EP_CLEARING_TT | EP_STALLED))
return;
trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id));
@@ -1014,7 +1070,7 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
td->urb->stream_id);
hw_deq &= ~0xf;
- if (td->cancel_status == TD_HALTED || trb_in_td(xhci, td, hw_deq, false)) {
+ if (td->cancel_status == TD_HALTED || trb_in_td(td, hw_deq)) {
switch (td->cancel_status) {
case TD_CLEARED: /* TD is already no-op */
case TD_CLEARING_CACHE: /* set TR deq command already queued */
@@ -1104,7 +1160,7 @@ static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep)
hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0);
hw_deq &= ~0xf;
td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list);
- if (trb_in_td(ep->xhci, td, hw_deq, false))
+ if (trb_in_td(td, hw_deq))
return td;
}
return NULL;
@@ -1164,7 +1220,14 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
*/
switch (GET_EP_CTX_STATE(ep_ctx)) {
case EP_STATE_HALTED:
- xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n");
+ xhci_dbg(xhci, "Stop ep completion raced with stall\n");
+ /*
+ * If the halt happened before Stop Endpoint failed, its transfer event
+ * should have already been handled and Reset Endpoint should be pending.
+ */
+ if (ep->ep_state & EP_HALTED)
+ goto reset_done;
+
if (ep->ep_state & EP_HAS_STREAMS) {
reset_type = EP_SOFT_RESET;
} else {
@@ -1175,8 +1238,11 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
}
/* reset ep, reset handler cleans up cancelled tds */
err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type);
+ xhci_dbg(xhci, "Stop ep completion resetting ep, status %d\n", err);
if (err)
break;
+reset_done:
+ /* Reset EP handler will clean up cancelled TDs */
ep->ep_state &= ~EP_STOP_CMD_PENDING;
return;
case EP_STATE_STOPPED:
@@ -1198,16 +1264,19 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
* Stopped state, but it will soon change to Running.
*
* Assume this bug on unexpected Stop Endpoint failures.
- * Keep retrying until the EP starts and stops again, on
- * chips where this is known to help. Wait for 100ms.
+ * Keep retrying until the EP starts and stops again.
*/
- if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
- break;
fallthrough;
case EP_STATE_RUNNING:
/* Race, HW handled stop ep cmd before ep was running */
xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n",
GET_EP_CTX_STATE(ep_ctx));
+ /*
+ * Don't retry forever if we guessed wrong or a defective HC never starts
+ * the EP or says 'Running' but fails the command. We must give back TDs.
+ */
+ if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
+ break;
command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
if (!command) {
@@ -1332,43 +1401,6 @@ void xhci_hc_died(struct xhci_hcd *xhci)
usb_hc_died(xhci_to_hcd(xhci));
}
-static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
- struct xhci_virt_device *dev,
- struct xhci_ring *ep_ring,
- unsigned int ep_index)
-{
- union xhci_trb *dequeue_temp;
-
- dequeue_temp = ep_ring->dequeue;
-
- /* If we get two back-to-back stalls, and the first stalled transfer
- * ends just before a link TRB, the dequeue pointer will be left on
- * the link TRB by the code in the while loop. So we have to update
- * the dequeue pointer one segment further, or we'll jump off
- * the segment into la-la-land.
- */
- if (trb_is_link(ep_ring->dequeue)) {
- ep_ring->deq_seg = ep_ring->deq_seg->next;
- ep_ring->dequeue = ep_ring->deq_seg->trbs;
- }
-
- while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
- /* We have more usable TRBs */
- ep_ring->dequeue++;
- if (trb_is_link(ep_ring->dequeue)) {
- if (ep_ring->dequeue ==
- dev->eps[ep_index].queued_deq_ptr)
- break;
- ep_ring->deq_seg = ep_ring->deq_seg->next;
- ep_ring->dequeue = ep_ring->deq_seg->trbs;
- }
- if (ep_ring->dequeue == dequeue_temp) {
- xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
- break;
- }
- }
-}
-
/*
* When we get a completion for a Set Transfer Ring Dequeue Pointer command,
* we need to clear the set deq pending flag in the endpoint ring state, so that
@@ -1473,8 +1505,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
/* Update the ring's dequeue segment and dequeue pointer
* to reflect the new position.
*/
- update_ring_for_set_deq_completion(xhci, ep->vdev,
- ep_ring, ep_index);
+ ep_ring->deq_seg = ep->queued_deq_seg;
+ ep_ring->dequeue = ep->queued_deq_ptr;
} else {
xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
@@ -2116,67 +2148,6 @@ cleanup:
spin_lock(&xhci->lock);
}
-/*
- * If the suspect DMA address is a TRB in this TD, this function returns that
- * TRB's segment. Otherwise it returns 0.
- */
-struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, struct xhci_td *td, dma_addr_t suspect_dma,
- bool debug)
-{
- dma_addr_t start_dma;
- dma_addr_t end_seg_dma;
- dma_addr_t end_trb_dma;
- struct xhci_segment *cur_seg;
-
- start_dma = xhci_trb_virt_to_dma(td->start_seg, td->start_trb);
- cur_seg = td->start_seg;
-
- do {
- if (start_dma == 0)
- return NULL;
- /* We may get an event for a Link TRB in the middle of a TD */
- end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
- &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
- /* If the end TRB isn't in this segment, this is set to 0 */
- end_trb_dma = xhci_trb_virt_to_dma(cur_seg, td->end_trb);
-
- if (debug)
- xhci_warn(xhci,
- "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
- (unsigned long long)suspect_dma,
- (unsigned long long)start_dma,
- (unsigned long long)end_trb_dma,
- (unsigned long long)cur_seg->dma,
- (unsigned long long)end_seg_dma);
-
- if (end_trb_dma > 0) {
- /* The end TRB is in this segment, so suspect should be here */
- if (start_dma <= end_trb_dma) {
- if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
- return cur_seg;
- } else {
- /* Case for one segment with
- * a TD wrapped around to the top
- */
- if ((suspect_dma >= start_dma &&
- suspect_dma <= end_seg_dma) ||
- (suspect_dma >= cur_seg->dma &&
- suspect_dma <= end_trb_dma))
- return cur_seg;
- }
- return NULL;
- } else {
- /* Might still be somewhere in this segment */
- if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
- return cur_seg;
- }
- cur_seg = cur_seg->next;
- start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
- } while (cur_seg != td->start_seg);
-
- return NULL;
-}
-
static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
struct xhci_virt_ep *ep)
{
@@ -2476,6 +2447,12 @@ static void process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
if (ep_trb != td->end_trb)
td->error_mid_td = true;
break;
+ case COMP_MISSED_SERVICE_ERROR:
+ frame->status = -EXDEV;
+ sum_trbs_for_length = true;
+ if (ep_trb != td->end_trb)
+ td->error_mid_td = true;
+ break;
case COMP_INCOMPATIBLE_DEVICE_ERROR:
case COMP_STALL_ERROR:
frame->status = -EPROTO;
@@ -2596,6 +2573,9 @@ static void process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
xhci_handle_halted_endpoint(xhci, ep, td, EP_SOFT_RESET);
return;
+ case COMP_STALL_ERROR:
+ ep->ep_state |= EP_STALLED;
+ break;
default:
/* do nothing */
break;
@@ -2644,6 +2624,22 @@ static int handle_transferless_tx_event(struct xhci_hcd *xhci, struct xhci_virt_
return 0;
}
+static bool xhci_spurious_success_tx_event(struct xhci_hcd *xhci,
+ struct xhci_ring *ring)
+{
+ switch (ring->old_trb_comp_code) {
+ case COMP_SHORT_PACKET:
+ return xhci->quirks & XHCI_SPURIOUS_SUCCESS;
+ case COMP_USB_TRANSACTION_ERROR:
+ case COMP_BABBLE_DETECTED_ERROR:
+ case COMP_ISOCH_BUFFER_OVERRUN:
+ return xhci->quirks & XHCI_ETRON_HOST &&
+ ring->type == TYPE_ISOC;
+ default:
+ return false;
+ }
+}
+
/*
* If this function returns an error condition, it means it got a Transfer
* event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
@@ -2664,6 +2660,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
int status = -EINPROGRESS;
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
+ bool ring_xrun_event = false;
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
@@ -2697,8 +2694,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
case COMP_SUCCESS:
if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
trb_comp_code = COMP_SHORT_PACKET;
- xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td short %d\n",
- slot_id, ep_index, ep_ring->last_td_was_short);
+ xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td comp code %d\n",
+ slot_id, ep_index, ep_ring->old_trb_comp_code);
}
break;
case COMP_SHORT_PACKET:
@@ -2770,14 +2767,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* Underrun Event for OUT Isoch endpoint.
*/
xhci_dbg(xhci, "Underrun event on slot %u ep %u\n", slot_id, ep_index);
- if (ep->skip)
- break;
- return 0;
+ ring_xrun_event = true;
+ break;
case COMP_RING_OVERRUN:
xhci_dbg(xhci, "Overrun event on slot %u ep %u\n", slot_id, ep_index);
- if (ep->skip)
- break;
- return 0;
+ ring_xrun_event = true;
+ break;
case COMP_MISSED_SERVICE_ERROR:
/*
* When encounter missed service error, one or more isoc tds
@@ -2787,9 +2782,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
*/
ep->skip = true;
xhci_dbg(xhci,
- "Miss service interval error for slot %u ep %u, set skip flag\n",
- slot_id, ep_index);
- return 0;
+ "Miss service interval error for slot %u ep %u, set skip flag%s\n",
+ slot_id, ep_index, ep_trb_dma ? ", skip now" : "");
+ break;
case COMP_NO_PING_RESPONSE_ERROR:
ep->skip = true;
xhci_dbg(xhci,
@@ -2832,11 +2827,15 @@ static int handle_tx_event(struct xhci_hcd *xhci,
*/
td = list_first_entry_or_null(&ep_ring->td_list, struct xhci_td, td_list);
- if (td && td->error_mid_td && !trb_in_td(xhci, td, ep_trb_dma, false)) {
+ if (td && td->error_mid_td && !trb_in_td(td, ep_trb_dma)) {
xhci_dbg(xhci, "Missing TD completion event after mid TD error\n");
xhci_dequeue_td(xhci, td, ep_ring, td->status);
}
+ /* If the TRB pointer is NULL, missed TDs will be skipped on the next event */
+ if (trb_comp_code == COMP_MISSED_SERVICE_ERROR && !ep_trb_dma)
+ return 0;
+
if (list_empty(&ep_ring->td_list)) {
/*
* Don't print warnings if the ring is empty due to a stopped endpoint generating an
@@ -2846,7 +2845,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
*/
if (trb_comp_code != COMP_STOPPED &&
trb_comp_code != COMP_STOPPED_LENGTH_INVALID &&
- !ep_ring->last_td_was_short) {
+ !ring_xrun_event &&
+ !xhci_spurious_success_tx_event(xhci, ep_ring)) {
xhci_warn(xhci, "Event TRB for slot %u ep %u with no TDs queued\n",
slot_id, ep_index);
}
@@ -2860,14 +2860,31 @@ static int handle_tx_event(struct xhci_hcd *xhci,
td_list);
/* Is this a TRB in the currently executing TD? */
- ep_seg = trb_in_td(xhci, td, ep_trb_dma, false);
+ ep_seg = trb_in_td(td, ep_trb_dma);
if (!ep_seg) {
if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+ /* this event is unlikely to match any TD, don't skip them all */
+ if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID)
+ return 0;
+
skip_isoc_td(xhci, td, ep, status);
- if (!list_empty(&ep_ring->td_list))
+
+ if (!list_empty(&ep_ring->td_list)) {
+ if (ring_xrun_event) {
+ /*
+ * If we are here, we are on xHCI 1.0 host with no
+ * idea how many TDs were missed or where the xrun
+ * occurred. New TDs may have been added after the
+ * xrun, so skip only one TD to be safe.
+ */
+ xhci_dbg(xhci, "Skipped one TD for slot %u ep %u\n",
+ slot_id, ep_index);
+ return 0;
+ }
continue;
+ }
xhci_dbg(xhci, "All TDs skipped for slot %u ep %u. Clear skip flag.\n",
slot_id, ep_index);
@@ -2876,6 +2893,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
goto check_endpoint_halted;
}
+ /* TD was queued after xrun, maybe xrun was on a link, don't panic yet */
+ if (ring_xrun_event)
+ return 0;
+
/*
* Skip the Force Stopped Event. The 'ep_trb' of FSE is not in the current
* TD pointed by 'ep_ring->dequeue' because that the hardware dequeue
@@ -2890,21 +2911,17 @@ static int handle_tx_event(struct xhci_hcd *xhci,
/*
* Some hosts give a spurious success event after a short
- * transfer. Ignore it.
+ * transfer or error on last TRB. Ignore it.
*/
- if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
- ep_ring->last_td_was_short) {
- ep_ring->last_td_was_short = false;
+ if (xhci_spurious_success_tx_event(xhci, ep_ring)) {
+ xhci_dbg(xhci, "Spurious event dma %pad, comp_code %u after %u\n",
+ &ep_trb_dma, trb_comp_code, ep_ring->old_trb_comp_code);
+ ep_ring->old_trb_comp_code = trb_comp_code;
return 0;
}
/* HC is busted, give up! */
- xhci_err(xhci,
- "ERROR Transfer event TRB DMA ptr not part of current TD ep_index %d comp_code %u\n",
- ep_index, trb_comp_code);
- trb_in_td(xhci, td, ep_trb_dma, true);
-
- return -ESHUTDOWN;
+ goto debug_finding_td;
}
if (ep->skip) {
@@ -2922,10 +2939,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
*/
} while (ep->skip);
- if (trb_comp_code == COMP_SHORT_PACKET)
- ep_ring->last_td_was_short = true;
- else
- ep_ring->last_td_was_short = false;
+ ep_ring->old_trb_comp_code = trb_comp_code;
+
+ /* Get out if a TD was queued at enqueue after the xrun occurred */
+ if (ring_xrun_event)
+ return 0;
ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / sizeof(*ep_trb)];
trace_xhci_handle_transfer(ep_ring, (struct xhci_generic_trb *) ep_trb, ep_trb_dma);
@@ -2957,6 +2975,17 @@ check_endpoint_halted:
return 0;
+debug_finding_td:
+ xhci_err(xhci, "Event dma %pad for ep %d status %d not part of TD at %016llx - %016llx\n",
+ &ep_trb_dma, ep_index, trb_comp_code,
+ (unsigned long long)xhci_trb_virt_to_dma(td->start_seg, td->start_trb),
+ (unsigned long long)xhci_trb_virt_to_dma(td->end_seg, td->end_trb));
+
+ xhci_for_each_ring_seg(ep_ring->first_seg, ep_seg)
+ xhci_warn(xhci, "Ring seg %u dma %pad\n", ep_seg->num, &ep_seg->dma);
+
+ return -ESHUTDOWN;
+
err_out:
xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
(unsigned long long) xhci_trb_virt_to_dma(
@@ -3216,7 +3245,6 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
- unsigned int link_trb_count = 0;
unsigned int new_segs = 0;
/* Make sure the endpoint has been added to xHC schedule */
@@ -3264,33 +3292,9 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
}
}
- while (trb_is_link(ep_ring->enqueue)) {
- /* If we're not dealing with 0.95 hardware or isoc rings
- * on AMD 0.96 host, clear the chain bit.
- */
- if (!xhci_link_chain_quirk(xhci, ep_ring->type))
- ep_ring->enqueue->link.control &=
- cpu_to_le32(~TRB_CHAIN);
- else
- ep_ring->enqueue->link.control |=
- cpu_to_le32(TRB_CHAIN);
-
- wmb();
- ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
-
- /* Toggle the cycle bit after the last ring segment. */
- if (link_trb_toggles_cycle(ep_ring->enqueue))
- ep_ring->cycle_state ^= 1;
-
- ep_ring->enq_seg = ep_ring->enq_seg->next;
- ep_ring->enqueue = ep_ring->enq_seg->trbs;
-
- /* prevent infinite loop if all first trbs are link trbs */
- if (link_trb_count++ > ep_ring->num_segs) {
- xhci_warn(xhci, "Ring is an endless link TRB loop\n");
- return -EINVAL;
- }
- }
+ /* Ensure that new TRBs won't overwrite a link */
+ if (trb_is_link(ep_ring->enqueue))
+ inc_enq_past_link(xhci, ep_ring, 0);
if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) {
xhci_warn(xhci, "Missing link TRB at end of ring segment\n");
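The relocated trb_in_td() reduces to interval tests on DMA addresses; the only subtle case is a TD that wraps through the link TRB of a single-segment ring, where the TD occupies two disjoint ranges. The predicate behind its branches, sketched:

/* Straight case:  [seg.start ... td.start ==== td.end ... seg.end]
 *   hit iff  td.start <= dma <= td.end
 * Wrapped case:   [seg.start ==== td.end ..... td.start ==== seg.end]
 *   hit iff  dma falls in either of the two covered ranges            */
static bool dma_in_range(dma_addr_t lo, dma_addr_t x, dma_addr_t hi)
{
	return x >= lo && x <= hi;
}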
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 22dc86fb5254..b5c362c2051d 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -2162,11 +2162,11 @@ static void tegra_xhci_program_utmi_power_lp0_exit(struct tegra_xusb *tegra)
}
}
-static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool runtime)
+static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool is_auto_resume)
{
struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
struct device *dev = tegra->dev;
- bool wakeup = runtime ? true : device_may_wakeup(dev);
+ bool wakeup = is_auto_resume ? true : device_may_wakeup(dev);
unsigned int i;
int err;
u32 usbcmd;
@@ -2232,11 +2232,11 @@ out:
return err;
}
-static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool runtime)
+static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool is_auto_resume)
{
struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
struct device *dev = tegra->dev;
- bool wakeup = runtime ? true : device_may_wakeup(dev);
+ bool wakeup = is_auto_resume ? true : device_may_wakeup(dev);
unsigned int i;
u32 usbcmd;
int err;
@@ -2287,7 +2287,7 @@ static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool runtime)
if (wakeup)
tegra_xhci_disable_phy_sleepwalk(tegra);
- err = xhci_resume(xhci, runtime ? PMSG_AUTO_RESUME : PMSG_RESUME);
+ err = xhci_resume(xhci, false, is_auto_resume);
if (err < 0) {
dev_err(tegra->dev, "failed to resume XHCI: %d\n", err);
goto disable_phy;
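xhci_resume() no longer inspects pm_message_t; each glue driver derives the two booleans itself. The mapping the callers in this series apply, collected in one place (sketch, mirroring the xhci-pci.c hunk):

/* PM event             -> power_lost, is_auto_resume
 * PM_EVENT_RESUME      -> false, false  (normal system resume)
 * PM_EVENT_RESTORE     -> true,  false  (back from hibernation)
 * PM_EVENT_AUTO_RESUME -> false, true   (runtime resume)        */
bool power_lost     = msg.event == PM_EVENT_RESTORE;
bool is_auto_resume = msg.event == PM_EVENT_AUTO_RESUME;

ret = xhci_resume(xhci, power_lost, is_auto_resume);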
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1a90ebc8a30e..83a4adf57bae 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -994,16 +994,14 @@ EXPORT_SYMBOL_GPL(xhci_suspend);
* This is called when the machine transition from S3/S4 mode.
*
*/
-int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
+int xhci_resume(struct xhci_hcd *xhci, bool power_lost, bool is_auto_resume)
{
- bool hibernated = (msg.event == PM_EVENT_RESTORE);
u32 command, temp = 0;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
int retval = 0;
bool comp_timer_running = false;
bool pending_portevent = false;
bool suspended_usb3_devs = false;
- bool reinit_xhc = false;
if (!hcd->state)
return 0;
@@ -1022,10 +1020,10 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
spin_lock_irq(&xhci->lock);
- if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
- reinit_xhc = true;
+ if (xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
+ power_lost = true;
- if (!reinit_xhc) {
+ if (!power_lost) {
/*
* Some controllers might lose power during suspend, so wait
* for controller not ready bit to clear, just as in xHC init.
@@ -1065,12 +1063,12 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
/* re-initialize the HC on Restore Error, or Host Controller Error */
if ((temp & (STS_SRE | STS_HCE)) &&
!(xhci->xhc_state & XHCI_STATE_REMOVING)) {
- reinit_xhc = true;
- if (!xhci->broken_suspend)
+ if (!power_lost)
xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
+ power_lost = true;
}
- if (reinit_xhc) {
+ if (power_lost) {
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
!(xhci_all_ports_seen_u0(xhci))) {
del_timer_sync(&xhci->comp_mode_recovery_timer);
@@ -1168,8 +1166,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
pending_portevent = xhci_pending_portevent(xhci);
- if (suspended_usb3_devs && !pending_portevent &&
- msg.event == PM_EVENT_AUTO_RESUME) {
+ if (suspended_usb3_devs && !pending_portevent && is_auto_resume) {
msleep(120);
pending_portevent = xhci_pending_portevent(xhci);
}
@@ -1608,6 +1605,11 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
goto free_priv;
}
+ /* Class driver might not be aware the ep is halted due to async URB giveback */
+ if (*ep_state & EP_STALLED)
+ dev_dbg(&urb->dev->dev, "URB %p queued before clearing halt\n",
+ urb);
+
switch (usb_endpoint_type(&urb->ep->desc)) {
case USB_ENDPOINT_XFER_CONTROL:
@@ -1768,8 +1770,8 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
goto done;
}
- /* In this case no commands are pending but the endpoint is stopped */
- if (ep->ep_state & EP_CLEARING_TT) {
+ /* In these cases no commands are pending but the endpoint is stopped */
+ if (ep->ep_state & (EP_CLEARING_TT | EP_STALLED)) {
/* and cancelled TDs can be given back right away */
xhci_dbg(xhci, "Invalidating TDs instantly on slot %d ep %d in state 0x%x\n",
urb->dev->slot_id, ep_index, ep->ep_state);
@@ -3207,8 +3209,11 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
ep = &vdev->eps[ep_index];
- /* Bail out if toggle is already being cleared by a endpoint reset */
spin_lock_irqsave(&xhci->lock, flags);
+
+ ep->ep_state &= ~EP_STALLED;
+
+ /* Bail out if toggle is already being cleared by a endpoint reset */
if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -4759,8 +4764,8 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
*/
if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
return timeout_ns;
- dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
- "due to long timeout %llu ms\n", timeout_ns);
+ dev_dbg(&udev->dev, "Hub-initiated U1 disabled due to long timeout %lluus\n",
+ timeout_ns);
return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
}
@@ -4817,8 +4822,8 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
*/
if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
return timeout_ns;
- dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
- "due to long timeout %llu ms\n", timeout_ns);
+ dev_dbg(&udev->dev, "Hub-initiated U2 disabled due to long timeout %lluus\n",
+ timeout_ns * 256);
return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
}
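The two dev_dbg() fixes track the units the LPM timeout values are actually held in at that point; my reading (hedged, since the computations sit outside this excerpt) is that the U1 value is in 1 us units and the U2 value in 256 us units, per the USB 3.x LPM definitions, hence the bare value for U1 and the * 256 for U2:

/* Assumed unit handling behind the message changes:
 *   U1: 1 us per unit   -> print the value as-is    (us)
 *   U2: 256 us per unit -> print the value * 256    (us)
 * e.g. a U2 value of 10 means 10 * 256 = 2560 us before hub-initiated
 * LPM is given up on. */
u64 u2_us = timeout_ns * 256;	/* timeout_ns holds 256 us units here */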
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 779b01dee068..37860f1e3aba 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -211,6 +211,9 @@ struct xhci_op_regs {
#define CONFIG_CIE (1 << 9)
/* bits 10:31 - reserved and should be preserved */
+/* bits 15:0 - xHC supported page size bitfield */
+#define XHCI_PAGE_SIZE_MASK 0xffff
+
/**
* struct xhci_intr_reg - Interrupt Register Set
* @irq_pending: IMAN - Interrupt Management Register. Used to enable
@@ -661,7 +664,7 @@ struct xhci_virt_ep {
unsigned int err_count;
unsigned int ep_state;
#define SET_DEQ_PENDING (1 << 0)
-#define EP_HALTED (1 << 1) /* For stall handling */
+#define EP_HALTED (1 << 1) /* Halted host ep handling */
#define EP_STOP_CMD_PENDING (1 << 2) /* For URB cancellation */
/* Transitioning the endpoint to using streams, don't enqueue URBs */
#define EP_GETTING_STREAMS (1 << 3)
@@ -672,6 +675,7 @@ struct xhci_virt_ep {
#define EP_SOFT_CLEAR_TOGGLE (1 << 7)
/* usb_hub_clear_tt_buffer is in progress */
#define EP_CLEARING_TT (1 << 8)
+#define EP_STALLED (1 << 9) /* For stall handling */
/* ---- Related to URB cancellation ---- */
struct list_head cancelled_td_list;
struct xhci_hcd *xhci;
@@ -1371,7 +1375,7 @@ struct xhci_ring {
unsigned int num_trbs_free; /* used only by xhci DbC */
unsigned int bounce_buf_len;
enum xhci_ring_type type;
- bool last_td_was_short;
+ u32 old_trb_comp_code;
struct radix_tree_root *trb_address_map;
};
@@ -1514,10 +1518,7 @@ struct xhci_hcd {
u16 max_interrupters;
/* imod_interval in ns (I * 250ns) */
u32 imod_interval;
- /* 4KB min, 128MB max */
- int page_size;
- /* Valid values are 12 to 20, inclusive */
- int page_shift;
+ u32 page_size;
/* MSI-X/MSI vectors */
int nvecs;
/* optional clocks */
@@ -1759,11 +1760,20 @@ static inline void xhci_write_64(struct xhci_hcd *xhci,
}
-/* Link TRB chain should always be set on 0.95 hosts, and AMD 0.96 ISOC rings */
+/*
+ * Reportedly, some chapters of v0.95 spec said that Link TRB always has its chain bit set.
+ * Other chapters and later specs say that it should only be set if the link is inside a TD
+ * which continues from the end of one segment to the next segment.
+ *
+ * Some 0.95 hardware was found to misbehave if any link TRB doesn't have the chain bit set.
+ *
+ * 0.96 hardware from AMD and NEC was found to ignore unchained isochronous link TRBs when
+ * "resynchronizing the pipe" after a Missed Service Error.
+ */
static inline bool xhci_link_chain_quirk(struct xhci_hcd *xhci, enum xhci_ring_type type)
{
return (xhci->quirks & XHCI_LINK_TRB_QUIRK) ||
- (type == TYPE_ISOC && (xhci->quirks & XHCI_AMD_0x96_HOST));
+ (type == TYPE_ISOC && (xhci->quirks & (XHCI_AMD_0x96_HOST | XHCI_NEC_HOST)));
}
/* xHCI debugging */
@@ -1870,7 +1880,7 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
int xhci_ext_cap_init(struct xhci_hcd *xhci);
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup);
-int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg);
+int xhci_resume(struct xhci_hcd *xhci, bool power_lost, bool is_auto_resume);
irqreturn_t xhci_irq(struct usb_hcd *hcd);
irqreturn_t xhci_msi_irq(int irq, void *hcd);
@@ -1884,8 +1894,6 @@ int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
/* xHCI ring, segment, TRB, and TD functions */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
-struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, struct xhci_td *td,
- dma_addr_t suspect_dma, bool debug);
int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
void xhci_ring_cmd_db(struct xhci_hcd *xhci);
int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
diff --git a/drivers/usb/misc/onboard_usb_dev.h b/drivers/usb/misc/onboard_usb_dev.h
index 317b3eb99c02..933797a7e084 100644
--- a/drivers/usb/misc/onboard_usb_dev.h
+++ b/drivers/usb/misc/onboard_usb_dev.h
@@ -23,6 +23,13 @@ static const struct onboard_dev_pdata microchip_usb424_data = {
.is_hub = true,
};
+static const struct onboard_dev_pdata microchip_usb2514_data = {
+ .reset_us = 1,
+ .num_supplies = 2,
+ .supply_names = { "vdd", "vdda" },
+ .is_hub = true,
+};
+
static const struct onboard_dev_pdata microchip_usb5744_data = {
.reset_us = 0,
.power_on_delay_us = 10000,
@@ -96,7 +103,7 @@ static const struct onboard_dev_pdata xmos_xvf3500_data = {
static const struct of_device_id onboard_dev_match[] = {
{ .compatible = "usb424,2412", .data = &microchip_usb424_data, },
- { .compatible = "usb424,2514", .data = &microchip_usb424_data, },
+ { .compatible = "usb424,2514", .data = &microchip_usb2514_data, },
{ .compatible = "usb424,2517", .data = &microchip_usb424_data, },
{ .compatible = "usb424,2744", .data = &microchip_usb5744_data, },
{ .compatible = "usb424,5744", .data = &microchip_usb5744_data, },
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index e24cdb667307..4fb453ca5450 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -636,10 +636,8 @@ static int usb251xb_probe(struct usb251xb *hub)
if (np && usb_data) {
err = usb251xb_get_ofdata(hub, usb_data);
- if (err) {
- dev_err(dev, "failed to get ofdata: %d\n", err);
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "failed to get ofdata\n");
}
/*
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index acdeb1117cd3..df56c972986f 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -59,7 +59,7 @@ static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci)
return IRQ_NONE;
}
-static struct musb_fifo_cfg jz4740_musb_fifo_cfg[] = {
+static const struct musb_fifo_cfg jz4740_musb_fifo_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 64, },
@@ -205,7 +205,7 @@ static const struct musb_hdrc_platform_data jz4740_musb_pdata = {
.platform_ops = &jz4740_musb_ops,
};
-static struct musb_fifo_cfg jz4770_musb_fifo_cfg[] = {
+static const struct musb_fifo_cfg jz4770_musb_fifo_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
index aa988d74b58d..c6cbe718b1da 100644
--- a/drivers/usb/musb/mediatek.c
+++ b/drivers/usb/musb/mediatek.c
@@ -365,7 +365,7 @@ static const struct musb_platform_ops mtk_musb_ops = {
#define MTK_MUSB_MAX_EP_NUM 8
#define MTK_MUSB_RAM_BITS 11
-static struct musb_fifo_cfg mtk_musb_mode_cfg[] = {
+static const struct musb_fifo_cfg mtk_musb_mode_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
diff --git a/drivers/usb/musb/mpfs.c b/drivers/usb/musb/mpfs.c
index 7edc8429b274..71e4271cba75 100644
--- a/drivers/usb/musb/mpfs.c
+++ b/drivers/usb/musb/mpfs.c
@@ -29,7 +29,7 @@ struct mpfs_glue {
struct clk *clk;
};
-static struct musb_fifo_cfg mpfs_musb_mode_cfg[] = {
+static const struct musb_fifo_cfg mpfs_musb_mode_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 7f349f5e781d..96fa700eaed1 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1271,7 +1271,7 @@ MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
*/
/* mode 0 - fits in 2KB */
-static struct musb_fifo_cfg mode_0_cfg[] = {
+static const struct musb_fifo_cfg mode_0_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
@@ -1280,7 +1280,7 @@ static struct musb_fifo_cfg mode_0_cfg[] = {
};
/* mode 1 - fits in 4KB */
-static struct musb_fifo_cfg mode_1_cfg[] = {
+static const struct musb_fifo_cfg mode_1_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
@@ -1289,7 +1289,7 @@ static struct musb_fifo_cfg mode_1_cfg[] = {
};
/* mode 2 - fits in 4KB */
-static struct musb_fifo_cfg mode_2_cfg[] = {
+static const struct musb_fifo_cfg mode_2_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
@@ -1299,7 +1299,7 @@ static struct musb_fifo_cfg mode_2_cfg[] = {
};
/* mode 3 - fits in 4KB */
-static struct musb_fifo_cfg mode_3_cfg[] = {
+static const struct musb_fifo_cfg mode_3_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
@@ -1309,7 +1309,7 @@ static struct musb_fifo_cfg mode_3_cfg[] = {
};
/* mode 4 - fits in 16KB */
-static struct musb_fifo_cfg mode_4_cfg[] = {
+static const struct musb_fifo_cfg mode_4_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
@@ -1340,7 +1340,7 @@ static struct musb_fifo_cfg mode_4_cfg[] = {
};
/* mode 5 - fits in 8KB */
-static struct musb_fifo_cfg mode_5_cfg[] = {
+static const struct musb_fifo_cfg mode_5_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
@@ -1447,7 +1447,7 @@ fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
}
-static struct musb_fifo_cfg ep0_cfg = {
+static const struct musb_fifo_cfg ep0_cfg = {
.style = FIFO_RXTX, .maxpacket = 64,
};
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index eac1cde86be3..a6bd3e968cc7 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -629,7 +629,7 @@ static const struct musb_platform_ops sunxi_musb_ops = {
#define SUNXI_MUSB_RAM_BITS 11
/* Allwinner OTG supports up to 5 endpoints */
-static struct musb_fifo_cfg sunxi_musb_mode_cfg_5eps[] = {
+static const struct musb_fifo_cfg sunxi_musb_mode_cfg_5eps[] = {
MUSB_EP_FIFO_SINGLE(1, FIFO_TX, 512),
MUSB_EP_FIFO_SINGLE(1, FIFO_RX, 512),
MUSB_EP_FIFO_SINGLE(2, FIFO_TX, 512),
@@ -643,7 +643,7 @@ static struct musb_fifo_cfg sunxi_musb_mode_cfg_5eps[] = {
};
/* H3/V3s OTG supports only 4 endpoints */
-static struct musb_fifo_cfg sunxi_musb_mode_cfg_4eps[] = {
+static const struct musb_fifo_cfg sunxi_musb_mode_cfg_4eps[] = {
MUSB_EP_FIFO_SINGLE(1, FIFO_TX, 512),
MUSB_EP_FIFO_SINGLE(1, FIFO_RX, 512),
MUSB_EP_FIFO_SINGLE(2, FIFO_TX, 512),
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 7490f1798b46..7069dd3f4d0d 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -769,11 +769,9 @@ static int mxs_phy_probe(struct platform_device *pdev)
return PTR_ERR(base);
clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev,
- "can't get the clock, err=%ld", PTR_ERR(clk));
- return PTR_ERR(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "can't get the clock\n");
mxs_phy = devm_kzalloc(&pdev->dev, sizeof(*mxs_phy), GFP_KERNEL);
if (!mxs_phy)
diff --git a/drivers/usb/phy/phy-ulpi.c b/drivers/usb/phy/phy-ulpi.c
index e683a37e3a7a..4df63e67bb37 100644
--- a/drivers/usb/phy/phy-ulpi.c
+++ b/drivers/usb/phy/phy-ulpi.c
@@ -256,29 +256,6 @@ static void otg_ulpi_init(struct usb_phy *phy, struct usb_otg *otg,
}
struct usb_phy *
-otg_ulpi_create(struct usb_phy_io_ops *ops,
- unsigned int flags)
-{
- struct usb_phy *phy;
- struct usb_otg *otg;
-
- phy = kzalloc(sizeof(*phy), GFP_KERNEL);
- if (!phy)
- return NULL;
-
- otg = kzalloc(sizeof(*otg), GFP_KERNEL);
- if (!otg) {
- kfree(phy);
- return NULL;
- }
-
- otg_ulpi_init(phy, otg, ops, flags);
-
- return phy;
-}
-EXPORT_SYMBOL_GPL(otg_ulpi_create);
-
-struct usb_phy *
devm_otg_ulpi_create(struct device *dev,
struct usb_phy_io_ops *ops,
unsigned int flags)
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index ca3da79afd23..93710b762893 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -66,29 +66,16 @@
#define MOS_WDR_TIMEOUT 5000 /* default urb timeout */
-#define MOS_PORT1 0x0200
-#define MOS_PORT2 0x0300
-#define MOS_VENREG 0x0000
-#define MOS_MAX_PORT 0x02
-#define MOS_WRITE 0x0E
-#define MOS_READ 0x0D
-
/* Requests */
#define MCS_RD_RTYPE 0xC0
#define MCS_WR_RTYPE 0x40
#define MCS_RDREQ 0x0D
#define MCS_WRREQ 0x0E
-#define MCS_CTRL_TIMEOUT 500
#define VENDOR_READ_LENGTH (0x01)
-#define MAX_NAME_LEN 64
-
#define ZLP_REG1 0x3A /* Zero_Flag_Reg1 58 */
#define ZLP_REG5 0x3E /* Zero_Flag_Reg5 62 */
-/* For higher baud Rates use TIOCEXBAUD */
-#define TIOCEXBAUD 0x5462
-
/*
* Vendor id and device id defines
*
diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
index 6263c4e61678..e01f3a42bde4 100644
--- a/drivers/usb/storage/alauda.c
+++ b/drivers/usb/storage/alauda.c
@@ -174,7 +174,7 @@ struct alauda_card_info {
unsigned char zoneshift; /* 1<<zs blocks per zone */
};
-static struct alauda_card_info alauda_card_ids[] = {
+static const struct alauda_card_info alauda_card_ids[] = {
/* NAND flash */
{ 0x6e, 20, 8, 4, 8}, /* 1 MB */
{ 0xe8, 20, 8, 4, 8}, /* 1 MB */
@@ -200,7 +200,7 @@ static struct alauda_card_info alauda_card_ids[] = {
{ 0,}
};
-static struct alauda_card_info *alauda_card_find_id(unsigned char id)
+static const struct alauda_card_info *alauda_card_find_id(unsigned char id)
{
int i;
@@ -383,7 +383,7 @@ static int alauda_init_media(struct us_data *us)
{
unsigned char *data = us->iobuf;
int ready = 0;
- struct alauda_card_info *media_info;
+ const struct alauda_card_info *media_info;
unsigned int num_zones;
while (ready == 0) {
@@ -1132,7 +1132,7 @@ static int alauda_transport(struct scsi_cmnd *srb, struct us_data *us)
int rc;
struct alauda_info *info = (struct alauda_info *) us->extra;
unsigned char *ptr = us->iobuf;
- static unsigned char inquiry_response[36] = {
+ static const unsigned char inquiry_response[36] = {
0x00, 0x80, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x00
};
diff --git a/drivers/usb/storage/datafab.c b/drivers/usb/storage/datafab.c
index bbfa2398b170..9ba369483c9b 100644
--- a/drivers/usb/storage/datafab.c
+++ b/drivers/usb/storage/datafab.c
@@ -319,7 +319,7 @@ static int datafab_determine_lun(struct us_data *us,
//
// There might be a better way of doing this?
- static unsigned char scommand[8] = { 0, 1, 0, 0, 0, 0xa0, 0xec, 1 };
+ static const unsigned char scommand[8] = { 0, 1, 0, 0, 0, 0xa0, 0xec, 1 };
unsigned char *command = us->iobuf;
unsigned char *buf;
int count = 0, rc;
@@ -384,7 +384,7 @@ static int datafab_id_device(struct us_data *us,
// to the ATA spec, 'Sector Count' isn't used but the Windows driver
// sets this bit so we do too...
//
- static unsigned char scommand[8] = { 0, 1, 0, 0, 0, 0xa0, 0xec, 1 };
+ static const unsigned char scommand[8] = { 0, 1, 0, 0, 0, 0xa0, 0xec, 1 };
unsigned char *command = us->iobuf;
unsigned char *reply;
int rc;
@@ -437,16 +437,16 @@ static int datafab_handle_mode_sense(struct us_data *us,
struct scsi_cmnd * srb,
int sense_6)
{
- static unsigned char rw_err_page[12] = {
+ static const unsigned char rw_err_page[12] = {
0x1, 0xA, 0x21, 1, 0, 0, 0, 0, 1, 0, 0, 0
};
- static unsigned char cache_page[12] = {
+ static const unsigned char cache_page[12] = {
0x8, 0xA, 0x1, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
- static unsigned char rbac_page[12] = {
+ static const unsigned char rbac_page[12] = {
0x1B, 0xA, 0, 0x81, 0, 0, 0, 0, 0, 0, 0, 0
};
- static unsigned char timer_page[8] = {
+ static const unsigned char timer_page[8] = {
0x1C, 0x6, 0, 0, 0, 0
};
unsigned char pc, page_code;
@@ -550,7 +550,7 @@ static int datafab_transport(struct scsi_cmnd *srb, struct us_data *us)
int rc;
unsigned long block, blocks;
unsigned char *ptr = us->iobuf;
- static unsigned char inquiry_reply[8] = {
+ static const unsigned char inquiry_reply[8] = {
0x00, 0x80, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x00
};
diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
index f8f9ce8dc710..b243bd5521a6 100644
--- a/drivers/usb/storage/initializers.c
+++ b/drivers/usb/storage/initializers.c
@@ -54,7 +54,7 @@ int usb_stor_ucr61s2b_init(struct us_data *us)
struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap*) us->iobuf;
int res;
unsigned int partial;
- static char init_string[] = "\xec\x0a\x06\x00$PCCHIPS";
+ static const char init_string[] = "\xec\x0a\x06\x00$PCCHIPS";
usb_stor_dbg(us, "Sending UCR-61S2B initialization packet...\n");
diff --git a/drivers/usb/storage/jumpshot.c b/drivers/usb/storage/jumpshot.c
index 39ca84d68591..089c6f8ac85f 100644
--- a/drivers/usb/storage/jumpshot.c
+++ b/drivers/usb/storage/jumpshot.c
@@ -367,16 +367,16 @@ static int jumpshot_handle_mode_sense(struct us_data *us,
struct scsi_cmnd * srb,
int sense_6)
{
- static unsigned char rw_err_page[12] = {
+ static const unsigned char rw_err_page[12] = {
0x1, 0xA, 0x21, 1, 0, 0, 0, 0, 1, 0, 0, 0
};
- static unsigned char cache_page[12] = {
+ static const unsigned char cache_page[12] = {
0x8, 0xA, 0x1, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
- static unsigned char rbac_page[12] = {
+ static const unsigned char rbac_page[12] = {
0x1B, 0xA, 0, 0x81, 0, 0, 0, 0, 0, 0, 0, 0
};
- static unsigned char timer_page[8] = {
+ static const unsigned char timer_page[8] = {
0x1C, 0x6, 0, 0, 0, 0
};
unsigned char pc, page_code;
@@ -477,7 +477,7 @@ static int jumpshot_transport(struct scsi_cmnd *srb, struct us_data *us)
int rc;
unsigned long block, blocks;
unsigned char *ptr = us->iobuf;
- static unsigned char inquiry_response[8] = {
+ static const unsigned char inquiry_response[8] = {
0x00, 0x80, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x00
};
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index 2a82ed7b68ea..4e516b445136 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -191,7 +191,7 @@ MODULE_DEVICE_TABLE(usb, realtek_cr_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev realtek_cr_unusual_dev_list[] = {
+static const struct us_unusual_dev realtek_cr_unusual_dev_list[] = {
# include "unusual_realtek.h"
{} /* Terminating entry */
};
@@ -797,10 +797,10 @@ static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
{
struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra);
static int card_first_show = 1;
- static u8 media_not_present[] = { 0x70, 0, 0x02, 0, 0, 0, 0,
+ static const u8 media_not_present[] = { 0x70, 0, 0x02, 0, 0, 0, 0,
10, 0, 0, 0, 0, 0x3A, 0, 0, 0, 0, 0
};
- static u8 invalid_cmd_field[] = { 0x70, 0, 0x05, 0, 0, 0, 0,
+ static const u8 invalid_cmd_field[] = { 0x70, 0, 0x05, 0, 0, 0, 0,
10, 0, 0, 0, 0, 0x24, 0, 0, 0, 0, 0
};
int ret;
diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c
index d21ce3466e25..e66b920e99e2 100644
--- a/drivers/usb/storage/sddr09.c
+++ b/drivers/usb/storage/sddr09.c
@@ -144,7 +144,7 @@ static inline char *nand_flash_manufacturer(int manuf_id) {
* 256 MB NAND flash has a 5-byte ID with 2nd byte 0xaa, 0xba, 0xca or 0xda.
*/
-static struct nand_flash_dev nand_flash_ids[] = {
+static const struct nand_flash_dev nand_flash_ids[] = {
/* NAND flash */
{ 0x6e, 20, 8, 4, 8, 2}, /* 1 MB */
{ 0xe8, 20, 8, 4, 8, 2}, /* 1 MB */
@@ -169,7 +169,7 @@ static struct nand_flash_dev nand_flash_ids[] = {
{ 0,}
};
-static struct nand_flash_dev *
+static const struct nand_flash_dev *
nand_find_id(unsigned char id) {
int i;
@@ -1133,9 +1133,9 @@ sddr09_reset(struct us_data *us) {
}
#endif
-static struct nand_flash_dev *
+static const struct nand_flash_dev *
sddr09_get_cardinfo(struct us_data *us, unsigned char flags) {
- struct nand_flash_dev *cardinfo;
+ const struct nand_flash_dev *cardinfo;
unsigned char deviceID[4];
char blurbtxt[256];
int result;
@@ -1545,12 +1545,12 @@ static int sddr09_transport(struct scsi_cmnd *srb, struct us_data *us)
struct sddr09_card_info *info;
- static unsigned char inquiry_response[8] = {
+ static const unsigned char inquiry_response[8] = {
0x00, 0x80, 0x00, 0x02, 0x1F, 0x00, 0x00, 0x00
};
/* note: no block descriptor support */
- static unsigned char mode_page_01[19] = {
+ static const unsigned char mode_page_01[19] = {
0x00, 0x0F, 0x00, 0x0, 0x0, 0x0, 0x00,
0x01, 0x0A,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
@@ -1584,7 +1584,7 @@ static int sddr09_transport(struct scsi_cmnd *srb, struct us_data *us)
}
if (srb->cmnd[0] == READ_CAPACITY) {
- struct nand_flash_dev *cardinfo;
+ const struct nand_flash_dev *cardinfo;
sddr09_get_wp(us, info); /* read WP bit */
diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c
index d5cdff30f6f3..b323f0a36260 100644
--- a/drivers/usb/storage/sddr55.c
+++ b/drivers/usb/storage/sddr55.c
@@ -775,11 +775,11 @@ static void sddr55_card_info_destructor(void *extra) {
static int sddr55_transport(struct scsi_cmnd *srb, struct us_data *us)
{
int result;
- static unsigned char inquiry_response[8] = {
+ static const unsigned char inquiry_response[8] = {
0x00, 0x80, 0x00, 0x02, 0x1F, 0x00, 0x00, 0x00
};
// write-protected for now, no block descriptor support
- static unsigned char mode_page_01[20] = {
+ static const unsigned char mode_page_01[20] = {
0x0, 0x12, 0x00, 0x80, 0x0, 0x0, 0x0, 0x0,
0x01, 0x0A,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index c33cbf177e6f..27faa0ead11d 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -1683,7 +1683,7 @@ static int usbat_flash_transport(struct scsi_cmnd * srb, struct us_data *us)
struct usbat_info *info = (struct usbat_info *) (us->extra);
unsigned long block, blocks;
unsigned char *ptr = us->iobuf;
- static unsigned char inquiry_response[36] = {
+ static const unsigned char inquiry_response[36] = {
0x00, 0x80, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x00
};
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index e6bc8ecaecbb..1aa1bd26c81f 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -528,7 +528,7 @@ static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
u32 sector;
	/* To report "Medium Error: Record Not Found" */
- static unsigned char record_not_found[18] = {
+ static const unsigned char record_not_found[18] = {
[0] = 0x70, /* current error */
[2] = MEDIUM_ERROR, /* = 0x03 */
[7] = 0x0a, /* additional length */
diff --git a/drivers/usb/typec/altmodes/thunderbolt.c b/drivers/usb/typec/altmodes/thunderbolt.c
index 1b475b1d98e7..6eadf7835f8f 100644
--- a/drivers/usb/typec/altmodes/thunderbolt.c
+++ b/drivers/usb/typec/altmodes/thunderbolt.c
@@ -112,7 +112,7 @@ static void tbt_altmode_work(struct work_struct *work)
return;
disable_plugs:
- for (int i = TYPEC_PLUG_SOP_PP; i > 0; --i) {
+ for (int i = TYPEC_PLUG_SOP_PP; i >= 0; --i) {
if (tbt->plug[i])
typec_altmode_put_plug(tbt->plug[i]);
@@ -143,7 +143,7 @@ static int tbt_enter_modes_ordered(struct typec_altmode *alt)
if (tbt->plug[TYPEC_PLUG_SOP_P]) {
ret = typec_cable_altmode_enter(alt, TYPEC_PLUG_SOP_P, NULL);
if (ret < 0) {
- for (int i = TYPEC_PLUG_SOP_PP; i > 0; --i) {
+ for (int i = TYPEC_PLUG_SOP_PP; i >= 0; --i) {
if (tbt->plug[i])
typec_altmode_put_plug(tbt->plug[i]);
@@ -324,7 +324,7 @@ static void tbt_altmode_remove(struct typec_altmode *alt)
{
struct tbt_altmode *tbt = typec_altmode_get_drvdata(alt);
- for (int i = TYPEC_PLUG_SOP_PP; i > 0; --i) {
+ for (int i = TYPEC_PLUG_SOP_PP; i >= 0; --i) {
if (tbt->plug[i])
typec_altmode_put_plug(tbt->plug[i]);
}
@@ -351,10 +351,10 @@ static bool tbt_ready(struct typec_altmode *alt)
*/
for (int i = 0; i < TYPEC_PLUG_SOP_PP + 1; i++) {
plug = typec_altmode_get_plug(tbt->alt, i);
- if (IS_ERR(plug))
+ if (!plug)
continue;
- if (!plug || plug->svid != USB_TYPEC_TBT_SID)
+ if (plug->svid != USB_TYPEC_TBT_SID)
break;
plug->desc = "Thunderbolt3";
diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig
index 67381b4ef4f6..6dd8f961b593 100644
--- a/drivers/usb/typec/mux/Kconfig
+++ b/drivers/usb/typec/mux/Kconfig
@@ -56,6 +56,16 @@ config TYPEC_MUX_NB7VPQ904M
	  Say Y or M if your system has an On Semiconductor NB7VPQ904M Type-C
redriver chip found on some devices with a Type-C port.
+config TYPEC_MUX_PS883X
+ tristate "Parade PS883x Type-C retimer driver"
+ depends on I2C
+ depends on DRM || DRM=n
+ select DRM_AUX_BRIDGE if DRM_BRIDGE && OF
+ select REGMAP_I2C
+ help
+ Say Y or M if your system has a Parade PS883x Type-C retimer chip
+ found on some devices with a Type-C port.
+
config TYPEC_MUX_PTN36502
tristate "NXP PTN36502 Type-C redriver driver"
depends on I2C
diff --git a/drivers/usb/typec/mux/Makefile b/drivers/usb/typec/mux/Makefile
index 60879446da93..b4f599eb5053 100644
--- a/drivers/usb/typec/mux/Makefile
+++ b/drivers/usb/typec/mux/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_TYPEC_MUX_PI3USB30532) += pi3usb30532.o
obj-$(CONFIG_TYPEC_MUX_INTEL_PMC) += intel_pmc_mux.o
obj-$(CONFIG_TYPEC_MUX_IT5205) += it5205.o
obj-$(CONFIG_TYPEC_MUX_NB7VPQ904M) += nb7vpq904m.o
+obj-$(CONFIG_TYPEC_MUX_PS883X) += ps883x.o
obj-$(CONFIG_TYPEC_MUX_PTN36502) += ptn36502.o
obj-$(CONFIG_TYPEC_MUX_TUSB1046) += tusb1046.o
obj-$(CONFIG_TYPEC_MUX_WCD939X_USBSS) += wcd939x-usbss.o
diff --git a/drivers/usb/typec/mux/ps883x.c b/drivers/usb/typec/mux/ps883x.c
new file mode 100644
index 000000000000..ad59babf7cce
--- /dev/null
+++ b/drivers/usb/typec/mux/ps883x.c
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Parade ps883x usb retimer driver
+ *
+ * Copyright (C) 2024 Linaro Ltd.
+ */
+
+#include <drm/bridge/aux-bridge.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb/typec_altmode.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+#include <linux/usb/typec_retimer.h>
+
+#define REG_USB_PORT_CONN_STATUS_0 0x00
+
+#define CONN_STATUS_0_CONNECTION_PRESENT BIT(0)
+#define CONN_STATUS_0_ORIENTATION_REVERSED BIT(1)
+#define CONN_STATUS_0_USB_3_1_CONNECTED BIT(5)
+
+#define REG_USB_PORT_CONN_STATUS_1 0x01
+
+#define CONN_STATUS_1_DP_CONNECTED BIT(0)
+#define CONN_STATUS_1_DP_SINK_REQUESTED BIT(1)
+#define CONN_STATUS_1_DP_PIN_ASSIGNMENT_C_D BIT(2)
+#define CONN_STATUS_1_DP_HPD_LEVEL BIT(7)
+
+#define REG_USB_PORT_CONN_STATUS_2 0x02
+
+struct ps883x_retimer {
+ struct i2c_client *client;
+ struct gpio_desc *reset_gpio;
+ struct regmap *regmap;
+ struct typec_switch_dev *sw;
+ struct typec_retimer *retimer;
+ struct clk *xo_clk;
+ struct regulator *vdd_supply;
+ struct regulator *vdd33_supply;
+ struct regulator *vdd33_cap_supply;
+ struct regulator *vddat_supply;
+ struct regulator *vddar_supply;
+ struct regulator *vddio_supply;
+
+ struct typec_switch *typec_switch;
+ struct typec_mux *typec_mux;
+
+	struct mutex lock; /* serialize retimer and switch state updates */
+
+ enum typec_orientation orientation;
+ unsigned long mode;
+ unsigned int svid;
+};
+
+static int ps883x_configure(struct ps883x_retimer *retimer, int cfg0,
+ int cfg1, int cfg2)
+{
+ struct device *dev = &retimer->client->dev;
+ int ret;
+
+ ret = regmap_write(retimer->regmap, REG_USB_PORT_CONN_STATUS_0, cfg0);
+ if (ret) {
+ dev_err(dev, "failed to write conn_status_0: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(retimer->regmap, REG_USB_PORT_CONN_STATUS_1, cfg1);
+ if (ret) {
+ dev_err(dev, "failed to write conn_status_1: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(retimer->regmap, REG_USB_PORT_CONN_STATUS_2, cfg2);
+ if (ret) {
+ dev_err(dev, "failed to write conn_status_2: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ps883x_set(struct ps883x_retimer *retimer)
+{
+ int cfg0 = CONN_STATUS_0_CONNECTION_PRESENT;
+ int cfg1 = 0x00;
+ int cfg2 = 0x00;
+
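+	/* with no orientation or in the SAFE state, advertise only a bare connection */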
+ if (retimer->orientation == TYPEC_ORIENTATION_NONE ||
+ retimer->mode == TYPEC_STATE_SAFE) {
+ return ps883x_configure(retimer, cfg0, cfg1, cfg2);
+ }
+
+ if (retimer->mode != TYPEC_STATE_USB && retimer->svid != USB_TYPEC_DP_SID)
+ return -EINVAL;
+
+ if (retimer->orientation == TYPEC_ORIENTATION_REVERSE)
+ cfg0 |= CONN_STATUS_0_ORIENTATION_REVERSED;
+
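+	/* map the active Type-C mode to the retimer's DP/USB connection status bits */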
+ switch (retimer->mode) {
+ case TYPEC_STATE_USB:
+ cfg0 |= CONN_STATUS_0_USB_3_1_CONNECTED;
+ break;
+
+ case TYPEC_DP_STATE_C:
+ cfg1 = CONN_STATUS_1_DP_CONNECTED |
+ CONN_STATUS_1_DP_SINK_REQUESTED |
+ CONN_STATUS_1_DP_PIN_ASSIGNMENT_C_D |
+ CONN_STATUS_1_DP_HPD_LEVEL;
+ break;
+
+ case TYPEC_DP_STATE_D:
+ cfg0 |= CONN_STATUS_0_USB_3_1_CONNECTED;
+ cfg1 = CONN_STATUS_1_DP_CONNECTED |
+ CONN_STATUS_1_DP_SINK_REQUESTED |
+ CONN_STATUS_1_DP_PIN_ASSIGNMENT_C_D |
+ CONN_STATUS_1_DP_HPD_LEVEL;
+ break;
+
+ case TYPEC_DP_STATE_E:
+ cfg1 = CONN_STATUS_1_DP_CONNECTED |
+ CONN_STATUS_1_DP_HPD_LEVEL;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ps883x_configure(retimer, cfg0, cfg1, cfg2);
+}
+
+static int ps883x_sw_set(struct typec_switch_dev *sw,
+ enum typec_orientation orientation)
+{
+ struct ps883x_retimer *retimer = typec_switch_get_drvdata(sw);
+ int ret = 0;
+
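+	/* forward the orientation to the downstream switch before updating our own state */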
+ ret = typec_switch_set(retimer->typec_switch, orientation);
+ if (ret)
+ return ret;
+
+ mutex_lock(&retimer->lock);
+
+ if (retimer->orientation != orientation) {
+ retimer->orientation = orientation;
+
+ ret = ps883x_set(retimer);
+ }
+
+ mutex_unlock(&retimer->lock);
+
+ return ret;
+}
+
+static int ps883x_retimer_set(struct typec_retimer *rtmr,
+ struct typec_retimer_state *state)
+{
+ struct ps883x_retimer *retimer = typec_retimer_get_drvdata(rtmr);
+ struct typec_mux_state mux_state;
+ int ret = 0;
+
+ mutex_lock(&retimer->lock);
+
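+	/* only touch the hardware when the mux state actually changed */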
+ if (state->mode != retimer->mode) {
+ retimer->mode = state->mode;
+
+ if (state->alt)
+ retimer->svid = state->alt->svid;
+ else
+ retimer->svid = 0;
+
+ ret = ps883x_set(retimer);
+ }
+
+ mutex_unlock(&retimer->lock);
+
+ if (ret)
+ return ret;
+
+ mux_state.alt = state->alt;
+ mux_state.data = state->data;
+ mux_state.mode = state->mode;
+
+ return typec_mux_set(retimer->typec_mux, &mux_state);
+}
+
+static int ps883x_enable_vregs(struct ps883x_retimer *retimer)
+{
+ struct device *dev = &retimer->client->dev;
+ int ret;
+
+ ret = regulator_enable(retimer->vdd33_supply);
+ if (ret) {
+ dev_err(dev, "cannot enable VDD 3.3V regulator: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(retimer->vdd33_cap_supply);
+ if (ret) {
+ dev_err(dev, "cannot enable VDD 3.3V CAP regulator: %d\n", ret);
+ goto err_vdd33_disable;
+ }
+
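+	/* give the 3.3V rails time to settle before enabling the core supplies */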
+ usleep_range(4000, 10000);
+
+ ret = regulator_enable(retimer->vdd_supply);
+ if (ret) {
+ dev_err(dev, "cannot enable VDD regulator: %d\n", ret);
+ goto err_vdd33_cap_disable;
+ }
+
+ ret = regulator_enable(retimer->vddar_supply);
+ if (ret) {
+ dev_err(dev, "cannot enable VDD AR regulator: %d\n", ret);
+ goto err_vdd_disable;
+ }
+
+ ret = regulator_enable(retimer->vddat_supply);
+ if (ret) {
+ dev_err(dev, "cannot enable VDD AT regulator: %d\n", ret);
+ goto err_vddar_disable;
+ }
+
+ ret = regulator_enable(retimer->vddio_supply);
+ if (ret) {
+ dev_err(dev, "cannot enable VDD IO regulator: %d\n", ret);
+ goto err_vddat_disable;
+ }
+
+ return 0;
+
+err_vddat_disable:
+ regulator_disable(retimer->vddat_supply);
+err_vddar_disable:
+ regulator_disable(retimer->vddar_supply);
+err_vdd_disable:
+ regulator_disable(retimer->vdd_supply);
+err_vdd33_cap_disable:
+ regulator_disable(retimer->vdd33_cap_supply);
+err_vdd33_disable:
+ regulator_disable(retimer->vdd33_supply);
+
+ return ret;
+}
+
+static void ps883x_disable_vregs(struct ps883x_retimer *retimer)
+{
+ regulator_disable(retimer->vddio_supply);
+ regulator_disable(retimer->vddat_supply);
+ regulator_disable(retimer->vddar_supply);
+ regulator_disable(retimer->vdd_supply);
+ regulator_disable(retimer->vdd33_cap_supply);
+ regulator_disable(retimer->vdd33_supply);
+}
+
+static int ps883x_get_vregs(struct ps883x_retimer *retimer)
+{
+ struct device *dev = &retimer->client->dev;
+
+ retimer->vdd_supply = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(retimer->vdd_supply))
+ return dev_err_probe(dev, PTR_ERR(retimer->vdd_supply),
+ "failed to get VDD\n");
+
+ retimer->vdd33_supply = devm_regulator_get(dev, "vdd33");
+ if (IS_ERR(retimer->vdd33_supply))
+ return dev_err_probe(dev, PTR_ERR(retimer->vdd33_supply),
+ "failed to get VDD 3.3V\n");
+
+ retimer->vdd33_cap_supply = devm_regulator_get(dev, "vdd33-cap");
+ if (IS_ERR(retimer->vdd33_cap_supply))
+ return dev_err_probe(dev, PTR_ERR(retimer->vdd33_cap_supply),
+ "failed to get VDD CAP 3.3V\n");
+
+ retimer->vddat_supply = devm_regulator_get(dev, "vddat");
+ if (IS_ERR(retimer->vddat_supply))
+ return dev_err_probe(dev, PTR_ERR(retimer->vddat_supply),
+ "failed to get VDD AT\n");
+
+ retimer->vddar_supply = devm_regulator_get(dev, "vddar");
+ if (IS_ERR(retimer->vddar_supply))
+ return dev_err_probe(dev, PTR_ERR(retimer->vddar_supply),
+ "failed to get VDD AR\n");
+
+ retimer->vddio_supply = devm_regulator_get(dev, "vddio");
+ if (IS_ERR(retimer->vddio_supply))
+ return dev_err_probe(dev, PTR_ERR(retimer->vddio_supply),
+ "failed to get VDD IO\n");
+
+ return 0;
+}
+
+static const struct regmap_config ps883x_retimer_regmap = {
+ .max_register = 0x1f,
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int ps883x_retimer_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct typec_switch_desc sw_desc = { };
+ struct typec_retimer_desc rtmr_desc = { };
+ struct ps883x_retimer *retimer;
+ unsigned int val;
+ int ret;
+
+ retimer = devm_kzalloc(dev, sizeof(*retimer), GFP_KERNEL);
+ if (!retimer)
+ return -ENOMEM;
+
+ retimer->client = client;
+
+ mutex_init(&retimer->lock);
+
+ retimer->regmap = devm_regmap_init_i2c(client, &ps883x_retimer_regmap);
+ if (IS_ERR(retimer->regmap))
+ return dev_err_probe(dev, PTR_ERR(retimer->regmap),
+ "failed to allocate register map\n");
+
+ ret = ps883x_get_vregs(retimer);
+ if (ret)
+ return ret;
+
+ retimer->xo_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(retimer->xo_clk))
+ return dev_err_probe(dev, PTR_ERR(retimer->xo_clk),
+ "failed to get xo clock\n");
+
+ retimer->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(retimer->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(retimer->reset_gpio),
+ "failed to get reset gpio\n");
+
+ retimer->typec_switch = typec_switch_get(dev);
+ if (IS_ERR(retimer->typec_switch))
+ return dev_err_probe(dev, PTR_ERR(retimer->typec_switch),
+ "failed to acquire orientation-switch\n");
+
+ retimer->typec_mux = typec_mux_get(dev);
+ if (IS_ERR(retimer->typec_mux)) {
+ ret = dev_err_probe(dev, PTR_ERR(retimer->typec_mux),
+ "failed to acquire mode-mux\n");
+ goto err_switch_put;
+ }
+
+ ret = drm_aux_bridge_register(dev);
+ if (ret)
+ goto err_mux_put;
+
+ ret = ps883x_enable_vregs(retimer);
+ if (ret)
+ goto err_mux_put;
+
+ ret = clk_prepare_enable(retimer->xo_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable XO: %d\n", ret);
+ goto err_vregs_disable;
+ }
+
+ /* skip resetting if already configured */
+ if (regmap_test_bits(retimer->regmap, REG_USB_PORT_CONN_STATUS_0,
+ CONN_STATUS_0_CONNECTION_PRESENT) == 1) {
+ gpiod_direction_output(retimer->reset_gpio, 0);
+ } else {
+ gpiod_direction_output(retimer->reset_gpio, 1);
+
+		/* delay between VDD IO supply enable and reset release */
+ usleep_range(4000, 14000);
+
+ gpiod_set_value(retimer->reset_gpio, 0);
+
+ /* firmware initialization delay */
+ msleep(60);
+
+ /* make sure device is accessible */
+ ret = regmap_read(retimer->regmap, REG_USB_PORT_CONN_STATUS_0,
+ &val);
+ if (ret) {
+ dev_err(dev, "failed to read conn_status_0: %d\n", ret);
+ if (ret == -ENXIO)
+ ret = -EIO;
+ goto err_clk_disable;
+ }
+ }
+
+ sw_desc.drvdata = retimer;
+ sw_desc.fwnode = dev_fwnode(dev);
+ sw_desc.set = ps883x_sw_set;
+
+ retimer->sw = typec_switch_register(dev, &sw_desc);
+ if (IS_ERR(retimer->sw)) {
+ ret = PTR_ERR(retimer->sw);
+ dev_err(dev, "failed to register typec switch: %d\n", ret);
+ goto err_clk_disable;
+ }
+
+ rtmr_desc.drvdata = retimer;
+ rtmr_desc.fwnode = dev_fwnode(dev);
+ rtmr_desc.set = ps883x_retimer_set;
+
+ retimer->retimer = typec_retimer_register(dev, &rtmr_desc);
+ if (IS_ERR(retimer->retimer)) {
+ ret = PTR_ERR(retimer->retimer);
+ dev_err(dev, "failed to register typec retimer: %d\n", ret);
+ goto err_switch_unregister;
+ }
+
+ return 0;
+
+err_switch_unregister:
+ typec_switch_unregister(retimer->sw);
+err_clk_disable:
+ clk_disable_unprepare(retimer->xo_clk);
+err_vregs_disable:
+ gpiod_set_value(retimer->reset_gpio, 1);
+ ps883x_disable_vregs(retimer);
+err_mux_put:
+ typec_mux_put(retimer->typec_mux);
+err_switch_put:
+ typec_switch_put(retimer->typec_switch);
+
+ return ret;
+}
+
+static void ps883x_retimer_remove(struct i2c_client *client)
+{
+ struct ps883x_retimer *retimer = i2c_get_clientdata(client);
+
+ typec_retimer_unregister(retimer->retimer);
+ typec_switch_unregister(retimer->sw);
+
+ gpiod_set_value(retimer->reset_gpio, 1);
+
+ clk_disable_unprepare(retimer->xo_clk);
+
+ ps883x_disable_vregs(retimer);
+
+ typec_mux_put(retimer->typec_mux);
+ typec_switch_put(retimer->typec_switch);
+}
+
+static const struct of_device_id ps883x_retimer_of_table[] = {
+ { .compatible = "parade,ps8830" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ps883x_retimer_of_table);
+
+static struct i2c_driver ps883x_retimer_driver = {
+ .driver = {
+ .name = "ps883x_retimer",
+ .of_match_table = ps883x_retimer_of_table,
+ },
+ .probe = ps883x_retimer_probe,
+ .remove = ps883x_retimer_remove,
+};
+
+module_i2c_driver(ps883x_retimer_driver);
+
+MODULE_DESCRIPTION("Parade ps883x Type-C Retimer driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/typec/ucsi/cros_ec_ucsi.c b/drivers/usb/typec/ucsi/cros_ec_ucsi.c
index c605c8616726..4ec1c6d22310 100644
--- a/drivers/usb/typec/ucsi/cros_ec_ucsi.c
+++ b/drivers/usb/typec/ucsi/cros_ec_ucsi.c
@@ -105,12 +105,13 @@ static int cros_ucsi_async_control(struct ucsi *ucsi, u64 cmd)
return 0;
}
-static int cros_ucsi_sync_control(struct ucsi *ucsi, u64 cmd)
+static int cros_ucsi_sync_control(struct ucsi *ucsi, u64 cmd, u32 *cci,
+ void *data, size_t size)
{
struct cros_ucsi_data *udata = ucsi_get_drvdata(ucsi);
int ret;
- ret = ucsi_sync_control_common(ucsi, cmd);
+ ret = ucsi_sync_control_common(ucsi, cmd, cci, data, size);
switch (ret) {
case -EBUSY:
/* EC may return -EBUSY if CCI.busy is set.
@@ -205,12 +206,19 @@ static int cros_ucsi_event(struct notifier_block *nb,
{
struct cros_ucsi_data *udata = container_of(nb, struct cros_ucsi_data, nb);
- if (!(host_event & PD_EVENT_PPM))
- return NOTIFY_OK;
+ if (host_event & PD_EVENT_INIT) {
+ /* Late init event received from ChromeOS EC. Treat this as a
+ * system resume to re-enable communication with the PPM.
+ */
+ dev_dbg(udata->dev, "Late PD init received\n");
+ ucsi_resume(udata->ucsi);
+ }
- dev_dbg(udata->dev, "UCSI notification received\n");
- flush_work(&udata->work);
- schedule_work(&udata->work);
+ if (host_event & PD_EVENT_PPM) {
+ dev_dbg(udata->dev, "UCSI notification received\n");
+ flush_work(&udata->work);
+ schedule_work(&udata->work);
+ }
return NOTIFY_OK;
}
diff --git a/drivers/usb/typec/ucsi/debugfs.c b/drivers/usb/typec/ucsi/debugfs.c
index 83ff23086d79..eae2b18a2d8a 100644
--- a/drivers/usb/typec/ucsi/debugfs.c
+++ b/drivers/usb/typec/ucsi/debugfs.c
@@ -28,11 +28,12 @@ static int ucsi_cmd(void *data, u64 val)
ucsi->debugfs->status = 0;
switch (UCSI_COMMAND(val)) {
- case UCSI_SET_UOM:
+ case UCSI_SET_CCOM:
case UCSI_SET_UOR:
case UCSI_SET_PDR:
case UCSI_CONNECTOR_RESET:
case UCSI_SET_SINK_PATH:
+ case UCSI_SET_NEW_CAM:
ret = ucsi_send_command(ucsi, val, NULL, 0);
break;
case UCSI_GET_CAPABILITY:
@@ -42,6 +43,9 @@ static int ucsi_cmd(void *data, u64 val)
case UCSI_GET_PDOS:
case UCSI_GET_CABLE_PROPERTY:
case UCSI_GET_CONNECTOR_STATUS:
+ case UCSI_GET_ERROR_STATUS:
+ case UCSI_GET_CAM_CS:
+ case UCSI_GET_LPM_PPM_INFO:
ret = ucsi_send_command(ucsi, val,
&ucsi->debugfs->response,
sizeof(ucsi->debugfs->response));
diff --git a/drivers/usb/typec/ucsi/trace.c b/drivers/usb/typec/ucsi/trace.c
index cb62ad835761..596a9542d401 100644
--- a/drivers/usb/typec/ucsi/trace.c
+++ b/drivers/usb/typec/ucsi/trace.c
@@ -12,7 +12,7 @@ static const char * const ucsi_cmd_strs[] = {
[UCSI_SET_NOTIFICATION_ENABLE] = "SET_NOTIFICATION_ENABLE",
[UCSI_GET_CAPABILITY] = "GET_CAPABILITY",
[UCSI_GET_CONNECTOR_CAPABILITY] = "GET_CONNECTOR_CAPABILITY",
- [UCSI_SET_UOM] = "SET_UOM",
+ [UCSI_SET_CCOM] = "SET_CCOM",
[UCSI_SET_UOR] = "SET_UOR",
[UCSI_SET_PDM] = "SET_PDM",
[UCSI_SET_PDR] = "SET_PDR",
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 2a2915b0a645..e8c7e9dc4930 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -55,7 +55,8 @@ void ucsi_notify_common(struct ucsi *ucsi, u32 cci)
}
EXPORT_SYMBOL_GPL(ucsi_notify_common);
-int ucsi_sync_control_common(struct ucsi *ucsi, u64 command)
+int ucsi_sync_control_common(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size)
{
bool ack = UCSI_COMMAND(command) == UCSI_ACK_CC_CI;
int ret;
@@ -80,6 +81,13 @@ out_clear_bit:
else
clear_bit(COMMAND_PENDING, &ucsi->flags);
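+	/* on success, hand back CCI and any MESSAGE IN payload to the caller */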
+ if (!ret && cci)
+ ret = ucsi->ops->read_cci(ucsi, cci);
+
+ if (!ret && data &&
+ (*cci & UCSI_CCI_COMMAND_COMPLETE))
+ ret = ucsi->ops->read_message_in(ucsi, data, size);
+
return ret;
}
EXPORT_SYMBOL_GPL(ucsi_sync_control_common);
@@ -95,7 +103,7 @@ static int ucsi_acknowledge(struct ucsi *ucsi, bool conn_ack)
ctrl |= UCSI_ACK_CONNECTOR_CHANGE;
}
- return ucsi->ops->sync_control(ucsi, ctrl);
+ return ucsi->ops->sync_control(ucsi, ctrl, NULL, NULL, 0);
}
static int ucsi_run_command(struct ucsi *ucsi, u64 command, u32 *cci,
@@ -108,9 +116,7 @@ static int ucsi_run_command(struct ucsi *ucsi, u64 command, u32 *cci,
if (size > UCSI_MAX_DATA_LENGTH(ucsi))
return -EINVAL;
- ret = ucsi->ops->sync_control(ucsi, command);
- if (ucsi->ops->read_cci(ucsi, cci))
- return -EIO;
+ ret = ucsi->ops->sync_control(ucsi, command, cci, data, size);
if (*cci & UCSI_CCI_BUSY)
return ucsi_run_command(ucsi, UCSI_CANCEL, cci, NULL, 0, false) ?: -EBUSY;
@@ -127,9 +133,6 @@ static int ucsi_run_command(struct ucsi *ucsi, u64 command, u32 *cci,
else
err = 0;
- if (!err && data && UCSI_CCI_LENGTH(*cci))
- err = ucsi->ops->read_message_in(ucsi, data, size);
-
/*
* Don't ACK connection change if there was an error.
*/
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 28780acc4af2..3a2c1762bec1 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -79,7 +79,8 @@ struct ucsi_operations {
int (*read_cci)(struct ucsi *ucsi, u32 *cci);
int (*poll_cci)(struct ucsi *ucsi, u32 *cci);
int (*read_message_in)(struct ucsi *ucsi, void *val, size_t val_len);
- int (*sync_control)(struct ucsi *ucsi, u64 command);
+ int (*sync_control)(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size);
int (*async_control)(struct ucsi *ucsi, u64 command);
bool (*update_altmodes)(struct ucsi *ucsi, struct ucsi_altmode *orig,
struct ucsi_altmode *updated);
@@ -108,7 +109,7 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_GET_CAPABILITY_SIZE 128
#define UCSI_GET_CONNECTOR_CAPABILITY 0x07
#define UCSI_GET_CONNECTOR_CAPABILITY_SIZE 32
-#define UCSI_SET_UOM 0x08
+#define UCSI_SET_CCOM 0x08
#define UCSI_SET_UOR 0x09
#define UCSI_SET_PDM 0x0a
#define UCSI_SET_PDR 0x0b
@@ -123,7 +124,9 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_GET_CONNECTOR_STATUS_SIZE 152
#define UCSI_GET_ERROR_STATUS 0x13
#define UCSI_GET_PD_MESSAGE 0x15
+#define UCSI_GET_CAM_CS 0x18
#define UCSI_SET_SINK_PATH 0x1c
+#define UCSI_GET_LPM_PPM_INFO 0x22
#define UCSI_CONNECTOR_NUMBER(_num_) ((u64)(_num_) << 16)
#define UCSI_COMMAND(_cmd_) ((_cmd_) & 0xff)
@@ -531,7 +534,8 @@ void ucsi_altmode_update_active(struct ucsi_connector *con);
int ucsi_resume(struct ucsi *ucsi);
void ucsi_notify_common(struct ucsi *ucsi, u32 cci);
-int ucsi_sync_control_common(struct ucsi *ucsi, u64 command);
+int ucsi_sync_control_common(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size);
#if IS_ENABLED(CONFIG_POWER_SUPPLY)
int ucsi_register_port_psy(struct ucsi_connector *con);
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index ac1ebb5d9527..6b92f296e985 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -105,17 +105,23 @@ static const struct ucsi_operations ucsi_acpi_ops = {
.async_control = ucsi_acpi_async_control
};
-static int ucsi_gram_read_message_in(struct ucsi *ucsi, void *val, size_t val_len)
+static int ucsi_gram_sync_control(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *val, size_t len)
{
u16 bogus_change = UCSI_CONSTAT_POWER_LEVEL_CHANGE |
UCSI_CONSTAT_PDOS_CHANGE;
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
int ret;
- ret = ucsi_acpi_read_message_in(ucsi, val, val_len);
+ ret = ucsi_sync_control_common(ucsi, command, cci, val, len);
if (ret < 0)
return ret;
+ if (UCSI_COMMAND(ua->cmd) == UCSI_GET_PDOS &&
+ ua->cmd & UCSI_GET_PDOS_PARTNER_PDO(1) &&
+ ua->cmd & UCSI_GET_PDOS_SRC_PDOS)
+ ua->check_bogus_event = true;
+
if (UCSI_COMMAND(ua->cmd) == UCSI_GET_CONNECTOR_STATUS &&
ua->check_bogus_event) {
/* Clear the bogus change */
@@ -128,28 +134,11 @@ static int ucsi_gram_read_message_in(struct ucsi *ucsi, void *val, size_t val_le
return ret;
}
-static int ucsi_gram_sync_control(struct ucsi *ucsi, u64 command)
-{
- struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
- int ret;
-
- ret = ucsi_sync_control_common(ucsi, command);
- if (ret < 0)
- return ret;
-
- if (UCSI_COMMAND(ua->cmd) == UCSI_GET_PDOS &&
- ua->cmd & UCSI_GET_PDOS_PARTNER_PDO(1) &&
- ua->cmd & UCSI_GET_PDOS_SRC_PDOS)
- ua->check_bogus_event = true;
-
- return ret;
-}
-
static const struct ucsi_operations ucsi_gram_ops = {
.read_version = ucsi_acpi_read_version,
.read_cci = ucsi_acpi_read_cci,
.poll_cci = ucsi_acpi_poll_cci,
- .read_message_in = ucsi_gram_read_message_in,
+ .read_message_in = ucsi_acpi_read_message_in,
.sync_control = ucsi_gram_sync_control,
.async_control = ucsi_acpi_async_control
};
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 4b1668733a4b..f01e4ef6619d 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -222,7 +222,6 @@ struct ucsi_ccg {
u16 fw_build;
struct work_struct pm_work;
- u64 last_cmd_sent;
bool has_multiple_dp;
struct ucsi_ccg_altmode orig[UCSI_MAX_ALTMODES];
struct ucsi_ccg_altmode updated[UCSI_MAX_ALTMODES];
@@ -538,9 +537,10 @@ static void ucsi_ccg_update_set_new_cam_cmd(struct ucsi_ccg *uc,
* first and then vdo=0x3
*/
static void ucsi_ccg_nvidia_altmode(struct ucsi_ccg *uc,
- struct ucsi_altmode *alt)
+ struct ucsi_altmode *alt,
+ u64 command)
{
- switch (UCSI_ALTMODE_OFFSET(uc->last_cmd_sent)) {
+ switch (UCSI_ALTMODE_OFFSET(command)) {
case NVIDIA_FTB_DP_OFFSET:
if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DBG_VDO)
alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DP_VDO |
@@ -578,37 +578,11 @@ static int ucsi_ccg_read_cci(struct ucsi *ucsi, u32 *cci)
static int ucsi_ccg_read_message_in(struct ucsi *ucsi, void *val, size_t val_len)
{
struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
- struct ucsi_capability *cap;
- struct ucsi_altmode *alt;
spin_lock(&uc->op_lock);
memcpy(val, uc->op_data.message_in, val_len);
spin_unlock(&uc->op_lock);
- switch (UCSI_COMMAND(uc->last_cmd_sent)) {
- case UCSI_GET_CURRENT_CAM:
- if (uc->has_multiple_dp)
- ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)val);
- break;
- case UCSI_GET_ALTERNATE_MODES:
- if (UCSI_ALTMODE_RECIPIENT(uc->last_cmd_sent) ==
- UCSI_RECIPIENT_SOP) {
- alt = val;
- if (alt[0].svid == USB_TYPEC_NVIDIA_VLINK_SID)
- ucsi_ccg_nvidia_altmode(uc, alt);
- }
- break;
- case UCSI_GET_CAPABILITY:
- if (uc->fw_build == CCG_FW_BUILD_NVIDIA_TEGRA) {
- cap = val;
- cap->features &= ~UCSI_CAP_ALT_MODE_DETAILS;
- }
- break;
- default:
- break;
- }
- uc->last_cmd_sent = 0;
-
return 0;
}
@@ -628,7 +602,8 @@ static int ucsi_ccg_async_control(struct ucsi *ucsi, u64 command)
return ccg_write(uc, reg, (u8 *)&command, sizeof(command));
}
-static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
+static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size)
{
struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
struct ucsi_connector *con;
@@ -638,11 +613,9 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
mutex_lock(&uc->lock);
pm_runtime_get_sync(uc->dev);
- uc->last_cmd_sent = command;
-
- if (UCSI_COMMAND(uc->last_cmd_sent) == UCSI_SET_NEW_CAM &&
+ if (UCSI_COMMAND(command) == UCSI_SET_NEW_CAM &&
uc->has_multiple_dp) {
- con_index = (uc->last_cmd_sent >> 16) &
+ con_index = (command >> 16) &
UCSI_CMD_CONNECTOR_MASK;
if (con_index == 0) {
ret = -EINVAL;
@@ -652,7 +625,31 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
ucsi_ccg_update_set_new_cam_cmd(uc, con, &command);
}
- ret = ucsi_sync_control_common(ucsi, command);
+ ret = ucsi_sync_control_common(ucsi, command, cci, data, size);
+
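+	/* post-process the response data for quirks handled by this glue driver */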
+ switch (UCSI_COMMAND(command)) {
+ case UCSI_GET_CURRENT_CAM:
+ if (uc->has_multiple_dp)
+ ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)data);
+ break;
+ case UCSI_GET_ALTERNATE_MODES:
+ if (UCSI_ALTMODE_RECIPIENT(command) == UCSI_RECIPIENT_SOP) {
+ struct ucsi_altmode *alt = data;
+
+ if (alt[0].svid == USB_TYPEC_NVIDIA_VLINK_SID)
+ ucsi_ccg_nvidia_altmode(uc, alt, command);
+ }
+ break;
+ case UCSI_GET_CAPABILITY:
+ if (uc->fw_build == CCG_FW_BUILD_NVIDIA_TEGRA) {
+ struct ucsi_capability *cap = data;
+
+ cap->features &= ~UCSI_CAP_ALT_MODE_DETAILS;
+ }
+ break;
+ default:
+ break;
+ }
err_put:
pm_runtime_put_sync(uc->dev);
@@ -1391,22 +1388,35 @@ static ssize_t do_flash_store(struct device *dev,
if (!flash)
return n;
- if (uc->fw_build == 0x0) {
- dev_err(dev, "fail to flash FW due to missing FW build info\n");
- return -EINVAL;
- }
-
schedule_work(&uc->work);
return n;
}
+static umode_t ucsi_ccg_attrs_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct ucsi_ccg *uc = i2c_get_clientdata(to_i2c_client(dev));
+
+ if (!uc->fw_build)
+ return 0;
+
+ return attr->mode;
+}
+
static DEVICE_ATTR_WO(do_flash);
static struct attribute *ucsi_ccg_attrs[] = {
&dev_attr_do_flash.attr,
NULL,
};
-ATTRIBUTE_GROUPS(ucsi_ccg);
+static struct attribute_group ucsi_ccg_attr_group = {
+ .attrs = ucsi_ccg_attrs,
+ .is_visible = ucsi_ccg_attrs_is_visible,
+};
+static const struct attribute_group *ucsi_ccg_groups[] = {
+ &ucsi_ccg_attr_group,
+ NULL,
+};
static int ucsi_ccg_probe(struct i2c_client *client)
{
@@ -1433,11 +1443,10 @@ static int ucsi_ccg_probe(struct i2c_client *client)
uc->fw_build = CCG_FW_BUILD_NVIDIA_TEGRA;
else if (!strcmp(fw_name, "nvidia,gpu"))
uc->fw_build = CCG_FW_BUILD_NVIDIA;
+ if (!uc->fw_build)
+ dev_err(uc->dev, "failed to get FW build information\n");
}
- if (!uc->fw_build)
- dev_err(uc->dev, "failed to get FW build information\n");
-
/* reset ccg device and initialize ucsi */
status = ucsi_ccg_init(uc);
if (status < 0) {
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index 8455f08f5d40..61424342c096 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -190,9 +190,12 @@ again:
klm->bcount = cpu_to_be32(klm_bcount(dmr->end - dmr->start));
preve = dmr->end;
} else {
+ u64 bcount = min_t(u64, dmr->start - preve, MAX_KLM_SIZE);
+
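+			/* fill the hole with null-mkey KLMs, MAX_KLM_SIZE bytes at a time */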
klm->key = cpu_to_be32(mvdev->res.null_mkey);
- klm->bcount = cpu_to_be32(klm_bcount(dmr->start - preve));
- preve = dmr->start;
+ klm->bcount = cpu_to_be32(klm_bcount(bcount));
+ preve += bcount;
+
goto again;
}
}
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 36099047560d..cccc49a08a1a 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -3884,6 +3884,9 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
ndev->mvdev.max_vqs = max_vqs;
mvdev = &ndev->mvdev;
mvdev->mdev = mdev;
+ /* cpu_to_mlx5vdpa16() below depends on this flag */
+ mvdev->actual_features =
+ (device_features & BIT_ULL(VIRTIO_F_VERSION_1));
ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL);
ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL);
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 7ae99691efdf..6a9a37351310 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -144,6 +144,7 @@ static struct workqueue_struct *vduse_irq_bound_wq;
static u32 allowed_device_id[] = {
VIRTIO_ID_BLOCK,
VIRTIO_ID_NET,
+ VIRTIO_ID_FS,
};
static inline struct vduse_dev *vdpa_to_vduse(struct vdpa_device *vdpa)
diff --git a/drivers/vfio/device_cdev.c b/drivers/vfio/device_cdev.c
index bb1817bd4ff3..281a8dc3ed49 100644
--- a/drivers/vfio/device_cdev.c
+++ b/drivers/vfio/device_cdev.c
@@ -162,9 +162,9 @@ void vfio_df_unbind_iommufd(struct vfio_device_file *df)
int vfio_df_ioctl_attach_pt(struct vfio_device_file *df,
struct vfio_device_attach_iommufd_pt __user *arg)
{
- struct vfio_device *device = df->device;
struct vfio_device_attach_iommufd_pt attach;
- unsigned long minsz;
+ struct vfio_device *device = df->device;
+ unsigned long minsz, xend = 0;
int ret;
minsz = offsetofend(struct vfio_device_attach_iommufd_pt, pt_id);
@@ -172,11 +172,34 @@ int vfio_df_ioctl_attach_pt(struct vfio_device_file *df,
if (copy_from_user(&attach, arg, minsz))
return -EFAULT;
- if (attach.argsz < minsz || attach.flags)
+ if (attach.argsz < minsz)
return -EINVAL;
+ if (attach.flags & ~VFIO_DEVICE_ATTACH_PASID)
+ return -EINVAL;
+
+ if (attach.flags & VFIO_DEVICE_ATTACH_PASID) {
+ if (!device->ops->pasid_attach_ioas)
+ return -EOPNOTSUPP;
+ xend = offsetofend(struct vfio_device_attach_iommufd_pt, pasid);
+ }
+
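+	/* the PASID flag extends the struct; copy the tail only when it is present */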
+ if (xend) {
+ if (attach.argsz < xend)
+ return -EINVAL;
+
+ if (copy_from_user((void *)&attach + minsz,
+ (void __user *)arg + minsz, xend - minsz))
+ return -EFAULT;
+ }
+
mutex_lock(&device->dev_set->lock);
- ret = device->ops->attach_ioas(device, &attach.pt_id);
+ if (attach.flags & VFIO_DEVICE_ATTACH_PASID)
+ ret = device->ops->pasid_attach_ioas(device,
+ attach.pasid,
+ &attach.pt_id);
+ else
+ ret = device->ops->attach_ioas(device, &attach.pt_id);
if (ret)
goto out_unlock;
@@ -198,20 +221,41 @@ out_unlock:
int vfio_df_ioctl_detach_pt(struct vfio_device_file *df,
struct vfio_device_detach_iommufd_pt __user *arg)
{
- struct vfio_device *device = df->device;
struct vfio_device_detach_iommufd_pt detach;
- unsigned long minsz;
+ struct vfio_device *device = df->device;
+ unsigned long minsz, xend = 0;
minsz = offsetofend(struct vfio_device_detach_iommufd_pt, flags);
if (copy_from_user(&detach, arg, minsz))
return -EFAULT;
- if (detach.argsz < minsz || detach.flags)
+ if (detach.argsz < minsz)
return -EINVAL;
+ if (detach.flags & ~VFIO_DEVICE_DETACH_PASID)
+ return -EINVAL;
+
+ if (detach.flags & VFIO_DEVICE_DETACH_PASID) {
+ if (!device->ops->pasid_detach_ioas)
+ return -EOPNOTSUPP;
+ xend = offsetofend(struct vfio_device_detach_iommufd_pt, pasid);
+ }
+
+ if (xend) {
+ if (detach.argsz < xend)
+ return -EINVAL;
+
+ if (copy_from_user((void *)&detach + minsz,
+ (void __user *)arg + minsz, xend - minsz))
+ return -EFAULT;
+ }
+
mutex_lock(&device->dev_set->lock);
- device->ops->detach_ioas(device);
+ if (detach.flags & VFIO_DEVICE_DETACH_PASID)
+ device->ops->pasid_detach_ioas(device, detach.pasid);
+ else
+ device->ops->detach_ioas(device);
mutex_unlock(&device->dev_set->lock);
return 0;
diff --git a/drivers/vfio/iommufd.c b/drivers/vfio/iommufd.c
index 516294fd901b..c8c3a2d53f86 100644
--- a/drivers/vfio/iommufd.c
+++ b/drivers/vfio/iommufd.c
@@ -119,16 +119,24 @@ int vfio_iommufd_physical_bind(struct vfio_device *vdev,
if (IS_ERR(idev))
return PTR_ERR(idev);
vdev->iommufd_device = idev;
+ ida_init(&vdev->pasids);
return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_bind);
void vfio_iommufd_physical_unbind(struct vfio_device *vdev)
{
+ int pasid;
+
lockdep_assert_held(&vdev->dev_set->lock);
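+	/* drop any PASID attachments that userspace left behind */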
+ while ((pasid = ida_find_first(&vdev->pasids)) >= 0) {
+ iommufd_device_detach(vdev->iommufd_device, pasid);
+ ida_free(&vdev->pasids, pasid);
+ }
+
if (vdev->iommufd_attached) {
- iommufd_device_detach(vdev->iommufd_device);
+ iommufd_device_detach(vdev->iommufd_device, IOMMU_NO_PASID);
vdev->iommufd_attached = false;
}
iommufd_device_unbind(vdev->iommufd_device);
@@ -146,9 +154,11 @@ int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
return -EINVAL;
if (vdev->iommufd_attached)
- rc = iommufd_device_replace(vdev->iommufd_device, pt_id);
+ rc = iommufd_device_replace(vdev->iommufd_device,
+ IOMMU_NO_PASID, pt_id);
else
- rc = iommufd_device_attach(vdev->iommufd_device, pt_id);
+ rc = iommufd_device_attach(vdev->iommufd_device,
+ IOMMU_NO_PASID, pt_id);
if (rc)
return rc;
vdev->iommufd_attached = true;
@@ -163,11 +173,53 @@ void vfio_iommufd_physical_detach_ioas(struct vfio_device *vdev)
if (WARN_ON(!vdev->iommufd_device) || !vdev->iommufd_attached)
return;
- iommufd_device_detach(vdev->iommufd_device);
+ iommufd_device_detach(vdev->iommufd_device, IOMMU_NO_PASID);
vdev->iommufd_attached = false;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_detach_ioas);
+int vfio_iommufd_physical_pasid_attach_ioas(struct vfio_device *vdev,
+ u32 pasid, u32 *pt_id)
+{
+ int rc;
+
+ lockdep_assert_held(&vdev->dev_set->lock);
+
+ if (WARN_ON(!vdev->iommufd_device))
+ return -EINVAL;
+
+ if (ida_exists(&vdev->pasids, pasid))
+ return iommufd_device_replace(vdev->iommufd_device,
+ pasid, pt_id);
+
+ rc = ida_alloc_range(&vdev->pasids, pasid, pasid, GFP_KERNEL);
+ if (rc < 0)
+ return rc;
+
+ rc = iommufd_device_attach(vdev->iommufd_device, pasid, pt_id);
+ if (rc)
+ ida_free(&vdev->pasids, pasid);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(vfio_iommufd_physical_pasid_attach_ioas);
+
+void vfio_iommufd_physical_pasid_detach_ioas(struct vfio_device *vdev,
+ u32 pasid)
+{
+ lockdep_assert_held(&vdev->dev_set->lock);
+
+ if (WARN_ON(!vdev->iommufd_device))
+ return;
+
+ if (!ida_exists(&vdev->pasids, pasid))
+ return;
+
+ iommufd_device_detach(vdev->iommufd_device, pasid);
+ ida_free(&vdev->pasids, pasid);
+}
+EXPORT_SYMBOL_GPL(vfio_iommufd_physical_pasid_detach_ioas);
+
/*
* The emulated standard ops mean that vfio_device is going to use the
* "mdev path" and will call vfio_pin_pages()/vfio_dma_rw(). Drivers using this
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index e727941f589d..5ba39f7623bb 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -111,9 +111,7 @@ static int vfio_pci_open_device(struct vfio_device *core_vdev)
if (ret)
return ret;
- if (vfio_pci_is_vga(pdev) &&
- pdev->vendor == PCI_VENDOR_ID_INTEL &&
- IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
+ if (vfio_pci_is_intel_display(pdev)) {
ret = vfio_pci_igd_init(vdev);
if (ret && ret != -ENODEV) {
pci_warn(pdev, "Failed to setup Intel IGD regions\n");
@@ -144,6 +142,8 @@ static const struct vfio_device_ops vfio_pci_ops = {
.unbind_iommufd = vfio_iommufd_physical_unbind,
.attach_ioas = vfio_iommufd_physical_attach_ioas,
.detach_ioas = vfio_iommufd_physical_detach_ioas,
+ .pasid_attach_ioas = vfio_iommufd_physical_pasid_attach_ioas,
+ .pasid_detach_ioas = vfio_iommufd_physical_pasid_detach_ioas,
};
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 94142581c98c..14437396d721 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -1814,7 +1814,8 @@ int vfio_config_init(struct vfio_pci_core_device *vdev)
cpu_to_le16(PCI_COMMAND_MEMORY);
}
- if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx)
+ if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx ||
+ vdev->pdev->irq == IRQ_NOTCONNECTED)
vconfig[PCI_INTERRUPT_PIN] = 0;
ret = vfio_cap_init(vdev);
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index c8586d47704c..35f9046af315 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -727,15 +727,7 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable);
static int vfio_pci_get_irq_count(struct vfio_pci_core_device *vdev, int irq_type)
{
if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
- u8 pin;
-
- if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) ||
- vdev->nointx || vdev->pdev->is_virtfn)
- return 0;
-
- pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
-
- return pin ? 1 : 0;
+ return vdev->vconfig[PCI_INTERRUPT_PIN] ? 1 : 0;
} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
u8 pos;
u16 flags;
diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c
index dd70e2431bd7..ef490a4545f4 100644
--- a/drivers/vfio/pci/vfio_pci_igd.c
+++ b/drivers/vfio/pci/vfio_pci_igd.c
@@ -435,6 +435,12 @@ static int vfio_pci_igd_cfg_init(struct vfio_pci_core_device *vdev)
return 0;
}
+bool vfio_pci_is_intel_display(struct pci_dev *pdev)
+{
+ return (pdev->vendor == PCI_VENDOR_ID_INTEL) &&
+ ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY);
+}
+
int vfio_pci_igd_init(struct vfio_pci_core_device *vdev)
{
int ret;
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 8382c5834335..565966351dfa 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -259,7 +259,7 @@ static int vfio_intx_enable(struct vfio_pci_core_device *vdev,
if (!is_irq_none(vdev))
return -EINVAL;
- if (!pdev->irq)
+ if (!pdev->irq || pdev->irq == IRQ_NOTCONNECTED)
return -ENODEV;
name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", pci_name(pdev));
diff --git a/drivers/vfio/pci/vfio_pci_priv.h b/drivers/vfio/pci/vfio_pci_priv.h
index 5e4fa69aee16..a9972eacb293 100644
--- a/drivers/vfio/pci/vfio_pci_priv.h
+++ b/drivers/vfio/pci/vfio_pci_priv.h
@@ -67,8 +67,14 @@ void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev,
u16 cmd);
#ifdef CONFIG_VFIO_PCI_IGD
+bool vfio_pci_is_intel_display(struct pci_dev *pdev);
int vfio_pci_igd_init(struct vfio_pci_core_device *vdev);
#else
+static inline bool vfio_pci_is_intel_display(struct pci_dev *pdev)
+{
+ return false;
+}
+
static inline int vfio_pci_igd_init(struct vfio_pci_core_device *vdev)
{
return -ENODEV;
diff --git a/drivers/vfio/pci/virtio/Kconfig b/drivers/vfio/pci/virtio/Kconfig
index 2770f7eb702c..33e04e65bec6 100644
--- a/drivers/vfio/pci/virtio/Kconfig
+++ b/drivers/vfio/pci/virtio/Kconfig
@@ -1,11 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIRTIO_VFIO_PCI
- tristate "VFIO support for VIRTIO NET PCI VF devices"
+ tristate "VFIO support for VIRTIO PCI VF devices"
depends on VIRTIO_PCI
select VFIO_PCI_CORE
help
- This provides migration support for VIRTIO NET PCI VF devices
- using the VFIO framework. Migration support requires the
+ This provides migration support for VIRTIO NET and BLOCK PCI VF
+ devices using the VFIO framework. Migration support requires the
SR-IOV PF device to support specific VIRTIO extensions,
otherwise this driver provides no additional functionality
beyond vfio-pci.
diff --git a/drivers/vfio/pci/virtio/legacy_io.c b/drivers/vfio/pci/virtio/legacy_io.c
index 20382ee15fac..832af5ba267c 100644
--- a/drivers/vfio/pci/virtio/legacy_io.c
+++ b/drivers/vfio/pci/virtio/legacy_io.c
@@ -382,7 +382,9 @@ static bool virtiovf_bar0_exists(struct pci_dev *pdev)
bool virtiovf_support_legacy_io(struct pci_dev *pdev)
{
- return virtio_pci_admin_has_legacy_io(pdev) && !virtiovf_bar0_exists(pdev);
+ /* For now, the legacy IO functionality is supported only for virtio-net */
+ return pdev->device == 0x1041 && virtio_pci_admin_has_legacy_io(pdev) &&
+ !virtiovf_bar0_exists(pdev);
}
int virtiovf_init_legacy_io(struct virtiovf_pci_core_device *virtvdev)
diff --git a/drivers/vfio/pci/virtio/main.c b/drivers/vfio/pci/virtio/main.c
index d534d48c4163..515fe1b9f94d 100644
--- a/drivers/vfio/pci/virtio/main.c
+++ b/drivers/vfio/pci/virtio/main.c
@@ -187,8 +187,9 @@ static void virtiovf_pci_remove(struct pci_dev *pdev)
}
static const struct pci_device_id virtiovf_pci_table[] = {
- /* Only virtio-net is supported/tested so far */
+ /* Only virtio-net and virtio-block are supported/tested so far */
{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_REDHAT_QUMRANET, 0x1041) },
+ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_REDHAT_QUMRANET, 0x1042) },
{}
};
@@ -221,4 +222,4 @@ module_pci_driver(virtiovf_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yishai Hadas <yishaih@nvidia.com>");
MODULE_DESCRIPTION(
- "VIRTIO VFIO PCI - User Level meta-driver for VIRTIO NET devices");
+ "VIRTIO VFIO PCI - User Level meta-driver for VIRTIO NET and BLOCK devices");
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 50ebc9593c9d..0ac56072af9f 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -103,9 +103,9 @@ struct vfio_dma {
struct vfio_batch {
struct page **pages; /* for pin_user_pages_remote */
struct page *fallback_page; /* if pages alloc fails */
- int capacity; /* length of pages array */
- int size; /* of batch currently */
- int offset; /* of next entry in pages */
+ unsigned int capacity; /* length of pages array */
+ unsigned int size; /* of batch currently */
+ unsigned int offset; /* of next entry in pages */
};
struct vfio_iommu_group {
@@ -471,12 +471,12 @@ static int put_pfn(unsigned long pfn, int prot)
#define VFIO_BATCH_MAX_CAPACITY (PAGE_SIZE / sizeof(struct page *))
-static void vfio_batch_init(struct vfio_batch *batch)
+static void __vfio_batch_init(struct vfio_batch *batch, bool single)
{
batch->size = 0;
batch->offset = 0;
- if (unlikely(disable_hugepages))
+ if (single || unlikely(disable_hugepages))
goto fallback;
batch->pages = (struct page **) __get_free_page(GFP_KERNEL);
@@ -491,6 +491,16 @@ fallback:
batch->capacity = 1;
}
+static void vfio_batch_init(struct vfio_batch *batch)
+{
+ __vfio_batch_init(batch, false);
+}
+
+static void vfio_batch_init_single(struct vfio_batch *batch)
+{
+ __vfio_batch_init(batch, true);
+}
+
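vfio_batch_init_single() exists for the one-page callers: with single set, __vfio_batch_init() skips the __get_free_page() pages array and takes the fallback path. A sketch of the caller-visible contract:

	/* After vfio_batch_init_single(&batch):
	 *   batch.pages    == &batch.fallback_page   (no page allocated)
	 *   batch.capacity == 1
	 * so vaddr_get_pfns() can pin at most one page into it, which is
	 * all vfio_pin_page_external() below ever needs.
	 */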
static void vfio_batch_unpin(struct vfio_batch *batch, struct vfio_dma *dma)
{
while (batch->size) {
@@ -510,7 +520,7 @@ static void vfio_batch_fini(struct vfio_batch *batch)
static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
unsigned long vaddr, unsigned long *pfn,
- bool write_fault)
+ unsigned long *addr_mask, bool write_fault)
{
struct follow_pfnmap_args args = { .vma = vma, .address = vaddr };
int ret;
@@ -534,10 +544,12 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
return ret;
}
- if (write_fault && !args.writable)
+ if (write_fault && !args.writable) {
ret = -EFAULT;
- else
+ } else {
*pfn = args.pfn;
+ *addr_mask = args.addr_mask;
+ }
follow_pfnmap_end(&args);
return ret;
@@ -545,25 +557,33 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
/*
* Returns the positive number of pfns successfully obtained or a negative
- * error code.
+ * error code. The initial pfn is stored in the pfn arg. For page-backed
+ * pfns, the provided batch is also updated to indicate the filled pages and
+ * initial offset. For VM_PFNMAP pfns, only the returned number of pfns and
+ * returned initial pfn are provided; subsequent pfns are contiguous.
*/
-static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
- long npages, int prot, unsigned long *pfn,
- struct page **pages)
+static long vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
+ unsigned long npages, int prot, unsigned long *pfn,
+ struct vfio_batch *batch)
{
+ unsigned long pin_pages = min_t(unsigned long, npages, batch->capacity);
struct vm_area_struct *vma;
unsigned int flags = 0;
- int ret;
+ long ret;
if (prot & IOMMU_WRITE)
flags |= FOLL_WRITE;
mmap_read_lock(mm);
- ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM,
- pages, NULL);
+ ret = pin_user_pages_remote(mm, vaddr, pin_pages, flags | FOLL_LONGTERM,
+ batch->pages, NULL);
if (ret > 0) {
- *pfn = page_to_pfn(pages[0]);
+ *pfn = page_to_pfn(batch->pages[0]);
+ batch->size = ret;
+ batch->offset = 0;
goto done;
+ } else if (!ret) {
+ ret = -EFAULT;
}
vaddr = untagged_addr_remote(mm, vaddr);
@@ -572,15 +592,22 @@ retry:
vma = vma_lookup(mm, vaddr);
if (vma && vma->vm_flags & VM_PFNMAP) {
- ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE);
+ unsigned long addr_mask;
+
+ ret = follow_fault_pfn(vma, mm, vaddr, pfn, &addr_mask,
+ prot & IOMMU_WRITE);
if (ret == -EAGAIN)
goto retry;
if (!ret) {
- if (is_invalid_reserved_pfn(*pfn))
- ret = 1;
- else
+ if (is_invalid_reserved_pfn(*pfn)) {
+ unsigned long epfn;
+
+ epfn = (*pfn | (~addr_mask >> PAGE_SHIFT)) + 1;
+ ret = min_t(long, npages, epfn - *pfn);
+ } else {
ret = -EFAULT;
+ }
}
}
done:
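The epfn computation turns the returned addr_mask (the alignment mask of the leaf mapping that follow_fault_pfn() hit) into a count of contiguous pfns available from vaddr. A worked example, assuming 4K pages and a 2MB PMD-level pfnmap:

	/* addr_mask = ~(PMD_SIZE - 1), so ~addr_mask >> PAGE_SHIFT = 511
	 * *pfn = 0x10050                        (somewhere inside the PMD)
	 * epfn = (0x10050 | 511) + 1 = 0x10200  (first pfn past the mapping)
	 * ret  = min(npages, 0x10200 - 0x10050) = min(npages, 432)
	 */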
@@ -594,7 +621,7 @@ done:
* first page and all consecutive pages with the same locking.
*/
static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
- long npage, unsigned long *pfn_base,
+ unsigned long npage, unsigned long *pfn_base,
unsigned long limit, struct vfio_batch *batch)
{
unsigned long pfn;
@@ -616,32 +643,42 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
*pfn_base = 0;
}
+ if (unlikely(disable_hugepages))
+ npage = 1;
+
while (npage) {
if (!batch->size) {
/* Empty batch, so refill it. */
- long req_pages = min_t(long, npage, batch->capacity);
-
- ret = vaddr_get_pfns(mm, vaddr, req_pages, dma->prot,
- &pfn, batch->pages);
+ ret = vaddr_get_pfns(mm, vaddr, npage, dma->prot,
+ &pfn, batch);
if (ret < 0)
goto unpin_out;
- batch->size = ret;
- batch->offset = 0;
-
if (!*pfn_base) {
*pfn_base = pfn;
rsvd = is_invalid_reserved_pfn(*pfn_base);
}
+
+ /* Handle pfnmap */
+ if (!batch->size) {
+ if (pfn != *pfn_base + pinned || !rsvd)
+ goto out;
+
+ pinned += ret;
+ npage -= ret;
+ vaddr += (PAGE_SIZE * ret);
+ iova += (PAGE_SIZE * ret);
+ continue;
+ }
}
/*
- * pfn is preset for the first iteration of this inner loop and
- * updated at the end to handle a VM_PFNMAP pfn. In that case,
- * batch->pages isn't valid (there's no struct page), so allow
- * batch->pages to be touched only when there's more than one
- * pfn to check, which guarantees the pfns are from a
- * !VM_PFNMAP vma.
+ * pfn is preset for the first iteration of this inner loop
+ * because vaddr_get_pfns() needs to provide the initial pfn
+ * for pfnmaps. To reduce redundancy, the next pfn is therefore
+ * fetched at the end of the loop.
+ * A PageReserved() page could still qualify as page backed
+ * and rsvd here, and therefore continues to use the batch.
*/
while (true) {
if (pfn != *pfn_base + pinned ||
@@ -676,21 +713,12 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
pfn = page_to_pfn(batch->pages[batch->offset]);
}
-
- if (unlikely(disable_hugepages))
- break;
}
out:
ret = vfio_lock_acct(dma, lock_acct, false);
unpin_out:
- if (batch->size == 1 && !batch->offset) {
- /* May be a VM_PFNMAP pfn, which the batch can't remember. */
- put_pfn(pfn, dma->prot);
- batch->size = 0;
- }
-
if (ret < 0) {
if (pinned && !rsvd) {
for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
@@ -705,7 +733,7 @@ unpin_out:
}
static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
- unsigned long pfn, long npage,
+ unsigned long pfn, unsigned long npage,
bool do_accounting)
{
long unlocked = 0, locked = 0;
@@ -728,7 +756,7 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
unsigned long *pfn_base, bool do_accounting)
{
- struct page *pages[1];
+ struct vfio_batch batch;
struct mm_struct *mm;
int ret;
@@ -736,7 +764,9 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
if (!mmget_not_zero(mm))
return -ENODEV;
- ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages);
+ vfio_batch_init_single(&batch);
+
+ ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, &batch);
if (ret != 1)
goto out;
@@ -755,6 +785,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
}
out:
+ vfio_batch_fini(&batch);
mmput(mm);
return ret;
}
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index b455d9ab6f3d..020d4fbb947c 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -47,6 +47,7 @@ config VHOST_SCSI
tristate "VHOST_SCSI TCM fabric driver"
depends on TARGET_CORE && EVENTFD
select VHOST
+ select SG_POOL
default n
help
Say M here to enable the vhost_scsi TCM fabric module
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 718fa4e0b31e..f6f5a7ac7894 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -45,6 +45,55 @@
#define VHOST_SCSI_PREALLOC_SGLS 2048
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
+/*
+ * For the legacy descriptor case we allocate an iov per byte in the
+ * virtio_scsi_cmd_resp struct.
+ */
+#define VHOST_SCSI_MAX_RESP_IOVS sizeof(struct virtio_scsi_cmd_resp)
+
+static unsigned int vhost_scsi_inline_sg_cnt = VHOST_SCSI_PREALLOC_SGLS;
+
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+static int vhost_scsi_set_inline_sg_cnt(const char *buf,
+ const struct kernel_param *kp)
+{
+ pr_err("Setting inline_sg_cnt is not supported.\n");
+ return -EOPNOTSUPP;
+}
+#else
+static int vhost_scsi_set_inline_sg_cnt(const char *buf,
+ const struct kernel_param *kp)
+{
+ unsigned int cnt;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &cnt);
+ if (ret)
+ return ret;
+
+ if (cnt > VHOST_SCSI_PREALLOC_SGLS) {
+ pr_err("Max inline_sg_cnt is %u\n", VHOST_SCSI_PREALLOC_SGLS);
+ return -EINVAL;
+ }
+
+ vhost_scsi_inline_sg_cnt = cnt;
+ return 0;
+}
+#endif
+
+static int vhost_scsi_get_inline_sg_cnt(char *buf,
+ const struct kernel_param *kp)
+{
+ return sprintf(buf, "%u\n", vhost_scsi_inline_sg_cnt);
+}
+
+static const struct kernel_param_ops vhost_scsi_inline_sg_cnt_op = {
+ .get = vhost_scsi_get_inline_sg_cnt,
+ .set = vhost_scsi_set_inline_sg_cnt,
+};
+
+module_param_cb(inline_sg_cnt, &vhost_scsi_inline_sg_cnt_op, NULL, 0644);
+MODULE_PARM_DESC(inline_sg_cnt, "Set the number of scatterlist entries to pre-allocate. The default is 2048.");
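With mode 0644 the parameter is also writable at runtime; a hedged usage note, assuming the standard module-param sysfs layout:

	/* /sys/module/vhost_scsi/parameters/inline_sg_cnt (0644): writes go
	 * through vhost_scsi_set_inline_sg_cnt() above, so the cap of
	 * VHOST_SCSI_PREALLOC_SGLS (2048) still applies. Each device
	 * snapshots the value into vs->inline_sg_cnt at open time, so a
	 * change only affects subsequently opened vhost-scsi devices.
	 */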
/* Max number of requests before requeueing the job.
* Using this limit prevents one virtqueue from starving others with
@@ -62,40 +111,26 @@ struct vhost_scsi_inflight {
struct vhost_scsi_cmd {
/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
int tvc_vq_desc;
- /* virtio-scsi initiator task attribute */
- int tvc_task_attr;
- /* virtio-scsi response incoming iovecs */
- int tvc_in_iovs;
- /* virtio-scsi initiator data direction */
- enum dma_data_direction tvc_data_direction;
- /* Expected data transfer length from virtio-scsi header */
- u32 tvc_exp_data_len;
- /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
- u64 tvc_tag;
/* The number of scatterlists associated with this cmd */
u32 tvc_sgl_count;
u32 tvc_prot_sgl_count;
- /* Saved unpacked SCSI LUN for vhost_scsi_target_queue_cmd() */
- u32 tvc_lun;
u32 copied_iov:1;
- const void *saved_iter_addr;
- struct iov_iter saved_iter;
- /* Pointer to the SGL formatted memory from virtio-scsi */
- struct scatterlist *tvc_sgl;
- struct scatterlist *tvc_prot_sgl;
- struct page **tvc_upages;
- /* Pointer to response header iovec */
- struct iovec *tvc_resp_iov;
- /* Pointer to vhost_scsi for our device */
- struct vhost_scsi *tvc_vhost;
+ const void *read_iov;
+ struct iov_iter *read_iter;
+ struct scatterlist *sgl;
+ struct sg_table table;
+ struct scatterlist *prot_sgl;
+ struct sg_table prot_table;
+ /* Fast path response header iovec used when only one vec is needed */
+ struct iovec tvc_resp_iov;
+ /* Number of iovs for response */
+ unsigned int tvc_resp_iovs_cnt;
+ /* Pointer to response header iovecs if more than one is needed */
+ struct iovec *tvc_resp_iovs;
/* Pointer to vhost_virtqueue for the cmd */
struct vhost_virtqueue *tvc_vq;
- /* Pointer to vhost nexus memory */
- struct vhost_scsi_nexus *tvc_nexus;
/* The TCM I/O descriptor that is accessed via container_of() */
struct se_cmd tvc_se_cmd;
- /* Copy of the incoming SCSI command descriptor block (CDB) */
- unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
/* Sense buffer that will be mapped into outgoing status */
unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
/* Completed commands list, serviced from vhost worker thread */
@@ -187,6 +222,7 @@ struct vhost_scsi_virtqueue {
struct vhost_scsi_cmd *scsi_cmds;
struct sbitmap scsi_tags;
int max_cmds;
+ struct page **upages;
struct vhost_work completion_work;
struct llist_head completion_list;
@@ -206,6 +242,8 @@ struct vhost_scsi {
bool vs_events_missed; /* any missed events, protected by vq->mutex */
int vs_events_nr; /* num of pending events, protected by vq->mutex */
+
+ unsigned int inline_sg_cnt;
};
struct vhost_scsi_tmf {
@@ -330,23 +368,38 @@ static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
struct vhost_scsi_cmd, tvc_se_cmd);
struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
struct vhost_scsi_virtqueue, vq);
+ struct vhost_scsi *vs = svq->vs;
struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
+ struct scatterlist *sg;
+ struct page *page;
int i;
if (tv_cmd->tvc_sgl_count) {
- for (i = 0; i < tv_cmd->tvc_sgl_count; i++) {
+ for_each_sgtable_sg(&tv_cmd->table, sg, i) {
+ page = sg_page(sg);
+ if (!page)
+ continue;
+
if (tv_cmd->copied_iov)
- __free_page(sg_page(&tv_cmd->tvc_sgl[i]));
+ __free_page(page);
else
- put_page(sg_page(&tv_cmd->tvc_sgl[i]));
+ put_page(page);
}
- kfree(tv_cmd->saved_iter_addr);
+ kfree(tv_cmd->read_iter);
+ kfree(tv_cmd->read_iov);
+ sg_free_table_chained(&tv_cmd->table, vs->inline_sg_cnt);
}
if (tv_cmd->tvc_prot_sgl_count) {
- for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
- put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
+ for_each_sgtable_sg(&tv_cmd->prot_table, sg, i) {
+ page = sg_page(sg);
+ if (page)
+ put_page(page);
+ }
+ sg_free_table_chained(&tv_cmd->prot_table, vs->inline_sg_cnt);
}
+ if (tv_cmd->tvc_resp_iovs != &tv_cmd->tvc_resp_iov)
+ kfree(tv_cmd->tvc_resp_iovs);
sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
vhost_scsi_put_inflight(inflight);
}
@@ -533,15 +586,18 @@ static void vhost_scsi_evt_work(struct vhost_work *work)
static int vhost_scsi_copy_sgl_to_iov(struct vhost_scsi_cmd *cmd)
{
- struct iov_iter *iter = &cmd->saved_iter;
- struct scatterlist *sg = cmd->tvc_sgl;
+ struct iov_iter *iter = cmd->read_iter;
+ struct scatterlist *sg;
struct page *page;
size_t len;
int i;
- for (i = 0; i < cmd->tvc_sgl_count; i++) {
- page = sg_page(&sg[i]);
- len = sg[i].length;
+ for_each_sgtable_sg(&cmd->table, sg, i) {
+ page = sg_page(sg);
+ if (!page)
+ continue;
+
+ len = sg->length;
if (copy_page_to_iter(page, 0, len, iter) != len) {
pr_err("Could not copy data while handling misaligned cmd. Error %zu\n",
@@ -578,7 +634,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
cmd, se_cmd->residual_count, se_cmd->scsi_status);
memset(&v_rsp, 0, sizeof(v_rsp));
- if (cmd->saved_iter_addr && vhost_scsi_copy_sgl_to_iov(cmd)) {
+ if (cmd->read_iter && vhost_scsi_copy_sgl_to_iov(cmd)) {
v_rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
} else {
v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq,
@@ -591,8 +647,8 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
se_cmd->scsi_sense_length);
}
- iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iov,
- cmd->tvc_in_iovs, sizeof(v_rsp));
+ iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iovs,
+ cmd->tvc_resp_iovs_cnt, sizeof(v_rsp));
ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
if (likely(ret == sizeof(v_rsp))) {
signal = true;
@@ -609,55 +665,53 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
}
static struct vhost_scsi_cmd *
-vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
- unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
- u32 exp_data_len, int data_direction)
+vhost_scsi_get_cmd(struct vhost_virtqueue *vq, u64 scsi_tag)
{
struct vhost_scsi_virtqueue *svq = container_of(vq,
struct vhost_scsi_virtqueue, vq);
struct vhost_scsi_cmd *cmd;
- struct vhost_scsi_nexus *tv_nexus;
- struct scatterlist *sg, *prot_sg;
- struct iovec *tvc_resp_iov;
- struct page **pages;
+ struct scatterlist *sgl, *prot_sgl;
int tag;
- tv_nexus = tpg->tpg_nexus;
- if (!tv_nexus) {
- pr_err("Unable to locate active struct vhost_scsi_nexus\n");
- return ERR_PTR(-EIO);
- }
-
tag = sbitmap_get(&svq->scsi_tags);
if (tag < 0) {
- pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
+ pr_warn_once("Guest sent too many cmds. Returning TASK_SET_FULL.\n");
return ERR_PTR(-ENOMEM);
}
cmd = &svq->scsi_cmds[tag];
- sg = cmd->tvc_sgl;
- prot_sg = cmd->tvc_prot_sgl;
- pages = cmd->tvc_upages;
- tvc_resp_iov = cmd->tvc_resp_iov;
+ sgl = cmd->sgl;
+ prot_sgl = cmd->prot_sgl;
memset(cmd, 0, sizeof(*cmd));
- cmd->tvc_sgl = sg;
- cmd->tvc_prot_sgl = prot_sg;
- cmd->tvc_upages = pages;
+ cmd->sgl = sgl;
+ cmd->prot_sgl = prot_sgl;
cmd->tvc_se_cmd.map_tag = tag;
- cmd->tvc_tag = scsi_tag;
- cmd->tvc_lun = lun;
- cmd->tvc_task_attr = task_attr;
- cmd->tvc_exp_data_len = exp_data_len;
- cmd->tvc_data_direction = data_direction;
- cmd->tvc_nexus = tv_nexus;
cmd->inflight = vhost_scsi_get_inflight(vq);
- cmd->tvc_resp_iov = tvc_resp_iov;
-
- memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
return cmd;
}
+static void vhost_scsi_revert_map_iov_to_sgl(struct iov_iter *iter,
+ struct scatterlist *curr,
+ struct scatterlist *end)
+{
+ size_t revert_bytes = 0;
+ struct page *page;
+
+ while (curr != end) {
+ page = sg_page(curr);
+
+ if (page) {
+ put_page(page);
+ revert_bytes += curr->length;
+ }
+ /* Clear so we can re-use it for the copy path */
+ sg_set_page(curr, NULL, 0, 0);
+ curr = sg_next(curr);
+ }
+ iov_iter_revert(iter, revert_bytes);
+}
+
/*
* Map a user memory range into a scatterlist
*
@@ -666,14 +720,17 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
struct iov_iter *iter,
- struct scatterlist *sgl,
+ struct sg_table *sg_table,
+ struct scatterlist **sgl,
bool is_prot)
{
- struct page **pages = cmd->tvc_upages;
- struct scatterlist *sg = sgl;
- ssize_t bytes, mapped_bytes;
- size_t offset, mapped_offset;
- unsigned int npages = 0;
+ struct vhost_scsi_virtqueue *svq = container_of(cmd->tvc_vq,
+ struct vhost_scsi_virtqueue, vq);
+ struct page **pages = svq->upages;
+ struct scatterlist *sg = *sgl;
+ ssize_t bytes;
+ size_t offset;
+ unsigned int n, npages = 0;
bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
VHOST_SCSI_PREALLOC_UPAGES, &offset);
@@ -681,11 +738,8 @@ vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
if (bytes <= 0)
return bytes < 0 ? bytes : -EFAULT;
- mapped_bytes = bytes;
- mapped_offset = offset;
-
while (bytes) {
- unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
+ n = min_t(unsigned int, PAGE_SIZE - offset, bytes);
/*
* The block layer requires bios/requests to be a multiple of
* 512 bytes, but Windows can send us vecs that are misaligned.
@@ -706,25 +760,24 @@ vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
goto revert_iter_get_pages;
}
- sg_set_page(sg++, pages[npages++], n, offset);
+ sg_set_page(sg, pages[npages++], n, offset);
+ sg = sg_next(sg);
bytes -= n;
offset = 0;
}
+ *sgl = sg;
return npages;
revert_iter_get_pages:
- iov_iter_revert(iter, mapped_bytes);
+ vhost_scsi_revert_map_iov_to_sgl(iter, *sgl, sg);
- npages = 0;
- while (mapped_bytes) {
- unsigned int n = min_t(unsigned int, PAGE_SIZE - mapped_offset,
- mapped_bytes);
+ iov_iter_revert(iter, bytes);
+ while (bytes) {
+ n = min_t(unsigned int, PAGE_SIZE, bytes);
put_page(pages[npages++]);
-
- mapped_bytes -= n;
- mapped_offset = 0;
+ bytes -= n;
}
return -EINVAL;
@@ -752,33 +805,42 @@ vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
static int
vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
- struct scatterlist *sg, int sg_count)
+ struct sg_table *sg_table, int sg_count,
+ int data_dir)
{
size_t len = iov_iter_count(iter);
unsigned int nbytes = 0;
+ struct scatterlist *sg;
struct page *page;
- int i;
+ int i, ret;
- if (cmd->tvc_data_direction == DMA_FROM_DEVICE) {
- cmd->saved_iter_addr = dup_iter(&cmd->saved_iter, iter,
- GFP_KERNEL);
- if (!cmd->saved_iter_addr)
+ if (data_dir == DMA_FROM_DEVICE) {
+ cmd->read_iter = kzalloc(sizeof(*cmd->read_iter), GFP_KERNEL);
+ if (!cmd->read_iter)
return -ENOMEM;
+
+ cmd->read_iov = dup_iter(cmd->read_iter, iter, GFP_KERNEL);
+ if (!cmd->read_iov) {
+ ret = -ENOMEM;
+ goto free_iter;
+ }
}
- for (i = 0; i < sg_count; i++) {
+ for_each_sgtable_sg(sg_table, sg, i) {
page = alloc_page(GFP_KERNEL);
if (!page) {
- i--;
+ ret = -ENOMEM;
goto err;
}
nbytes = min_t(unsigned int, PAGE_SIZE, len);
- sg_set_page(&sg[i], page, nbytes, 0);
+ sg_set_page(sg, page, nbytes, 0);
- if (cmd->tvc_data_direction == DMA_TO_DEVICE &&
- copy_page_from_iter(page, 0, nbytes, iter) != nbytes)
+ if (data_dir == DMA_TO_DEVICE &&
+ copy_page_from_iter(page, 0, nbytes, iter) != nbytes) {
+ ret = -EFAULT;
goto err;
+ }
len -= nbytes;
}
@@ -790,66 +852,63 @@ err:
pr_err("Could not read %u bytes while handling misaligned cmd\n",
nbytes);
- for (; i >= 0; i--)
- __free_page(sg_page(&sg[i]));
- kfree(cmd->saved_iter_addr);
- return -ENOMEM;
+ for_each_sgtable_sg(sg_table, sg, i) {
+ page = sg_page(sg);
+ if (page)
+ __free_page(page);
+ }
+ kfree(cmd->read_iov);
+free_iter:
+ kfree(cmd->read_iter);
+ return ret;
}
static int
vhost_scsi_map_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
- struct scatterlist *sg, int sg_count, bool is_prot)
+ struct sg_table *sg_table, int sg_count, bool is_prot)
{
- struct scatterlist *p = sg;
- size_t revert_bytes;
+ struct scatterlist *sg = sg_table->sgl;
int ret;
while (iov_iter_count(iter)) {
- ret = vhost_scsi_map_to_sgl(cmd, iter, sg, is_prot);
+ ret = vhost_scsi_map_to_sgl(cmd, iter, sg_table, &sg, is_prot);
if (ret < 0) {
- revert_bytes = 0;
-
- while (p < sg) {
- struct page *page = sg_page(p);
-
- if (page) {
- put_page(page);
- revert_bytes += p->length;
- }
- p++;
- }
-
- iov_iter_revert(iter, revert_bytes);
+ vhost_scsi_revert_map_iov_to_sgl(iter, sg_table->sgl,
+ sg);
return ret;
}
- sg += ret;
}
return 0;
}
static int
-vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
+vhost_scsi_mapal(struct vhost_scsi *vs, struct vhost_scsi_cmd *cmd,
size_t prot_bytes, struct iov_iter *prot_iter,
- size_t data_bytes, struct iov_iter *data_iter)
+ size_t data_bytes, struct iov_iter *data_iter, int data_dir)
{
int sgl_count, ret;
if (prot_bytes) {
sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
VHOST_SCSI_PREALLOC_PROT_SGLS);
- if (sgl_count < 0)
- return sgl_count;
+ cmd->prot_table.sgl = cmd->prot_sgl;
+ ret = sg_alloc_table_chained(&cmd->prot_table, sgl_count,
+ cmd->prot_table.sgl,
+ vs->inline_sg_cnt);
+ if (ret)
+ return ret;
- sg_init_table(cmd->tvc_prot_sgl, sgl_count);
cmd->tvc_prot_sgl_count = sgl_count;
pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
- cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
+ cmd->prot_table.sgl, cmd->tvc_prot_sgl_count);
ret = vhost_scsi_map_iov_to_sgl(cmd, prot_iter,
- cmd->tvc_prot_sgl,
+ &cmd->prot_table,
cmd->tvc_prot_sgl_count, true);
if (ret < 0) {
+ sg_free_table_chained(&cmd->prot_table,
+ vs->inline_sg_cnt);
cmd->tvc_prot_sgl_count = 0;
return ret;
}
@@ -859,20 +918,23 @@ vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
if (sgl_count < 0)
return sgl_count;
- sg_init_table(cmd->tvc_sgl, sgl_count);
+ cmd->table.sgl = cmd->sgl;
+ ret = sg_alloc_table_chained(&cmd->table, sgl_count, cmd->table.sgl,
+ vs->inline_sg_cnt);
+ if (ret)
+ return ret;
+
cmd->tvc_sgl_count = sgl_count;
pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
- cmd->tvc_sgl, cmd->tvc_sgl_count);
+ cmd->table.sgl, cmd->tvc_sgl_count);
- ret = vhost_scsi_map_iov_to_sgl(cmd, data_iter, cmd->tvc_sgl,
+ ret = vhost_scsi_map_iov_to_sgl(cmd, data_iter, &cmd->table,
cmd->tvc_sgl_count, false);
- if (ret == -EINVAL) {
- sg_init_table(cmd->tvc_sgl, cmd->tvc_sgl_count);
- ret = vhost_scsi_copy_iov_to_sgl(cmd, data_iter, cmd->tvc_sgl,
- cmd->tvc_sgl_count);
- }
-
+ if (ret == -EINVAL)
+ ret = vhost_scsi_copy_iov_to_sgl(cmd, data_iter, &cmd->table,
+ cmd->tvc_sgl_count, data_dir);
if (ret < 0) {
+ sg_free_table_chained(&cmd->table, vs->inline_sg_cnt);
cmd->tvc_sgl_count = 0;
return ret;
}
@@ -896,32 +958,33 @@ static int vhost_scsi_to_tcm_attr(int attr)
return TCM_SIMPLE_TAG;
}
-static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
+static void vhost_scsi_target_queue_cmd(struct vhost_scsi_nexus *nexus,
+ struct vhost_scsi_cmd *cmd,
+ unsigned char *cdb, u16 lun,
+ int task_attr, int data_dir,
+ u32 exp_data_len)
{
struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
- struct vhost_scsi_nexus *tv_nexus;
struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
/* FIXME: BIDI operation */
if (cmd->tvc_sgl_count) {
- sg_ptr = cmd->tvc_sgl;
+ sg_ptr = cmd->table.sgl;
if (cmd->tvc_prot_sgl_count)
- sg_prot_ptr = cmd->tvc_prot_sgl;
+ sg_prot_ptr = cmd->prot_table.sgl;
else
se_cmd->prot_pto = true;
} else {
sg_ptr = NULL;
}
- tv_nexus = cmd->tvc_nexus;
se_cmd->tag = 0;
- target_init_cmd(se_cmd, tv_nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
- cmd->tvc_lun, cmd->tvc_exp_data_len,
- vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
- cmd->tvc_data_direction, TARGET_SCF_ACK_KREF);
+ target_init_cmd(se_cmd, nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
+ lun, exp_data_len, vhost_scsi_to_tcm_attr(task_attr),
+ data_dir, TARGET_SCF_ACK_KREF);
- if (target_submit_prep(se_cmd, cmd->tvc_cdb, sg_ptr,
+ if (target_submit_prep(se_cmd, cdb, sg_ptr,
cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
cmd->tvc_prot_sgl_count, GFP_KERNEL))
return;
@@ -930,6 +993,24 @@ static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
}
static void
+vhost_scsi_send_status(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+ int head, unsigned int out, u8 status)
+{
+ struct virtio_scsi_cmd_resp __user *resp;
+ struct virtio_scsi_cmd_resp rsp;
+ int ret;
+
+ memset(&rsp, 0, sizeof(rsp));
+ rsp.status = status;
+ resp = vq->iov[out].iov_base;
+ ret = __copy_to_user(resp, &rsp, sizeof(rsp));
+ if (!ret)
+ vhost_add_used_and_signal(&vs->dev, vq, head, 0);
+ else
+ pr_err("Faulted on virtio_scsi_cmd_resp\n");
+}
+
+static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
struct vhost_virtqueue *vq,
int head, unsigned out)
@@ -1049,6 +1130,43 @@ out:
return ret;
}
+static int
+vhost_scsi_setup_resp_iovs(struct vhost_scsi_cmd *cmd, struct iovec *in_iovs,
+ unsigned int in_iovs_cnt)
+{
+ int i, cnt;
+
+ if (!in_iovs_cnt)
+ return 0;
+ /*
+ * Initiators normally just put the virtio_scsi_cmd_resp in the first
+ * iov, but in case they wedged in some data with it, we check that the
+ * first iov is at least as large as the response struct.
+ */
+ if (in_iovs[0].iov_len >= sizeof(struct virtio_scsi_cmd_resp)) {
+ cmd->tvc_resp_iovs = &cmd->tvc_resp_iov;
+ cmd->tvc_resp_iovs_cnt = 1;
+ } else {
+ /*
+ * Legacy descriptor layouts didn't specify that we must put
+ * the entire response in one iov. Worst case we have an
+ * iov per byte.
+ */
+ cnt = min(VHOST_SCSI_MAX_RESP_IOVS, in_iovs_cnt);
+ cmd->tvc_resp_iovs = kcalloc(cnt, sizeof(struct iovec),
+ GFP_KERNEL);
+ if (!cmd->tvc_resp_iovs)
+ return -ENOMEM;
+
+ cmd->tvc_resp_iovs_cnt = cnt;
+ }
+
+ for (i = 0; i < cmd->tvc_resp_iovs_cnt; i++)
+ cmd->tvc_resp_iovs[i] = in_iovs[i];
+
+ return 0;
+}
+
static u16 vhost_buf_to_lun(u8 *lun_buf)
{
return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
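vhost_buf_to_lun() decodes the virtio-scsi 8-byte LUN field, where bytes 2-3 carry the LUN big-endian with the 0x4000 flat-addressing flag set; masking with 0x3FFF strips that flag:

	/* lun_buf = { 0x01, target, 0x40 | (lun >> 8), lun & 0xff, 0, ... }
	 * lun == 7:  lun_buf[2] = 0x40, lun_buf[3] = 0x07
	 *            ((0x40 << 8) | 0x07) & 0x3FFF == 0x0007
	 */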
@@ -1060,16 +1178,17 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
struct vhost_scsi_tpg **vs_tpg, *tpg;
struct virtio_scsi_cmd_req v_req;
struct virtio_scsi_cmd_req_pi v_req_pi;
+ struct vhost_scsi_nexus *nexus;
struct vhost_scsi_ctx vc;
struct vhost_scsi_cmd *cmd;
struct iov_iter in_iter, prot_iter, data_iter;
u64 tag;
u32 exp_data_len, data_direction;
- int ret, prot_bytes, i, c = 0;
+ int ret, prot_bytes, c = 0;
u16 lun;
u8 task_attr;
bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
- void *cdb;
+ u8 *cdb;
mutex_lock(&vq->mutex);
/*
@@ -1212,29 +1331,39 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
goto err;
}
- cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
- exp_data_len + prot_bytes,
- data_direction);
+
+ nexus = tpg->tpg_nexus;
+ if (!nexus) {
+ vq_err(vq, "Unable to locate active struct vhost_scsi_nexus\n");
+ ret = -EIO;
+ goto err;
+ }
+
+ cmd = vhost_scsi_get_cmd(vq, tag);
if (IS_ERR(cmd)) {
- vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
- PTR_ERR(cmd));
+ ret = PTR_ERR(cmd);
+ vq_err(vq, "vhost_scsi_get_tag failed %dd\n", ret);
goto err;
}
- cmd->tvc_vhost = vs;
cmd->tvc_vq = vq;
- for (i = 0; i < vc.in ; i++)
- cmd->tvc_resp_iov[i] = vq->iov[vc.out + i];
- cmd->tvc_in_iovs = vc.in;
+
+ ret = vhost_scsi_setup_resp_iovs(cmd, &vq->iov[vc.out], vc.in);
+ if (ret) {
+ vq_err(vq, "Failed to alloc recv iovs\n");
+ vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
+ goto err;
+ }
pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
- cmd->tvc_cdb[0], cmd->tvc_lun);
+ cdb[0], lun);
pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
" %d\n", cmd, exp_data_len, prot_bytes, data_direction);
if (data_direction != DMA_NONE) {
- if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
- &prot_iter, exp_data_len,
- &data_iter))) {
+ ret = vhost_scsi_mapal(vs, cmd, prot_bytes, &prot_iter,
+ exp_data_len, &data_iter,
+ data_direction);
+ if (unlikely(ret)) {
vq_err(vq, "Failed to map iov to sgl\n");
vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
goto err;
@@ -1246,7 +1375,9 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
* vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
*/
cmd->tvc_vq_desc = vc.head;
- vhost_scsi_target_queue_cmd(cmd);
+ vhost_scsi_target_queue_cmd(nexus, cmd, cdb, lun, task_attr,
+ data_direction,
+ exp_data_len + prot_bytes);
ret = 0;
err:
/*
@@ -1254,11 +1385,15 @@ err:
* EINVAL: Invalid response buffer, drop the request
* EIO: Respond with bad target
* EAGAIN: Pending request
+ * ENOMEM: Could not allocate resources for request
*/
if (ret == -ENXIO)
break;
else if (ret == -EIO)
vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
+ else if (ret == -ENOMEM)
+ vhost_scsi_send_status(vs, vq, vc.head, vc.out,
+ SAM_STAT_TASK_SET_FULL);
} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
mutex_unlock(&vq->mutex);
@@ -1596,13 +1731,12 @@ static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
for (i = 0; i < svq->max_cmds; i++) {
tv_cmd = &svq->scsi_cmds[i];
- kfree(tv_cmd->tvc_sgl);
- kfree(tv_cmd->tvc_prot_sgl);
- kfree(tv_cmd->tvc_upages);
- kfree(tv_cmd->tvc_resp_iov);
+ kfree(tv_cmd->sgl);
+ kfree(tv_cmd->prot_sgl);
}
sbitmap_free(&svq->scsi_tags);
+ kfree(svq->upages);
kfree(svq->scsi_cmds);
svq->scsi_cmds = NULL;
}
@@ -1611,6 +1745,7 @@ static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
{
struct vhost_scsi_virtqueue *svq = container_of(vq,
struct vhost_scsi_virtqueue, vq);
+ struct vhost_scsi *vs = svq->vs;
struct vhost_scsi_cmd *tv_cmd;
unsigned int i;
@@ -1628,39 +1763,33 @@ static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
return -ENOMEM;
}
+ svq->upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES, sizeof(struct page *),
+ GFP_KERNEL);
+ if (!svq->upages)
+ goto out;
+
for (i = 0; i < max_cmds; i++) {
tv_cmd = &svq->scsi_cmds[i];
- tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
- sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!tv_cmd->tvc_sgl) {
- pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
- goto out;
- }
-
- tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
- sizeof(struct page *),
- GFP_KERNEL);
- if (!tv_cmd->tvc_upages) {
- pr_err("Unable to allocate tv_cmd->tvc_upages\n");
- goto out;
- }
-
- tv_cmd->tvc_resp_iov = kcalloc(UIO_MAXIOV,
- sizeof(struct iovec),
- GFP_KERNEL);
- if (!tv_cmd->tvc_resp_iov) {
- pr_err("Unable to allocate tv_cmd->tvc_resp_iov\n");
- goto out;
+ if (vs->inline_sg_cnt) {
+ tv_cmd->sgl = kcalloc(vs->inline_sg_cnt,
+ sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if (!tv_cmd->sgl) {
+ pr_err("Unable to allocate tv_cmd->sgl\n");
+ goto out;
+ }
}
- tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
- sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!tv_cmd->tvc_prot_sgl) {
- pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
- goto out;
+ if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI) &&
+ vs->inline_sg_cnt) {
+ tv_cmd->prot_sgl = kcalloc(vs->inline_sg_cnt,
+ sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if (!tv_cmd->prot_sgl) {
+ pr_err("Unable to allocate tv_cmd->prot_sgl\n");
+ goto out;
+ }
}
}
return 0;
@@ -1699,14 +1828,19 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
}
}
+ if (vs->vs_tpg) {
+ pr_err("vhost-scsi endpoint already set for %s.\n",
+ vs->vs_vhost_wwpn);
+ ret = -EEXIST;
+ goto out;
+ }
+
len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
vs_tpg = kzalloc(len, GFP_KERNEL);
if (!vs_tpg) {
ret = -ENOMEM;
goto out;
}
- if (vs->vs_tpg)
- memcpy(vs_tpg, vs->vs_tpg, len);
mutex_lock(&vhost_scsi_mutex);
list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
@@ -1722,12 +1856,6 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
tv_tport = tpg->tport;
if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
- if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
- mutex_unlock(&tpg->tv_tpg_mutex);
- mutex_unlock(&vhost_scsi_mutex);
- ret = -EEXIST;
- goto undepend;
- }
/*
* In order to ensure individual vhost-scsi configfs
* groups cannot be removed while in use by vhost ioctl,
@@ -1774,15 +1902,15 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
}
ret = 0;
} else {
- ret = -EEXIST;
+ ret = -ENODEV;
+ goto free_tpg;
}
/*
- * Act as synchronize_rcu to make sure access to
- * old vs->vs_tpg is finished.
+ * Act as synchronize_rcu to make sure requests after this point
+ * see a fully setup device.
*/
vhost_scsi_flush(vs);
- kfree(vs->vs_tpg);
vs->vs_tpg = vs_tpg;
goto out;
@@ -1802,6 +1930,7 @@ undepend:
target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
}
}
+free_tpg:
kfree(vs_tpg);
out:
mutex_unlock(&vs->dev.mutex);
@@ -1904,6 +2033,7 @@ free_vs_tpg:
vhost_scsi_flush(vs);
kfree(vs->vs_tpg);
vs->vs_tpg = NULL;
+ memset(vs->vs_vhost_wwpn, 0, sizeof(vs->vs_vhost_wwpn));
WARN_ON(vs->vs_events_nr);
mutex_unlock(&vs->dev.mutex);
return 0;
@@ -1948,6 +2078,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
if (!vs)
goto err_vs;
+ vs->inline_sg_cnt = vhost_scsi_inline_sg_cnt;
if (nvqs > VHOST_SCSI_MAX_IO_VQ) {
pr_err("Invalid max_io_vqs of %d. Using %d.\n", nvqs,
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 65363df8e81b..4fc93f253e06 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -69,14 +69,6 @@ out:
return pageref;
}
-static void fb_deferred_io_pageref_clear(struct fb_deferred_io_pageref *pageref)
-{
- struct page *page = pageref->page;
-
- if (page)
- page->mapping = NULL;
-}
-
static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info *info,
unsigned long offset,
struct page *page)
@@ -140,13 +132,10 @@ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
if (!page)
return VM_FAULT_SIGBUS;
- if (vmf->vma->vm_file)
- page->mapping = vmf->vma->vm_file->f_mapping;
- else
- printk(KERN_ERR "no mapping available\n");
+ if (!vmf->vma->vm_file)
+ fb_err(info, "no mapping available\n");
- BUG_ON(!page->mapping);
- page->index = vmf->pgoff; /* for folio_mkclean() */
+ BUG_ON(!info->fbdefio->mapping);
vmf->page = page;
return 0;
@@ -194,9 +183,9 @@ static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long
/*
* We want the page to remain locked from ->page_mkwrite until
- * the PTE is marked dirty to avoid folio_mkclean() being called
- * before the PTE is updated, which would leave the page ignored
- * by defio.
+ * the PTE is marked dirty to avoid mapping_wrprotect_range()
+ * being called before the PTE is updated, which would leave
+ * the page ignored by defio.
* Do this by locking the page here and informing the caller
* about it with VM_FAULT_LOCKED.
*/
@@ -274,15 +263,17 @@ static void fb_deferred_io_work(struct work_struct *work)
struct fb_deferred_io_pageref *pageref, *next;
struct fb_deferred_io *fbdefio = info->fbdefio;
- /* here we mkclean the pages, then do all deferred IO */
+ /* here we wrprotect the page's mappings, then do all deferred IO. */
mutex_lock(&fbdefio->lock);
+#ifdef CONFIG_MMU
list_for_each_entry(pageref, &fbdefio->pagereflist, list) {
- struct folio *folio = page_folio(pageref->page);
+ struct page *page = pageref->page;
+ pgoff_t pgoff = pageref->offset >> PAGE_SHIFT;
- folio_lock(folio);
- folio_mkclean(folio);
- folio_unlock(folio);
+ mapping_wrprotect_range(fbdefio->mapping, pgoff,
+ page_to_pfn(page), 1);
}
+#endif
/* driver's callback with pagereflist */
fbdefio->deferred_io(info, &fbdefio->pagereflist);
@@ -337,6 +328,7 @@ void fb_deferred_io_open(struct fb_info *info,
{
struct fb_deferred_io *fbdefio = info->fbdefio;
+ fbdefio->mapping = file->f_mapping;
file->f_mapping->a_ops = &fb_deferred_io_aops;
fbdefio->open_count++;
}
@@ -344,13 +336,7 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_open);
static void fb_deferred_io_lastclose(struct fb_info *info)
{
- unsigned long i;
-
flush_delayed_work(&info->deferred_work);
-
- /* clear out the mapping that we setup */
- for (i = 0; i < info->npagerefs; ++i)
- fb_deferred_io_pageref_clear(&info->pagerefs[i]);
}
void fb_deferred_io_release(struct fb_info *info)
@@ -370,5 +356,6 @@ void fb_deferred_io_cleanup(struct fb_info *info)
kvfree(info->pagerefs);
mutex_destroy(&fbdefio->lock);
+ fbdefio->mapping = NULL;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index ba37665188b5..150753c3b578 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -395,6 +395,34 @@ static const struct cpumask *virtio_irq_get_affinity(struct device *_d,
return dev->config->get_vq_affinity(dev, irq_vec);
}
+static void virtio_dev_shutdown(struct device *_d)
+{
+ struct virtio_device *dev = dev_to_virtio(_d);
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+
+ /*
+ * Stop accesses to or from the device.
+ * We only need to do it if there's a driver - no accesses otherwise.
+ */
+ if (!drv)
+ return;
+
+ /*
+ * Some devices get wedged if you kick them after they are
+ * reset. Mark all vqs as broken to make sure we don't.
+ */
+ virtio_break_device(dev);
+ /*
+ * Guarantee that any callback will see vq->broken as true.
+ */
+ virtio_synchronize_cbs(dev);
+ /*
+ * As IOMMUs are reset on shutdown, this will block device access to memory.
+ * Some devices get wedged if this happens, so reset to make sure it does not.
+ */
+ dev->config->reset(dev);
+}
+
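Breaking all vqs before the reset matters because a driver may still attempt a kick after the bus-level shutdown has run. A hypothetical guard (kick_if_alive() is illustrative, not kernel API) showing what the broken flag buys:

	/* Sketch: after virtio_break_device() + virtio_synchronize_cbs(),
	 * every path observes vq->broken, so late kicks become no-ops. */
	static bool kick_if_alive(struct virtqueue *vq)
	{
		if (virtqueue_is_broken(vq))
			return false;		/* device already shut down */
		return virtqueue_kick(vq);
	}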
static const struct bus_type virtio_bus = {
.name = "virtio",
.match = virtio_dev_match,
@@ -403,6 +431,7 @@ static const struct bus_type virtio_bus = {
.probe = virtio_dev_probe,
.remove = virtio_dev_remove,
.irq_get_affinity = virtio_irq_get_affinity,
+ .shutdown = virtio_dev_shutdown,
};
int __register_virtio_driver(struct virtio_driver *driver, struct module *owner)
diff --git a/drivers/w1/masters/w1-uart.c b/drivers/w1/masters/w1-uart.c
index a31782e56ba7..c87eea347806 100644
--- a/drivers/w1/masters/w1-uart.c
+++ b/drivers/w1/masters/w1-uart.c
@@ -372,11 +372,11 @@ static int w1_uart_probe(struct serdev_device *serdev)
init_completion(&w1dev->rx_byte_received);
mutex_init(&w1dev->rx_mutex);
+ serdev_device_set_drvdata(serdev, w1dev);
+ serdev_device_set_client_ops(serdev, &w1_uart_serdev_ops);
ret = w1_uart_serdev_open(w1dev);
if (ret < 0)
return ret;
- serdev_device_set_drvdata(serdev, w1dev);
- serdev_device_set_client_ops(serdev, &w1_uart_serdev_ops);
return w1_add_master_device(&w1dev->bus);
}
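The reorder closes a startup window: w1_uart_serdev_open() enables the port, after which the serdev core may deliver bytes at any time.

	/* Once the port is open, serdev may invoke the w1_uart_serdev_ops
	 * callbacks (e.g. receive_buf), which look up the drvdata set
	 * above; setting ops/drvdata only after open could let a callback
	 * run against unset driver data.
	 */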
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index c85e80c7e130..9ccedb3264fb 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -444,18 +444,8 @@ static int w1_read(struct device *dev, enum hwmon_sensor_types type,
}
}
-static const u32 w1_temp_config[] = {
- HWMON_T_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info w1_temp = {
- .type = hwmon_temp,
- .config = w1_temp_config,
-};
-
static const struct hwmon_channel_info * const w1_info[] = {
- &w1_temp,
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
NULL
};
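HWMON_CHANNEL_INFO() folds the removed w1_temp/w1_temp_config pair into a single compound literal; per <linux/hwmon.h>, the entry above expands to roughly:

	&(const struct hwmon_channel_info) {
		.type = hwmon_temp,
		.config = (const u32 []) { HWMON_T_INPUT, 0 },
	}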
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index f81705f8539a..0d8d37f712e8 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -279,6 +279,18 @@ config LENOVO_SE10_WDT
This driver can also be built as a module. If so, the module
will be called lenovo-se10-wdt.
+config LENOVO_SE30_WDT
+ tristate "Lenovo SE30 Watchdog"
+ depends on (X86 && DMI) || COMPILE_TEST
+ depends on HAS_IOPORT
+ select WATCHDOG_CORE
+ help
+ If you say yes here you get support for the watchdog
+ functionality of the Lenovo SE30 platform.
+
+ This driver can also be built as a module. If so, the module
+ will be called lenovo-se30-wdt.
+
config MENF21BMC_WATCHDOG
tristate "MEN 14F021P00 BMC Watchdog"
depends on MFD_MENF21BMC || COMPILE_TEST
@@ -963,13 +975,14 @@ config RENESAS_RZG2LWDT
Renesas RZ/G2L SoCs. These watchdogs can be used to reset a system.
config RENESAS_RZV2HWDT
- tristate "Renesas RZ/V2H(P) WDT Watchdog"
- depends on ARCH_R9A09G057 || COMPILE_TEST
+ tristate "Renesas RZ/{G3E,V2H(P)} WDT Watchdog"
+ depends on ARCH_RENESAS || COMPILE_TEST
depends on PM || COMPILE_TEST
select WATCHDOG_CORE
help
This driver adds watchdog support for the integrated watchdogs in the
- Renesas RZ/V2H(P) SoCs. These watchdogs can be used to reset a system.
+ Renesas RZ/{G3E,V2H(P)} SoCs. These watchdogs can be used to reset a
+ system.
config ASPEED_WATCHDOG
tristate "Aspeed BMC watchdog support"
@@ -1730,7 +1743,8 @@ config NI903X_WDT
config NIC7018_WDT
tristate "NIC7018 Watchdog"
- depends on X86 && ACPI
+ depends on HAS_IOPORT
+ depends on ACPI || COMPILE_TEST
select WATCHDOG_CORE
help
Support for National Instruments NIC7018 Watchdog.
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 8411626fa162..c9482904bf87 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -124,6 +124,7 @@ obj-$(CONFIG_I6300ESB_WDT) += i6300esb.o
obj-$(CONFIG_IE6XX_WDT) += ie6xx_wdt.o
obj-$(CONFIG_ITCO_WDT) += iTCO_wdt.o
obj-$(CONFIG_LENOVO_SE10_WDT) += lenovo_se10_wdt.o
+obj-$(CONFIG_LENOVO_SE30_WDT) += lenovo_se30_wdt.o
ifeq ($(CONFIG_ITCO_VENDOR_SUPPORT),y)
obj-$(CONFIG_ITCO_WDT) += iTCO_vendor_support.o
endif
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index b4773a6aaf8c..837e15701c0e 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -11,21 +11,30 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kstrtox.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/watchdog.h>
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+struct aspeed_wdt_scu {
+ const char *compatible;
+ u32 reset_status_reg;
+ u32 wdt_reset_mask;
+ u32 wdt_reset_mask_shift;
+};
struct aspeed_wdt_config {
u32 ext_pulse_width_mask;
u32 irq_shift;
u32 irq_mask;
+ struct aspeed_wdt_scu scu;
};
struct aspeed_wdt {
@@ -39,18 +48,36 @@ static const struct aspeed_wdt_config ast2400_config = {
.ext_pulse_width_mask = 0xff,
.irq_shift = 0,
.irq_mask = 0,
+ .scu = {
+ .compatible = "aspeed,ast2400-scu",
+ .reset_status_reg = 0x3c,
+ .wdt_reset_mask = 0x1,
+ .wdt_reset_mask_shift = 1,
+ },
};
static const struct aspeed_wdt_config ast2500_config = {
.ext_pulse_width_mask = 0xfffff,
.irq_shift = 12,
.irq_mask = GENMASK(31, 12),
+ .scu = {
+ .compatible = "aspeed,ast2500-scu",
+ .reset_status_reg = 0x3c,
+ .wdt_reset_mask = 0x1,
+ .wdt_reset_mask_shift = 2,
+ },
};
static const struct aspeed_wdt_config ast2600_config = {
.ext_pulse_width_mask = 0xfffff,
.irq_shift = 0,
.irq_mask = GENMASK(31, 10),
+ .scu = {
+ .compatible = "aspeed,ast2600-scu",
+ .reset_status_reg = 0x74,
+ .wdt_reset_mask = 0xf,
+ .wdt_reset_mask_shift = 16,
+ },
};
static const struct of_device_id aspeed_wdt_of_table[] = {
@@ -213,6 +240,56 @@ static int aspeed_wdt_restart(struct watchdog_device *wdd,
return 0;
}
+static void aspeed_wdt_update_bootstatus(struct platform_device *pdev,
+ struct aspeed_wdt *wdt)
+{
+ const struct resource *res;
+ struct aspeed_wdt_scu scu = wdt->cfg->scu;
+ struct regmap *scu_base;
+ u32 reset_mask_width;
+ u32 reset_mask_shift;
+ u32 idx = 0;
+ u32 status;
+ int ret;
+
+ if (!of_device_is_compatible(pdev->dev.of_node, "aspeed,ast2400-wdt")) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ idx = ((intptr_t)wdt->base & 0x00000fff) / (uintptr_t)resource_size(res);
+ }
+
+ scu_base = syscon_regmap_lookup_by_compatible(scu.compatible);
+ if (IS_ERR(scu_base)) {
+ wdt->wdd.bootstatus = WDIOS_UNKNOWN;
+ return;
+ }
+
+ ret = regmap_read(scu_base, scu.reset_status_reg, &status);
+ if (ret) {
+ wdt->wdd.bootstatus = WDIOS_UNKNOWN;
+ return;
+ }
+
+ reset_mask_width = hweight32(scu.wdt_reset_mask);
+ reset_mask_shift = scu.wdt_reset_mask_shift +
+ reset_mask_width * idx;
+
+ if (status & (scu.wdt_reset_mask << reset_mask_shift))
+ wdt->wdd.bootstatus = WDIOF_CARDRESET;
+
+ /* clear wdt reset event flag */
+ if (of_device_is_compatible(pdev->dev.of_node, "aspeed,ast2400-wdt") ||
+ of_device_is_compatible(pdev->dev.of_node, "aspeed,ast2500-wdt")) {
+ ret = regmap_read(scu_base, scu.reset_status_reg, &status);
+ if (!ret) {
+ status &= ~(scu.wdt_reset_mask << reset_mask_shift);
+ regmap_write(scu_base, scu.reset_status_reg, status);
+ }
+ } else {
+ regmap_write(scu_base, scu.reset_status_reg,
+ scu.wdt_reset_mask << reset_mask_shift);
+ }
+}
+
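A worked example of the mask arithmetic, assuming an AST2600 whose WDT instances sit 0x40 apart (so resource_size(res) == 0x40), here the third instance at offset 0x080:

	/* idx              = (0x080 & 0xfff) / 0x40 = 2
	 * reset_mask_width = hweight32(0xf)         = 4
	 * reset_mask_shift = 16 + 4 * 2             = 24
	 * WDIOF_CARDRESET is set if status & (0xf << 24)
	 */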
/* access_cs0 shows if cs0 is accessible, hence the reverted bit */
static ssize_t access_cs0_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -458,10 +535,10 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
writel(duration - 1, wdt->base + WDT_RESET_WIDTH);
}
+ aspeed_wdt_update_bootstatus(pdev, wdt);
+
status = readl(wdt->base + WDT_TIMEOUT_STATUS);
if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY) {
- wdt->wdd.bootstatus = WDIOF_CARDRESET;
-
if (of_device_is_compatible(np, "aspeed,ast2400-wdt") ||
of_device_is_compatible(np, "aspeed,ast2500-wdt"))
wdt->wdd.groups = bswitch_groups;
diff --git a/drivers/watchdog/cros_ec_wdt.c b/drivers/watchdog/cros_ec_wdt.c
index ba045e29f9a5..716c23f4388c 100644
--- a/drivers/watchdog/cros_ec_wdt.c
+++ b/drivers/watchdog/cros_ec_wdt.c
@@ -58,7 +58,7 @@ static int cros_ec_wdt_ping(struct watchdog_device *wdd)
arg.req.command = EC_HANG_DETECT_CMD_RELOAD;
ret = cros_ec_wdt_send_cmd(cros_ec, &arg);
if (ret < 0)
- dev_dbg(wdd->parent, "Failed to ping watchdog (%d)", ret);
+ dev_dbg(wdd->parent, "Failed to ping watchdog (%d)\n", ret);
return ret;
}
@@ -74,7 +74,7 @@ static int cros_ec_wdt_start(struct watchdog_device *wdd)
arg.req.reboot_timeout_sec = wdd->timeout;
ret = cros_ec_wdt_send_cmd(cros_ec, &arg);
if (ret < 0)
- dev_dbg(wdd->parent, "Failed to start watchdog (%d)", ret);
+ dev_dbg(wdd->parent, "Failed to start watchdog (%d)\n", ret);
return ret;
}
@@ -88,7 +88,7 @@ static int cros_ec_wdt_stop(struct watchdog_device *wdd)
arg.req.command = EC_HANG_DETECT_CMD_CANCEL;
ret = cros_ec_wdt_send_cmd(cros_ec, &arg);
if (ret < 0)
- dev_dbg(wdd->parent, "Failed to stop watchdog (%d)", ret);
+ dev_dbg(wdd->parent, "Failed to stop watchdog (%d)\n", ret);
return ret;
}
@@ -136,7 +136,7 @@ static int cros_ec_wdt_probe(struct platform_device *pdev)
arg.req.command = EC_HANG_DETECT_CMD_GET_STATUS;
ret = cros_ec_wdt_send_cmd(cros_ec, &arg);
if (ret < 0)
- return dev_err_probe(dev, ret, "Failed to get watchdog bootstatus");
+ return dev_err_probe(dev, ret, "Failed to get watchdog bootstatus\n");
wdd->parent = &pdev->dev;
wdd->info = &cros_ec_wdt_ident;
@@ -150,7 +150,7 @@ static int cros_ec_wdt_probe(struct platform_device *pdev)
arg.req.command = EC_HANG_DETECT_CMD_CLEAR_STATUS;
ret = cros_ec_wdt_send_cmd(cros_ec, &arg);
if (ret < 0)
- return dev_err_probe(dev, ret, "Failed to clear watchdog bootstatus");
+ return dev_err_probe(dev, ret, "Failed to clear watchdog bootstatus\n");
watchdog_stop_on_reboot(wdd);
watchdog_stop_on_unregister(wdd);
diff --git a/drivers/watchdog/lenovo_se30_wdt.c b/drivers/watchdog/lenovo_se30_wdt.c
new file mode 100644
index 000000000000..024b842499b3
--- /dev/null
+++ b/drivers/watchdog/lenovo_se30_wdt.c
@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * WDT driver for Lenovo SE30 device
+ */
+
+#define dev_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/dmi.h>
+#include <linux/delay.h>
+#include <linux/iommu.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+#define IOREGION_OFFSET 4 /* Use EC port 1 */
+#define IOREGION_LENGTH 4
+
+#define WATCHDOG_TIMEOUT 60
+
+#define MIN_TIMEOUT 1
+#define MAX_TIMEOUT 255
+#define MAX_WAIT 10
+
+static int timeout; /* in seconds */
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout,
+ "Watchdog timeout in seconds. 1 <= timeout <= 255, default="
+ __MODULE_STRING(WATCHDOG_TIMEOUT) ".");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+#define LNV_SE30_NAME "lenovo-se30-wdt"
+#define LNV_SE30_ID 0x0110
+#define CHIPID_MASK 0xFFF0
+
+#define CHIPID_REG 0x20
+#define SIO_REG 0x2e
+#define LDN_REG 0x07
+#define UNLOCK_KEY 0x87
+#define LOCK_KEY 0xAA
+#define LD_NUM_SHM 0x0F
+#define LD_BASE_ADDR 0xF8
+
+#define WDT_MODULE 0x10
+#define WDT_CFG_INDEX 0x15 /* WD configuration register */
+#define WDT_CNT_INDEX 0x16 /* WD timer count register */
+#define WDT_CFG_RESET 0x2
+
+/* Host Interface WIN2 offset definition */
+#define SHM_WIN_SIZE 0xFF
+#define SHM_WIN_MOD_OFFSET 0x01
+#define SHM_WIN_CMD_OFFSET 0x02
+#define SHM_WIN_SEL_OFFSET 0x03
+#define SHM_WIN_CTL_OFFSET 0x04
+#define VAL_SHM_WIN_CTRL_WR 0x40
+#define VAL_SHM_WIN_CTRL_RD 0x80
+#define SHM_WIN_ID_OFFSET 0x08
+#define SHM_WIN_DAT_OFFSET 0x10
+
+struct nct6692_reg {
+ unsigned char mod;
+ unsigned char cmd;
+ unsigned char sel;
+ unsigned int idx;
+};
+
+/* Watchdog is based on NCT6692 device */
+struct lenovo_se30_wdt {
+ unsigned char __iomem *shm_base_addr;
+ struct nct6692_reg wdt_cfg;
+ struct nct6692_reg wdt_cnt;
+ struct watchdog_device wdt;
+};
+
+static inline void superio_outb(int ioreg, int reg, int val)
+{
+ outb(reg, ioreg);
+ outb(val, ioreg + 1);
+}
+
+static inline int superio_inb(int ioreg, int reg)
+{
+ outb(reg, ioreg);
+ return inb(ioreg + 1);
+}
+
+static inline int superio_enter(int key, int addr, const char *name)
+{
+ if (!request_muxed_region(addr, 2, name)) {
+ pr_err("I/O address 0x%04x already in use\n", addr);
+ return -EBUSY;
+ }
+ outb(key, addr); /* Enter extended function mode */
+ outb(key, addr); /* Again according to manual */
+
+ return 0;
+}
+
+static inline void superio_exit(int key, int addr)
+{
+ outb(key, addr); /* Leave extended function mode */
+ release_region(addr, 2);
+}
+
+static int shm_get_ready(unsigned char __iomem *shm_base_addr,
+ const struct nct6692_reg *reg)
+{
+ unsigned char pre_id, new_id;
+ int loop = 0;
+
+ iowrite8(reg->mod, shm_base_addr + SHM_WIN_MOD_OFFSET);
+ iowrite8(reg->cmd, shm_base_addr + SHM_WIN_CMD_OFFSET);
+ iowrite8(reg->sel, shm_base_addr + SHM_WIN_SEL_OFFSET);
+
+ pre_id = ioread8(shm_base_addr + SHM_WIN_ID_OFFSET);
+ iowrite8(VAL_SHM_WIN_CTRL_RD, shm_base_addr + SHM_WIN_CTL_OFFSET);
+
+ /* Loop checking when interface is ready */
+ while (loop < MAX_WAIT) {
+ new_id = ioread8(shm_base_addr + SHM_WIN_ID_OFFSET);
+ if (new_id != pre_id)
+ return 0;
+ loop++;
+ usleep_range(10, 125);
+ }
+ return -ETIMEDOUT;
+}
+
+static int read_shm_win(unsigned char __iomem *shm_base_addr,
+ const struct nct6692_reg *reg,
+ unsigned char idx_offset,
+ unsigned char *data)
+{
+ int err = shm_get_ready(shm_base_addr, reg);
+
+ if (err)
+ return err;
+ *data = ioread8(shm_base_addr + SHM_WIN_DAT_OFFSET + reg->idx + idx_offset);
+ return 0;
+}
+
+static int write_shm_win(unsigned char __iomem *shm_base_addr,
+ const struct nct6692_reg *reg,
+ unsigned char idx_offset,
+ unsigned char val)
+{
+ int err = shm_get_ready(shm_base_addr, reg);
+
+ if (err)
+ return err;
+ iowrite8(val, shm_base_addr + SHM_WIN_DAT_OFFSET + reg->idx + idx_offset);
+ iowrite8(VAL_SHM_WIN_CTRL_WR, shm_base_addr + SHM_WIN_CTL_OFFSET);
+ err = shm_get_ready(shm_base_addr, reg);
+ return err;
+}
+
+static int lenovo_se30_wdt_enable(struct lenovo_se30_wdt *data, unsigned int timeout)
+{
+ if (timeout) {
+ int err = write_shm_win(data->shm_base_addr, &data->wdt_cfg, 0, WDT_CFG_RESET);
+
+ if (err)
+ return err;
+ }
+ return write_shm_win(data->shm_base_addr, &data->wdt_cnt, 0, timeout);
+}
+
+static int lenovo_se30_wdt_start(struct watchdog_device *wdog)
+{
+ struct lenovo_se30_wdt *data = watchdog_get_drvdata(wdog);
+
+ return lenovo_se30_wdt_enable(data, wdog->timeout);
+}
+
+static int lenovo_se30_wdt_stop(struct watchdog_device *wdog)
+{
+ struct lenovo_se30_wdt *data = watchdog_get_drvdata(wdog);
+
+ return lenovo_se30_wdt_enable(data, 0);
+}
+
+static unsigned int lenovo_se30_wdt_get_timeleft(struct watchdog_device *wdog)
+{
+ struct lenovo_se30_wdt *data = watchdog_get_drvdata(wdog);
+ unsigned char timeleft;
+ int err;
+
+ err = read_shm_win(data->shm_base_addr, &data->wdt_cnt, 0, &timeleft);
+ if (err)
+ return 0;
+ return timeleft;
+}
+
+static int lenovo_se30_wdt_ping(struct watchdog_device *wdt)
+{
+ struct lenovo_se30_wdt *data = watchdog_get_drvdata(wdt);
+ int err = 0;
+
+ /*
+ * The device does not support refreshing the WDT_TIMER_REG register
+ * while the watchdog is active: it must be disabled, fed, then
+ * enabled again.
+ */
+ err = lenovo_se30_wdt_enable(data, 0);
+ if (err)
+ return err;
+
+ err = write_shm_win(data->shm_base_addr, &data->wdt_cnt, 0, wdt->timeout);
+ if (!err)
+ err = lenovo_se30_wdt_enable(data, wdt->timeout);
+
+ return err;
+}
+
+static const struct watchdog_info lenovo_se30_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+ .identity = "Lenovo SE30 watchdog",
+};
+
+static const struct watchdog_ops lenovo_se30_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = lenovo_se30_wdt_start,
+ .stop = lenovo_se30_wdt_stop,
+ .ping = lenovo_se30_wdt_ping,
+ .get_timeleft = lenovo_se30_wdt_get_timeleft,
+};
+
+static int lenovo_se30_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct lenovo_se30_wdt *priv;
+ unsigned long base_phys;
+ unsigned short val;
+ int err;
+
+ err = superio_enter(UNLOCK_KEY, SIO_REG, LNV_SE30_NAME);
+ if (err)
+ return err;
+
+ val = superio_inb(SIO_REG, CHIPID_REG) << 8;
+ val |= superio_inb(SIO_REG, CHIPID_REG + 1);
+
+ if ((val & CHIPID_MASK) != LNV_SE30_ID) {
+ superio_exit(LOCK_KEY, SIO_REG);
+ return -ENODEV;
+ }
+
+ superio_outb(SIO_REG, LDN_REG, LD_NUM_SHM);
+ base_phys = (superio_inb(SIO_REG, LD_BASE_ADDR) |
+ (superio_inb(SIO_REG, LD_BASE_ADDR + 1) << 8) |
+ (superio_inb(SIO_REG, LD_BASE_ADDR + 2) << 16) |
+ (superio_inb(SIO_REG, LD_BASE_ADDR + 3) << 24)) &
+ 0xFFFFFFFF;
+
+ superio_exit(LOCK_KEY, SIO_REG);
+ if (base_phys == 0xFFFFFFFF || base_phys == 0)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (!devm_request_mem_region(dev, base_phys, SHM_WIN_SIZE, LNV_SE30_NAME))
+ return -EBUSY;
+
+ priv->shm_base_addr = devm_ioremap(dev, base_phys, SHM_WIN_SIZE);
+ if (!priv->shm_base_addr)
+ return -ENOMEM;
+
+ priv->wdt_cfg.mod = WDT_MODULE;
+ priv->wdt_cfg.idx = WDT_CFG_INDEX;
+ priv->wdt_cnt.mod = WDT_MODULE;
+ priv->wdt_cnt.idx = WDT_CNT_INDEX;
+
+ priv->wdt.ops = &lenovo_se30_wdt_ops;
+ priv->wdt.info = &lenovo_se30_wdt_info;
+ priv->wdt.timeout = WATCHDOG_TIMEOUT; /* Set default timeout */
+ priv->wdt.min_timeout = MIN_TIMEOUT;
+ priv->wdt.max_timeout = MAX_TIMEOUT;
+ priv->wdt.parent = dev;
+
+ watchdog_init_timeout(&priv->wdt, timeout, dev);
+ watchdog_set_drvdata(&priv->wdt, priv);
+ watchdog_set_nowayout(&priv->wdt, nowayout);
+ watchdog_stop_on_reboot(&priv->wdt);
+ watchdog_stop_on_unregister(&priv->wdt);
+
+ return devm_watchdog_register_device(dev, &priv->wdt);
+}
+
+static struct platform_device *pdev;
+
+static struct platform_driver lenovo_se30_wdt_driver = {
+ .driver = {
+ .name = LNV_SE30_NAME,
+ },
+ .probe = lenovo_se30_wdt_probe,
+};
+
+static int lenovo_se30_create_platform_device(const struct dmi_system_id *id)
+{
+ int err;
+
+ pdev = platform_device_alloc(LNV_SE30_NAME, -1);
+ if (!pdev)
+ return -ENOMEM;
+
+ err = platform_device_add(pdev);
+ if (err)
+ platform_device_put(pdev);
+
+ return err;
+}
+
+static const struct dmi_system_id lenovo_se30_wdt_dmi_table[] __initconst = {
+ {
+ .ident = "LENOVO-SE30",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "11NA"),
+ },
+ .callback = lenovo_se30_create_platform_device,
+ },
+ {
+ .ident = "LENOVO-SE30",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "11NB"),
+ },
+ .callback = lenovo_se30_create_platform_device,
+ },
+ {
+ .ident = "LENOVO-SE30",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "11NC"),
+ },
+ .callback = lenovo_se30_create_platform_device,
+ },
+ {
+ .ident = "LENOVO-SE30",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "11NH"),
+ },
+ .callback = lenovo_se30_create_platform_device,
+ },
+ {
+ .ident = "LENOVO-SE30",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "11NJ"),
+ },
+ .callback = lenovo_se30_create_platform_device,
+ },
+ {
+ .ident = "LENOVO-SE30",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "11NK"),
+ },
+ .callback = lenovo_se30_create_platform_device,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(dmi, lenovo_se30_wdt_dmi_table);
+
+static int __init lenovo_se30_wdt_init(void)
+{
+ if (!dmi_check_system(lenovo_se30_wdt_dmi_table))
+ return -ENODEV;
+
+ return platform_driver_register(&lenovo_se30_wdt_driver);
+}
+
+static void __exit lenovo_se30_wdt_exit(void)
+{
+ if (pdev)
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&lenovo_se30_wdt_driver);
+}
+
+module_init(lenovo_se30_wdt_init);
+module_exit(lenovo_se30_wdt_exit);
+
+MODULE_AUTHOR("Mark Pearson <mpearson-lenovo@squebb.ca>");
+MODULE_AUTHOR("David Ober <dober@lenovo.com>");
+MODULE_DESCRIPTION("Lenovo SE30 watchdog driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/nic7018_wdt.c b/drivers/watchdog/nic7018_wdt.c
index 44982b37ba6f..44b5298f599a 100644
--- a/drivers/watchdog/nic7018_wdt.c
+++ b/drivers/watchdog/nic7018_wdt.c
@@ -3,12 +3,13 @@
* Copyright (C) 2016 National Instruments Corp.
*/
-#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/types.h>
#include <linux/watchdog.h>
#define LOCK 0xA5
@@ -229,8 +230,8 @@ static void nic7018_remove(struct platform_device *pdev)
}
static const struct acpi_device_id nic7018_device_ids[] = {
- {"NIC7018", 0},
- {"", 0},
+ { "NIC7018" },
+ { }
};
MODULE_DEVICE_TABLE(acpi, nic7018_device_ids);
@@ -239,7 +240,7 @@ static struct platform_driver watchdog_driver = {
.remove = nic7018_remove,
.driver = {
.name = KBUILD_MODNAME,
- .acpi_match_table = ACPI_PTR(nic7018_device_ids),
+ .acpi_match_table = nic7018_device_ids,
},
};
diff --git a/drivers/watchdog/npcm_wdt.c b/drivers/watchdog/npcm_wdt.c
index a5dd1c230137..e62ea054bc61 100644
--- a/drivers/watchdog/npcm_wdt.c
+++ b/drivers/watchdog/npcm_wdt.c
@@ -68,8 +68,7 @@ static int npcm_wdt_start(struct watchdog_device *wdd)
struct npcm_wdt *wdt = to_npcm_wdt(wdd);
u32 val;
- if (wdt->clk)
- clk_prepare_enable(wdt->clk);
+ clk_prepare_enable(wdt->clk);
if (wdd->timeout < 2)
val = 0x800;
@@ -105,8 +104,7 @@ static int npcm_wdt_stop(struct watchdog_device *wdd)
writel(0, wdt->reg);
- if (wdt->clk)
- clk_disable_unprepare(wdt->clk);
+ clk_disable_unprepare(wdt->clk);
return 0;
}
@@ -156,8 +154,7 @@ static int npcm_wdt_restart(struct watchdog_device *wdd,
struct npcm_wdt *wdt = to_npcm_wdt(wdd);
/* For reset, we start the WDT clock and leave it running. */
- if (wdt->clk)
- clk_prepare_enable(wdt->clk);
+ clk_prepare_enable(wdt->clk);
writel(NPCM_WTR | NPCM_WTRE | NPCM_WTE, wdt->reg);
udelay(1000);
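
These deletions lean on a documented property of the clk consumer API: a NULL struct clk pointer is a valid "dummy" clock, so clk_prepare_enable(NULL) returns 0 and clk_disable_unprepare(NULL) is a no-op. That makes per-call NULL guards redundant when the clock was acquired as optional - roughly this pattern (a sketch, not code from this driver):

/* Sketch: optional clock handling without NULL guards. */
wdt->clk = devm_clk_get_optional(&pdev->dev, NULL);
if (IS_ERR(wdt->clk))
	return PTR_ERR(wdt->clk);	/* real errors still propagate */
clk_prepare_enable(wdt->clk);		/* no-op when wdt->clk is NULL */
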
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 30450e99e5e9..bdd81d8074b2 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -72,6 +72,8 @@
#define EXYNOS850_CLUSTER1_WDTRESET_BIT 23
#define EXYNOSAUTOV9_CLUSTER0_WDTRESET_BIT 25
#define EXYNOSAUTOV9_CLUSTER1_WDTRESET_BIT 24
+#define EXYNOSAUTOV920_CLUSTER0_WDTRESET_BIT 0
+#define EXYNOSAUTOV920_CLUSTER1_WDTRESET_BIT 1
#define GS_CLUSTER0_NONCPU_OUT 0x1220
#define GS_CLUSTER1_NONCPU_OUT 0x1420
@@ -312,9 +314,9 @@ static const struct s3c2410_wdt_variant drv_data_exynosautov920_cl0 = {
.mask_bit = 2,
.mask_reset_inv = true,
.rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
- .rst_stat_bit = EXYNOSAUTOV9_CLUSTER0_WDTRESET_BIT,
+ .rst_stat_bit = EXYNOSAUTOV920_CLUSTER0_WDTRESET_BIT,
.cnt_en_reg = EXYNOSAUTOV920_CLUSTER0_NONCPU_OUT,
- .cnt_en_bit = 7,
+ .cnt_en_bit = 8,
.quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET |
QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN |
QUIRK_HAS_DBGACK_BIT,
@@ -325,9 +327,9 @@ static const struct s3c2410_wdt_variant drv_data_exynosautov920_cl1 = {
.mask_bit = 2,
.mask_reset_inv = true,
.rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
- .rst_stat_bit = EXYNOSAUTOV9_CLUSTER1_WDTRESET_BIT,
+ .rst_stat_bit = EXYNOSAUTOV920_CLUSTER1_WDTRESET_BIT,
.cnt_en_reg = EXYNOSAUTOV920_CLUSTER1_NONCPU_OUT,
- .cnt_en_bit = 7,
+ .cnt_en_bit = 8,
.quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET |
QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN |
QUIRK_HAS_DBGACK_BIT,
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index b85354a99582..b6c761acc3de 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -236,10 +236,21 @@ static const struct sunxi_wdt_reg sun20i_wdt_reg = {
.wdt_key_val = 0x16aa0000,
};
+static const struct sunxi_wdt_reg sun55i_wdt_reg = {
+ .wdt_ctrl = 0x0c,
+ .wdt_cfg = 0x10,
+ .wdt_mode = 0x14,
+ .wdt_timeout_shift = 4,
+ .wdt_reset_mask = 0x03,
+ .wdt_reset_val = 0x01,
+ .wdt_key_val = 0x16aa0000,
+};
+
static const struct of_device_id sunxi_wdt_dt_ids[] = {
{ .compatible = "allwinner,sun4i-a10-wdt", .data = &sun4i_wdt_reg },
{ .compatible = "allwinner,sun6i-a31-wdt", .data = &sun6i_wdt_reg },
{ .compatible = "allwinner,sun20i-d1-wdt", .data = &sun20i_wdt_reg },
+ { .compatible = "allwinner,sun55i-a523-wdt", .data = &sun55i_wdt_reg },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sunxi_wdt_dt_ids);
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index d46d8c8c01f2..6152dba4b52c 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -33,7 +33,8 @@
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/idr.h> /* For ida_* macros */
#include <linux/err.h> /* For IS_ERR macros */
-#include <linux/of.h> /* For of_get_timeout_sec */
+#include <linux/of.h> /* For of_alias_get_id */
+#include <linux/property.h> /* For device_property_read_u32 */
#include <linux/suspend.h>
#include "watchdog_core.h" /* For watchdog_dev_register/... */
@@ -137,8 +138,7 @@ int watchdog_init_timeout(struct watchdog_device *wdd,
}
/* try to get the timeout_sec property */
- if (dev && dev->of_node &&
- of_property_read_u32(dev->of_node, "timeout-sec", &t) == 0) {
+ if (dev && device_property_read_u32(dev, "timeout-sec", &t) == 0) {
if (t && !watchdog_timeout_invalid(wdd, t)) {
wdd->timeout = t;
return 0;
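
The move from of_property_read_u32() to device_property_read_u32() makes the "timeout-sec" lookup firmware-agnostic: the same call now works for devicetree, ACPI _DSD properties, and software nodes. As an illustration (a sketch; the array name is invented, and attaching it to a device, e.g. via platform_device_info.properties, is omitted), a non-DT platform could supply the property through a software node and watchdog_init_timeout() would pick it up unchanged:

/* Illustrative software-node property carrying "timeout-sec". */
static const struct property_entry wdt_props[] = {
	PROPERTY_ENTRY_U32("timeout-sec", 30),
	{ }
};
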
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 163f7f1d70f1..65d4e7fa1eb8 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -157,6 +157,8 @@ static void balloon_append(struct page *page)
list_add(&page->lru, &ballooned_pages);
balloon_stats.balloon_low++;
}
+ inc_node_page_state(page, NR_BALLOON_PAGES);
+
wake_up(&balloon_wq);
}
@@ -179,6 +181,8 @@ static struct page *balloon_retrieve(bool require_lowmem)
balloon_stats.balloon_low--;
__ClearPageOffline(page);
+ dec_node_page_state(page, NR_BALLOON_PAGES);
+
return page;
}
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index cc2007be2173..5b5fda617b80 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -407,8 +407,8 @@ static struct dentry *v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
err);
goto error;
}
- v9fs_fid_add(dentry, &fid);
v9fs_set_create_acl(inode, fid, dacl, pacl);
+ v9fs_fid_add(dentry, &fid);
d_instantiate(dentry, inode);
err = 0;
inc_nlink(dir);
diff --git a/fs/Kconfig b/fs/Kconfig
index afe21866d6b4..c718b2e2de0e 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -286,6 +286,7 @@ config HUGETLB_PAGE_OPTIMIZE_VMEMMAP
def_bool HUGETLB_PAGE
depends on ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
depends on SPARSEMEM_VMEMMAP
+ select SPARSEMEM_VMEMMAP_PREINIT if ARCH_WANT_HUGETLB_VMEMMAP_PREINIT
config HUGETLB_PMD_PAGE_TABLE_SHARING
def_bool HUGETLB_PAGE
diff --git a/fs/bcachefs/Kconfig b/fs/bcachefs/Kconfig
index c9798750202d..bf1c94e51dd0 100644
--- a/fs/bcachefs/Kconfig
+++ b/fs/bcachefs/Kconfig
@@ -26,6 +26,7 @@ config BCACHEFS_FS
select SRCU
select SYMBOLIC_ERRNAME
select MIN_HEAP
+ select XARRAY_MULTI
help
The bcachefs filesystem - a modern, copy on write filesystem, with
support for multiple devices, compression, checksumming, etc.
diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c
index 99487727ae64..d03adc36100e 100644
--- a/fs/bcachefs/acl.c
+++ b/fs/bcachefs/acl.c
@@ -273,7 +273,7 @@ struct posix_acl *bch2_get_acl(struct inode *vinode, int type, bool rcu)
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode);
struct xattr_search_key search = X_SEARCH(acl_to_xattr_type(type), "", 0);
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
struct posix_acl *acl = NULL;
if (rcu)
@@ -344,7 +344,7 @@ int bch2_set_acl(struct mnt_idmap *idmap,
{
struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct btree_iter inode_iter = { NULL };
+ struct btree_iter inode_iter = {};
struct bch_inode_unpacked inode_u;
struct posix_acl *acl;
umode_t mode;
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index c12ca7538e4f..94ea9e49aec4 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -610,7 +610,7 @@ int bch2_alloc_read(struct bch_fs *c)
* bch2_check_alloc_key() which runs later:
*/
if (!ca) {
- bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
+ bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
continue;
}
@@ -631,17 +631,17 @@ int bch2_alloc_read(struct bch_fs *c)
* bch2_check_alloc_key() which runs later:
*/
if (!ca) {
- bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
+ bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
continue;
}
if (k.k->p.offset < ca->mi.first_bucket) {
- bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode, ca->mi.first_bucket));
+ bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode, ca->mi.first_bucket));
continue;
}
if (k.k->p.offset >= ca->mi.nbuckets) {
- bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
+ bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
continue;
}
@@ -1039,9 +1039,10 @@ invalid_bucket:
* This synthesizes deleted extents for holes, similar to BTREE_ITER_slots for
* extents style btrees, but works on non-extents btrees:
*/
-static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
+static struct bkey_s_c bch2_get_key_or_hole(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos end, struct bkey *hole)
{
- struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, iter);
if (bkey_err(k))
return k;
@@ -1052,9 +1053,9 @@ static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos
struct btree_iter iter2;
struct bpos next;
- bch2_trans_copy_iter(&iter2, iter);
+ bch2_trans_copy_iter(trans, &iter2, iter);
- struct btree_path *path = btree_iter_path(iter->trans, iter);
+ struct btree_path *path = btree_iter_path(trans, iter);
if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX))
end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p));
@@ -1064,9 +1065,9 @@ static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos
* btree node min/max is a closed interval, while the end argument is a
* half-open bound:
*/
- k = bch2_btree_iter_peek_max(&iter2, end);
+ k = bch2_btree_iter_peek_max(trans, &iter2, end);
next = iter2.pos;
- bch2_trans_iter_exit(iter->trans, &iter2);
+ bch2_trans_iter_exit(trans, &iter2);
BUG_ON(next.offset >= iter->pos.offset + U32_MAX);
@@ -1107,13 +1108,14 @@ static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *buck
return *ca != NULL;
}
-static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter,
- struct bch_dev **ca, struct bkey *hole)
+static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bch_dev **ca, struct bkey *hole)
{
- struct bch_fs *c = iter->trans->c;
+ struct bch_fs *c = trans->c;
struct bkey_s_c k;
again:
- k = bch2_get_key_or_hole(iter, POS_MAX, hole);
+ k = bch2_get_key_or_hole(trans, iter, POS_MAX, hole);
if (bkey_err(k))
return k;
@@ -1126,7 +1128,7 @@ again:
if (!next_bucket(c, ca, &hole_start))
return bkey_s_c_null;
- bch2_btree_iter_set_pos(iter, hole_start);
+ bch2_btree_iter_set_pos(trans, iter, hole_start);
goto again;
}
@@ -1167,8 +1169,8 @@ int bch2_check_alloc_key(struct btree_trans *trans,
a = bch2_alloc_to_v4(alloc_k, &a_convert);
- bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
- k = bch2_btree_iter_peek_slot(discard_iter);
+ bch2_btree_iter_set_pos(trans, discard_iter, alloc_k.k->p);
+ k = bch2_btree_iter_peek_slot(trans, discard_iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1181,8 +1183,8 @@ int bch2_check_alloc_key(struct btree_trans *trans,
goto err;
}
- bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
- k = bch2_btree_iter_peek_slot(freespace_iter);
+ bch2_btree_iter_set_pos(trans, freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
+ k = bch2_btree_iter_peek_slot(trans, freespace_iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1195,8 +1197,8 @@ int bch2_check_alloc_key(struct btree_trans *trans,
goto err;
}
- bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
- k = bch2_btree_iter_peek_slot(bucket_gens_iter);
+ bch2_btree_iter_set_pos(trans, bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
+ k = bch2_btree_iter_peek_slot(trans, bucket_gens_iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1249,9 +1251,9 @@ int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
if (!ca->mi.freespace_initialized)
return 0;
- bch2_btree_iter_set_pos(freespace_iter, start);
+ bch2_btree_iter_set_pos(trans, freespace_iter, start);
- k = bch2_btree_iter_peek_slot(freespace_iter);
+ k = bch2_btree_iter_peek_slot(trans, freespace_iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1300,9 +1302,9 @@ int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
unsigned i, gens_offset, gens_end_offset;
int ret;
- bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));
+ bch2_btree_iter_set_pos(trans, bucket_gens_iter, alloc_gens_pos(start, &gens_offset));
- k = bch2_btree_iter_peek_slot(bucket_gens_iter);
+ k = bch2_btree_iter_peek_slot(trans, bucket_gens_iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1435,7 +1437,7 @@ int bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_ite
*gen = a->gen;
out:
fsck_err:
- bch2_set_btree_iter_dontneed(&alloc_iter);
+ bch2_set_btree_iter_dontneed(trans, &alloc_iter);
bch2_trans_iter_exit(trans, &alloc_iter);
printbuf_exit(&buf);
return ret;
@@ -1572,7 +1574,7 @@ int bch2_check_alloc_info(struct bch_fs *c)
bch2_trans_begin(trans);
- k = bch2_get_key_or_real_bucket_hole(&iter, &ca, &hole);
+ k = bch2_get_key_or_real_bucket_hole(trans, &iter, &ca, &hole);
ret = bkey_err(k);
if (ret)
goto bkey_err;
@@ -1610,7 +1612,7 @@ int bch2_check_alloc_info(struct bch_fs *c)
if (ret)
goto bkey_err;
- bch2_btree_iter_set_pos(&iter, next);
+ bch2_btree_iter_set_pos(trans, &iter, next);
bkey_err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
@@ -1638,7 +1640,7 @@ bkey_err:
BTREE_ITER_prefetch);
while (1) {
bch2_trans_begin(trans);
- k = bch2_btree_iter_peek(&iter);
+ k = bch2_btree_iter_peek(trans, &iter);
if (!k.k)
break;
@@ -1657,7 +1659,7 @@ bkey_err:
break;
}
- bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
+ bch2_btree_iter_set_pos(trans, &iter, bpos_nosnap_successor(iter.pos));
}
bch2_trans_iter_exit(trans, &iter);
if (ret)
@@ -1685,7 +1687,7 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
struct printbuf buf = PRINTBUF;
int ret;
- alloc_k = bch2_btree_iter_peek(alloc_iter);
+ alloc_k = bch2_btree_iter_peek(trans, alloc_iter);
if (!alloc_k.k)
return 0;
@@ -1826,7 +1828,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct bpos pos = need_discard_iter->pos;
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
struct bkey_s_c k;
struct bkey_i_alloc_v4 *a;
struct printbuf buf = PRINTBUF;
@@ -1950,7 +1952,7 @@ static void bch2_do_discards_work(struct work_struct *work)
trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
bch2_err_str(ret));
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
bch2_write_ref_put(c, BCH_WRITE_REF_discard);
}
@@ -1967,7 +1969,7 @@ void bch2_dev_do_discards(struct bch_dev *ca)
if (queue_work(c->write_ref_wq, &ca->discard_work))
return;
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
put_write_ref:
bch2_write_ref_put(c, BCH_WRITE_REF_discard);
}
@@ -2045,7 +2047,7 @@ static void bch2_do_discards_fast_work(struct work_struct *work)
trace_discard_buckets_fast(c, s.seen, s.open, s.need_journal_commit, s.discarded, bch2_err_str(ret));
bch2_trans_put(trans);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
}
@@ -2065,7 +2067,7 @@ static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket)
if (queue_work(c->write_ref_wq, &ca->discard_fast_work))
return;
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
put_ref:
bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
}
@@ -2082,6 +2084,9 @@ static int invalidate_one_bp(struct btree_trans *trans,
if (ret)
return ret;
+ if (!extent_k.k)
+ return 0;
+
struct bkey_i *n =
bch2_bkey_make_mut(trans, &extent_iter, &extent_k,
BTREE_UPDATE_internal_snapshot_node);
@@ -2199,9 +2204,9 @@ static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter
{
struct bkey_s_c k;
again:
- k = bch2_btree_iter_peek_max(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
+ k = bch2_btree_iter_peek_max(trans, iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
if (!k.k && !*wrapped) {
- bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0));
+ bch2_btree_iter_set_pos(trans, iter, lru_pos(ca->dev_idx, 0, 0));
*wrapped = true;
goto again;
}
@@ -2251,12 +2256,12 @@ restart_err:
if (ret)
break;
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
}
bch2_trans_iter_exit(trans, &iter);
err:
bch2_trans_put(trans);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
bch2_bkey_buf_exit(&last_flushed, c);
bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}
@@ -2274,7 +2279,7 @@ void bch2_dev_do_invalidates(struct bch_dev *ca)
if (queue_work(c->write_ref_wq, &ca->invalidate_work))
return;
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
put_ref:
bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}
@@ -2321,7 +2326,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
break;
}
- k = bch2_get_key_or_hole(&iter, end, &hole);
+ k = bch2_get_key_or_hole(trans, &iter, end, &hole);
ret = bkey_err(k);
if (ret)
goto bkey_err;
@@ -2340,7 +2345,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
if (ret)
goto bkey_err;
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
} else {
struct bkey_i *freespace;
@@ -2360,7 +2365,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
if (ret)
goto bkey_err;
- bch2_btree_iter_set_pos(&iter, k.k->p);
+ bch2_btree_iter_set_pos(trans, &iter, k.k->p);
}
bkey_err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
@@ -2506,7 +2511,7 @@ void bch2_recalc_capacity(struct bch_fs *c)
bch2_set_ra_pages(c, ra_pages);
- for_each_rw_member(c, ca) {
+ __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw), READ) {
u64 dev_reserve = 0;
/*
diff --git a/fs/bcachefs/alloc_background.h b/fs/bcachefs/alloc_background.h
index c556ccaffe89..34b3d6ac4fbb 100644
--- a/fs/bcachefs/alloc_background.h
+++ b/fs/bcachefs/alloc_background.h
@@ -321,11 +321,11 @@ static inline u64 should_invalidate_buckets(struct bch_dev *ca,
{
u64 want_free = ca->mi.nbuckets >> 7;
u64 free = max_t(s64, 0,
- u.d[BCH_DATA_free].buckets
- + u.d[BCH_DATA_need_discard].buckets
+ u.buckets[BCH_DATA_free]
+ + u.buckets[BCH_DATA_need_discard]
- bch2_dev_buckets_reserved(ca, BCH_WATERMARK_stripe));
- return clamp_t(s64, want_free - free, 0, u.d[BCH_DATA_cached].buckets);
+ return clamp_t(s64, want_free - free, 0, u.buckets[BCH_DATA_cached]);
}
void bch2_dev_do_invalidates(struct bch_dev *);
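
For context, should_invalidate_buckets() above aims to keep roughly 1/128th of the device's buckets free, counts need_discard buckets as nearly free, and can only invalidate what the cached pool holds. A worked example with invented numbers:

/* Invented numbers, tracing the formula above:
 * nbuckets = 1 << 20               -> want_free = nbuckets >> 7 = 8192
 * free = 3000, need_discard = 1000, stripe reserve = 500
 *   free = max(0, 3000 + 1000 - 500) = 3500
 * shortfall = want_free - free = 8192 - 3500 = 4692
 * cached = 4000                    -> clamp(4692, 0, 4000) = 4000
 * so up to 4000 cached buckets are invalidated.
 */
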
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index da0d72928b5b..7c930ef77380 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -327,7 +327,7 @@ again:
bucket = sector_to_bucket(ca,
round_up(bucket_to_sector(ca, bucket) + 1,
1ULL << ca->mi.btree_bitmap_shift));
- bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, bucket));
+ bch2_btree_iter_set_pos(trans, &iter, POS(ca->dev_idx, bucket));
s->buckets_seen++;
s->skipped_mi_btree_bitmap++;
continue;
@@ -355,7 +355,7 @@ again:
watermark, s, cl)
: NULL;
next:
- bch2_set_btree_iter_dontneed(&citer);
+ bch2_set_btree_iter_dontneed(trans, &citer);
bch2_trans_iter_exit(trans, &citer);
if (ob)
break;
@@ -417,7 +417,7 @@ again:
1ULL << ca->mi.btree_bitmap_shift));
alloc_cursor = bucket|(iter.pos.offset & (~0ULL << 56));
- bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, alloc_cursor));
+ bch2_btree_iter_set_pos(trans, &iter, POS(ca->dev_idx, alloc_cursor));
s->skipped_mi_btree_bitmap++;
goto next;
}
@@ -426,7 +426,7 @@ again:
if (ob) {
if (!IS_ERR(ob))
*dev_alloc_cursor = iter.pos.offset;
- bch2_set_btree_iter_dontneed(&iter);
+ bch2_set_btree_iter_dontneed(trans, &iter);
break;
}
@@ -469,7 +469,7 @@ static noinline void trace_bucket_alloc2(struct bch_fs *c, struct bch_dev *ca,
prt_printf(&buf, "watermark\t%s\n", bch2_watermarks[watermark]);
prt_printf(&buf, "data type\t%s\n", __bch2_data_types[data_type]);
prt_printf(&buf, "blocking\t%u\n", cl != NULL);
- prt_printf(&buf, "free\t%llu\n", usage->d[BCH_DATA_free].buckets);
+ prt_printf(&buf, "free\t%llu\n", usage->buckets[BCH_DATA_free]);
prt_printf(&buf, "avail\t%llu\n", dev_buckets_free(ca, *usage, watermark));
prt_printf(&buf, "copygc_wait\t%lu/%lli\n",
bch2_copygc_wait_amount(c),
@@ -524,10 +524,10 @@ again:
bch2_dev_usage_read_fast(ca, usage);
avail = dev_buckets_free(ca, *usage, watermark);
- if (usage->d[BCH_DATA_need_discard].buckets > avail)
+ if (usage->buckets[BCH_DATA_need_discard] > avail)
bch2_dev_do_discards(ca);
- if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
+ if (usage->buckets[BCH_DATA_need_gc_gens] > avail)
bch2_gc_gens_async(c);
if (should_invalidate_buckets(ca, *usage))
@@ -606,8 +606,7 @@ struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
unsigned l, unsigned r)
{
- return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
- (stripe->next_alloc[l] < stripe->next_alloc[r]));
+ return cmp_int(stripe->next_alloc[l], stripe->next_alloc[r]);
}
#define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)
@@ -626,25 +625,62 @@ struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
return ret;
}
+static const u64 stripe_clock_hand_rescale = 1ULL << 62; /* trigger rescale at */
+static const u64 stripe_clock_hand_max = 1ULL << 56; /* max after rescale */
+static const u64 stripe_clock_hand_inv = 1ULL << 52; /* max increment, if a device is empty */
+
+static noinline void bch2_stripe_state_rescale(struct dev_stripe_state *stripe)
+{
+ /*
+ * Avoid underflowing clock hands if at all possible: if a clock hand
+ * goes to 0 we lose information. Clock hands can span a wide range
+ * when there are devices we rarely try to allocate from - e.g. when we
+ * generally allocate from a specified target but only sometimes fall
+ * back to the whole filesystem.
+ */
+ u64 scale_max = U64_MAX; /* maximum we can subtract without underflow */
+ u64 scale_min = 0; /* minimum we must subtract to avoid overflow */
+
+ for (u64 *v = stripe->next_alloc;
+ v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++) {
+ if (*v)
+ scale_max = min(scale_max, *v);
+ if (*v > stripe_clock_hand_max)
+ scale_min = max(scale_min, *v - stripe_clock_hand_max);
+ }
+
+ u64 scale = max(scale_min, scale_max);
+
+ for (u64 *v = stripe->next_alloc;
+ v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
+ *v = *v < scale ? 0 : *v - scale;
+}
+
static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
struct dev_stripe_state *stripe,
struct bch_dev_usage *usage)
{
+ /*
+ * Stripe state has a per device clock hand: we allocate from the device
+ * with the smallest clock hand.
+ *
+ * When we allocate, we don't do a simple increment; we add the inverse
+ * of the device's free space. This results in round robin behavior that
+ * biases in favor of the device(s) with more free space.
+ */
+
u64 *v = stripe->next_alloc + ca->dev_idx;
u64 free_space = __dev_buckets_available(ca, *usage, BCH_WATERMARK_normal);
u64 free_space_inv = free_space
- ? div64_u64(1ULL << 48, free_space)
- : 1ULL << 48;
- u64 scale = *v / 4;
+ ? div64_u64(stripe_clock_hand_inv, free_space)
+ : stripe_clock_hand_inv;
- if (*v + free_space_inv >= *v)
- *v += free_space_inv;
- else
- *v = U64_MAX;
+ /* Saturating add, avoid overflow: */
+ u64 sum = *v + free_space_inv;
+ *v = sum >= *v ? sum : U64_MAX;
- for (v = stripe->next_alloc;
- v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
- *v = *v < scale ? 0 : *v - scale;
+ if (unlikely(*v > stripe_clock_hand_rescale))
+ bch2_stripe_state_rescale(stripe);
}
void bch2_dev_stripe_increment(struct bch_dev *ca,
@@ -1633,7 +1669,7 @@ void bch2_fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
{
struct bch_fs *c = ca->fs;
- struct bch_dev_usage stats = bch2_dev_usage_read(ca);
+ struct bch_dev_usage_full stats = bch2_dev_usage_full_read(ca);
unsigned nr[BCH_DATA_NR];
memset(nr, 0, sizeof(nr));
@@ -1656,7 +1692,8 @@ void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
printbuf_tabstop_push(out, 16);
prt_printf(out, "open buckets\t%i\r\n", ca->nr_open_buckets);
- prt_printf(out, "buckets to invalidate\t%llu\r\n", should_invalidate_buckets(ca, stats));
+ prt_printf(out, "buckets to invalidate\t%llu\r\n",
+ should_invalidate_buckets(ca, bch2_dev_usage_read(ca)));
}
static noinline void bch2_print_allocator_stuck(struct bch_fs *c)
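
To make the clock-hand bias above concrete: each allocation advances the winning device's hand by stripe_clock_hand_inv / free_space, and the device with the smallest hand wins the next pick, so devices with more free space (smaller increments) win more often. A toy trace with invented numbers:

/* Toy example (inv = stripe_clock_hand_inv = 1ULL << 52):
 * dev A: free_space = 1 << 20 buckets -> increment = inv >> 20 = 1ULL << 32
 * dev B: free_space = 1 << 10 buckets -> increment = inv >> 10 = 1ULL << 42
 * B's hand grows 1024x faster per allocation, so A wins roughly 1024
 * of every 1025 picks: round robin weighted by free space, as the
 * comment in bch2_dev_stripe_increment_inlined() describes.
 */
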
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
index 21d1d86d5008..ff26bb515150 100644
--- a/fs/bcachefs/backpointers.c
+++ b/fs/bcachefs/backpointers.c
@@ -252,12 +252,24 @@ struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
0,
bp.v->level,
iter_flags);
- struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, iter);
if (bkey_err(k)) {
bch2_trans_iter_exit(trans, iter);
return k;
}
+ /*
+ * peek_slot() doesn't normally return NULL - except when we ask for a
+ * key at a btree level that doesn't exist.
+ *
+ * We may want to revisit this and change peek_slot():
+ */
+ if (!k.k) {
+ bkey_init(&iter->k);
+ iter->k.p = bp.v->pos;
+ k.k = &iter->k;
+ }
+
if (k.k &&
extent_matches_bp(c, bp.v->btree_id, bp.v->level, k, bp))
return k;
@@ -293,7 +305,7 @@ struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
0,
bp.v->level - 1,
0);
- struct btree *b = bch2_btree_iter_peek_node(iter);
+ struct btree *b = bch2_btree_iter_peek_node(trans, iter);
if (IS_ERR_OR_NULL(b))
goto err;
@@ -321,7 +333,7 @@ static int bch2_check_backpointer_has_valid_bucket(struct btree_trans *trans, st
return 0;
struct bch_fs *c = trans->c;
- struct btree_iter alloc_iter = { NULL };
+ struct btree_iter alloc_iter = {};
struct bkey_s_c alloc_k;
struct printbuf buf = PRINTBUF;
int ret = 0;
@@ -462,7 +474,7 @@ err:
if (bio)
bio_put(bio);
kvfree(data_buf);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
printbuf_exit(&buf);
return ret;
}
@@ -650,7 +662,7 @@ static int check_btree_root_to_backpointers(struct btree_trans *trans,
retry:
bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN,
0, bch2_btree_id_root(c, btree_id)->b->c.level, 0);
- b = bch2_btree_iter_peek_node(&iter);
+ b = bch2_btree_iter_peek_node(trans, &iter);
ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto err;
@@ -934,7 +946,7 @@ static int btree_node_get_and_pin(struct btree_trans *trans, struct bkey_i *k,
{
struct btree_iter iter;
bch2_trans_node_iter_init(trans, &iter, btree, k->k.p, 0, level, 0);
- struct btree *b = bch2_btree_iter_peek_node(&iter);
+ struct btree *b = bch2_btree_iter_peek_node(trans, &iter);
int ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto err;
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index f52311017aee..5d9f208a1bb7 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -524,8 +524,8 @@ struct bch_dev {
struct percpu_ref ref;
#endif
struct completion ref_completion;
- struct percpu_ref io_ref;
- struct completion io_ref_completion;
+ struct percpu_ref io_ref[2];
+ struct completion io_ref_completion[2];
struct bch_fs *fs;
@@ -562,7 +562,8 @@ struct bch_dev {
unsigned long *bucket_backpointer_mismatches;
unsigned long *bucket_backpointer_empty;
- struct bch_dev_usage __percpu *usage;
+ struct bch_dev_usage_full __percpu
+ *usage;
/* Allocator: */
u64 alloc_cursor[3];
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 2025d408979c..7b98ba2dec64 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -691,7 +691,7 @@ retry_root:
struct btree_iter iter;
bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN,
0, bch2_btree_id_root(c, btree)->b->c.level, 0);
- struct btree *b = bch2_btree_iter_peek_node(&iter);
+ struct btree *b = bch2_btree_iter_peek_node(trans, &iter);
ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto err_root;
@@ -1199,7 +1199,7 @@ int bch2_gc_gens(struct bch_fs *c)
BCH_TRANS_COMMIT_no_enospc, ({
ca = bch2_dev_iterate(c, ca, k.k->p.inode);
if (!ca) {
- bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
+ bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
continue;
}
bch2_alloc_write_oldest_gen(trans, ca, &iter, k);
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 1d94a2bf706d..5fd4a58d2ad2 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -1353,7 +1353,7 @@ start:
"btree read error %s for %s",
bch2_blk_status_to_str(bio->bi_status), buf.buf);
if (rb->have_ioref)
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
rb->have_ioref = false;
bch2_mark_io_failure(&failed, &rb->pick, false);
@@ -1609,6 +1609,7 @@ static void btree_node_read_all_replicas_endio(struct bio *bio)
struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);
bch2_latency_acct(ca, rb->start_time, READ);
+ percpu_ref_put(&ca->io_ref[READ]);
}
ra->err[rb->idx] = bio->bi_status;
@@ -1908,7 +1909,8 @@ static void btree_node_scrub_work(struct work_struct *work)
scrub->key.k->k.p, 0, scrub->level - 1, 0);
struct btree *b;
- int ret = lockrestart_do(trans, PTR_ERR_OR_ZERO(b = bch2_btree_iter_peek_node(&iter)));
+ int ret = lockrestart_do(trans,
+ PTR_ERR_OR_ZERO(b = bch2_btree_iter_peek_node(trans, &iter)));
if (ret)
goto err;
@@ -1927,7 +1929,7 @@ err:
printbuf_exit(&err);
bch2_bkey_buf_exit(&scrub->key, c);
btree_bounce_free(c, c->opts.btree_node_size, scrub->used_mempool, scrub->buf);
- percpu_ref_put(&scrub->ca->io_ref);
+ percpu_ref_put(&scrub->ca->io_ref[READ]);
kfree(scrub);
bch2_write_ref_put(c, BCH_WRITE_REF_btree_node_scrub);
}
@@ -1996,7 +1998,7 @@ int bch2_btree_node_scrub(struct btree_trans *trans,
return 0;
err_free:
btree_bounce_free(c, c->opts.btree_node_size, used_mempool, buf);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
err:
bch2_write_ref_put(c, BCH_WRITE_REF_btree_node_scrub);
return ret;
@@ -2144,6 +2146,7 @@ static void btree_node_write_endio(struct bio *bio)
if (ca && bio->bi_status) {
struct printbuf buf = PRINTBUF;
+ buf.atomic++;
prt_printf(&buf, "btree write error: %s\n ",
bch2_blk_status_to_str(bio->bi_status));
bch2_btree_pos_to_text(&buf, c, b);
@@ -2158,8 +2161,12 @@ static void btree_node_write_endio(struct bio *bio)
spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
}
+ /*
+ * XXX: we should be using io_ref[WRITE], but we aren't retrying failed
+ * btree writes yet (due to device removal/ro):
+ */
if (wbio->have_ioref)
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
if (parent) {
bio_put(bio);
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index a9c110b846b5..e34e9598ef25 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -244,10 +244,8 @@ void bch2_trans_verify_paths(struct btree_trans *trans)
bch2_btree_path_verify(trans, path);
}
-static void bch2_btree_iter_verify(struct btree_iter *iter)
+static void bch2_btree_iter_verify(struct btree_trans *trans, struct btree_iter *iter)
{
- struct btree_trans *trans = iter->trans;
-
BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);
BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
@@ -276,9 +274,9 @@ static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
bkey_gt(iter->pos, iter->k.p)));
}
-static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
+static int bch2_btree_iter_verify_ret(struct btree_trans *trans,
+ struct btree_iter *iter, struct bkey_s_c k)
{
- struct btree_trans *trans = iter->trans;
struct btree_iter copy;
struct bkey_s_c prev;
int ret = 0;
@@ -299,7 +297,7 @@ static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k
bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
BTREE_ITER_nopreserve|
BTREE_ITER_all_snapshots);
- prev = bch2_btree_iter_prev(&copy);
+ prev = bch2_btree_iter_prev(trans, &copy);
if (!prev.k)
goto out;
@@ -365,9 +363,11 @@ static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
struct btree_path *path, unsigned l) {}
static inline void bch2_btree_path_verify(struct btree_trans *trans,
struct btree_path *path) {}
-static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
+static inline void bch2_btree_iter_verify(struct btree_trans *trans,
+ struct btree_iter *iter) {}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
-static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }
+static inline int bch2_btree_iter_verify_ret(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_s_c k) { return 0; }
#endif
@@ -1855,10 +1855,8 @@ hole:
return (struct bkey_s_c) { u, NULL };
}
-void bch2_set_btree_iter_dontneed(struct btree_iter *iter)
+void bch2_set_btree_iter_dontneed(struct btree_trans *trans, struct btree_iter *iter)
{
- struct btree_trans *trans = iter->trans;
-
if (!iter->path || trans->restarted)
return;
@@ -1870,17 +1868,14 @@ void bch2_set_btree_iter_dontneed(struct btree_iter *iter)
/* Btree iterators: */
int __must_check
-__bch2_btree_iter_traverse(struct btree_iter *iter)
+__bch2_btree_iter_traverse(struct btree_trans *trans, struct btree_iter *iter)
{
- return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
+ return bch2_btree_path_traverse(trans, iter->path, iter->flags);
}
int __must_check
-bch2_btree_iter_traverse(struct btree_iter *iter)
+bch2_btree_iter_traverse(struct btree_trans *trans, struct btree_iter *iter)
{
- struct btree_trans *trans = iter->trans;
- int ret;
-
bch2_trans_verify_not_unlocked_or_in_restart(trans);
iter->path = bch2_btree_path_set_pos(trans, iter->path,
@@ -1888,7 +1883,7 @@ bch2_btree_iter_traverse(struct btree_iter *iter)
iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
- ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
+ int ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (ret)
return ret;
@@ -1900,14 +1895,14 @@ bch2_btree_iter_traverse(struct btree_iter *iter)
/* Iterate across nodes (leaf and interior nodes) */
-struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
+struct btree *bch2_btree_iter_peek_node(struct btree_trans *trans,
+ struct btree_iter *iter)
{
- struct btree_trans *trans = iter->trans;
struct btree *b = NULL;
int ret;
EBUG_ON(trans->paths[iter->path].cached);
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (ret)
@@ -1929,7 +1924,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
out:
bch2_btree_iter_verify_entry_exit(iter);
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
return b;
err:
@@ -1938,26 +1933,26 @@ err:
}
/* Only kept for -tools */
-struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
+struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_trans *trans,
+ struct btree_iter *iter)
{
struct btree *b;
- while (b = bch2_btree_iter_peek_node(iter),
+ while (b = bch2_btree_iter_peek_node(trans, iter),
bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
- bch2_trans_begin(iter->trans);
+ bch2_trans_begin(trans);
return b;
}
-struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
+struct btree *bch2_btree_iter_next_node(struct btree_trans *trans, struct btree_iter *iter)
{
- struct btree_trans *trans = iter->trans;
struct btree *b = NULL;
int ret;
EBUG_ON(trans->paths[iter->path].cached);
bch2_trans_verify_not_unlocked_or_in_restart(trans);
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (ret)
@@ -2024,7 +2019,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
EBUG_ON(btree_iter_path(trans, iter)->uptodate);
out:
bch2_btree_iter_verify_entry_exit(iter);
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
return b;
err:
@@ -2034,7 +2029,7 @@ err:
/* Iterate across keys (in leaf nodes only) */
-inline bool bch2_btree_iter_advance(struct btree_iter *iter)
+inline bool bch2_btree_iter_advance(struct btree_trans *trans, struct btree_iter *iter)
{
struct bpos pos = iter->k.p;
bool ret = !(iter->flags & BTREE_ITER_all_snapshots
@@ -2043,11 +2038,11 @@ inline bool bch2_btree_iter_advance(struct btree_iter *iter)
if (ret && !(iter->flags & BTREE_ITER_is_extents))
pos = bkey_successor(iter, pos);
- bch2_btree_iter_set_pos(iter, pos);
+ bch2_btree_iter_set_pos(trans, iter, pos);
return ret;
}
-inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
+inline bool bch2_btree_iter_rewind(struct btree_trans *trans, struct btree_iter *iter)
{
struct bpos pos = bkey_start_pos(&iter->k);
bool ret = !(iter->flags & BTREE_ITER_all_snapshots
@@ -2056,7 +2051,7 @@ inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
if (ret && !(iter->flags & BTREE_ITER_is_extents))
pos = bkey_predecessor(iter, pos);
- bch2_btree_iter_set_pos(iter, pos);
+ bch2_btree_iter_set_pos(trans, iter, pos);
return ret;
}
@@ -2183,9 +2178,9 @@ void btree_trans_peek_prev_journal(struct btree_trans *trans,
* bkey_s_c_null:
*/
static noinline
-struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
+struct bkey_s_c btree_trans_peek_key_cache(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos pos)
{
- struct btree_trans *trans = iter->trans;
struct bch_fs *c = trans->c;
struct bkey u;
struct bkey_s_c k;
@@ -2231,14 +2226,14 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos
return k;
}
-static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
+static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos search_key)
{
- struct btree_trans *trans = iter->trans;
struct bkey_s_c k, k2;
int ret;
EBUG_ON(btree_iter_path(trans, iter)->cached);
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
while (1) {
iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
@@ -2248,7 +2243,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (unlikely(ret)) {
/* ensure that iter->k is consistent with iter->pos: */
- bch2_btree_iter_set_pos(iter, iter->pos);
+ bch2_btree_iter_set_pos(trans, iter, iter->pos);
k = bkey_s_c_err(ret);
break;
}
@@ -2258,7 +2253,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
if (unlikely(!l->b)) {
/* No btree nodes at requested level: */
- bch2_btree_iter_set_pos(iter, SPOS_MAX);
+ bch2_btree_iter_set_pos(trans, iter, SPOS_MAX);
k = bkey_s_c_null;
break;
}
@@ -2269,10 +2264,10 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
k.k &&
- (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
+ (k2 = btree_trans_peek_key_cache(trans, iter, k.k->p)).k) {
k = k2;
if (bkey_err(k)) {
- bch2_btree_iter_set_pos(iter, iter->pos);
+ bch2_btree_iter_set_pos(trans, iter, iter->pos);
break;
}
}
@@ -2305,27 +2300,28 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
search_key = bpos_successor(l->b->key.k.p);
} else {
/* End of btree: */
- bch2_btree_iter_set_pos(iter, SPOS_MAX);
+ bch2_btree_iter_set_pos(trans, iter, SPOS_MAX);
k = bkey_s_c_null;
break;
}
}
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
return k;
}
/**
* bch2_btree_iter_peek_max() - returns first key greater than or equal to
* iterator's current position
+ * @trans: btree transaction object
* @iter: iterator to peek from
* @end: search limit: returns keys less than or equal to @end
*
* Returns: key if found, or an error extractable with bkey_err().
*/
-struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *iter, struct bpos end)
+struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos end)
{
- struct btree_trans *trans = iter->trans;
struct bpos search_key = btree_iter_search_key(iter);
struct bkey_s_c k;
struct bpos iter_pos = iter->pos;
@@ -2348,7 +2344,7 @@ struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *iter, struct bpos en
}
while (1) {
- k = __bch2_btree_iter_peek(iter, search_key);
+ k = __bch2_btree_iter_peek(trans, iter, search_key);
if (unlikely(!k.k))
goto end;
if (unlikely(bkey_err(k)))
@@ -2462,9 +2458,9 @@ out_no_locked:
if (!(iter->flags & BTREE_ITER_all_snapshots))
iter->pos.snapshot = iter->snapshot;
- ret = bch2_btree_iter_verify_ret(iter, k);
+ ret = bch2_btree_iter_verify_ret(trans, iter, k);
if (unlikely(ret)) {
- bch2_btree_iter_set_pos(iter, iter->pos);
+ bch2_btree_iter_set_pos(trans, iter, iter->pos);
k = bkey_s_c_err(ret);
}
@@ -2472,7 +2468,7 @@ out_no_locked:
return k;
end:
- bch2_btree_iter_set_pos(iter, end);
+ bch2_btree_iter_set_pos(trans, iter, end);
k = bkey_s_c_null;
goto out_no_locked;
}
@@ -2480,24 +2476,25 @@ end:
/**
* bch2_btree_iter_next() - returns first key greater than iterator's current
* position
+ * @trans: btree transaction object
* @iter: iterator to peek from
*
* Returns: key if found, or an error extractable with bkey_err().
*/
-struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_next(struct btree_trans *trans, struct btree_iter *iter)
{
- if (!bch2_btree_iter_advance(iter))
+ if (!bch2_btree_iter_advance(trans, iter))
return bkey_s_c_null;
- return bch2_btree_iter_peek(iter);
+ return bch2_btree_iter_peek(trans, iter);
}
-static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, struct bpos search_key)
+static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos search_key)
{
- struct btree_trans *trans = iter->trans;
struct bkey_s_c k, k2;
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
while (1) {
iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
@@ -2507,7 +2504,7 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, stru
int ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (unlikely(ret)) {
/* ensure that iter->k is consistent with iter->pos: */
- bch2_btree_iter_set_pos(iter, iter->pos);
+ bch2_btree_iter_set_pos(trans, iter, iter->pos);
k = bkey_s_c_err(ret);
break;
}
@@ -2517,7 +2514,7 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, stru
if (unlikely(!l->b)) {
/* No btree nodes at requested level: */
- bch2_btree_iter_set_pos(iter, SPOS_MAX);
+ bch2_btree_iter_set_pos(trans, iter, SPOS_MAX);
k = bkey_s_c_null;
break;
}
@@ -2533,10 +2530,10 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, stru
if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
k.k &&
- (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
+ (k2 = btree_trans_peek_key_cache(trans, iter, k.k->p)).k) {
k = k2;
if (bkey_err(k2)) {
- bch2_btree_iter_set_pos(iter, iter->pos);
+ bch2_btree_iter_set_pos(trans, iter, iter->pos);
break;
}
}
@@ -2557,25 +2554,27 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, stru
search_key = bpos_predecessor(path->l[0].b->data->min_key);
} else {
/* Start of btree: */
- bch2_btree_iter_set_pos(iter, POS_MIN);
+ bch2_btree_iter_set_pos(trans, iter, POS_MIN);
k = bkey_s_c_null;
break;
}
}
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
return k;
}
/**
* bch2_btree_iter_peek_prev_min() - returns first key less than or equal to
* iterator's current position
+ * @trans: btree transaction object
* @iter: iterator to peek from
* @end: search limit: returns keys greater than or equal to @end
*
* Returns: key if found, or an error extractable with bkey_err().
*/
-struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bpos end)
+struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos end)
{
if ((iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots)) &&
!bkey_eq(iter->pos, POS_MAX)) {
@@ -2587,7 +2586,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bp
* real visible extents - easiest to just use peek_slot() (which
* internally uses peek() for extents)
*/
- struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, iter);
if (bkey_err(k))
return k;
@@ -2597,7 +2596,6 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bp
return k;
}
- struct btree_trans *trans = iter->trans;
struct bpos search_key = iter->pos;
struct bkey_s_c k;
btree_path_idx_t saved_path = 0;
@@ -2613,7 +2611,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bp
}
while (1) {
- k = __bch2_btree_iter_peek_prev(iter, search_key);
+ k = __bch2_btree_iter_peek_prev(trans, iter, search_key);
if (unlikely(!k.k))
goto end;
if (unlikely(bkey_err(k)))
@@ -2704,10 +2702,10 @@ out_no_locked:
bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_intent);
bch2_btree_iter_verify_entry_exit(iter);
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
return k;
end:
- bch2_btree_iter_set_pos(iter, end);
+ bch2_btree_iter_set_pos(trans, iter, end);
k = bkey_s_c_null;
goto out_no_locked;
}
@@ -2715,27 +2713,27 @@ end:
/**
* bch2_btree_iter_prev() - returns first key less than iterator's current
* position
+ * @trans: btree transaction object
* @iter: iterator to peek from
*
* Returns: key if found, or an error extractable with bkey_err().
*/
-struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_prev(struct btree_trans *trans, struct btree_iter *iter)
{
- if (!bch2_btree_iter_rewind(iter))
+ if (!bch2_btree_iter_rewind(trans, iter))
return bkey_s_c_null;
- return bch2_btree_iter_peek_prev(iter);
+ return bch2_btree_iter_peek_prev(trans, iter);
}
-struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btree_iter *iter)
{
- struct btree_trans *trans = iter->trans;
struct bpos search_key;
struct bkey_s_c k;
int ret;
bch2_trans_verify_not_unlocked_or_in_restart(trans);
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
bch2_btree_iter_verify_entry_exit(iter);
EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache));
@@ -2751,7 +2749,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
if (iter->pos.inode == KEY_INODE_MAX)
return bkey_s_c_null;
- bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
+ bch2_btree_iter_set_pos(trans, iter, bpos_nosnap_successor(iter->pos));
}
search_key = btree_iter_search_key(iter);
@@ -2785,7 +2783,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
goto out;
if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
- (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
+ (k = btree_trans_peek_key_cache(trans, iter, iter->pos)).k) {
if (!bkey_err(k))
iter->k = *k.k;
/* We're not returning a key from iter->path: */
@@ -2812,8 +2810,8 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
if (iter->flags & BTREE_ITER_intent) {
struct btree_iter iter2;
- bch2_trans_copy_iter(&iter2, iter);
- k = bch2_btree_iter_peek_max(&iter2, end);
+ bch2_trans_copy_iter(trans, &iter2, iter);
+ k = bch2_btree_iter_peek_max(trans, &iter2, end);
if (k.k && !bkey_err(k)) {
swap(iter->key_cache_path, iter2.key_cache_path);
@@ -2824,9 +2822,9 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
} else {
struct bpos pos = iter->pos;
- k = bch2_btree_iter_peek_max(iter, end);
+ k = bch2_btree_iter_peek_max(trans, iter, end);
if (unlikely(bkey_err(k)))
- bch2_btree_iter_set_pos(iter, pos);
+ bch2_btree_iter_set_pos(trans, iter, pos);
else
iter->pos = pos;
}
@@ -2857,39 +2855,39 @@ out:
btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
out_no_locked:
bch2_btree_iter_verify_entry_exit(iter);
- bch2_btree_iter_verify(iter);
- ret = bch2_btree_iter_verify_ret(iter, k);
+ bch2_btree_iter_verify(trans, iter);
+ ret = bch2_btree_iter_verify_ret(trans, iter, k);
if (unlikely(ret))
return bkey_s_c_err(ret);
return k;
}
-struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_next_slot(struct btree_trans *trans, struct btree_iter *iter)
{
- if (!bch2_btree_iter_advance(iter))
+ if (!bch2_btree_iter_advance(trans, iter))
return bkey_s_c_null;
- return bch2_btree_iter_peek_slot(iter);
+ return bch2_btree_iter_peek_slot(trans, iter);
}
-struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_trans *trans, struct btree_iter *iter)
{
- if (!bch2_btree_iter_rewind(iter))
+ if (!bch2_btree_iter_rewind(trans, iter))
return bkey_s_c_null;
- return bch2_btree_iter_peek_slot(iter);
+ return bch2_btree_iter_peek_slot(trans, iter);
}
/* Obsolete, but still used by rust wrapper in -tools */
-struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_trans *trans, struct btree_iter *iter)
{
struct bkey_s_c k;
- while (btree_trans_too_many_iters(iter->trans) ||
- (k = bch2_btree_iter_peek_type(iter, iter->flags),
+ while (btree_trans_too_many_iters(trans) ||
+ (k = bch2_btree_iter_peek_type(trans, iter, iter->flags),
bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
- bch2_trans_begin(iter->trans);
+ bch2_trans_begin(trans);
return k;
}
@@ -3035,7 +3033,6 @@ void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
iter->path = 0;
iter->update_path = 0;
iter->key_cache_path = 0;
- iter->trans = NULL;
}
void bch2_trans_iter_init_outlined(struct btree_trans *trans,
@@ -3075,10 +3072,9 @@ void bch2_trans_node_iter_init(struct btree_trans *trans,
BUG_ON(iter->min_depth != depth);
}
-void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
+void bch2_trans_copy_iter(struct btree_trans *trans,
+ struct btree_iter *dst, struct btree_iter *src)
{
- struct btree_trans *trans = src->trans;
-
*dst = *src;
#ifdef TRACK_PATH_ALLOCATED
dst->ip_allocated = _RET_IP_;
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index e6f51a3b8187..9d2cccf5d21a 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -393,36 +393,37 @@ void bch2_trans_node_add(struct btree_trans *trans, struct btree_path *, struct
void bch2_trans_node_drop(struct btree_trans *trans, struct btree *);
void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);
-int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
-int __must_check bch2_btree_iter_traverse(struct btree_iter *);
+int __must_check __bch2_btree_iter_traverse(struct btree_trans *, struct btree_iter *);
+int __must_check bch2_btree_iter_traverse(struct btree_trans *, struct btree_iter *);
-struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
-struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *);
-struct btree *bch2_btree_iter_next_node(struct btree_iter *);
+struct btree *bch2_btree_iter_peek_node(struct btree_trans *, struct btree_iter *);
+struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_trans *, struct btree_iter *);
+struct btree *bch2_btree_iter_next_node(struct btree_trans *, struct btree_iter *);
-struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *, struct bpos);
-struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *, struct btree_iter *, struct bpos);
+struct bkey_s_c bch2_btree_iter_next(struct btree_trans *, struct btree_iter *);
-static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
+static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_trans *trans,
+ struct btree_iter *iter)
{
- return bch2_btree_iter_peek_max(iter, SPOS_MAX);
+ return bch2_btree_iter_peek_max(trans, iter, SPOS_MAX);
}
-struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *, struct bpos);
+struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *, struct btree_iter *, struct bpos);
-static inline struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
+static inline struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_trans *trans, struct btree_iter *iter)
{
- return bch2_btree_iter_peek_prev_min(iter, POS_MIN);
+ return bch2_btree_iter_peek_prev_min(trans, iter, POS_MIN);
}
-struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_prev(struct btree_trans *, struct btree_iter *);
-struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
-struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
-struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *, struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_next_slot(struct btree_trans *, struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_trans *, struct btree_iter *);
-bool bch2_btree_iter_advance(struct btree_iter *);
-bool bch2_btree_iter_rewind(struct btree_iter *);
+bool bch2_btree_iter_advance(struct btree_trans *, struct btree_iter *);
+bool bch2_btree_iter_rewind(struct btree_trans *, struct btree_iter *);
static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
@@ -433,10 +434,9 @@ static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpo
iter->k.size = 0;
}
-static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
+static inline void bch2_btree_iter_set_pos(struct btree_trans *trans,
+ struct btree_iter *iter, struct bpos new_pos)
{
- struct btree_trans *trans = iter->trans;
-
if (unlikely(iter->update_path))
bch2_path_put(trans, iter->update_path,
iter->flags & BTREE_ITER_intent);
@@ -454,13 +454,14 @@ static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *it
iter->pos = bkey_start_pos(&iter->k);
}
-static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 snapshot)
+static inline void bch2_btree_iter_set_snapshot(struct btree_trans *trans,
+ struct btree_iter *iter, u32 snapshot)
{
struct bpos pos = iter->pos;
iter->snapshot = snapshot;
pos.snapshot = snapshot;
- bch2_btree_iter_set_pos(iter, pos);
+ bch2_btree_iter_set_pos(trans, iter, pos);
}
void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);
@@ -502,7 +503,6 @@ static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
unsigned flags,
unsigned long ip)
{
- iter->trans = trans;
iter->update_path = 0;
iter->key_cache_path = 0;
iter->btree_id = btree_id;
@@ -539,9 +539,9 @@ static inline void bch2_trans_iter_init(struct btree_trans *trans,
void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
enum btree_id, struct bpos,
unsigned, unsigned, unsigned);
-void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);
+void bch2_trans_copy_iter(struct btree_trans *, struct btree_iter *, struct btree_iter *);
-void bch2_set_btree_iter_dontneed(struct btree_iter *);
+void bch2_set_btree_iter_dontneed(struct btree_trans *, struct btree_iter *);
void *__bch2_trans_kmalloc(struct btree_trans *, size_t);
@@ -588,7 +588,7 @@ static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans,
struct bkey_s_c k;
bch2_trans_iter_init(trans, iter, btree_id, pos, flags);
- k = bch2_btree_iter_peek_slot(iter);
+ k = bch2_btree_iter_peek_slot(trans, iter);
if (!bkey_err(k) && type && k.k->type != type)
k = bkey_s_c_err(-BCH_ERR_ENOENT_bkey_type_mismatch);
@@ -658,14 +658,14 @@ u32 bch2_trans_begin(struct btree_trans *);
int _ret3 = 0; \
do { \
_ret3 = lockrestart_do((_trans), ({ \
- struct btree *_b = bch2_btree_iter_peek_node(&_iter); \
+ struct btree *_b = bch2_btree_iter_peek_node(_trans, &_iter);\
if (!_b) \
break; \
\
PTR_ERR_OR_ZERO(_b) ?: (_do); \
})) ?: \
lockrestart_do((_trans), \
- PTR_ERR_OR_ZERO(bch2_btree_iter_next_node(&_iter))); \
+ PTR_ERR_OR_ZERO(bch2_btree_iter_next_node(_trans, &_iter)));\
} while (!_ret3); \
\
bch2_trans_iter_exit((_trans), &(_iter)); \
@@ -677,31 +677,34 @@ u32 bch2_trans_begin(struct btree_trans *);
__for_each_btree_node(_trans, _iter, _btree_id, _start, \
0, 0, _flags, _b, _do)
-static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
+static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_trans *trans,
+ struct btree_iter *iter,
unsigned flags)
{
- return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(iter) :
- bch2_btree_iter_peek_prev(iter);
+ return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(trans, iter) :
+ bch2_btree_iter_peek_prev(trans, iter);
}
-static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
+static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_trans *trans,
+ struct btree_iter *iter,
unsigned flags)
{
- return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(iter) :
- bch2_btree_iter_peek(iter);
+ return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(trans, iter) :
+ bch2_btree_iter_peek(trans, iter);
}
-static inline struct bkey_s_c bch2_btree_iter_peek_max_type(struct btree_iter *iter,
- struct bpos end,
- unsigned flags)
+static inline struct bkey_s_c bch2_btree_iter_peek_max_type(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bpos end,
+ unsigned flags)
{
if (!(flags & BTREE_ITER_slots))
- return bch2_btree_iter_peek_max(iter, end);
+ return bch2_btree_iter_peek_max(trans, iter, end);
if (bkey_gt(iter->pos, end))
return bkey_s_c_null;
- return bch2_btree_iter_peek_slot(iter);
+ return bch2_btree_iter_peek_slot(trans, iter);
}
int __bch2_btree_trans_too_many_iters(struct btree_trans *);
@@ -768,14 +771,14 @@ transaction_restart: \
\
do { \
_ret3 = lockrestart_do(_trans, ({ \
- (_k) = bch2_btree_iter_peek_max_type(&(_iter), \
+ (_k) = bch2_btree_iter_peek_max_type(_trans, &(_iter), \
_end, (_flags)); \
if (!(_k).k) \
break; \
\
bkey_err(_k) ?: (_do); \
})); \
- } while (!_ret3 && bch2_btree_iter_advance(&(_iter))); \
+ } while (!_ret3 && bch2_btree_iter_advance(_trans, &(_iter))); \
\
bch2_trans_iter_exit((_trans), &(_iter)); \
_ret3; \
@@ -813,14 +816,14 @@ transaction_restart: \
\
do { \
_ret3 = lockrestart_do(_trans, ({ \
- (_k) = bch2_btree_iter_peek_prev_type(&(_iter), \
+ (_k) = bch2_btree_iter_peek_prev_type(_trans, &(_iter), \
(_flags)); \
if (!(_k).k) \
break; \
\
bkey_err(_k) ?: (_do); \
})); \
- } while (!_ret3 && bch2_btree_iter_rewind(&(_iter))); \
+ } while (!_ret3 && bch2_btree_iter_rewind(_trans, &(_iter))); \
\
bch2_trans_iter_exit((_trans), &(_iter)); \
_ret3; \
@@ -850,37 +853,38 @@ transaction_restart: \
(_do) ?: bch2_trans_commit(_trans, (_disk_res),\
(_journal_seq), (_commit_flags)))
-struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_trans *,
+ struct btree_iter *);
#define for_each_btree_key_max_norestart(_trans, _iter, _btree_id, \
_start, _end, _flags, _k, _ret) \
for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
(_start), (_flags)); \
- (_k) = bch2_btree_iter_peek_max_type(&(_iter), _end, _flags),\
+ (_k) = bch2_btree_iter_peek_max_type(_trans, &(_iter), _end, _flags),\
!((_ret) = bkey_err(_k)) && (_k).k; \
- bch2_btree_iter_advance(&(_iter)))
+ bch2_btree_iter_advance(_trans, &(_iter)))
-#define for_each_btree_key_max_continue_norestart(_iter, _end, _flags, _k, _ret)\
+#define for_each_btree_key_max_continue_norestart(_trans, _iter, _end, _flags, _k, _ret)\
for (; \
- (_k) = bch2_btree_iter_peek_max_type(&(_iter), _end, _flags), \
+ (_k) = bch2_btree_iter_peek_max_type(_trans, &(_iter), _end, _flags), \
!((_ret) = bkey_err(_k)) && (_k).k; \
- bch2_btree_iter_advance(&(_iter)))
+ bch2_btree_iter_advance(_trans, &(_iter)))
#define for_each_btree_key_norestart(_trans, _iter, _btree_id, \
_start, _flags, _k, _ret) \
for_each_btree_key_max_norestart(_trans, _iter, _btree_id, _start,\
SPOS_MAX, _flags, _k, _ret)
-#define for_each_btree_key_reverse_norestart(_trans, _iter, _btree_id, \
- _start, _flags, _k, _ret) \
- for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
- (_start), (_flags)); \
- (_k) = bch2_btree_iter_peek_prev_type(&(_iter), _flags), \
- !((_ret) = bkey_err(_k)) && (_k).k; \
- bch2_btree_iter_rewind(&(_iter)))
+#define for_each_btree_key_reverse_norestart(_trans, _iter, _btree_id, \
+ _start, _flags, _k, _ret) \
+ for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
+ (_start), (_flags)); \
+ (_k) = bch2_btree_iter_peek_prev_type(_trans, &(_iter), _flags), \
+ !((_ret) = bkey_err(_k)) && (_k).k; \
+ bch2_btree_iter_rewind(_trans, &(_iter)))
-#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret) \
- for_each_btree_key_max_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret)
+#define for_each_btree_key_continue_norestart(_trans, _iter, _flags, _k, _ret) \
+ for_each_btree_key_max_continue_norestart(_trans, _iter, SPOS_MAX, _flags, _k, _ret)
/*
* This should not be used in a fastpath, without first trying _do in
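
[Sketch of the reworked _continue_ iteration macros, which now take the transaction as their first argument; error handling elided:]

	struct bkey_s_c k;
	int ret = 0;

	/* was: for_each_btree_key_continue_norestart(iter, 0, k, ret) */
	for_each_btree_key_continue_norestart(trans, iter, 0, k, ret) {
		/* process k; the loop advances via bch2_btree_iter_advance(trans, &iter) */
	}
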
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index edce59433375..2b186584a291 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -287,6 +287,19 @@ err:
return ret;
}
+static noinline_for_stack void do_trace_key_cache_fill(struct btree_trans *trans,
+ struct btree_path *ck_path,
+ struct bkey_s_c k)
+{
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bpos_to_text(&buf, ck_path->pos);
+ prt_char(&buf, ' ');
+ bch2_bkey_val_to_text(&buf, trans->c, k);
+ trace_key_cache_fill(trans, buf.buf);
+ printbuf_exit(&buf);
+}
+
static noinline int btree_key_cache_fill(struct btree_trans *trans,
struct btree_path *ck_path,
unsigned flags)
@@ -306,7 +319,7 @@ static noinline int btree_key_cache_fill(struct btree_trans *trans,
BTREE_ITER_key_cache_fill|
BTREE_ITER_cached_nofill);
iter.flags &= ~BTREE_ITER_with_journal;
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -320,18 +333,11 @@ static noinline int btree_key_cache_fill(struct btree_trans *trans,
if (ret)
goto err;
- if (trace_key_cache_fill_enabled()) {
- struct printbuf buf = PRINTBUF;
-
- bch2_bpos_to_text(&buf, ck_path->pos);
- prt_char(&buf, ' ');
- bch2_bkey_val_to_text(&buf, trans->c, k);
- trace_key_cache_fill(trans, buf.buf);
- printbuf_exit(&buf);
- }
+ if (trace_key_cache_fill_enabled())
+ do_trace_key_cache_fill(trans, ck_path, k);
out:
/* We're not likely to need this iterator again: */
- bch2_set_btree_iter_dontneed(&iter);
+ bch2_set_btree_iter_dontneed(trans, &iter);
err:
bch2_trans_iter_exit(trans, &iter);
return ret;
@@ -412,7 +418,7 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
BTREE_ITER_intent);
b_iter.flags &= ~BTREE_ITER_with_key_cache;
- ret = bch2_btree_iter_traverse(&c_iter);
+ ret = bch2_btree_iter_traverse(trans, &c_iter);
if (ret)
goto out;
@@ -444,7 +450,7 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
!test_bit(JOURNAL_space_low, &c->journal.flags))
commit_flags |= BCH_TRANS_COMMIT_no_journal_res;
- struct bkey_s_c btree_k = bch2_btree_iter_peek_slot(&b_iter);
+ struct bkey_s_c btree_k = bch2_btree_iter_peek_slot(trans, &b_iter);
ret = bkey_err(btree_k);
if (ret)
goto err;
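
[The tracepoint body moves into a noinline_for_stack helper so the printbuf only occupies stack space when tracing is actually enabled; the caller keeps just the static-key test (sketch):]

	/* effectively free when the tracepoint is off: compiles to a static branch */
	if (trace_key_cache_fill_enabled())
		do_trace_key_cache_fill(trans, ck_path, k);
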
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
index 25d54b77cdc2..8c9fdb7263fe 100644
--- a/fs/bcachefs/btree_node_scan.c
+++ b/fs/bcachefs/btree_node_scan.c
@@ -271,7 +271,7 @@ static int read_btree_nodes_worker(void *p)
err:
bio_put(bio);
free_page((unsigned long) buf);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
closure_put(w->cl);
kfree(w);
return 0;
@@ -291,7 +291,7 @@ static int read_btree_nodes(struct find_btree_nodes *f)
struct find_btree_nodes_worker *w = kmalloc(sizeof(*w), GFP_KERNEL);
if (!w) {
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
ret = -ENOMEM;
goto err;
}
@@ -303,14 +303,14 @@ static int read_btree_nodes(struct find_btree_nodes *f)
struct task_struct *t = kthread_create(read_btree_nodes_worker, w, "read_btree_nodes/%s", ca->name);
ret = PTR_ERR_OR_ZERO(t);
if (ret) {
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
kfree(w);
bch_err_msg(c, ret, "starting kthread");
break;
}
closure_get(&cl);
- percpu_ref_get(&ca->io_ref);
+ percpu_ref_get(&ca->io_ref[READ]);
wake_up_process(t);
}
err:
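
[Throughout this series io_ref becomes a per-direction pair, presumably declared along these lines; the actual bch_dev definition is not in this hunk:]

	struct percpu_ref io_ref[2];	/* indexed by READ (0) / WRITE (1) */

	if (percpu_ref_tryget(&ca->io_ref[READ])) {
		/* read-side I/O against ca */
		percpu_ref_put(&ca->io_ref[READ]);
	}
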
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index 77578da2d23f..023c472dc9ee 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -367,7 +367,6 @@ static inline unsigned long btree_path_ip_allocated(struct btree_path *path)
* @nodes_intent_locked - bitmask indicating which locks are intent locks
*/
struct btree_iter {
- struct btree_trans *trans;
btree_path_idx_t path;
btree_path_idx_t update_path;
btree_path_idx_t key_cache_path;
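
[With the trans member removed, struct btree_iter shrinks by one pointer and an empty initializer fully describes an unused iterator, which is why the { NULL } initializers elsewhere in this series become {}:]

	struct btree_iter iter = {};	/* was: = { NULL }, zeroing the old trans member */
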
diff --git a/fs/bcachefs/btree_update.c b/fs/bcachefs/btree_update.c
index c05394f56424..1e6b7836cc01 100644
--- a/fs/bcachefs/btree_update.c
+++ b/fs/bcachefs/btree_update.c
@@ -126,7 +126,7 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
struct bpos new_pos)
{
struct bch_fs *c = trans->c;
- struct btree_iter old_iter, new_iter = { NULL };
+ struct btree_iter old_iter, new_iter = {};
struct bkey_s_c old_k, new_k;
snapshot_id_list s;
struct bkey_i *update;
@@ -140,7 +140,7 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
bch2_trans_iter_init(trans, &old_iter, id, old_pos,
BTREE_ITER_not_extents|
BTREE_ITER_all_snapshots);
- while ((old_k = bch2_btree_iter_prev(&old_iter)).k &&
+ while ((old_k = bch2_btree_iter_prev(trans, &old_iter)).k &&
!(ret = bkey_err(old_k)) &&
bkey_eq(old_pos, old_k.k->p)) {
struct bpos whiteout_pos =
@@ -296,7 +296,7 @@ static int bch2_trans_update_extent(struct btree_trans *trans,
BTREE_ITER_intent|
BTREE_ITER_with_updates|
BTREE_ITER_not_extents);
- k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX));
+ k = bch2_btree_iter_peek_max(trans, &iter, POS(insert->k.p.inode, U64_MAX));
if ((ret = bkey_err(k)))
goto err;
if (!k.k)
@@ -322,8 +322,8 @@ static int bch2_trans_update_extent(struct btree_trans *trans,
if (done)
goto out;
next:
- bch2_btree_iter_advance(&iter);
- k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX));
+ bch2_btree_iter_advance(trans, &iter);
+ k = bch2_btree_iter_peek_max(trans, &iter, POS(insert->k.p.inode, U64_MAX));
if ((ret = bkey_err(k)))
goto err;
if (!k.k)
@@ -592,13 +592,13 @@ int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter,
enum btree_id btree, struct bpos end)
{
bch2_trans_iter_init(trans, iter, btree, end, BTREE_ITER_intent);
- struct bkey_s_c k = bch2_btree_iter_peek_prev(iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_prev(trans, iter);
int ret = bkey_err(k);
if (ret)
goto err;
- bch2_btree_iter_advance(iter);
- k = bch2_btree_iter_peek_slot(iter);
+ bch2_btree_iter_advance(trans, iter);
+ k = bch2_btree_iter_peek_slot(trans, iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -634,7 +634,7 @@ int bch2_btree_insert_nonextent(struct btree_trans *trans,
BTREE_ITER_cached|
BTREE_ITER_not_extents|
BTREE_ITER_intent);
- ret = bch2_btree_iter_traverse(&iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_trans_update(trans, &iter, k, flags);
bch2_trans_iter_exit(trans, &iter);
return ret;
@@ -646,7 +646,7 @@ int bch2_btree_insert_trans(struct btree_trans *trans, enum btree_id id,
struct btree_iter iter;
bch2_trans_iter_init(trans, &iter, id, bkey_start_pos(&k->k),
BTREE_ITER_intent|flags);
- int ret = bch2_btree_iter_traverse(&iter) ?:
+ int ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_trans_update(trans, &iter, k, flags);
bch2_trans_iter_exit(trans, &iter);
return ret;
@@ -695,7 +695,7 @@ int bch2_btree_delete(struct btree_trans *trans,
bch2_trans_iter_init(trans, &iter, btree, pos,
BTREE_ITER_cached|
BTREE_ITER_intent);
- ret = bch2_btree_iter_traverse(&iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_btree_delete_at(trans, &iter, update_flags);
bch2_trans_iter_exit(trans, &iter);
@@ -713,7 +713,7 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
int ret = 0;
bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_intent);
- while ((k = bch2_btree_iter_peek_max(&iter, end)).k) {
+ while ((k = bch2_btree_iter_peek_max(trans, &iter, end)).k) {
struct disk_reservation disk_res =
bch2_disk_reservation_init(trans->c, 0);
struct bkey_i delete;
@@ -808,7 +808,7 @@ int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree,
struct btree_iter iter;
bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_intent);
- int ret = bch2_btree_iter_traverse(&iter) ?:
+ int ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_btree_bit_mod_iter(trans, &iter, set);
bch2_trans_iter_exit(trans, &iter);
return ret;
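
[The traverse-then-update calls above rely on the GNU ?: extension: the left operand is returned if nonzero, without being evaluated twice. Expanded, the idiom is equivalent to:]

	int ret = bch2_btree_iter_traverse(trans, &iter);
	if (!ret)
		ret = bch2_trans_update(trans, &iter, k, flags);
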
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index bf7e1dac7f46..55fbeeb8eaaa 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -2147,7 +2147,7 @@ static int get_iter_to_node(struct btree_trans *trans, struct btree_iter *iter,
bch2_trans_node_iter_init(trans, iter, b->c.btree_id, b->key.k.p,
BTREE_MAX_DEPTH, b->c.level,
BTREE_ITER_intent);
- int ret = bch2_btree_iter_traverse(iter);
+ int ret = bch2_btree_iter_traverse(trans, iter);
if (ret)
goto err;
@@ -2239,7 +2239,7 @@ static int bch2_btree_node_rewrite_key(struct btree_trans *trans,
bch2_trans_node_iter_init(trans, &iter,
btree, k->k.p,
BTREE_MAX_DEPTH, level, 0);
- struct btree *b = bch2_btree_iter_peek_node(&iter);
+ struct btree *b = bch2_btree_iter_peek_node(trans, &iter);
int ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto out;
@@ -2262,7 +2262,7 @@ int bch2_btree_node_rewrite_pos(struct btree_trans *trans,
/* Traverse one depth lower to get a pointer to the node itself: */
struct btree_iter iter;
bch2_trans_node_iter_init(trans, &iter, btree, pos, 0, level - 1, 0);
- struct btree *b = bch2_btree_iter_peek_node(&iter);
+ struct btree *b = bch2_btree_iter_peek_node(trans, &iter);
int ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto err;
@@ -2406,7 +2406,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
bool skip_triggers)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter2 = { NULL };
+ struct btree_iter iter2 = {};
struct btree *parent;
int ret;
@@ -2430,7 +2430,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
parent = btree_node_parent(btree_iter_path(trans, iter), b);
if (parent) {
- bch2_trans_copy_iter(&iter2, iter);
+ bch2_trans_copy_iter(trans, &iter2, iter);
iter2.path = bch2_btree_path_make_mut(trans, iter2.path,
iter2.flags & BTREE_ITER_intent,
@@ -2444,7 +2444,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
trans->paths_sorted = false;
- ret = bch2_btree_iter_traverse(&iter2) ?:
+ ret = bch2_btree_iter_traverse(trans, &iter2) ?:
bch2_trans_update(trans, &iter2, new_key, BTREE_TRIGGER_norun);
if (ret)
goto err;
diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c
index 2c09d19dd621..adbe576ec77e 100644
--- a/fs/bcachefs/btree_write_buffer.c
+++ b/fs/bcachefs/btree_write_buffer.c
@@ -144,7 +144,7 @@ static inline int wb_flush_one(struct btree_trans *trans, struct btree_iter *ite
EBUG_ON(!trans->c->btree_write_buffer.flushing.pin.seq);
EBUG_ON(trans->c->btree_write_buffer.flushing.pin.seq > wb->journal_seq);
- ret = bch2_btree_iter_traverse(iter);
+ ret = bch2_btree_iter_traverse(trans, iter);
if (ret)
return ret;
@@ -208,7 +208,7 @@ btree_write_buffered_insert(struct btree_trans *trans,
trans->journal_res.seq = wb->journal_seq;
- ret = bch2_btree_iter_traverse(&iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_trans_update(trans, &iter, &wb->k,
BTREE_UPDATE_internal_snapshot_node);
bch2_trans_iter_exit(trans, &iter);
@@ -285,7 +285,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
struct bch_fs *c = trans->c;
struct journal *j = &c->journal;
struct btree_write_buffer *wb = &c->btree_write_buffer;
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
size_t overwritten = 0, fast = 0, slowpath = 0, could_not_insert = 0;
bool write_locked = false;
bool accounting_replay_done = test_bit(BCH_FS_accounting_replay_done, &c->flags);
@@ -368,7 +368,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
write_locked = false;
ret = lockrestart_do(trans,
- bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_iter_traverse(trans, &iter) ?:
bch2_foreground_maybe_merge(trans, iter.path, 0,
BCH_WATERMARK_reclaim|
BCH_TRANS_COMMIT_journal_reclaim|
@@ -385,7 +385,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
BTREE_ITER_intent|BTREE_ITER_all_snapshots);
}
- bch2_btree_iter_set_pos(&iter, k->k.k.p);
+ bch2_btree_iter_set_pos(trans, &iter, k->k.k.p);
btree_iter_path(trans, &iter)->preserve = false;
bool accounting_accumulated = false;
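
[lockrestart_do() retries its expression whenever it fails with a transaction_restart error, calling bch2_trans_begin() between attempts, which is what lets the flush path above chain traverse and merge safely (sketch; flags as in the hunk above):]

	ret = lockrestart_do(trans,
		bch2_btree_iter_traverse(trans, &iter) ?:
		bch2_foreground_maybe_merge(trans, iter.path, 0, flags));
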
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 0903311cc71e..fea61e60a9ee 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -30,6 +30,12 @@
void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
{
+ for (unsigned i = 0; i < BCH_DATA_NR; i++)
+ usage->buckets[i] = percpu_u64_get(&ca->usage->d[i].buckets);
+}
+
+void bch2_dev_usage_full_read_fast(struct bch_dev *ca, struct bch_dev_usage_full *usage)
+{
memset(usage, 0, sizeof(*usage));
acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage, dev_usage_u64s());
}
@@ -75,7 +81,7 @@ bch2_fs_usage_read_short(struct bch_fs *c)
void bch2_dev_usage_to_text(struct printbuf *out,
struct bch_dev *ca,
- struct bch_dev_usage *usage)
+ struct bch_dev_usage_full *usage)
{
if (out->nr_tabstops < 5) {
printbuf_tabstops_reset(out);
@@ -365,7 +371,7 @@ found:
struct btree_iter iter;
bch2_trans_node_iter_init(trans, &iter, btree, new->k.p, 0, level,
BTREE_ITER_intent|BTREE_ITER_all_snapshots);
- ret = bch2_btree_iter_traverse(&iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_trans_update(trans, &iter, new,
BTREE_UPDATE_internal_snapshot_node|
BTREE_TRIGGER_norun);
@@ -707,7 +713,7 @@ err:
struct disk_accounting_pos acc;
memset(&acc, 0, sizeof(acc));
acc.type = BCH_DISK_ACCOUNTING_replicas;
- memcpy(&acc.replicas, &m->r.e, replicas_entry_bytes(&m->r.e));
+ unsafe_memcpy(&acc.replicas, &m->r.e, replicas_entry_bytes(&m->r.e), "VLA");
gc_stripe_unlock(m);
acc.replicas.data_type = data_type;
@@ -1132,7 +1138,7 @@ int bch2_trans_mark_dev_sbs_flags(struct bch_fs *c,
for_each_online_member(c, ca) {
int ret = bch2_trans_mark_dev_sb(c, ca, flags);
if (ret) {
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
return ret;
}
}
@@ -1331,7 +1337,7 @@ void bch2_dev_buckets_free(struct bch_dev *ca)
int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
{
- ca->usage = alloc_percpu(struct bch_dev_usage);
+ ca->usage = alloc_percpu(struct bch_dev_usage_full);
if (!ca->usage)
return -BCH_ERR_ENOMEM_usage_init;
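
[Sketch of the usage split: the cheap read now sums only per-type bucket counts via percpu_u64_get(), while callers that need sector and fragmentation detail switch to the _full variant:]

	struct bch_dev_usage u = bch2_dev_usage_read(ca);	/* buckets[] only */
	u64 free = u.buckets[BCH_DATA_free];

	struct bch_dev_usage_full f = bch2_dev_usage_full_read(ca);
	u64 btree_sectors = f.d[BCH_DATA_btree].sectors;
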
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index c5363256e363..1c38b165f48b 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -172,7 +172,16 @@ static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
return ret;
}
-void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage *);
+void bch2_dev_usage_full_read_fast(struct bch_dev *, struct bch_dev_usage_full *);
+static inline struct bch_dev_usage_full bch2_dev_usage_full_read(struct bch_dev *ca)
+{
+ struct bch_dev_usage_full ret;
+
+ bch2_dev_usage_full_read_fast(ca, &ret);
+ return ret;
+}
+
+void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage_full *);
static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
{
@@ -207,7 +216,7 @@ static inline u64 dev_buckets_free(struct bch_dev *ca,
enum bch_watermark watermark)
{
return max_t(s64, 0,
- usage.d[BCH_DATA_free].buckets -
+ usage.buckets[BCH_DATA_free] -
ca->nr_open_buckets -
bch2_dev_buckets_reserved(ca, watermark));
}
@@ -217,10 +226,10 @@ static inline u64 __dev_buckets_available(struct bch_dev *ca,
enum bch_watermark watermark)
{
return max_t(s64, 0,
- usage.d[BCH_DATA_free].buckets
- + usage.d[BCH_DATA_cached].buckets
- + usage.d[BCH_DATA_need_gc_gens].buckets
- + usage.d[BCH_DATA_need_discard].buckets
+ usage.buckets[BCH_DATA_free]
+ + usage.buckets[BCH_DATA_cached]
+ + usage.buckets[BCH_DATA_need_gc_gens]
+ + usage.buckets[BCH_DATA_need_discard]
- ca->nr_open_buckets
- bch2_dev_buckets_reserved(ca, watermark));
}
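
[Worked example for __dev_buckets_available(), with made-up numbers:]

	/* hypothetical: free=100, cached=20, need_gc_gens=5, need_discard=3,
	 * nr_open_buckets=8, bch2_dev_buckets_reserved()=10:
	 *
	 *	available = max(0, (100 + 20 + 5 + 3) - 8 - 10) = 110 buckets
	 */
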
diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h
index 900b8680c8b5..0aed2500ade3 100644
--- a/fs/bcachefs/buckets_types.h
+++ b/fs/bcachefs/buckets_types.h
@@ -54,7 +54,12 @@ struct bucket_gens {
u8 b[] __counted_by(nbuckets);
};
+/* Only info on bucket counts: */
struct bch_dev_usage {
+ u64 buckets[BCH_DATA_NR];
+};
+
+struct bch_dev_usage_full {
struct bch_dev_usage_type {
u64 buckets;
u64 sectors; /* _compressed_ sectors: */
diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
index 584f4a3eb670..5891b3a1e61c 100644
--- a/fs/bcachefs/chardev.c
+++ b/fs/bcachefs/chardev.c
@@ -350,8 +350,8 @@ static ssize_t bch2_data_job_read(struct file *file, char __user *buf,
if (ctx->arg.op == BCH_DATA_OP_scrub) {
struct bch_dev *ca = bch2_dev_tryget(c, ctx->arg.scrub.dev);
if (ca) {
- struct bch_dev_usage u;
- bch2_dev_usage_read_fast(ca, &u);
+ struct bch_dev_usage_full u;
+ bch2_dev_usage_full_read_fast(ca, &u);
for (unsigned i = BCH_DATA_btree; i < ARRAY_SIZE(u.d); i++)
if (ctx->arg.scrub.data_types & BIT(i))
e.p.sectors_total += u.d[i].sectors;
@@ -473,7 +473,7 @@ static long bch2_ioctl_dev_usage(struct bch_fs *c,
struct bch_ioctl_dev_usage __user *user_arg)
{
struct bch_ioctl_dev_usage arg;
- struct bch_dev_usage src;
+ struct bch_dev_usage_full src;
struct bch_dev *ca;
unsigned i;
@@ -493,7 +493,7 @@ static long bch2_ioctl_dev_usage(struct bch_fs *c,
if (IS_ERR(ca))
return PTR_ERR(ca);
- src = bch2_dev_usage_read(ca);
+ src = bch2_dev_usage_full_read(ca);
arg.state = ca->mi.state;
arg.bucket_size = ca->mi.bucket_size;
@@ -514,7 +514,7 @@ static long bch2_ioctl_dev_usage_v2(struct bch_fs *c,
struct bch_ioctl_dev_usage_v2 __user *user_arg)
{
struct bch_ioctl_dev_usage_v2 arg;
- struct bch_dev_usage src;
+ struct bch_dev_usage_full src;
struct bch_dev *ca;
int ret = 0;
@@ -534,7 +534,7 @@ static long bch2_ioctl_dev_usage_v2(struct bch_fs *c,
if (IS_ERR(ca))
return PTR_ERR(ca);
- src = bch2_dev_usage_read(ca);
+ src = bch2_dev_usage_full_read(ca);
arg.state = ca->mi.state;
arg.bucket_size = ca->mi.bucket_size;
@@ -615,7 +615,7 @@ static long bch2_ioctl_disk_get_idx(struct bch_fs *c,
for_each_online_member(c, ca)
if (ca->dev == dev) {
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
return ca->dev_idx;
}
diff --git a/fs/bcachefs/compress.c b/fs/bcachefs/compress.c
index 85fc90342492..28ed32449913 100644
--- a/fs/bcachefs/compress.c
+++ b/fs/bcachefs/compress.c
@@ -371,13 +371,14 @@ static int attempt_compress(struct bch_fs *c,
};
zlib_set_workspace(&strm, workspace);
- zlib_deflateInit2(&strm,
+ if (zlib_deflateInit2(&strm,
compression.level
? clamp_t(unsigned, compression.level,
Z_BEST_SPEED, Z_BEST_COMPRESSION)
: Z_DEFAULT_COMPRESSION,
Z_DEFLATED, -MAX_WBITS, DEF_MEM_LEVEL,
- Z_DEFAULT_STRATEGY);
+ Z_DEFAULT_STRATEGY) != Z_OK)
+ return 0;
if (zlib_deflate(&strm, Z_FINISH) != Z_STREAM_END)
return 0;
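
[attempt_compress() reports 0 for "could not compress"; checking zlib_deflateInit2() folds an init failure into that same path, so the extent is simply stored uncompressed rather than erroring out. Sketch of the caller's view, assuming the caller treats 0 as "use the data as-is":]

	ret = attempt_compress(c, workspace, dst, src, compression);
	if (!ret)
		goto err;	/* fall back to storing uncompressed */
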
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index fe400dfc5d76..de02ebf847ec 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -216,7 +216,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans,
bch2_trans_begin(trans);
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -398,7 +398,7 @@ restart_drop_extra_replicas:
BCH_TRANS_COMMIT_no_enospc|
m->data_opts.btree_insert_flags);
if (!ret) {
- bch2_btree_iter_set_pos(&iter, next_pos);
+ bch2_btree_iter_set_pos(trans, &iter, next_pos);
this_cpu_add(c->counters[BCH_COUNTER_io_move_finish], new->k.size);
if (trace_io_move_finish_enabled())
@@ -426,7 +426,7 @@ nowork:
count_event(c, io_move_fail);
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
goto next;
}
out:
@@ -497,7 +497,7 @@ static int bch2_update_unwritten_extent(struct btree_trans *trans,
bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
BTREE_ITER_slots);
ret = lockrestart_do(trans, ({
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
bkey_err(k);
}));
bch2_trans_iter_exit(trans, &iter);
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index 788af88f6979..5a8bc7013512 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -57,7 +57,7 @@ static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
submit_bio_wait(bio);
bio_put(bio);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
memcpy(n_ondisk, n_sorted, btree_buf_bytes(b));
@@ -297,7 +297,7 @@ out:
if (bio)
bio_put(bio);
kvfree(n_ondisk);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
}
#ifdef CONFIG_DEBUG_FS
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
index d7f9f79318a2..bf53a029f356 100644
--- a/fs/bcachefs/dirent.c
+++ b/fs/bcachefs/dirent.c
@@ -417,8 +417,8 @@ int bch2_dirent_rename(struct btree_trans *trans,
enum bch_rename_mode mode)
{
struct qstr src_name_lookup, dst_name_lookup;
- struct btree_iter src_iter = { NULL };
- struct btree_iter dst_iter = { NULL };
+ struct btree_iter src_iter = {};
+ struct btree_iter dst_iter = {};
struct bkey_s_c old_src, old_dst = bkey_s_c_null;
struct bkey_i_dirent *new_src = NULL, *new_dst = NULL;
struct bpos dst_pos =
@@ -586,16 +586,16 @@ out_set_src:
}
if (delete_src) {
- bch2_btree_iter_set_snapshot(&src_iter, old_src.k->p.snapshot);
- ret = bch2_btree_iter_traverse(&src_iter) ?:
+ bch2_btree_iter_set_snapshot(trans, &src_iter, old_src.k->p.snapshot);
+ ret = bch2_btree_iter_traverse(trans, &src_iter) ?:
bch2_btree_delete_at(trans, &src_iter, BTREE_UPDATE_internal_snapshot_node);
if (ret)
goto out;
}
if (delete_dst) {
- bch2_btree_iter_set_snapshot(&dst_iter, old_dst.k->p.snapshot);
- ret = bch2_btree_iter_traverse(&dst_iter) ?:
+ bch2_btree_iter_set_snapshot(trans, &dst_iter, old_dst.k->p.snapshot);
+ ret = bch2_btree_iter_traverse(trans, &dst_iter) ?:
bch2_btree_delete_at(trans, &dst_iter, BTREE_UPDATE_internal_snapshot_node);
if (ret)
goto out;
@@ -642,7 +642,7 @@ u64 bch2_dirent_lookup(struct bch_fs *c, subvol_inum dir,
const struct qstr *name, subvol_inum *inum)
{
struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
int ret = lockrestart_do(trans,
bch2_dirent_lookup_trans(trans, &iter, dir, hash_info, name, inum, 0));
@@ -771,7 +771,7 @@ int bch2_fsck_remove_dirent(struct btree_trans *trans, struct bpos pos)
bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_intent);
- ret = bch2_btree_iter_traverse(&iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
&dir_hash_info, &iter,
BTREE_UPDATE_internal_snapshot_node);
diff --git a/fs/bcachefs/disk_accounting.c b/fs/bcachefs/disk_accounting.c
index a59f6c12529b..b007319b72e9 100644
--- a/fs/bcachefs/disk_accounting.c
+++ b/fs/bcachefs/disk_accounting.c
@@ -739,7 +739,7 @@ int bch2_accounting_read(struct bch_fs *c)
struct disk_accounting_pos next;
memset(&next, 0, sizeof(next));
next.type = acc_k.type + 1;
- bch2_btree_iter_set_pos(&iter, disk_accounting_pos_to_bpos(&next));
+ bch2_btree_iter_set_pos(trans, &iter, disk_accounting_pos_to_bpos(&next));
continue;
}
@@ -930,7 +930,7 @@ void bch2_verify_accounting_clean(struct bch_fs *c)
struct disk_accounting_pos next;
memset(&next, 0, sizeof(next));
next.type = acc_k.type + 1;
- bch2_btree_iter_set_pos(&iter, disk_accounting_pos_to_bpos(&next));
+ bch2_btree_iter_set_pos(trans, &iter, disk_accounting_pos_to_bpos(&next));
continue;
}
diff --git a/fs/bcachefs/disk_groups.c b/fs/bcachefs/disk_groups.c
index 5df8de0b8c02..1186280b29e9 100644
--- a/fs/bcachefs/disk_groups.c
+++ b/fs/bcachefs/disk_groups.c
@@ -555,9 +555,9 @@ void bch2_target_to_text(struct printbuf *out, struct bch_fs *c, unsigned v)
? rcu_dereference(c->devs[t.dev])
: NULL;
- if (ca && percpu_ref_tryget(&ca->io_ref)) {
+ if (ca && percpu_ref_tryget(&ca->io_ref[READ])) {
prt_printf(out, "/dev/%s", ca->name);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
} else if (ca) {
prt_printf(out, "offline device %u", t.dev);
} else {
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 6faeda7ad03d..a396865e8b17 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -105,6 +105,7 @@ struct ec_bio {
struct bch_dev *ca;
struct ec_stripe_buf *buf;
size_t idx;
+ int rw;
u64 submit_time;
struct bio bio;
};
@@ -462,7 +463,8 @@ int bch2_trigger_stripe(struct btree_trans *trans,
return ret;
if (gc)
- memcpy(&gc->r.e, &acc.replicas, replicas_entry_bytes(&acc.replicas));
+ unsafe_memcpy(&gc->r.e, &acc.replicas,
+ replicas_entry_bytes(&acc.replicas), "VLA");
}
if (old_s) {
@@ -703,6 +705,7 @@ static void ec_block_endio(struct bio *bio)
struct bch_extent_ptr *ptr = &v->ptrs[ec_bio->idx];
struct bch_dev *ca = ec_bio->ca;
struct closure *cl = bio->bi_private;
+ int rw = ec_bio->rw;
bch2_account_io_completion(ca, bio_data_dir(bio),
ec_bio->submit_time, !bio->bi_status);
@@ -724,7 +727,7 @@ static void ec_block_endio(struct bio *bio)
}
bio_put(&ec_bio->bio);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[rw]);
closure_put(cl);
}
@@ -775,6 +778,7 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
ec_bio->ca = ca;
ec_bio->buf = buf;
ec_bio->idx = idx;
+ ec_bio->rw = rw;
ec_bio->submit_time = local_clock();
ec_bio->bio.bi_iter.bi_sector = ptr->offset + buf->offset + (offset >> 9);
@@ -784,14 +788,14 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b);
closure_get(cl);
- percpu_ref_get(&ca->io_ref);
+ percpu_ref_get(&ca->io_ref[rw]);
submit_bio(&ec_bio->bio);
offset += b;
}
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[rw]);
}
static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
@@ -1264,7 +1268,7 @@ static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
ob->sectors_free,
GFP_KERNEL, 0);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
if (ret)
s->err = ret;
@@ -1836,7 +1840,7 @@ static int __get_existing_stripe(struct btree_trans *trans,
ret = 1;
}
out:
- bch2_set_btree_iter_dontneed(&iter);
+ bch2_set_btree_iter_dontneed(trans, &iter);
err:
bch2_trans_iter_exit(trans, &iter);
return ret;
@@ -1949,7 +1953,7 @@ static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_st
if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
if (start_pos.offset) {
start_pos = min_pos;
- bch2_btree_iter_set_pos(&iter, start_pos);
+ bch2_btree_iter_set_pos(trans, &iter, start_pos);
continue;
}
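
[The erasure-coding bio now records its direction at submit time so the completion handler can drop the matching reference (sketch):]

	/* submit side: */
	ec_bio->rw = rw;
	percpu_ref_get(&ca->io_ref[rw]);
	submit_bio(&ec_bio->bio);

	/* endio side: */
	percpu_ref_put(&ca->io_ref[ec_bio->rw]);
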
diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c
index d4dfd13a8076..baf5dfb32298 100644
--- a/fs/bcachefs/error.c
+++ b/fs/bcachefs/error.c
@@ -34,7 +34,7 @@ bool __bch2_inconsistent_error(struct bch_fs *c, struct printbuf *out)
journal_cur_seq(&c->journal));
return true;
case BCH_ON_ERROR_panic:
- bch2_print_string_as_lines(KERN_ERR, out->buf);
+ bch2_print_string_as_lines_nonblocking(KERN_ERR, out->buf);
panic(bch2_fmt(c, "panic after error"));
return true;
default:
@@ -45,6 +45,8 @@ bool __bch2_inconsistent_error(struct bch_fs *c, struct printbuf *out)
bool bch2_inconsistent_error(struct bch_fs *c)
{
struct printbuf buf = PRINTBUF;
+ buf.atomic++;
+
printbuf_indent_add_nextline(&buf, 2);
bool ret = __bch2_inconsistent_error(c, &buf);
@@ -59,6 +61,7 @@ static bool bch2_fs_trans_inconsistent(struct bch_fs *c, struct btree_trans *tra
const char *fmt, va_list args)
{
struct printbuf buf = PRINTBUF;
+ buf.atomic++;
bch2_log_msg_start(c, &buf);
@@ -68,7 +71,7 @@ static bool bch2_fs_trans_inconsistent(struct bch_fs *c, struct btree_trans *tra
if (trans)
bch2_trans_updates_to_text(&buf, trans);
bool ret = __bch2_inconsistent_error(c, &buf);
- bch2_print_string_as_lines(KERN_ERR, buf.buf);
+ bch2_print_string_as_lines_nonblocking(KERN_ERR, buf.buf);
printbuf_exit(&buf);
return ret;
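
[Both changes keep the error path from sleeping: bumping buf.atomic makes the printbuf use non-blocking allocations (as the printbuf API appears to be used here), and the _nonblocking print variant avoids waiting on the console. Sketch of the pattern:]

	struct printbuf buf = PRINTBUF;
	buf.atomic++;	/* allocations must not sleep in this context */

	/* format the message into buf ... */
	bch2_print_string_as_lines_nonblocking(KERN_ERR, buf.buf);
	printbuf_exit(&buf);
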
diff --git a/fs/bcachefs/extent_update.c b/fs/bcachefs/extent_update.c
index 6aac579a692a..6bb42985306e 100644
--- a/fs/bcachefs/extent_update.c
+++ b/fs/bcachefs/extent_update.c
@@ -112,7 +112,7 @@ int bch2_extent_atomic_end(struct btree_trans *trans,
unsigned nr_iters = 0;
int ret;
- ret = bch2_btree_iter_traverse(iter);
+ ret = bch2_btree_iter_traverse(trans, iter);
if (ret)
return ret;
@@ -126,9 +126,9 @@ int bch2_extent_atomic_end(struct btree_trans *trans,
if (ret < 0)
return ret;
- bch2_trans_copy_iter(&copy, iter);
+ bch2_trans_copy_iter(trans, &copy, iter);
- for_each_btree_key_max_continue_norestart(copy, insert->k.p, 0, k, ret) {
+ for_each_btree_key_max_continue_norestart(trans, copy, insert->k.p, 0, k, ret) {
unsigned offset = 0;
if (bkey_gt(bkey_start_pos(&insert->k), bkey_start_pos(k.k)))
diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c
index a03e2c780cba..19d4599918dc 100644
--- a/fs/bcachefs/fs-io-buffered.c
+++ b/fs/bcachefs/fs-io-buffered.c
@@ -183,12 +183,12 @@ static void bchfs_read(struct btree_trans *trans,
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(&iter, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
- bch2_btree_iter_set_pos(&iter,
+ bch2_btree_iter_set_pos(trans, &iter,
POS(inum.inum, rbio->bio.bi_iter.bi_sector));
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index c80ed3a54e70..65c2c33d253d 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -48,7 +48,7 @@ static void nocow_flush_endio(struct bio *_bio)
struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);
closure_put(bio->cl);
- percpu_ref_put(&bio->ca->io_ref);
+ percpu_ref_put(&bio->ca->io_ref[WRITE]);
bio_put(&bio->bio);
}
@@ -71,7 +71,7 @@ void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
rcu_read_lock();
ca = rcu_dereference(c->devs[dev]);
- if (ca && !percpu_ref_tryget(&ca->io_ref))
+ if (ca && !percpu_ref_tryget(&ca->io_ref[WRITE]))
ca = NULL;
rcu_read_unlock();
@@ -636,9 +636,9 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
if (ret)
goto bkey_err;
- bch2_btree_iter_set_snapshot(&iter, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
if ((ret = bkey_err(k)))
goto bkey_err;
@@ -649,13 +649,13 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
/* already reserved */
if (bkey_extent_is_reservation(k) &&
bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
continue;
}
if (bkey_extent_is_data(k.k) &&
!(mode & FALLOC_FL_ZERO_RANGE)) {
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
continue;
}
@@ -676,7 +676,7 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
if (ret)
goto bkey_err;
}
- bch2_btree_iter_set_pos(&iter, POS(iter.pos.inode, hole_start));
+ bch2_btree_iter_set_pos(trans, &iter, POS(iter.pos.inode, hole_start));
if (ret)
goto bkey_err;
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index fc834bdf1f52..5a41b1a8e54f 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -88,7 +88,7 @@ int __must_check bch2_write_inode(struct bch_fs *c,
void *p, unsigned fields)
{
struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
struct bch_inode_unpacked inode_u;
int ret;
retry:
@@ -1075,7 +1075,7 @@ int bch2_setattr_nonsize(struct mnt_idmap *idmap,
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_qid qid;
struct btree_trans *trans;
- struct btree_iter inode_iter = { NULL };
+ struct btree_iter inode_iter = {};
struct bch_inode_unpacked inode_u;
struct posix_acl *acl = NULL;
kuid_t kuid;
@@ -1330,9 +1330,9 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
if (ret)
continue;
- bch2_btree_iter_set_snapshot(&iter, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
- k = bch2_btree_iter_peek_max(&iter, end);
+ k = bch2_btree_iter_peek_max(trans, &iter, end);
ret = bkey_err(k);
if (ret)
continue;
@@ -1342,7 +1342,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
if (!bkey_extent_is_data(k.k) &&
k.k->type != KEY_TYPE_reservation) {
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
continue;
}
@@ -1380,7 +1380,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
bkey_copy(prev.k, cur.k);
have_extent = true;
- bch2_btree_iter_set_pos(&iter,
+ bch2_btree_iter_set_pos(trans, &iter,
POS(iter.pos.inode, iter.pos.offset + sectors));
}
bch2_trans_iter_exit(trans, &iter);
@@ -1697,17 +1697,17 @@ retry:
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(&iter1, snapshot);
- bch2_btree_iter_set_snapshot(&iter2, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &iter1, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &iter2, snapshot);
ret = bch2_inode_find_by_inum_trans(trans, inode_inum(inode), &inode_u);
if (ret)
goto err;
if (inode_u.bi_dir == dir->ei_inode.bi_inum) {
- bch2_btree_iter_set_pos(&iter1, POS(inode_u.bi_dir, inode_u.bi_dir_offset));
+ bch2_btree_iter_set_pos(trans, &iter1, POS(inode_u.bi_dir, inode_u.bi_dir_offset));
- k = bch2_btree_iter_peek_slot(&iter1);
+ k = bch2_btree_iter_peek_slot(trans, &iter1);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1731,7 +1731,7 @@ retry:
* File with multiple hardlinks and our backref is to the wrong
* directory - linear search:
*/
- for_each_btree_key_continue_norestart(iter2, 0, k, ret) {
+ for_each_btree_key_continue_norestart(trans, iter2, 0, k, ret) {
if (k.k->p.inode > dir->ei_inode.bi_inum)
break;
@@ -2237,7 +2237,7 @@ got_sb:
/* XXX: create an anonymous device for multi device filesystems */
sb->s_bdev = bdev;
sb->s_dev = bdev->bd_dev;
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
break;
}
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index 52320295dcf6..18308f3d64a1 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -186,7 +186,7 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
{
struct bch_fs *c = trans->c;
struct qstr lostfound_str = QSTR("lost+found");
- struct btree_iter lostfound_iter = { NULL };
+ struct btree_iter lostfound_iter = {};
u64 inum = 0;
unsigned d_type = 0;
int ret;
@@ -295,8 +295,8 @@ create_lostfound:
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(&lostfound_iter, snapshot);
- ret = bch2_btree_iter_traverse(&lostfound_iter);
+ bch2_btree_iter_set_snapshot(trans, &lostfound_iter, snapshot);
+ ret = bch2_btree_iter_traverse(trans, &lostfound_iter);
if (ret)
goto err;
@@ -544,7 +544,7 @@ static int reconstruct_subvol(struct btree_trans *trans, u32 snapshotid, u32 sub
new_inode.bi_subvol = subvolid;
int ret = bch2_inode_create(trans, &inode_iter, &new_inode, snapshotid, cpu) ?:
- bch2_btree_iter_traverse(&inode_iter) ?:
+ bch2_btree_iter_traverse(trans, &inode_iter) ?:
bch2_inode_write(trans, &inode_iter, &new_inode);
bch2_trans_iter_exit(trans, &inode_iter);
if (ret)
@@ -609,7 +609,7 @@ static int reconstruct_inode(struct btree_trans *trans, enum btree_id btree, u32
struct btree_iter iter = {};
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, SPOS(inum, U64_MAX, snapshot), 0);
- struct bkey_s_c k = bch2_btree_iter_peek_prev_min(&iter, POS(inum, 0));
+ struct bkey_s_c k = bch2_btree_iter_peek_prev_min(trans, &iter, POS(inum, 0));
bch2_trans_iter_exit(trans, &iter);
int ret = bkey_err(k);
if (ret)
@@ -1557,7 +1557,7 @@ static int overlapping_extents_found(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct printbuf buf = PRINTBUF;
- struct btree_iter iter1, iter2 = { NULL };
+ struct btree_iter iter1, iter2 = {};
struct bkey_s_c k1, k2;
int ret;
@@ -1566,7 +1566,7 @@ static int overlapping_extents_found(struct btree_trans *trans,
bch2_trans_iter_init(trans, &iter1, btree, pos1,
BTREE_ITER_all_snapshots|
BTREE_ITER_not_extents);
- k1 = bch2_btree_iter_peek_max(&iter1, POS(pos1.inode, U64_MAX));
+ k1 = bch2_btree_iter_peek_max(trans, &iter1, POS(pos1.inode, U64_MAX));
ret = bkey_err(k1);
if (ret)
goto err;
@@ -1586,12 +1586,12 @@ static int overlapping_extents_found(struct btree_trans *trans,
goto err;
}
- bch2_trans_copy_iter(&iter2, &iter1);
+ bch2_trans_copy_iter(trans, &iter2, &iter1);
while (1) {
- bch2_btree_iter_advance(&iter2);
+ bch2_btree_iter_advance(trans, &iter2);
- k2 = bch2_btree_iter_peek_max(&iter2, POS(pos1.inode, U64_MAX));
+ k2 = bch2_btree_iter_peek_max(trans, &iter2, POS(pos1.inode, U64_MAX));
ret = bkey_err(k2);
if (ret)
goto err;
@@ -1791,9 +1791,9 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
struct btree_iter iter2;
- bch2_trans_copy_iter(&iter2, iter);
- bch2_btree_iter_set_snapshot(&iter2, i->snapshot);
- ret = bch2_btree_iter_traverse(&iter2) ?:
+ bch2_trans_copy_iter(trans, &iter2, iter);
+ bch2_btree_iter_set_snapshot(trans, &iter2, i->snapshot);
+ ret = bch2_btree_iter_traverse(trans, &iter2) ?:
bch2_btree_delete_at(trans, &iter2,
BTREE_UPDATE_internal_snapshot_node);
bch2_trans_iter_exit(trans, &iter2);
@@ -2185,7 +2185,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
BTREE_ID_dirents,
SPOS(k.k->p.inode, k.k->p.offset, *i),
BTREE_ITER_intent);
- ret = bch2_btree_iter_traverse(&delete_iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &delete_iter) ?:
bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
hash_info,
&delete_iter,
@@ -2412,7 +2412,7 @@ static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter,
bch2_trans_iter_exit(trans, &parent_iter);
bch2_trans_iter_init(trans, &parent_iter,
BTREE_ID_subvolumes, POS(0, parent), 0);
- k = bch2_btree_iter_peek_slot(&parent_iter);
+ k = bch2_btree_iter_peek_slot(trans, &parent_iter);
ret = bkey_err(k);
if (ret)
goto err;
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index 80051073f613..b51d98cf8a80 100644
--- a/fs/bcachefs/inode.c
+++ b/fs/bcachefs/inode.c
@@ -940,7 +940,7 @@ int bch2_inode_create(struct btree_trans *trans,
BTREE_ITER_intent);
struct bkey_s_c k;
again:
- while ((k = bch2_btree_iter_peek(iter)).k &&
+ while ((k = bch2_btree_iter_peek(trans, iter)).k &&
!(ret = bkey_err(k)) &&
bkey_lt(k.k->p, POS(0, max))) {
if (pos < iter->pos.offset)
@@ -951,7 +951,7 @@ again:
* we've found just one:
*/
pos = iter->pos.offset + 1;
- bch2_btree_iter_set_pos(iter, POS(0, pos));
+ bch2_btree_iter_set_pos(trans, iter, POS(0, pos));
}
if (!ret && pos < max)
@@ -967,12 +967,12 @@ again:
/* Retry from start */
pos = start = min;
- bch2_btree_iter_set_pos(iter, POS(0, pos));
+ bch2_btree_iter_set_pos(trans, iter, POS(0, pos));
le32_add_cpu(&cursor->v.gen, 1);
goto again;
found_slot:
- bch2_btree_iter_set_pos(iter, SPOS(0, pos, snapshot));
- k = bch2_btree_iter_peek_slot(iter);
+ bch2_btree_iter_set_pos(trans, iter, SPOS(0, pos, snapshot));
+ k = bch2_btree_iter_peek_slot(trans, iter);
ret = bkey_err(k);
if (ret) {
bch2_trans_iter_exit(trans, iter);
@@ -1009,9 +1009,9 @@ static int bch2_inode_delete_keys(struct btree_trans *trans,
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(&iter, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
- k = bch2_btree_iter_peek_max(&iter, end);
+ k = bch2_btree_iter_peek_max(trans, &iter, end);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1042,7 +1042,7 @@ err:
int bch2_inode_rm(struct bch_fs *c, subvol_inum inum)
{
struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
struct bkey_s_c k;
u32 snapshot;
int ret;
@@ -1207,7 +1207,7 @@ int bch2_inum_opts_get(struct btree_trans *trans, subvol_inum inum, struct bch_i
static noinline int __bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum, u32 snapshot)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
struct bkey_i_inode_generation delete;
struct bch_inode_unpacked inode_u;
struct bkey_s_c k;
diff --git a/fs/bcachefs/io_misc.c b/fs/bcachefs/io_misc.c
index 6b842c8d21be..cc07729a4b62 100644
--- a/fs/bcachefs/io_misc.c
+++ b/fs/bcachefs/io_misc.c
@@ -43,7 +43,7 @@ int bch2_extent_fallocate(struct btree_trans *trans,
bch2_bkey_buf_init(&new);
closure_init_stack(&cl);
- k = bch2_btree_iter_peek_slot(iter);
+ k = bch2_btree_iter_peek_slot(trans, iter);
ret = bkey_err(k);
if (ret)
return ret;
@@ -164,12 +164,12 @@ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
if (ret)
continue;
- bch2_btree_iter_set_snapshot(iter, snapshot);
+ bch2_btree_iter_set_snapshot(trans, iter, snapshot);
/*
* peek_max() doesn't have ideal semantics for extents:
*/
- k = bch2_btree_iter_peek_max(iter, end_pos);
+ k = bch2_btree_iter_peek_max(trans, iter, end_pos);
if (!k.k)
break;
@@ -230,7 +230,7 @@ static int truncate_set_isize(struct btree_trans *trans,
u64 new_i_size,
bool warn)
{
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
struct bch_inode_unpacked inode_u;
int ret;
@@ -399,7 +399,7 @@ case LOGGED_OP_FINSERT_start:
if (ret)
goto err;
} else {
- bch2_btree_iter_set_pos(&iter, POS(inum.inum, src_offset));
+ bch2_btree_iter_set_pos(trans, &iter, POS(inum.inum, src_offset));
ret = bch2_fpunch_at(trans, &iter, inum, src_offset + len, i_sectors_delta);
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
@@ -425,12 +425,12 @@ case LOGGED_OP_FINSERT_shift_extents:
if (ret)
goto btree_err;
- bch2_btree_iter_set_snapshot(&iter, snapshot);
- bch2_btree_iter_set_pos(&iter, SPOS(inum.inum, pos, snapshot));
+ bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
+ bch2_btree_iter_set_pos(trans, &iter, SPOS(inum.inum, pos, snapshot));
k = insert
- ? bch2_btree_iter_peek_prev_min(&iter, POS(inum.inum, 0))
- : bch2_btree_iter_peek_max(&iter, POS(inum.inum, U64_MAX));
+ ? bch2_btree_iter_peek_prev_min(trans, &iter, POS(inum.inum, 0))
+ : bch2_btree_iter_peek_max(trans, &iter, POS(inum.inum, U64_MAX));
if ((ret = bkey_err(k)))
goto btree_err;
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index fd01e67b3e84..417bb0c7bbfa 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -394,7 +394,7 @@ static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
if (rbio->have_ioref) {
struct bch_dev *ca = bch2_dev_have_ref(rbio->c, rbio->pick.ptr.dev);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
}
if (rbio->split) {
@@ -909,7 +909,7 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
prt_printf(&buf, "memory gen: %u", gen);
- ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
+ ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(trans, &iter)));
if (!ret) {
prt_newline(&buf);
bch2_bkey_val_to_text(&buf, c, k);
@@ -1003,7 +1003,7 @@ retry_pick:
unlikely(dev_ptr_stale(ca, &pick.ptr))) {
read_from_stale_dirty_pointer(trans, ca, k, pick.ptr);
bch2_mark_io_failure(failed, &pick, false);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
goto retry_pick;
}
@@ -1036,7 +1036,7 @@ retry_pick:
*/
if (pick.crc.compressed_size > u->op.wbio.bio.bi_iter.bi_size) {
if (ca)
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
rbio->ret = -BCH_ERR_data_read_buffer_too_small;
goto out_read_done;
}
@@ -1285,12 +1285,12 @@ int __bch2_read(struct btree_trans *trans, struct bch_read_bio *rbio,
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(&iter, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
- bch2_btree_iter_set_pos(&iter,
+ bch2_btree_iter_set_pos(trans, &iter,
POS(inum.inum, bvec_iter.bi_sector));
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index 07b55839768e..a418fa62f09d 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -168,9 +168,9 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans,
*i_sectors_delta = 0;
*disk_sectors_delta = 0;
- bch2_trans_copy_iter(&iter, extent_iter);
+ bch2_trans_copy_iter(trans, &iter, extent_iter);
- for_each_btree_key_max_continue_norestart(iter,
+ for_each_btree_key_max_continue_norestart(trans, iter,
new->k.p, BTREE_ITER_slots, old, ret) {
s64 sectors = min(new->k.p.offset, old.k->p.offset) -
max(bkey_start_offset(&new->k),
@@ -292,7 +292,7 @@ int bch2_extent_update(struct btree_trans *trans,
* path already traversed at iter->pos because
* bch2_trans_extent_update() will use it to attempt extent merging
*/
- ret = __bch2_btree_iter_traverse(iter);
+ ret = __bch2_btree_iter_traverse(trans, iter);
if (ret)
return ret;
@@ -337,7 +337,7 @@ int bch2_extent_update(struct btree_trans *trans,
if (i_sectors_delta_total)
*i_sectors_delta_total += i_sectors_delta;
- bch2_btree_iter_set_pos(iter, next_pos);
+ bch2_btree_iter_set_pos(trans, iter, next_pos);
return 0;
}
@@ -445,6 +445,11 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
BUG_ON(c->opts.nochanges);
bkey_for_each_ptr(ptrs, ptr) {
+ /*
+ * XXX: btree writes should be using io_ref[WRITE], but we
+ * aren't retrying failed btree writes yet (due to device
+ * removal/ro):
+ */
struct bch_dev *ca = nocow
? bch2_dev_have_ref(c, ptr->dev)
: bch2_dev_get_ioref(c, ptr->dev, type == BCH_DATA_btree ? READ : WRITE);
@@ -697,12 +702,19 @@ static void bch2_write_endio(struct bio *bio)
bch2_account_io_completion(ca, BCH_MEMBER_ERROR_write,
wbio->submit_time, !bio->bi_status);
- if (bio->bi_status) {
- bch_err_inum_offset_ratelimited(ca,
- op->pos.inode,
- wbio->inode_offset << 9,
- "data write error: %s",
- bch2_blk_status_to_str(bio->bi_status));
+ if (unlikely(bio->bi_status)) {
+ if (ca)
+ bch_err_inum_offset_ratelimited(ca,
+ op->pos.inode,
+ wbio->inode_offset << 9,
+ "data write error: %s",
+ bch2_blk_status_to_str(bio->bi_status));
+ else
+ bch_err_inum_offset_ratelimited(c,
+ op->pos.inode,
+ wbio->inode_offset << 9,
+ "data write error: %s",
+ bch2_blk_status_to_str(bio->bi_status));
set_bit(wbio->dev, op->failed.d);
op->flags |= BCH_WRITE_io_error;
}
@@ -715,7 +727,7 @@ static void bch2_write_endio(struct bio *bio)
}
if (wbio->have_ioref)
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
if (wbio->bounce)
bch2_bio_free_pages_pool(c, bio);
@@ -1293,7 +1305,7 @@ retry:
if (ret)
break;
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
break;
@@ -1377,7 +1389,7 @@ retry:
bch2_keylist_push(&op->insert_keys);
if (op->flags & BCH_WRITE_submitted)
break;
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
}
out:
bch2_trans_iter_exit(trans, &iter);
@@ -1414,7 +1426,7 @@ err:
return;
err_get_ioref:
darray_for_each(buckets, i)
- percpu_ref_put(&bch2_dev_have_ref(c, i->b.inode)->io_ref);
+ percpu_ref_put(&bch2_dev_have_ref(c, i->b.inode)->io_ref[WRITE]);
/* Fall back to COW path: */
goto out;
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index 8a36d5536668..d8f74b6d0a75 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -1315,7 +1315,7 @@ int bch2_fs_journal_alloc(struct bch_fs *c)
int ret = bch2_dev_journal_alloc(ca, true);
if (ret) {
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
return ret;
}
}
@@ -1404,6 +1404,14 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
nr = cur_seq - last_seq;
+ /*
+ * Extra fudge factor, in case we crashed when the journal pin fifo was
+ * nearly or completely full. We'll need to be able to open additional
+ * journal entries (at least a few) in order for journal replay to get
+ * going:
+ */
+ nr += nr / 4;
+
if (nr + 1 > j->pin.size) {
free_fifo(&j->pin);
init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
@@ -1461,11 +1469,9 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
j->reservations.idx = journal_cur_seq(j);
c->last_bucket_seq_cleanup = journal_cur_seq(j);
-
- bch2_journal_space_available(j);
spin_unlock(&j->lock);
- return bch2_journal_reclaim_start(j);
+ return 0;
}
/* init/exit: */
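
The new fudge factor is plain integer arithmetic. A standalone sketch of how the pin fifo ends up sized, with roundup_pow_of_two() reimplemented here purely for illustration:

#include <stdint.h>
#include <stdio.h>

static uint64_t roundup_pow_of_two(uint64_t n)
{
	uint64_t r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	uint64_t last_seq = 1000, cur_seq = 1096;
	uint64_t nr = cur_seq - last_seq;	/* 96 entries to replay */

	nr += nr / 4;				/* 25% headroom: 96 -> 120 */
	printf("pin fifo size: %llu\n",
	       (unsigned long long)roundup_pow_of_two(nr + 1));	/* 128 */
	return 0;
}

The extra quarter gives replay room to open new journal entries even if the crash happened with the pin fifo nearly full.
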
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 2debc213e47c..1b7961f4f609 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -1218,7 +1218,7 @@ static CLOSURE_CALLBACK(bch2_journal_read_device)
out:
bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
kvfree(buf.data);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
closure_return(cl);
return;
err:
@@ -1253,7 +1253,7 @@ int bch2_journal_read(struct bch_fs *c,
if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
ca->mi.state == BCH_MEMBER_STATE_ro) &&
- percpu_ref_tryget(&ca->io_ref))
+ percpu_ref_tryget(&ca->io_ref[READ]))
closure_call(&ca->journal.read,
bch2_journal_read_device,
system_unbound_wq,
@@ -1768,7 +1768,7 @@ static void journal_write_endio(struct bio *bio)
}
closure_put(&w->io);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
}
static CLOSURE_CALLBACK(journal_write_submit)
@@ -1843,7 +1843,7 @@ static CLOSURE_CALLBACK(journal_write_preflush)
if (w->separate_flush) {
for_each_rw_member(c, ca) {
- percpu_ref_get(&ca->io_ref);
+ percpu_ref_get(&ca->io_ref[WRITE]);
struct journal_device *ja = &ca->journal;
struct bio *bio = &ja->bio[w->idx]->bio;
diff --git a/fs/bcachefs/migrate.c b/fs/bcachefs/migrate.c
index 57ad662871ba..90dcf80bd64a 100644
--- a/fs/bcachefs/migrate.c
+++ b/fs/bcachefs/migrate.c
@@ -130,7 +130,7 @@ static int bch2_dev_metadata_drop(struct bch_fs *c,
retry:
ret = 0;
while (bch2_trans_begin(trans),
- (b = bch2_btree_iter_peek_node(&iter)) &&
+ (b = bch2_btree_iter_peek_node(trans, &iter)) &&
!(ret = PTR_ERR_OR_ZERO(b))) {
bch2_progress_update_iter(trans, progress, &iter, "dropping metadata");
@@ -154,7 +154,7 @@ retry:
if (ret)
break;
next:
- bch2_btree_iter_next_node(&iter);
+ bch2_btree_iter_next_node(trans, &iter);
}
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 5d41260e10da..fc396b9fa754 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -545,7 +545,7 @@ static struct bkey_s_c bch2_lookup_indirect_extent_for_move(struct btree_trans *
BTREE_ID_reflink, reflink_pos,
BTREE_ITER_not_extents);
- struct bkey_s_c k = bch2_btree_iter_peek(iter);
+ struct bkey_s_c k = bch2_btree_iter_peek(trans, iter);
if (!k.k || bkey_err(k)) {
bch2_trans_iter_exit(trans, iter);
return k;
@@ -603,7 +603,7 @@ static int bch2_move_data_btree(struct moving_context *ctxt,
bch2_trans_begin(trans);
- k = bch2_btree_iter_peek(&iter);
+ k = bch2_btree_iter_peek(trans, &iter);
if (!k.k)
break;
@@ -681,7 +681,7 @@ next:
if (ctxt->stats)
atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
next_nondata:
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
}
bch2_trans_iter_exit(trans, &reflink_iter);
@@ -794,7 +794,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt,
bch2_trans_begin(trans);
- k = bch2_btree_iter_peek(&bp_iter);
+ k = bch2_btree_iter_peek(trans, &bp_iter);
ret = bkey_err(k);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
@@ -876,7 +876,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt,
if (ctxt->stats)
atomic64_add(sectors, &ctxt->stats->sectors_seen);
next:
- bch2_btree_iter_advance(&bp_iter);
+ bch2_btree_iter_advance(trans, &bp_iter);
}
err:
bch2_trans_iter_exit(trans, &bp_iter);
@@ -991,7 +991,7 @@ static int bch2_move_btree(struct bch_fs *c,
retry:
ret = 0;
while (bch2_trans_begin(trans),
- (b = bch2_btree_iter_peek_node(&iter)) &&
+ (b = bch2_btree_iter_peek_node(trans, &iter)) &&
!(ret = PTR_ERR_OR_ZERO(b))) {
if (kthread && kthread_should_stop())
break;
@@ -1011,7 +1011,7 @@ retry:
if (ret)
break;
next:
- bch2_btree_iter_next_node(&iter);
+ bch2_btree_iter_next_node(trans, &iter);
}
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 5126c870ce5b..159410c50861 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -280,7 +280,11 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
s64 wait = S64_MAX, fragmented_allowed, fragmented;
for_each_rw_member(c, ca) {
- struct bch_dev_usage usage = bch2_dev_usage_read(ca);
+ struct bch_dev_usage_full usage_full = bch2_dev_usage_full_read(ca);
+ struct bch_dev_usage usage;
+
+ for (unsigned i = 0; i < BCH_DATA_NR; i++)
+ usage.buckets[i] = usage_full.d[i].buckets;
fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
ca->mi.bucket_size) >> 1);
@@ -288,7 +292,7 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
for (unsigned i = 0; i < BCH_DATA_NR; i++)
if (data_type_movable(i))
- fragmented += usage.d[i].fragmented;
+ fragmented += usage_full.d[i].fragmented;
wait = min(wait, max(0LL, fragmented_allowed - fragmented));
}
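
For reference, the copygc wait computation this hunk touches compares fragmented space across movable data types against half the free-bucket capacity. A toy sketch with invented numbers (the real dev_buckets_available() accounting is far more involved):

#include <stdio.h>

#define DATA_NR 3

int main(void)
{
	/* which data types copygc is allowed to move (invented mapping) */
	const int movable[DATA_NR] = { 0, 1, 1 };
	long fragmented_by_type[DATA_NR] = { 0, 4000, 12000 };	/* sectors */
	long bucket_size = 512;					/* sectors */
	long buckets_available = 1000;

	long fragmented_allowed = buckets_available * bucket_size / 2;
	long fragmented = 0;

	for (int i = 0; i < DATA_NR; i++)
		if (movable[i])
			fragmented += fragmented_by_type[i];

	long wait = fragmented_allowed - fragmented;
	printf("copygc wait: %ld sectors\n", wait > 0 ? wait : 0);
	return 0;
}

The patch's change is that the per-type fragmented counts now come from the "full" usage struct, while the compact usage struct only carries bucket counts.
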
diff --git a/fs/bcachefs/namei.c b/fs/bcachefs/namei.c
index ee7251709fb9..0d65ea96f7a2 100644
--- a/fs/bcachefs/namei.c
+++ b/fs/bcachefs/namei.c
@@ -28,8 +28,8 @@ int bch2_create_trans(struct btree_trans *trans,
unsigned flags)
{
struct bch_fs *c = trans->c;
- struct btree_iter dir_iter = { NULL };
- struct btree_iter inode_iter = { NULL };
+ struct btree_iter dir_iter = {};
+ struct btree_iter inode_iter = {};
subvol_inum new_inum = dir;
u64 now = bch2_current_time(c);
u64 cpu = raw_smp_processor_id();
@@ -127,8 +127,8 @@ int bch2_create_trans(struct btree_trans *trans,
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(&dir_iter, dir_snapshot);
- ret = bch2_btree_iter_traverse(&dir_iter);
+ bch2_btree_iter_set_snapshot(trans, &dir_iter, dir_snapshot);
+ ret = bch2_btree_iter_traverse(trans, &dir_iter);
if (ret)
goto err;
}
@@ -177,9 +177,9 @@ int bch2_create_trans(struct btree_trans *trans,
new_inode->bi_depth = dir_u->bi_depth + 1;
inode_iter.flags &= ~BTREE_ITER_all_snapshots;
- bch2_btree_iter_set_snapshot(&inode_iter, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &inode_iter, snapshot);
- ret = bch2_btree_iter_traverse(&inode_iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &inode_iter) ?:
bch2_inode_write(trans, &inode_iter, new_inode);
err:
bch2_trans_iter_exit(trans, &inode_iter);
@@ -193,8 +193,8 @@ int bch2_link_trans(struct btree_trans *trans,
const struct qstr *name)
{
struct bch_fs *c = trans->c;
- struct btree_iter dir_iter = { NULL };
- struct btree_iter inode_iter = { NULL };
+ struct btree_iter dir_iter = {};
+ struct btree_iter inode_iter = {};
struct bch_hash_info dir_hash;
u64 now = bch2_current_time(c);
u64 dir_offset = 0;
@@ -253,9 +253,9 @@ int bch2_unlink_trans(struct btree_trans *trans,
bool deleting_subvol)
{
struct bch_fs *c = trans->c;
- struct btree_iter dir_iter = { NULL };
- struct btree_iter dirent_iter = { NULL };
- struct btree_iter inode_iter = { NULL };
+ struct btree_iter dir_iter = {};
+ struct btree_iter dirent_iter = {};
+ struct btree_iter inode_iter = {};
struct bch_hash_info dir_hash;
subvol_inum inum;
u64 now = bch2_current_time(c);
@@ -301,7 +301,7 @@ int bch2_unlink_trans(struct btree_trans *trans,
if (ret)
goto err;
- k = bch2_btree_iter_peek_slot(&dirent_iter);
+ k = bch2_btree_iter_peek_slot(trans, &dirent_iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -310,8 +310,8 @@ int bch2_unlink_trans(struct btree_trans *trans,
* If we're deleting a subvolume, we need to really delete the
* dirent, not just emit a whiteout in the current snapshot:
*/
- bch2_btree_iter_set_snapshot(&dirent_iter, k.k->p.snapshot);
- ret = bch2_btree_iter_traverse(&dirent_iter);
+ bch2_btree_iter_set_snapshot(trans, &dirent_iter, k.k->p.snapshot);
+ ret = bch2_btree_iter_traverse(trans, &dirent_iter);
if (ret)
goto err;
} else {
@@ -390,10 +390,10 @@ int bch2_rename_trans(struct btree_trans *trans,
enum bch_rename_mode mode)
{
struct bch_fs *c = trans->c;
- struct btree_iter src_dir_iter = { NULL };
- struct btree_iter dst_dir_iter = { NULL };
- struct btree_iter src_inode_iter = { NULL };
- struct btree_iter dst_inode_iter = { NULL };
+ struct btree_iter src_dir_iter = {};
+ struct btree_iter dst_dir_iter = {};
+ struct btree_iter src_inode_iter = {};
+ struct btree_iter dst_inode_iter = {};
struct bch_hash_info src_hash, dst_hash;
subvol_inum src_inum, dst_inum;
u64 src_offset, dst_offset;
@@ -666,7 +666,7 @@ static int bch2_check_dirent_inode_dirent(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct printbuf buf = PRINTBUF;
- struct btree_iter bp_iter = { NULL };
+ struct btree_iter bp_iter = {};
int ret = 0;
if (inode_points_to_dirent(target, d))
diff --git a/fs/bcachefs/quota.c b/fs/bcachefs/quota.c
index 8b857fc33244..3d4755d73af7 100644
--- a/fs/bcachefs/quota.c
+++ b/fs/bcachefs/quota.c
@@ -516,7 +516,7 @@ static int bch2_fs_quota_read_inode(struct btree_trans *trans,
bch2_quota_acct(c, bch_qid(&u), Q_INO, 1,
KEY_TYPE_QUOTA_NOCHECK);
advance:
- bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
+ bch2_btree_iter_set_pos(trans, iter, bpos_nosnap_successor(iter->pos));
return 0;
}
diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c
index b9bde04b66c0..c63fa53f30d2 100644
--- a/fs/bcachefs/rebalance.c
+++ b/fs/bcachefs/rebalance.c
@@ -233,7 +233,7 @@ int bch2_set_rebalance_needs_scan_trans(struct btree_trans *trans, u64 inum)
bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
BTREE_ITER_intent);
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -281,7 +281,7 @@ static int bch2_clear_rebalance_needs_scan(struct btree_trans *trans, u64 inum,
bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
BTREE_ITER_intent);
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -301,7 +301,7 @@ static struct bkey_s_c next_rebalance_entry(struct btree_trans *trans,
struct btree_iter *work_iter)
{
return !kthread_should_stop()
- ? bch2_btree_iter_peek(work_iter)
+ ? bch2_btree_iter_peek(trans, work_iter)
: bkey_s_c_null;
}
@@ -335,7 +335,7 @@ static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans,
work_pos.inode ? BTREE_ID_extents : BTREE_ID_reflink,
work_pos,
BTREE_ITER_all_snapshots);
- struct bkey_s_c k = bch2_btree_iter_peek_slot(extent_iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, extent_iter);
if (bkey_err(k))
return k;
@@ -511,7 +511,7 @@ static int do_rebalance(struct moving_context *ctxt)
struct btree_trans *trans = ctxt->trans;
struct bch_fs *c = trans->c;
struct bch_fs_rebalance *r = &c->rebalance;
- struct btree_iter rebalance_work_iter, extent_iter = { NULL };
+ struct btree_iter rebalance_work_iter, extent_iter = {};
struct bkey_s_c k;
int ret = 0;
@@ -552,7 +552,7 @@ static int do_rebalance(struct moving_context *ctxt)
if (ret)
break;
- bch2_btree_iter_advance(&rebalance_work_iter);
+ bch2_btree_iter_advance(trans, &rebalance_work_iter);
}
bch2_trans_iter_exit(trans, &extent_iter);
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 266c5770c824..79fd18a5a07c 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -198,7 +198,7 @@ static int bch2_journal_replay_accounting_key(struct btree_trans *trans,
bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
BTREE_MAX_DEPTH, k->level,
BTREE_ITER_intent);
- int ret = bch2_btree_iter_traverse(&iter);
+ int ret = bch2_btree_iter_traverse(trans, &iter);
if (ret)
goto out;
@@ -261,7 +261,7 @@ static int bch2_journal_replay_key(struct btree_trans *trans,
bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
BTREE_MAX_DEPTH, k->level,
iter_flags);
- ret = bch2_btree_iter_traverse(&iter);
+ ret = bch2_btree_iter_traverse(trans, &iter);
if (ret)
goto out;
@@ -270,7 +270,7 @@ static int bch2_journal_replay_key(struct btree_trans *trans,
bch2_trans_iter_exit(trans, &iter);
bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
BTREE_MAX_DEPTH, 0, iter_flags);
- ret = bch2_btree_iter_traverse(&iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_btree_increase_depth(trans, iter.path, 0) ?:
-BCH_ERR_transaction_restart_nested;
goto out;
diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c
index ee23f1f93acc..710178e3da4c 100644
--- a/fs/bcachefs/reflink.c
+++ b/fs/bcachefs/reflink.c
@@ -495,7 +495,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
bool reflink_p_may_update_opts_field)
{
struct bch_fs *c = trans->c;
- struct btree_iter reflink_iter = { NULL };
+ struct btree_iter reflink_iter = {};
struct bkey_s_c k;
struct bkey_i *r_v;
struct bkey_i_reflink_p *r_p;
@@ -507,7 +507,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
bch2_trans_iter_init(trans, &reflink_iter, BTREE_ID_reflink, POS_MAX,
BTREE_ITER_intent);
- k = bch2_btree_iter_peek_prev(&reflink_iter);
+ k = bch2_btree_iter_peek_prev(trans, &reflink_iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -569,12 +569,13 @@ err:
return ret;
}
-static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
+static struct bkey_s_c get_next_src(struct btree_trans *trans,
+ struct btree_iter *iter, struct bpos end)
{
struct bkey_s_c k;
int ret;
- for_each_btree_key_max_continue_norestart(*iter, end, 0, k, ret) {
+ for_each_btree_key_max_continue_norestart(trans, *iter, end, 0, k, ret) {
if (bkey_extent_is_unwritten(k))
continue;
@@ -583,7 +584,7 @@ static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
}
if (bkey_ge(iter->pos, end))
- bch2_btree_iter_set_pos(iter, end);
+ bch2_btree_iter_set_pos(trans, iter, end);
return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
}
@@ -647,27 +648,27 @@ s64 bch2_remap_range(struct bch_fs *c,
if (ret)
continue;
- bch2_btree_iter_set_snapshot(&src_iter, src_snapshot);
+ bch2_btree_iter_set_snapshot(trans, &src_iter, src_snapshot);
ret = bch2_subvolume_get_snapshot(trans, dst_inum.subvol,
&dst_snapshot);
if (ret)
continue;
- bch2_btree_iter_set_snapshot(&dst_iter, dst_snapshot);
+ bch2_btree_iter_set_snapshot(trans, &dst_iter, dst_snapshot);
if (dst_inum.inum < src_inum.inum) {
/* Avoid some lock cycle transaction restarts */
- ret = bch2_btree_iter_traverse(&dst_iter);
+ ret = bch2_btree_iter_traverse(trans, &dst_iter);
if (ret)
continue;
}
dst_done = dst_iter.pos.offset - dst_start.offset;
src_want = POS(src_start.inode, src_start.offset + dst_done);
- bch2_btree_iter_set_pos(&src_iter, src_want);
+ bch2_btree_iter_set_pos(trans, &src_iter, src_want);
- src_k = get_next_src(&src_iter, src_end);
+ src_k = get_next_src(trans, &src_iter, src_end);
ret = bkey_err(src_k);
if (ret)
continue;
@@ -738,7 +739,7 @@ s64 bch2_remap_range(struct bch_fs *c,
do {
struct bch_inode_unpacked inode_u;
- struct btree_iter inode_iter = { NULL };
+ struct btree_iter inode_iter = {};
bch2_trans_begin(trans);
diff --git a/fs/bcachefs/sb-members.h b/fs/bcachefs/sb-members.h
index 38261638a611..06bb41a3f360 100644
--- a/fs/bcachefs/sb-members.h
+++ b/fs/bcachefs/sb-members.h
@@ -20,7 +20,7 @@ struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i);
static inline bool bch2_dev_is_online(struct bch_dev *ca)
{
- return !percpu_ref_is_zero(&ca->io_ref);
+ return !percpu_ref_is_zero(&ca->io_ref[READ]);
}
static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *, unsigned);
@@ -156,33 +156,34 @@ static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev
static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
struct bch_dev *ca,
- unsigned state_mask)
+ unsigned state_mask,
+ int rw)
{
rcu_read_lock();
if (ca)
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[rw]);
while ((ca = __bch2_next_dev(c, ca, NULL)) &&
(!((1 << ca->mi.state) & state_mask) ||
- !percpu_ref_tryget(&ca->io_ref)))
+ !percpu_ref_tryget(&ca->io_ref[rw])))
;
rcu_read_unlock();
return ca;
}
-#define __for_each_online_member(_c, _ca, state_mask) \
+#define __for_each_online_member(_c, _ca, state_mask, rw) \
for (struct bch_dev *_ca = NULL; \
- (_ca = bch2_get_next_online_dev(_c, _ca, state_mask));)
+ (_ca = bch2_get_next_online_dev(_c, _ca, state_mask, rw));)
#define for_each_online_member(c, ca) \
- __for_each_online_member(c, ca, ~0)
+ __for_each_online_member(c, ca, ~0, READ)
#define for_each_rw_member(c, ca) \
- __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw))
+ __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw), WRITE)
#define for_each_readable_member(c, ca) \
- __for_each_online_member(c, ca, BIT( BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro))
+ __for_each_online_member(c, ca, BIT( BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro), READ)
static inline bool bch2_dev_exists(const struct bch_fs *c, unsigned dev)
{
@@ -287,7 +288,7 @@ static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev,
rcu_read_lock();
struct bch_dev *ca = bch2_dev_rcu(c, dev);
- if (ca && !percpu_ref_tryget(&ca->io_ref))
+ if (ca && !percpu_ref_tryget(&ca->io_ref[rw]))
ca = NULL;
rcu_read_unlock();
@@ -297,7 +298,7 @@ static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev,
return ca;
if (ca)
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[rw]);
return NULL;
}
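
The single io_ref becomes a two-element array indexed by READ/WRITE, so the member-iteration macros now pin a ref for a specific direction: for_each_rw_member() takes WRITE refs, for_each_online_member() takes READ refs. A minimal sketch of that indexing pattern — plain counters stand in for percpu_ref, and all types here are invented:

#include <stdbool.h>
#include <stdio.h>

enum { READ, WRITE };
enum member_state { STATE_rw, STATE_ro, STATE_failed };

struct dev {
	enum member_state state;
	bool ref_alive[2];	/* stands in for percpu_ref io_ref[2] */
	int refs[2];
};

static bool dev_io_tryget(struct dev *ca, int rw)
{
	if (!ca->ref_alive[rw])
		return false;
	ca->refs[rw]++;
	return true;
}

static struct dev *next_online_dev(struct dev *devs, int nr, int i,
				   unsigned state_mask, int rw)
{
	for (; i < nr; i++)
		if (((1U << devs[i].state) & state_mask) &&
		    dev_io_tryget(&devs[i], rw))
			return &devs[i];
	return NULL;
}

int main(void)
{
	struct dev devs[2] = {
		{ STATE_rw, { true, true  }, { 0, 0 } },
		{ STATE_ro, { true, false }, { 0, 0 } },  /* writes drained */
	};

	/* for_each_rw_member(): rw state only, WRITE refs */
	struct dev *ca = next_online_dev(devs, 2, 0, 1U << STATE_rw, WRITE);
	printf("rw member found: %s\n", ca ? "yes" : "no");
	return 0;
}

Splitting the refs lets a device be drained for writes (going read-only, being removed) while reads keep flowing under the still-live READ ref.
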
diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c
index 0c65065b08ec..b7de29aed839 100644
--- a/fs/bcachefs/snapshot.c
+++ b/fs/bcachefs/snapshot.c
@@ -843,9 +843,6 @@ static int check_snapshot_exists(struct btree_trans *trans, u32 id)
{
struct bch_fs *c = trans->c;
- if (bch2_snapshot_exists(c, id))
- return 0;
-
/* Do we need to reconstruct the snapshot_tree entry as well? */
struct btree_iter iter;
struct bkey_s_c k;
@@ -1074,9 +1071,9 @@ static inline void normalize_snapshot_child_pointers(struct bch_snapshot *s)
static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
- struct btree_iter c_iter = (struct btree_iter) { NULL };
- struct btree_iter tree_iter = (struct btree_iter) { NULL };
+ struct btree_iter iter, p_iter = {};
+ struct btree_iter c_iter = {};
+ struct btree_iter tree_iter = {};
struct bkey_s_c_snapshot s;
u32 parent_id, child_id;
unsigned i;
@@ -1193,13 +1190,13 @@ static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
POS_MIN, BTREE_ITER_intent);
- k = bch2_btree_iter_peek(&iter);
+ k = bch2_btree_iter_peek(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
for (i = 0; i < nr_snapids; i++) {
- k = bch2_btree_iter_prev_slot(&iter);
+ k = bch2_btree_iter_prev_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
diff --git a/fs/bcachefs/str_hash.c b/fs/bcachefs/str_hash.c
index 602afca2f5ef..a90bf7b8a2b4 100644
--- a/fs/bcachefs/str_hash.c
+++ b/fs/bcachefs/str_hash.c
@@ -195,7 +195,7 @@ int __bch2_str_hash_check_key(struct btree_trans *trans,
struct btree_iter *k_iter, struct bkey_s_c hash_k)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
struct printbuf buf = PRINTBUF;
struct bkey_s_c k;
int ret = 0;
diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h
index 575ad1e03904..09a354a26c3b 100644
--- a/fs/bcachefs/str_hash.h
+++ b/fs/bcachefs/str_hash.h
@@ -231,11 +231,11 @@ int bch2_hash_needs_whiteout(struct btree_trans *trans,
struct bkey_s_c k;
int ret;
- bch2_trans_copy_iter(&iter, start);
+ bch2_trans_copy_iter(trans, &iter, start);
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
- for_each_btree_key_continue_norestart(iter, BTREE_ITER_slots, k, ret) {
+ for_each_btree_key_continue_norestart(trans, iter, BTREE_ITER_slots, k, ret) {
if (k.k->type != desc.key_type &&
k.k->type != KEY_TYPE_hash_whiteout)
break;
@@ -280,7 +280,7 @@ struct bkey_s_c bch2_hash_set_or_get_in_snapshot(struct btree_trans *trans,
}
if (!slot.path && !(flags & STR_HASH_must_replace))
- bch2_trans_copy_iter(&slot, iter);
+ bch2_trans_copy_iter(trans, &slot, iter);
if (k.k->type != KEY_TYPE_hash_whiteout)
goto not_found;
diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c
index cd0d8e5e44e7..5537283d0bea 100644
--- a/fs/bcachefs/subvolume.c
+++ b/fs/bcachefs/subvolume.c
@@ -275,7 +275,7 @@ int bch2_subvol_has_children(struct btree_trans *trans, u32 subvol)
struct btree_iter iter;
bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolume_children, POS(subvol, 0), 0);
- struct bkey_s_c k = bch2_btree_iter_peek(&iter);
+ struct bkey_s_c k = bch2_btree_iter_peek(trans, &iter);
bch2_trans_iter_exit(trans, &iter);
return bkey_err(k) ?: k.k && k.k->p.inode == subvol
@@ -574,7 +574,7 @@ int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
bool ro)
{
struct bch_fs *c = trans->c;
- struct btree_iter dst_iter, src_iter = (struct btree_iter) { NULL };
+ struct btree_iter dst_iter, src_iter = {};
struct bkey_i_subvolume *new_subvol = NULL;
struct bkey_i_subvolume *src_subvol = NULL;
u32 parent = 0, new_nodes[2], snapshot_subvols[2];
diff --git a/fs/bcachefs/subvolume.h b/fs/bcachefs/subvolume.h
index 910f6196700e..f640c1e3d639 100644
--- a/fs/bcachefs/subvolume.h
+++ b/fs/bcachefs/subvolume.h
@@ -33,16 +33,16 @@ int bch2_subvol_is_ro_trans(struct btree_trans *, u32);
int bch2_subvol_is_ro(struct bch_fs *, u32);
static inline struct bkey_s_c
-bch2_btree_iter_peek_in_subvolume_max_type(struct btree_iter *iter, struct bpos end,
- u32 subvolid, unsigned flags)
+bch2_btree_iter_peek_in_subvolume_max_type(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos end, u32 subvolid, unsigned flags)
{
u32 snapshot;
- int ret = bch2_subvolume_get_snapshot(iter->trans, subvolid, &snapshot);
+ int ret = bch2_subvolume_get_snapshot(trans, subvolid, &snapshot);
if (ret)
return bkey_s_c_err(ret);
- bch2_btree_iter_set_snapshot(iter, snapshot);
- return bch2_btree_iter_peek_max_type(iter, end, flags);
+ bch2_btree_iter_set_snapshot(trans, iter, snapshot);
+ return bch2_btree_iter_peek_max_type(trans, iter, end, flags);
}
#define for_each_btree_key_in_subvolume_max_continue(_trans, _iter, \
@@ -53,14 +53,14 @@ bch2_btree_iter_peek_in_subvolume_max_type(struct btree_iter *iter, struct bpos
\
do { \
_ret3 = lockrestart_do(_trans, ({ \
- (_k) = bch2_btree_iter_peek_in_subvolume_max_type(&(_iter), \
+ (_k) = bch2_btree_iter_peek_in_subvolume_max_type(trans, &(_iter),\
_end, _subvolid, (_flags)); \
if (!(_k).k) \
break; \
\
bkey_err(_k) ?: (_do); \
})); \
- } while (!_ret3 && bch2_btree_iter_advance(&(_iter))); \
+ } while (!_ret3 && bch2_btree_iter_advance(_trans, &(_iter))); \
\
bch2_trans_iter_exit((_trans), &(_iter)); \
_ret3; \
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index 572b06bfa0b8..e27422b6d9c6 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -248,7 +248,7 @@ struct bch_sb_field *bch2_sb_field_resize_id(struct bch_sb_handle *sb,
struct bch_sb_handle *dev_sb = &ca->disk_sb;
if (bch2_sb_realloc(dev_sb, le32_to_cpu(dev_sb->sb->u64s) + d)) {
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
return NULL;
}
}
@@ -945,7 +945,7 @@ static void write_super_endio(struct bio *bio)
}
closure_put(&ca->fs->sb_write);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
}
static void read_back_super(struct bch_fs *c, struct bch_dev *ca)
@@ -963,7 +963,7 @@ static void read_back_super(struct bch_fs *c, struct bch_dev *ca)
this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_sb], bio_sectors(bio));
- percpu_ref_get(&ca->io_ref);
+ percpu_ref_get(&ca->io_ref[READ]);
closure_bio_submit(bio, &c->sb_write);
}
@@ -989,7 +989,7 @@ static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_sb],
bio_sectors(bio));
- percpu_ref_get(&ca->io_ref);
+ percpu_ref_get(&ca->io_ref[READ]);
closure_bio_submit(bio, &c->sb_write);
}
@@ -1014,13 +1014,20 @@ int bch2_write_super(struct bch_fs *c)
closure_init_stack(cl);
memset(&sb_written, 0, sizeof(sb_written));
+ /*
+ * Note: we do writes to RO devices here, and we might want to change
+ * that in the future.
+ *
+ * For now, we expect to be able to call write_super() when we're not
+ * yet RW:
+ */
for_each_online_member(c, ca) {
ret = darray_push(&online_devices, ca);
if (bch2_fs_fatal_err_on(ret, c, "%s: error allocating online devices", __func__)) {
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
goto out;
}
- percpu_ref_get(&ca->io_ref);
+ percpu_ref_get(&ca->io_ref[READ]);
}
/* Make sure we're using the new magic numbers: */
@@ -1186,7 +1193,7 @@ out:
/* Make new options visible after they're persistent: */
bch2_sb_update(c);
darray_for_each(online_devices, ca)
- percpu_ref_put(&(*ca)->io_ref);
+ percpu_ref_put(&(*ca)->io_ref[READ]);
darray_exit(&online_devices);
printbuf_exit(&err);
return ret;
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 20208f3c5d8b..a58edde43bee 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -185,6 +185,7 @@ static void bch2_dev_unlink(struct bch_dev *);
static void bch2_dev_free(struct bch_dev *);
static int bch2_dev_alloc(struct bch_fs *, unsigned);
static int bch2_dev_sysfs_online(struct bch_fs *, struct bch_dev *);
+static void bch2_dev_io_ref_stop(struct bch_dev *, int);
static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);
struct bch_fs *bch2_dev_to_fs(dev_t dev)
@@ -294,8 +295,10 @@ static void __bch2_fs_read_only(struct bch_fs *c)
/*
* After stopping journal:
*/
- for_each_member_device(c, ca)
+ for_each_member_device(c, ca) {
+ bch2_dev_io_ref_stop(ca, WRITE);
bch2_dev_allocator_remove(c, ca);
+ }
}
#ifndef BCH_WRITE_REF_DEBUG
@@ -465,10 +468,6 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
if (ret)
goto err;
- ret = bch2_fs_mark_dirty(c);
- if (ret)
- goto err;
-
clear_bit(BCH_FS_clean_shutdown, &c->flags);
/*
@@ -480,10 +479,24 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
set_bit(JOURNAL_need_flush_write, &c->journal.flags);
set_bit(JOURNAL_running, &c->journal.flags);
- for_each_rw_member(c, ca)
+ __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw), READ) {
bch2_dev_allocator_add(c, ca);
+ percpu_ref_reinit(&ca->io_ref[WRITE]);
+ }
bch2_recalc_capacity(c);
+ ret = bch2_fs_mark_dirty(c);
+ if (ret)
+ goto err;
+
+ spin_lock(&c->journal.lock);
+ bch2_journal_space_available(&c->journal);
+ spin_unlock(&c->journal.lock);
+
+ ret = bch2_journal_reclaim_start(&c->journal);
+ if (ret)
+ goto err;
+
set_bit(BCH_FS_rw, &c->flags);
set_bit(BCH_FS_was_rw, &c->flags);
@@ -495,11 +508,6 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
atomic_long_inc(&c->writes[i]);
}
#endif
-
- ret = bch2_journal_reclaim_start(&c->journal);
- if (ret)
- goto err;
-
if (!early) {
ret = bch2_fs_read_write_late(c);
if (ret)
@@ -675,6 +683,7 @@ void bch2_fs_free(struct bch_fs *c)
if (ca) {
EBUG_ON(atomic_long_read(&ca->ref) != 1);
+ bch2_dev_io_ref_stop(ca, READ);
bch2_free_super(&ca->disk_sb);
bch2_dev_free(ca);
}
@@ -1199,6 +1208,15 @@ static int bch2_dev_in_fs(struct bch_sb_handle *fs,
/* Device startup/shutdown: */
+static void bch2_dev_io_ref_stop(struct bch_dev *ca, int rw)
+{
+ if (!percpu_ref_is_zero(&ca->io_ref[rw])) {
+ reinit_completion(&ca->io_ref_completion[rw]);
+ percpu_ref_kill(&ca->io_ref[rw]);
+ wait_for_completion(&ca->io_ref_completion[rw]);
+ }
+}
+
static void bch2_dev_release(struct kobject *kobj)
{
struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
@@ -1208,6 +1226,9 @@ static void bch2_dev_release(struct kobject *kobj)
static void bch2_dev_free(struct bch_dev *ca)
{
+ WARN_ON(!percpu_ref_is_zero(&ca->io_ref[WRITE]));
+ WARN_ON(!percpu_ref_is_zero(&ca->io_ref[READ]));
+
cancel_work_sync(&ca->io_error_work);
bch2_dev_unlink(ca);
@@ -1226,7 +1247,8 @@ static void bch2_dev_free(struct bch_dev *ca)
bch2_time_stats_quantiles_exit(&ca->io_latency[WRITE]);
bch2_time_stats_quantiles_exit(&ca->io_latency[READ]);
- percpu_ref_exit(&ca->io_ref);
+ percpu_ref_exit(&ca->io_ref[WRITE]);
+ percpu_ref_exit(&ca->io_ref[READ]);
#ifndef CONFIG_BCACHEFS_DEBUG
percpu_ref_exit(&ca->ref);
#endif
@@ -1238,14 +1260,12 @@ static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca)
lockdep_assert_held(&c->state_lock);
- if (percpu_ref_is_zero(&ca->io_ref))
+ if (percpu_ref_is_zero(&ca->io_ref[READ]))
return;
__bch2_dev_read_only(c, ca);
- reinit_completion(&ca->io_ref_completion);
- percpu_ref_kill(&ca->io_ref);
- wait_for_completion(&ca->io_ref_completion);
+ bch2_dev_io_ref_stop(ca, READ);
bch2_dev_unlink(ca);
@@ -1262,11 +1282,18 @@ static void bch2_dev_ref_complete(struct percpu_ref *ref)
}
#endif
-static void bch2_dev_io_ref_complete(struct percpu_ref *ref)
+static void bch2_dev_io_ref_read_complete(struct percpu_ref *ref)
+{
+ struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref[READ]);
+
+ complete(&ca->io_ref_completion[READ]);
+}
+
+static void bch2_dev_io_ref_write_complete(struct percpu_ref *ref)
{
- struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref);
+ struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref[WRITE]);
- complete(&ca->io_ref_completion);
+ complete(&ca->io_ref_completion[WRITE]);
}
static void bch2_dev_unlink(struct bch_dev *ca)
@@ -1330,7 +1357,8 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
kobject_init(&ca->kobj, &bch2_dev_ktype);
init_completion(&ca->ref_completion);
- init_completion(&ca->io_ref_completion);
+ init_completion(&ca->io_ref_completion[READ]);
+ init_completion(&ca->io_ref_completion[WRITE]);
INIT_WORK(&ca->io_error_work, bch2_io_error_work);
@@ -1356,7 +1384,9 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
bch2_dev_allocator_background_init(ca);
- if (percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
+ if (percpu_ref_init(&ca->io_ref[READ], bch2_dev_io_ref_read_complete,
+ PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
+ percpu_ref_init(&ca->io_ref[WRITE], bch2_dev_io_ref_write_complete,
PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
!(ca->sb_read_scratch = kmalloc(BCH_SB_READ_SCRATCH_BUF_SIZE, GFP_KERNEL)) ||
bch2_dev_buckets_alloc(c, ca) ||
@@ -1419,7 +1449,8 @@ static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
return -BCH_ERR_device_size_too_small;
}
- BUG_ON(!percpu_ref_is_zero(&ca->io_ref));
+ BUG_ON(!percpu_ref_is_zero(&ca->io_ref[READ]));
+ BUG_ON(!percpu_ref_is_zero(&ca->io_ref[WRITE]));
ret = bch2_dev_journal_init(ca, sb->sb);
if (ret)
@@ -1438,7 +1469,7 @@ static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
ca->dev = ca->disk_sb.bdev->bd_dev;
- percpu_ref_reinit(&ca->io_ref);
+ percpu_ref_reinit(&ca->io_ref[READ]);
return 0;
}
@@ -1568,6 +1599,8 @@ static bool bch2_fs_may_start(struct bch_fs *c)
static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
{
+ bch2_dev_io_ref_stop(ca, WRITE);
+
/*
* The allocator thread itself allocates btree nodes, so stop it first:
*/
@@ -1584,6 +1617,10 @@ static void __bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
+
+ if (percpu_ref_is_zero(&ca->io_ref[WRITE]))
+ percpu_ref_reinit(&ca->io_ref[WRITE]);
+
bch2_dev_do_discards(ca);
}
@@ -1731,7 +1768,7 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
return 0;
err:
if (ca->mi.state == BCH_MEMBER_STATE_rw &&
- !percpu_ref_is_zero(&ca->io_ref))
+ !percpu_ref_is_zero(&ca->io_ref[READ]))
__bch2_dev_read_write(c, ca);
up_write(&c->state_lock);
return ret;
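
The new bch2_dev_io_ref_stop() is the classic kill-then-wait idiom: mark the ref dead so new tryget calls fail, then block until in-flight users drain. A userspace approximation of the shape — the real percpu_ref/completion pair has confirm callbacks and RCU semantics this sketch ignores:

#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>

struct io_ref {
	atomic_long count;
	atomic_bool dead;
};

static bool io_ref_tryget(struct io_ref *r)
{
	if (atomic_load(&r->dead))
		return false;
	atomic_fetch_add(&r->count, 1);
	if (atomic_load(&r->dead)) {	/* lost a race with the killer */
		atomic_fetch_sub(&r->count, 1);
		return false;
	}
	return true;
}

static void io_ref_put(struct io_ref *r)
{
	atomic_fetch_sub(&r->count, 1);
}

/* mirrors the shape of bch2_dev_io_ref_stop(): kill, then drain */
static void io_ref_stop(struct io_ref *r)
{
	atomic_store(&r->dead, true);		/* percpu_ref_kill() */
	while (atomic_load(&r->count))		/* wait_for_completion() */
		sched_yield();
}

int main(void)
{
	struct io_ref r = { 0 };

	if (io_ref_tryget(&r))
		io_ref_put(&r);
	io_ref_stop(&r);		/* returns immediately: fully drained */
	return io_ref_tryget(&r);	/* 0: dead refs reject new users */
}

In the patch this runs for WRITE when a device or the filesystem goes read-only, and for READ only when the device is taken offline entirely; percpu_ref_reinit() reverses it on the way back to rw.
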
diff --git a/fs/bcachefs/tests.c b/fs/bcachefs/tests.c
index 6c6469814637..c265b102267a 100644
--- a/fs/bcachefs/tests.c
+++ b/fs/bcachefs/tests.c
@@ -43,7 +43,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
BTREE_ITER_intent);
ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_iter_traverse(trans, &iter) ?:
bch2_trans_update(trans, &iter, &k.k_i, 0));
bch_err_msg(c, ret, "update error");
if (ret)
@@ -51,7 +51,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
pr_info("deleting once");
ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_iter_traverse(trans, &iter) ?:
bch2_btree_delete_at(trans, &iter, 0));
bch_err_msg(c, ret, "delete error (first)");
if (ret)
@@ -59,7 +59,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
pr_info("deleting twice");
ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_iter_traverse(trans, &iter) ?:
bch2_btree_delete_at(trans, &iter, 0));
bch_err_msg(c, ret, "delete error (second)");
if (ret)
@@ -84,7 +84,7 @@ static int test_delete_written(struct bch_fs *c, u64 nr)
BTREE_ITER_intent);
ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_iter_traverse(trans, &iter) ?:
bch2_trans_update(trans, &iter, &k.k_i, 0));
bch_err_msg(c, ret, "update error");
if (ret)
@@ -94,7 +94,7 @@ static int test_delete_written(struct bch_fs *c, u64 nr)
bch2_journal_flush_all_pins(&c->journal);
ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_iter_traverse(trans, &iter) ?:
bch2_btree_delete_at(trans, &iter, 0));
bch_err_msg(c, ret, "delete error");
if (ret)
@@ -349,10 +349,10 @@ static int test_peek_end(struct bch_fs *c, u64 nr)
bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
SPOS(0, 0, U32_MAX), 0);
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
BUG_ON(k.k);
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
BUG_ON(k.k);
bch2_trans_iter_exit(trans, &iter);
@@ -369,10 +369,10 @@ static int test_peek_end_extents(struct bch_fs *c, u64 nr)
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
SPOS(0, 0, U32_MAX), 0);
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
BUG_ON(k.k);
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
BUG_ON(k.k);
bch2_trans_iter_exit(trans, &iter);
@@ -488,7 +488,7 @@ static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi)
trans = bch2_trans_get(c);
bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
SPOS(0, 0, snapid_lo), 0);
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
BUG_ON(k.k->p.snapshot != U32_MAX);
@@ -602,9 +602,9 @@ static int rand_lookup(struct bch_fs *c, u64 nr)
SPOS(0, 0, U32_MAX), 0);
for (i = 0; i < nr; i++) {
- bch2_btree_iter_set_pos(&iter, SPOS(0, test_rand(), U32_MAX));
+ bch2_btree_iter_set_pos(trans, &iter, SPOS(0, test_rand(), U32_MAX));
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(trans, &iter)));
ret = bkey_err(k);
if (ret)
break;
@@ -623,9 +623,9 @@ static int rand_mixed_trans(struct btree_trans *trans,
struct bkey_s_c k;
int ret;
- bch2_btree_iter_set_pos(iter, SPOS(0, pos, U32_MAX));
+ bch2_btree_iter_set_pos(trans, iter, SPOS(0, pos, U32_MAX));
- k = bch2_btree_iter_peek(iter);
+ k = bch2_btree_iter_peek(trans, iter);
ret = bkey_err(k);
bch_err_msg(trans->c, ret, "lookup error");
if (ret)
@@ -672,7 +672,7 @@ static int __do_delete(struct btree_trans *trans, struct bpos pos)
bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
BTREE_ITER_intent);
- k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX));
+ k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX));
ret = bkey_err(k);
if (ret)
goto err;
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
index 1e94f89aabed..6ba5071ab6dd 100644
--- a/fs/bcachefs/util.h
+++ b/fs/bcachefs/util.h
@@ -622,7 +622,7 @@ do { \
#define per_cpu_sum(_p) \
({ \
- typeof(*_p) _ret = 0; \
+ TYPEOF_UNQUAL(*_p) _ret = 0; \
\
int cpu; \
for_each_possible_cpu(cpu) \
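
The per_cpu_sum() change matters when the macro is handed a qualified pointer: with plain typeof, summing through a const per-cpu pointer would declare the accumulator itself const, which cannot be assigned. A standalone sketch of the same trick, assuming a C23 compiler for the typeof_unqual keyword (the kernel's TYPEOF_UNQUAL macro wraps the equivalent builtin):

#include <stdio.h>

#define NR_CPUS 4

static long counters[NR_CPUS] = { 1, 2, 3, 4 };

static long sum(const long *base)
{
	typeof_unqual(*base) ret = 0;	/* 'long', not 'const long' */

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		ret += base[cpu];
	return ret;
}

int main(void)
{
	printf("%ld\n", sum(counters));	/* 10 */
	return 0;
}
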
diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c
index f9667b944c0d..651da52b2cbc 100644
--- a/fs/bcachefs/xattr.c
+++ b/fs/bcachefs/xattr.c
@@ -168,7 +168,7 @@ int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum,
int type, int flags)
{
struct bch_fs *c = trans->c;
- struct btree_iter inode_iter = { NULL };
+ struct btree_iter inode_iter = {};
int ret;
ret = bch2_subvol_is_ro_trans(trans, inum.subvol) ?:
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 1a916716cefe..3dd555db3d32 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1564,7 +1564,7 @@ static int transaction_kthread(void *arg)
do {
cannot_commit = false;
- delay = msecs_to_jiffies(fs_info->commit_interval * 1000);
+ delay = secs_to_jiffies(fs_info->commit_interval);
mutex_lock(&fs_info->transaction_kthread_mutex);
spin_lock(&fs_info->trans_lock);
@@ -1579,9 +1579,9 @@ static int transaction_kthread(void *arg)
cur->state < TRANS_STATE_COMMIT_PREP &&
delta < fs_info->commit_interval) {
spin_unlock(&fs_info->trans_lock);
- delay -= msecs_to_jiffies((delta - 1) * 1000);
+ delay -= secs_to_jiffies(delta - 1);
delay = min(delay,
- msecs_to_jiffies(fs_info->commit_interval * 1000));
+ secs_to_jiffies(fs_info->commit_interval));
goto sleep;
}
transid = cur->transid;
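
The btrfs hunk is a pure readability conversion: with HZ ticks per second, secs_to_jiffies(s) is s * HZ, which is what msecs_to_jiffies(s * 1000) computed via an extra multiply and divide. A trivial sketch of the equivalence — the HZ value and the simplified msecs_to_jiffies() (no rounding or clamping) are assumptions for illustration:

#include <stdio.h>

#define HZ 250UL

static unsigned long msecs_to_jiffies(unsigned long ms)
{
	return ms * HZ / 1000;		/* simplified: no rounding/clamping */
}

static unsigned long secs_to_jiffies(unsigned long s)
{
	return s * HZ;
}

int main(void)
{
	unsigned long commit_interval = 30;	/* seconds */

	printf("%lu == %lu\n",
	       msecs_to_jiffies(commit_interval * 1000),
	       secs_to_jiffies(commit_interval));	/* 7500 == 7500 */
	return 0;
}
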
diff --git a/fs/buffer.c b/fs/buffer.c
index 194eacbefc95..c7abb4a029dc 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2166,7 +2166,7 @@ int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
}
EXPORT_SYMBOL(__block_write_begin);
-static void __block_commit_write(struct folio *folio, size_t from, size_t to)
+void block_commit_write(struct folio *folio, size_t from, size_t to)
{
size_t block_start, block_end;
bool partial = false;
@@ -2204,6 +2204,7 @@ static void __block_commit_write(struct folio *folio, size_t from, size_t to)
if (!partial)
folio_mark_uptodate(folio);
}
+EXPORT_SYMBOL(block_commit_write);
/*
* block_write_begin takes care of the basic task of block allocation and
@@ -2262,7 +2263,7 @@ int block_write_end(struct file *file, struct address_space *mapping,
flush_dcache_folio(folio);
/* This could be a short (even 0-length) commit */
- __block_commit_write(folio, start, start + copied);
+ block_commit_write(folio, start, start + copied);
return copied;
}
@@ -2566,13 +2567,6 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
}
EXPORT_SYMBOL(cont_write_begin);
-void block_commit_write(struct page *page, unsigned from, unsigned to)
-{
- struct folio *folio = page_folio(page);
- __block_commit_write(folio, from, to);
-}
-EXPORT_SYMBOL(block_commit_write);
-
/*
* block_page_mkwrite() is not allowed to change the file size as it gets
* called from a page fault handler when a page is first dirtied. Hence we must
@@ -2618,7 +2612,7 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
if (unlikely(ret))
goto out_unlock;
- __block_commit_write(folio, 0, end);
+ block_commit_write(folio, 0, end);
folio_mark_dirty(folio);
folio_wait_stable(folio);
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 83a60126de0f..14d0cc894000 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -128,10 +128,11 @@ retry:
ret = security_path_mkdir(&path, subdir, 0700);
if (ret < 0)
goto mkdir_error;
- subdir = ERR_PTR(cachefiles_inject_write_error());
- if (!IS_ERR(subdir))
+ ret = cachefiles_inject_write_error();
+ if (ret == 0)
subdir = vfs_mkdir(&nop_mnt_idmap, d_inode(dir), subdir, 0700);
- ret = PTR_ERR(subdir);
+ else
+ subdir = ERR_PTR(ret);
if (IS_ERR(subdir)) {
trace_cachefiles_vfs_error(NULL, d_inode(dir), ret,
cachefiles_trace_mkdir_error);
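
The cachefiles fix untangles error injection from the mkdir result: the old code round-tripped a potentially valid dentry pointer through PTR_ERR(), so ret held garbage on success. The corrected flow keeps the injected errno in ret and only manufactures an error pointer when it is nonzero. A standalone sketch, with ERR_PTR/IS_ERR re-created and all other names invented:

#include <errno.h>
#include <stdio.h>

static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}

static int inject_write_error(void) { return -ENOSPC; }	/* fault fires */
static void *vfs_mkdir_stub(void) { static int d; return &d; }

int main(void)
{
	void *subdir;
	int ret = inject_write_error();

	if (ret == 0)
		subdir = vfs_mkdir_stub();
	else
		subdir = ERR_PTR(ret);		/* errno stays in ret, too */

	if (IS_ERR(subdir))
		printf("mkdir error: %ld\n", PTR_ERR(subdir));	/* -28 */
	else
		printf("mkdir ok\n");
	return 0;
}

With this shape, the tracepoint in the error path can report ret directly instead of whatever PTR_ERR() of a valid pointer happened to yield.
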
diff --git a/fs/dax.c b/fs/dax.c
index 7fd4cd9a51f2..af5045b0f476 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -71,6 +71,11 @@ static unsigned long dax_to_pfn(void *entry)
return xa_to_value(entry) >> DAX_SHIFT;
}
+static struct folio *dax_to_folio(void *entry)
+{
+ return page_folio(pfn_to_page(dax_to_pfn(entry)));
+}
+
static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
@@ -206,7 +211,7 @@ static void dax_wake_entry(struct xa_state *xas, void *entry,
*
* Must be called with the i_pages lock held.
*/
-static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
+static void *get_next_unlocked_entry(struct xa_state *xas, unsigned int order)
{
void *entry;
struct wait_exceptional_entry_queue ewait;
@@ -236,6 +241,37 @@ static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
}
/*
+ * Wait for the given entry to become unlocked. Caller must hold the i_pages
+ * lock and call either put_unlocked_entry() if it did not lock the entry or
+ * dax_unlock_entry() if it did. Returns an unlocked entry if still present.
+ */
+static void *wait_entry_unlocked_exclusive(struct xa_state *xas, void *entry)
+{
+ struct wait_exceptional_entry_queue ewait;
+ wait_queue_head_t *wq;
+
+ init_wait(&ewait.wait);
+ ewait.wait.func = wake_exceptional_entry_func;
+
+ while (unlikely(dax_is_locked(entry))) {
+ wq = dax_entry_waitqueue(xas, entry, &ewait.key);
+ prepare_to_wait_exclusive(wq, &ewait.wait,
+ TASK_UNINTERRUPTIBLE);
+ xas_pause(xas);
+ xas_unlock_irq(xas);
+ schedule();
+ finish_wait(wq, &ewait.wait);
+ xas_lock_irq(xas);
+ entry = xas_load(xas);
+ }
+
+ if (xa_is_internal(entry))
+ return NULL;
+
+ return entry;
+}
+
+/*
* The only thing keeping the address space around is the i_pages lock
* (it's cycled in clear_inode() after removing the entries from i_pages)
* After we call xas_unlock_irq(), we cannot touch xas->xa.
@@ -250,7 +286,7 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
wq = dax_entry_waitqueue(xas, entry, &ewait.key);
/*
- * Unlike get_unlocked_entry() there is no guarantee that this
+ * Unlike get_next_unlocked_entry() there is no guarantee that this
* path ever successfully retrieves an unlocked entry before an
* inode dies. Perform a non-exclusive wait in case this path
* never successfully performs its own wake up.
@@ -307,109 +343,156 @@ static unsigned long dax_entry_size(void *entry)
return PAGE_SIZE;
}
-static unsigned long dax_end_pfn(void *entry)
+/*
+ * A DAX folio is considered shared if it has no mapping set and ->share (which
+ * shares the ->index field) is non-zero. Note this may return false even if the
+ * page is shared between multiple files but has not yet actually been mapped
+ * into multiple address spaces.
+ */
+static inline bool dax_folio_is_shared(struct folio *folio)
{
- return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
+ return !folio->mapping && folio->share;
}
/*
- * Iterate through all mapped pfns represented by an entry, i.e. skip
- * 'empty' and 'zero' entries.
+ * When it is called by dax_insert_entry(), the shared flag will indicate
+ * whether this entry is shared by multiple files. If the page has not
+ * previously been associated with any mappings the ->mapping and ->index
+ * fields will be set. If it has already been associated with a mapping
+ * the mapping will be cleared and the share count set. It's then up to
+ * reverse map users like memory_failure() to call back into the filesystem to
+ * recover ->mapping and ->index information. For example by implementing
+ * dax_holder_operations.
*/
-#define for_each_mapped_pfn(entry, pfn) \
- for (pfn = dax_to_pfn(entry); \
- pfn < dax_end_pfn(entry); pfn++)
-
-static inline bool dax_page_is_shared(struct page *page)
+static void dax_folio_make_shared(struct folio *folio)
{
- return page->mapping == PAGE_MAPPING_DAX_SHARED;
+ /*
+ * folio is not currently shared so mark it as shared by clearing
+ * folio->mapping.
+ */
+ folio->mapping = NULL;
+
+ /*
+ * folio has previously been mapped into one address space so set the
+ * share count.
+ */
+ folio->share = 1;
}
-/*
- * Set the page->mapping with PAGE_MAPPING_DAX_SHARED flag, increase the
- * refcount.
- */
-static inline void dax_page_share_get(struct page *page)
+static inline unsigned long dax_folio_put(struct folio *folio)
{
- if (page->mapping != PAGE_MAPPING_DAX_SHARED) {
+ unsigned long ref;
+ int order, i;
+
+ if (!dax_folio_is_shared(folio))
+ ref = 0;
+ else
+ ref = --folio->share;
+
+ if (ref)
+ return ref;
+
+ folio->mapping = NULL;
+ order = folio_order(folio);
+ if (!order)
+ return 0;
+
+ for (i = 0; i < (1UL << order); i++) {
+ struct dev_pagemap *pgmap = page_pgmap(&folio->page);
+ struct page *page = folio_page(folio, i);
+ struct folio *new_folio = (struct folio *)page;
+
+ ClearPageHead(page);
+ clear_compound_head(page);
+
+ new_folio->mapping = NULL;
/*
- * Reset the index if the page was already mapped
- * regularly before.
+ * Reset pgmap which was over-written by
+ * prep_compound_page().
*/
- if (page->mapping)
- page->share = 1;
- page->mapping = PAGE_MAPPING_DAX_SHARED;
+ new_folio->pgmap = pgmap;
+ new_folio->share = 0;
+ WARN_ON_ONCE(folio_ref_count(new_folio));
}
- page->share++;
+
+ return ref;
}
-static inline unsigned long dax_page_share_put(struct page *page)
+static void dax_folio_init(void *entry)
{
- return --page->share;
+ struct folio *folio = dax_to_folio(entry);
+ int order = dax_entry_order(entry);
+
+ /*
+ * Folio should have been split back to order-0 pages in
+ * dax_folio_put() when they were removed from their
+ * final mapping.
+ */
+ WARN_ON_ONCE(folio_order(folio));
+
+ if (order > 0) {
+ prep_compound_page(&folio->page, order);
+ if (order > 1)
+ INIT_LIST_HEAD(&folio->_deferred_list);
+ WARN_ON_ONCE(folio_ref_count(folio));
+ }
}
-/*
- * When it is called in dax_insert_entry(), the shared flag will indicate that
- * whether this entry is shared by multiple files. If so, set the page->mapping
- * PAGE_MAPPING_DAX_SHARED, and use page->share as refcount.
- */
static void dax_associate_entry(void *entry, struct address_space *mapping,
- struct vm_area_struct *vma, unsigned long address, bool shared)
+ struct vm_area_struct *vma,
+ unsigned long address, bool shared)
{
- unsigned long size = dax_entry_size(entry), pfn, index;
- int i = 0;
+ unsigned long size = dax_entry_size(entry), index;
+ struct folio *folio = dax_to_folio(entry);
+
+ if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry))
+ return;
if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
return;
index = linear_page_index(vma, address & ~(size - 1));
- for_each_mapped_pfn(entry, pfn) {
- struct page *page = pfn_to_page(pfn);
+ if (shared && (folio->mapping || dax_folio_is_shared(folio))) {
+ if (folio->mapping)
+ dax_folio_make_shared(folio);
- if (shared) {
- dax_page_share_get(page);
- } else {
- WARN_ON_ONCE(page->mapping);
- page->mapping = mapping;
- page->index = index + i++;
- }
+ WARN_ON_ONCE(!folio->share);
+ WARN_ON_ONCE(dax_entry_order(entry) != folio_order(folio));
+ folio->share++;
+ } else {
+ WARN_ON_ONCE(folio->mapping);
+ dax_folio_init(entry);
+ folio = dax_to_folio(entry);
+ folio->mapping = mapping;
+ folio->index = index;
}
}
static void dax_disassociate_entry(void *entry, struct address_space *mapping,
- bool trunc)
+ bool trunc)
{
- unsigned long pfn;
+ struct folio *folio = dax_to_folio(entry);
if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
return;
- for_each_mapped_pfn(entry, pfn) {
- struct page *page = pfn_to_page(pfn);
-
- WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
- if (dax_page_is_shared(page)) {
- /* keep the shared flag if this page is still shared */
- if (dax_page_share_put(page) > 0)
- continue;
- } else
- WARN_ON_ONCE(page->mapping && page->mapping != mapping);
- page->mapping = NULL;
- page->index = 0;
- }
+ if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry))
+ return;
+
+ dax_folio_put(folio);
}
static struct page *dax_busy_page(void *entry)
{
- unsigned long pfn;
+ struct folio *folio = dax_to_folio(entry);
- for_each_mapped_pfn(entry, pfn) {
- struct page *page = pfn_to_page(pfn);
+ if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry))
+ return NULL;
- if (page_ref_count(page) > 1)
- return page;
- }
- return NULL;
+ if (folio_ref_count(folio) - folio_mapcount(folio))
+ return &folio->page;
+ else
+ return NULL;
}
/**
@@ -580,7 +663,7 @@ static void *grab_mapping_entry(struct xa_state *xas,
retry:
pmd_downgrade = false;
xas_lock_irq(xas);
- entry = get_unlocked_entry(xas, order);
+ entry = get_next_unlocked_entry(xas, order);
if (entry) {
if (dax_is_conflict(entry))
@@ -690,7 +773,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping,
if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
return NULL;
- if (!dax_mapping(mapping) || !mapping_mapped(mapping))
+ if (!dax_mapping(mapping))
return NULL;
/* If end == LLONG_MAX, all pages from start to till end of file */
@@ -716,8 +799,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping,
xas_for_each(&xas, entry, end_idx) {
if (WARN_ON_ONCE(!xa_is_value(entry)))
continue;
- if (unlikely(dax_is_locked(entry)))
- entry = get_unlocked_entry(&xas, 0);
+ entry = wait_entry_unlocked_exclusive(&xas, entry);
if (entry)
page = dax_busy_page(entry);
put_unlocked_entry(&xas, entry, WAKE_NEXT);
@@ -743,14 +825,14 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
EXPORT_SYMBOL_GPL(dax_layout_busy_page);
static int __dax_invalidate_entry(struct address_space *mapping,
- pgoff_t index, bool trunc)
+ pgoff_t index, bool trunc)
{
XA_STATE(xas, &mapping->i_pages, index);
int ret = 0;
void *entry;
xas_lock_irq(&xas);
- entry = get_unlocked_entry(&xas, 0);
+ entry = get_next_unlocked_entry(&xas, 0);
if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
goto out;
if (!trunc &&
@@ -776,7 +858,9 @@ static int __dax_clear_dirty_range(struct address_space *mapping,
xas_lock_irq(&xas);
xas_for_each(&xas, entry, end) {
- entry = get_unlocked_entry(&xas, 0);
+ entry = wait_entry_unlocked_exclusive(&xas, entry);
+ if (!entry)
+ continue;
xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
put_unlocked_entry(&xas, entry, WAKE_NEXT);
@@ -813,6 +897,107 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
return ret;
}
+void dax_delete_mapping_range(struct address_space *mapping,
+ loff_t start, loff_t end)
+{
+ void *entry;
+ pgoff_t start_idx = start >> PAGE_SHIFT;
+ pgoff_t end_idx;
+ XA_STATE(xas, &mapping->i_pages, start_idx);
+
+ /* If end == LLONG_MAX, all pages from start to till end of file */
+ if (end == LLONG_MAX)
+ end_idx = ULONG_MAX;
+ else
+ end_idx = end >> PAGE_SHIFT;
+
+ xas_lock_irq(&xas);
+ xas_for_each(&xas, entry, end_idx) {
+ if (!xa_is_value(entry))
+ continue;
+ entry = wait_entry_unlocked_exclusive(&xas, entry);
+ if (!entry)
+ continue;
+ dax_disassociate_entry(entry, mapping, true);
+ xas_store(&xas, NULL);
+ mapping->nrpages -= 1UL << dax_entry_order(entry);
+ put_unlocked_entry(&xas, entry, WAKE_ALL);
+ }
+ xas_unlock_irq(&xas);
+}
+EXPORT_SYMBOL_GPL(dax_delete_mapping_range);
+
+static int wait_page_idle(struct page *page,
+ void (cb)(struct inode *),
+ struct inode *inode)
+{
+ return ___wait_var_event(page, dax_page_is_idle(page),
+ TASK_INTERRUPTIBLE, 0, 0, cb(inode));
+}
+
+static void wait_page_idle_uninterruptible(struct page *page,
+ struct inode *inode)
+{
+ ___wait_var_event(page, dax_page_is_idle(page),
+ TASK_UNINTERRUPTIBLE, 0, 0, schedule());
+}
+
+/*
+ * Unmaps the inode and waits for any DMA to complete prior to deleting the
+ * DAX mapping entries for the range.
+ *
+ * For NOWAIT behavior, pass @cb as NULL to early-exit on first found
+ * busy page
+ */
+int dax_break_layout(struct inode *inode, loff_t start, loff_t end,
+ void (cb)(struct inode *))
+{
+ struct page *page;
+ int error = 0;
+
+ if (!dax_mapping(inode->i_mapping))
+ return 0;
+
+ do {
+ page = dax_layout_busy_page_range(inode->i_mapping, start, end);
+ if (!page)
+ break;
+ if (!cb) {
+ error = -ERESTARTSYS;
+ break;
+ }
+
+ error = wait_page_idle(page, cb, inode);
+ } while (error == 0);
+
+ if (!page)
+ dax_delete_mapping_range(inode->i_mapping, start, end);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(dax_break_layout);
+
+void dax_break_layout_final(struct inode *inode)
+{
+ struct page *page;
+
+ if (!dax_mapping(inode->i_mapping))
+ return;
+
+ do {
+ page = dax_layout_busy_page_range(inode->i_mapping, 0,
+ LLONG_MAX);
+ if (!page)
+ break;
+
+ wait_page_idle_uninterruptible(page, inode);
+ } while (true);
+
+ if (!page)
+ dax_delete_mapping_range(inode->i_mapping, 0, LLONG_MAX);
+}
+EXPORT_SYMBOL_GPL(dax_break_layout_final);
+
/*
* Invalidate DAX entry if it is clean.
*/
@@ -895,8 +1080,9 @@ static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
void *old;
dax_disassociate_entry(entry, mapping, false);
- dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
- shared);
+ dax_associate_entry(new_entry, mapping, vmf->vma,
+ vmf->address, shared);
+
/*
* Only swap our new entry into the page cache if the current
* entry is a zero page or an empty entry. If a normal PTE or
@@ -940,7 +1126,7 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
if (unlikely(dax_is_locked(entry))) {
void *old_entry = entry;
- entry = get_unlocked_entry(xas, 0);
+ entry = get_next_unlocked_entry(xas, 0);
/* Entry got punched out / reallocated? */
if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
@@ -1084,9 +1270,7 @@ static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
goto out;
if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
goto out;
- /* For larger pages we need devmap */
- if (length > 1 && !pfn_t_devmap(*pfnp))
- goto out;
+
rc = 0;
out_check_addr:
@@ -1193,7 +1377,7 @@ static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
- ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
+ ret = vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), false);
trace_dax_load_hole(inode, vmf, ret);
return ret;
}
@@ -1664,7 +1848,8 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
bool write = iter->flags & IOMAP_WRITE;
unsigned long entry_flags = pmd ? DAX_PMD : 0;
- int err = 0;
+ struct folio *folio;
+ int ret, err = 0;
pfn_t pfn;
void *kaddr;
@@ -1696,17 +1881,19 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
return dax_fault_return(err);
}
+ folio = dax_to_folio(*entry);
if (dax_fault_is_synchronous(iter, vmf->vma))
return dax_fault_synchronous_pfnp(pfnp, pfn);
- /* insert PMD pfn */
+ folio_ref_inc(folio);
if (pmd)
- return vmf_insert_pfn_pmd(vmf, pfn, write);
+ ret = vmf_insert_folio_pmd(vmf, pfn_folio(pfn_t_to_pfn(pfn)),
+ write);
+ else
+ ret = vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), write);
+ folio_put(folio);
- /* insert PTE pfn */
- if (write)
- return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
- return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
+ return ret;
}
static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
@@ -1949,11 +2136,12 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
{
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
+ struct folio *folio;
void *entry;
vm_fault_t ret;
xas_lock_irq(&xas);
- entry = get_unlocked_entry(&xas, order);
+ entry = get_next_unlocked_entry(&xas, order);
/* Did we race with someone splitting entry or so? */
if (!entry || dax_is_conflict(entry) ||
(order == 0 && !dax_is_pte_entry(entry))) {
@@ -1966,14 +2154,17 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
dax_lock_entry(&xas, entry);
xas_unlock_irq(&xas);
+ folio = pfn_folio(pfn_t_to_pfn(pfn));
+ folio_ref_inc(folio);
if (order == 0)
- ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
+ ret = vmf_insert_page_mkwrite(vmf, &folio->page, true);
#ifdef CONFIG_FS_DAX_PMD
else if (order == PMD_ORDER)
- ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
+ ret = vmf_insert_folio_pmd(vmf, folio, FAULT_FLAG_WRITE);
#endif
else
ret = VM_FAULT_FALLBACK;
+ folio_put(folio);
dax_unlock_entry(&xas, entry);
trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
return ret;
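
Illustration (not part of the patch): a minimal sketch, with hypothetical
myfs_* names, of how a filesystem adopts the new helper. dax_break_layout_inode()
loops until no DAX page in the file is pinned, invoking the filesystem's wait
callback between scans so the pin holder can drop its reference; the real
conversions for ext4 and fuse follow below.

static void myfs_wait_dax_page(struct inode *inode)
{
	/* drop the lock so whoever holds the page pin can release it */
	filemap_invalidate_unlock(inode->i_mapping);
	schedule();
	filemap_invalidate_lock(inode->i_mapping);
}

static int myfs_break_layouts(struct inode *inode)
{
	if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock)))
		return -EINVAL;

	/* wait (interruptibly) until every DAX page in the file is idle */
	return dax_break_layout_inode(inode, myfs_wait_dax_page);
}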
diff --git a/fs/exec.c b/fs/exec.c
index f45859ad13ac..5d1c0d2dc403 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1227,13 +1227,12 @@ int begin_new_exec(struct linux_binprm * bprm)
*/
bprm->point_of_no_return = true;
- /*
- * Make this the only thread in the thread group.
- */
+ /* Make this the only thread in the thread group */
retval = de_thread(me);
if (retval)
goto out;
-
+ /* see the comment in check_unsafe_exec() */
+ current->fs->in_exec = 0;
/*
* Cancel any io_uring activity across execve
*/
@@ -1495,6 +1494,8 @@ static void free_bprm(struct linux_binprm *bprm)
}
free_arg_pages(bprm);
if (bprm->cred) {
+ /* in case exec fails before de_thread() succeeds */
+ current->fs->in_exec = 0;
mutex_unlock(&current->signal->cred_guard_mutex);
abort_creds(bprm->cred);
}
@@ -1616,6 +1617,10 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
* suid exec because the differently privileged task
* will be able to manipulate the current directory, etc.
* It would be nice to force an unshare instead...
+ *
+ * Otherwise we set fs->in_exec = 1 to deny clone(CLONE_FS)
+ * from another sub-thread until de_thread() succeeds; this
+ * state is protected by the cred_guard_mutex we hold.
*/
n_fs = 1;
spin_lock(&p->fs->lock);
@@ -1860,7 +1865,6 @@ static int bprm_execve(struct linux_binprm *bprm)
sched_mm_cid_after_execve(current);
/* execve succeeded */
- current->fs->in_exec = 0;
current->in_execve = 0;
rseq_execve(current);
user_events_execve(current);
@@ -1879,7 +1883,6 @@ out:
force_fatal_sig(SIGSEGV);
sched_mm_cid_after_execve(current);
- current->fs->in_exec = 0;
current->in_execve = 0;
return retval;
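
The other half of the in_exec handshake lives in kernel/fork.c's copy_fs();
a simplified sketch (illustration only, not part of this diff) of how a
CLONE_FS clone from a sibling thread is refused while an exec is in flight:

static int copy_fs_sketch(unsigned long clone_flags, struct task_struct *tsk)
{
	struct fs_struct *fs = current->fs;

	if (clone_flags & CLONE_FS) {
		spin_lock(&fs->lock);
		if (fs->in_exec) {
			/* an exec holds the fs_struct; caller must retry */
			spin_unlock(&fs->lock);
			return -EAGAIN;
		}
		fs->users++;
		spin_unlock(&fs->lock);
		return 0;
	}
	tsk->fs = copy_fs_struct(fs);
	return tsk->fs ? 0 : -ENOMEM;
}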
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index b5845c4846b8..128dd092916b 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -608,4 +608,5 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
}
EXPORT_SYMBOL_GPL(exportfs_decode_fh);
+MODULE_DESCRIPTION("Code mapping from inodes to file handles");
MODULE_LICENSE("GPL");
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index f608f6554b95..2c9b762925c7 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -642,7 +642,7 @@ retry:
goto retry;
if (folio)
- block_commit_write(&folio->page, from, to);
+ block_commit_write(folio, from, to);
out:
if (folio) {
folio_unlock(folio);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index bcb96caf77c0..1dc09ed5d403 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -182,6 +182,8 @@ void ext4_evict_inode(struct inode *inode)
trace_ext4_evict_inode(inode);
+ dax_break_layout_final(inode);
+
if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)
ext4_evict_ea_inode(inode);
if (inode->i_nlink) {
@@ -3981,24 +3983,10 @@ static void ext4_wait_dax_page(struct inode *inode)
int ext4_break_layouts(struct inode *inode)
{
- struct page *page;
- int error;
-
if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock)))
return -EINVAL;
- do {
- page = dax_layout_busy_page(inode->i_mapping);
- if (!page)
- return 0;
-
- error = ___wait_var_event(&page->_refcount,
- atomic_read(&page->_refcount) == 1,
- TASK_INTERRUPTIBLE, 0, 0,
- ext4_wait_dax_page(inode));
- } while (error == 0);
-
- return error;
+ return dax_break_layout_inode(inode, ext4_wait_dax_page);
}
/*
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 898443e98efc..48649be64d6a 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -399,7 +399,7 @@ data_copy:
bh = bh->b_this_page;
}
- block_commit_write(&folio[0]->page, from, from + replaced_size);
+ block_commit_write(folio[0], from, from + replaced_size);
/* Even in case of data=writeback it is reasonable to pin
* inode to transaction, to prevent unexpected data loss */
diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c
index 0b6ee6dd1fd6..0502bf3cdf6a 100644
--- a/fs/fuse/dax.c
+++ b/fs/fuse/dax.c
@@ -666,36 +666,12 @@ static void fuse_wait_dax_page(struct inode *inode)
filemap_invalidate_lock(inode->i_mapping);
}
-/* Should be called with mapping->invalidate_lock held exclusively */
-static int __fuse_dax_break_layouts(struct inode *inode, bool *retry,
- loff_t start, loff_t end)
-{
- struct page *page;
-
- page = dax_layout_busy_page_range(inode->i_mapping, start, end);
- if (!page)
- return 0;
-
- *retry = true;
- return ___wait_var_event(&page->_refcount,
- atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
- 0, 0, fuse_wait_dax_page(inode));
-}
-
-/* dmap_end == 0 leads to unmapping of whole file */
+/* Should be called with mapping->invalidate_lock held exclusively. */
int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start,
u64 dmap_end)
{
- bool retry;
- int ret;
-
- do {
- retry = false;
- ret = __fuse_dax_break_layouts(inode, &retry, dmap_start,
- dmap_end);
- } while (ret == 0 && retry);
-
- return ret;
+ return dax_break_layout(inode, dmap_start, dmap_end,
+ fuse_wait_dax_page);
}
ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 51e31df4c546..6dcbaa218b7a 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -32,6 +32,100 @@ MODULE_ALIAS("devname:fuse");
static struct kmem_cache *fuse_req_cachep;
+const unsigned long fuse_timeout_timer_freq =
+ secs_to_jiffies(FUSE_TIMEOUT_TIMER_FREQ);
+
+bool fuse_request_expired(struct fuse_conn *fc, struct list_head *list)
+{
+ struct fuse_req *req;
+
+ req = list_first_entry_or_null(list, struct fuse_req, list);
+ if (!req)
+ return false;
+ return time_is_before_jiffies(req->create_time + fc->timeout.req_timeout);
+}
+
+bool fuse_fpq_processing_expired(struct fuse_conn *fc, struct list_head *processing)
+{
+ int i;
+
+ for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
+ if (fuse_request_expired(fc, &processing[i]))
+ return true;
+
+ return false;
+}
+
+/*
+ * Check whether any request has gone unanswered for longer than the request
+ * timeout. To do so, we:
+ * - check the fiq pending list
+ * - check the bg queue
+ * - check the fpq io and processing lists
+ *
+ * To make this fast, we only check against the head request on each list since
+ * these are generally queued in order of creation time (i.e. newer requests get
+ * queued to the tail). We might miss a few edge cases (e.g. requests transitioning
+ * between lists, or re-sent requests at the head of the pending list having a
+ * later creation time than other requests on that list) but that is fine:
+ * if a request never gets fulfilled, it will eventually be caught.
+ */
+void fuse_check_timeout(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct fuse_conn *fc = container_of(dwork, struct fuse_conn,
+ timeout.work);
+ struct fuse_iqueue *fiq = &fc->iq;
+ struct fuse_dev *fud;
+ struct fuse_pqueue *fpq;
+ bool expired = false;
+
+ if (!atomic_read(&fc->num_waiting))
+ goto out;
+
+ spin_lock(&fiq->lock);
+ expired = fuse_request_expired(fc, &fiq->pending);
+ spin_unlock(&fiq->lock);
+ if (expired)
+ goto abort_conn;
+
+ spin_lock(&fc->bg_lock);
+ expired = fuse_request_expired(fc, &fc->bg_queue);
+ spin_unlock(&fc->bg_lock);
+ if (expired)
+ goto abort_conn;
+
+ spin_lock(&fc->lock);
+ if (!fc->connected) {
+ spin_unlock(&fc->lock);
+ return;
+ }
+ list_for_each_entry(fud, &fc->devices, entry) {
+ fpq = &fud->pq;
+ spin_lock(&fpq->lock);
+ if (fuse_request_expired(fc, &fpq->io) ||
+ fuse_fpq_processing_expired(fc, fpq->processing)) {
+ spin_unlock(&fpq->lock);
+ spin_unlock(&fc->lock);
+ goto abort_conn;
+ }
+
+ spin_unlock(&fpq->lock);
+ }
+ spin_unlock(&fc->lock);
+
+ if (fuse_uring_request_expired(fc))
+ goto abort_conn;
+
+out:
+ queue_delayed_work(system_wq, &fc->timeout.work,
+ fuse_timeout_timer_freq);
+ return;
+
+abort_conn:
+ fuse_abort_conn(fc);
+}
+
static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
{
INIT_LIST_HEAD(&req->list);
@@ -40,6 +134,7 @@ static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
refcount_set(&req->count, 1);
__set_bit(FR_PENDING, &req->flags);
req->fm = fm;
+ req->create_time = jiffies;
}
static struct fuse_req *fuse_request_alloc(struct fuse_mount *fm, gfp_t flags)
@@ -407,6 +502,24 @@ static int queue_interrupt(struct fuse_req *req)
return 0;
}
+bool fuse_remove_pending_req(struct fuse_req *req, spinlock_t *lock)
+{
+ spin_lock(lock);
+ if (test_bit(FR_PENDING, &req->flags)) {
+ /*
+ * FR_PENDING does not get cleared, as the request will be
+ * destroyed anyway.
+ */
+ list_del(&req->list);
+ spin_unlock(lock);
+ __fuse_put_request(req);
+ req->out.h.error = -EINTR;
+ return true;
+ }
+ spin_unlock(lock);
+ return false;
+}
+
static void request_wait_answer(struct fuse_req *req)
{
struct fuse_conn *fc = req->fm->fc;
@@ -428,22 +541,20 @@ static void request_wait_answer(struct fuse_req *req)
}
if (!test_bit(FR_FORCE, &req->flags)) {
+ bool removed;
+
/* Only fatal signals may interrupt this */
err = wait_event_killable(req->waitq,
test_bit(FR_FINISHED, &req->flags));
if (!err)
return;
- spin_lock(&fiq->lock);
- /* Request is not yet in userspace, bail out */
- if (test_bit(FR_PENDING, &req->flags)) {
- list_del(&req->list);
- spin_unlock(&fiq->lock);
- __fuse_put_request(req);
- req->out.h.error = -EINTR;
+ if (test_bit(FR_URING, &req->flags))
+ removed = fuse_uring_remove_pending_req(req);
+ else
+ removed = fuse_remove_pending_req(req, &fiq->lock);
+ if (removed)
return;
- }
- spin_unlock(&fiq->lock);
}
/*
@@ -1533,14 +1644,10 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_inval_entry_out outarg;
- int err = -ENOMEM;
- char *buf;
+ int err;
+ char *buf = NULL;
struct qstr name;
- buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
- if (!buf)
- goto err;
-
err = -EINVAL;
if (size < sizeof(outarg))
goto err;
@@ -1550,13 +1657,18 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
goto err;
err = -ENAMETOOLONG;
- if (outarg.namelen > FUSE_NAME_MAX)
+ if (outarg.namelen > fc->name_max)
goto err;
err = -EINVAL;
if (size != sizeof(outarg) + outarg.namelen + 1)
goto err;
+ err = -ENOMEM;
+ buf = kzalloc(outarg.namelen + 1, GFP_KERNEL);
+ if (!buf)
+ goto err;
+
name.name = buf;
name.len = outarg.namelen;
err = fuse_copy_one(cs, buf, outarg.namelen + 1);
@@ -1581,14 +1693,10 @@ static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_delete_out outarg;
- int err = -ENOMEM;
- char *buf;
+ int err;
+ char *buf = NULL;
struct qstr name;
- buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
- if (!buf)
- goto err;
-
err = -EINVAL;
if (size < sizeof(outarg))
goto err;
@@ -1598,13 +1706,18 @@ static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
goto err;
err = -ENAMETOOLONG;
- if (outarg.namelen > FUSE_NAME_MAX)
+ if (outarg.namelen > fc->name_max)
goto err;
err = -EINVAL;
if (size != sizeof(outarg) + outarg.namelen + 1)
goto err;
+ err = -ENOMEM;
+ buf = kzalloc(outarg.namelen + 1, GFP_KERNEL);
+ if (!buf)
+ goto err;
+
name.name = buf;
name.len = outarg.namelen;
err = fuse_copy_one(cs, buf, outarg.namelen + 1);
@@ -2275,6 +2388,9 @@ void fuse_abort_conn(struct fuse_conn *fc)
LIST_HEAD(to_end);
unsigned int i;
+ if (fc->timeout.req_timeout)
+ cancel_delayed_work(&fc->timeout.work);
+
/* Background queuing checks fc->connected under bg_lock */
spin_lock(&fc->bg_lock);
fc->connected = 0;
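
The expiry test applied to each list head above is plain jiffies arithmetic;
a standalone sketch (illustration only): a request has expired once its
creation time plus the connection timeout lies in the past, so a stuck
request is detected at most one FUSE_TIMEOUT_TIMER_FREQ period late.

static bool req_expired_sketch(unsigned long create_time,
			       unsigned long req_timeout /* in jiffies */)
{
	/* true once (create_time + req_timeout) is in the past */
	return time_is_before_jiffies(create_time + req_timeout);
}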
diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c
index 82bf458fa9db..accdce2977c5 100644
--- a/fs/fuse/dev_uring.c
+++ b/fs/fuse/dev_uring.c
@@ -140,6 +140,33 @@ void fuse_uring_abort_end_requests(struct fuse_ring *ring)
}
}
+bool fuse_uring_request_expired(struct fuse_conn *fc)
+{
+ struct fuse_ring *ring = fc->ring;
+ struct fuse_ring_queue *queue;
+ int qid;
+
+ if (!ring)
+ return false;
+
+ for (qid = 0; qid < ring->nr_queues; qid++) {
+ queue = READ_ONCE(ring->queues[qid]);
+ if (!queue)
+ continue;
+
+ spin_lock(&queue->lock);
+ if (fuse_request_expired(fc, &queue->fuse_req_queue) ||
+ fuse_request_expired(fc, &queue->fuse_req_bg_queue) ||
+ fuse_fpq_processing_expired(fc, queue->fpq.processing)) {
+ spin_unlock(&queue->lock);
+ return true;
+ }
+ spin_unlock(&queue->lock);
+ }
+
+ return false;
+}
+
void fuse_uring_destruct(struct fuse_conn *fc)
{
struct fuse_ring *ring = fc->ring;
@@ -211,7 +238,6 @@ static struct fuse_ring *fuse_uring_create(struct fuse_conn *fc)
ring->nr_queues = nr_queues;
ring->fc = fc;
ring->max_payload_sz = max_payload_size;
- atomic_set(&ring->queue_refs, 0);
smp_store_release(&fc->ring, ring);
spin_unlock(&fc->lock);
@@ -726,8 +752,6 @@ static void fuse_uring_add_req_to_ring_ent(struct fuse_ring_ent *ent,
struct fuse_req *req)
{
struct fuse_ring_queue *queue = ent->queue;
- struct fuse_conn *fc = req->fm->fc;
- struct fuse_iqueue *fiq = &fc->iq;
lockdep_assert_held(&queue->lock);
@@ -737,9 +761,7 @@ static void fuse_uring_add_req_to_ring_ent(struct fuse_ring_ent *ent,
ent->state);
}
- spin_lock(&fiq->lock);
clear_bit(FR_PENDING, &req->flags);
- spin_unlock(&fiq->lock);
ent->fuse_req = req;
ent->state = FRRS_FUSE_REQ;
list_move(&ent->list, &queue->ent_w_req_queue);
@@ -1238,6 +1260,8 @@ void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req)
if (unlikely(queue->stopped))
goto err_unlock;
+ set_bit(FR_URING, &req->flags);
+ req->ring_queue = queue;
ent = list_first_entry_or_null(&queue->ent_avail_queue,
struct fuse_ring_ent, list);
if (ent)
@@ -1276,6 +1300,8 @@ bool fuse_uring_queue_bq_req(struct fuse_req *req)
return false;
}
+ set_bit(FR_URING, &req->flags);
+ req->ring_queue = queue;
list_add_tail(&req->list, &queue->fuse_req_bg_queue);
ent = list_first_entry_or_null(&queue->ent_avail_queue,
@@ -1306,6 +1332,13 @@ bool fuse_uring_queue_bq_req(struct fuse_req *req)
return true;
}
+bool fuse_uring_remove_pending_req(struct fuse_req *req)
+{
+ struct fuse_ring_queue *queue = req->ring_queue;
+
+ return fuse_remove_pending_req(req, &queue->lock);
+}
+
static const struct fuse_iqueue_ops fuse_io_uring_ops = {
/* should be sent over io-uring as an enhancement */
.send_forget = fuse_dev_queue_forget,
diff --git a/fs/fuse/dev_uring_i.h b/fs/fuse/dev_uring_i.h
index 2102b3d0c1ae..51a563922ce1 100644
--- a/fs/fuse/dev_uring_i.h
+++ b/fs/fuse/dev_uring_i.h
@@ -142,6 +142,8 @@ void fuse_uring_abort_end_requests(struct fuse_ring *ring);
int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req);
bool fuse_uring_queue_bq_req(struct fuse_req *req);
+bool fuse_uring_remove_pending_req(struct fuse_req *req);
+bool fuse_uring_request_expired(struct fuse_conn *fc);
static inline void fuse_uring_abort(struct fuse_conn *fc)
{
@@ -172,12 +174,6 @@ static inline bool fuse_uring_ready(struct fuse_conn *fc)
#else /* CONFIG_FUSE_IO_URING */
-struct fuse_ring;
-
-static inline void fuse_uring_create(struct fuse_conn *fc)
-{
-}
-
static inline void fuse_uring_destruct(struct fuse_conn *fc)
{
}
@@ -200,6 +196,16 @@ static inline bool fuse_uring_ready(struct fuse_conn *fc)
return false;
}
+static inline bool fuse_uring_remove_pending_req(struct fuse_req *req)
+{
+ return false;
+}
+
+static inline bool fuse_uring_request_expired(struct fuse_conn *fc)
+{
+ return false;
+}
+
#endif /* CONFIG_FUSE_IO_URING */
#endif /* _FS_FUSE_DEV_URING_I_H */
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index fa8f1141ea74..83ac192e7fdd 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -370,7 +370,7 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
*inode = NULL;
err = -ENAMETOOLONG;
- if (name->len > FUSE_NAME_MAX)
+ if (name->len > fm->fc->name_max)
goto out;
@@ -1137,6 +1137,9 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
struct fuse_mount *fm = get_fuse_mount(inode);
FUSE_ARGS(args);
+ if (fm->fc->no_link)
+ goto out;
+
memset(&inarg, 0, sizeof(inarg));
inarg.oldnodeid = get_node_id(inode);
args.opcode = FUSE_LINK;
@@ -1151,6 +1154,12 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
else if (err == -EINTR)
fuse_invalidate_attr(inode);
+ if (err == -ENOSYS)
+ fm->fc->no_link = 1;
+out:
+ if (fm->fc->no_link)
+ return -EPERM;
+
return err;
}
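
The no_link handling above follows fuse's usual pattern of caching an ENOSYS
reply in a connection flag; a generic sketch of the pattern (the no_someop
flag and the op itself are hypothetical):

static int fuse_do_someop_sketch(struct fuse_mount *fm, struct fuse_args *args)
{
	int err;

	if (fm->fc->no_someop)		/* server already replied ENOSYS */
		return -EPERM;		/* report a stable error instead */

	err = fuse_simple_request(fm, args);
	if (err == -ENOSYS) {
		fm->fc->no_someop = 1;	/* cache the "unsupported" answer */
		err = -EPERM;
	}
	return err;
}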
@@ -1954,7 +1963,7 @@ int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
if (FUSE_IS_DAX(inode) && is_truncate) {
filemap_invalidate_lock(mapping);
fault_blocked = true;
- err = fuse_dax_break_layouts(inode, 0, 0);
+ err = fuse_dax_break_layouts(inode, 0, -1);
if (err) {
filemap_invalidate_unlock(mapping);
return err;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index d63e56fd3dd2..754378dd9f71 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -253,7 +253,7 @@ static int fuse_open(struct inode *inode, struct file *file)
if (dax_truncate) {
filemap_invalidate_lock(inode->i_mapping);
- err = fuse_dax_break_layouts(inode, 0, 0);
+ err = fuse_dax_break_layouts(inode, 0, -1);
if (err)
goto out_inode_unlock;
}
@@ -3205,7 +3205,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
inode_lock(inode);
if (block_faults) {
filemap_invalidate_lock(inode->i_mapping);
- err = fuse_dax_break_layouts(inode, 0, 0);
+ err = fuse_dax_break_layouts(inode, 0, -1);
if (err)
goto out;
}
diff --git a/fs/fuse/fuse_dev_i.h b/fs/fuse/fuse_dev_i.h
index 3b2bfe1248d3..b3c2e32254ba 100644
--- a/fs/fuse/fuse_dev_i.h
+++ b/fs/fuse/fuse_dev_i.h
@@ -61,6 +61,10 @@ int fuse_copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
void fuse_dev_queue_forget(struct fuse_iqueue *fiq,
struct fuse_forget_link *forget);
void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req);
+bool fuse_remove_pending_req(struct fuse_req *req, spinlock_t *lock);
+
+bool fuse_request_expired(struct fuse_conn *fc, struct list_head *list);
+bool fuse_fpq_processing_expired(struct fuse_conn *fc, struct list_head *processing);
#endif
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index fee96fe7887b..d56d4fd956db 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -38,14 +38,34 @@
/** Bias for fi->writectr, meaning new writepages must not be sent */
#define FUSE_NOWRITE INT_MIN
-/** It could be as large as PATH_MAX, but would that have any uses? */
-#define FUSE_NAME_MAX 1024
+/** Maximum length of a filename, not including terminating null */
+
+/* maximum, small enough for FUSE_MIN_READ_BUFFER */
+#define FUSE_NAME_LOW_MAX 1024
+/* maximum, but needs a request buffer > FUSE_MIN_READ_BUFFER */
+#define FUSE_NAME_MAX (PATH_MAX - 1)
/** Number of dentries for each connection in the control filesystem */
#define FUSE_CTL_NUM_DENTRIES 5
+/* Frequency (in seconds) of request timeout checks, if opted into */
+#define FUSE_TIMEOUT_TIMER_FREQ 15
+
+/** Frequency (in jiffies) of request timeout checks, if opted into */
+extern const unsigned long fuse_timeout_timer_freq;
+
/** Maximum of max_pages received in init_out */
extern unsigned int fuse_max_pages_limit;
+/*
+ * Default timeout (in seconds) for the server to reply to a request
+ * before the connection is aborted, if no timeout was specified on mount.
+ */
+extern unsigned int fuse_default_req_timeout;
+/*
+ * Max timeout (in seconds) for the server to reply to a request before
+ * the connection is aborted.
+ */
+extern unsigned int fuse_max_req_timeout;
/** List of active connections */
extern struct list_head fuse_conn_list;
@@ -378,6 +398,7 @@ struct fuse_io_priv {
* FR_FINISHED: request is finished
* FR_PRIVATE: request is on private list
* FR_ASYNC: request is asynchronous
+ * FR_URING: request is handled through fuse-io-uring
*/
enum fuse_req_flag {
FR_ISREPLY,
@@ -392,6 +413,7 @@ enum fuse_req_flag {
FR_FINISHED,
FR_PRIVATE,
FR_ASYNC,
+ FR_URING,
};
/**
@@ -441,7 +463,10 @@ struct fuse_req {
#ifdef CONFIG_FUSE_IO_URING
void *ring_entry;
+ void *ring_queue;
#endif
+ /** When (in jiffies) the request was created */
+ unsigned long create_time;
};
struct fuse_iqueue;
@@ -867,6 +892,9 @@ struct fuse_conn {
/* Use pages instead of pointer for kernel I/O */
unsigned int use_pages_for_kvec_io:1;
+ /* Is link not implemented by fs? */
+ unsigned int no_link:1;
+
/* Use io_uring for communication */
unsigned int io_uring;
@@ -900,6 +928,9 @@ struct fuse_conn {
/** Version counter for evict inode */
atomic64_t evict_ctr;
+ /* maximum file name length */
+ u32 name_max;
+
/** Called on final put */
void (*release)(struct fuse_conn *);
@@ -935,6 +966,15 @@ struct fuse_conn {
/** uring connection information*/
struct fuse_ring *ring;
#endif
+
+ /** Only used if the connection opts into request timeouts */
+ struct {
+ /* Worker for checking if any requests have timed out */
+ struct delayed_work work;
+
+ /* Request timeout (in jiffies). 0 = no timeout */
+ unsigned int req_timeout;
+ } timeout;
};
/*
@@ -1216,6 +1256,9 @@ void fuse_request_end(struct fuse_req *req);
void fuse_abort_conn(struct fuse_conn *fc);
void fuse_wait_aborted(struct fuse_conn *fc);
+/* Check if any requests timed out */
+void fuse_check_timeout(struct work_struct *work);
+
/**
* Invalidate inode attributes
*/
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index e9db2cb8c150..fd48e8d37f2e 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -37,6 +37,9 @@ DEFINE_MUTEX(fuse_mutex);
static int set_global_limit(const char *val, const struct kernel_param *kp);
unsigned int fuse_max_pages_limit = 256;
+/* default is no timeout */
+unsigned int fuse_default_req_timeout;
+unsigned int fuse_max_req_timeout;
unsigned max_user_bgreq;
module_param_call(max_user_bgreq, set_global_limit, param_get_uint,
@@ -979,6 +982,8 @@ void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
fc->user_ns = get_user_ns(user_ns);
fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
fc->max_pages_limit = fuse_max_pages_limit;
+ fc->name_max = FUSE_NAME_LOW_MAX;
+ fc->timeout.req_timeout = 0;
if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
fuse_backing_files_init(fc);
@@ -1007,6 +1012,8 @@ void fuse_conn_put(struct fuse_conn *fc)
if (IS_ENABLED(CONFIG_FUSE_DAX))
fuse_dax_conn_free(fc);
+ if (fc->timeout.req_timeout)
+ cancel_delayed_work_sync(&fc->timeout.work);
if (fiq->ops->release)
fiq->ops->release(fiq);
put_pid_ns(fc->pid_ns);
@@ -1257,6 +1264,34 @@ static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
spin_unlock(&fc->bg_lock);
}
+static void set_request_timeout(struct fuse_conn *fc, unsigned int timeout)
+{
+ fc->timeout.req_timeout = secs_to_jiffies(timeout);
+ INIT_DELAYED_WORK(&fc->timeout.work, fuse_check_timeout);
+ queue_delayed_work(system_wq, &fc->timeout.work,
+ fuse_timeout_timer_freq);
+}
+
+static void init_server_timeout(struct fuse_conn *fc, unsigned int timeout)
+{
+ if (!timeout && !fuse_max_req_timeout && !fuse_default_req_timeout)
+ return;
+
+ if (!timeout)
+ timeout = fuse_default_req_timeout;
+
+ if (fuse_max_req_timeout) {
+ if (timeout)
+ timeout = min(fuse_max_req_timeout, timeout);
+ else
+ timeout = fuse_max_req_timeout;
+ }
+
+ timeout = max(FUSE_TIMEOUT_TIMER_FREQ, timeout);
+
+ set_request_timeout(fc, timeout);
+}
+
struct fuse_init_args {
struct fuse_args args;
struct fuse_init_in in;
@@ -1275,6 +1310,7 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
ok = false;
else {
unsigned long ra_pages;
+ unsigned int timeout = 0;
process_init_limits(fc, arg);
@@ -1338,6 +1374,13 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
fc->max_pages =
min_t(unsigned int, fc->max_pages_limit,
max_t(unsigned int, arg->max_pages, 1));
+
+ /*
+ * PATH_MAX file names might need two pages for
+ * ops like rename
+ */
+ if (fc->max_pages > 1)
+ fc->name_max = FUSE_NAME_MAX;
}
if (IS_ENABLED(CONFIG_FUSE_DAX)) {
if (flags & FUSE_MAP_ALIGNMENT &&
@@ -1392,12 +1435,17 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
}
if (flags & FUSE_OVER_IO_URING && fuse_uring_enabled())
fc->io_uring = 1;
+
+ if (flags & FUSE_REQUEST_TIMEOUT)
+ timeout = arg->request_timeout;
} else {
ra_pages = fc->max_read / PAGE_SIZE;
fc->no_lock = 1;
fc->no_flock = 1;
}
+ init_server_timeout(fc, timeout);
+
fm->sb->s_bdi->ra_pages =
min(fm->sb->s_bdi->ra_pages, ra_pages);
fc->minor = arg->minor;
@@ -1439,7 +1487,8 @@ void fuse_send_init(struct fuse_mount *fm)
FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT |
FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP |
FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP |
- FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND | FUSE_ALLOW_IDMAP;
+ FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND | FUSE_ALLOW_IDMAP |
+ FUSE_REQUEST_TIMEOUT;
#ifdef CONFIG_FUSE_DAX
if (fm->fc->dax)
flags |= FUSE_MAP_ALIGNMENT;
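
How init_server_timeout() combines its three inputs can be condensed into a
standalone sketch (illustration only; same logic as above): a server-requested
timeout wins but is capped by the max sysctl, the default sysctl fills in when
the server asked for none, and the result is never finer than the timer period.

/* returns the effective timeout in seconds; 0 means "no timeout at all" */
static unsigned int resolve_timeout_sketch(unsigned int srv, unsigned int dflt,
					   unsigned int cap)
{
	unsigned int timeout = srv;

	if (!timeout && !cap && !dflt)
		return 0;		/* neither server nor sysctls opted in */
	if (!timeout)
		timeout = dflt;		/* fall back to the sysctl default */
	if (cap)
		timeout = timeout ? min(cap, timeout) : cap;
	/* e.g. srv = 5 yields 15: never finer than the timer granularity */
	return max(FUSE_TIMEOUT_TIMER_FREQ, timeout);
}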
diff --git a/fs/fuse/sysctl.c b/fs/fuse/sysctl.c
index 63fb1e5bee30..e2d921abcb88 100644
--- a/fs/fuse/sysctl.c
+++ b/fs/fuse/sysctl.c
@@ -13,6 +13,12 @@ static struct ctl_table_header *fuse_table_header;
/* Bound by fuse_init_out max_pages, which is a u16 */
static unsigned int sysctl_fuse_max_pages_limit = 65535;
+/*
+ * fuse_init_out request timeouts are u16.
+ * This goes up to ~18 hours, which is plenty for a timeout.
+ */
+static unsigned int sysctl_fuse_req_timeout_limit = 65535;
+
static const struct ctl_table fuse_sysctl_table[] = {
{
.procname = "max_pages_limit",
@@ -23,6 +29,24 @@ static const struct ctl_table fuse_sysctl_table[] = {
.extra1 = SYSCTL_ONE,
.extra2 = &sysctl_fuse_max_pages_limit,
},
+ {
+ .procname = "default_request_timeout",
+ .data = &fuse_default_req_timeout,
+ .maxlen = sizeof(fuse_default_req_timeout),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &sysctl_fuse_req_timeout_limit,
+ },
+ {
+ .procname = "max_request_timeout",
+ .data = &fuse_max_req_timeout,
+ .maxlen = sizeof(fuse_max_req_timeout),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &sysctl_fuse_req_timeout_limit,
+ },
};
int fuse_sysctl_register(void)
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index 82afe78ec542..2c7b24cb67ad 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -1017,8 +1017,7 @@ static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
if (kaddr)
*kaddr = fs->window_kaddr + offset;
if (pfn)
- *pfn = phys_to_pfn_t(fs->window_phys_addr + offset,
- PFN_DEV | PFN_MAP);
+ *pfn = phys_to_pfn_t(fs->window_phys_addr + offset, 0);
return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
}
diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
index 8b39c15c408c..15b2f094d36e 100644
--- a/fs/hostfs/hostfs.h
+++ b/fs/hostfs/hostfs.h
@@ -60,7 +60,7 @@ struct hostfs_stat {
unsigned int uid;
unsigned int gid;
unsigned long long size;
- struct hostfs_timespec atime, mtime, ctime;
+ struct hostfs_timespec atime, mtime, ctime, btime;
unsigned int blksize;
unsigned long long blocks;
struct {
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index a2c6b9051c5b..702c41317589 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -33,6 +33,7 @@ struct hostfs_inode_info {
struct inode vfs_inode;
struct mutex open_mutex;
dev_t dev;
+ struct hostfs_timespec btime;
};
static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode)
@@ -547,6 +548,7 @@ static int hostfs_inode_set(struct inode *ino, void *data)
}
HOSTFS_I(ino)->dev = dev;
+ HOSTFS_I(ino)->btime = st->btime;
ino->i_ino = st->ino;
ino->i_mode = st->mode;
return hostfs_inode_update(ino, st);
@@ -557,7 +559,10 @@ static int hostfs_inode_test(struct inode *inode, void *data)
const struct hostfs_stat *st = data;
dev_t dev = MKDEV(st->dev.maj, st->dev.min);
- return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == dev;
+ return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == dev &&
+ (inode->i_mode & S_IFMT) == (st->mode & S_IFMT) &&
+ HOSTFS_I(inode)->btime.tv_sec == st->btime.tv_sec &&
+ HOSTFS_I(inode)->btime.tv_nsec == st->btime.tv_nsec;
}
static struct inode *hostfs_iget(struct super_block *sb, char *name)
diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c
index 97e9c40a9448..3bcd9f35e70b 100644
--- a/fs/hostfs/hostfs_user.c
+++ b/fs/hostfs/hostfs_user.c
@@ -18,39 +18,48 @@
#include "hostfs.h"
#include <utime.h>
-static void stat64_to_hostfs(const struct stat64 *buf, struct hostfs_stat *p)
+static void statx_to_hostfs(const struct statx *buf, struct hostfs_stat *p)
{
- p->ino = buf->st_ino;
- p->mode = buf->st_mode;
- p->nlink = buf->st_nlink;
- p->uid = buf->st_uid;
- p->gid = buf->st_gid;
- p->size = buf->st_size;
- p->atime.tv_sec = buf->st_atime;
- p->atime.tv_nsec = 0;
- p->ctime.tv_sec = buf->st_ctime;
- p->ctime.tv_nsec = 0;
- p->mtime.tv_sec = buf->st_mtime;
- p->mtime.tv_nsec = 0;
- p->blksize = buf->st_blksize;
- p->blocks = buf->st_blocks;
- p->rdev.maj = os_major(buf->st_rdev);
- p->rdev.min = os_minor(buf->st_rdev);
- p->dev.maj = os_major(buf->st_dev);
- p->dev.min = os_minor(buf->st_dev);
+ p->ino = buf->stx_ino;
+ p->mode = buf->stx_mode;
+ p->nlink = buf->stx_nlink;
+ p->uid = buf->stx_uid;
+ p->gid = buf->stx_gid;
+ p->size = buf->stx_size;
+ p->atime.tv_sec = buf->stx_atime.tv_sec;
+ p->atime.tv_nsec = buf->stx_atime.tv_nsec;
+ p->ctime.tv_sec = buf->stx_ctime.tv_sec;
+ p->ctime.tv_nsec = buf->stx_ctime.tv_nsec;
+ p->mtime.tv_sec = buf->stx_mtime.tv_sec;
+ p->mtime.tv_nsec = buf->stx_mtime.tv_nsec;
+ if (buf->stx_mask & STATX_BTIME) {
+ p->btime.tv_sec = buf->stx_btime.tv_sec;
+ p->btime.tv_nsec = buf->stx_btime.tv_nsec;
+ } else {
+ memset(&p->btime, 0, sizeof(p->btime));
+ }
+ p->blksize = buf->stx_blksize;
+ p->blocks = buf->stx_blocks;
+ p->rdev.maj = buf->stx_rdev_major;
+ p->rdev.min = buf->stx_rdev_minor;
+ p->dev.maj = buf->stx_dev_major;
+ p->dev.min = buf->stx_dev_minor;
}
int stat_file(const char *path, struct hostfs_stat *p, int fd)
{
- struct stat64 buf;
+ struct statx buf;
+ int flags = AT_SYMLINK_NOFOLLOW;
if (fd >= 0) {
- if (fstat64(fd, &buf) < 0)
- return -errno;
- } else if (lstat64(path, &buf) < 0) {
- return -errno;
+ flags |= AT_EMPTY_PATH;
+ path = "";
}
- stat64_to_hostfs(&buf, p);
+
+ if (statx(fd, path, flags, STATX_BASIC_STATS | STATX_BTIME, &buf) < 0)
+ return -errno;
+
+ statx_to_hostfs(&buf, p);
return 0;
}
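
A small userspace analogue (not part of the patch) of the call that
stat_file() now issues; note that stx_mask must be checked before trusting
stx_btime, exactly as statx_to_hostfs() does above:

#define _GNU_SOURCE
#include <fcntl.h>	/* AT_FDCWD, AT_SYMLINK_NOFOLLOW */
#include <stdio.h>
#include <sys/stat.h>	/* statx(), struct statx, STATX_* masks */

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".";
	struct statx stx;

	if (statx(AT_FDCWD, path, AT_SYMLINK_NOFOLLOW,
		  STATX_BASIC_STATS | STATX_BTIME, &stx) < 0) {
		perror("statx");
		return 1;
	}

	/* stx_mask reports which fields the filesystem actually filled in */
	if (stx.stx_mask & STATX_BTIME)
		printf("btime: %lld.%09u\n",
		       (long long)stx.stx_btime.tv_sec, stx.stx_btime.tv_nsec);
	else
		printf("btime: not reported by this filesystem\n");
	return 0;
}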
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index d98caedbb723..e4de5425838d 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -193,19 +193,21 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
}
/*
- * Someone wants to read @bytes from a HWPOISON hugetlb @page from @offset.
+ * Someone wants to read @bytes from a HWPOISON hugetlb @folio from @offset.
* Returns the maximum number of bytes one can read without touching the 1st raw
- * HWPOISON subpage.
+ * HWPOISON page.
*
* The implementation borrows the iteration logic from copy_page_to_iter*.
*/
-static size_t adjust_range_hwpoison(struct page *page, size_t offset, size_t bytes)
+static size_t adjust_range_hwpoison(struct folio *folio, size_t offset,
+ size_t bytes)
{
+ struct page *page;
size_t n = 0;
size_t res = 0;
- /* First subpage to start the loop. */
- page = nth_page(page, offset / PAGE_SIZE);
+ /* First page to start the loop. */
+ page = folio_page(folio, offset / PAGE_SIZE);
offset %= PAGE_SIZE;
while (1) {
if (is_raw_hwpoison_page_in_hugepage(page))
@@ -278,10 +280,10 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
else {
/*
* Adjust how many bytes safe to read without
- * touching the 1st raw HWPOISON subpage after
+ * touching the 1st raw HWPOISON page after
* offset.
*/
- want = adjust_range_hwpoison(&folio->page, offset, nr);
+ want = adjust_range_hwpoison(folio, offset, nr);
if (want == 0) {
folio_put(folio);
retval = -EIO;
@@ -338,8 +340,8 @@ static void hugetlb_delete_from_page_cache(struct folio *folio)
* mutex for the page in the mapping. So, we can not race with page being
* faulted into the vma.
*/
-static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
- unsigned long addr, struct page *page)
+static bool hugetlb_vma_maps_pfn(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn)
{
pte_t *ptep, pte;
@@ -351,7 +353,7 @@ static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
if (huge_pte_none(pte) || !pte_present(pte))
return false;
- if (pte_page(pte) == page)
+ if (pte_pfn(pte) == pfn)
return true;
return false;
@@ -396,7 +398,7 @@ static void hugetlb_unmap_file_folio(struct hstate *h,
{
struct rb_root_cached *root = &mapping->i_mmap;
struct hugetlb_vma_lock *vma_lock;
- struct page *page = &folio->page;
+ unsigned long pfn = folio_pfn(folio);
struct vm_area_struct *vma;
unsigned long v_start;
unsigned long v_end;
@@ -412,7 +414,7 @@ retry:
v_start = vma_offset_start(vma, start);
v_end = vma_offset_end(vma, end);
- if (!hugetlb_vma_maps_page(vma, v_start, page))
+ if (!hugetlb_vma_maps_pfn(vma, v_start, pfn))
continue;
if (!hugetlb_vma_trylock_write(vma)) {
@@ -462,7 +464,7 @@ retry:
*/
v_start = vma_offset_start(vma, start);
v_end = vma_offset_end(vma, end);
- if (hugetlb_vma_maps_page(vma, v_start, page))
+ if (hugetlb_vma_maps_pfn(vma, v_start, pfn))
unmap_hugepage_range(vma, v_start, v_end, NULL,
ZAP_FLAG_DROP_MARKER);
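
The contract of adjust_range_hwpoison() can be modelled in plain C
(illustration only, not kernel code): walk PAGE_SIZE chunks starting at
offset, stop at the first poisoned page, and report how many of the
requested bytes are safe to read.

#include <stdbool.h>
#include <stddef.h>

#define SK_PAGE_SIZE 4096ul

static size_t safe_read_bytes(const bool *poisoned, size_t offset, size_t bytes)
{
	size_t idx = offset / SK_PAGE_SIZE;	/* first page of the range */
	size_t in_page = offset % SK_PAGE_SIZE;
	size_t res = 0;

	while (bytes) {
		size_t n;

		if (poisoned[idx])
			break;			/* 1st raw HWPOISON page */
		n = SK_PAGE_SIZE - in_page;	/* rest of this page */
		if (n > bytes)
			n = bytes;
		res += n;
		bytes -= n;
		in_page = 0;
		idx++;
	}
	return res;
}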
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 814b7f679486..31553372b33a 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1480,7 +1480,7 @@ static int iomap_folio_mkwrite_iter(struct iomap_iter *iter,
&iter->iomap);
if (ret)
return ret;
- block_commit_write(&folio->page, 0, length);
+ block_commit_write(folio, 0, length);
} else {
WARN_ON_ONCE(!folio_test_uptodate(folio));
folio_mark_dirty(folio);
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index d296aad70800..fc70d72c3fe8 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -17,7 +17,7 @@
#include "kernfs-internal.h"
-static DEFINE_RWLOCK(kernfs_rename_lock); /* kn->parent and ->name */
+DEFINE_RWLOCK(kernfs_rename_lock); /* kn->parent and ->name */
/*
* Don't use rename_lock to piggy back on pr_cont_buf. We don't want to
* call pr_cont() while holding rename_lock. Because sometimes pr_cont()
@@ -51,22 +51,14 @@ static bool kernfs_lockdep(struct kernfs_node *kn)
#endif
}
-static int kernfs_name_locked(struct kernfs_node *kn, char *buf, size_t buflen)
-{
- if (!kn)
- return strscpy(buf, "(null)", buflen);
-
- return strscpy(buf, kn->parent ? kn->name : "/", buflen);
-}
-
/* kernfs_node_depth - compute depth from @from to @to */
static size_t kernfs_depth(struct kernfs_node *from, struct kernfs_node *to)
{
size_t depth = 0;
- while (to->parent && to != from) {
+ while (rcu_dereference(to->__parent) && to != from) {
depth++;
- to = to->parent;
+ to = rcu_dereference(to->__parent);
}
return depth;
}
@@ -84,18 +76,18 @@ static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a,
db = kernfs_depth(rb->kn, b);
while (da > db) {
- a = a->parent;
+ a = rcu_dereference(a->__parent);
da--;
}
while (db > da) {
- b = b->parent;
+ b = rcu_dereference(b->__parent);
db--;
}
/* worst case b and a will be the same at root */
while (b != a) {
- b = b->parent;
- a = a->parent;
+ b = rcu_dereference(b->__parent);
+ a = rcu_dereference(a->__parent);
}
return a;
@@ -168,10 +160,13 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
/* Calculate how many bytes we need for the rest */
for (i = depth_to - 1; i >= 0; i--) {
+ const char *name;
+
for (kn = kn_to, j = 0; j < i; j++)
- kn = kn->parent;
+ kn = rcu_dereference(kn->__parent);
- len += scnprintf(buf + len, buflen - len, "/%s", kn->name);
+ name = rcu_dereference(kn->name);
+ len += scnprintf(buf + len, buflen - len, "/%s", name);
}
return len;
@@ -195,13 +190,18 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
*/
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
{
- unsigned long flags;
- int ret;
+ struct kernfs_node *kn_parent;
- read_lock_irqsave(&kernfs_rename_lock, flags);
- ret = kernfs_name_locked(kn, buf, buflen);
- read_unlock_irqrestore(&kernfs_rename_lock, flags);
- return ret;
+ if (!kn)
+ return strscpy(buf, "(null)", buflen);
+
+ guard(rcu)();
+ /*
+ * KERNFS_ROOT_INVARIANT_PARENT is ignored here. The name is RCU-freed and
+ * the parent either exists or it does not.
+ */
+ kn_parent = rcu_dereference(kn->__parent);
+ return strscpy(buf, kn_parent ? rcu_dereference(kn->name) : "/", buflen);
}
/**
@@ -223,13 +223,17 @@ int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from,
char *buf, size_t buflen)
{
- unsigned long flags;
- int ret;
+ struct kernfs_root *root;
- read_lock_irqsave(&kernfs_rename_lock, flags);
- ret = kernfs_path_from_node_locked(to, from, buf, buflen);
- read_unlock_irqrestore(&kernfs_rename_lock, flags);
- return ret;
+ guard(rcu)();
+ if (to) {
+ root = kernfs_root(to);
+ if (!(root->flags & KERNFS_ROOT_INVARIANT_PARENT)) {
+ guard(read_lock_irqsave)(&kernfs_rename_lock);
+ return kernfs_path_from_node_locked(to, from, buf, buflen);
+ }
+ }
+ return kernfs_path_from_node_locked(to, from, buf, buflen);
}
EXPORT_SYMBOL_GPL(kernfs_path_from_node);
@@ -295,7 +299,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
unsigned long flags;
read_lock_irqsave(&kernfs_rename_lock, flags);
- parent = kn->parent;
+ parent = kernfs_parent(kn);
kernfs_get(parent);
read_unlock_irqrestore(&kernfs_rename_lock, flags);
@@ -336,13 +340,13 @@ static int kernfs_name_compare(unsigned int hash, const char *name,
return -1;
if (ns > kn->ns)
return 1;
- return strcmp(name, kn->name);
+ return strcmp(name, kernfs_rcu_name(kn));
}
static int kernfs_sd_compare(const struct kernfs_node *left,
const struct kernfs_node *right)
{
- return kernfs_name_compare(left->hash, left->name, left->ns, right);
+ return kernfs_name_compare(left->hash, kernfs_rcu_name(left), left->ns, right);
}
/**
@@ -360,8 +364,12 @@ static int kernfs_sd_compare(const struct kernfs_node *left,
*/
static int kernfs_link_sibling(struct kernfs_node *kn)
{
- struct rb_node **node = &kn->parent->dir.children.rb_node;
struct rb_node *parent = NULL;
+ struct kernfs_node *kn_parent;
+ struct rb_node **node;
+
+ kn_parent = kernfs_parent(kn);
+ node = &kn_parent->dir.children.rb_node;
while (*node) {
struct kernfs_node *pos;
@@ -380,13 +388,13 @@ static int kernfs_link_sibling(struct kernfs_node *kn)
/* add new node and rebalance the tree */
rb_link_node(&kn->rb, parent, node);
- rb_insert_color(&kn->rb, &kn->parent->dir.children);
+ rb_insert_color(&kn->rb, &kn_parent->dir.children);
/* successfully added, account subdir number */
down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
if (kernfs_type(kn) == KERNFS_DIR)
- kn->parent->dir.subdirs++;
- kernfs_inc_rev(kn->parent);
+ kn_parent->dir.subdirs++;
+ kernfs_inc_rev(kn_parent);
up_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
return 0;
@@ -407,16 +415,19 @@ static int kernfs_link_sibling(struct kernfs_node *kn)
*/
static bool kernfs_unlink_sibling(struct kernfs_node *kn)
{
+ struct kernfs_node *kn_parent;
+
if (RB_EMPTY_NODE(&kn->rb))
return false;
+ kn_parent = kernfs_parent(kn);
down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
if (kernfs_type(kn) == KERNFS_DIR)
- kn->parent->dir.subdirs--;
- kernfs_inc_rev(kn->parent);
+ kn_parent->dir.subdirs--;
+ kernfs_inc_rev(kn_parent);
up_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
- rb_erase(&kn->rb, &kn->parent->dir.children);
+ rb_erase(&kn->rb, &kn_parent->dir.children);
RB_CLEAR_NODE(&kn->rb);
return true;
}
@@ -533,7 +544,8 @@ static void kernfs_free_rcu(struct rcu_head *rcu)
{
struct kernfs_node *kn = container_of(rcu, struct kernfs_node, rcu);
- kfree_const(kn->name);
+ /* If the whole node goes away, then name can't be used outside */
+ kfree_const(rcu_access_pointer(kn->name));
if (kn->iattr) {
simple_xattrs_free(&kn->iattr->xattrs, NULL);
@@ -562,11 +574,12 @@ void kernfs_put(struct kernfs_node *kn)
* Moving/renaming is always done while holding reference.
* kn->parent won't change beneath us.
*/
- parent = kn->parent;
+ parent = kernfs_parent(kn);
WARN_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS,
"kernfs_put: %s/%s: released with incorrect active_ref %d\n",
- parent ? parent->name : "", kn->name, atomic_read(&kn->active));
+ parent ? rcu_dereference(parent->name) : "",
+ rcu_dereference(kn->name), atomic_read(&kn->active));
if (kernfs_type(kn) == KERNFS_LINK)
kernfs_put(kn->symlink.target_kn);
@@ -643,7 +656,7 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
RB_CLEAR_NODE(&kn->rb);
- kn->name = name;
+ rcu_assign_pointer(kn->name, name);
kn->mode = mode;
kn->flags = flags;
@@ -701,7 +714,7 @@ struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
name, mode, uid, gid, flags);
if (kn) {
kernfs_get(parent);
- kn->parent = parent;
+ rcu_assign_pointer(kn->__parent, parent);
}
return kn;
}
@@ -769,18 +782,20 @@ err_unlock:
*/
int kernfs_add_one(struct kernfs_node *kn)
{
- struct kernfs_node *parent = kn->parent;
- struct kernfs_root *root = kernfs_root(parent);
+ struct kernfs_root *root = kernfs_root(kn);
struct kernfs_iattrs *ps_iattr;
+ struct kernfs_node *parent;
bool has_ns;
int ret;
down_write(&root->kernfs_rwsem);
+ parent = kernfs_parent(kn);
ret = -EINVAL;
has_ns = kernfs_ns_enabled(parent);
if (WARN(has_ns != (bool)kn->ns, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
- has_ns ? "required" : "invalid", parent->name, kn->name))
+ has_ns ? "required" : "invalid",
+ kernfs_rcu_name(parent), kernfs_rcu_name(kn)))
goto out_unlock;
if (kernfs_type(parent) != KERNFS_DIR)
@@ -790,7 +805,7 @@ int kernfs_add_one(struct kernfs_node *kn)
if (parent->flags & (KERNFS_REMOVING | KERNFS_EMPTY_DIR))
goto out_unlock;
- kn->hash = kernfs_name_hash(kn->name, kn->ns);
+ kn->hash = kernfs_name_hash(kernfs_rcu_name(kn), kn->ns);
ret = kernfs_link_sibling(kn);
if (ret)
@@ -846,7 +861,7 @@ static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
if (has_ns != (bool)ns) {
WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
- has_ns ? "required" : "invalid", parent->name, name);
+ has_ns ? "required" : "invalid", kernfs_rcu_name(parent), name);
return NULL;
}
@@ -949,6 +964,11 @@ struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
return kn;
}
+unsigned int kernfs_root_flags(struct kernfs_node *kn)
+{
+ return kernfs_root(kn)->flags;
+}
+
/**
* kernfs_create_root - create a new kernfs hierarchy
* @scops: optional syscall operations for the hierarchy
@@ -1112,7 +1132,7 @@ struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
static int kernfs_dop_revalidate(struct inode *dir, const struct qstr *name,
struct dentry *dentry, unsigned int flags)
{
- struct kernfs_node *kn;
+ struct kernfs_node *kn, *parent;
struct kernfs_root *root;
if (flags & LOOKUP_RCU)
@@ -1120,8 +1140,6 @@ static int kernfs_dop_revalidate(struct inode *dir, const struct qstr *name,
/* Negative hashed dentry? */
if (d_really_is_negative(dentry)) {
- struct kernfs_node *parent;
-
/* If the kernfs parent node has changed discard and
* proceed to ->lookup.
*
@@ -1163,16 +1181,17 @@ static int kernfs_dop_revalidate(struct inode *dir, const struct qstr *name,
if (!kernfs_active(kn))
goto out_bad;
+ parent = kernfs_parent(kn);
/* The kernfs node has been moved? */
- if (kernfs_dentry_node(dentry->d_parent) != kn->parent)
+ if (kernfs_dentry_node(dentry->d_parent) != parent)
goto out_bad;
/* The kernfs node has been renamed */
- if (strcmp(dentry->d_name.name, kn->name) != 0)
+ if (strcmp(dentry->d_name.name, kernfs_rcu_name(kn)) != 0)
goto out_bad;
/* The kernfs node has been moved to a different namespace */
- if (kn->parent && kernfs_ns_enabled(kn->parent) &&
+ if (parent && kernfs_ns_enabled(parent) &&
kernfs_info(dentry->d_sb)->ns != kn->ns)
goto out_bad;
@@ -1365,7 +1384,7 @@ static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
return kernfs_leftmost_descendant(rb_to_kn(rbn));
/* no sibling left, visit parent */
- return pos->parent;
+ return kernfs_parent(pos);
}
static void kernfs_activate_one(struct kernfs_node *kn)
@@ -1377,7 +1396,7 @@ static void kernfs_activate_one(struct kernfs_node *kn)
if (kernfs_active(kn) || (kn->flags & (KERNFS_HIDDEN | KERNFS_REMOVING)))
return;
- WARN_ON_ONCE(kn->parent && RB_EMPTY_NODE(&kn->rb));
+ WARN_ON_ONCE(rcu_access_pointer(kn->__parent) && RB_EMPTY_NODE(&kn->rb));
WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);
atomic_sub(KN_DEACTIVATED_BIAS, &kn->active);
@@ -1447,7 +1466,7 @@ void kernfs_show(struct kernfs_node *kn, bool show)
static void __kernfs_remove(struct kernfs_node *kn)
{
- struct kernfs_node *pos;
+ struct kernfs_node *pos, *parent;
/* Short-circuit if non-root @kn has already finished removal. */
if (!kn)
@@ -1459,10 +1478,10 @@ static void __kernfs_remove(struct kernfs_node *kn)
* This is for kernfs_remove_self() which plays with active ref
* after removal.
*/
- if (kn->parent && RB_EMPTY_NODE(&kn->rb))
+ if (kernfs_parent(kn) && RB_EMPTY_NODE(&kn->rb))
return;
- pr_debug("kernfs %s: removing\n", kn->name);
+ pr_debug("kernfs %s: removing\n", kernfs_rcu_name(kn));
/* prevent new usage by marking all nodes removing and deactivating */
pos = NULL;
@@ -1485,14 +1504,14 @@ static void __kernfs_remove(struct kernfs_node *kn)
kernfs_get(pos);
kernfs_drain(pos);
-
+ parent = kernfs_parent(pos);
/*
* kernfs_unlink_sibling() succeeds once per node. Use it
* to decide who's responsible for cleanups.
*/
- if (!pos->parent || kernfs_unlink_sibling(pos)) {
+ if (!parent || kernfs_unlink_sibling(pos)) {
struct kernfs_iattrs *ps_iattr =
- pos->parent ? pos->parent->iattr : NULL;
+ parent ? parent->iattr : NULL;
/* update timestamps on the parent */
down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
@@ -1718,11 +1737,11 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
{
struct kernfs_node *old_parent;
struct kernfs_root *root;
- const char *old_name = NULL;
+ const char *old_name;
int error;
/* can't move or rename root */
- if (!kn->parent)
+ if (!rcu_access_pointer(kn->__parent))
return -EINVAL;
root = kernfs_root(kn);
@@ -1733,9 +1752,19 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
(new_parent->flags & KERNFS_EMPTY_DIR))
goto out;
+ old_parent = kernfs_parent(kn);
+ if (root->flags & KERNFS_ROOT_INVARIANT_PARENT) {
+ error = -EINVAL;
+ if (WARN_ON_ONCE(old_parent != new_parent))
+ goto out;
+ }
+
error = 0;
- if ((kn->parent == new_parent) && (kn->ns == new_ns) &&
- (strcmp(kn->name, new_name) == 0))
+ old_name = kernfs_rcu_name(kn);
+ if (!new_name)
+ new_name = old_name;
+ if ((old_parent == new_parent) && (kn->ns == new_ns) &&
+ (strcmp(old_name, new_name) == 0))
goto out; /* nothing to rename */
error = -EEXIST;
@@ -1743,7 +1772,7 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
goto out;
/* rename kernfs_node */
- if (strcmp(kn->name, new_name) != 0) {
+ if (strcmp(old_name, new_name) != 0) {
error = -ENOMEM;
new_name = kstrdup_const(new_name, GFP_KERNEL);
if (!new_name)
@@ -1756,27 +1785,32 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
* Move to the appropriate place in the appropriate directories rbtree.
*/
kernfs_unlink_sibling(kn);
- kernfs_get(new_parent);
- /* rename_lock protects ->parent and ->name accessors */
- write_lock_irq(&kernfs_rename_lock);
+ /* rename_lock protects ->parent accessors */
+ if (old_parent != new_parent) {
+ kernfs_get(new_parent);
+ write_lock_irq(&kernfs_rename_lock);
- old_parent = kn->parent;
- kn->parent = new_parent;
+ rcu_assign_pointer(kn->__parent, new_parent);
- kn->ns = new_ns;
- if (new_name) {
- old_name = kn->name;
- kn->name = new_name;
- }
+ kn->ns = new_ns;
+ if (new_name)
+ rcu_assign_pointer(kn->name, new_name);
- write_unlock_irq(&kernfs_rename_lock);
+ write_unlock_irq(&kernfs_rename_lock);
+ kernfs_put(old_parent);
+ } else {
+ /* name assignment is RCU protected, parent is the same */
+ kn->ns = new_ns;
+ if (new_name)
+ rcu_assign_pointer(kn->name, new_name);
+ }
- kn->hash = kernfs_name_hash(kn->name, kn->ns);
+ kn->hash = kernfs_name_hash(new_name ?: old_name, kn->ns);
kernfs_link_sibling(kn);
- kernfs_put(old_parent);
- kfree_const(old_name);
+ if (new_name && !is_kernel_rodata((unsigned long)old_name))
+ kfree_rcu_mightsleep(old_name);
error = 0;
out:
@@ -1795,7 +1829,8 @@ static struct kernfs_node *kernfs_dir_pos(const void *ns,
{
if (pos) {
int valid = kernfs_active(pos) &&
- pos->parent == parent && hash == pos->hash;
+ rcu_access_pointer(pos->__parent) == parent &&
+ hash == pos->hash;
kernfs_put(pos);
if (!valid)
pos = NULL;
@@ -1860,7 +1895,7 @@ static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos);
pos;
pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) {
- const char *name = pos->name;
+ const char *name = kernfs_rcu_name(pos);
unsigned int type = fs_umode_to_dtype(pos->mode);
int len = strlen(name);
ino_t ino = kernfs_ino(pos);
@@ -1869,10 +1904,10 @@ static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
file->private_data = pos;
kernfs_get(pos);
- up_read(&root->kernfs_rwsem);
- if (!dir_emit(ctx, name, len, ino, type))
+ if (!dir_emit(ctx, name, len, ino, type)) {
+ up_read(&root->kernfs_rwsem);
return 0;
- down_read(&root->kernfs_rwsem);
+ }
}
up_read(&root->kernfs_rwsem);
file->private_data = NULL;
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 0eb320617d7b..66fe8fe41f06 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -911,9 +911,11 @@ repeat:
/* kick fsnotify */
down_read(&root->kernfs_supers_rwsem);
+ down_read(&root->kernfs_rwsem);
list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
struct kernfs_node *parent;
struct inode *p_inode = NULL;
+ const char *kn_name;
struct inode *inode;
struct qstr name;
@@ -927,7 +929,8 @@ repeat:
if (!inode)
continue;
- name = QSTR(kn->name);
+ kn_name = kernfs_rcu_name(kn);
+ name = QSTR(kn_name);
parent = kernfs_get_parent(kn);
if (parent) {
p_inode = ilookup(info->sb, kernfs_ino(parent));
@@ -947,6 +950,7 @@ repeat:
iput(inode);
}
+ up_read(&root->kernfs_rwsem);
up_read(&root->kernfs_supers_rwsem);
kernfs_put(kn);
goto repeat;
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index b42ee6547cdc..40a2a9cd819d 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -19,6 +19,8 @@
#include <linux/kernfs.h>
#include <linux/fs_context.h>
+extern rwlock_t kernfs_rename_lock;
+
struct kernfs_iattrs {
kuid_t ia_uid;
kgid_t ia_gid;
@@ -64,11 +66,14 @@ struct kernfs_root {
*
* Return: the kernfs_root @kn belongs to.
*/
-static inline struct kernfs_root *kernfs_root(struct kernfs_node *kn)
+static inline struct kernfs_root *kernfs_root(const struct kernfs_node *kn)
{
+ const struct kernfs_node *knp;
/* if parent exists, it's always a dir; otherwise, @sd is a dir */
- if (kn->parent)
- kn = kn->parent;
+ guard(rcu)();
+ knp = rcu_dereference(kn->__parent);
+ if (knp)
+ kn = knp;
return kn->dir.root;
}
@@ -97,6 +102,32 @@ struct kernfs_super_info {
};
#define kernfs_info(SB) ((struct kernfs_super_info *)(SB->s_fs_info))
+static inline bool kernfs_root_is_locked(const struct kernfs_node *kn)
+{
+ return lockdep_is_held(&kernfs_root(kn)->kernfs_rwsem);
+}
+
+static inline const char *kernfs_rcu_name(const struct kernfs_node *kn)
+{
+ return rcu_dereference_check(kn->name, kernfs_root_is_locked(kn));
+}
+
+static inline struct kernfs_node *kernfs_parent(const struct kernfs_node *kn)
+{
+ /*
+ * The kernfs_node::__parent remains valid within an RCU section. The kn
+ * can be reparented (and renamed), which changes the entry. This can be
+ * avoided by locking kernfs_root::kernfs_rwsem or kernfs_rename_lock.
+ * Both locks can be used to obtain a reference on __parent. Once the
+ * reference count reaches 0, the node is about to be freed and can no
+ * longer be renamed (or given a different parent).
+ */
+ return rcu_dereference_check(kn->__parent,
+ kernfs_root_is_locked(kn) ||
+ lockdep_is_held(&kernfs_rename_lock) ||
+ !atomic_read(&kn->count));
+}
+
static inline struct kernfs_node *kernfs_dentry_node(struct dentry *dentry)
{
if (d_really_is_negative(dentry))
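
A usage sketch (illustration only) of the new accessor: inside an RCU
section the ancestor chain cannot be freed underneath the walker, though a
concurrent rename may still change it; callers needing a stable view hold
kernfs_root::kernfs_rwsem instead, as kernfs_node_dentry() does below.

static size_t kn_depth_sketch(struct kernfs_node *kn)
{
	size_t depth = 0;

	guard(rcu)();
	for (kn = kernfs_parent(kn); kn; kn = kernfs_parent(kn))
		depth++;
	return depth;
}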
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index 1358c21837f1..5124e196c2bf 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -145,8 +145,10 @@ static struct dentry *kernfs_fh_to_parent(struct super_block *sb,
static struct dentry *kernfs_get_parent_dentry(struct dentry *child)
{
struct kernfs_node *kn = kernfs_dentry_node(child);
+ struct kernfs_root *root = kernfs_root(kn);
- return d_obtain_alias(kernfs_get_inode(child->d_sb, kn->parent));
+ guard(rwsem_read)(&root->kernfs_rwsem);
+ return d_obtain_alias(kernfs_get_inode(child->d_sb, kernfs_parent(kn)));
}
static const struct export_operations kernfs_export_ops = {
@@ -186,10 +188,10 @@ static struct kernfs_node *find_next_ancestor(struct kernfs_node *child,
return NULL;
}
- while (child->parent != parent) {
- if (!child->parent)
+ while (kernfs_parent(child) != parent) {
+ child = kernfs_parent(child);
+ if (!child)
return NULL;
- child = child->parent;
}
return child;
@@ -207,16 +209,27 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
{
struct dentry *dentry;
struct kernfs_node *knparent;
+ struct kernfs_root *root;
BUG_ON(sb->s_op != &kernfs_sops);
dentry = dget(sb->s_root);
/* Check if this is the root kernfs_node */
- if (!kn->parent)
+ if (!rcu_access_pointer(kn->__parent))
return dentry;
- knparent = find_next_ancestor(kn, NULL);
+ root = kernfs_root(kn);
+ /*
+ * As long as kn is valid, its parent cannot vanish. This is cgroup's
+ * kn, so it can't have its parent replaced. Therefore it is safe to use
+ * the ancestor node outside the RCU or locked section.
+ */
+ if (WARN_ON_ONCE(!(root->flags & KERNFS_ROOT_INVARIANT_PARENT)))
+ return ERR_PTR(-EINVAL);
+ scoped_guard(rcu) {
+ knparent = find_next_ancestor(kn, NULL);
+ }
if (WARN_ON(!knparent)) {
dput(dentry);
return ERR_PTR(-EINVAL);
@@ -225,17 +238,26 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
do {
struct dentry *dtmp;
struct kernfs_node *kntmp;
+ const char *name;
if (kn == knparent)
return dentry;
- kntmp = find_next_ancestor(kn, knparent);
- if (WARN_ON(!kntmp)) {
+
+ scoped_guard(rwsem_read, &root->kernfs_rwsem) {
+ kntmp = find_next_ancestor(kn, knparent);
+ if (WARN_ON(!kntmp)) {
+ dput(dentry);
+ return ERR_PTR(-EINVAL);
+ }
+ name = kstrdup(kernfs_rcu_name(kntmp), GFP_KERNEL);
+ }
+ if (!name) {
dput(dentry);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOMEM);
}
- dtmp = lookup_positive_unlocked(kntmp->name, dentry,
- strlen(kntmp->name));
+ dtmp = lookup_positive_unlocked(name, dentry, strlen(name));
dput(dentry);
+ kfree(name);
if (IS_ERR(dtmp))
return dtmp;
knparent = kntmp;
diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
index 45371a70caa7..0bd8a2143723 100644
--- a/fs/kernfs/symlink.c
+++ b/fs/kernfs/symlink.c
@@ -62,10 +62,10 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
/* go up to the root, stop at the base */
base = parent;
- while (base->parent) {
- kn = target->parent;
- while (kn->parent && base != kn)
- kn = kn->parent;
+ while (kernfs_parent(base)) {
+ kn = kernfs_parent(target);
+ while (kernfs_parent(kn) && base != kn)
+ kn = kernfs_parent(kn);
if (base == kn)
break;
@@ -75,14 +75,14 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
strcpy(s, "../");
s += 3;
- base = base->parent;
+ base = kernfs_parent(base);
}
/* determine end of target string for reverse fillup */
kn = target;
- while (kn->parent && kn != base) {
- len += strlen(kn->name) + 1;
- kn = kn->parent;
+ while (kernfs_parent(kn) && kn != base) {
+ len += strlen(kernfs_rcu_name(kn)) + 1;
+ kn = kernfs_parent(kn);
}
/* check limits */
@@ -94,15 +94,16 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
/* reverse fillup of target string from target to base */
kn = target;
- while (kn->parent && kn != base) {
- int slen = strlen(kn->name);
+ while (kernfs_parent(kn) && kn != base) {
+ const char *name = kernfs_rcu_name(kn);
+ int slen = strlen(name);
len -= slen;
- memcpy(s + len, kn->name, slen);
+ memcpy(s + len, name, slen);
if (len)
s[--len] = '/';
- kn = kn->parent;
+ kn = kernfs_parent(kn);
}
return 0;
@@ -111,12 +112,13 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
static int kernfs_getlink(struct inode *inode, char *path)
{
struct kernfs_node *kn = inode->i_private;
- struct kernfs_node *parent = kn->parent;
+ struct kernfs_node *parent;
struct kernfs_node *target = kn->symlink.target_kn;
- struct kernfs_root *root = kernfs_root(parent);
+ struct kernfs_root *root = kernfs_root(kn);
int error;
down_read(&root->kernfs_rwsem);
+ parent = kernfs_parent(kn);
error = kernfs_get_target_path(parent, target, path);
up_read(&root->kernfs_rwsem);
diff --git a/fs/namespace.c b/fs/namespace.c
index 6100e5b962a6..14935a0500a2 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2478,7 +2478,8 @@ struct vfsmount *clone_private_mount(const struct path *path)
struct mount *old_mnt = real_mount(path->mnt);
struct mount *new_mnt;
- scoped_guard(rwsem_read, &namespace_sem)
+ guard(rwsem_read)(&namespace_sem);
+
if (IS_MNT_UNBINDABLE(old_mnt))
return ERR_PTR(-EINVAL);
@@ -5326,8 +5327,10 @@ struct kstatmount {
struct mnt_idmap *idmap;
u64 mask;
struct path root;
- struct statmount sm;
struct seq_file seq;
+
+ /* Must be last -- ends in a flexible-array member. */
+ struct statmount sm;
};
static u64 mnt_to_attr_flags(struct vfsmount *mnt)
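
The kstatmount reorder above is forced by C's flexible-array rules: struct statmount ends in a flexible-array member, and anything embedding such a struct must place it last (ISO C forbids the embedding outright; GCC and Clang accept it as an extension only in the trailing position, which is what the kernel relies on). A small illustration with invented types:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct record {
        size_t len;
        char payload[];     /* flexible array member: must be last */
    };

    struct container {
        int id;
        /* Must be last: GCC/Clang only tolerate embedding a struct
         * that ends in a flexible array member in the final slot. */
        struct record rec;
    };

    int main(void)
    {
        const char msg[] = "hello";
        /* Allocate the fixed header plus however much payload we need. */
        struct container *c = malloc(sizeof(*c) + sizeof(msg));

        if (!c)
            return 1;
        c->id = 1;
        c->rec.len = sizeof(msg);
        memcpy(c->rec.payload, msg, sizeof(msg));
        printf("id=%d payload=%s\n", c->id, c->rec.payload);
        free(c);
        return 0;
    }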
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 3b0918ade53c..02c916a55020 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -546,6 +546,8 @@ int nfs_create_rpc_client(struct nfs_client *clp,
args.flags |= RPC_CLNT_CREATE_NOPING;
if (test_bit(NFS_CS_REUSEPORT, &clp->cl_flags))
args.flags |= RPC_CLNT_CREATE_REUSEPORT;
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
+ args.flags |= RPC_CLNT_CREATE_NETUNREACH_FATAL;
if (!IS_ERR(clp->cl_rpcclient))
return 0;
@@ -709,6 +711,9 @@ static int nfs_init_server(struct nfs_server *server,
if (ctx->flags & NFS_MOUNT_NORESVPORT)
set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+ if (ctx->flags & NFS_MOUNT_NETUNREACH_FATAL)
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags);
+
/* Allocate or find a client reference we can use */
clp = nfs_get_client(&cl_init);
if (IS_ERR(clp))
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 4db912f56230..8bdbc4dca89c 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -79,6 +79,7 @@ static void nfs_mark_return_delegation(struct nfs_server *server,
struct nfs_delegation *delegation)
{
set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
+ set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags);
set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
}
@@ -306,7 +307,8 @@ nfs_start_delegation_return_locked(struct nfs_inode *nfsi)
if (delegation == NULL)
goto out;
spin_lock(&delegation->lock);
- if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
+ if (delegation->inode &&
+ !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
clear_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
/* Refcount matched in nfs_end_delegation_return() */
ret = nfs_get_delegation(delegation);
@@ -330,14 +332,16 @@ nfs_start_delegation_return(struct nfs_inode *nfsi)
}
static void nfs_abort_delegation_return(struct nfs_delegation *delegation,
- struct nfs_client *clp, int err)
+ struct nfs_server *server, int err)
{
-
spin_lock(&delegation->lock);
clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
if (err == -EAGAIN) {
set_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
- set_bit(NFS4CLNT_DELEGRETURN_DELAYED, &clp->cl_state);
+ set_bit(NFS4SERV_DELEGRETURN_DELAYED,
+ &server->delegation_flags);
+ set_bit(NFS4CLNT_DELEGRETURN_DELAYED,
+ &server->nfs_client->cl_state);
}
spin_unlock(&delegation->lock);
}
@@ -547,7 +551,7 @@ out:
*/
static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
- struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+ struct nfs_server *server = NFS_SERVER(inode);
unsigned int mode = O_WRONLY | O_RDWR;
int err = 0;
@@ -569,11 +573,11 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
/*
* Guard against state recovery
*/
- err = nfs4_wait_clnt_recover(clp);
+ err = nfs4_wait_clnt_recover(server->nfs_client);
}
if (err) {
- nfs_abort_delegation_return(delegation, clp, err);
+ nfs_abort_delegation_return(delegation, server, err);
goto out;
}
@@ -590,17 +594,6 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
ret = true;
- else if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) {
- struct inode *inode;
-
- spin_lock(&delegation->lock);
- inode = delegation->inode;
- if (inode && list_empty(&NFS_I(inode)->open_files))
- ret = true;
- spin_unlock(&delegation->lock);
- }
- if (ret)
- clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags) ||
test_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags) ||
test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
@@ -619,6 +612,9 @@ static int nfs_server_return_marked_delegations(struct nfs_server *server,
struct nfs_delegation *place_holder_deleg = NULL;
int err = 0;
+ if (!test_and_clear_bit(NFS4SERV_DELEGRETURN,
+ &server->delegation_flags))
+ return 0;
restart:
/*
* To avoid quadratic looping we hold a reference
@@ -670,6 +666,7 @@ restart:
cond_resched();
if (!err)
goto restart;
+ set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags);
set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
goto out;
}
@@ -684,6 +681,9 @@ static bool nfs_server_clear_delayed_delegations(struct nfs_server *server)
struct nfs_delegation *d;
bool ret = false;
+ if (!test_and_clear_bit(NFS4SERV_DELEGRETURN_DELAYED,
+ &server->delegation_flags))
+ goto out;
list_for_each_entry_rcu (d, &server->delegations, super_list) {
if (!test_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags))
continue;
@@ -691,6 +691,7 @@ static bool nfs_server_clear_delayed_delegations(struct nfs_server *server)
clear_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags);
ret = true;
}
+out:
return ret;
}
@@ -878,11 +879,25 @@ int nfs4_inode_make_writeable(struct inode *inode)
return nfs4_inode_return_delegation(inode);
}
-static void nfs_mark_return_if_closed_delegation(struct nfs_server *server,
- struct nfs_delegation *delegation)
+static void
+nfs_mark_return_if_closed_delegation(struct nfs_server *server,
+ struct nfs_delegation *delegation)
{
- set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
- set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
+ struct inode *inode;
+
+ if (test_bit(NFS_DELEGATION_RETURN, &delegation->flags) ||
+ test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags))
+ return;
+ spin_lock(&delegation->lock);
+ inode = delegation->inode;
+ if (!inode)
+ goto out;
+ if (list_empty(&NFS_I(inode)->open_files))
+ nfs_mark_return_delegation(server, delegation);
+ else
+ set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
+out:
+ spin_unlock(&delegation->lock);
}
static bool nfs_server_mark_return_all_delegations(struct nfs_server *server)
@@ -1276,6 +1291,7 @@ static void nfs_mark_test_expired_delegation(struct nfs_server *server,
return;
clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
set_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
+ set_bit(NFS4SERV_DELEGATION_EXPIRED, &server->delegation_flags);
set_bit(NFS4CLNT_DELEGATION_EXPIRED, &server->nfs_client->cl_state);
}
@@ -1354,6 +1370,9 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server,
nfs4_stateid stateid;
unsigned long gen = ++server->delegation_gen;
+ if (!test_and_clear_bit(NFS4SERV_DELEGATION_EXPIRED,
+ &server->delegation_flags))
+ return 0;
restart:
rcu_read_lock();
list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
@@ -1383,6 +1402,9 @@ restart:
goto restart;
}
nfs_inode_mark_test_expired_delegation(server, inode);
+ set_bit(NFS4SERV_DELEGATION_EXPIRED, &server->delegation_flags);
+ set_bit(NFS4CLNT_DELEGATION_EXPIRED,
+ &server->nfs_client->cl_state);
iput(inode);
return -EAGAIN;
}
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index bc957487f6ec..bd23fc736b39 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -666,6 +666,8 @@ static bool nfs_use_readdirplus(struct inode *dir, struct dir_context *ctx,
{
if (!nfs_server_capable(dir, NFS_CAP_READDIRPLUS))
return false;
+ if (NFS_SERVER(dir)->flags & NFS_MOUNT_FORCE_RDIRPLUS)
+ return true;
if (ctx->pos == 0 ||
cache_hits + cache_misses > NFS_READDIR_CACHE_USAGE_THRESHOLD)
return true;
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 98b45b636be3..61ad269c825f 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1154,10 +1154,14 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
rpc_wake_up(&tbl->slot_tbl_waitq);
goto reset;
/* RPC connection errors */
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
+ return -NFS4ERR_FATAL_IOERROR;
+ fallthrough;
case -ECONNREFUSED:
case -EHOSTDOWN:
case -EHOSTUNREACH:
- case -ENETUNREACH:
case -EIO:
case -ETIMEDOUT:
case -EPIPE:
@@ -1183,6 +1187,7 @@ reset:
/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
static int ff_layout_async_handle_error_v3(struct rpc_task *task,
+ struct nfs_client *clp,
struct pnfs_layout_segment *lseg,
u32 idx)
{
@@ -1200,6 +1205,11 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
case -EJUKEBOX:
nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
goto out_retry;
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
+ return -NFS4ERR_FATAL_IOERROR;
+ fallthrough;
default:
dprintk("%s DS connection error %d\n", __func__,
task->tk_status);
@@ -1234,7 +1244,7 @@ static int ff_layout_async_handle_error(struct rpc_task *task,
switch (vers) {
case 3:
- return ff_layout_async_handle_error_v3(task, lseg, idx);
+ return ff_layout_async_handle_error_v3(task, clp, lseg, idx);
case 4:
return ff_layout_async_handle_error_v4(task, state, clp,
lseg, idx);
@@ -1264,6 +1274,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
case -ECONNRESET:
case -EHOSTDOWN:
case -EHOSTUNREACH:
+ case -ENETDOWN:
case -ENETUNREACH:
case -EADDRINUSE:
case -ENOBUFS:
@@ -1337,6 +1348,9 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
return task->tk_status;
case -EAGAIN:
goto out_eagain;
+ case -NFS4ERR_FATAL_IOERROR:
+ task->tk_status = -EIO;
+ return 0;
}
return 0;
@@ -1507,6 +1521,9 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
return task->tk_status;
case -EAGAIN:
return -EAGAIN;
+ case -NFS4ERR_FATAL_IOERROR:
+ task->tk_status = -EIO;
+ return 0;
}
if (hdr->res.verf->committed == NFS_FILE_SYNC ||
@@ -1551,6 +1568,9 @@ static int ff_layout_commit_done_cb(struct rpc_task *task,
case -EAGAIN:
rpc_restart_call_prepare(task);
return -EAGAIN;
+ case -NFS4ERR_FATAL_IOERROR:
+ task->tk_status = -EIO;
+ return 0;
}
ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
index b069385eea17..13f71ca8c974 100644
--- a/fs/nfs/fs_context.c
+++ b/fs/nfs/fs_context.c
@@ -50,6 +50,7 @@ enum nfs_param {
Opt_clientaddr,
Opt_cto,
Opt_alignwrite,
+ Opt_fatal_neterrors,
Opt_fg,
Opt_fscache,
Opt_fscache_flag,
@@ -72,6 +73,8 @@ enum nfs_param {
Opt_posix,
Opt_proto,
Opt_rdirplus,
+ Opt_rdirplus_none,
+ Opt_rdirplus_force,
Opt_rdma,
Opt_resvport,
Opt_retrans,
@@ -96,6 +99,20 @@ enum nfs_param {
};
enum {
+ Opt_fatal_neterrors_default,
+ Opt_fatal_neterrors_enetunreach,
+ Opt_fatal_neterrors_none,
+};
+
+static const struct constant_table nfs_param_enums_fatal_neterrors[] = {
+ { "default", Opt_fatal_neterrors_default },
+ { "ENETDOWN:ENETUNREACH", Opt_fatal_neterrors_enetunreach },
+ { "ENETUNREACH:ENETDOWN", Opt_fatal_neterrors_enetunreach },
+ { "none", Opt_fatal_neterrors_none },
+ {}
+};
+
+enum {
Opt_local_lock_all,
Opt_local_lock_flock,
Opt_local_lock_none,
@@ -151,6 +168,8 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = {
fsparam_string("clientaddr", Opt_clientaddr),
fsparam_flag_no("cto", Opt_cto),
fsparam_flag_no("alignwrite", Opt_alignwrite),
+ fsparam_enum("fatal_neterrors", Opt_fatal_neterrors,
+ nfs_param_enums_fatal_neterrors),
fsparam_flag ("fg", Opt_fg),
fsparam_flag_no("fsc", Opt_fscache_flag),
fsparam_string("fsc", Opt_fscache),
@@ -174,7 +193,8 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = {
fsparam_u32 ("port", Opt_port),
fsparam_flag_no("posix", Opt_posix),
fsparam_string("proto", Opt_proto),
- fsparam_flag_no("rdirplus", Opt_rdirplus),
+ fsparam_flag_no("rdirplus", Opt_rdirplus), // rdirplus|nordirplus
+ fsparam_string("rdirplus", Opt_rdirplus), // rdirplus=...
fsparam_flag ("rdma", Opt_rdma),
fsparam_flag_no("resvport", Opt_resvport),
fsparam_u32 ("retrans", Opt_retrans),
@@ -288,6 +308,12 @@ static const struct constant_table nfs_xprtsec_policies[] = {
{}
};
+static const struct constant_table nfs_rdirplus_tokens[] = {
+ { "none", Opt_rdirplus_none },
+ { "force", Opt_rdirplus_force },
+ {}
+};
+
/*
* Sanity-check a server address provided by the mount command.
*
@@ -636,10 +662,25 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
ctx->flags &= ~NFS_MOUNT_NOACL;
break;
case Opt_rdirplus:
- if (result.negated)
+ if (result.negated) {
+ ctx->flags &= ~NFS_MOUNT_FORCE_RDIRPLUS;
ctx->flags |= NFS_MOUNT_NORDIRPLUS;
- else
- ctx->flags &= ~NFS_MOUNT_NORDIRPLUS;
+ } else if (!param->string) {
+ ctx->flags &= ~(NFS_MOUNT_NORDIRPLUS | NFS_MOUNT_FORCE_RDIRPLUS);
+ } else {
+ switch (lookup_constant(nfs_rdirplus_tokens, param->string, -1)) {
+ case Opt_rdirplus_none:
+ ctx->flags &= ~NFS_MOUNT_FORCE_RDIRPLUS;
+ ctx->flags |= NFS_MOUNT_NORDIRPLUS;
+ break;
+ case Opt_rdirplus_force:
+ ctx->flags &= ~NFS_MOUNT_NORDIRPLUS;
+ ctx->flags |= NFS_MOUNT_FORCE_RDIRPLUS;
+ break;
+ default:
+ goto out_invalid_value;
+ }
+ }
break;
case Opt_sharecache:
if (result.negated)
@@ -872,6 +913,25 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
goto out_of_bounds;
ctx->nfs_server.max_connect = result.uint_32;
break;
+ case Opt_fatal_neterrors:
+ trace_nfs_mount_assign(param->key, param->string);
+ switch (result.uint_32) {
+ case Opt_fatal_neterrors_default:
+ if (fc->net_ns != &init_net)
+ ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL;
+ else
+ ctx->flags &= ~NFS_MOUNT_NETUNREACH_FATAL;
+ break;
+ case Opt_fatal_neterrors_enetunreach:
+ ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL;
+ break;
+ case Opt_fatal_neterrors_none:
+ ctx->flags &= ~NFS_MOUNT_NETUNREACH_FATAL;
+ break;
+ default:
+ goto out_invalid_value;
+ }
+ break;
case Opt_lookupcache:
trace_nfs_mount_assign(param->key, param->string);
switch (result.uint_32) {
@@ -1651,6 +1711,9 @@ static int nfs_init_fs_context(struct fs_context *fc)
ctx->xprtsec.cert_serial = TLS_NO_CERT;
ctx->xprtsec.privkey_serial = TLS_NO_PRIVKEY;
+ if (fc->net_ns != &init_net)
+ ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL;
+
fc->s_iflags |= SB_I_STABLE_WRITES;
}
fc->fs_private = ctx;
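
With the table entries above, the single key "rdirplus" now arrives in three shapes (bare flag, negated "nordirplus", and the string form "rdirplus=none" or "rdirplus=force"), each mapping onto the pair of NORDIRPLUS/FORCE_RDIRPLUS bits. A table-free sketch of the same tri-state dispatch (flag values and helper invented for the example):

    #include <stdio.h>
    #include <string.h>

    #define MNT_NORDIRPLUS      0x1
    #define MNT_FORCE_RDIRPLUS  0x2

    /* Returns updated flags, or -1 for an unrecognized value. */
    static int parse_rdirplus(int flags, int negated, const char *value)
    {
        if (negated) {                          /* nordirplus */
            flags &= ~MNT_FORCE_RDIRPLUS;
            flags |= MNT_NORDIRPLUS;
        } else if (!value) {                    /* bare rdirplus */
            flags &= ~(MNT_NORDIRPLUS | MNT_FORCE_RDIRPLUS);
        } else if (!strcmp(value, "none")) {    /* rdirplus=none */
            flags &= ~MNT_FORCE_RDIRPLUS;
            flags |= MNT_NORDIRPLUS;
        } else if (!strcmp(value, "force")) {   /* rdirplus=force */
            flags &= ~MNT_NORDIRPLUS;
            flags |= MNT_FORCE_RDIRPLUS;
        } else {
            return -1;
        }
        return flags;
    }

    int main(void)
    {
        printf("nordirplus     -> %d\n", parse_rdirplus(0, 1, NULL));
        printf("rdirplus       -> %d\n", parse_rdirplus(3, 0, NULL));
        printf("rdirplus=force -> %d\n", parse_rdirplus(0, 0, "force"));
        printf("rdirplus=bogus -> %d\n", parse_rdirplus(0, 0, "bogus"));
        return 0;
    }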
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 1aa67fca69b2..119e447758b9 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -74,6 +74,8 @@ nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
int nfs_wait_bit_killable(struct wait_bit_key *key, int mode)
{
+ if (unlikely(nfs_current_task_exiting()))
+ return -EINTR;
schedule();
if (signal_pending_state(mode, current))
return -ERESTARTSYS;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 1ac1d3eec517..ec8d32d0e2e9 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -912,6 +912,11 @@ static inline u32 nfs_stateid_hash(nfs4_stateid *stateid)
}
#endif
+static inline bool nfs_current_task_exiting(void)
+{
+ return (current->flags & PF_EXITING) != 0;
+}
+
static inline bool nfs_error_is_fatal(int err)
{
switch (err) {
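
nfs_current_task_exiting() gives the retry loops patched throughout this series an exit hatch: a task past PF_EXITING can no longer receive signals, so a signal-interruptible wait would otherwise spin forever. A userspace model of that guard (the exiting flag stands in for current->flags & PF_EXITING):

    #include <stdbool.h>
    #include <stdio.h>

    static bool exiting;    /* models current->flags & PF_EXITING */

    static int do_request(int attempt)
    {
        return attempt < 3 ? -1 : 0;    /* fail a few times, then succeed */
    }

    static int retry_request(void)
    {
        int attempt = 0;

        for (;;) {
            /* A task that is already exiting can't take signals, so a
             * signal-interruptible wait would never end: bail instead. */
            if (exiting)
                return -4;      /* -EINTR */
            if (do_request(attempt++) == 0)
                return 0;
        }
    }

    int main(void)
    {
        printf("normal: %d\n", retry_request());
        exiting = true;
        printf("exiting: %d\n", retry_request());
        return 0;
    }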
diff --git a/fs/nfs/nfs3client.c b/fs/nfs/nfs3client.c
index b0c8a39c2bbd..0d7310c1ee0c 100644
--- a/fs/nfs/nfs3client.c
+++ b/fs/nfs/nfs3client.c
@@ -120,6 +120,8 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
__set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &mds_clp->cl_flags))
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags);
__set_bit(NFS_CS_DS, &cl_init.init_flags);
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 755ed3c37051..a4cb67573aa7 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -39,7 +39,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
__set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
schedule_timeout(NFS_JUKEBOX_RETRY_TIME);
res = -ERESTARTSYS;
- } while (!fatal_signal_pending(current));
+ } while (!fatal_signal_pending(current) && !nfs_current_task_exiting());
return res;
}
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 1924c4a2077b..5cf52ece96ac 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -21,6 +21,8 @@
#define NFSDBG_FACILITY NFSDBG_PROC
static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);
+static int nfs42_proc_offload_status(struct file *file, nfs4_stateid *stateid,
+ u64 *copied);
static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr)
{
@@ -173,6 +175,20 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
return err;
}
+static void nfs4_copy_dequeue_callback(struct nfs_server *dst_server,
+ struct nfs_server *src_server,
+ struct nfs4_copy_state *copy)
+{
+ spin_lock(&dst_server->nfs_client->cl_lock);
+ list_del_init(&copy->copies);
+ spin_unlock(&dst_server->nfs_client->cl_lock);
+ if (dst_server != src_server) {
+ spin_lock(&src_server->nfs_client->cl_lock);
+ list_del_init(&copy->src_copies);
+ spin_unlock(&src_server->nfs_client->cl_lock);
+ }
+}
+
static int handle_async_copy(struct nfs42_copy_res *res,
struct nfs_server *dst_server,
struct nfs_server *src_server,
@@ -182,9 +198,12 @@ static int handle_async_copy(struct nfs42_copy_res *res,
bool *restart)
{
struct nfs4_copy_state *copy, *tmp_copy = NULL, *iter;
- int status = NFS4_OK;
struct nfs_open_context *dst_ctx = nfs_file_open_context(dst);
struct nfs_open_context *src_ctx = nfs_file_open_context(src);
+ struct nfs_client *clp = dst_server->nfs_client;
+ unsigned long timeout = 3 * HZ;
+ int status = NFS4_OK;
+ u64 copied;
copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL);
if (!copy)
@@ -222,15 +241,12 @@ static int handle_async_copy(struct nfs42_copy_res *res,
spin_unlock(&src_server->nfs_client->cl_lock);
}
- status = wait_for_completion_interruptible(&copy->completion);
- spin_lock(&dst_server->nfs_client->cl_lock);
- list_del_init(&copy->copies);
- spin_unlock(&dst_server->nfs_client->cl_lock);
- if (dst_server != src_server) {
- spin_lock(&src_server->nfs_client->cl_lock);
- list_del_init(&copy->src_copies);
- spin_unlock(&src_server->nfs_client->cl_lock);
- }
+wait:
+ status = wait_for_completion_interruptible_timeout(&copy->completion,
+ timeout);
+ if (!status)
+ goto timeout;
+ nfs4_copy_dequeue_callback(dst_server, src_server, copy);
if (status == -ERESTARTSYS) {
goto out_cancel;
} else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) {
@@ -240,6 +256,7 @@ static int handle_async_copy(struct nfs42_copy_res *res,
}
out:
res->write_res.count = copy->count;
+ /* Copy out the updated write verifier provided by CB_OFFLOAD. */
memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf));
status = -copy->error;
@@ -251,6 +268,39 @@ out_cancel:
if (!nfs42_files_from_same_server(src, dst))
nfs42_do_offload_cancel_async(src, src_stateid);
goto out_free;
+timeout:
+ timeout <<= 1;
+ if (timeout > (clp->cl_lease_time >> 1))
+ timeout = clp->cl_lease_time >> 1;
+ status = nfs42_proc_offload_status(dst, &copy->stateid, &copied);
+ if (status == -EINPROGRESS)
+ goto wait;
+ nfs4_copy_dequeue_callback(dst_server, src_server, copy);
+ switch (status) {
+ case 0:
+ /* The server recognized the copy stateid, so it hasn't
+ * rebooted. Don't overwrite the verifier returned in the
+ * COPY result. */
+ res->write_res.count = copied;
+ goto out_free;
+ case -EREMOTEIO:
+ /* COPY operation failed on the server. */
+ status = -EOPNOTSUPP;
+ res->write_res.count = copied;
+ goto out_free;
+ case -EBADF:
+ /* Server did not recognize the copy stateid. It has
+ * probably restarted and lost the copy state. */
+ res->write_res.count = 0;
+ status = -EOPNOTSUPP;
+ break;
+ case -EOPNOTSUPP:
+ /* RFC 7862 REQUIREs the server to support OFFLOAD_STATUS once
+ * it has acknowledged an async COPY, so this server is not
+ * spec-compliant. */
+ res->write_res.count = 0;
+ }
+ goto out_free;
}
static int process_copy_commit(struct file *dst, loff_t pos_dst,
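
handle_async_copy() now polls rather than blocking indefinitely: the wait starts at 3 seconds and doubles after each timeout, capped at half the lease time so the client still renews its lease between probes. The schedule in isolation (HZ and the lease value are illustrative):

    #include <stdio.h>

    #define HZ 100UL

    int main(void)
    {
        unsigned long lease_time = 90 * HZ; /* e.g. a 90s lease */
        unsigned long timeout = 3 * HZ;     /* initial wait */
        int i;

        for (i = 0; i < 8; i++) {
            printf("poll %d: wait %lus\n", i, timeout / HZ);
            /* Double on each timeout, but never exceed lease/2. */
            timeout <<= 1;
            if (timeout > (lease_time >> 1))
                timeout = lease_time >> 1;
        }
        return 0;
    }

With a 90-second lease the waits go 3, 6, 12, 24 and then stay pinned at 45 seconds.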
@@ -582,6 +632,108 @@ static int nfs42_do_offload_cancel_async(struct file *dst,
return status;
}
+static int
+_nfs42_proc_offload_status(struct nfs_server *server, struct file *file,
+ struct nfs42_offload_data *data)
+{
+ struct nfs_open_context *ctx = nfs_file_open_context(file);
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_STATUS],
+ .rpc_argp = &data->args,
+ .rpc_resp = &data->res,
+ .rpc_cred = ctx->cred,
+ };
+ int status;
+
+ status = nfs4_call_sync(server->client, server, &msg,
+ &data->args.osa_seq_args,
+ &data->res.osr_seq_res, 1);
+ trace_nfs4_offload_status(&data->args, status);
+ switch (status) {
+ case 0:
+ break;
+
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_OLD_STATEID:
+ /*
+ * Server does not recognize the COPY stateid. CB_OFFLOAD
+ * could have purged it, or server might have rebooted.
+ * Since COPY stateids don't have an associated inode,
+ * avoid triggering state recovery.
+ */
+ status = -EBADF;
+ break;
+ case -NFS4ERR_NOTSUPP:
+ case -ENOTSUPP:
+ case -EOPNOTSUPP:
+ server->caps &= ~NFS_CAP_OFFLOAD_STATUS;
+ status = -EOPNOTSUPP;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * nfs42_proc_offload_status - Poll completion status of an async copy operation
+ * @dst: handle of file being copied into
+ * @stateid: copy stateid (from async COPY result)
+ * @copied: OUT: number of bytes copied so far
+ *
+ * Return values:
+ * %0: Server returned an NFS4_OK completion status
+ * %-EINPROGRESS: Server returned no completion status
+ * %-EREMOTEIO: Server returned an error completion status
+ * %-EBADF: Server did not recognize the copy stateid
+ * %-EOPNOTSUPP: Server does not support OFFLOAD_STATUS
+ * %-ERESTARTSYS: Wait interrupted by signal
+ *
+ * Other negative errnos indicate the client could not complete the
+ * request.
+ */
+static int
+nfs42_proc_offload_status(struct file *dst, nfs4_stateid *stateid, u64 *copied)
+{
+ struct inode *inode = file_inode(dst);
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs4_exception exception = {
+ .inode = inode,
+ };
+ struct nfs42_offload_data *data;
+ int status;
+
+ if (!(server->caps & NFS_CAP_OFFLOAD_STATUS))
+ return -EOPNOTSUPP;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->seq_server = server;
+ data->args.osa_src_fh = NFS_FH(inode);
+ memcpy(&data->args.osa_stateid, stateid,
+ sizeof(data->args.osa_stateid));
+ exception.stateid = &data->args.osa_stateid;
+ do {
+ status = _nfs42_proc_offload_status(server, dst, data);
+ if (status == -EOPNOTSUPP)
+ goto out;
+ status = nfs4_handle_exception(server, status, &exception);
+ } while (exception.retry);
+ if (status)
+ goto out;
+
+ *copied = data->res.osr_count;
+ if (!data->res.complete_count)
+ status = -EINPROGRESS;
+ else if (data->res.osr_complete != NFS_OK)
+ status = -EREMOTEIO;
+
+out:
+ kfree(data);
+ return status;
+}
+
static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
struct nfs42_copy_notify_args *args,
struct nfs42_copy_notify_res *res)
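
The kernel-doc above spells out the poller's contract; a caller separates "keep waiting" from the terminal states much as handle_async_copy() does. A condensed sketch of that dispatch (errno values per the documentation; the status helper is a stand-in):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for nfs42_proc_offload_status(): pretend the server
     * reports an in-progress copy once, then completion. */
    static int offload_status(int call, uint64_t *copied)
    {
        *copied = call ? 4096 : 1024;
        return call ? 0 : -EINPROGRESS;
    }

    int main(void)
    {
        uint64_t copied;
        int call;

        for (call = 0; ; call++) {
            int status = offload_status(call, &copied);

            switch (status) {
            case -EINPROGRESS:  /* no completion status yet: wait again */
                printf("in progress, %llu bytes so far\n",
                       (unsigned long long)copied);
                continue;
            case 0:             /* NFS4_OK: copy finished */
                printf("done, %llu bytes\n",
                       (unsigned long long)copied);
                return 0;
            case -EBADF:        /* stateid unknown: server rebooted */
            case -EOPNOTSUPP:   /* op unsupported: fall back */
            case -EREMOTEIO:    /* copy failed on the server */
            default:
                printf("giving up: %d\n", status);
                return 1;
            }
        }
    }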
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index 5072d7ea72e9..b1b663468249 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -35,6 +35,11 @@
#define encode_offload_cancel_maxsz (op_encode_hdr_maxsz + \
XDR_QUADLEN(NFS4_STATEID_SIZE))
#define decode_offload_cancel_maxsz (op_decode_hdr_maxsz)
+#define encode_offload_status_maxsz (op_encode_hdr_maxsz + \
+ XDR_QUADLEN(NFS4_STATEID_SIZE))
+#define decode_offload_status_maxsz (op_decode_hdr_maxsz + \
+ 2 /* osr_count */ + \
+ 2 /* osr_complete */)
#define encode_copy_notify_maxsz (op_encode_hdr_maxsz + \
XDR_QUADLEN(NFS4_STATEID_SIZE) + \
1 + /* nl4_type */ \
@@ -143,6 +148,14 @@
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_offload_cancel_maxsz)
+#define NFS4_enc_offload_status_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_offload_status_maxsz)
+#define NFS4_dec_offload_status_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_offload_status_maxsz)
#define NFS4_enc_copy_notify_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
@@ -345,6 +358,14 @@ static void encode_offload_cancel(struct xdr_stream *xdr,
encode_nfs4_stateid(xdr, &args->osa_stateid);
}
+static void encode_offload_status(struct xdr_stream *xdr,
+ const struct nfs42_offload_status_args *args,
+ struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_OFFLOAD_STATUS, decode_offload_status_maxsz, hdr);
+ encode_nfs4_stateid(xdr, &args->osa_stateid);
+}
+
static void encode_copy_notify(struct xdr_stream *xdr,
const struct nfs42_copy_notify_args *args,
struct compound_hdr *hdr)
@@ -570,6 +591,25 @@ static void nfs4_xdr_enc_offload_cancel(struct rpc_rqst *req,
}
/*
+ * Encode OFFLOAD_STATUS request
+ */
+static void nfs4_xdr_enc_offload_status(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const void *data)
+{
+ const struct nfs42_offload_status_args *args = data;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->osa_seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->osa_seq_args, &hdr);
+ encode_putfh(xdr, args->osa_src_fh, &hdr);
+ encode_offload_status(xdr, args, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
* Encode COPY_NOTIFY request
*/
static void nfs4_xdr_enc_copy_notify(struct rpc_rqst *req,
@@ -921,6 +961,26 @@ static int decode_offload_cancel(struct xdr_stream *xdr,
return decode_op_hdr(xdr, OP_OFFLOAD_CANCEL);
}
+static int decode_offload_status(struct xdr_stream *xdr,
+ struct nfs42_offload_status_res *res)
+{
+ ssize_t result;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_OFFLOAD_STATUS);
+ if (status)
+ return status;
+ /* osr_count */
+ if (xdr_stream_decode_u64(xdr, &res->osr_count) < 0)
+ return -EIO;
+ /* osr_complete<1> */
+ result = xdr_stream_decode_uint32_array(xdr, &res->osr_complete, 1);
+ if (result < 0)
+ return -EIO;
+ res->complete_count = result;
+ return 0;
+}
+
static int decode_copy_notify(struct xdr_stream *xdr,
struct nfs42_copy_notify_res *res)
{
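
decode_offload_status() pulls three XDR items off the wire: the operation status, a 64-bit byte count, and the optional array osr_complete<1>, whose element count (0 or 1) says whether the copy has completed. A standalone decoder over a raw big-endian buffer showing the same layout (buffer contents invented for the demo):

    #include <arpa/inet.h>  /* ntohl() */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t get32(const unsigned char **p)
    {
        uint32_t v;

        memcpy(&v, *p, 4);
        *p += 4;
        return ntohl(v);
    }

    int main(void)
    {
        /* osr_count = 4096, osr_complete<1> = { NFS4_OK } */
        const unsigned char wire[] = {
            0, 0, 0, 0,  0, 0, 16, 0,   /* u64 osr_count */
            0, 0, 0, 1,                 /* array length: 1 */
            0, 0, 0, 0,                 /* osr_complete[0] = NFS4_OK */
        };
        const unsigned char *p = wire;

        uint64_t count = get32(&p);
        count = count << 32 | get32(&p);
        uint32_t ncomplete = get32(&p);     /* 0 or 1 */

        printf("osr_count=%llu\n", (unsigned long long)count);
        if (ncomplete)
            printf("complete, status=%u\n", get32(&p));
        else
            printf("still in progress\n");
        return 0;
    }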
@@ -1371,6 +1431,32 @@ out:
}
/*
+ * Decode OFFLOAD_STATUS response
+ */
+static int nfs4_xdr_dec_offload_status(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *data)
+{
+ struct nfs42_offload_status_res *res = data;
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->osr_seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_offload_status(xdr, res);
+
+out:
+ return status;
+}
+
+/*
* Decode COPY_NOTIFY response
*/
static int nfs4_xdr_dec_copy_notify(struct rpc_rqst *rqstp,
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 83378f69b35e..162c85a83a14 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -233,6 +233,8 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
__set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
if (test_bit(NFS_CS_PNFS, &cl_init->init_flags))
__set_bit(NFS_CS_PNFS, &clp->cl_flags);
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &cl_init->init_flags))
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags);
/*
* Set up the connection to the server before we add it to the
* global list.
@@ -937,6 +939,9 @@ static int nfs4_set_client(struct nfs_server *server,
__set_bit(NFS_CS_TSM_POSSIBLE, &cl_init.init_flags);
server->port = rpc_get_port((struct sockaddr *)addr);
+ if (server->flags & NFS_MOUNT_NETUNREACH_FATAL)
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags);
+
/* Allocate or find a client reference we can use */
clp = nfs_get_client(&cl_init);
if (IS_ERR(clp))
@@ -1011,6 +1016,8 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
__set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &mds_clp->cl_flags))
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags);
__set_bit(NFS_CS_PNFS, &cl_init.init_flags);
cl_init.max_connect = NFS_MAX_TRANSPORTS;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 70c8ea943019..970f28dbf253 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -195,6 +195,9 @@ static int nfs4_map_errors(int err)
return -EBUSY;
case -NFS4ERR_NOT_SAME:
return -ENOTSYNC;
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ break;
default:
dprintk("%s could not handle NFSv4 error %d\n",
__func__, -err);
@@ -443,6 +446,8 @@ static int nfs4_delay_killable(long *timeout)
{
might_sleep();
+ if (unlikely(nfs_current_task_exiting()))
+ return -EINTR;
__set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
schedule_timeout(nfs4_update_delay(timeout));
if (!__fatal_signal_pending(current))
@@ -454,6 +459,8 @@ static int nfs4_delay_interruptible(long *timeout)
{
might_sleep();
+ if (unlikely(nfs_current_task_exiting()))
+ return -EINTR;
__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE);
schedule_timeout(nfs4_update_delay(timeout));
if (!signal_pending(current))
@@ -1774,7 +1781,8 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state,
rcu_read_unlock();
trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
- if (!fatal_signal_pending(current)) {
+ if (!fatal_signal_pending(current) &&
+ !nfs_current_task_exiting()) {
if (schedule_timeout(5*HZ) == 0)
status = -EAGAIN;
else
@@ -3576,7 +3584,7 @@ static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
write_sequnlock(&state->seqlock);
trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
- if (fatal_signal_pending(current))
+ if (fatal_signal_pending(current) || nfs_current_task_exiting())
status = -EINTR;
else
if (schedule_timeout(5*HZ) != 0)
@@ -9594,7 +9602,7 @@ static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
return;
trace_nfs4_sequence(clp, task->tk_status);
- if (task->tk_status < 0 && !task->tk_client->cl_shutdown) {
+ if (task->tk_status < 0 && clp->cl_cons_state >= 0) {
dprintk("%s ERROR %d\n", __func__, task->tk_status);
if (refcount_read(&clp->cl_count) == 1)
return;
@@ -10798,7 +10806,8 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
| NFS_CAP_CLONE
| NFS_CAP_LAYOUTERROR
| NFS_CAP_READ_PLUS
- | NFS_CAP_MOVEABLE,
+ | NFS_CAP_MOVEABLE
+ | NFS_CAP_OFFLOAD_STATUS,
.init_client = nfs41_init_client,
.shutdown_client = nfs41_shutdown_client,
.match_stateid = nfs41_match_stateid,
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 542cdf71229f..7612e977e80b 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1198,7 +1198,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
struct rpc_clnt *clnt = clp->cl_rpcclient;
bool swapon = false;
- if (clnt->cl_shutdown)
+ if (clp->cl_cons_state < 0)
return;
set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
@@ -1403,7 +1403,7 @@ int nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_
dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
clp->cl_hostname);
nfs4_schedule_state_manager(clp);
- return 0;
+ return clp->cl_cons_state < 0 ? clp->cl_cons_state : 0;
}
EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery);
@@ -2739,7 +2739,15 @@ out_error:
pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s"
" with error %d\n", section_sep, section,
clp->cl_hostname, -status);
- ssleep(1);
+ switch (status) {
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ nfs_mark_client_ready(clp, -EIO);
+ break;
+ default:
+ ssleep(1);
+ break;
+ }
out_drain:
memalloc_nofs_restore(memflags);
nfs4_end_drain_session(clp);
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index 22c973316f0b..bc67fe6801b1 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -2608,7 +2608,7 @@ TRACE_EVENT(nfs4_copy_notify,
)
);
-TRACE_EVENT(nfs4_offload_cancel,
+DECLARE_EVENT_CLASS(nfs4_offload_class,
TP_PROTO(
const struct nfs42_offload_status_args *args,
int error
@@ -2640,6 +2640,15 @@ TRACE_EVENT(nfs4_offload_cancel,
__entry->stateid_seq, __entry->stateid_hash
)
);
+#define DEFINE_NFS4_OFFLOAD_EVENT(name) \
+ DEFINE_EVENT(nfs4_offload_class, name, \
+ TP_PROTO( \
+ const struct nfs42_offload_status_args *args, \
+ int error \
+ ), \
+ TP_ARGS(args, error))
+DEFINE_NFS4_OFFLOAD_EVENT(nfs4_offload_cancel);
+DEFINE_NFS4_OFFLOAD_EVENT(nfs4_offload_status);
DECLARE_EVENT_CLASS(nfs4_xattr_event,
TP_PROTO(
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index e8ac3f615f93..55bef5fbfa47 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -82,9 +82,8 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
* we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2)
*/
#define pagepad_maxsz (1)
-#define open_owner_id_maxsz (1 + 2 + 1 + 1 + 2)
-#define lock_owner_id_maxsz (1 + 1 + 4)
-#define decode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
+#define open_owner_id_maxsz (2 + 1 + 2 + 2)
+#define lock_owner_id_maxsz (2 + 1 + 2)
#define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2))
#define compound_decode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2))
#define op_encode_hdr_maxsz (1)
@@ -185,7 +184,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
#define encode_claim_null_maxsz (1 + nfs4_name_maxsz)
#define encode_open_maxsz (op_encode_hdr_maxsz + \
2 + encode_share_access_maxsz + 2 + \
- open_owner_id_maxsz + \
+ 1 + open_owner_id_maxsz + \
encode_opentype_maxsz + \
encode_claim_null_maxsz)
#define decode_space_limit_maxsz (3)
@@ -255,13 +254,14 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
#define encode_link_maxsz (op_encode_hdr_maxsz + \
nfs4_name_maxsz)
#define decode_link_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz)
-#define encode_lockowner_maxsz (7)
+#define encode_lockowner_maxsz (2 + 1 + lock_owner_id_maxsz)
+
#define encode_lock_maxsz (op_encode_hdr_maxsz + \
7 + \
1 + encode_stateid_maxsz + 1 + \
encode_lockowner_maxsz)
#define decode_lock_denied_maxsz \
- (8 + decode_lockowner_maxsz)
+ (2 + 2 + 1 + 2 + 1 + lock_owner_id_maxsz)
#define decode_lock_maxsz (op_decode_hdr_maxsz + \
decode_lock_denied_maxsz)
#define encode_lockt_maxsz (op_encode_hdr_maxsz + 5 + \
@@ -617,7 +617,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
encode_lockowner_maxsz)
#define NFS4_dec_release_lockowner_sz \
(compound_decode_hdr_maxsz + \
- decode_lockowner_maxsz)
+ decode_release_lockowner_maxsz)
#define NFS4_enc_access_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
@@ -1412,7 +1412,7 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
__be32 *p;
/*
* opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4,
- * owner 4 = 32
+ * owner 28
*/
encode_nfs4_seqid(xdr, arg->seqid);
encode_share_access(xdr, arg->share_access);
@@ -5077,7 +5077,7 @@ static int decode_link(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
/*
* We create the owner, so we know a proper owner.id length is 4.
*/
-static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl)
+static int decode_lock_denied(struct xdr_stream *xdr, struct file_lock *fl)
{
uint64_t offset, length, clientid;
__be32 *p;
@@ -7702,6 +7702,7 @@ const struct rpc_procinfo nfs4_procedures[] = {
PROC42(CLONE, enc_clone, dec_clone),
PROC42(COPY, enc_copy, dec_copy),
PROC42(OFFLOAD_CANCEL, enc_offload_cancel, dec_offload_cancel),
+ PROC42(OFFLOAD_STATUS, enc_offload_status, dec_offload_status),
PROC42(COPY_NOTIFY, enc_copy_notify, dec_copy_notify),
PROC(LOOKUPP, enc_lookupp, dec_lookupp),
PROC42(LAYOUTERROR, enc_layouterror, dec_layouterror),
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index aeb715b4a690..9eea9e62afc9 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -454,8 +454,12 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
{ NFS_MOUNT_NONLM, ",nolock", "" },
{ NFS_MOUNT_NOACL, ",noacl", "" },
{ NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" },
+ { NFS_MOUNT_FORCE_RDIRPLUS, ",rdirplus=force", "" },
{ NFS_MOUNT_UNSHARED, ",nosharecache", "" },
{ NFS_MOUNT_NORESVPORT, ",noresvport", "" },
+ { NFS_MOUNT_NETUNREACH_FATAL,
+ ",fatal_neterrors=ENETDOWN:ENETUNREACH",
+ ",fatal_neterrors=none" },
{ 0, NULL, NULL }
};
const struct proc_nfs_info *nfs_infop;
diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c
index 7b59a40d40c0..37cb2b776435 100644
--- a/fs/nfs/sysfs.c
+++ b/fs/nfs/sysfs.c
@@ -14,6 +14,7 @@
#include <linux/rcupdate.h>
#include <linux/lockd/lockd.h>
+#include "internal.h"
#include "nfs4_fs.h"
#include "netns.h"
#include "sysfs.h"
@@ -228,6 +229,25 @@ static void shutdown_client(struct rpc_clnt *clnt)
rpc_cancel_tasks(clnt, -EIO, shutdown_match_client, NULL);
}
+/*
+ * Shut down the nfs_client only once all the superblocks
+ * have been shut down.
+ */
+static void shutdown_nfs_client(struct nfs_client *clp)
+{
+ struct nfs_server *server;
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ if (!(server->flags & NFS_MOUNT_SHUTDOWN)) {
+ rcu_read_unlock();
+ return;
+ }
+ }
+ rcu_read_unlock();
+ nfs_mark_client_ready(clp, -EIO);
+ shutdown_client(clp->cl_rpcclient);
+}
+
static ssize_t
shutdown_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
@@ -259,7 +279,6 @@ shutdown_store(struct kobject *kobj, struct kobj_attribute *attr,
server->flags |= NFS_MOUNT_SHUTDOWN;
shutdown_client(server->client);
- shutdown_client(server->nfs_client->cl_rpcclient);
if (!IS_ERR(server->client_acl))
shutdown_client(server->client_acl);
@@ -267,11 +286,44 @@ shutdown_store(struct kobject *kobj, struct kobj_attribute *attr,
if (server->nlm_host)
shutdown_client(server->nlm_host->h_rpcclnt);
out:
+ shutdown_nfs_client(server->nfs_client);
return count;
}
static struct kobj_attribute nfs_sysfs_attr_shutdown = __ATTR_RW(shutdown);
+#if IS_ENABLED(CONFIG_NFS_V4_1)
+static ssize_t
+implid_domain_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct nfs_server *server = container_of(kobj, struct nfs_server, kobj);
+ struct nfs41_impl_id *impl_id = server->nfs_client->cl_implid;
+
+ if (!impl_id || strlen(impl_id->domain) == 0)
+ return 0; /* emit nothing for an empty domain */
+ return sysfs_emit(buf, "%s\n", impl_id->domain);
+}
+
+static struct kobj_attribute nfs_sysfs_attr_implid_domain = __ATTR_RO(implid_domain);
+
+static ssize_t
+implid_name_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct nfs_server *server = container_of(kobj, struct nfs_server, kobj);
+ struct nfs41_impl_id *impl_id = server->nfs_client->cl_implid;
+
+ if (!impl_id || strlen(impl_id->name) == 0)
+ return 0; /* emit nothing for an empty name */
+ return sysfs_emit(buf, "%s\n", impl_id->name);
+}
+
+static struct kobj_attribute nfs_sysfs_attr_implid_name = __ATTR_RO(implid_name);
+
+#endif /* IS_ENABLED(CONFIG_NFS_V4_1) */
+
#define RPC_CLIENT_NAME_SIZE 64
void nfs_sysfs_link_rpc_client(struct nfs_server *server,
@@ -309,6 +361,32 @@ static struct kobj_type nfs_sb_ktype = {
.child_ns_type = nfs_netns_object_child_ns_type,
};
+#if IS_ENABLED(CONFIG_NFS_V4_1)
+static void nfs_sysfs_add_nfsv41_server(struct nfs_server *server)
+{
+ int ret;
+
+ if (!server->nfs_client->cl_implid)
+ return;
+
+ ret = sysfs_create_file_ns(&server->kobj, &nfs_sysfs_attr_implid_domain.attr,
+ nfs_netns_server_namespace(&server->kobj));
+ if (ret < 0)
+ pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n",
+ server->s_sysfs_id, ret);
+
+ ret = sysfs_create_file_ns(&server->kobj, &nfs_sysfs_attr_implid_name.attr,
+ nfs_netns_server_namespace(&server->kobj));
+ if (ret < 0)
+ pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n",
+ server->s_sysfs_id, ret);
+}
+#else /* CONFIG_NFS_V4_1 */
+static inline void nfs_sysfs_add_nfsv41_server(struct nfs_server *server)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
+
void nfs_sysfs_add_server(struct nfs_server *server)
{
int ret;
@@ -325,6 +403,8 @@ void nfs_sysfs_add_server(struct nfs_server *server)
if (ret < 0)
pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n",
server->s_sysfs_id, ret);
+
+ nfs_sysfs_add_nfsv41_server(server);
}
EXPORT_SYMBOL_GPL(nfs_sysfs_add_server);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index aa3d8bea3ec0..23df8b214474 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -579,8 +579,10 @@ retry:
while (!nfs_lock_request(head)) {
ret = nfs_wait_on_request(head);
- if (ret < 0)
+ if (ret < 0) {
+ nfs_release_request(head);
return ERR_PTR(ret);
+ }
}
/* Ensure that nobody removed the request before we locked it */
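
The write.c hunk plugs a reference leak: the head request's refcount is taken before the wait, so an interrupted wait must drop it on the error path rather than returning with the count elevated. The shape of the fix, modeled with a bare counter (names invented):

    #include <stdio.h>

    struct req {
        int refcount;
    };

    static int wait_on_req(struct req *r, int interrupt)
    {
        return interrupt ? -512 : 0;    /* -ERESTARTSYS on interrupt */
    }

    static int lock_req(struct req *r, int interrupt)
    {
        r->refcount++;                  /* reference taken up front */

        int ret = wait_on_req(r, interrupt);
        if (ret < 0) {
            /* The fix: drop the reference we hold before bailing,
             * otherwise the request can never be freed. */
            r->refcount--;
            return ret;
        }
        return 0;
    }

    int main(void)
    {
        struct req r = { 0 };

        lock_req(&r, 1);
        printf("refcount after interrupted wait: %d\n", r.refcount);
        return 0;
    }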
diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
index af94e3737470..e946f75eb540 100644
--- a/fs/ntfs3/attrib.c
+++ b/fs/ntfs3/attrib.c
@@ -2664,8 +2664,9 @@ int attr_set_compress(struct ntfs_inode *ni, bool compr)
attr->nres.run_off = cpu_to_le16(run_off);
}
- /* Update data attribute flags. */
+ /* Update attribute flags. */
if (compr) {
+ attr->flags &= ~ATTR_FLAG_SPARSED;
attr->flags |= ATTR_FLAG_COMPRESSED;
attr->nres.c_unit = NTFS_LZNT_CUNIT;
} else {
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index 3f96a11804c9..9b6a3f8d2e7c 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -101,8 +101,26 @@ int ntfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
/* Allowed to change compression for empty files and for directories only. */
if (!is_dedup(ni) && !is_encrypted(ni) &&
(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
- /* Change compress state. */
- int err = ni_set_compress(inode, flags & FS_COMPR_FL);
+ int err = 0;
+ struct address_space *mapping = inode->i_mapping;
+
+ /* Write out all data and wait. */
+ filemap_invalidate_lock(mapping);
+ err = filemap_write_and_wait(mapping);
+
+ if (err >= 0) {
+ /* Change compress state. */
+ bool compr = flags & FS_COMPR_FL;
+ err = ni_set_compress(inode, compr);
+
+ /* For files change a_ops too. */
+ if (!err)
+ mapping->a_ops = compr ? &ntfs_aops_cmpr :
+ &ntfs_aops;
+ }
+
+ filemap_invalidate_unlock(mapping);
+
if (err)
return err;
}
@@ -412,6 +430,7 @@ static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
}
if (extend_init && !is_compressed(ni)) {
+ WARN_ON(ni->i_valid >= pos);
err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
if (err)
goto out;
@@ -1228,21 +1247,22 @@ static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
ssize_t ret;
int err;
- err = check_write_restriction(inode);
- if (err)
- return err;
-
- if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
- ntfs_inode_warn(inode, "direct i/o + compressed not supported");
- return -EOPNOTSUPP;
- }
-
if (!inode_trylock(inode)) {
if (iocb->ki_flags & IOCB_NOWAIT)
return -EAGAIN;
inode_lock(inode);
}
+ ret = check_write_restriction(inode);
+ if (ret)
+ goto out;
+
+ if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
+ ntfs_inode_warn(inode, "direct i/o + compressed not supported");
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
ret = generic_write_checks(iocb, from);
if (ret <= 0)
goto out;
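
The reorder above moves check_write_restriction() and the compressed+O_DIRECT rejection inside the inode lock, so the state they inspect cannot change between the check and the write. The general lock-then-validate pattern (names invented):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct file_state {
        pthread_mutex_t lock;
        bool write_blocked;     /* e.g. compressed + O_DIRECT */
    };

    static int do_write(struct file_state *f)
    {
        int ret = 0;

        pthread_mutex_lock(&f->lock);
        /* Validate only after taking the lock: another thread can no
         * longer flip write_blocked between the check and the write. */
        if (f->write_blocked) {
            ret = -95;          /* -EOPNOTSUPP */
            goto out;
        }
        printf("writing\n");
    out:
        pthread_mutex_unlock(&f->lock);
        return ret;
    }

    int main(void)
    {
        struct file_state f = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
        };

        printf("%d\n", do_write(&f));
        f.write_blocked = true;
        printf("%d\n", do_write(&f));
        return 0;
    }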
diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
index 5df6a0b5add9..b7a83200f2cc 100644
--- a/fs/ntfs3/frecord.c
+++ b/fs/ntfs3/frecord.c
@@ -281,63 +281,6 @@ struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
}
/*
- * ni_load_attr - Load attribute that contains given VCN.
- */
-struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
- const __le16 *name, u8 name_len, CLST vcn,
- struct mft_inode **pmi)
-{
- struct ATTR_LIST_ENTRY *le;
- struct ATTRIB *attr;
- struct mft_inode *mi;
- struct ATTR_LIST_ENTRY *next;
-
- if (!ni->attr_list.size) {
- if (pmi)
- *pmi = &ni->mi;
- return mi_find_attr(ni, &ni->mi, NULL, type, name, name_len,
- NULL);
- }
-
- le = al_find_ex(ni, NULL, type, name, name_len, NULL);
- if (!le)
- return NULL;
-
- /*
- * Unfortunately ATTR_LIST_ENTRY contains only start VCN.
- * So to find the ATTRIB segment that contains 'vcn' we should
- * enumerate some entries.
- */
- if (vcn) {
- for (;; le = next) {
- next = al_find_ex(ni, le, type, name, name_len, NULL);
- if (!next || le64_to_cpu(next->vcn) > vcn)
- break;
- }
- }
-
- if (ni_load_mi(ni, le, &mi))
- return NULL;
-
- if (pmi)
- *pmi = mi;
-
- attr = mi_find_attr(ni, mi, NULL, type, name, name_len, &le->id);
- if (!attr)
- return NULL;
-
- if (!attr->non_res)
- return attr;
-
- if (le64_to_cpu(attr->nres.svcn) <= vcn &&
- vcn <= le64_to_cpu(attr->nres.evcn))
- return attr;
-
- _ntfs_bad_inode(&ni->vfs_inode);
- return NULL;
-}
-
-/*
* ni_load_all_mi - Load all subrecords.
*/
int ni_load_all_mi(struct ntfs_inode *ni)
@@ -3434,10 +3377,12 @@ int ni_set_compress(struct inode *inode, bool compr)
}
ni->std_fa = std->fa;
- if (compr)
+ if (compr) {
+ std->fa &= ~FILE_ATTRIBUTE_SPARSE_FILE;
std->fa |= FILE_ATTRIBUTE_COMPRESSED;
- else
+ } else {
std->fa &= ~FILE_ATTRIBUTE_COMPRESSED;
+ }
if (ni->std_fa != std->fa) {
ni->std_fa = std->fa;
diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
index 938d351ebac7..df81f1f7330c 100644
--- a/fs/ntfs3/fsntfs.c
+++ b/fs/ntfs3/fsntfs.c
@@ -1035,34 +1035,6 @@ struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block)
return NULL;
}
-int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
-{
- struct block_device *bdev = sb->s_bdev;
- u32 blocksize = sb->s_blocksize;
- u64 block = lbo >> sb->s_blocksize_bits;
- u32 off = lbo & (blocksize - 1);
- u32 op = blocksize - off;
-
- for (; bytes; block += 1, off = 0, op = blocksize) {
- struct buffer_head *bh = __bread(bdev, block, blocksize);
-
- if (!bh)
- return -EIO;
-
- if (op > bytes)
- op = bytes;
-
- memcpy(buffer, bh->b_data + off, op);
-
- put_bh(bh);
-
- bytes -= op;
- buffer = Add2Ptr(buffer, op);
- }
-
- return 0;
-}
-
int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
const void *buf, int wait)
{
diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
index 7eb9fae22f8d..78d20e4baa2c 100644
--- a/fs/ntfs3/index.c
+++ b/fs/ntfs3/index.c
@@ -618,7 +618,7 @@ static bool index_hdr_check(const struct INDEX_HDR *hdr, u32 bytes)
u32 off = le32_to_cpu(hdr->de_off);
if (!IS_ALIGNED(off, 8) || tot > bytes || end > tot ||
- off + sizeof(struct NTFS_DE) > end) {
+ size_add(off, sizeof(struct NTFS_DE)) > end) {
/* incorrect index buffer. */
return false;
}
@@ -736,7 +736,7 @@ fill_table:
if (end > total)
return NULL;
- if (off + sizeof(struct NTFS_DE) > end)
+ if (size_add(off, sizeof(struct NTFS_DE)) > end)
return NULL;
e = Add2Ptr(hdr, off);
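
Replacing `off + sizeof(struct NTFS_DE)` with size_add() matters because `off` comes straight from on-disk metadata: a crafted value near SIZE_MAX makes the plain addition wrap to a small number and slip past the bound check. A saturating helper in the spirit of the kernel's size_add() (simplified; the real one lives in linux/overflow.h):

    #include <stdint.h>
    #include <stdio.h>

    /* Saturate to SIZE_MAX on overflow instead of wrapping. */
    static size_t size_add(size_t a, size_t b)
    {
        size_t sum = a + b;

        return sum < a ? SIZE_MAX : sum;
    }

    int main(void)
    {
        size_t end = 4096;              /* bytes valid in the buffer */
        size_t off = SIZE_MAX - 8;      /* hostile on-disk offset */
        size_t entry = 16;              /* sizeof(struct NTFS_DE), say */

        /* Wrapping check: SIZE_MAX - 8 + 16 wraps to 7 and "passes". */
        printf("wrapping:   %s\n", off + entry > end ? "reject" : "ACCEPT");
        /* Saturating check: SIZE_MAX > end, correctly rejected. */
        printf("saturating: %s\n",
               size_add(off, entry) > end ? "reject" : "ACCEPT");
        return 0;
    }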
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index a1e11228dafd..3e2957a1e360 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -1025,46 +1025,6 @@ int ntfs_sync_inode(struct inode *inode)
}
/*
- * writeback_inode - Helper function for ntfs_flush_inodes().
- *
- * This writes both the inode and the file data blocks, waiting
- * for in flight data blocks before the start of the call. It
- * does not wait for any io started during the call.
- */
-static int writeback_inode(struct inode *inode)
-{
- int ret = sync_inode_metadata(inode, 0);
-
- if (!ret)
- ret = filemap_fdatawrite(inode->i_mapping);
- return ret;
-}
-
-/*
- * ntfs_flush_inodes
- *
- * Write data and metadata corresponding to i1 and i2. The io is
- * started but we do not wait for any of it to finish.
- *
- * filemap_flush() is used for the block device, so if there is a dirty
- * page for a block already in flight, we will not wait and start the
- * io over again.
- */
-int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
- struct inode *i2)
-{
- int ret = 0;
-
- if (i1)
- ret = writeback_inode(i1);
- if (!ret && i2)
- ret = writeback_inode(i2);
- if (!ret)
- ret = filemap_flush(sb->s_bdev_file->f_mapping);
- return ret;
-}
-
-/*
* Helper function to read file.
*/
int inode_read_data(struct inode *inode, void *data, size_t bytes)
diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
index 241f2ffdd920..1ff13b6f9613 100644
--- a/fs/ntfs3/ntfs.h
+++ b/fs/ntfs3/ntfs.h
@@ -717,7 +717,7 @@ static inline struct NTFS_DE *hdr_first_de(const struct INDEX_HDR *hdr)
struct NTFS_DE *e;
u16 esize;
- if (de_off >= used || de_off + sizeof(struct NTFS_DE) > used )
+ if (de_off >= used || size_add(de_off, sizeof(struct NTFS_DE)) > used)
return NULL;
e = Add2Ptr(hdr, de_off);
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index 382820464dee..d628977e2556 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -530,9 +530,6 @@ struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
struct ATTR_LIST_ENTRY **le,
struct mft_inode **mi);
-struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
- const __le16 *name, u8 name_len, CLST vcn,
- struct mft_inode **pmi);
int ni_load_all_mi(struct ntfs_inode *ni);
bool ni_add_subrecord(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi);
int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
@@ -619,7 +616,6 @@ enum NTFS_DIRTY_FLAGS {
NTFS_DIRTY_ERROR = 2,
};
int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty);
-int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer);
int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
const void *buffer, int wait);
int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
@@ -717,8 +713,6 @@ int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
u32 len, u32 copied, struct folio *folio, void *fsdata);
int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc);
int ntfs_sync_inode(struct inode *inode);
-int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
- struct inode *i2);
int inode_read_data(struct inode *inode, void *data, size_t bytes);
int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const struct cpu_str *uni,
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index 6a0f6b0a3ab2..920a1ab47b63 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -555,6 +555,55 @@ static const struct proc_ops ntfs3_label_fops = {
.proc_write = ntfs3_label_write,
};
+static void ntfs_create_procdir(struct super_block *sb)
+{
+ struct proc_dir_entry *e;
+
+ if (!proc_info_root)
+ return;
+
+ e = proc_mkdir(sb->s_id, proc_info_root);
+ if (e) {
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
+
+ proc_create_data("volinfo", 0444, e,
+ &ntfs3_volinfo_fops, sb);
+ proc_create_data("label", 0644, e,
+ &ntfs3_label_fops, sb);
+ sbi->procdir = e;
+ }
+}
+
+static void ntfs_remove_procdir(struct super_block *sb)
+{
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
+
+ if (!sbi->procdir)
+ return;
+
+ remove_proc_entry("label", sbi->procdir);
+ remove_proc_entry("volinfo", sbi->procdir);
+ remove_proc_entry(sb->s_id, proc_info_root);
+ sbi->procdir = NULL;
+}
+
+static void ntfs_create_proc_root(void)
+{
+ proc_info_root = proc_mkdir("fs/ntfs3", NULL);
+}
+
+static void ntfs_remove_proc_root(void)
+{
+ if (proc_info_root) {
+ remove_proc_entry("fs/ntfs3", NULL);
+ proc_info_root = NULL;
+ }
+}
+#else
+static void ntfs_create_procdir(struct super_block *sb) {}
+static void ntfs_remove_procdir(struct super_block *sb) {}
+static void ntfs_create_proc_root(void) {}
+static void ntfs_remove_proc_root(void) {}
#endif
static struct kmem_cache *ntfs_inode_cachep;
@@ -644,15 +693,7 @@ static void ntfs_put_super(struct super_block *sb)
{
struct ntfs_sb_info *sbi = sb->s_fs_info;
-#ifdef CONFIG_PROC_FS
- // Remove /proc/fs/ntfs3/..
- if (sbi->procdir) {
- remove_proc_entry("label", sbi->procdir);
- remove_proc_entry("volinfo", sbi->procdir);
- remove_proc_entry(sb->s_id, proc_info_root);
- sbi->procdir = NULL;
- }
-#endif
+ ntfs_remove_procdir(sb);
/* Mark rw ntfs as clear, if possible. */
ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
@@ -1590,20 +1631,7 @@ load_root:
kfree(boot2);
}
-#ifdef CONFIG_PROC_FS
- /* Create /proc/fs/ntfs3/.. */
- if (proc_info_root) {
- struct proc_dir_entry *e = proc_mkdir(sb->s_id, proc_info_root);
- static_assert((S_IRUGO | S_IWUSR) == 0644);
- if (e) {
- proc_create_data("volinfo", S_IRUGO, e,
- &ntfs3_volinfo_fops, sb);
- proc_create_data("label", S_IRUGO | S_IWUSR, e,
- &ntfs3_label_fops, sb);
- sbi->procdir = e;
- }
- }
-#endif
+ ntfs_create_procdir(sb);
if (is_legacy_ntfs(sb))
sb->s_flags |= SB_RDONLY;
@@ -1853,14 +1881,11 @@ static int __init init_ntfs_fs(void)
if (IS_ENABLED(CONFIG_NTFS3_LZX_XPRESS))
pr_info("ntfs3: Read-only LZX/Xpress compression included\n");
-#ifdef CONFIG_PROC_FS
- /* Create "/proc/fs/ntfs3" */
- proc_info_root = proc_mkdir("fs/ntfs3", NULL);
-#endif
+ ntfs_create_proc_root();
err = ntfs3_init_bitmap();
if (err)
- return err;
+ goto out2;
ntfs_inode_cachep = kmem_cache_create(
"ntfs_inode_cache", sizeof(struct ntfs_inode), 0,
@@ -1880,6 +1905,8 @@ out:
kmem_cache_destroy(ntfs_inode_cachep);
out1:
ntfs3_exit_bitmap();
+out2:
+ ntfs_remove_proc_root();
return err;
}
@@ -1890,11 +1917,7 @@ static void __exit exit_ntfs_fs(void)
unregister_filesystem(&ntfs_fs_type);
unregister_as_ntfs_legacy();
ntfs3_exit_bitmap();
-
-#ifdef CONFIG_PROC_FS
- if (proc_info_root)
- remove_proc_entry("fs/ntfs3", NULL);
-#endif
+ ntfs_remove_proc_root();
}
MODULE_LICENSE("GPL");
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 4414743b638e..b8ac85b548c7 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1803,6 +1803,14 @@ static int __ocfs2_find_path(struct ocfs2_caching_info *ci,
el = root_el;
while (el->l_tree_depth) {
+ if (unlikely(le16_to_cpu(el->l_tree_depth) >= OCFS2_MAX_PATH_DEPTH)) {
+ ocfs2_error(ocfs2_metadata_cache_get_super(ci),
+ "Owner %llu has invalid tree depth %u in extent list\n",
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ le16_to_cpu(el->l_tree_depth));
+ ret = -EROFS;
+ goto out;
+ }
if (le16_to_cpu(el->l_next_free_rec) == 0) {
ocfs2_error(ocfs2_metadata_cache_get_super(ci),
"Owner %llu has empty extent list at depth %u\n",
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 5bbeb6fbb1ac..40b6bce12951 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -46,7 +46,6 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh = NULL;
struct buffer_head *buffer_cache_bh = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- void *kaddr;
trace_ocfs2_symlink_get_block(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -91,17 +90,11 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
* could've happened. Since we've got a reference on
* the bh, even if it commits while we're doing the
* copy, the data is still good. */
- if (buffer_jbd(buffer_cache_bh)
- && ocfs2_inode_is_new(inode)) {
- kaddr = kmap_atomic(bh_result->b_page);
- if (!kaddr) {
- mlog(ML_ERROR, "couldn't kmap!\n");
- goto bail;
- }
- memcpy(kaddr + (bh_result->b_size * iblock),
- buffer_cache_bh->b_data,
- bh_result->b_size);
- kunmap_atomic(kaddr);
+ if (buffer_jbd(buffer_cache_bh) && ocfs2_inode_is_new(inode)) {
+ memcpy_to_folio(bh_result->b_folio,
+ bh_result->b_size * iblock,
+ buffer_cache_bh->b_data,
+ bh_result->b_size);
set_buffer_uptodate(bh_result);
}
brelse(buffer_cache_bh);
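
The ocfs2 hunk swaps an open-coded kmap_atomic()/memcpy()/kunmap_atomic() sequence for memcpy_to_folio(), which handles the mapping, the copy, and any page-boundary handling within the folio in one call. A model of the helper's contract over a plain buffer (the kernel version in linux/highmem.h also kmaps each page and flushes the dcache):

    #include <stdio.h>
    #include <string.h>

    #define FOLIO_SIZE 8192

    struct folio {
        unsigned char data[FOLIO_SIZE]; /* stands in for the mapped pages */
    };

    /* Copy len bytes into the folio at the given byte offset. */
    static void memcpy_to_folio(struct folio *folio, size_t offset,
                                const void *src, size_t len)
    {
        memcpy(folio->data + offset, src, len);
    }

    int main(void)
    {
        struct folio f = { { 0 } };
        const char blk[] = "block contents";

        /* Like the ocfs2 call: offset = block size * block index. */
        memcpy_to_folio(&f, 512 * 3, blk, sizeof(blk));
        printf("%s\n", f.data + 512 * 3);
        return 0;
    }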
@@ -920,7 +913,7 @@ static void ocfs2_write_failure(struct inode *inode,
ocfs2_jbd2_inode_add_write(wc->w_handle, inode,
user_pos, user_len);
- block_commit_write(&folio->page, from, to);
+ block_commit_write(folio, from, to);
}
}
}
@@ -2012,7 +2005,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping, loff_t pos,
ocfs2_jbd2_inode_add_write(handle, inode,
start_byte, length);
}
- block_commit_write(&folio->page, from, to);
+ block_commit_write(folio, from, to);
}
}
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index e54f2c4b5a90..2056cf08ac1e 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -813,7 +813,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
/* must not update i_size! */
- block_commit_write(&folio->page, block_start + 1, block_start + 1);
+ block_commit_write(folio, block_start + 1, block_start + 1);
}
/*
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 15d9acd456ec..e85b1ccf81be 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -273,7 +273,7 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type,
if (new)
memset(bh->b_data, 0, sb->s_blocksize);
memcpy(bh->b_data + offset, data, len);
- flush_dcache_page(bh->b_page);
+ flush_dcache_folio(bh->b_folio);
set_buffer_uptodate(bh);
unlock_buffer(bh);
ocfs2_set_buffer_uptodate(INODE_CACHE(gqinode), bh);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 5538c4aee8fa..b0d4e1908b22 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -416,7 +416,7 @@ static const struct file_operations proc_pid_cmdline_ops = {
#ifdef CONFIG_KALLSYMS
/*
* Provides a wchan file via kallsyms in a proper one-value-per-file format.
- * Returns the resolved symbol. If that fails, simply return the address.
+ * Returns the resolved symbol to user space.
*/
static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 77a517f91821..96122e91c645 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -157,6 +157,7 @@ unsigned name_to_int(const struct qstr *qstr);
/* Worst case buffer size needed for holding an integer. */
#define PROC_NUMBUF 13
+#ifdef CONFIG_PAGE_MAPCOUNT
/**
* folio_precise_page_mapcount() - Number of mappings of this folio page.
* @folio: The folio.
@@ -187,7 +188,49 @@ static inline int folio_precise_page_mapcount(struct folio *folio,
return mapcount;
}
+#else /* !CONFIG_PAGE_MAPCOUNT */
+static inline int folio_precise_page_mapcount(struct folio *folio,
+ struct page *page)
+{
+ BUILD_BUG();
+}
+#endif /* CONFIG_PAGE_MAPCOUNT */
+/**
+ * folio_average_page_mapcount() - Average number of mappings per page in this
+ * folio
+ * @folio: The folio.
+ *
+ * The average number of user page table entries that reference each page in
+ * this folio as tracked via the RMAP: either referenced directly (PTE) or
+ * as part of a larger area that covers this page (e.g., PMD).
+ *
+ * The average is calculated by rounding to the nearest integer; however,
+ * to avoid duplicated code in current callers, the average is at least
+ * 1 if any page of the folio is mapped.
+ *
+ * Returns: The average number of mappings per page in this folio.
+ */
+static inline int folio_average_page_mapcount(struct folio *folio)
+{
+ int mapcount, entire_mapcount, avg;
+
+ if (!folio_test_large(folio))
+ return atomic_read(&folio->_mapcount) + 1;
+
+ mapcount = folio_large_mapcount(folio);
+ if (unlikely(mapcount <= 0))
+ return 0;
+ entire_mapcount = folio_entire_mapcount(folio);
+ if (mapcount <= entire_mapcount)
+ return entire_mapcount;
+ mapcount -= entire_mapcount;
+
+ /* Round to closest integer ... */
+ avg = ((unsigned int)mapcount + folio_large_nr_pages(folio) / 2) >> folio_large_order(folio);
+ /* ... but return at least 1. */
+ return max_t(int, avg + entire_mapcount, 1);
+}
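The rounding in the helper above can be checked in isolation. A minimal standalone sketch of the same arithmetic (not kernel code; the function name and parameters are illustrative):

/* Average per-page mapcount for a large folio of the given order,
 * rounded to the nearest integer but clamped to at least 1. */
#include <stdio.h>

static int average_page_mapcount(unsigned int mapcount,
				 unsigned int entire_mapcount,
				 unsigned int order)
{
	unsigned int nr_pages = 1u << order;
	int avg;

	if (mapcount <= entire_mapcount)
		return entire_mapcount;
	mapcount -= entire_mapcount;
	/* add nr_pages / 2 before shifting to round to nearest */
	avg = (mapcount + nr_pages / 2) >> order;
	return avg + entire_mapcount > 1 ? avg + entire_mapcount : 1;
}

int main(void)
{
	/* order-2 folio (4 pages): 5 PTE mappings round down to 1 */
	printf("%d\n", average_page_mapcount(5, 0, 2));
	/* 7 PTE mappings round up to 2 */
	printf("%d\n", average_page_mapcount(7, 0, 2));
	return 0;
}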
/*
* array.c
*/
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 8ba9b1472390..83be312159c9 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -162,6 +162,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
show_val_kb(m, "Unaccepted: ",
global_zone_page_state(NR_UNACCEPTED));
#endif
+ show_val_kb(m, "Balloon: ",
+ global_node_page_state(NR_BALLOON_PAGES));
hugetlb_report_meminfo(m);
diff --git a/fs/proc/page.c b/fs/proc/page.c
index a55f5acefa97..23fc771100ae 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -67,9 +67,14 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
* memmaps that were actually initialized.
*/
page = pfn_to_online_page(pfn);
- if (page)
- mapcount = folio_precise_page_mapcount(page_folio(page),
- page);
+ if (page) {
+ struct folio *folio = page_folio(page);
+
+ if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
+ mapcount = folio_precise_page_mapcount(folio, page);
+ else
+ mapcount = folio_average_page_mapcount(folio);
+ }
if (put_user(mapcount, out)) {
ret = -EFAULT;
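Whichever mapcount flavor the kernel was built with, user space consumes /proc/kpagecount the same way: one u64 per PFN. A minimal reader sketch (requires root; error handling trimmed):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>

int main(int argc, char **argv)
{
	uint64_t pfn, count;
	int fd;

	if (argc != 2)
		return 1;
	pfn = strtoull(argv[1], NULL, 0);

	fd = open("/proc/kpagecount", O_RDONLY);
	if (fd < 0)
		return 1;
	if (pread(fd, &count, sizeof(count), pfn * sizeof(count)) != sizeof(count))
		return 1;
	printf("pfn %llu: mapcount %llu\n",
	       (unsigned long long)pfn, (unsigned long long)count);
	close(fd);
	return 0;
}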
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f02cd362309a..994cde10e3f4 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -707,6 +707,8 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
struct folio *folio = page_folio(page);
int i, nr = compound ? compound_nr(page) : 1;
unsigned long size = nr * PAGE_SIZE;
+ bool exclusive;
+ int mapcount;
/*
* First accumulate quantities that depend only on |size| and the type
@@ -747,18 +749,29 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
dirty, locked, present);
return;
}
+
+ if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
+ mapcount = folio_average_page_mapcount(folio);
+ exclusive = !folio_maybe_mapped_shared(folio);
+ }
+
/*
* We obtain a snapshot of the mapcount. Without holding the folio lock
* this snapshot can be slightly wrong as we cannot always read the
* mapcount atomically.
*/
for (i = 0; i < nr; i++, page++) {
- int mapcount = folio_precise_page_mapcount(folio, page);
unsigned long pss = PAGE_SIZE << PSS_SHIFT;
+
+ if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) {
+ mapcount = folio_precise_page_mapcount(folio, page);
+ exclusive = mapcount < 2;
+ }
+
if (mapcount >= 2)
pss /= mapcount;
smaps_page_accumulate(mss, folio, PAGE_SIZE, pss,
- dirty, locked, mapcount < 2);
+ dirty, locked, exclusive);
}
}
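The PSS accumulation above keeps values in fixed point so that dividing each page's share by its mapcount loses less precision. A standalone sketch of the arithmetic (not kernel code; PSS_SHIFT matches the value used in task_mmu.c):

#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PSS_SHIFT 12

int main(void)
{
	unsigned long long pss = 0;
	int mapcounts[4] = { 1, 2, 2, 3 };	/* per-page snapshots */

	for (int i = 0; i < 4; i++) {
		unsigned long long share = PAGE_SIZE << PSS_SHIFT;

		if (mapcounts[i] >= 2)
			share /= mapcounts[i];
		pss += share;
	}
	/* shift back to bytes when reporting, as smaps does */
	printf("pss: %llu bytes\n", pss >> PSS_SHIFT);
	return 0;
}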
@@ -1023,7 +1036,7 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
if (folio) {
/* We treat non-present entries as "maybe shared". */
- if (!present || folio_likely_mapped_shared(folio) ||
+ if (!present || folio_maybe_mapped_shared(folio) ||
hugetlb_pmd_shared(pte))
mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
else
@@ -1632,6 +1645,7 @@ struct pagemapread {
#define PM_SOFT_DIRTY BIT_ULL(55)
#define PM_MMAP_EXCLUSIVE BIT_ULL(56)
#define PM_UFFD_WP BIT_ULL(57)
+#define PM_GUARD_REGION BIT_ULL(58)
#define PM_FILE BIT_ULL(61)
#define PM_SWAP BIT_ULL(62)
#define PM_PRESENT BIT_ULL(63)
@@ -1651,6 +1665,13 @@ static int add_to_pagemap(pagemap_entry_t *pme, struct pagemapread *pm)
return 0;
}
+static bool __folio_page_mapped_exclusively(struct folio *folio, struct page *page)
+{
+ if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
+ return folio_precise_page_mapcount(folio, page) == 1;
+ return !folio_maybe_mapped_shared(folio);
+}
+
static int pagemap_pte_hole(unsigned long start, unsigned long end,
__always_unused int depth, struct mm_walk *walk)
{
@@ -1732,6 +1753,8 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
page = pfn_swap_entry_to_page(entry);
if (pte_marker_entry_uffd_wp(entry))
flags |= PM_UFFD_WP;
+ if (is_guard_swp_entry(entry))
+ flags |= PM_GUARD_REGION;
}
if (page) {
@@ -1739,7 +1762,7 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
if (!folio_test_anon(folio))
flags |= PM_FILE;
if ((flags & PM_PRESENT) &&
- folio_precise_page_mapcount(folio, page) == 1)
+ __folio_page_mapped_exclusively(folio, page))
flags |= PM_MMAP_EXCLUSIVE;
}
if (vma->vm_flags & VM_SOFTDIRTY)
@@ -1814,7 +1837,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
pagemap_entry_t pme;
if (folio && (flags & PM_PRESENT) &&
- folio_precise_page_mapcount(folio, page + idx) == 1)
+ __folio_page_mapped_exclusively(folio, page))
cur_flags |= PM_MMAP_EXCLUSIVE;
pme = make_pme(frame, cur_flags);
@@ -1879,7 +1902,7 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
if (!folio_test_anon(folio))
flags |= PM_FILE;
- if (!folio_likely_mapped_shared(folio) &&
+ if (!folio_maybe_mapped_shared(folio) &&
!hugetlb_pmd_shared(ptep))
flags |= PM_MMAP_EXCLUSIVE;
@@ -1931,7 +1954,8 @@ static const struct mm_walk_ops pagemap_ops = {
* Bit 55 pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
* Bit 56 page exclusively mapped
* Bit 57 pte is uffd-wp write-protected
- * Bits 58-60 zero
+ * Bit 58 pte is a guard region
+ * Bits 59-60 zero
* Bit 61 page is file-page or shared-anon
* Bit 62 page swapped
* Bit 63 page present
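Given the bit layout documented above, user space can test the new guard-region bit alongside the existing present/swap bits. A minimal sketch probing one page of the caller's own address space:

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
	uint64_t ent;
	char buf;	/* probe the stack page holding this variable */
	uintptr_t vaddr = (uintptr_t)&buf;
	long psize = sysconf(_SC_PAGESIZE);
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return 1;
	if (pread(fd, &ent, sizeof(ent),
		  (vaddr / psize) * sizeof(ent)) != sizeof(ent))
		return 1;
	printf("present=%d swap=%d guard=%d\n",
	       (int)(ent >> 63 & 1), (int)(ent >> 62 & 1),
	       (int)(ent >> 58 & 1));
	close(fd);
	return 0;
}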
@@ -2455,22 +2479,19 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
spinlock_t *ptl;
int ret;
- arch_enter_lazy_mmu_mode();
-
ret = pagemap_scan_thp_entry(pmd, start, end, walk);
- if (ret != -ENOENT) {
- arch_leave_lazy_mmu_mode();
+ if (ret != -ENOENT)
return ret;
- }
ret = 0;
start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
if (!pte) {
- arch_leave_lazy_mmu_mode();
walk->action = ACTION_AGAIN;
return 0;
}
+ arch_enter_lazy_mmu_mode();
+
if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) {
/* Fast path for performing exclusive WP */
for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
@@ -2539,8 +2560,8 @@ flush_and_return:
if (flush_end)
flush_tlb_range(vma, start, addr);
- pte_unmap_unlock(start_pte, ptl);
arch_leave_lazy_mmu_mode();
+ pte_unmap_unlock(start_pte, ptl);
cond_resched();
return ret;
@@ -2855,7 +2876,12 @@ static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
unsigned long nr_pages)
{
struct folio *folio = page_folio(page);
- int count = folio_precise_page_mapcount(folio, page);
+ int count;
+
+ if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
+ count = folio_precise_page_mapcount(folio, page);
+ else
+ count = folio_average_page_mapcount(folio);
md->pages += nr_pages;
if (pte_dirty || folio_test_dirty(folio))
diff --git a/fs/smb/server/auth.c b/fs/smb/server/auth.c
index 00b31cf86462..83caa3849749 100644
--- a/fs/smb/server/auth.c
+++ b/fs/smb/server/auth.c
@@ -1016,9 +1016,9 @@ static int ksmbd_get_encryption_key(struct ksmbd_work *work, __u64 ses_id,
ses_enc_key = enc ? sess->smb3encryptionkey :
sess->smb3decryptionkey;
- if (enc)
- ksmbd_user_session_get(sess);
memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
+ if (!enc)
+ ksmbd_user_session_put(sess);
return 0;
}
diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
index 91c2318639e7..14620e147dda 100644
--- a/fs/smb/server/connection.h
+++ b/fs/smb/server/connection.h
@@ -27,6 +27,7 @@ enum {
KSMBD_SESS_EXITING,
KSMBD_SESS_NEED_RECONNECT,
KSMBD_SESS_NEED_NEGOTIATE,
+ KSMBD_SESS_NEED_SETUP,
KSMBD_SESS_RELEASING
};
@@ -187,6 +188,11 @@ static inline bool ksmbd_conn_need_negotiate(struct ksmbd_conn *conn)
return READ_ONCE(conn->status) == KSMBD_SESS_NEED_NEGOTIATE;
}
+static inline bool ksmbd_conn_need_setup(struct ksmbd_conn *conn)
+{
+ return READ_ONCE(conn->status) == KSMBD_SESS_NEED_SETUP;
+}
+
static inline bool ksmbd_conn_need_reconnect(struct ksmbd_conn *conn)
{
return READ_ONCE(conn->status) == KSMBD_SESS_NEED_RECONNECT;
@@ -217,6 +223,11 @@ static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_conn *conn)
WRITE_ONCE(conn->status, KSMBD_SESS_NEED_NEGOTIATE);
}
+static inline void ksmbd_conn_set_need_setup(struct ksmbd_conn *conn)
+{
+ WRITE_ONCE(conn->status, KSMBD_SESS_NEED_SETUP);
+}
+
static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_conn *conn)
{
WRITE_ONCE(conn->status, KSMBD_SESS_NEED_RECONNECT);
diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
index 53d308f331af..3f45f28f6f0f 100644
--- a/fs/smb/server/mgmt/user_session.c
+++ b/fs/smb/server/mgmt/user_session.c
@@ -181,7 +181,7 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn)
down_write(&sessions_table_lock);
down_write(&conn->session_lock);
xa_for_each(&conn->sessions, id, sess) {
- if (atomic_read(&sess->refcnt) == 0 &&
+ if (atomic_read(&sess->refcnt) <= 1 &&
(sess->state != SMB2_SESSION_VALID ||
time_after(jiffies,
sess->last_active + SMB2_SESSION_TIMEOUT))) {
@@ -233,7 +233,8 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
down_write(&conn->session_lock);
xa_erase(&conn->sessions, sess->id);
up_write(&conn->session_lock);
- ksmbd_session_destroy(sess);
+ if (atomic_dec_and_test(&sess->refcnt))
+ ksmbd_session_destroy(sess);
}
}
}
@@ -252,7 +253,8 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
if (xa_empty(&sess->ksmbd_chann_list)) {
xa_erase(&conn->sessions, sess->id);
hash_del(&sess->hlist);
- ksmbd_session_destroy(sess);
+ if (atomic_dec_and_test(&sess->refcnt))
+ ksmbd_session_destroy(sess);
}
}
up_write(&conn->session_lock);
@@ -328,8 +330,8 @@ void ksmbd_user_session_put(struct ksmbd_session *sess)
if (atomic_read(&sess->refcnt) <= 0)
WARN_ON(1);
- else
- atomic_dec(&sess->refcnt);
+ else if (atomic_dec_and_test(&sess->refcnt))
+ ksmbd_session_destroy(sess);
}
struct preauth_session *ksmbd_preauth_session_alloc(struct ksmbd_conn *conn,
@@ -372,13 +374,13 @@ void destroy_previous_session(struct ksmbd_conn *conn,
ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_RECONNECT);
err = ksmbd_conn_wait_idle_sess_id(conn, id);
if (err) {
- ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE);
+ ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_SETUP);
goto out;
}
ksmbd_destroy_file_table(&prev_sess->file_table);
prev_sess->state = SMB2_SESSION_EXPIRED;
- ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE);
+ ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_SETUP);
ksmbd_launch_ksmbd_durable_scavenger();
out:
up_write(&conn->session_lock);
@@ -436,7 +438,7 @@ static struct ksmbd_session *__session_create(int protocol)
xa_init(&sess->rpc_handle_list);
sess->sequence_number = 1;
rwlock_init(&sess->tree_conns_lock);
- atomic_set(&sess->refcnt, 1);
+ atomic_set(&sess->refcnt, 2);
ret = __init_smb2_session(sess);
if (ret)
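The change above creates the session with a refcount of 2 (one reference held by the sessions table, one by the creator) and moves destruction behind the final put. A standalone sketch of that ownership pattern (not ksmbd code):

#include <stdio.h>
#include <stdatomic.h>
#include <stdlib.h>

struct sess {
	atomic_int refcnt;
};

static struct sess *sess_create(void)
{
	struct sess *s = malloc(sizeof(*s));

	atomic_init(&s->refcnt, 2);	/* table ref + creator ref */
	return s;
}

static void sess_put(struct sess *s)
{
	/* fetch_sub returning 1 means we dropped the last reference */
	if (atomic_fetch_sub(&s->refcnt, 1) == 1) {
		printf("destroying session\n");
		free(s);
	}
}

int main(void)
{
	struct sess *s = sess_create();

	sess_put(s);	/* creator drops its reference */
	sess_put(s);	/* table deregisters: last ref, destroys */
	return 0;
}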
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 4ddf4300371b..d24d95d15d87 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -1249,7 +1249,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
}
conn->srv_sec_mode = le16_to_cpu(rsp->SecurityMode);
- ksmbd_conn_set_need_negotiate(conn);
+ ksmbd_conn_set_need_setup(conn);
err_out:
ksmbd_conn_unlock(conn);
@@ -1271,6 +1271,9 @@ static int alloc_preauth_hash(struct ksmbd_session *sess,
if (sess->Preauth_HashValue)
return 0;
+ if (!conn->preauth_info)
+ return -ENOMEM;
+
sess->Preauth_HashValue = kmemdup(conn->preauth_info->Preauth_HashValue,
PREAUTH_HASHVALUE_SIZE, KSMBD_DEFAULT_GFP);
if (!sess->Preauth_HashValue)
@@ -1674,6 +1677,11 @@ int smb2_sess_setup(struct ksmbd_work *work)
ksmbd_debug(SMB, "Received smb2 session setup request\n");
+ if (!ksmbd_conn_need_setup(conn) && !ksmbd_conn_good(conn)) {
+ work->send_no_response = 1;
+ return rc;
+ }
+
WORK_BUFFERS(work, req, rsp);
rsp->StructureSize = cpu_to_le16(9);
@@ -1909,7 +1917,7 @@ out_err:
if (try_delay) {
ksmbd_conn_set_need_reconnect(conn);
ssleep(5);
- ksmbd_conn_set_need_negotiate(conn);
+ ksmbd_conn_set_need_setup(conn);
}
}
smb2_set_err_rsp(work);
@@ -2235,14 +2243,15 @@ int smb2_session_logoff(struct ksmbd_work *work)
return -ENOENT;
}
- ksmbd_destroy_file_table(&sess->file_table);
down_write(&conn->session_lock);
sess->state = SMB2_SESSION_EXPIRED;
up_write(&conn->session_lock);
- ksmbd_free_user(sess->user);
- sess->user = NULL;
- ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_NEGOTIATE);
+ if (sess->user) {
+ ksmbd_free_user(sess->user);
+ sess->user = NULL;
+ }
+ ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_SETUP);
rsp->StructureSize = cpu_to_le16(4);
err = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_logoff_rsp));
diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c
index 49b128698670..5aa7a66334d9 100644
--- a/fs/smb/server/smbacl.c
+++ b/fs/smb/server/smbacl.c
@@ -270,6 +270,11 @@ static int sid_to_id(struct mnt_idmap *idmap,
return -EIO;
}
+ if (psid->num_subauth == 0) {
+ pr_err("%s: zero subauthorities!\n", __func__);
+ return -EIO;
+ }
+
if (sidtype == SIDOWNER) {
kuid_t uid;
uid_t id;
@@ -1026,7 +1031,9 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
struct dentry *parent = path->dentry->d_parent;
struct mnt_idmap *idmap = mnt_idmap(path->mnt);
int inherited_flags = 0, flags = 0, i, nt_size = 0, pdacl_size;
- int rc = 0, dacloffset, pntsd_type, pntsd_size, acl_len, aces_size;
+ int rc = 0, pntsd_type, pntsd_size, acl_len, aces_size;
+ unsigned int dacloffset;
+ size_t dacl_struct_end;
u16 num_aces, ace_cnt = 0;
char *aces_base;
bool is_dir = S_ISDIR(d_inode(path->dentry)->i_mode);
@@ -1035,8 +1042,11 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
parent, &parent_pntsd);
if (pntsd_size <= 0)
return -ENOENT;
+
dacloffset = le32_to_cpu(parent_pntsd->dacloffset);
- if (!dacloffset || (dacloffset + sizeof(struct smb_acl) > pntsd_size)) {
+ if (!dacloffset ||
+ check_add_overflow(dacloffset, sizeof(struct smb_acl), &dacl_struct_end) ||
+ dacl_struct_end > (size_t)pntsd_size) {
rc = -EINVAL;
goto free_parent_pntsd;
}
@@ -1240,7 +1250,9 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
struct smb_ntsd *pntsd = NULL;
struct smb_acl *pdacl;
struct posix_acl *posix_acls;
- int rc = 0, pntsd_size, acl_size, aces_size, pdacl_size, dacl_offset;
+ int rc = 0, pntsd_size, acl_size, aces_size, pdacl_size;
+ unsigned int dacl_offset;
+ size_t dacl_struct_end;
struct smb_sid sid;
int granted = le32_to_cpu(*pdaccess & ~FILE_MAXIMAL_ACCESS_LE);
struct smb_ace *ace;
@@ -1259,7 +1271,8 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
dacl_offset = le32_to_cpu(pntsd->dacloffset);
if (!dacl_offset ||
- (dacl_offset + sizeof(struct smb_acl) > pntsd_size))
+ check_add_overflow(dacl_offset, sizeof(struct smb_acl), &dacl_struct_end) ||
+ dacl_struct_end > (size_t)pntsd_size)
goto err_out;
pdacl = (struct smb_acl *)((char *)pntsd + le32_to_cpu(pntsd->dacloffset));
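Both hunks above replace a bare addition, which could wrap, with an explicit overflow check before the bounds comparison. A standalone sketch using the compiler builtin that check_add_overflow() wraps (names are illustrative, not ksmbd code):

#include <stdio.h>
#include <stddef.h>

static int dacl_in_bounds(unsigned int dacloffset, size_t acl_size,
			  size_t pntsd_size)
{
	size_t end;

	if (!dacloffset)
		return 0;
	if (__builtin_add_overflow(dacloffset, acl_size, &end))
		return 0;	/* offset + header wrapped around */
	return end <= pntsd_size;
}

int main(void)
{
	printf("%d\n", dacl_in_bounds(8, 16, 64));		/* 1: fits */
	printf("%d\n", dacl_in_bounds(0xffffffffu, 16, 64));	/* 0: rejected */
	return 0;
}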
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 4df2afa551dc..94e12efd92f2 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -123,7 +123,7 @@ int sysfs_move_dir_ns(struct kobject *kobj, struct kobject *new_parent_kobj,
new_parent = new_parent_kobj && new_parent_kobj->sd ?
new_parent_kobj->sd : sysfs_root_kn;
- return kernfs_rename_ns(kn, new_parent, kn->name, new_ns);
+ return kernfs_rename_ns(kn, new_parent, NULL, new_ns);
}
/**
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 6931308876c4..c3d3b079aedd 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -19,13 +19,19 @@
#include "sysfs.h"
+static struct kobject *sysfs_file_kobj(struct kernfs_node *kn)
+{
+ guard(rcu)();
+ return rcu_dereference(kn->__parent)->priv;
+}
+
/*
* Determine ktype->sysfs_ops for the given kernfs_node. This function
* must be called while holding an active reference.
*/
static const struct sysfs_ops *sysfs_file_ops(struct kernfs_node *kn)
{
- struct kobject *kobj = kn->parent->priv;
+ struct kobject *kobj = sysfs_file_kobj(kn);
if (kn->flags & KERNFS_LOCKDEP)
lockdep_assert_held(kn);
@@ -40,7 +46,7 @@ static const struct sysfs_ops *sysfs_file_ops(struct kernfs_node *kn)
static int sysfs_kf_seq_show(struct seq_file *sf, void *v)
{
struct kernfs_open_file *of = sf->private;
- struct kobject *kobj = of->kn->parent->priv;
+ struct kobject *kobj = sysfs_file_kobj(of->kn);
const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
ssize_t count;
char *buf;
@@ -78,7 +84,7 @@ static ssize_t sysfs_kf_bin_read(struct kernfs_open_file *of, char *buf,
size_t count, loff_t pos)
{
struct bin_attribute *battr = of->kn->priv;
- struct kobject *kobj = of->kn->parent->priv;
+ struct kobject *kobj = sysfs_file_kobj(of->kn);
loff_t size = file_inode(of->file)->i_size;
if (!count)
@@ -105,7 +111,7 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
size_t count, loff_t pos)
{
const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
- struct kobject *kobj = of->kn->parent->priv;
+ struct kobject *kobj = sysfs_file_kobj(of->kn);
ssize_t len;
/*
@@ -131,7 +137,7 @@ static ssize_t sysfs_kf_write(struct kernfs_open_file *of, char *buf,
size_t count, loff_t pos)
{
const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
- struct kobject *kobj = of->kn->parent->priv;
+ struct kobject *kobj = sysfs_file_kobj(of->kn);
if (!count)
return 0;
@@ -144,7 +150,7 @@ static ssize_t sysfs_kf_bin_write(struct kernfs_open_file *of, char *buf,
size_t count, loff_t pos)
{
struct bin_attribute *battr = of->kn->priv;
- struct kobject *kobj = of->kn->parent->priv;
+ struct kobject *kobj = sysfs_file_kobj(of->kn);
loff_t size = file_inode(of->file)->i_size;
if (size) {
@@ -168,7 +174,7 @@ static int sysfs_kf_bin_mmap(struct kernfs_open_file *of,
struct vm_area_struct *vma)
{
struct bin_attribute *battr = of->kn->priv;
- struct kobject *kobj = of->kn->parent->priv;
+ struct kobject *kobj = sysfs_file_kobj(of->kn);
return battr->mmap(of->file, kobj, battr, vma);
}
@@ -177,7 +183,7 @@ static loff_t sysfs_kf_bin_llseek(struct kernfs_open_file *of, loff_t offset,
int whence)
{
struct bin_attribute *battr = of->kn->priv;
- struct kobject *kobj = of->kn->parent->priv;
+ struct kobject *kobj = sysfs_file_kobj(of->kn);
if (battr->llseek)
return battr->llseek(of->file, kobj, battr, offset, whence);
@@ -494,7 +500,7 @@ EXPORT_SYMBOL_GPL(sysfs_break_active_protection);
*/
void sysfs_unbreak_active_protection(struct kernfs_node *kn)
{
- struct kobject *kobj = kn->parent->priv;
+ struct kobject *kobj = sysfs_file_kobj(kn);
kernfs_unbreak_active_protection(kn);
kernfs_put(kn);
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 412fe7c4d348..0d76c4f37b3e 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -69,7 +69,7 @@ static vm_fault_t udf_page_mkwrite(struct vm_fault *vmf)
goto out_unlock;
}
- block_commit_write(&folio->page, 0, end);
+ block_commit_write(folio, 0, end);
out_dirty:
folio_mark_dirty(folio);
folio_wait_stable(folio);
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 97c4d71115d8..d80f94346199 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -396,32 +396,6 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
goto out;
/*
- * If it's already released don't get it. This avoids to loop
- * in __get_user_pages if userfaultfd_release waits on the
- * caller of handle_userfault to release the mmap_lock.
- */
- if (unlikely(READ_ONCE(ctx->released))) {
- /*
- * Don't return VM_FAULT_SIGBUS in this case, so a non
- * cooperative manager can close the uffd after the
- * last UFFDIO_COPY, without risking to trigger an
- * involuntary SIGBUS if the process was starting the
- * userfaultfd while the userfaultfd was still armed
- * (but after the last UFFDIO_COPY). If the uffd
- * wasn't already closed when the userfault reached
- * this point, that would normally be solved by
- * userfaultfd_must_wait returning 'false'.
- *
- * If we were to return VM_FAULT_SIGBUS here, the non
- * cooperative manager would be instead forced to
- * always call UFFDIO_UNREGISTER before it can safely
- * close the uffd.
- */
- ret = VM_FAULT_NOPAGE;
- goto out;
- }
-
- /*
* Check that we can return VM_FAULT_RETRY.
*
* NOTE: it should become possible to return VM_FAULT_RETRY
@@ -457,6 +431,31 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
goto out;
+ if (unlikely(READ_ONCE(ctx->released))) {
+ /*
+ * If a concurrent release is detected, do not return
+ * VM_FAULT_SIGBUS or VM_FAULT_NOPAGE, but instead always
+ * return VM_FAULT_RETRY with the lock released proactively.
+ *
+ * If we were to return VM_FAULT_SIGBUS here, the non
+ * cooperative manager would instead be forced to always
+ * call UFFDIO_UNREGISTER before it can safely close the
+ * uffd, to avoid an involuntary SIGBUS being triggered.
+ *
+ * If we were to return VM_FAULT_NOPAGE, it would work for
+ * the fault path, in which the lock will be released
+ * later. However for GUP, faultin_page() does nothing
+ * special on NOPAGE, so GUP would spin retrying without
+ * releasing the mmap read lock, causing a possible livelock.
+ *
+ * Only VM_FAULT_RETRY makes sure the mmap lock is released
+ * immediately, so that the thread concurrently releasing
+ * the userfault can always make progress.
+ */
+ release_fault_lock(vmf);
+ goto out;
+ }
+
/* take the reference before dropping the mmap_lock */
userfaultfd_ctx_get(ctx);
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 2f53ca7e12d4..726e29b837e6 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -230,7 +230,7 @@ xfs_blockgc_queue(
rcu_read_lock();
if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
queue_delayed_work(mp->m_blockgc_wq, &pag->pag_blockgc_work,
- msecs_to_jiffies(xfs_blockgc_secs * 1000));
+ secs_to_jiffies(xfs_blockgc_secs));
rcu_read_unlock();
}
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index ce6b8ffbaa2c..ee3e0f284287 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2734,21 +2734,16 @@ xfs_mmaplock_two_inodes_and_break_dax_layout(
struct xfs_inode *ip2)
{
int error;
- bool retry;
- struct page *page;
if (ip1->i_ino > ip2->i_ino)
swap(ip1, ip2);
again:
- retry = false;
/* Lock the first inode */
xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
- error = xfs_break_dax_layouts(VFS_I(ip1), &retry);
- if (error || retry) {
+ error = xfs_break_dax_layouts(VFS_I(ip1));
+ if (error) {
xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
- if (error == 0 && retry)
- goto again;
return error;
}
@@ -2762,8 +2757,8 @@ again:
* need to unlock & lock the XFS_MMAPLOCK_EXCL which is not suitable
* for this nested lock case.
*/
- page = dax_layout_busy_page(VFS_I(ip2)->i_mapping);
- if (page && page_ref_count(page) != 1) {
+ error = dax_break_layout(VFS_I(ip2), 0, -1, NULL);
+ if (error) {
xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
goto again;
@@ -3007,21 +3002,11 @@ xfs_wait_dax_page(
int
xfs_break_dax_layouts(
- struct inode *inode,
- bool *retry)
+ struct inode *inode)
{
- struct page *page;
-
xfs_assert_ilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL);
- page = dax_layout_busy_page(inode->i_mapping);
- if (!page)
- return 0;
-
- *retry = true;
- return ___wait_var_event(&page->_refcount,
- atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
- 0, 0, xfs_wait_dax_page(inode));
+ return dax_break_layout_inode(inode, xfs_wait_dax_page);
}
int
@@ -3039,8 +3024,8 @@ xfs_break_layouts(
retry = false;
switch (reason) {
case BREAK_UNMAP:
- error = xfs_break_dax_layouts(inode, &retry);
- if (error || retry)
+ error = xfs_break_dax_layouts(inode);
+ if (error)
break;
fallthrough;
case BREAK_WRITE:
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 4bb7a99e0dc4..eae0159983ca 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -603,7 +603,7 @@ xfs_itruncate_extents(
return xfs_itruncate_extents_flags(tpp, ip, whichfork, new_size, 0);
}
-int xfs_break_dax_layouts(struct inode *inode, bool *retry);
+int xfs_break_dax_layouts(struct inode *inode);
int xfs_break_layouts(struct inode *inode, uint *iolock,
enum layout_break_reason reason);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 53944cc7af24..b2dd0c0bf509 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -768,6 +768,17 @@ xfs_fs_drop_inode(
return generic_drop_inode(inode);
}
+STATIC void
+xfs_fs_evict_inode(
+ struct inode *inode)
+{
+ if (IS_DAX(inode))
+ dax_break_layout_final(inode);
+
+ truncate_inode_pages_final(&inode->i_data);
+ clear_inode(inode);
+}
+
static void
xfs_mount_free(
struct xfs_mount *mp)
@@ -1275,6 +1286,7 @@ static const struct super_operations xfs_super_operations = {
.destroy_inode = xfs_fs_destroy_inode,
.dirty_inode = xfs_fs_dirty_inode,
.drop_inode = xfs_fs_drop_inode,
+ .evict_inode = xfs_fs_evict_inode,
.put_super = xfs_fs_put_super,
.sync_fs = xfs_fs_sync_fs,
.freeze_fs = xfs_fs_freeze,
diff --git a/fs/xfs/xfs_sysfs.c b/fs/xfs/xfs_sysfs.c
index b0857e3c1270..b7e82d85f043 100644
--- a/fs/xfs/xfs_sysfs.c
+++ b/fs/xfs/xfs_sysfs.c
@@ -569,8 +569,8 @@ retry_timeout_seconds_store(
if (val == -1)
cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
else {
- cfg->retry_timeout = msecs_to_jiffies(val * MSEC_PER_SEC);
- ASSERT(msecs_to_jiffies(val * MSEC_PER_SEC) < LONG_MAX);
+ cfg->retry_timeout = secs_to_jiffies(val);
+ ASSERT(secs_to_jiffies(val) < LONG_MAX);
}
return count;
}
@@ -687,8 +687,8 @@ xfs_error_sysfs_init_class(
if (init[i].retry_timeout == XFS_ERR_RETRY_FOREVER)
cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
else
- cfg->retry_timeout = msecs_to_jiffies(
- init[i].retry_timeout * MSEC_PER_SEC);
+ cfg->retry_timeout =
+ secs_to_jiffies(init[i].retry_timeout);
}
return 0;
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 3c61c29ff6ab..11abad6c87e1 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -1111,7 +1111,7 @@ void __iomem *generic_ioremap_prot(phys_addr_t phys_addr, size_t size,
pgprot_t prot);
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
- unsigned long prot);
+ pgprot_t prot);
void iounmap(volatile void __iomem *addr);
void generic_iounmap(volatile void __iomem *addr);
@@ -1120,7 +1120,7 @@ void generic_iounmap(volatile void __iomem *addr);
static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
{
/* _PAGE_IOREMAP needs to be supplied by the architecture */
- return ioremap_prot(addr, size, _PAGE_IOREMAP);
+ return ioremap_prot(addr, size, __pgprot(_PAGE_IOREMAP));
}
#endif
#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 6d1fb6162ac1..a3b5029aebbd 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -19,11 +19,12 @@
#define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \
ARCH_PFN_OFFSET)
+/* avoid <linux/mm.h> include hell */
+extern unsigned long max_mapnr;
+
#ifndef pfn_valid
static inline int pfn_valid(unsigned long pfn)
{
- /* avoid <linux/mm.h> include hell */
- extern unsigned long max_mapnr;
unsigned long pfn_offset = ARCH_PFN_OFFSET;
return pfn >= pfn_offset && (pfn - pfn_offset) < max_mapnr;
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 94cbd50cc870..02aeca21479a 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -6,6 +6,19 @@
#include <linux/threads.h>
#include <linux/percpu-defs.h>
+/*
+ * __percpu_qual is the qualifier for the percpu named address space.
+ *
+ * Most arches use the generic named address space for percpu variables, but
+ * some arches define percpu variables in a different named address space
+ * (on the x86 arch, a percpu variable may be declared as being relative
+ * to the %fs or %gs segments using __seg_fs or __seg_gs named address
+ * space qualifier).
+ */
+#ifndef __percpu_qual
+# define __percpu_qual
+#endif
+
#ifdef CONFIG_SMP
/*
@@ -74,7 +87,7 @@ do { \
#define raw_cpu_generic_add_return(pcp, val) \
({ \
- typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \
+ TYPEOF_UNQUAL(pcp) *__p = raw_cpu_ptr(&(pcp)); \
\
*__p += val; \
*__p; \
@@ -82,8 +95,8 @@ do { \
#define raw_cpu_generic_xchg(pcp, nval) \
({ \
- typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \
- typeof(pcp) __ret; \
+ TYPEOF_UNQUAL(pcp) *__p = raw_cpu_ptr(&(pcp)); \
+ TYPEOF_UNQUAL(pcp) __ret; \
__ret = *__p; \
*__p = nval; \
__ret; \
@@ -91,7 +104,7 @@ do { \
#define __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, _cmpxchg) \
({ \
- typeof(pcp) __val, __old = *(ovalp); \
+ TYPEOF_UNQUAL(pcp) __val, __old = *(ovalp); \
__val = _cmpxchg(pcp, __old, nval); \
if (__val != __old) \
*(ovalp) = __val; \
@@ -100,8 +113,8 @@ do { \
#define raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval) \
({ \
- typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \
- typeof(pcp) __val = *__p, ___old = *(ovalp); \
+ TYPEOF_UNQUAL(pcp) *__p = raw_cpu_ptr(&(pcp)); \
+ TYPEOF_UNQUAL(pcp) __val = *__p, ___old = *(ovalp); \
bool __ret; \
if (__val == ___old) { \
*__p = nval; \
@@ -115,14 +128,14 @@ do { \
#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \
({ \
- typeof(pcp) __old = (oval); \
+ TYPEOF_UNQUAL(pcp) __old = (oval); \
raw_cpu_generic_try_cmpxchg(pcp, &__old, nval); \
__old; \
})
#define __this_cpu_generic_read_nopreempt(pcp) \
({ \
- typeof(pcp) ___ret; \
+ TYPEOF_UNQUAL(pcp) ___ret; \
preempt_disable_notrace(); \
___ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \
preempt_enable_notrace(); \
@@ -131,7 +144,7 @@ do { \
#define __this_cpu_generic_read_noirq(pcp) \
({ \
- typeof(pcp) ___ret; \
+ TYPEOF_UNQUAL(pcp) ___ret; \
unsigned long ___flags; \
raw_local_irq_save(___flags); \
___ret = raw_cpu_generic_read(pcp); \
@@ -141,7 +154,7 @@ do { \
#define this_cpu_generic_read(pcp) \
({ \
- typeof(pcp) __ret; \
+ TYPEOF_UNQUAL(pcp) __ret; \
if (__native_word(pcp)) \
__ret = __this_cpu_generic_read_nopreempt(pcp); \
else \
@@ -160,7 +173,7 @@ do { \
#define this_cpu_generic_add_return(pcp, val) \
({ \
- typeof(pcp) __ret; \
+ TYPEOF_UNQUAL(pcp) __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
__ret = raw_cpu_generic_add_return(pcp, val); \
@@ -170,7 +183,7 @@ do { \
#define this_cpu_generic_xchg(pcp, nval) \
({ \
- typeof(pcp) __ret; \
+ TYPEOF_UNQUAL(pcp) __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
__ret = raw_cpu_generic_xchg(pcp, nval); \
@@ -190,7 +203,7 @@ do { \
#define this_cpu_generic_cmpxchg(pcp, oval, nval) \
({ \
- typeof(pcp) __ret; \
+ TYPEOF_UNQUAL(pcp) __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
__ret = raw_cpu_generic_cmpxchg(pcp, oval, nval); \
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index e402aef79c93..88a42973fa47 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -67,22 +67,21 @@
*
* See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
*
- * - tlb_remove_page() / __tlb_remove_page()
- * - tlb_remove_page_size() / __tlb_remove_page_size()
- * - __tlb_remove_folio_pages()
+ * - tlb_remove_page() / tlb_remove_page_size()
+ * - __tlb_remove_folio_pages() / __tlb_remove_page_size()
+ * - __tlb_remove_folio_pages_size()
*
- * __tlb_remove_page_size() is the basic primitive that queues a page for
- * freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
- * boolean indicating if the queue is (now) full and a call to
- * tlb_flush_mmu() is required.
+ * __tlb_remove_folio_pages_size() is the basic primitive that queues pages
+ * for freeing. It will return a boolean indicating if the queue is (now)
+ * full and a call to tlb_flush_mmu() is required.
*
* tlb_remove_page() and tlb_remove_page_size() imply the call to
 * tlb_flush_mmu() when required and have no return value.
*
- * __tlb_remove_folio_pages() is similar to __tlb_remove_page(), however,
- * instead of removing a single page, remove the given number of consecutive
- * pages that are all part of the same (large) folio: just like calling
- * __tlb_remove_page() on each page individually.
+ * __tlb_remove_folio_pages() is similar to __tlb_remove_page_size(),
+ * however, instead of removing a single page, it assumes PAGE_SIZE and
+ * removes the given number of consecutive pages that are all part of the
+ * same (large) folio.
*
* - tlb_change_page_size()
*
@@ -228,10 +227,10 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page);
*/
static inline void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
- struct page *page = (struct page *)table;
+ struct ptdesc *ptdesc = (struct ptdesc *)table;
- pagetable_dtor(page_ptdesc(page));
- tlb_remove_page(tlb, page);
+ pagetable_dtor(ptdesc);
+ tlb_remove_page(tlb, ptdesc_page(ptdesc));
}
#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
@@ -489,32 +488,16 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
tlb_flush_mmu(tlb);
}
-static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb,
- struct page *page, bool delay_rmap)
-{
- return __tlb_remove_page_size(tlb, page, delay_rmap, PAGE_SIZE);
-}
-
-/* tlb_remove_page
- * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
- * required.
- */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
-static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
+static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt)
{
tlb_remove_table(tlb, pt);
}
-/* Like tlb_remove_ptdesc, but for page-like page directories. */
-static inline void tlb_remove_page_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt)
-{
- tlb_remove_page(tlb, ptdesc_page(pt));
-}
-
static inline void tlb_change_page_size(struct mmu_gather *tlb,
unsigned int page_size)
{
diff --git a/include/cxl/event.h b/include/cxl/event.h
index 04edd44bd26f..f9ae1796da85 100644
--- a/include/cxl/event.h
+++ b/include/cxl/event.h
@@ -164,10 +164,99 @@ struct cxl_cper_work_data {
struct cxl_cper_event_rec rec;
};
+#define PROT_ERR_VALID_AGENT_TYPE BIT_ULL(0)
+#define PROT_ERR_VALID_AGENT_ADDRESS BIT_ULL(1)
+#define PROT_ERR_VALID_DEVICE_ID BIT_ULL(2)
+#define PROT_ERR_VALID_SERIAL_NUMBER BIT_ULL(3)
+#define PROT_ERR_VALID_CAPABILITY BIT_ULL(4)
+#define PROT_ERR_VALID_DVSEC BIT_ULL(5)
+#define PROT_ERR_VALID_ERROR_LOG BIT_ULL(6)
+
+/*
+ * The layout of the enumeration and its values match the CXL Agent Type
+ * field in UEFI 2.10 Section N.2.13.
+ */
+enum {
+ RCD, /* Restricted CXL Device */
+ RCH_DP, /* Restricted CXL Host Downstream Port */
+ DEVICE, /* CXL Device */
+ LD, /* CXL Logical Device */
+ FMLD, /* CXL Fabric Manager managed Logical Device */
+ RP, /* CXL Root Port */
+ DSP, /* CXL Downstream Switch Port */
+ USP, /* CXL Upstream Switch Port */
+};
+
+#pragma pack(1)
+
+/* Compute Express Link Protocol Error Section, UEFI v2.10 sec N.2.13 */
+struct cxl_cper_sec_prot_err {
+ u64 valid_bits;
+ u8 agent_type;
+ u8 reserved[7];
+
+ /*
+ * Except for RCH Downstream Port, all the remaining CXL Agent
+ * types are uniquely identified by the PCIe compatible SBDF number.
+ */
+ union {
+ u64 rcrb_base_addr;
+ struct {
+ u8 function;
+ u8 device;
+ u8 bus;
+ u16 segment;
+ u8 reserved_1[3];
+ };
+ } agent_addr;
+
+ struct {
+ u16 vendor_id;
+ u16 device_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_id;
+ u8 class_code[2];
+ u16 slot;
+ u8 reserved_1[4];
+ } device_id;
+
+ struct {
+ u32 lower_dw;
+ u32 upper_dw;
+ } dev_serial_num;
+
+ u8 capability[60];
+ u16 dvsec_len;
+ u16 err_len;
+ u8 reserved_2[4];
+};
+
+#pragma pack()
+
+/* CXL RAS Capability Structure, CXL v3.0 sec 8.2.4.16 */
+struct cxl_ras_capability_regs {
+ u32 uncor_status;
+ u32 uncor_mask;
+ u32 uncor_severity;
+ u32 cor_status;
+ u32 cor_mask;
+ u32 cap_control;
+ u32 header_log[16];
+};
+
+struct cxl_cper_prot_err_work_data {
+ struct cxl_cper_sec_prot_err prot_err;
+ struct cxl_ras_capability_regs ras_cap;
+ int severity;
+};
+
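For any agent type other than the RCH Downstream Port, the agent address union above carries a PCIe-style SBDF. A standalone sketch of decoding it (mirrors the packed layout above; the sample values are made up):

#include <stdio.h>
#include <stdint.h>

#pragma pack(1)
struct agent_addr {
	union {
		uint64_t rcrb_base_addr;
		struct {
			uint8_t function;
			uint8_t device;
			uint8_t bus;
			uint16_t segment;
			uint8_t reserved[3];
		};
	};
};
#pragma pack()

int main(void)
{
	struct agent_addr a = { .function = 2, .device = 3,
				.bus = 0x1a, .segment = 0 };

	/* print in the usual domain:bus:device.function form */
	printf("%04x:%02x:%02x.%x\n", a.segment, a.bus, a.device, a.function);
	return 0;
}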
#ifdef CONFIG_ACPI_APEI_GHES
int cxl_cper_register_work(struct work_struct *work);
int cxl_cper_unregister_work(struct work_struct *work);
int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd);
+int cxl_cper_register_prot_err_work(struct work_struct *work);
+int cxl_cper_unregister_prot_err_work(struct work_struct *work);
+int cxl_cper_prot_err_kfifo_get(struct cxl_cper_prot_err_work_data *wd);
#else
static inline int cxl_cper_register_work(struct work_struct *work)
{
@@ -182,6 +271,18 @@ static inline int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
{
return 0;
}
+static inline int cxl_cper_register_prot_err_work(struct work_struct *work)
+{
+ return 0;
+}
+static inline int cxl_cper_unregister_prot_err_work(struct work_struct *work)
+{
+ return 0;
+}
+static inline int cxl_cper_prot_err_kfifo_get(struct cxl_cper_prot_err_work_data *wd)
+{
+ return 0;
+}
#endif
#endif /* _LINUX_CXL_EVENT_H */
diff --git a/include/dt-bindings/iio/adc/adi,ad4695.h b/include/dt-bindings/iio/adc/adi,ad4695.h
index 9fbef542bf67..fea4525d2710 100644
--- a/include/dt-bindings/iio/adc/adi,ad4695.h
+++ b/include/dt-bindings/iio/adc/adi,ad4695.h
@@ -6,4 +6,11 @@
#define AD4695_COMMON_MODE_REFGND 0xFF
#define AD4695_COMMON_MODE_COM 0xFE
+#define AD4695_TRIGGER_EVENT_BUSY 0
+#define AD4695_TRIGGER_EVENT_ALERT 1
+
+#define AD4695_TRIGGER_PIN_GP0 0
+#define AD4695_TRIGGER_PIN_GP2 2
+#define AD4695_TRIGGER_PIN_GP3 3
+
#endif /* _DT_BINDINGS_ADI_AD4695_H */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index a70e62d69dc7..3f2e93ed9730 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1094,6 +1094,17 @@ static inline acpi_handle acpi_get_processor_handle(int cpu)
#endif /* !CONFIG_ACPI */
+#ifdef CONFIG_ACPI_HMAT
+int hmat_get_extended_linear_cache_size(struct resource *backing_res, int nid,
+ resource_size_t *size);
+#else
+static inline int hmat_get_extended_linear_cache_size(struct resource *backing_res,
+ int nid, resource_size_t *size)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
extern void arch_post_acpi_subsys_init(void);
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
index bbc4730a6505..c0989b5b0407 100644
--- a/include/linux/bit_spinlock.h
+++ b/include/linux/bit_spinlock.h
@@ -13,7 +13,7 @@
* Don't use this unless you really need to: spin_lock() and spin_unlock()
* are significantly faster.
*/
-static inline void bit_spin_lock(int bitnum, unsigned long *addr)
+static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
/*
* Assuming the lock is uncontended, this never enters
@@ -38,7 +38,7 @@ static inline void bit_spin_lock(int bitnum, unsigned long *addr)
/*
* Return true if it was acquired
*/
-static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
+static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -54,7 +54,7 @@ static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
/*
* bit-based spin_unlock()
*/
-static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
+static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(!test_bit(bitnum, addr));
@@ -71,7 +71,7 @@ static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
* non-atomic version, which can be used eg. if the bit lock itself is
* protecting the rest of the flags in the word.
*/
-static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
+static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(!test_bit(bitnum, addr));
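The functions being force-inlined above implement a lock that lives in a single bit of a word, leaving the remaining bits free for the data it protects. A standalone sketch of the idea using C11 atomics (not the kernel implementation):

#include <stdio.h>
#include <stdatomic.h>

#define LOCK_BIT 0

static void bit_lock(atomic_ulong *word)
{
	/* spin until we are the ones who set the bit */
	while (atomic_fetch_or(word, 1UL << LOCK_BIT) & (1UL << LOCK_BIT))
		;
}

static void bit_unlock(atomic_ulong *word)
{
	atomic_fetch_and(word, ~(1UL << LOCK_BIT));
}

int main(void)
{
	atomic_ulong flags = 0;

	bit_lock(&flags);
	flags |= 1UL << 5;	/* update a flag the lock protects */
	bit_unlock(&flags);
	printf("flags: %#lx\n", (unsigned long)flags);
	return 0;
}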
diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h
index d8a8d245824a..4c506e76a808 100644
--- a/include/linux/bootmem_info.h
+++ b/include/linux/bootmem_info.h
@@ -18,6 +18,8 @@ enum bootmem_type {
#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
+void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
+ unsigned long nr_pages);
void get_page_bootmem(unsigned long info, struct page *page,
enum bootmem_type type);
@@ -58,6 +60,11 @@ static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
+static inline void register_page_bootmem_memmap(unsigned long section_nr,
+ struct page *map, unsigned long nr_pages)
+{
+}
+
static inline void put_page_bootmem(struct page *page)
{
}
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index fab70b26e131..f0a4ad7839b6 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -270,7 +270,7 @@ int cont_write_begin(struct file *, struct address_space *, loff_t,
unsigned, struct folio **, void **,
get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
-void block_commit_write(struct page *page, unsigned int from, unsigned int to);
+void block_commit_write(struct folio *folio, size_t from, size_t to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index ba8f52d48b94..204b22a99c4b 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -184,6 +184,12 @@ static inline void bvec_iter_advance_single(const struct bio_vec *bv,
((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))
+#define for_each_mp_bvec(bvl, bio_vec, iter, start) \
+ for (iter = (start); \
+ (iter).bi_size && \
+ ((bvl = mp_bvec_iter_bvec((bio_vec), (iter))), 1); \
+ bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))
+
/* for iterating one bio from start to end */
#define BVEC_ITER_ALL_INIT (struct bvec_iter) \
{ \
diff --git a/include/linux/cma.h b/include/linux/cma.h
index d15b64f51336..62d9c1cf6326 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -40,6 +40,9 @@ static inline int __init cma_declare_contiguous(phys_addr_t base,
return cma_declare_contiguous_nid(base, size, limit, alignment,
order_per_bit, fixed, name, res_cma, NUMA_NO_NODE);
}
+extern int __init cma_declare_contiguous_multi(phys_addr_t size,
+ phys_addr_t align, unsigned int order_per_bit,
+ const char *name, struct cma **res_cma, int nid);
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit,
const char *name,
@@ -50,12 +53,14 @@ extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
+extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end);
extern void cma_reserve_pages_on_error(struct cma *cma);
#ifdef CONFIG_CMA
struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
bool cma_free_folio(struct cma *cma, const struct folio *folio);
+bool cma_validate_zones(struct cma *cma);
#else
static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
{
@@ -66,6 +71,10 @@ static inline bool cma_free_folio(struct cma *cma, const struct folio *folio)
{
return false;
}
+static inline bool cma_validate_zones(struct cma *cma)
+{
+ return false;
+}
#endif
#endif
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 7bf0c521db63..173d9c07a895 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -95,7 +95,7 @@ extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
struct page **page);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern bool compaction_suitable(struct zone *zone, int order,
- int highest_zoneidx);
+ unsigned long watermark, int highest_zoneidx);
extern void compaction_defer_reset(struct zone *zone, int order,
bool alloc_success);
@@ -113,7 +113,8 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat)
}
static inline bool compaction_suitable(struct zone *zone, int order,
- int highest_zoneidx)
+ unsigned long watermark,
+ int highest_zoneidx)
{
return false;
}
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 2e7c2c282f3a..4fc8e26914ad 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -128,3 +128,11 @@
*/
#define ASM_INPUT_G "ir"
#define ASM_INPUT_RM "r"
+
+/*
+ * Declare compiler support for __typeof_unqual__() operator.
+ *
+ * Bindgen uses LLVM even if our C compiler is GCC, so we cannot
+ * rely on the auto-detected CONFIG_CC_HAS_TYPEOF_UNQUAL.
+ */
+#define CC_HAS_TYPEOF_UNQUAL (__clang_major__ >= 19)
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index c9b58188ec61..32048052c64a 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -137,3 +137,11 @@
#if GCC_VERSION < 90100
#undef __alloc_size__
#endif
+
+/*
+ * Declare compiler support for __typeof_unqual__() operator.
+ *
+ * Bindgen uses LLVM even if our C compiler is GCC, so we cannot
+ * rely on the auto-detected CONFIG_CC_HAS_TYPEOF_UNQUAL.
+ */
+#define CC_HAS_TYPEOF_UNQUAL (__GNUC__ >= 14)
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 9fc30b6b80c9..27725f1ab5ab 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -226,6 +226,26 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
__BUILD_BUG_ON_ZERO_MSG(!__is_noncstr(p), \
"must be non-C-string (not NUL-terminated)")
+/*
+ * Use __typeof_unqual__() when available.
+ *
+ * XXX: Remove test for __CHECKER__ once
+ * sparse learns about __typeof_unqual__().
+ */
+#if CC_HAS_TYPEOF_UNQUAL && !defined(__CHECKER__)
+# define USE_TYPEOF_UNQUAL 1
+#endif
+
+/*
+ * Define TYPEOF_UNQUAL() to use __typeof_unqual__() as the typeof
+ * operator when available, to return the unqualified type of exp.
+ */
+#if defined(USE_TYPEOF_UNQUAL)
+# define TYPEOF_UNQUAL(exp) __typeof_unqual__(exp)
+#else
+# define TYPEOF_UNQUAL(exp) __typeof__(exp)
+#endif
+
#endif /* __KERNEL__ */
#if defined(CONFIG_CFI_CLANG) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
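What __typeof_unqual__() buys over plain typeof is qualifier stripping. A minimal sketch, assuming a compiler that supports the operator (e.g. GCC >= 14 or Clang >= 19, per the version checks added above):

#include <stdio.h>

int main(void)
{
	const volatile int src = 42;

	__typeof_unqual__(src) dst = src;	/* dst is a plain int */
	dst += 1;	/* would be rejected if dst inherited const */
	printf("%d\n", dst);
	return 0;
}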
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index e09d323be845..501cffddc2f4 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -57,7 +57,7 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
# define __user BTF_TYPE_TAG(user)
# endif
# define __iomem
-# define __percpu BTF_TYPE_TAG(percpu)
+# define __percpu __percpu_qual BTF_TYPE_TAG(percpu)
# define __rcu BTF_TYPE_TAG(rcu)
# define __chk_user_ptr(x) (void)0
diff --git a/include/linux/context_tracking_irq.h b/include/linux/context_tracking_irq.h
index c50b5670c4a5..197916ee91a4 100644
--- a/include/linux/context_tracking_irq.h
+++ b/include/linux/context_tracking_irq.h
@@ -10,12 +10,12 @@ void ct_irq_exit_irqson(void);
void ct_nmi_enter(void);
void ct_nmi_exit(void);
#else
-static inline void ct_irq_enter(void) { }
-static inline void ct_irq_exit(void) { }
+static __always_inline void ct_irq_enter(void) { }
+static __always_inline void ct_irq_exit(void) { }
static inline void ct_irq_enter_irqson(void) { }
static inline void ct_irq_exit_irqson(void) { }
-static inline void ct_nmi_enter(void) { }
-static inline void ct_nmi_exit(void) { }
+static __always_inline void ct_nmi_enter(void) { }
+static __always_inline void ct_nmi_exit(void) { }
#endif
#endif
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 17276965ff1d..d79a242b271d 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -71,7 +71,8 @@ enum coresight_dev_subtype_source {
enum coresight_dev_subtype_helper {
CORESIGHT_DEV_SUBTYPE_HELPER_CATU,
- CORESIGHT_DEV_SUBTYPE_HELPER_ECT_CTI
+ CORESIGHT_DEV_SUBTYPE_HELPER_ECT_CTI,
+ CORESIGHT_DEV_SUBTYPE_HELPER_CTCU,
};
/**
@@ -238,7 +239,7 @@ struct coresight_trace_id_map {
DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX);
atomic_t __percpu *cpu_map;
atomic_t perf_cs_etm_session_active;
- spinlock_t lock;
+ raw_spinlock_t lock;
};
/**
@@ -301,7 +302,7 @@ struct coresight_device {
/* system configuration and feature lists */
struct list_head feature_csdev_list;
struct list_head config_csdev_list;
- spinlock_t cscfg_csdev_lock;
+ raw_spinlock_t cscfg_csdev_lock;
void *active_cscfg_ctxt;
};
@@ -329,17 +330,29 @@ static struct coresight_dev_list (var) = { \
#define to_coresight_device(d) container_of(d, struct coresight_device, dev)
+/**
+ * struct coresight_path - data needed by enable/disable path
+ * @path_list: path from source to sink.
+ * @trace_id: trace_id of the whole path.
+ */
+struct coresight_path {
+ struct list_head path_list;
+ u8 trace_id;
+};
+
enum cs_mode {
CS_MODE_DISABLED,
CS_MODE_SYSFS,
CS_MODE_PERF,
};
+#define coresight_ops(csdev) csdev->ops
#define source_ops(csdev) csdev->ops->source_ops
#define sink_ops(csdev) csdev->ops->sink_ops
#define link_ops(csdev) csdev->ops->link_ops
#define helper_ops(csdev) csdev->ops->helper_ops
#define ect_ops(csdev) csdev->ops->ect_ops
+#define panic_ops(csdev) csdev->ops->panic_ops
/**
* struct coresight_ops_sink - basic operations for a sink
@@ -389,7 +402,7 @@ struct coresight_ops_link {
struct coresight_ops_source {
int (*cpu_id)(struct coresight_device *csdev);
int (*enable)(struct coresight_device *csdev, struct perf_event *event,
- enum cs_mode mode, struct coresight_trace_id_map *id_map);
+ enum cs_mode mode, struct coresight_path *path);
void (*disable)(struct coresight_device *csdev,
struct perf_event *event);
};
@@ -409,11 +422,24 @@ struct coresight_ops_helper {
int (*disable)(struct coresight_device *csdev, void *data);
};
+
+/**
+ * struct coresight_ops_panic - Generic device ops for panic handling
+ *
+ * @sync : Sync the device register state/trace data
+ */
+struct coresight_ops_panic {
+ int (*sync)(struct coresight_device *csdev);
+};
+
struct coresight_ops {
+ int (*trace_id)(struct coresight_device *csdev, enum cs_mode mode,
+ struct coresight_device *sink);
const struct coresight_ops_sink *sink_ops;
const struct coresight_ops_link *link_ops;
const struct coresight_ops_source *source_ops;
const struct coresight_ops_helper *helper_ops;
+ const struct coresight_ops_panic *panic_ops;
};
static inline u32 csdev_access_relaxed_read32(struct csdev_access *csa,
@@ -459,8 +485,11 @@ static inline struct clk *coresight_get_enable_apb_pclk(struct device *dev)
int ret;
pclk = clk_get(dev, "apb_pclk");
- if (IS_ERR(pclk))
- return NULL;
+ if (IS_ERR(pclk)) {
+ pclk = clk_get(dev, "apb");
+ if (IS_ERR(pclk))
+ return NULL;
+ }
ret = clk_prepare_enable(pclk);
if (ret) {
@@ -649,6 +678,10 @@ extern int coresight_enable_sysfs(struct coresight_device *csdev);
extern void coresight_disable_sysfs(struct coresight_device *csdev);
extern int coresight_timeout(struct csdev_access *csa, u32 offset,
int position, int value);
+typedef void (*coresight_timeout_cb_t) (struct csdev_access *, u32, int, int);
+extern int coresight_timeout_action(struct csdev_access *csa, u32 offset,
+ int position, int value,
+ coresight_timeout_cb_t cb);
extern int coresight_claim_device(struct coresight_device *csdev);
extern int coresight_claim_device_unlocked(struct coresight_device *csdev);
@@ -694,4 +727,6 @@ int coresight_init_driver(const char *drv, struct amba_driver *amba_drv,
void coresight_remove_driver(struct amba_driver *amba_drv,
struct platform_driver *pdev_drv);
+int coresight_etm_get_trace_id(struct coresight_device *csdev, enum cs_mode mode,
+ struct coresight_device *sink);
#endif /* _LINUX_CORESIGHT_H */
diff --git a/include/linux/counter.h b/include/linux/counter.h
index 426b7d58a438..f208e867dd0f 100644
--- a/include/linux/counter.h
+++ b/include/linux/counter.h
@@ -580,6 +580,9 @@ struct counter_array {
#define COUNTER_COMP_CEILING(_read, _write) \
COUNTER_COMP_COUNT_U64("ceiling", _read, _write)
+#define COUNTER_COMP_COMPARE(_read, _write) \
+ COUNTER_COMP_COUNT_U64("compare", _read, _write)
+
#define COUNTER_COMP_COUNT_MODE(_read, _write, _available) \
{ \
.type = COUNTER_COMP_COUNT_MODE, \
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 265b0f8fc0b3..0ed60a91eca9 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -89,6 +89,10 @@ enum {
#define CPER_NOTIFY_DMAR \
GUID_INIT(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \
0x72, 0x2D, 0xEB, 0x41)
+/* CXL Protocol Error Section */
+#define CPER_SEC_CXL_PROT_ERR \
+ GUID_INIT(0x80B9EFB4, 0x52B5, 0x4DE3, 0xA7, 0x77, 0x68, 0x78, \
+ 0x4B, 0x77, 0x10, 0x48)
/* CXL Event record UUIDs are formatted as GUIDs and reported in section type */
/*
@@ -601,4 +605,8 @@ void cper_estatus_print(const char *pfx,
int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus);
int cper_estatus_check(const struct acpi_hest_generic_status *estatus);
+struct cxl_cper_sec_prot_err;
+void cxl_cper_print_prot_err(const char *pfx,
+ const struct cxl_cper_sec_prot_err *prot_err);
+
#endif
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 6a0a8f1c7c90..e3049543008b 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -148,7 +148,7 @@ static inline int suspend_disable_secondary_cpus(void)
}
static inline void suspend_enable_secondary_cpus(void)
{
- return thaw_secondary_cpus();
+ thaw_secondary_cpus();
}
#else /* !CONFIG_PM_SLEEP_SMP */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 6cc5e484547c..1987400000b4 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -116,7 +116,6 @@ enum cpuhp_state {
CPUHP_NET_IUCV_PREPARE,
CPUHP_ARM_BL_PREPARE,
CPUHP_TRACE_RB_PREPARE,
- CPUHP_MM_ZS_PREPARE,
CPUHP_MM_ZSWP_POOL_PREPARE,
CPUHP_KVM_PPC_BOOK3S_PREPARE,
CPUHP_ZCOMP_PREPARE,
diff --git a/include/linux/crash_reserve.h b/include/linux/crash_reserve.h
index 5a9df944fb80..1fe7e7d1b214 100644
--- a/include/linux/crash_reserve.h
+++ b/include/linux/crash_reserve.h
@@ -32,13 +32,12 @@ int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
#define CRASH_ADDR_HIGH_MAX memblock_end_of_DRAM()
#endif
-void __init reserve_crashkernel_generic(char *cmdline,
- unsigned long long crash_size,
- unsigned long long crash_base,
- unsigned long long crash_low_size,
- bool high);
+void __init reserve_crashkernel_generic(unsigned long long crash_size,
+ unsigned long long crash_base,
+ unsigned long long crash_low_size,
+ bool high);
#else
-static inline void __init reserve_crashkernel_generic(char *cmdline,
+static inline void __init reserve_crashkernel_generic(
unsigned long long crash_size,
unsigned long long crash_base,
unsigned long long crash_low_size,
diff --git a/include/linux/damon.h b/include/linux/damon.h
index c9074d569596..47e36e6ea203 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -36,6 +36,16 @@ struct damon_addr_range {
};
/**
+ * struct damon_size_range - Represents size for filter to operate on [@min, @max].
+ * @min: Min size (inclusive).
+ * @max: Max size (inclusive).
+ */
+struct damon_size_range {
+ unsigned long min;
+ unsigned long max;
+};
+
+/**
* struct damon_region - Represents a monitoring target region.
* @ar: The address range of the region.
* @sampling_addr: Address of the sample for the next access check.
@@ -324,8 +334,11 @@ struct damos_stat {
/**
* enum damos_filter_type - Type of memory for &struct damos_filter
* @DAMOS_FILTER_TYPE_ANON: Anonymous pages.
+ * @DAMOS_FILTER_TYPE_ACTIVE: Active pages.
* @DAMOS_FILTER_TYPE_MEMCG: Specific memcg's pages.
* @DAMOS_FILTER_TYPE_YOUNG: Recently accessed pages.
+ * @DAMOS_FILTER_TYPE_HUGEPAGE_SIZE: Page is part of a hugepage whose size is in a given range.
+ * @DAMOS_FILTER_TYPE_UNMAPPED: Unmapped pages.
* @DAMOS_FILTER_TYPE_ADDR: Address range.
* @DAMOS_FILTER_TYPE_TARGET: Data Access Monitoring target.
* @NR_DAMOS_FILTER_TYPES: Number of filter types.
@@ -343,8 +356,11 @@ struct damos_stat {
*/
enum damos_filter_type {
DAMOS_FILTER_TYPE_ANON,
+ DAMOS_FILTER_TYPE_ACTIVE,
DAMOS_FILTER_TYPE_MEMCG,
DAMOS_FILTER_TYPE_YOUNG,
+ DAMOS_FILTER_TYPE_HUGEPAGE_SIZE,
+ DAMOS_FILTER_TYPE_UNMAPPED,
DAMOS_FILTER_TYPE_ADDR,
DAMOS_FILTER_TYPE_TARGET,
NR_DAMOS_FILTER_TYPES,
@@ -360,6 +376,7 @@ enum damos_filter_type {
* @target_idx: Index of the &struct damon_target of
* &damon_ctx->adaptive_targets if @type is
* DAMOS_FILTER_TYPE_TARGET.
+ * @sz_range: Size range if @type is DAMOS_FILTER_TYPE_HUGEPAGE_SIZE.
* @list: List head for siblings.
*
* Before applying the &damos->action to a memory region, DAMOS checks if each
@@ -376,6 +393,7 @@ struct damos_filter {
unsigned short memcg_id;
struct damon_addr_range addr_range;
int target_idx;
+ struct damon_size_range sz_range;
};
struct list_head list;
};
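
As an illustration, a scheme that should act only on hugepages of at least PMD
size could install such a filter; a minimal sketch using the constructors
declared later in this header (the scheme pointer s and HPAGE_PMD_SIZE are
assumptions here):

	struct damos_filter *f;

	/* match folios whose hugepage size is in [HPAGE_PMD_SIZE, ULONG_MAX] */
	f = damos_new_filter(DAMOS_FILTER_TYPE_HUGEPAGE_SIZE, true, true);
	if (!f)
		return -ENOMEM;
	f->sz_range.min = HPAGE_PMD_SIZE;
	f->sz_range.max = ULONG_MAX;
	damos_add_filter(s, f);
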
@@ -432,6 +450,8 @@ struct damos_access_pattern {
* @wmarks: Watermarks for automated (in)activation of this scheme.
* @target_nid: Destination node if @action is "migrate_{hot,cold}".
* @filters: Additional set of &struct damos_filter for &action.
+ * @ops_filters: List of ops layer handled &struct damos_filter objects.
+ * @last_applied: The last ops layer managed entity that @action was applied to.
* @stat: Statistics of this scheme.
* @list: List head for siblings.
*
@@ -454,6 +474,15 @@ struct damos_access_pattern {
* implementation could check pages of the region and skip &action to respect
* &filters
*
+ * The minimum entity that @action can be applied to depends on the
+ * underlying &struct damon_operations. Since that entity may not be aligned
+ * with the core layer abstraction, namely &struct damon_region, the &struct
+ * damon_operations could apply @action to the same entity multiple times.
+ * Large folios that span multiple &struct damon_region objects are one such
+ * example. The &struct damon_operations can use @last_applied to avoid that.
+ * DAMOS core logic unsets @last_applied when each walk of the regions for
+ * applying the scheme is finished.
+ *
* After applying the &action to each region, &stat_count and &stat_sz are
* updated to reflect the number of regions and total size of regions that the
* &action is applied to.
@@ -475,6 +504,9 @@ struct damos {
* layer-handled filters. If true, operations layer allows it, too.
*/
bool core_filters_allowed;
+ /* whether to reject regions that core/ops filters did not match */
+ bool core_filters_default_reject;
+ bool ops_filters_default_reject;
/* public: */
struct damos_quota quota;
struct damos_watermarks wmarks;
@@ -482,6 +514,8 @@ struct damos {
int target_nid;
};
struct list_head filters;
+ struct list_head ops_filters;
+ void *last_applied;
struct damos_stat stat;
struct list_head list;
};
@@ -510,7 +544,6 @@ enum damon_ops_id {
* @update: Update operations-related data structures.
* @prepare_access_checks: Prepare next access check of target regions.
* @check_accesses: Check the accesses to target regions.
- * @reset_aggregated: Reset aggregated accesses monitoring results.
* @get_scheme_score: Get the score of a region for a scheme.
* @apply_scheme: Apply a DAMON-based operation scheme.
* @target_valid: Determine if the target is valid.
@@ -522,8 +555,7 @@ enum damon_ops_id {
* (&damon_ctx.kdamond) calls @init and @prepare_access_checks before starting
* the monitoring, @update after each &damon_attrs.ops_update_interval, and
* @check_accesses, @target_valid and @prepare_access_checks after each
- * &damon_attrs.sample_interval. Finally, @reset_aggregated is called after
- * each &damon_attrs.aggr_interval.
+ * &damon_attrs.sample_interval.
*
* Each &struct damon_operations instance having valid @id can be registered
* via damon_register_ops() and selected by damon_select_ops() later.
@@ -538,8 +570,6 @@ enum damon_ops_id {
* last preparation and update the number of observed accesses of each region.
* It should also return the max number of observed accesses that were made as
* a result of its update. The value will be used as the regions adjustment
* threshold.
- * @reset_aggregated should reset the access monitoring results that aggregated
- * by @check_accesses.
* @get_scheme_score should return the priority score of a region for a scheme
* as an integer in [0, &DAMOS_MAX_SCORE].
* @apply_scheme is called from @kdamond when a region for user provided
@@ -557,7 +587,6 @@ struct damon_operations {
void (*update)(struct damon_ctx *context);
void (*prepare_access_checks)(struct damon_ctx *context);
unsigned int (*check_accesses)(struct damon_ctx *context);
- void (*reset_aggregated)(struct damon_ctx *context);
int (*get_scheme_score)(struct damon_ctx *context,
struct damon_target *t, struct damon_region *r,
struct damos *scheme);
@@ -571,43 +600,28 @@ struct damon_operations {
/**
* struct damon_callback - Monitoring events notification callbacks.
*
- * @before_start: Called before starting the monitoring.
* @after_wmarks_check: Called after each schemes' watermarks check.
- * @after_sampling: Called after each sampling.
* @after_aggregation: Called after each aggregation.
- * @before_damos_apply: Called before applying DAMOS action.
* @before_terminate: Called before terminating the monitoring.
- * @private: User private data.
*
- * The monitoring thread (&damon_ctx.kdamond) calls @before_start and
- * @before_terminate just before starting and finishing the monitoring,
- * respectively. Therefore, those are good places for installing and cleaning
- * @private.
+ * The monitoring thread (&damon_ctx.kdamond) calls @before_terminate just
+ * before finishing the monitoring.
*
* The monitoring thread calls @after_wmarks_check after each DAMON-based
* operation schemes' watermarks check. If users need to make changes to the
* attributes of the monitoring context while it's deactivated due to the
* watermarks, this is a good place to do so.
*
- * The monitoring thread calls @after_sampling and @after_aggregation for each
- * of the sampling intervals and aggregation intervals, respectively.
- * Therefore, users can safely access the monitoring results without additional
- * protection. For the reason, users are recommended to use these callback for
- * the accesses to the results.
+ * The monitoring thread calls @after_aggregation for each of the aggregation
+ * intervals. Therefore, users can safely access the monitoring results
+ * without additional protection. For that reason, users are recommended to
+ * use this callback for accessing the results.
*
* If any callback returns non-zero, monitoring stops.
*/
struct damon_callback {
- void *private;
-
- int (*before_start)(struct damon_ctx *context);
int (*after_wmarks_check)(struct damon_ctx *context);
- int (*after_sampling)(struct damon_ctx *context);
int (*after_aggregation)(struct damon_ctx *context);
- int (*before_damos_apply)(struct damon_ctx *context,
- struct damon_target *target,
- struct damon_region *region,
- struct damos *scheme);
void (*before_terminate)(struct damon_ctx *context);
};
@@ -633,11 +647,37 @@ struct damon_call_control {
};
/**
+ * struct damon_intervals_goal - Monitoring intervals auto-tuning goal.
+ *
+ * @access_bp: Access events observation ratio to achieve in bp.
+ * @aggrs: Number of aggregations to achieve @access_bp within.
+ * @min_sample_us: Minimum resulting sampling interval in microseconds.
+ * @max_sample_us: Maximum resulting sampling interval in microseconds.
+ *
+ * DAMON automatically tunes &damon_attrs->sample_interval and
+ * &damon_attrs->aggr_interval so that the ratio, in bp (1/10,000), of
+ * DAMON-observed access events to the theoretical maximum amount within
+ * @aggrs aggregations becomes @access_bp. The logic increases
+ * &damon_attrs->aggr_interval and &damon_attrs->sample_interval by the same
+ * ratio if the current access events observation ratio is lower than the
+ * target for each @aggrs aggregations, and vice versa.
+ *
+ * If @aggrs is zero, the tuning is disabled and hence this struct is ignored.
+ */
+struct damon_intervals_goal {
+ unsigned long access_bp;
+ unsigned long aggrs;
+ unsigned long min_sample_us;
+ unsigned long max_sample_us;
+};
+
+/**
* struct damon_attrs - Monitoring attributes for accuracy/overhead control.
*
* @sample_interval: The time between access samplings.
* @aggr_interval: The time between monitor results aggregations.
* @ops_update_interval: The time between monitoring operations updates.
+ * @intervals_goal: Intervals auto-tuning goal.
* @min_nr_regions: The minimum number of adaptive monitoring
* regions.
* @max_nr_regions: The maximum number of adaptive monitoring
@@ -657,8 +697,20 @@ struct damon_attrs {
unsigned long sample_interval;
unsigned long aggr_interval;
unsigned long ops_update_interval;
+ struct damon_intervals_goal intervals_goal;
unsigned long min_nr_regions;
unsigned long max_nr_regions;
+/* private: internal use only */
+ /*
+ * @aggr_interval to @sample_interval ratio.
+ * Core-external components call damon_set_attrs() with a &damon_attrs
+ * that has this field unset. In that case, damon_set_attrs() sets this
+ * field of the resulting &damon_attrs. Core-internal components such as
+ * kdamond_tune_intervals() call damon_set_attrs() with a &damon_attrs
+ * that has this field set. In that case, damon_set_attrs() just keeps
+ * it.
+ */
+ unsigned long aggr_samples;
};
/**
@@ -707,6 +759,11 @@ struct damon_ctx {
* update
*/
unsigned long next_ops_update_sis;
+ /*
+ * number of sample intervals that should be passed before next
+ * intervals tuning
+ */
+ unsigned long next_intervals_tune_sis;
/* for waiting until the execution of the kdamond_fn is started */
struct completion kdamond_started;
/* for scheme quotas prioritization */
@@ -788,6 +845,12 @@ static inline unsigned long damon_sz_region(struct damon_region *r)
#define damos_for_each_filter_safe(f, next, scheme) \
list_for_each_entry_safe(f, next, &(scheme)->filters, list)
+#define damos_for_each_ops_filter(f, scheme) \
+ list_for_each_entry(f, &(scheme)->ops_filters, list)
+
+#define damos_for_each_ops_filter_safe(f, next, scheme) \
+ list_for_each_entry_safe(f, next, &(scheme)->ops_filters, list)
+
#ifdef CONFIG_DAMON
struct damon_region *damon_new_region(unsigned long start, unsigned long end);
@@ -813,6 +876,7 @@ void damon_update_region_access_rate(struct damon_region *r, bool accessed,
struct damos_filter *damos_new_filter(enum damos_filter_type type,
bool matching, bool allow);
void damos_add_filter(struct damos *s, struct damos_filter *f);
+bool damos_filter_for_ops(enum damos_filter_type type);
void damos_destroy_filter(struct damos_filter *f);
struct damos_quota_goal *damos_new_quota_goal(
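
For example, a set of attributes asking the tuner to target a 4% (400 bp)
observation ratio over every three aggregations, with the resulting sampling
interval bounded to [5 ms, 10 s], could look like this (a sketch; all values
are illustrative):

	struct damon_attrs attrs = {
		.sample_interval = 5000,	/* 5 ms, initial value */
		.aggr_interval = 100000,	/* 100 ms, initial value */
		.intervals_goal = {
			.access_bp = 400,	/* aim for 4% observed events */
			.aggrs = 3,		/* evaluate every 3 aggregations */
			.min_sample_us = 5000,
			.max_sample_us = 10000000,
		},
	};
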
diff --git a/include/linux/dax.h b/include/linux/dax.h
index df41a0017b31..dcc9fcdf14e4 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -207,6 +207,11 @@ int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
const struct iomap_ops *ops);
+static inline bool dax_page_is_idle(struct page *page)
+{
+ return page && page_ref_count(page) == 0;
+}
+
#if IS_ENABLED(CONFIG_DAX)
int dax_read_lock(void);
void dax_read_unlock(int id);
@@ -220,6 +225,19 @@ static inline void dax_read_unlock(int id)
{
}
#endif /* CONFIG_DAX */
+
+#if !IS_ENABLED(CONFIG_FS_DAX)
+static inline int __must_check dax_break_layout(struct inode *inode,
+ loff_t start, loff_t end, void (cb)(struct inode *))
+{
+ return 0;
+}
+
+static inline void dax_break_layout_final(struct inode *inode)
+{
+}
+#endif
+
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
@@ -241,8 +259,18 @@ vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
unsigned int order, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
+void dax_delete_mapping_range(struct address_space *mapping,
+ loff_t start, loff_t end);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);
+int __must_check dax_break_layout(struct inode *inode, loff_t start,
+ loff_t end, void (cb)(struct inode *));
+static inline int __must_check dax_break_layout_inode(struct inode *inode,
+ void (cb)(struct inode *))
+{
+ return dax_break_layout(inode, 0, LLONG_MAX, cb);
+}
+void dax_break_layout_final(struct inode *inode);
int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
struct inode *dest, loff_t destoff,
loff_t len, bool *is_same,
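
A hedged sketch of how a filesystem truncate path might use the new helpers
(the wait callback is hypothetical; real callers pass a routine that waits for
DAX page references to be dropped):

	static void my_wait_dax_idle(struct inode *inode)
	{
		/* hypothetical: wait until DAX pages of the inode are idle */
	}

	static int my_truncate(struct inode *inode)
	{
		int error;

		/* block until no DAX page in the file is pinned, e.g. for DMA */
		error = dax_break_layout_inode(inode, my_wait_dax_idle);
		if (error)
			return error;
		/* ... perform the actual truncate ... */
		return 0;
	}
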
diff --git a/include/linux/device.h b/include/linux/device.h
index e3d56cd13fea..79e49fe494b7 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1162,7 +1162,7 @@ static inline void device_remove_group(struct device *dev,
{
const struct attribute_group *groups[] = { grp, NULL };
- return device_remove_groups(dev, groups);
+ device_remove_groups(dev, groups);
}
int __must_check devm_device_add_group(struct device *dev,
diff --git a/include/linux/device/class.h b/include/linux/device/class.h
index 45ee3a634999..65880e60c720 100644
--- a/include/linux/device/class.h
+++ b/include/linux/device/class.h
@@ -193,7 +193,7 @@ static inline int __must_check class_create_file(const struct class *class,
static inline void class_remove_file(const struct class *class,
const struct class_attribute *attr)
{
- return class_remove_file_ns(class, attr, NULL);
+ class_remove_file_ns(class, attr, NULL);
}
/* Simple class attribute that is just a static string */
diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h
index 2dea217629d0..5d43881e6fb7 100644
--- a/include/linux/dma/k3-udma-glue.h
+++ b/include/linux/dma/k3-udma-glue.h
@@ -138,8 +138,7 @@ int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num);
void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num, void *data,
- void (*cleanup)(void *data, dma_addr_t desc_dma),
- bool skip_fdq);
+ void (*cleanup)(void *data, dma_addr_t desc_dma));
int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_idx);
int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 346251bf1026..bb146c5ac3e4 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -839,7 +839,6 @@ struct dma_filter {
* The function takes a buffer of size buf_len. The callback function will
* be called after period_len bytes have been transferred.
* @device_prep_interleaved_dma: Transfer expression in a generic way.
- * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
* @device_caps: May be used to override the generic DMA slave capabilities
* with per-channel specific ones
* @device_config: Pushes a new configuration to a channel, return 0 or an error
@@ -942,9 +941,6 @@ struct dma_device {
struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
struct dma_chan *chan, struct dma_interleaved_template *xt,
unsigned long flags);
- struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
- struct dma_chan *chan, dma_addr_t dst, u64 data,
- unsigned long flags);
void (*device_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
int (*device_config)(struct dma_chan *chan, struct dma_slave_config *config);
@@ -1639,14 +1635,14 @@ static inline struct dma_chan
{
struct dma_chan *chan;
- chan = dma_request_slave_channel(dev, name);
- if (chan)
+ chan = dma_request_chan(dev, name);
+ if (!IS_ERR(chan))
return chan;
if (!fn || !fn_param)
return NULL;
- return __dma_request_channel(&mask, fn, fn_param, NULL);
+ return dma_request_channel(mask, fn, fn_param);
}
static inline char *
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 5ba187e08cf7..cd653862ab99 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -225,6 +225,7 @@ struct fb_deferred_io {
int open_count; /* number of opened files; protected by fb_info lock */
struct mutex lock; /* mutex that protects the pageref list */
struct list_head pagereflist; /* list of pagerefs for touched pages */
+ struct address_space *mapping; /* page cache object for fb device */
/* callback */
struct page *(*get_page)(struct fb_info *info, unsigned long offset);
void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
diff --git a/include/linux/folio_queue.h b/include/linux/folio_queue.h
index 4d3f8074c137..45ad2408a80c 100644
--- a/include/linux/folio_queue.h
+++ b/include/linux/folio_queue.h
@@ -15,6 +15,7 @@
#define _LINUX_FOLIO_QUEUE_H
#include <linux/pagevec.h>
+#include <linux/mm.h>
/*
* Segment in a queue of running buffers. Each segment can hold a number of
@@ -216,13 +217,6 @@ static inline void folioq_unmark3(struct folio_queue *folioq, unsigned int slot)
clear_bit(slot, &folioq->marks3);
}
-static inline unsigned int __folio_order(struct folio *folio)
-{
- if (!folio_test_large(folio))
- return 0;
- return folio->_flags_1 & 0xff;
-}
-
/**
* folioq_append: Add a folio to a folio queue segment
* @folioq: The segment to add to
@@ -241,7 +235,7 @@ static inline unsigned int folioq_append(struct folio_queue *folioq, struct foli
unsigned int slot = folioq->vec.nr++;
folioq->vec.folios[slot] = folio;
- folioq->orders[slot] = __folio_order(folio);
+ folioq->orders[slot] = folio_order(folio);
return slot;
}
@@ -263,7 +257,7 @@ static inline unsigned int folioq_append_mark(struct folio_queue *folioq, struct
unsigned int slot = folioq->vec.nr++;
folioq->vec.folios[slot] = folio;
- folioq->orders[slot] = __folio_order(folio);
+ folioq->orders[slot] = folio_order(folio);
folioq_mark(folioq, slot);
return slot;
}
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 93e509b6c00e..e893d546a49f 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -39,6 +39,10 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
+vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
+ bool write);
+vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
+ bool write);
enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_UNSUPPORTED,
@@ -341,6 +345,36 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order);
int min_order_for_split(struct folio *folio);
int split_folio_to_list(struct folio *folio, struct list_head *list);
+bool uniform_split_supported(struct folio *folio, unsigned int new_order,
+ bool warns);
+bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
+ bool warns);
+int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
+ struct list_head *list);
+/*
+ * try_folio_split - try to split a @folio at @page using non-uniform split.
+ * @folio: folio to be split
+ * @page: split to order-0 at the given page
+ * @list: store the after-split folios
+ *
+ * Try to split a @folio at @page to order-0 using a non-uniform split. If
+ * the non-uniform split is not supported, fall back to a uniform split.
+ *
+ * Return: 0 if the split is successful, non-zero otherwise.
+ */
+static inline int try_folio_split(struct folio *folio, struct page *page,
+ struct list_head *list)
+{
+ int ret = min_order_for_split(folio);
+
+ if (ret < 0)
+ return ret;
+
+ if (!non_uniform_split_supported(folio, 0, false))
+ return split_huge_page_to_list_to_order(&folio->page, list,
+ ret);
+ return folio_split(folio, ret, page, list);
+}
static inline int split_huge_page(struct page *page)
{
struct folio *folio = page_folio(page);
@@ -404,7 +438,7 @@ int madvise_collapse(struct vm_area_struct *vma,
struct vm_area_struct **prev,
unsigned long start, unsigned long end);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, long adjust_next);
+ unsigned long end, struct vm_area_struct *next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);
@@ -533,6 +567,12 @@ static inline int split_folio_to_list(struct folio *folio, struct list_head *lis
return 0;
}
+static inline int try_folio_split(struct folio *folio, struct page *page,
+ struct list_head *list)
+{
+ return 0;
+}
+
static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
#define split_huge_pmd(__vma, __pmd, __address) \
do { } while (0)
@@ -571,7 +611,7 @@ static inline int madvise_collapse(struct vm_area_struct *vma,
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
- long adjust_next)
+ struct vm_area_struct *next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 76a75ec03dd6..8f3ac832ee7f 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -174,6 +174,9 @@ struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages[MAX_NUMNODES];
+void hugetlb_bootmem_alloc(void);
+bool hugetlb_bootmem_allocated(void);
+
/* arch callbacks */
#ifndef CONFIG_HIGHPTE
@@ -588,6 +591,7 @@ enum hugetlb_page_flags {
HPG_freed,
HPG_vmemmap_optimized,
HPG_raw_hwp_unreliable,
+ HPG_cma,
__NR_HPAGEFLAGS,
};
@@ -647,6 +651,7 @@ HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
+HPAGEFLAG(Cma, cma)
#ifdef CONFIG_HUGETLB_PAGE
@@ -675,11 +680,21 @@ struct hstate {
char name[HSTATE_NAME_LEN];
};
+struct cma;
+
struct huge_bootmem_page {
struct list_head list;
struct hstate *hstate;
+ unsigned long flags;
+ struct cma *cma;
};
+#define HUGE_BOOTMEM_HVO 0x0001
+#define HUGE_BOOTMEM_ZONES_VALID 0x0002
+#define HUGE_BOOTMEM_CMA 0x0004
+
+bool hugetlb_bootmem_page_zones_valid(int nid, struct huge_bootmem_page *m);
+
int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn);
void wait_for_freed_hugetlb_folios(void);
@@ -815,6 +830,17 @@ static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
}
#endif
+#ifndef arch_has_huge_bootmem_alloc
+/*
+ * Some architectures do their own bootmem allocation, so they can't use
+ * early CMA allocation.
+ */
+static inline bool arch_has_huge_bootmem_alloc(void)
+{
+ return false;
+}
+#endif
+
static inline struct hstate *folio_hstate(struct folio *folio)
{
VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
@@ -1257,6 +1283,15 @@ static inline bool hugetlbfs_pagecache_present(
{
return false;
}
+
+static inline void hugetlb_bootmem_alloc(void)
+{
+}
+
+static inline bool hugetlb_bootmem_allocated(void)
+{
+ return false;
+}
#endif /* CONFIG_HUGETLB_PAGE */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 2b2af24d2a43..2e4903b7f7bc 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -952,6 +952,21 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
return (msg->addr << 1) | (msg->flags & I2C_M_RD);
}
+/*
+ * 10-bit address
+ * addr_1: 5'b11110 | addr[9:8] | (R/nW)
+ * addr_2: addr[7:0]
+ */
+static inline u8 i2c_10bit_addr_hi_from_msg(const struct i2c_msg *msg)
+{
+ return 0xf0 | ((msg->addr & GENMASK(9, 8)) >> 7) | (msg->flags & I2C_M_RD);
+}
+
+static inline u8 i2c_10bit_addr_lo_from_msg(const struct i2c_msg *msg)
+{
+ return msg->addr & GENMASK(7, 0);
+}
+
u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold);
void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred);
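
To make the encoding concrete, a hedged sketch of a controller driver emitting
the two address bytes of a 10-bit transfer (my_ctlr_tx_byte is a hypothetical
FIFO write helper):

	/*
	 * For msg->addr = 0x2a5 with I2C_M_RD set:
	 *   hi = 0xf0 | ((0x2a5 & 0x300) >> 7) | 1 = 0xf5
	 *   lo = 0x2a5 & 0xff = 0xa5
	 */
	my_ctlr_tx_byte(ctlr, i2c_10bit_addr_hi_from_msg(msg));
	my_ctlr_tx_byte(ctlr, i2c_10bit_addr_lo_from_msg(msg));
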
@@ -1029,10 +1044,6 @@ static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node
return i2c_get_adapter_by_fwnode(of_fwnode_handle(node));
}
-const struct of_device_id
-*i2c_of_match_device(const struct of_device_id *matches,
- struct i2c_client *client);
-
int of_i2c_get_board_info(struct device *dev, struct device_node *node,
struct i2c_board_info *info);
@@ -1053,13 +1064,6 @@ static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node
return NULL;
}
-static inline const struct of_device_id
-*i2c_of_match_device(const struct of_device_id *matches,
- struct i2c_client *client)
-{
- return NULL;
-}
-
static inline int of_i2c_get_board_info(struct device *dev,
struct device_node *node,
struct i2c_board_info *info)
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
index 12d532b012c5..c67922ece617 100644
--- a/include/linux/i3c/master.h
+++ b/include/linux/i3c/master.h
@@ -475,7 +475,7 @@ struct i3c_master_controller_ops {
int (*attach_i2c_dev)(struct i2c_dev_desc *dev);
void (*detach_i2c_dev)(struct i2c_dev_desc *dev);
int (*i2c_xfers)(struct i2c_dev_desc *dev,
- const struct i2c_msg *xfers, int nxfers);
+ struct i2c_msg *xfers, int nxfers);
int (*request_ibi)(struct i3c_dev_desc *dev,
const struct i3c_ibi_setup *req);
void (*free_ibi)(struct i3c_dev_desc *dev);
diff --git a/include/linux/idr.h b/include/linux/idr.h
index cd729be369b3..2267902d29a7 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -274,6 +274,7 @@ struct ida {
int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
void ida_free(struct ida *, unsigned int id);
void ida_destroy(struct ida *ida);
+int ida_find_first_range(struct ida *ida, unsigned int min, unsigned int max);
/**
* ida_alloc() - Allocate an unused ID.
@@ -345,4 +346,14 @@ static inline bool ida_is_empty(const struct ida *ida)
{
return xa_empty(&ida->xa);
}
+
+static inline bool ida_exists(struct ida *ida, unsigned int id)
+{
+ return ida_find_first_range(ida, id, id) == id;
+}
+
+static inline int ida_find_first(struct ida *ida)
+{
+ return ida_find_first_range(ida, 0, ~0);
+}
#endif /* __IDR_H__ */
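
A minimal sketch of the new lookup helpers on top of the existing IDA
allocation API:

	static DEFINE_IDA(my_ida);

	static void my_ida_demo(void)
	{
		int id = ida_alloc(&my_ida, GFP_KERNEL);

		if (id < 0)
			return;
		WARN_ON(!ida_exists(&my_ida, id));	/* just allocated */
		WARN_ON(ida_find_first(&my_ida) < 0);	/* at least one ID live */
		ida_free(&my_ida, id);
	}
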
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 417073c52380..f242b285081b 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -46,6 +46,7 @@ struct iio_dev;
* modify or drop the sample data, it may be NULL.
* @has_registers: true if the device has writable and readable registers, false
* if there is just one read-only sample data shift register.
+ * @has_named_irqs: Set to true if there is more than one IRQ line.
* @addr_shift: Shift of the register address in the communications register.
* @read_mask: Mask for the communications register having the read bit set.
* @status_ch_mask: Mask for the channel number stored in status register.
@@ -53,7 +54,6 @@ struct iio_dev;
* be used.
* @irq_flags: flags for the interrupt used by the triggered buffer
* @num_slots: Number of sequencer slots
- * @irq_line: IRQ for reading conversions. If 0, spi->irq will be used
* @num_resetclks: Number of SPI clk cycles with MOSI=1 to reset the chip.
*/
struct ad_sigma_delta_info {
@@ -64,13 +64,13 @@ struct ad_sigma_delta_info {
int (*disable_one)(struct ad_sigma_delta *, unsigned int chan);
int (*postprocess_sample)(struct ad_sigma_delta *, unsigned int raw_sample);
bool has_registers;
+ bool has_named_irqs;
unsigned int addr_shift;
unsigned int read_mask;
unsigned int status_ch_mask;
unsigned int data_reg;
unsigned long irq_flags;
unsigned int num_slots;
- int irq_line;
unsigned int num_resetclks;
};
diff --git a/include/linux/iio/backend.h b/include/linux/iio/backend.h
index 10be00f3b120..e45b7dfbec35 100644
--- a/include/linux/iio/backend.h
+++ b/include/linux/iio/backend.h
@@ -70,6 +70,12 @@ enum iio_backend_sample_trigger {
IIO_BACKEND_SAMPLE_TRIGGER_MAX
};
+enum iio_backend_interface_type {
+ IIO_BACKEND_INTERFACE_SERIAL_LVDS,
+ IIO_BACKEND_INTERFACE_SERIAL_CMOS,
+ IIO_BACKEND_INTERFACE_MAX
+};
+
/**
* struct iio_backend_ops - operations structure for an iio_backend
* @enable: Enable backend.
@@ -88,6 +94,9 @@ enum iio_backend_sample_trigger {
* @extend_chan_spec: Extend an IIO channel.
* @ext_info_set: Extended info setter.
* @ext_info_get: Extended info getter.
+ * @interface_type_get: Get the data interface type.
+ * @data_size_set: Set the data size.
+ * @oversampling_ratio_set: Set the oversampling ratio.
* @read_raw: Read a channel attribute from a backend device
* @debugfs_print_chan_status: Print channel status into a buffer.
* @debugfs_reg_access: Read or write register value of backend.
@@ -128,6 +137,11 @@ struct iio_backend_ops {
const char *buf, size_t len);
int (*ext_info_get)(struct iio_backend *back, uintptr_t private,
const struct iio_chan_spec *chan, char *buf);
+ int (*interface_type_get)(struct iio_backend *back,
+ enum iio_backend_interface_type *type);
+ int (*data_size_set)(struct iio_backend *back, unsigned int size);
+ int (*oversampling_ratio_set)(struct iio_backend *back,
+ unsigned int ratio);
int (*read_raw)(struct iio_backend *back,
struct iio_chan_spec const *chan, int *val, int *val2,
long mask);
@@ -186,6 +200,11 @@ ssize_t iio_backend_ext_info_set(struct iio_dev *indio_dev, uintptr_t private,
const char *buf, size_t len);
ssize_t iio_backend_ext_info_get(struct iio_dev *indio_dev, uintptr_t private,
const struct iio_chan_spec *chan, char *buf);
+int iio_backend_interface_type_get(struct iio_backend *back,
+ enum iio_backend_interface_type *type);
+int iio_backend_data_size_set(struct iio_backend *back, unsigned int size);
+int iio_backend_oversampling_ratio_set(struct iio_backend *back,
+ unsigned int ratio);
int iio_backend_read_raw(struct iio_backend *back,
struct iio_chan_spec const *chan, int *val, int *val2,
long mask);
diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h
index 81d9a19aeb91..37f27545f69f 100644
--- a/include/linux/iio/buffer-dmaengine.h
+++ b/include/linux/iio/buffer-dmaengine.h
@@ -11,8 +11,9 @@
struct iio_dev;
struct device;
+struct dma_chan;
-void iio_dmaengine_buffer_free(struct iio_buffer *buffer);
+void iio_dmaengine_buffer_teardown(struct iio_buffer *buffer);
struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
struct iio_dev *indio_dev,
const char *channel,
@@ -26,6 +27,10 @@ int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
struct iio_dev *indio_dev,
const char *channel,
enum iio_buffer_direction dir);
+int devm_iio_dmaengine_buffer_setup_with_handle(struct device *dev,
+ struct iio_dev *indio_dev,
+ struct dma_chan *chan,
+ enum iio_buffer_direction dir);
#define devm_iio_dmaengine_buffer_setup(dev, indio_dev, channel) \
devm_iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, \
diff --git a/include/linux/iio/iio-gts-helper.h b/include/linux/iio/iio-gts-helper.h
index e5de7a124bad..66f830ab9b49 100644
--- a/include/linux/iio/iio-gts-helper.h
+++ b/include/linux/iio/iio-gts-helper.h
@@ -208,5 +208,6 @@ int iio_gts_all_avail_scales(struct iio_gts *gts, const int **vals, int *type,
int *length);
int iio_gts_avail_scales_for_time(struct iio_gts *gts, int time,
const int **vals, int *type, int *length);
+int iio_gts_get_total_gain(struct iio_gts *gts, int gain, int time);
#endif
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 56161e02f002..07a0e8132e88 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -9,7 +9,7 @@
#include <linux/device.h>
#include <linux/cdev.h>
-#include <linux/cleanup.h>
+#include <linux/compiler_types.h>
#include <linux/slab.h>
#include <linux/iio/types.h>
/* IIO TODO LIST */
@@ -663,30 +663,29 @@ int iio_device_claim_direct_mode(struct iio_dev *indio_dev);
void iio_device_release_direct_mode(struct iio_dev *indio_dev);
/*
- * This autocleanup logic is normally used via
- * iio_device_claim_direct_scoped().
+ * Helper functions that allow claiming and releasing direct mode in a
+ * fashion that doesn't generate many false positives from sparse.
+ * Note these must remain static inline in the header so that sparse
+ * can see the __acquire() marking. Revisit when sparse supports
+ * __cond_acquires().
*/
-DEFINE_GUARD(iio_claim_direct, struct iio_dev *, iio_device_claim_direct_mode(_T),
- iio_device_release_direct_mode(_T))
+static inline bool iio_device_claim_direct(struct iio_dev *indio_dev)
+{
+ int ret = iio_device_claim_direct_mode(indio_dev);
-DEFINE_GUARD_COND(iio_claim_direct, _try, ({
- struct iio_dev *dev;
- int d = iio_device_claim_direct_mode(_T);
+ if (ret)
+ return false;
- if (d < 0)
- dev = NULL;
- else
- dev = _T;
- dev;
- }))
+ __acquire(iio_dev);
-/**
- * iio_device_claim_direct_scoped() - Scoped call to iio_device_claim_direct.
- * @fail: What to do on failure to claim device.
- * @iio_dev: Pointer to the IIO devices structure
- */
-#define iio_device_claim_direct_scoped(fail, iio_dev) \
- scoped_cond_guard(iio_claim_direct_try, fail, iio_dev)
+ return true;
+}
+
+static inline void iio_device_release_direct(struct iio_dev *indio_dev)
+{
+ iio_device_release_direct_mode(indio_dev);
+ __release(indio_dev);
+}
int iio_device_claim_buffer_mode(struct iio_dev *indio_dev);
void iio_device_release_buffer_mode(struct iio_dev *indio_dev);
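
The intended calling pattern for the new helpers is a boolean-style claim
followed by an unconditional release; a sketch (my_read_sample is
hypothetical):

	if (!iio_device_claim_direct(indio_dev))
		return -EBUSY;

	ret = my_read_sample(indio_dev, chan, val);	/* hypothetical */
	iio_device_release_direct(indio_dev);
	return ret;
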
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
index 4bb98d9731de..aa160511e265 100644
--- a/include/linux/iio/imu/adis.h
+++ b/include/linux/iio/imu/adis.h
@@ -44,6 +44,8 @@ struct adis_timeout {
* @glob_cmd_reg: Register address of the GLOB_CMD register
* @msc_ctrl_reg: Register address of the MSC_CTRL register
* @diag_stat_reg: Register address of the DIAG_STAT register
+ * @diag_stat_size: Length (in bytes) of the DIAG_STAT register. If 0, the
+ * default length of 2 bytes is used.
* @prod_id_reg: Register address of the PROD_ID register
* @prod_id: Product ID code that should be expected when reading @prod_id_reg
* @self_test_mask: Bitmask of supported self-test operations
@@ -70,6 +72,7 @@ struct adis_data {
unsigned int glob_cmd_reg;
unsigned int msc_ctrl_reg;
unsigned int diag_stat_reg;
+ unsigned int diag_stat_size;
unsigned int prod_id_reg;
unsigned int prod_id;
@@ -95,12 +98,28 @@ struct adis_data {
};
/**
+ * struct adis_ops: Custom ops for adis devices.
+ * @write: Custom SPI write implementation.
+ * @read: Custom SPI read implementation.
+ * @reset: Custom software reset implementation. The custom implementation
+ * does not need to sleep after the reset; the library does that already.
+ */
+struct adis_ops {
+ int (*write)(struct adis *adis, unsigned int reg, unsigned int value,
+ unsigned int size);
+ int (*read)(struct adis *adis, unsigned int reg, unsigned int *value,
+ unsigned int size);
+ int (*reset)(struct adis *adis);
+};
+
+/**
* struct adis - ADIS device instance data
* @spi: Reference to SPI device which owns this ADIS IIO device
* @trig: IIO trigger object data
* @data: ADIS chip variant specific data
* @burst_extra_len: Burst extra length. Should only be used by devices that can
* dynamically change their burst mode length.
+ * @ops: ops struct for custom read and write functions
* @state_lock: Lock used by the device to protect state
* @msg: SPI message object
* @xfer: SPI transfer objects to be used for a @msg
@@ -116,6 +135,7 @@ struct adis {
const struct adis_data *data;
unsigned int burst_extra_len;
+ const struct adis_ops *ops;
/**
* The state_lock is meant to be used during operations that require
* a sequence of SPI R/W in order to protect the SPI transfer
@@ -168,7 +188,7 @@ int __adis_read_reg(struct adis *adis, unsigned int reg,
static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg,
u8 val)
{
- return __adis_write_reg(adis, reg, val, 1);
+ return adis->ops->write(adis, reg, val, 1);
}
/**
@@ -180,7 +200,7 @@ static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg,
static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg,
u16 val)
{
- return __adis_write_reg(adis, reg, val, 2);
+ return adis->ops->write(adis, reg, val, 2);
}
/**
@@ -192,7 +212,7 @@ static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg,
static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg,
u32 val)
{
- return __adis_write_reg(adis, reg, val, 4);
+ return adis->ops->write(adis, reg, val, 4);
}
/**
@@ -207,7 +227,7 @@ static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg,
unsigned int tmp;
int ret;
- ret = __adis_read_reg(adis, reg, &tmp, 2);
+ ret = adis->ops->read(adis, reg, &tmp, 2);
if (ret == 0)
*val = tmp;
@@ -226,7 +246,7 @@ static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg,
unsigned int tmp;
int ret;
- ret = __adis_read_reg(adis, reg, &tmp, 4);
+ ret = adis->ops->read(adis, reg, &tmp, 4);
if (ret == 0)
*val = tmp;
@@ -244,7 +264,7 @@ static inline int adis_write_reg(struct adis *adis, unsigned int reg,
unsigned int val, unsigned int size)
{
guard(mutex)(&adis->state_lock);
- return __adis_write_reg(adis, reg, val, size);
+ return adis->ops->write(adis, reg, val, size);
}
/**
@@ -258,7 +278,7 @@ static int adis_read_reg(struct adis *adis, unsigned int reg,
unsigned int *val, unsigned int size)
{
guard(mutex)(&adis->state_lock);
- return __adis_read_reg(adis, reg, val, size);
+ return adis->ops->read(adis, reg, val, size);
}
/**
diff --git a/include/linux/interval_tree_generic.h b/include/linux/interval_tree_generic.h
index aaa8a0767aa3..1b400f26f63d 100644
--- a/include/linux/interval_tree_generic.h
+++ b/include/linux/interval_tree_generic.h
@@ -104,12 +104,8 @@ ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
if (ITSTART(node) <= last) { /* Cond1 */ \
if (start <= ITLAST(node)) /* Cond2 */ \
return node; /* node is leftmost match */ \
- if (node->ITRB.rb_right) { \
- node = rb_entry(node->ITRB.rb_right, \
- ITSTRUCT, ITRB); \
- if (start <= node->ITSUBTREE) \
- continue; \
- } \
+ node = rb_entry(node->ITRB.rb_right, ITSTRUCT, ITRB); \
+ continue; \
} \
return NULL; /* No match */ \
} \
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
index e6723fa95160..0634a3de1782 100644
--- a/include/linux/io_uring/cmd.h
+++ b/include/linux/io_uring/cmd.h
@@ -21,7 +21,6 @@ struct io_uring_cmd {
struct io_uring_cmd_data {
void *op_data;
- struct io_uring_sqe sqes[2];
};
static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 02fe001feebb..68416b135151 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -79,6 +79,11 @@ struct vm_fault;
#define IOMAP_F_ATOMIC_BIO (1U << 8)
/*
+ * Flag reserved for file system specific usage
+ */
+#define IOMAP_F_PRIVATE (1U << 12)
+
+/*
* Flags set by the core iomap code during operations:
*
* IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size
@@ -88,14 +93,8 @@ struct vm_fault;
* range it covers needs to be remapped by the high level before the operation
* can proceed.
*/
-#define IOMAP_F_SIZE_CHANGED (1U << 8)
-#define IOMAP_F_STALE (1U << 9)
-
-/*
- * Flags from 0x1000 up are for file system specific usage:
- */
-#define IOMAP_F_PRIVATE (1U << 12)
-
+#define IOMAP_F_SIZE_CHANGED (1U << 14)
+#define IOMAP_F_STALE (1U << 15)
/*
* Magic value for addr:
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index cf8c16ba04a0..ccce8a751e2a 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -41,6 +41,7 @@ struct iommu_dirty_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_dma_cookie;
+struct iommu_dma_msi_cookie;
struct iommu_fault_param;
struct iommufd_ctx;
struct iommufd_viommu;
@@ -165,6 +166,15 @@ struct iommu_domain_geometry {
bool force_aperture; /* DMA only allowed in mappable range? */
};
+enum iommu_domain_cookie_type {
+ IOMMU_COOKIE_NONE,
+ IOMMU_COOKIE_DMA_IOVA,
+ IOMMU_COOKIE_DMA_MSI,
+ IOMMU_COOKIE_FAULT_HANDLER,
+ IOMMU_COOKIE_SVA,
+ IOMMU_COOKIE_IOMMUFD,
+};
+
/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
@@ -211,23 +221,18 @@ struct iommu_domain_geometry {
struct iommu_domain {
unsigned type;
+ enum iommu_domain_cookie_type cookie_type;
const struct iommu_domain_ops *ops;
const struct iommu_dirty_ops *dirty_ops;
const struct iommu_ops *owner; /* Whose domain_alloc we came from */
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
struct iommu_domain_geometry geometry;
- struct iommu_dma_cookie *iova_cookie;
int (*iopf_handler)(struct iopf_group *group);
-#if IS_ENABLED(CONFIG_IRQ_MSI_IOMMU)
- int (*sw_msi)(struct iommu_domain *domain, struct msi_desc *desc,
- phys_addr_t msi_addr);
-#endif
-
- union { /* Pointer usable by owner of the domain */
- struct iommufd_hw_pagetable *iommufd_hwpt; /* iommufd */
- };
- union { /* Fault handler */
+ union { /* cookie */
+ struct iommu_dma_cookie *iova_cookie;
+ struct iommu_dma_msi_cookie *msi_cookie;
+ struct iommufd_hw_pagetable *iommufd_hwpt;
struct {
iommu_fault_handler_t handler;
void *handler_token;
@@ -244,16 +249,6 @@ struct iommu_domain {
};
};
-static inline void iommu_domain_set_sw_msi(
- struct iommu_domain *domain,
- int (*sw_msi)(struct iommu_domain *domain, struct msi_desc *desc,
- phys_addr_t msi_addr))
-{
-#if IS_ENABLED(CONFIG_IRQ_MSI_IOMMU)
- domain->sw_msi = sw_msi;
-#endif
-}
-
static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
{
return domain->type & __IOMMU_DOMAIN_DMA_API;
diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
index 11110c749200..34b6e6ca4bfa 100644
--- a/include/linux/iommufd.h
+++ b/include/linux/iommufd.h
@@ -8,9 +8,11 @@
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/iommu.h>
#include <linux/refcount.h>
#include <linux/types.h>
#include <linux/xarray.h>
+#include <uapi/linux/iommufd.h>
struct device;
struct file;
@@ -34,6 +36,7 @@ enum iommufd_object_type {
IOMMUFD_OBJ_FAULT,
IOMMUFD_OBJ_VIOMMU,
IOMMUFD_OBJ_VDEVICE,
+ IOMMUFD_OBJ_VEVENTQ,
#ifdef CONFIG_IOMMUFD_TEST
IOMMUFD_OBJ_SELFTEST,
#endif
@@ -52,9 +55,11 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
struct device *dev, u32 *id);
void iommufd_device_unbind(struct iommufd_device *idev);
-int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id);
-int iommufd_device_replace(struct iommufd_device *idev, u32 *pt_id);
-void iommufd_device_detach(struct iommufd_device *idev);
+int iommufd_device_attach(struct iommufd_device *idev, ioasid_t pasid,
+ u32 *pt_id);
+int iommufd_device_replace(struct iommufd_device *idev, ioasid_t pasid,
+ u32 *pt_id);
+void iommufd_device_detach(struct iommufd_device *idev, ioasid_t pasid);
struct iommufd_ctx *iommufd_device_to_ictx(struct iommufd_device *idev);
u32 iommufd_device_to_id(struct iommufd_device *idev);
@@ -93,6 +98,8 @@ struct iommufd_viommu {
const struct iommufd_viommu_ops *ops;
struct xarray vdevs;
+ struct list_head veventqs;
+ struct rw_semaphore veventqs_rwsem;
unsigned int type;
};
@@ -187,6 +194,11 @@ struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
enum iommufd_object_type type);
struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
unsigned long vdev_id);
+int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu,
+ struct device *dev, unsigned long *vdev_id);
+int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
+ enum iommu_veventq_type type, void *event_data,
+ size_t data_len);
#else /* !CONFIG_IOMMUFD_DRIVER_CORE */
static inline struct iommufd_object *
_iommufd_object_alloc(struct iommufd_ctx *ictx, size_t size,
@@ -200,6 +212,20 @@ iommufd_viommu_find_dev(struct iommufd_viommu *viommu, unsigned long vdev_id)
{
return NULL;
}
+
+static inline int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu,
+ struct device *dev,
+ unsigned long *vdev_id)
+{
+ return -ENOENT;
+}
+
+static inline int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
+ enum iommu_veventq_type type,
+ void *event_data, size_t data_len)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_IOMMUFD_DRIVER_CORE */
/*
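
A hedged sketch of how an IOMMU driver might forward a hardware event through
the new virtual event queue helpers (the event struct and MY_VEVENTQ_TYPE
stand in for driver-specific definitions):

	struct my_hw_event evt;
	unsigned long vdev_id;
	int rc;

	/* only report events for devices that have a virtual device ID */
	rc = iommufd_viommu_get_vdev_id(viommu, dev, &vdev_id);
	if (rc)
		return rc;
	/* hypothetical: fill evt from hardware, including vdev_id */
	return iommufd_viommu_report_event(viommu, MY_VEVENTQ_TYPE,
					   &evt, sizeof(evt));
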
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 5385349f0b8a..e8b2d6aa4013 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -154,15 +154,20 @@ enum {
};
/* helpers to define resources */
-#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
+#define DEFINE_RES_NAMED_DESC(_start, _size, _name, _flags, _desc) \
(struct resource) { \
.start = (_start), \
.end = (_start) + (_size) - 1, \
.name = (_name), \
.flags = (_flags), \
- .desc = IORES_DESC_NONE, \
+ .desc = (_desc), \
}
+#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
+ DEFINE_RES_NAMED_DESC(_start, _size, _name, _flags, IORES_DESC_NONE)
+#define DEFINE_RES(_start, _size, _flags) \
+ DEFINE_RES_NAMED(_start, _size, NULL, _flags)
+
#define DEFINE_RES_IO_NAMED(_start, _size, _name) \
DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_IO)
#define DEFINE_RES_IO(_start, _size) \
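
For illustration, the new helpers reduce static resource boilerplate; a sketch
with made-up addresses:

	static struct resource my_res[] = {
		/* flags-only form: no name, desc defaults to IORES_DESC_NONE */
		DEFINE_RES(0xfed40000, 0x5000, IORESOURCE_MEM),
		/* fully specified form, including the resource descriptor */
		DEFINE_RES_NAMED_DESC(0x1000, 0x100, "my-io", IORESOURCE_IO,
				      IORES_DESC_NONE),
	};
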
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 905a2e2f45f6..ecbf819deeca 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -104,7 +104,7 @@ extern int kdb_initial_cpu;
#define KDB_NOENVVALUE (-6)
#define KDB_NOTIMP (-7)
#define KDB_ENVFULL (-8)
-#define KDB_ENVBUFFULL (-9)
+#define KDB_KMALLOCFAILED (-9)
#define KDB_TOOMANYBPT (-10)
#define KDB_TOOMANYDBREGS (-11)
#define KDB_DUPBPT (-12)
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 87c79d076d6d..b5a5f32fdfd1 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -147,6 +147,11 @@ enum kernfs_root_flag {
* Support user xattrs to be written to nodes rooted at this root.
*/
KERNFS_ROOT_SUPPORT_USER_XATTR = 0x0008,
+
+ /*
+ * Renames must not change the parent node.
+ */
+ KERNFS_ROOT_INVARIANT_PARENT = 0x0010,
};
/* type-specific structures for kernfs_node union members */
@@ -199,8 +204,8 @@ struct kernfs_node {
* never moved to a different parent, it is safe to access the
* parent directly.
*/
- struct kernfs_node *parent;
- const char *name;
+ struct kernfs_node __rcu *__parent;
+ const char __rcu *name;
struct rb_node rb;
@@ -395,7 +400,7 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
}
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen);
-int kernfs_path_from_node(struct kernfs_node *root_kn, struct kernfs_node *kn,
+int kernfs_path_from_node(struct kernfs_node *kn_to, struct kernfs_node *kn_from,
char *buf, size_t buflen);
void pr_cont_kernfs_name(struct kernfs_node *kn);
void pr_cont_kernfs_path(struct kernfs_node *kn);
@@ -416,6 +421,7 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
unsigned int flags, void *priv);
void kernfs_destroy_root(struct kernfs_root *root);
+unsigned int kernfs_root_flags(struct kernfs_node *kn);
struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
const char *name, umode_t mode,
@@ -514,6 +520,8 @@ kernfs_create_root(struct kernfs_syscall_ops *scops, unsigned int flags,
{ return ERR_PTR(-ENOSYS); }
static inline void kernfs_destroy_root(struct kernfs_root *root) { }
+static inline unsigned int kernfs_root_flags(struct kernfs_node *kn)
+{ return 0; }
static inline struct kernfs_node *
kernfs_create_dir_ns(struct kernfs_node *parent, const char *name,
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index c840431eadda..c8971861521a 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -203,6 +203,15 @@ static inline int arch_kimage_file_post_load_cleanup(struct kimage *image)
}
#endif
+#ifndef arch_check_excluded_range
+static inline int arch_check_excluded_range(struct kimage *image,
+ unsigned long start,
+ unsigned long end)
+{
+ return 0;
+}
+#endif
+
#ifdef CONFIG_KEXEC_SIG
#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION
int kexec_kernel_verify_pe_sig(const char *kernel, unsigned long kernel_len);
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 51ef131e66b7..5eebbe7a3545 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -257,7 +257,6 @@ extern void kgdb_arch_late(void);
* hardware breakpoints.
* @correct_hw_break: Allow an architecture to specify how to correct the
* hardware debug registers.
- * @enable_nmi: Manage NMI-triggered entry to KGDB
*/
struct kgdb_arch {
unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
@@ -270,8 +269,6 @@ struct kgdb_arch {
void (*disable_hw_break)(struct pt_regs *regs);
void (*remove_all_hw_break)(void);
void (*correct_hw_break)(void);
-
- void (*enable_nmi)(bool on);
};
/**
@@ -306,14 +303,6 @@ extern const struct kgdb_arch arch_kgdb_ops;
extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
-#ifdef CONFIG_SERIAL_KGDB_NMI
-extern int kgdb_register_nmi_console(void);
-extern int kgdb_unregister_nmi_console(void);
-#else
-static inline int kgdb_register_nmi_console(void) { return 0; }
-static inline int kgdb_unregister_nmi_console(void) { return 0; }
-#endif
-
extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
extern struct kgdb_io *dbg_io_ops;
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 5c8865bb59d9..b11660b706c5 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -134,10 +134,6 @@
.size name, .-name
#endif
-/* If symbol 'name' is treated as a subroutine (gets called, and returns)
- * then please use ENDPROC to mark 'name' as STT_FUNC for the benefit of
- * static analysis tools such as stack depth analyzer.
- */
#ifndef ENDPROC
/* deprecated, use SYM_FUNC_END */
#define ENDPROC(name) \
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
index fa6e8471bd22..248db9b77ee2 100644
--- a/include/linux/list_nulls.h
+++ b/include/linux/list_nulls.h
@@ -28,6 +28,7 @@ struct hlist_nulls_node {
#define NULLS_MARKER(value) (1UL | (((long)value) << 1))
#define INIT_HLIST_NULLS_HEAD(ptr, nulls) \
((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls))
+#define HLIST_NULLS_HEAD_INIT(nulls) {.first = (struct hlist_nulls_node *)NULLS_MARKER(nulls)}
#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
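
The new macro mirrors INIT_HLIST_NULLS_HEAD for static definitions; a minimal
sketch:

	/* a statically initialized nulls list using nulls value 1 */
	static struct hlist_nulls_head my_head = HLIST_NULLS_HEAD_INIT(1);
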
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index b38a56a13f39..725fd7727422 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -97,8 +97,6 @@ ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf,
ssize_t mei_cldev_send_timeout(struct mei_cl_device *cldev, const u8 *buf,
size_t length, unsigned long timeout);
ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length);
-ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
- size_t length);
ssize_t mei_cldev_recv_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
unsigned long timeout);
ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
@@ -107,8 +105,6 @@ ssize_t mei_cldev_send_vtag_timeout(struct mei_cl_device *cldev, const u8 *buf,
size_t length, u8 vtag, unsigned long timeout);
ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
u8 *vtag);
-ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
- size_t length, u8 *vtag);
ssize_t mei_cldev_recv_vtag_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
u8 *vtag, unsigned long timeout);
@@ -116,7 +112,6 @@ int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb);
int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
mei_cldev_cb_t notif_cb);
-const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev);
u8 mei_cldev_ver(const struct mei_cl_device *cldev);
void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev);
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index e79eb6ac516f..ef5a1ecc6e59 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -133,7 +133,6 @@ int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
int memblock_reserved_mark_noinit(phys_addr_t base, phys_addr_t size);
-void memblock_free_all(void);
void memblock_free(void *ptr, size_t size);
void reset_all_zones_managed_pages(void);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6e74b8254d9b..53364526d877 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -438,9 +438,7 @@ static inline struct mem_cgroup *folio_memcg(struct folio *folio)
*/
static inline bool folio_memcg_charged(struct folio *folio)
{
- if (folio_memcg_kmem(folio))
- return __folio_objcg(folio) != NULL;
- return __folio_memcg(folio) != NULL;
+ return folio->memcg_data != 0;
}
/*
@@ -649,8 +647,6 @@ int mem_cgroup_charge_hugetlb(struct folio* folio, gfp_t gfp);
int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
gfp_t gfp, swp_entry_t entry);
-void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
-
void __mem_cgroup_uncharge(struct folio *folio);
/**
@@ -1040,7 +1036,9 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
rcu_read_unlock();
}
-void split_page_memcg(struct page *head, int old_order, int new_order);
+void split_page_memcg(struct page *first, unsigned order);
+void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
+ unsigned new_order);
static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
{
@@ -1165,10 +1163,6 @@ static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
return 0;
}
-static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr)
-{
-}
-
static inline void mem_cgroup_uncharge(struct folio *folio)
{
}
@@ -1465,7 +1459,12 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
-static inline void split_page_memcg(struct page *head, int old_order, int new_order)
+static inline void split_page_memcg(struct page *first, unsigned order)
+{
+}
+
+static inline void folio_split_memcg_refs(struct folio *folio,
+ unsigned old_order, unsigned new_order)
{
}
@@ -1848,6 +1847,9 @@ static inline void mem_cgroup_exit_user_fault(void)
current->in_user_fault = 0;
}
+void memcg1_swapout(struct folio *folio, swp_entry_t entry);
+void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages);
+
#else /* CONFIG_MEMCG_V1 */
static inline
unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
@@ -1875,6 +1877,14 @@ static inline void mem_cgroup_exit_user_fault(void)
{
}
+static inline void memcg1_swapout(struct folio *folio, swp_entry_t entry)
+{
+}
+
+static inline void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages)
+{
+}
+
#endif /* CONFIG_MEMCG_V1 */
#endif /* _LINUX_MEMCONTROL_H */
diff --git a/include/linux/memory.h b/include/linux/memory.h
index c0afee5d126e..12daa6ec7d09 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -25,7 +25,7 @@
/**
* struct memory_group - a logical group of memory blocks
* @nid: The node id for all memory blocks inside the memory group.
- * @blocks: List of all memory blocks belonging to this memory group.
+ * @memory_blocks: List of all memory blocks belonging to this memory group.
* @present_kernel_pages: Present (online) memory outside ZONE_MOVABLE of this
* memory group.
* @present_movable_pages: Present (online) memory in ZONE_MOVABLE of this
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 3f7143ade32c..4aa151914eab 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -161,7 +161,7 @@ static inline bool is_device_private_page(const struct page *page)
{
return IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
is_zone_device_page(page) &&
- page->pgmap->type == MEMORY_DEVICE_PRIVATE;
+ page_pgmap(page)->type == MEMORY_DEVICE_PRIVATE;
}
static inline bool folio_is_device_private(const struct folio *folio)
@@ -173,13 +173,13 @@ static inline bool is_pci_p2pdma_page(const struct page *page)
{
return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
is_zone_device_page(page) &&
- page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
+ page_pgmap(page)->type == MEMORY_DEVICE_PCI_P2PDMA;
}
static inline bool is_device_coherent_page(const struct page *page)
{
return is_zone_device_page(page) &&
- page->pgmap->type == MEMORY_DEVICE_COHERENT;
+ page_pgmap(page)->type == MEMORY_DEVICE_COHERENT;
}
static inline bool folio_is_device_coherent(const struct folio *folio)
@@ -187,6 +187,17 @@ static inline bool folio_is_device_coherent(const struct folio *folio)
return is_device_coherent_page(&folio->page);
}
+static inline bool is_fsdax_page(const struct page *page)
+{
+ return is_zone_device_page(page) &&
+ page_pgmap(page)->type == MEMORY_DEVICE_FS_DAX;
+}
+
+static inline bool folio_is_fsdax(const struct folio *folio)
+{
+ return is_fsdax_page(&folio->page);
+}
+
#ifdef CONFIG_ZONE_DEVICE
void zone_device_page_init(struct page *page);
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
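
is_fsdax_page()/folio_is_fsdax() give fsdax the same ZONE_DEVICE type checks as the private/coherent/p2pdma helpers above. A hedged sketch of the kind of policy they enable; my_longterm_pinnable() is hypothetical, and the real check lives in folio_is_longterm_pinnable() in mm.h, updated later in this diff:

/* Sketch: refuse long-term pins on folios whose unmap can only be
 * delayed transiently (fsdax truncate/hole-punch). */
static bool my_longterm_pinnable(struct folio *folio)
{
	if (folio_is_device_coherent(folio))
		return false;
	if (folio_is_fsdax(folio))	/* new helper above */
		return false;
	return !folio_is_zone_movable(folio);
}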
diff --git a/include/linux/mfd/mt6397/rtc.h b/include/linux/mfd/mt6397/rtc.h
index 068ae1c0f0e8..27883af44f87 100644
--- a/include/linux/mfd/mt6397/rtc.h
+++ b/include/linux/mfd/mt6397/rtc.h
@@ -60,11 +60,6 @@
#define RTC_PDN2 0x002e
#define RTC_PDN2_PWRON_ALARM BIT(4)
-#define RTC_MIN_YEAR 1968
-#define RTC_BASE_YEAR 1900
-#define RTC_NUM_YEARS 128
-#define RTC_MIN_YEAR_OFFSET (RTC_MIN_YEAR - RTC_BASE_YEAR)
-
#define MTK_RTC_POLL_DELAY_US 10
#define MTK_RTC_POLL_TIMEOUT (jiffies_to_usecs(HZ))
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 059dc94d20bb..dd372b0123a6 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -721,12 +721,6 @@ enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);
void mhi_soc_reset(struct mhi_controller *mhi_cntrl);
/**
- * mhi_device_get - Disable device low power mode
- * @mhi_dev: Device associated with the channel
- */
-void mhi_device_get(struct mhi_device *mhi_dev);
-
-/**
* mhi_device_get_sync - Disable device low power mode. Synchronously
* take the controller out of suspended state
* @mhi_dev: Device associated with the channel
@@ -777,18 +771,6 @@ int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev);
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);
/**
- * mhi_queue_dma - Send or receive DMA mapped buffers from client device
- * over MHI channel
- * @mhi_dev: Device associated with the channels
- * @dir: DMA direction for the channel
- * @mhi_buf: Buffer for holding the DMA mapped data
- * @len: Buffer length
- * @mflags: MHI transfer flags used for the transfer
- */
-int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
- struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags);
-
-/**
* mhi_queue_buf - Send or receive raw buffers from client device over MHI
* channel
* @mhi_dev: Device associated with the channels
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 80891120cca9..aaa2114498d6 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -205,8 +205,8 @@ struct migrate_vma {
unsigned long end;
/*
- * Set to the owner value also stored in page->pgmap->owner for
- * migrating out of device private memory. The flags also need to
+ * Set to the owner value also stored in page_pgmap(page)->owner
+ * for migrating out of device private memory. The flags also need to
* be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
* The caller should always set this field when using mmu notifier
* callbacks to avoid device MMU invalidations for device private
diff --git a/include/linux/min_heap.h b/include/linux/min_heap.h
index 1160bed6579e..79ddc0adbf2b 100644
--- a/include/linux/min_heap.h
+++ b/include/linux/min_heap.h
@@ -218,7 +218,7 @@ static size_t parent(size_t i, unsigned int lsbit, size_t size)
/* Initialize a min-heap. */
static __always_inline
-void __min_heap_init_inline(min_heap_char *heap, void *data, int size)
+void __min_heap_init_inline(min_heap_char *heap, void *data, size_t size)
{
heap->nr = 0;
heap->size = size;
@@ -254,7 +254,7 @@ bool __min_heap_full_inline(min_heap_char *heap)
/* Sift the element at pos down the heap. */
static __always_inline
-void __min_heap_sift_down_inline(min_heap_char *heap, int pos, size_t elem_size,
+void __min_heap_sift_down_inline(min_heap_char *heap, size_t pos, size_t elem_size,
const struct min_heap_callbacks *func, void *args)
{
const unsigned long lsbit = elem_size & -elem_size;
@@ -324,7 +324,7 @@ static __always_inline
void __min_heapify_all_inline(min_heap_char *heap, size_t elem_size,
const struct min_heap_callbacks *func, void *args)
{
- int i;
+ ssize_t i;
for (i = heap->nr / 2 - 1; i >= 0; i--)
__min_heap_sift_down_inline(heap, i, elem_size, func, args);
@@ -379,7 +379,7 @@ bool __min_heap_push_inline(min_heap_char *heap, const void *element, size_t ele
const struct min_heap_callbacks *func, void *args)
{
void *data = heap->data;
- int pos;
+ size_t pos;
if (WARN_ONCE(heap->nr >= heap->size, "Pushing on a full heap"))
return false;
@@ -428,10 +428,10 @@ bool __min_heap_del_inline(min_heap_char *heap, size_t elem_size, size_t idx,
__min_heap_del_inline(container_of(&(_heap)->nr, min_heap_char, nr), \
__minheap_obj_size(_heap), _idx, _func, _args)
-void __min_heap_init(min_heap_char *heap, void *data, int size);
+void __min_heap_init(min_heap_char *heap, void *data, size_t size);
void *__min_heap_peek(struct min_heap_char *heap);
bool __min_heap_full(min_heap_char *heap);
-void __min_heap_sift_down(min_heap_char *heap, int pos, size_t elem_size,
+void __min_heap_sift_down(min_heap_char *heap, size_t pos, size_t elem_size,
const struct min_heap_callbacks *func, void *args);
void __min_heap_sift_up(min_heap_char *heap, size_t elem_size, size_t idx,
const struct min_heap_callbacks *func, void *args);
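
The index and size parameters widen from int to size_t throughout, with ssize_t kept only where a loop must count down past zero. A standalone illustration in plain C (not kernel code) of why that one counter stays signed:

#include <stddef.h>
#include <sys/types.h>	/* ssize_t */

static void sift_down(void *data, size_t pos, size_t nr)
{
	(void)data; (void)pos; (void)nr;	/* body elided */
}

static void heapify_all(void *data, size_t nr)
{
	ssize_t i;	/* a size_t would wrap at i-- past zero and never exit */

	for (i = nr / 2 - 1; i >= 0; i--)
		sift_down(data, i, nr);
}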
diff --git a/include/linux/mm.h b/include/linux/mm.h
index beba5ba0fd97..b7f13f087954 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -32,6 +32,7 @@
#include <linux/memremap.h>
#include <linux/slab.h>
#include <linux/cacheinfo.h>
+#include <linux/rcuwait.h>
struct mempolicy;
struct anon_vma;
@@ -40,20 +41,10 @@ struct user_struct;
struct pt_regs;
struct folio_batch;
+void arch_mm_preinit(void);
void mm_core_init(void);
void init_mm_internals(void);
-#ifndef CONFIG_NUMA /* Don't use mapnrs, do it properly */
-extern unsigned long max_mapnr;
-
-static inline void set_max_mapnr(unsigned long limit)
-{
- max_mapnr = limit;
-}
-#else
-static inline void set_max_mapnr(unsigned long limit) { }
-#endif
-
extern atomic_long_t _totalram_pages;
static inline unsigned long totalram_pages(void)
{
@@ -242,8 +233,6 @@ void setup_initial_init_mm(void *start_code, void *end_code,
struct vm_area_struct *vm_area_alloc(struct mm_struct *);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
void vm_area_free(struct vm_area_struct *);
-/* Use only if VMA has no other users */
-void __vm_area_free(struct vm_area_struct *vma);
#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
@@ -682,13 +671,57 @@ static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_PER_VMA_LOCK
+static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ static struct lock_class_key lockdep_key;
+
+ lockdep_init_map(&vma->vmlock_dep_map, "vm_lock", &lockdep_key, 0);
+#endif
+ if (reset_refcnt)
+ refcount_set(&vma->vm_refcnt, 0);
+ vma->vm_lock_seq = UINT_MAX;
+}
+
+static inline bool is_vma_writer_only(int refcnt)
+{
+ /*
+ * With a writer and no readers, refcnt is VMA_LOCK_OFFSET if the vma
+ * is detached and (VMA_LOCK_OFFSET + 1) if it is attached. Waiting on
+ * a detached vma happens only in vma_mark_detached() and is a rare
+ * case, therefore most of the time there will be no unnecessary wakeup.
+ */
+ return refcnt & VMA_LOCK_OFFSET && refcnt <= VMA_LOCK_OFFSET + 1;
+}
+
+static inline void vma_refcount_put(struct vm_area_struct *vma)
+{
+ /* Use a copy of vm_mm in case vma is freed after we drop vm_refcnt */
+ struct mm_struct *mm = vma->vm_mm;
+ int oldcnt;
+
+ rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
+	if (!__refcount_dec_and_test(&vma->vm_refcnt, &oldcnt)) {
+		if (is_vma_writer_only(oldcnt - 1))
+ rcuwait_wake_up(&mm->vma_writer_wait);
+ }
+}
+
/*
* Try to read-lock a vma. The function is allowed to occasionally yield false
* locked result to avoid performance overhead, in which case we fall back to
* using mmap_lock. The function should never yield false unlocked result.
+ * False locked result is possible if mm_lock_seq overflows or if vma gets
+ * reused and attached to a different mm before we lock it.
+ * Returns the vma on success, NULL on failure to lock, or ERR_PTR(-EAGAIN)
+ * if the vma got detached.
*/
-static inline bool vma_start_read(struct vm_area_struct *vma)
+static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm,
+ struct vm_area_struct *vma)
{
+ int oldcnt;
+
/*
* Check before locking. A race might cause false locked result.
* We can use READ_ONCE() for the mm_lock_seq here, and don't need
@@ -696,16 +729,26 @@ static inline bool vma_start_read(struct vm_area_struct *vma)
* we don't rely on for anything - the mm_lock_seq read against which we
* need ordering is below.
*/
- if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq.sequence))
- return false;
+ if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(mm->mm_lock_seq.sequence))
+ return NULL;
- if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0))
- return false;
+ /*
+ * If VMA_LOCK_OFFSET is set, __refcount_inc_not_zero_limited_acquire()
+ * will fail because VMA_REF_LIMIT is less than VMA_LOCK_OFFSET.
+ * Acquire fence is required here to avoid reordering against later
+ * vm_lock_seq check and checks inside lock_vma_under_rcu().
+ */
+ if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt,
+ VMA_REF_LIMIT))) {
+ /* return EAGAIN if vma got detached from under us */
+ return oldcnt ? NULL : ERR_PTR(-EAGAIN);
+ }
+ rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_);
/*
- * Overflow might produce false locked result.
+ * Overflow of vm_lock_seq/mm_lock_seq might produce false locked result.
* False unlocked result is impossible because we modify and check
- * vma->vm_lock_seq under vma->vm_lock protection and mm->mm_lock_seq
+ * vma->vm_lock_seq under vma->vm_refcnt protection and mm->mm_lock_seq
* modification invalidates all existing locks.
*
* We must use ACQUIRE semantics for the mm_lock_seq so that if we are
@@ -713,18 +756,47 @@ static inline bool vma_start_read(struct vm_area_struct *vma)
* after it has been unlocked.
* This pairs with RELEASE semantics in vma_end_write_all().
*/
- if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&vma->vm_mm->mm_lock_seq))) {
- up_read(&vma->vm_lock->lock);
- return false;
+ if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&mm->mm_lock_seq))) {
+ vma_refcount_put(vma);
+ return NULL;
}
+
+ return vma;
+}
+
+/*
+ * Use only while holding the mmap read lock, which guarantees that locking
+ * cannot fail (nobody can concurrently write-lock the vma). vma_start_read()
+ * should not be used in such cases because it might fail due to mm_lock_seq
+ * overflow. This is used to obtain the vma read lock before dropping the
+ * mmap read lock.
+ */
+static inline bool vma_start_read_locked_nested(struct vm_area_struct *vma, int subclass)
+{
+ int oldcnt;
+
+ mmap_assert_locked(vma->vm_mm);
+ if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt,
+ VMA_REF_LIMIT)))
+ return false;
+
+ rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_);
return true;
}
+/*
+ * Use only while holding the mmap read lock, which guarantees that locking
+ * cannot fail (nobody can concurrently write-lock the vma). vma_start_read()
+ * should not be used in such cases because it might fail due to mm_lock_seq
+ * overflow. This is used to obtain the vma read lock before dropping the
+ * mmap read lock.
+ */
+static inline bool vma_start_read_locked(struct vm_area_struct *vma)
+{
+ return vma_start_read_locked_nested(vma, 0);
+}
+
static inline void vma_end_read(struct vm_area_struct *vma)
{
- rcu_read_lock(); /* keeps vma alive till the end of up_read */
- up_read(&vma->vm_lock->lock);
- rcu_read_unlock();
+ vma_refcount_put(vma);
}
/* WARNING! Can only be used if mmap_lock is expected to be write-locked */
@@ -740,6 +812,8 @@ static bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_l
return (vma->vm_lock_seq == *mm_lock_seq);
}
+void __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq);
+
/*
* Begin writing to a VMA.
* Exclude concurrent readers under the per-VMA lock until the currently
@@ -752,15 +826,7 @@ static inline void vma_start_write(struct vm_area_struct *vma)
if (__is_vma_write_locked(vma, &mm_lock_seq))
return;
- down_write(&vma->vm_lock->lock);
- /*
- * We should use WRITE_ONCE() here because we can have concurrent reads
- * from the early lockless pessimistic check in vma_start_read().
- * We don't really care about the correctness of that early check, but
- * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy.
- */
- WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
- up_write(&vma->vm_lock->lock);
+ __vma_start_write(vma, mm_lock_seq);
}
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
@@ -772,18 +838,36 @@ static inline void vma_assert_write_locked(struct vm_area_struct *vma)
static inline void vma_assert_locked(struct vm_area_struct *vma)
{
- if (!rwsem_is_locked(&vma->vm_lock->lock))
- vma_assert_write_locked(vma);
+ unsigned int mm_lock_seq;
+
+ VM_BUG_ON_VMA(refcount_read(&vma->vm_refcnt) <= 1 &&
+ !__is_vma_write_locked(vma, &mm_lock_seq), vma);
}
-static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
+/*
+ * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these
+ * assertions should be made either under mmap_write_lock or when the object
+ * has been isolated under mmap_write_lock, ensuring no competing writers.
+ */
+static inline void vma_assert_attached(struct vm_area_struct *vma)
{
- /* When detaching vma should be write-locked */
- if (detached)
- vma_assert_write_locked(vma);
- vma->detached = detached;
+ WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
+}
+
+static inline void vma_assert_detached(struct vm_area_struct *vma)
+{
+ WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
+}
+
+static inline void vma_mark_attached(struct vm_area_struct *vma)
+{
+ vma_assert_write_locked(vma);
+ vma_assert_detached(vma);
+ refcount_set_release(&vma->vm_refcnt, 1);
}
+void vma_mark_detached(struct vm_area_struct *vma);
+
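
Taken together, vm_refcnt encodes the whole VMA lifecycle: 0 means detached, 1 attached and unlocked, 1 + N attached with N readers, and the VMA_LOCK_OFFSET bit marks a writer excluding new readers. A simplified user-space model of the reader-side protocol in C11 atomics; lockdep, the writer wait/wakeup and the exact kernel refcount primitives are deliberately elided:

#include <stdatomic.h>
#include <stdbool.h>

#define VMA_LOCK_OFFSET	0x40000000
#define VMA_REF_LIMIT	(VMA_LOCK_OFFSET - 1)

static bool model_start_read(atomic_int *refcnt)
{
	int old = atomic_load_explicit(refcnt, memory_order_relaxed);

	do {
		/* 0 = detached; above the limit = writer present */
		if (old == 0 || old > VMA_REF_LIMIT)
			return false;
	} while (!atomic_compare_exchange_weak_explicit(refcnt, &old, old + 1,
							memory_order_acquire,
							memory_order_relaxed));
	return true;
}

static void model_end_read(atomic_int *refcnt)
{
	atomic_fetch_sub_explicit(refcnt, 1, memory_order_release);
}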
static inline void release_fault_lock(struct vm_fault *vmf)
{
if (vmf->flags & FAULT_FLAG_VMA_LOCK)
@@ -805,14 +889,18 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
#else /* CONFIG_PER_VMA_LOCK */
-static inline bool vma_start_read(struct vm_area_struct *vma)
- { return false; }
+static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt) {}
+static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm,
+ struct vm_area_struct *vma)
+ { return NULL; }
static inline void vma_end_read(struct vm_area_struct *vma) {}
static inline void vma_start_write(struct vm_area_struct *vma) {}
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{ mmap_assert_write_locked(vma->vm_mm); }
-static inline void vma_mark_detached(struct vm_area_struct *vma,
- bool detached) {}
+static inline void vma_assert_attached(struct vm_area_struct *vma) {}
+static inline void vma_assert_detached(struct vm_area_struct *vma) {}
+static inline void vma_mark_attached(struct vm_area_struct *vma) {}
+static inline void vma_mark_detached(struct vm_area_struct *vma) {}
static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
unsigned long address)
@@ -839,18 +927,13 @@ static inline void assert_fault_locked(struct vm_fault *vmf)
extern const struct vm_operations_struct vma_dummy_vm_ops;
-/*
- * WARNING: vma_init does not initialize vma->vm_lock.
- * Use vm_area_alloc()/vm_area_free() if vma needs locking.
- */
static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
memset(vma, 0, sizeof(*vma));
vma->vm_mm = mm;
vma->vm_ops = &vma_dummy_vm_ops;
INIT_LIST_HEAD(&vma->anon_vma_chain);
- vma_mark_detached(vma, false);
- vma_numab_state_init(vma);
+ vma_lock_init(vma, false);
}
/* Use when VMA is not part of the VMA tree and needs no locking */
@@ -1043,6 +1126,7 @@ static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
if (unlikely(mas_is_err(&vmi->mas)))
return -ENOMEM;
+ vma_mark_attached(vma);
return 0;
}
@@ -1083,6 +1167,25 @@ int vma_is_stack_for_current(struct vm_area_struct *vma);
struct mmu_gather;
struct inode;
+extern void prep_compound_page(struct page *page, unsigned int order);
+
+static inline unsigned int folio_large_order(const struct folio *folio)
+{
+ return folio->_flags_1 & 0xff;
+}
+
+#ifdef NR_PAGES_IN_LARGE_FOLIO
+static inline long folio_large_nr_pages(const struct folio *folio)
+{
+ return folio->_nr_pages;
+}
+#else
+static inline long folio_large_nr_pages(const struct folio *folio)
+{
+ return 1L << folio_large_order(folio);
+}
+#endif
+
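
folio_large_order() reads the order straight from the low byte of _flags_1, and configs without the separate _nr_pages field recompute the page count from it. A tiny standalone sketch of the arithmetic (order 9 with 4 KiB base pages gives 512 pages, i.e. a 2 MiB folio):

/* Standalone sketch of the encoding used by the helpers above. */
static long sketch_nr_pages(unsigned long flags_1)
{
	unsigned int order = flags_1 & 0xff;	/* folio_large_order() */

	return 1L << order;	/* order 9 -> 512 pages (2 MiB @ 4 KiB) */
}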
/*
* compound_order() can be called without holding a reference, which means
* that niceties like page_folio() don't work. These callers should be
@@ -1096,7 +1199,7 @@ static inline unsigned int compound_order(struct page *page)
if (!test_bit(PG_head, &folio->flags))
return 0;
- return folio->_flags_1 & 0xff;
+ return folio_large_order(folio);
}
/**
@@ -1112,7 +1215,7 @@ static inline unsigned int folio_order(const struct folio *folio)
{
if (!folio_test_large(folio))
return 0;
- return folio->_flags_1 & 0xff;
+ return folio_large_order(folio);
}
#include <linux/huge_mm.h>
@@ -1205,6 +1308,8 @@ static inline int is_vmalloc_or_module_addr(const void *x)
static inline int folio_entire_mapcount(const struct folio *folio)
{
VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+ if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio_large_order(folio) == 1))
+ return 0;
return atomic_read(&folio->_entire_mapcount) + 1;
}
@@ -1404,25 +1509,6 @@ vm_fault_t finish_fault(struct vm_fault *vmf);
* back into memory.
*/
-#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
-DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
-
-bool __put_devmap_managed_folio_refs(struct folio *folio, int refs);
-static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs)
-{
- if (!static_branch_unlikely(&devmap_managed_key))
- return false;
- if (!folio_is_zone_device(folio))
- return false;
- return __put_devmap_managed_folio_refs(folio, refs);
-}
-#else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
-static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs)
-{
- return false;
-}
-#endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
-
/* 127: arbitrary random number, small enough to assemble well */
#define folio_ref_zero_or_close_to_overflow(folio) \
((unsigned int) folio_ref_count(folio) + 127u <= 127u)
@@ -1543,12 +1629,6 @@ static inline void put_page(struct page *page)
if (folio_test_slab(folio))
return;
- /*
- * For some devmap managed pages we need to catch refcount transition
- * from 2 to 1:
- */
- if (put_devmap_managed_folio_refs(folio, 1))
- return;
folio_put(folio);
}
@@ -1907,6 +1987,13 @@ static inline struct folio *pfn_folio(unsigned long pfn)
return page_folio(pfn_to_page(pfn));
}
+static inline bool folio_has_pincount(const struct folio *folio)
+{
+ if (IS_ENABLED(CONFIG_64BIT))
+ return folio_test_large(folio);
+ return folio_order(folio) > 1;
+}
+
/**
* folio_maybe_dma_pinned - Report if a folio may be pinned for DMA.
* @folio: The folio.
@@ -1923,7 +2010,7 @@ static inline struct folio *pfn_folio(unsigned long pfn)
* get that many refcounts, and b) all the callers of this routine are
* expected to be able to deal gracefully with a false positive.
*
- * For large folios, the result will be exactly correct. That's because
+ * For most large folios, the result will be exactly correct. That's because
* we have more tracking data available: the _pincount field is used
* instead of the GUP_PIN_COUNTING_BIAS scheme.
*
@@ -1934,7 +2021,7 @@ static inline struct folio *pfn_folio(unsigned long pfn)
*/
static inline bool folio_maybe_dma_pinned(struct folio *folio)
{
- if (folio_test_large(folio))
+ if (folio_has_pincount(folio))
return atomic_read(&folio->_pincount) > 0;
/*
@@ -2006,6 +2093,13 @@ static inline bool folio_is_longterm_pinnable(struct folio *folio)
if (folio_is_device_coherent(folio))
return false;
+ /*
+ * Filesystems can only tolerate transient delays to truncate and
+ * hole-punch operations
+ */
+ if (folio_is_fsdax(folio))
+ return false;
+
/* Otherwise, non-movable zone folios can be pinned. */
return !folio_is_zone_movable(folio);
@@ -2049,11 +2143,7 @@ static inline long folio_nr_pages(const struct folio *folio)
{
if (!folio_test_large(folio))
return 1;
-#ifdef CONFIG_64BIT
- return folio->_folio_nr_pages;
-#else
- return 1L << (folio->_flags_1 & 0xff);
-#endif
+ return folio_large_nr_pages(folio);
}
/* Only hugetlbfs can allocate folios larger than MAX_ORDER */
@@ -2068,24 +2158,20 @@ static inline long folio_nr_pages(const struct folio *folio)
* page. compound_nr() can be called on a tail page, and is defined to
* return 1 in that case.
*/
-static inline unsigned long compound_nr(struct page *page)
+static inline long compound_nr(struct page *page)
{
struct folio *folio = (struct folio *)page;
if (!test_bit(PG_head, &folio->flags))
return 1;
-#ifdef CONFIG_64BIT
- return folio->_folio_nr_pages;
-#else
- return 1L << (folio->_flags_1 & 0xff);
-#endif
+ return folio_large_nr_pages(folio);
}
/**
* thp_nr_pages - The number of regular pages in this huge page.
* @page: The head page of a huge page.
*/
-static inline int thp_nr_pages(struct page *page)
+static inline long thp_nr_pages(struct page *page)
{
return folio_nr_pages((struct folio *)page);
}
@@ -2140,23 +2226,18 @@ static inline size_t folio_size(const struct folio *folio)
}
/**
- * folio_likely_mapped_shared - Estimate if the folio is mapped into the page
- * tables of more than one MM
+ * folio_maybe_mapped_shared - Whether the folio is mapped into the page
+ * tables of more than one MM
* @folio: The folio.
*
- * This function checks if the folio is currently mapped into more than one
- * MM ("mapped shared"), or if the folio is only mapped into a single MM
- * ("mapped exclusively").
+ * This function checks if the folio may currently be mapped into more than one
+ * MM ("maybe mapped shared"), or if the folio is certainly mapped into a single
+ * MM ("mapped exclusively").
*
* For KSM folios, this function also returns "mapped shared" when a folio is
* mapped multiple times into the same MM, because the individual page mappings
* are independent.
*
- * As precise information is not easily available for all folios, this function
- * estimates the number of MMs ("sharers") that are currently mapping a folio
- * using the number of times the first page of the folio is currently mapped
- * into page tables.
- *
* For small anonymous folios and anonymous hugetlb folios, the return
* value will be exactly correct: non-KSM folios can only be mapped at most once
* into an MM, and they cannot be partially mapped. KSM folios are
@@ -2164,8 +2245,8 @@ static inline size_t folio_size(const struct folio *folio)
*
* For other folios, the result can be fuzzy:
* #. For partially-mappable large folios (THP), the return value can wrongly
- * indicate "mapped exclusively" (false negative) when the folio is
- * only partially mapped into at least one MM.
+ * indicate "mapped shared" (false positive) if a folio was mapped by
+ * more than two MMs at one point in time.
* #. For pagecache folios (including hugetlb), the return value can wrongly
* indicate "mapped shared" (false positive) when two VMAs in the same MM
* cover the same file range.
@@ -2182,7 +2263,7 @@ static inline size_t folio_size(const struct folio *folio)
*
* Return: Whether the folio is estimated to be mapped into more than one MM.
*/
-static inline bool folio_likely_mapped_shared(struct folio *folio)
+static inline bool folio_maybe_mapped_shared(struct folio *folio)
{
int mapcount = folio_mapcount(folio);
@@ -2190,16 +2271,22 @@ static inline bool folio_likely_mapped_shared(struct folio *folio)
if (!folio_test_large(folio) || unlikely(folio_test_hugetlb(folio)))
return mapcount > 1;
- /* A single mapping implies "mapped exclusively". */
- if (mapcount <= 1)
- return false;
-
- /* If any page is mapped more than once we treat it "mapped shared". */
- if (folio_entire_mapcount(folio) || mapcount > folio_nr_pages(folio))
+ /*
+ * vm_insert_page() without CONFIG_TRANSPARENT_HUGEPAGE ...
+ * simply assume "mapped shared", nobody should really care
+ * about this for arbitrary kernel allocations.
+ */
+ if (!IS_ENABLED(CONFIG_MM_ID))
return true;
- /* Let's guess based on the first subpage. */
- return atomic_read(&folio->_mapcount) > 0;
+ /*
+ * A single mapping implies "mapped exclusively", even if the
+ * folio flag says something different: it's easier to handle this
+ * case here instead of on the RMAP hot path.
+ */
+ if (mapcount <= 1)
+ return false;
+ return folio_test_large_maybe_mapped_shared(folio);
}
#ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
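
The rename from folio_likely_mapped_shared() to folio_maybe_mapped_shared() matches the new semantics: false is now a guarantee of exclusivity, while true may be a stale false positive. A hedged sketch of the only direction callers can rely on; can_treat_as_private() is hypothetical:

/* Sketch: only the "certainly exclusive" answer is authoritative. */
static bool can_treat_as_private(struct folio *folio)
{
	/* a true return may be a false positive; false never lies */
	return !folio_maybe_mapped_shared(folio);
}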
@@ -2408,11 +2495,13 @@ struct follow_pfnmap_args {
* Outputs:
*
* @pfn: the PFN of the address
+ * @addr_mask: address mask covering pfn
* @pgprot: the pgprot_t of the mapping
* @writable: whether the mapping is writable
* @special: whether the mapping is a special mapping (real PFN maps)
*/
unsigned long pfn;
+ unsigned long addr_mask;
pgprot_t pgprot;
bool writable;
bool special;
@@ -3179,7 +3268,6 @@ extern void reserve_bootmem_region(phys_addr_t start,
/* Free the reserved page into the buddy system, so it gets managed. */
void free_reserved_page(struct page *page);
-#define free_highmem_page(page) free_reserved_page(page)
static inline void mark_page_reserved(struct page *page)
{
@@ -3539,6 +3627,8 @@ int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
unsigned long num);
int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
unsigned long num);
+vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page,
+ bool write);
vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
@@ -3817,6 +3907,7 @@ static inline void print_vma_addr(char *prefix, unsigned long rip)
#endif
void *sparse_buffer_alloc(unsigned long size);
+unsigned long section_map_size(void);
struct page * __populate_section_memmap(unsigned long pfn,
unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
struct dev_pagemap *pgmap);
@@ -3825,7 +3916,8 @@ p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
- struct vmem_altmap *altmap, struct page *reuse);
+ struct vmem_altmap *altmap, unsigned long ptpfn,
+ unsigned long flags);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
void *vmemmap_alloc_block_buf(unsigned long size, int node,
@@ -3841,6 +3933,12 @@ int vmemmap_populate_hugepages(unsigned long start, unsigned long end,
int node, struct vmem_altmap *altmap);
int vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap);
+int vmemmap_populate_hvo(unsigned long start, unsigned long end, int node,
+ unsigned long headsize);
+int vmemmap_undo_hvo(unsigned long start, unsigned long end, int node,
+ unsigned long headsize);
+void vmemmap_wrprotect_hvo(unsigned long start, unsigned long end, int node,
+ unsigned long headsize);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
@@ -3907,9 +4005,6 @@ static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
}
#endif
-void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
- unsigned long nr_pages);
-
enum mf_flags {
MF_COUNT_INCREASED = 1 << 0,
MF_ACTION_REQUIRED = 1 << 1,
@@ -4143,4 +4238,14 @@ int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *st
int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status);
int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
+
+/*
+ * Sealing (mseal) of a userspace process's system mappings.
+ */
+#ifdef CONFIG_MSEAL_SYSTEM_MAPPINGS
+#define VM_SEALED_SYSMAP VM_SEALED
+#else
+#define VM_SEALED_SYSMAP VM_NONE
+#endif
+
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 75e8850cec3a..56d07edd01f9 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -19,6 +19,7 @@
#include <linux/workqueue.h>
#include <linux/seqlock.h>
#include <linux/percpu_counter.h>
+#include <linux/types.h>
#include <asm/mmu.h>
@@ -133,8 +134,11 @@ struct page {
unsigned long compound_head; /* Bit zero is set */
};
struct { /* ZONE_DEVICE pages */
- /** @pgmap: Points to the hosting device page map. */
- struct dev_pagemap *pgmap;
+ /*
+ * The first word is used for compound_head or folio
+			 * The first word is used for compound_head or the
+			 * folio's pgmap.
+ void *_unused_pgmap_compound_head;
void *zone_device_data;
/*
* ZONE_DEVICE private pages are counted as being
@@ -287,6 +291,49 @@ typedef struct {
unsigned long val;
} swp_entry_t;
+#if defined(CONFIG_MEMCG) || defined(CONFIG_SLAB_OBJ_EXT)
+/* We have some extra room after the refcount in tail pages. */
+#define NR_PAGES_IN_LARGE_FOLIO
+#endif
+
+/*
+ * On 32bit, we can cut the required metadata in half, because:
+ * (a) PID_MAX_LIMIT implicitly limits the number of MMs we could ever have,
+ * so we can limit MM IDs to 15 bit (32767).
+ * (b) We don't expect folios where even a single complete PTE mapping by
+ * one MM would exceed 15 bits (order-15).
+ */
+#ifdef CONFIG_64BIT
+typedef int mm_id_mapcount_t;
+#define MM_ID_MAPCOUNT_MAX INT_MAX
+typedef unsigned int mm_id_t;
+#else /* !CONFIG_64BIT */
+typedef short mm_id_mapcount_t;
+#define MM_ID_MAPCOUNT_MAX SHRT_MAX
+typedef unsigned short mm_id_t;
+#endif /* CONFIG_64BIT */
+
+/* We implicitly use the dummy ID for init-mm etc. where we never rmap pages. */
+#define MM_ID_DUMMY 0
+#define MM_ID_MIN (MM_ID_DUMMY + 1)
+
+/*
+ * We leave the highest bit of each MM id unused, so we can store a flag
+ * in the highest bit of each folio->_mm_id[].
+ */
+#define MM_ID_BITS ((sizeof(mm_id_t) * BITS_PER_BYTE) - 1)
+#define MM_ID_MASK ((1U << MM_ID_BITS) - 1)
+#define MM_ID_MAX MM_ID_MASK
+
+/*
+ * In order to use bit_spin_lock(), which requires an unsigned long, we
+ * operate on folio->_mm_ids when working on flags.
+ */
+#define FOLIO_MM_IDS_LOCK_BITNUM MM_ID_BITS
+#define FOLIO_MM_IDS_LOCK_BIT BIT(FOLIO_MM_IDS_LOCK_BITNUM)
+#define FOLIO_MM_IDS_SHARED_BITNUM (2 * MM_ID_BITS + 1)
+#define FOLIO_MM_IDS_SHARED_BIT BIT(FOLIO_MM_IDS_SHARED_BITNUM)
+
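
Worked through, this layout packs two MM id slots plus a lock bit and a shared flag into the single unsigned long behind folio->_mm_ids: 31-bit ids with flag bits 31 and 63 on 64-bit, 15-bit ids with flag bits 15 and 31 on 32-bit. A standalone sketch with compile-time checks; the types are re-declared locally for illustration only:

#include <limits.h>

#if ULONG_MAX > 0xffffffffUL
typedef unsigned int sketch_mm_id_t;	/* 64-bit kernels */
#else
typedef unsigned short sketch_mm_id_t;	/* 32-bit kernels */
#endif

#define SKETCH_MM_ID_BITS  ((sizeof(sketch_mm_id_t) * CHAR_BIT) - 1)
#define SKETCH_LOCK_BIT    SKETCH_MM_ID_BITS		/* 31 or 15 */
#define SKETCH_SHARED_BIT  (2 * SKETCH_MM_ID_BITS + 1)	/* 63 or 31 */

/* Both ids and both flag bits must fit the one word behind _mm_ids. */
_Static_assert(SKETCH_SHARED_BIT < sizeof(unsigned long) * CHAR_BIT,
	       "mm ids + flags overflow _mm_ids");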
/**
* struct folio - Represents a contiguous set of bytes.
* @flags: Identical to the page flags.
@@ -296,6 +343,8 @@ typedef struct {
* anonymous memory.
* @index: Offset within the file, in units of pages. For anonymous memory,
* this is the index from the beginning of the mmap.
+ * @share: number of DAX mappings that reference this folio. See
+ * dax_associate_entry.
* @private: Filesystem per-folio data (see folio_attach_private()).
* @swap: Used for swp_entry_t if folio_test_swapcache().
* @_mapcount: Do not access this member directly. Use folio_mapcount() to
@@ -303,13 +352,17 @@ typedef struct {
* @_refcount: Do not access this member directly. Use folio_ref_count()
* to find how many references there are to this folio.
* @memcg_data: Memory Control Group data.
+ * @pgmap: Metadata for ZONE_DEVICE mappings
* @virtual: Virtual address in the kernel direct map.
* @_last_cpupid: IDs of last CPU and last process that accessed the folio.
* @_entire_mapcount: Do not use directly, call folio_entire_mapcount().
* @_large_mapcount: Do not use directly, call folio_mapcount().
* @_nr_pages_mapped: Do not use outside of rmap and debug code.
* @_pincount: Do not use directly, call folio_maybe_dma_pinned().
- * @_folio_nr_pages: Do not use directly, call folio_nr_pages().
+ * @_nr_pages: Do not use directly, call folio_nr_pages().
+ * @_mm_id: Do not use outside of rmap code.
+ * @_mm_ids: Do not use outside of rmap code.
+ * @_mm_id_mapcount: Do not use outside of rmap code.
* @_hugetlb_subpool: Do not use directly, use accessor in hugetlb.h.
* @_hugetlb_cgroup: Do not use directly, use accessor in hugetlb_cgroup.h.
* @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h.
@@ -341,9 +394,13 @@ struct folio {
/* private: */
};
/* public: */
+ struct dev_pagemap *pgmap;
};
struct address_space *mapping;
- pgoff_t index;
+ union {
+ pgoff_t index;
+ unsigned long share;
+ };
union {
void *private;
swp_entry_t swap;
@@ -369,14 +426,30 @@ struct folio {
struct {
unsigned long _flags_1;
unsigned long _head_1;
+ union {
+ struct {
/* public: */
- atomic_t _large_mapcount;
- atomic_t _entire_mapcount;
- atomic_t _nr_pages_mapped;
- atomic_t _pincount;
+ atomic_t _large_mapcount;
+ atomic_t _nr_pages_mapped;
#ifdef CONFIG_64BIT
- unsigned int _folio_nr_pages;
-#endif
+ atomic_t _entire_mapcount;
+ atomic_t _pincount;
+#endif /* CONFIG_64BIT */
+ mm_id_mapcount_t _mm_id_mapcount[2];
+ union {
+ mm_id_t _mm_id[2];
+ unsigned long _mm_ids;
+ };
+ /* private: the union with struct page is transitional */
+ };
+ unsigned long _usable_1[4];
+ };
+ atomic_t _mapcount_1;
+ atomic_t _refcount_1;
+ /* public: */
+#ifdef NR_PAGES_IN_LARGE_FOLIO
+ unsigned int _nr_pages;
+#endif /* NR_PAGES_IN_LARGE_FOLIO */
/* private: the union with struct page is transitional */
};
struct page __page_1;
@@ -386,20 +459,27 @@ struct folio {
unsigned long _flags_2;
unsigned long _head_2;
/* public: */
- void *_hugetlb_subpool;
- void *_hugetlb_cgroup;
- void *_hugetlb_cgroup_rsvd;
- void *_hugetlb_hwpoison;
+ struct list_head _deferred_list;
+#ifndef CONFIG_64BIT
+ atomic_t _entire_mapcount;
+ atomic_t _pincount;
+#endif /* !CONFIG_64BIT */
/* private: the union with struct page is transitional */
};
+ struct page __page_2;
+ };
+ union {
struct {
- unsigned long _flags_2a;
- unsigned long _head_2a;
+ unsigned long _flags_3;
+ unsigned long _head_3;
/* public: */
- struct list_head _deferred_list;
+ void *_hugetlb_subpool;
+ void *_hugetlb_cgroup;
+ void *_hugetlb_cgroup_rsvd;
+ void *_hugetlb_hwpoison;
/* private: the union with struct page is transitional */
};
- struct page __page_2;
+ struct page __page_3;
};
};
@@ -428,14 +508,20 @@ FOLIO_MATCH(_last_cpupid, _last_cpupid);
offsetof(struct page, pg) + sizeof(struct page))
FOLIO_MATCH(flags, _flags_1);
FOLIO_MATCH(compound_head, _head_1);
+FOLIO_MATCH(_mapcount, _mapcount_1);
+FOLIO_MATCH(_refcount, _refcount_1);
#undef FOLIO_MATCH
#define FOLIO_MATCH(pg, fl) \
static_assert(offsetof(struct folio, fl) == \
offsetof(struct page, pg) + 2 * sizeof(struct page))
FOLIO_MATCH(flags, _flags_2);
FOLIO_MATCH(compound_head, _head_2);
-FOLIO_MATCH(flags, _flags_2a);
-FOLIO_MATCH(compound_head, _head_2a);
+#undef FOLIO_MATCH
+#define FOLIO_MATCH(pg, fl) \
+ static_assert(offsetof(struct folio, fl) == \
+ offsetof(struct page, pg) + 3 * sizeof(struct page))
+FOLIO_MATCH(flags, _flags_3);
+FOLIO_MATCH(compound_head, _head_3);
#undef FOLIO_MATCH
/**
@@ -578,6 +664,12 @@ static inline void *folio_get_private(struct folio *folio)
typedef unsigned long vm_flags_t;
/*
+ * freeptr_t represents a SLUB freelist pointer, which might be encoded
+ * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
+ */
+typedef struct { unsigned long v; } freeptr_t;
+
+/*
* A region containing a mapping of a non-memory backed file under NOMMU
* conditions. These are held in a global tree and are pinned by the VMAs that
* map parts of them.
@@ -633,9 +725,8 @@ static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
}
#endif
-struct vma_lock {
- struct rw_semaphore lock;
-};
+#define VMA_LOCK_OFFSET 0x40000000
+#define VMA_REF_LIMIT (VMA_LOCK_OFFSET - 1)
struct vma_numab_state {
/*
@@ -681,6 +772,9 @@ struct vma_numab_state {
*
* Only explicitly marked struct members may be accessed by RCU readers before
* getting a stable reference.
+ *
+ * WARNING: when adding new members, please update vm_area_init_from() to copy
+ * them during vm_area_struct content duplication.
*/
struct vm_area_struct {
/* The first cache line has the info for VMA tree walking. */
@@ -691,9 +785,7 @@ struct vm_area_struct {
unsigned long vm_start;
unsigned long vm_end;
};
-#ifdef CONFIG_PER_VMA_LOCK
- struct rcu_head vm_rcu; /* Used for deferred freeing. */
-#endif
+ freeptr_t vm_freeptr; /* Pointer used by SLAB_TYPESAFE_BY_RCU */
};
/*
@@ -714,18 +806,12 @@ struct vm_area_struct {
#ifdef CONFIG_PER_VMA_LOCK
/*
- * Flag to indicate areas detached from the mm->mm_mt tree.
- * Unstable RCU readers are allowed to read this.
- */
- bool detached;
-
- /*
* Can only be written (using WRITE_ONCE()) while holding both:
* - mmap_lock (in write mode)
- * - vm_lock->lock (in write mode)
+ * - vm_refcnt bit at VMA_LOCK_OFFSET is set
* Can be read reliably while holding one of:
* - mmap_lock (in read or write mode)
- * - vm_lock->lock (in read or write mode)
+ * - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1
* Can be read unreliably (using READ_ONCE()) for pessimistic bailout
* while holding nothing (except RCU to keep the VMA struct allocated).
*
@@ -734,20 +820,7 @@ struct vm_area_struct {
* slowpath.
*/
unsigned int vm_lock_seq;
- /* Unstable RCU readers are allowed to read this. */
- struct vma_lock *vm_lock;
#endif
-
- /*
- * For areas with an address space and backing store,
- * linkage into the address_space->i_mmap interval tree.
- *
- */
- struct {
- struct rb_node rb;
- unsigned long rb_subtree_last;
- } shared;
-
/*
* A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
* list, after a COW of one of the file pages. A MAP_SHARED vma
@@ -767,14 +840,6 @@ struct vm_area_struct {
struct file * vm_file; /* File we map to (can be NULL). */
void * vm_private_data; /* was vm_pte (shared mem) */
-#ifdef CONFIG_ANON_VMA_NAME
- /*
- * For private and shared anonymous mappings, a pointer to a null
- * terminated string containing the name given to the vma, or NULL if
- * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
- */
- struct anon_vma_name *anon_name;
-#endif
#ifdef CONFIG_SWAP
atomic_long_t swap_readahead_info;
#endif
@@ -787,6 +852,30 @@ struct vm_area_struct {
#ifdef CONFIG_NUMA_BALANCING
struct vma_numab_state *numab_state; /* NUMA Balancing state */
#endif
+#ifdef CONFIG_PER_VMA_LOCK
+ /* Unstable RCU readers are allowed to read this. */
+ refcount_t vm_refcnt ____cacheline_aligned_in_smp;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map vmlock_dep_map;
+#endif
+#endif
+ /*
+ * For areas with an address space and backing store,
+ * linkage into the address_space->i_mmap interval tree.
+ *
+ */
+ struct {
+ struct rb_node rb;
+ unsigned long rb_subtree_last;
+ } shared;
+#ifdef CONFIG_ANON_VMA_NAME
+ /*
+ * For private and shared anonymous mappings, a pointer to a null
+ * terminated string containing the name given to the vma, or NULL if
+ * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
+ */
+ struct anon_vma_name *anon_name;
+#endif
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
@@ -922,6 +1011,7 @@ struct mm_struct {
* by mmlist_lock
*/
#ifdef CONFIG_PER_VMA_LOCK
+ struct rcuwait vma_writer_wait;
/*
* This field has lock-like semantics, meaning it is sometimes
* accessed with ACQUIRE/RELEASE semantics.
@@ -1074,6 +1164,9 @@ struct mm_struct {
#endif
} lru_gen;
#endif /* CONFIG_LRU_GEN_WALKS_MMU */
+#ifdef CONFIG_MM_ID
+ mm_id_t mm_id;
+#endif /* CONFIG_MM_ID */
} __randomize_layout;
/*
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 45a21faa3ff6..4706c6769902 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -122,12 +122,6 @@ static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int
#endif /* CONFIG_PER_VMA_LOCK */
-static inline void mmap_init_lock(struct mm_struct *mm)
-{
- init_rwsem(&mm->mmap_lock);
- mm_lock_seqcount_init(mm);
-}
-
static inline void mmap_write_lock(struct mm_struct *mm)
{
__mmap_lock_trace_start_locking(mm, true);
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index e2dd57ca368b..bc2402a45741 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -43,10 +43,10 @@ struct mmu_interval_notifier;
* a device driver to possibly ignore the invalidation if the
* owner field matches the driver's device private pgmap owner.
*
- * @MMU_NOTIFY_EXCLUSIVE: to signal a device driver that the device will no
- * longer have exclusive access to the page. When sent during creation of an
- * exclusive range the owner will be initialised to the value provided by the
- * caller of make_device_exclusive_range(), otherwise the owner will be NULL.
+ * @MMU_NOTIFY_EXCLUSIVE: conversion of a page table entry to device-exclusive.
+ * The owner is initialized to the value provided by the caller of
+ * make_device_exclusive(), such that this caller can filter out these
+ * events.
*/
enum mmu_notifier_event {
MMU_NOTIFY_UNMAP = 0,
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e16939553930..25e80b2ca7f4 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -138,6 +138,7 @@ enum numa_stat_item {
enum zone_stat_item {
/* First 128 byte cacheline (assuming 64 bit words) */
NR_FREE_PAGES,
+ NR_FREE_PAGES_BLOCKS,
NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
NR_ZONE_ACTIVE_ANON,
@@ -220,9 +221,11 @@ enum node_stat_item {
PGDEMOTE_KSWAPD,
PGDEMOTE_DIRECT,
PGDEMOTE_KHUGEPAGED,
+ PGDEMOTE_PROACTIVE,
#ifdef CONFIG_HUGETLB_PAGE
NR_HUGETLB,
#endif
+ NR_BALLOON_PAGES,
NR_VM_NODE_STAT_ITEMS
};
@@ -1161,6 +1164,12 @@ static inline bool is_zone_device_page(const struct page *page)
return page_zonenum(page) == ZONE_DEVICE;
}
+static inline struct dev_pagemap *page_pgmap(const struct page *page)
+{
+ VM_WARN_ON_ONCE_PAGE(!is_zone_device_page(page), page);
+ return page_folio(page)->pgmap;
+}
+
/*
* Consecutive zone device pages should not be merged into the same sgl
* or bvec segment with other types of pages or if they belong to different
@@ -1176,7 +1185,7 @@ static inline bool zone_device_pages_have_same_pgmap(const struct page *a,
return false;
if (!is_zone_device_page(a))
return true;
- return a->pgmap == b->pgmap;
+ return page_pgmap(a) == page_pgmap(b);
}
extern void memmap_init_zone_device(struct zone *, unsigned long,
@@ -1191,6 +1200,10 @@ static inline bool zone_device_pages_have_same_pgmap(const struct page *a,
{
return true;
}
+static inline struct dev_pagemap *page_pgmap(const struct page *page)
+{
+ return NULL;
+}
#endif
static inline bool folio_is_zone_device(const struct folio *folio)
@@ -1937,6 +1950,9 @@ enum {
#ifdef CONFIG_ZONE_DEVICE
SECTION_TAINT_ZONE_DEVICE_BIT,
#endif
+#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
+ SECTION_IS_VMEMMAP_PREINIT_BIT,
+#endif
SECTION_MAP_LAST_BIT,
};
@@ -1947,6 +1963,9 @@ enum {
#ifdef CONFIG_ZONE_DEVICE
#define SECTION_TAINT_ZONE_DEVICE BIT(SECTION_TAINT_ZONE_DEVICE_BIT)
#endif
+#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
+#define SECTION_IS_VMEMMAP_PREINIT BIT(SECTION_IS_VMEMMAP_PREINIT_BIT)
+#endif
#define SECTION_MAP_MASK (~(BIT(SECTION_MAP_LAST_BIT) - 1))
#define SECTION_NID_SHIFT SECTION_MAP_LAST_BIT
@@ -2001,6 +2020,30 @@ static inline int online_device_section(struct mem_section *section)
}
#endif
+#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
+static inline int preinited_vmemmap_section(struct mem_section *section)
+{
+ return (section &&
+ (section->section_mem_map & SECTION_IS_VMEMMAP_PREINIT));
+}
+
+void sparse_vmemmap_init_nid_early(int nid);
+void sparse_vmemmap_init_nid_late(int nid);
+
+#else
+static inline int preinited_vmemmap_section(struct mem_section *section)
+{
+ return 0;
+}
+static inline void sparse_vmemmap_init_nid_early(int nid)
+{
+}
+
+static inline void sparse_vmemmap_init_nid_late(int nid)
+{
+}
+#endif
+
static inline int online_section_nr(unsigned long nr)
{
return online_section(__nr_to_section(nr));
@@ -2038,6 +2081,9 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
}
#endif
+void sparse_init_early_section(int nid, struct page *map, unsigned long pnum,
+ unsigned long flags);
+
#ifndef CONFIG_HAVE_ARCH_PFN_VALID
/**
* pfn_valid - check if there is a valid memory map entry for a PFN
@@ -2100,6 +2146,11 @@ static inline unsigned long next_present_section_nr(unsigned long section_nr)
return -1;
}
+#define for_each_present_section_nr(start, section_nr) \
+ for (section_nr = next_present_section_nr(start - 1); \
+ section_nr != -1; \
+ section_nr = next_present_section_nr(section_nr))
+
/*
* These are _only_ used during initialisation, therefore they
* can use __initdata ... They could have names to indicate
@@ -2119,6 +2170,8 @@ void sparse_init(void);
#else
#define sparse_init() do {} while (0)
#define sparse_index_init(_sec, _nid) do {} while (0)
+#define sparse_vmemmap_init_nid_early(_nid, _use) do {} while (0)
+#define sparse_vmemmap_init_nid_late(_nid) do {} while (0)
#define pfn_in_present_section pfn_valid
#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */
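
for_each_present_section_nr() packages the next_present_section_nr() idiom that several sparse-init loops open-code. A hedged usage sketch, combining it with the new preinited_vmemmap_section() test added above:

/* Sketch: walk every present section, skipping those whose memmap
 * was already populated early (SPARSEMEM_VMEMMAP_PREINIT). */
static void __init walk_present_sections(void)
{
	unsigned long section_nr;

	for_each_present_section_nr(0, section_nr) {
		struct mem_section *ms = __nr_to_section(section_nr);

		if (preinited_vmemmap_section(ms))
			continue;
		/* ... late init work for this section ... */
	}
}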
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 2bf91b57591b..2143d05116be 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -202,4 +202,6 @@ DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T) == 0)
+extern unsigned long mutex_get_owner(struct mutex *lock);
+
#endif /* __LINUX_MUTEX_H */
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 9ac83ca88326..d8cad844870a 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -300,6 +300,7 @@ enum nfsstat4 {
/* error codes for internal client use */
#define NFS4ERR_RESET_TO_MDS 12001
#define NFS4ERR_RESET_TO_PNFS 12002
+#define NFS4ERR_FATAL_IOERROR 12003
static inline bool seqid_mutating_err(u32 err)
{
@@ -691,6 +692,7 @@ enum {
NFSPROC4_CLNT_LISTXATTRS,
NFSPROC4_CLNT_REMOVEXATTR,
NFSPROC4_CLNT_READ_PLUS,
+ NFSPROC4_CLNT_OFFLOAD_STATUS,
};
/* nfs41 types */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index f00bfcee7120..71319637a84e 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -50,6 +50,7 @@ struct nfs_client {
#define NFS_CS_DS 7 /* - Server is a DS */
#define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */
#define NFS_CS_PNFS 9 /* - Server used for pnfs */
+#define NFS_CS_NETUNREACH_FATAL 10 /* - ENETUNREACH errors are fatal */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
@@ -167,6 +168,8 @@ struct nfs_server {
#define NFS_MOUNT_TRUNK_DISCOVERY 0x04000000
#define NFS_MOUNT_SHUTDOWN 0x08000000
#define NFS_MOUNT_NO_ALIGNWRITE 0x10000000
+#define NFS_MOUNT_FORCE_RDIRPLUS 0x20000000
+#define NFS_MOUNT_NETUNREACH_FATAL 0x40000000
unsigned int fattr_valid; /* Valid attributes */
unsigned int caps; /* server capabilities */
@@ -250,6 +253,10 @@ struct nfs_server {
struct list_head ss_copies;
struct list_head ss_src_copies;
+ unsigned long delegation_flags;
+#define NFS4SERV_DELEGRETURN (1)
+#define NFS4SERV_DELEGATION_EXPIRED (2)
+#define NFS4SERV_DELEGRETURN_DELAYED (3)
unsigned long delegation_gen;
unsigned long mig_gen;
unsigned long mig_status;
@@ -289,6 +296,7 @@ struct nfs_server {
#define NFS_CAP_CASE_INSENSITIVE (1U << 6)
#define NFS_CAP_CASE_PRESERVING (1U << 7)
#define NFS_CAP_REBOOT_LAYOUTRETURN (1U << 8)
+#define NFS_CAP_OFFLOAD_STATUS (1U << 9)
#define NFS_CAP_OPEN_XOR (1U << 12)
#define NFS_CAP_DELEGTIME (1U << 13)
#define NFS_CAP_POSIX_LOCK (1U << 14)
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index d66c61cbbd1d..67f6632f723b 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1515,8 +1515,9 @@ struct nfs42_offload_status_args {
struct nfs42_offload_status_res {
struct nfs4_sequence_res osr_seq_res;
- uint64_t osr_count;
- int osr_status;
+ u64 osr_count;
+ int complete_count;
+ u32 osr_complete;
};
struct nfs42_copy_notify_args {
diff --git a/include/linux/node.h b/include/linux/node.h
index 9a881c2208b3..2b7517892230 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -57,6 +57,11 @@ enum cache_write_policy {
NODE_CACHE_WRITE_OTHER,
};
+enum cache_mode {
+ NODE_CACHE_ADDR_MODE_RESERVED,
+ NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR,
+};
+
/**
* struct node_cache_attrs - system memory caching attributes
*
@@ -65,6 +70,7 @@ enum cache_write_policy {
* @size: Total size of cache in bytes
* @line_size: Number of bytes fetched on a cache miss
* @level: The cache hierarchy level
+ * @address_mode: The address mode of the cache (see enum cache_mode)
*/
struct node_cache_attrs {
enum cache_indexing indexing;
@@ -72,6 +78,7 @@ struct node_cache_attrs {
u64 size;
u16 line_size;
u8 level;
+ u16 address_mode;
};
#ifdef CONFIG_HMEM_REPORTING
diff --git a/include/linux/objtool.h b/include/linux/objtool.h
index 3ca965a2ddc8..366ad004d794 100644
--- a/include/linux/objtool.h
+++ b/include/linux/objtool.h
@@ -69,7 +69,7 @@
* In asm, there are two kinds of code: normal C-type callable functions and
* the rest. The normal callable functions can be called by other code, and
* don't do anything unusual with the stack. Such normal callable functions
- * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this
+ * are annotated with SYM_FUNC_{START,END}. Most asm code falls in this
* category. In this case, no special debugging annotations are needed because
* objtool can automatically generate the ORC data for the ORC unwinder to read
* at runtime.
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index df9234e5f478..e6a21b62dcce 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -226,11 +226,48 @@ static __always_inline const struct page *page_fixed_fake_head(const struct page
}
return page;
}
+
+static __always_inline bool page_count_writable(const struct page *page, int u)
+{
+ if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
+ return true;
+
+ /*
+ * The refcount check is ordered before the fake-head check to prevent
+ * the following race:
+ * CPU 1 (HVO) CPU 2 (speculative PFN walker)
+ *
+ * page_ref_freeze()
+ * synchronize_rcu()
+ * rcu_read_lock()
+ * page_is_fake_head() is false
+ * vmemmap_remap_pte()
+ * XXX: struct page[] becomes r/o
+ *
+ * page_ref_unfreeze()
+ * page_ref_count() is not zero
+ *
+ * atomic_add_unless(&page->_refcount)
+ * XXX: try to modify r/o struct page[]
+ *
+ * The refcount check also prevents modification attempts to other (r/o)
+ * tail pages that are not fake heads.
+ */
+ if (atomic_read_acquire(&page->_refcount) == u)
+ return false;
+
+ return page_fixed_fake_head(page) == page;
+}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
return page;
}
+
+static inline bool page_count_writable(const struct page *page, int u)
+{
+ return true;
+}
#endif
static __always_inline int page_is_fake_head(const struct page *page)
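
The helper's second argument is the same "unless" value the refcount update is conditioned on, so a frozen page (refcount 0) is reported non-writable before the fake-head test even runs. A sketch of the caller shape this serves; page_ref_add_unless() itself is converted later in this diff:

/* Sketch: a speculative PFN-walker grab. With u == 0, a frozen page
 * whose vmemmap may be remapped read-only under HVO is rejected by
 * page_count_writable() before any write to page->_refcount. */
static bool try_get_page_speculative(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}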
@@ -673,12 +710,6 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
#define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
-/*
- * Different with flags above, this flag is used only for fsdax mode. It
- * indicates that this page->mapping is now under reflink case.
- */
-#define PAGE_MAPPING_DAX_SHARED ((void *)0x1)
-
static __always_inline bool folio_mapping_flags(const struct folio *folio)
{
return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
@@ -1106,6 +1137,12 @@ static inline bool is_page_hwpoison(const struct page *page)
return folio_test_hugetlb(folio) && PageHWPoison(&folio->page);
}
+static inline bool folio_contain_hwpoisoned_page(struct folio *folio)
+{
+ return folio_test_hwpoison(folio) ||
+ (folio_test_large(folio) && folio_test_has_hwpoisoned(folio));
+}
+
bool is_free_buddy_page(const struct page *page);
PAGEFLAG(Isolated, isolated, PF_ANY);
@@ -1193,6 +1230,10 @@ static inline int folio_has_private(const struct folio *folio)
return !!(folio->flags & PAGE_FLAGS_PRIVATE);
}
+static inline bool folio_test_large_maybe_mapped_shared(const struct folio *folio)
+{
+ return test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids);
+}
#undef PF_ANY
#undef PF_HEAD
#undef PF_NO_TAIL
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
index 46406f3fe34d..d649b6bbbc87 100644
--- a/include/linux/page_counter.h
+++ b/include/linux/page_counter.h
@@ -9,10 +9,12 @@
struct page_counter {
/*
- * Make sure 'usage' does not share cacheline with any other field. The
- * memcg->memory.usage is a hot member of struct mem_cgroup.
+ * Make sure 'usage' does not share cacheline with any other field in
+ * v2. The memcg->memory.usage is a hot member of struct mem_cgroup.
*/
atomic_long_t usage;
+ unsigned long failcnt; /* v1-only field */
+
CACHELINE_PADDING(_pad1_);
/* effective memory.min and memory.min usage tracking */
@@ -28,12 +30,12 @@ struct page_counter {
unsigned long watermark;
/* Latest cg2 reset watermark */
unsigned long local_watermark;
- unsigned long failcnt;
	/* Keep all the read-mostly fields in a separate cacheline. */
CACHELINE_PADDING(_pad2_);
bool protection_support;
+ bool track_failcnt;
unsigned long min;
unsigned long low;
unsigned long high;
@@ -58,6 +60,7 @@ static inline void page_counter_init(struct page_counter *counter,
counter->max = PAGE_COUNTER_MAX;
counter->parent = parent;
counter->protection_support = protection_support;
+ counter->track_failcnt = false;
}
static inline unsigned long page_counter_read(struct page_counter *counter)
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index e4b48a0dda24..76c817162d2f 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -3,6 +3,7 @@
#define __LINUX_PAGE_EXT_H
#include <linux/types.h>
+#include <linux/mmzone.h>
#include <linux/stacktrace.h>
struct pglist_data;
@@ -69,16 +70,31 @@ extern void page_ext_init(void);
static inline void page_ext_init_flatmem_late(void)
{
}
+
+static inline bool page_ext_iter_next_fast_possible(unsigned long next_pfn)
+{
+ /*
+ * page_ext is allocated per memory section. Once we cross a
+ * memory section, we have to fetch the new pointer.
+ */
+ return next_pfn % PAGES_PER_SECTION;
+}
#else
extern void page_ext_init_flatmem(void);
extern void page_ext_init_flatmem_late(void);
static inline void page_ext_init(void)
{
}
+
+static inline bool page_ext_iter_next_fast_possible(unsigned long next_pfn)
+{
+ return true;
+}
#endif
extern struct page_ext *page_ext_get(const struct page *page);
extern void page_ext_put(struct page_ext *page_ext);
+extern struct page_ext *page_ext_lookup(unsigned long pfn);
static inline void *page_ext_data(struct page_ext *page_ext,
struct page_ext_operations *ops)
@@ -93,6 +109,83 @@ static inline struct page_ext *page_ext_next(struct page_ext *curr)
return next;
}
+struct page_ext_iter {
+ unsigned long index;
+ unsigned long start_pfn;
+ struct page_ext *page_ext;
+};
+
+/**
+ * page_ext_iter_begin() - Prepare for iterating through page extensions.
+ * @iter: page extension iterator.
+ * @pfn: PFN of the page we're interested in.
+ *
+ * Must be called with RCU read lock taken.
+ *
+ * Return: NULL if no page_ext exists for this page.
+ */
+static inline struct page_ext *page_ext_iter_begin(struct page_ext_iter *iter,
+ unsigned long pfn)
+{
+ iter->index = 0;
+ iter->start_pfn = pfn;
+ iter->page_ext = page_ext_lookup(pfn);
+
+ return iter->page_ext;
+}
+
+/**
+ * page_ext_iter_next() - Get next page extension
+ * @iter: page extension iterator.
+ *
+ * Must be called with RCU read lock taken.
+ *
+ * Return: NULL if no next page_ext exists.
+ */
+static inline struct page_ext *page_ext_iter_next(struct page_ext_iter *iter)
+{
+ unsigned long pfn;
+
+ if (WARN_ON_ONCE(!iter->page_ext))
+ return NULL;
+
+ iter->index++;
+ pfn = iter->start_pfn + iter->index;
+
+ if (page_ext_iter_next_fast_possible(pfn))
+ iter->page_ext = page_ext_next(iter->page_ext);
+ else
+ iter->page_ext = page_ext_lookup(pfn);
+
+ return iter->page_ext;
+}
+
+/**
+ * page_ext_iter_get() - Get current page extension
+ * @iter: page extension iterator.
+ *
+ * Return: NULL if no page_ext exists for this iterator.
+ */
+static inline struct page_ext *page_ext_iter_get(const struct page_ext_iter *iter)
+{
+ return iter->page_ext;
+}
+
+/**
+ * for_each_page_ext(): iterate through page_ext objects.
+ * @__page: the page we're interested in
+ * @__pgcount: how many pages to iterate through
+ * @__page_ext: struct page_ext pointer where the current page_ext
+ * object is returned
+ * @__iter: struct page_ext_iter object (defined on the stack)
+ *
+ * IMPORTANT: must be called with RCU read lock taken.
+ */
+#define for_each_page_ext(__page, __pgcount, __page_ext, __iter) \
+ for (__page_ext = page_ext_iter_begin(&__iter, page_to_pfn(__page));\
+ __page_ext && __iter.index < __pgcount; \
+ __page_ext = page_ext_iter_next(&__iter))
+
#else /* !CONFIG_PAGE_EXTENSION */
struct page_ext;
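A minimal usage sketch for the iterator added above (not part of this patch; process_ext() is a hypothetical callback): walk the page_ext objects of an nr_pages range under the RCU read lock, as the kernel-doc requires.

static void walk_page_exts(struct page *page, unsigned long nr_pages)
{
	struct page_ext_iter iter;
	struct page_ext *page_ext;

	rcu_read_lock();
	for_each_page_ext(page, nr_pages, page_ext, iter) {
		/* process_ext() is a made-up per-page_ext operation */
		process_ext(page_ext);
	}
	rcu_read_unlock();
}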
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index 8c236c651d1d..544150d1d5fd 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -234,7 +234,7 @@ static inline bool page_ref_add_unless(struct page *page, int nr, int u)
rcu_read_lock();
/* avoid writing to the vmemmap area being remapped */
- if (!page_is_fake_head(page) && page_ref_count(page) != u)
+ if (page_count_writable(page, u))
ret = atomic_add_unless(&page->_refcount, nr, u);
rcu_read_unlock();
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 7661be85136c..26baa78f1ca7 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -536,26 +536,6 @@ struct address_space *folio_mapping(struct folio *);
struct address_space *swapcache_mapping(struct folio *);
/**
- * folio_file_mapping - Find the mapping this folio belongs to.
- * @folio: The folio.
- *
- * For folios which are in the page cache, return the mapping that this
- * page belongs to. Folios in the swap cache return the mapping of the
- * swap file or swap device where the data is stored. This is different
- * from the mapping returned by folio_mapping(). The only reason to
- * use it is if, like NFS, you return 0 from ->activate_swapfile.
- *
- * Do not call this for folios which aren't in the page cache or swap cache.
- */
-static inline struct address_space *folio_file_mapping(struct folio *folio)
-{
- if (unlikely(folio_test_swapcache(folio)))
- return swapcache_mapping(folio);
-
- return folio->mapping;
-}
-
-/**
* folio_flush_mapping - Find the file mapping this folio belongs to.
* @folio: The folio.
*
@@ -575,11 +555,6 @@ static inline struct address_space *folio_flush_mapping(struct folio *folio)
return folio_mapping(folio);
}
-static inline struct address_space *page_file_mapping(struct page *page)
-{
- return folio_file_mapping(page_folio(page));
-}
-
/**
* folio_inode - Get the host inode for this folio.
* @folio: The folio.
diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h
index ac8c44dd8237..c5e9cac0575e 100644
--- a/include/linux/part_stat.h
+++ b/include/linux/part_stat.h
@@ -33,7 +33,7 @@ struct disk_stats {
#define part_stat_read(part, field) \
({ \
- typeof((part)->bd_stats->field) res = 0; \
+ TYPEOF_UNQUAL((part)->bd_stats->field) res = 0; \
unsigned int _cpu; \
for_each_possible_cpu(_cpu) \
res += per_cpu_ptr((part)->bd_stats, _cpu)->field; \
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index 0e8b74e63767..75c6c86cf09d 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -42,6 +42,7 @@ int pci_enable_pasid(struct pci_dev *pdev, int features);
void pci_disable_pasid(struct pci_dev *pdev);
int pci_pasid_features(struct pci_dev *pdev);
int pci_max_pasids(struct pci_dev *pdev);
+int pci_pasid_status(struct pci_dev *pdev);
#else /* CONFIG_PCI_PASID */
static inline int pci_enable_pasid(struct pci_dev *pdev, int features)
{ return -EINVAL; }
@@ -50,6 +51,8 @@ static inline int pci_pasid_features(struct pci_dev *pdev)
{ return -EINVAL; }
static inline int pci_max_pasids(struct pci_dev *pdev)
{ return -EINVAL; }
+static inline int pci_pasid_status(struct pci_dev *pdev)
+{ return -EINVAL; }
#endif /* CONFIG_PCI_PASID */
#endif /* LINUX_PCI_ATS_H */
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 0fcacb909778..0aeb0e276a3e 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -222,7 +222,7 @@ do { \
} while (0)
#define PERCPU_PTR(__p) \
- (typeof(*(__p)) __force __kernel *)((__force unsigned long)(__p))
+ (TYPEOF_UNQUAL(*(__p)) __force __kernel *)((__force unsigned long)(__p))
#ifdef CONFIG_SMP
@@ -318,7 +318,7 @@ static __always_inline void __this_cpu_preempt_check(const char *op) { }
#define __pcpu_size_call_return(stem, variable) \
({ \
- typeof(variable) pscr_ret__; \
+ TYPEOF_UNQUAL(variable) pscr_ret__; \
__verify_pcpu_ptr(&(variable)); \
switch(sizeof(variable)) { \
case 1: pscr_ret__ = stem##1(variable); break; \
@@ -333,7 +333,7 @@ static __always_inline void __this_cpu_preempt_check(const char *op) { }
#define __pcpu_size_call_return2(stem, variable, ...) \
({ \
- typeof(variable) pscr2_ret__; \
+ TYPEOF_UNQUAL(variable) pscr2_ret__; \
__verify_pcpu_ptr(&(variable)); \
switch(sizeof(variable)) { \
case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \
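The typeof() to TYPEOF_UNQUAL() conversions above matter once percpu variables live in a named address space (e.g. __seg_gs on x86): plain typeof() would copy that qualifier onto a local, which cannot be placed on the stack. A hedged sketch of the effect, assuming a hypothetical percpu counter (TYPEOF_UNQUAL falls back to typeof on compilers without typeof_unqual support):

DEFINE_PER_CPU(unsigned long, demo_counter);	/* hypothetical */

static unsigned long demo_sum(void)
{
	/* qualifier stripped: an ordinary unsigned long on the stack */
	TYPEOF_UNQUAL(demo_counter) sum = 0;
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(&demo_counter, cpu);
	return sum;
}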
diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h
index 3469c4b20105..c74077977830 100644
--- a/include/linux/pgalloc_tag.h
+++ b/include/linux/pgalloc_tag.h
@@ -162,74 +162,32 @@ static inline void update_page_tag_ref(union pgtag_ref_handle handle, union code
}
}
-static inline void clear_page_tag_ref(struct page *page)
-{
- if (mem_alloc_profiling_enabled()) {
- union pgtag_ref_handle handle;
- union codetag_ref ref;
-
- if (get_page_tag_ref(page, &ref, &handle)) {
- set_codetag_empty(&ref);
- update_page_tag_ref(handle, &ref);
- put_page_tag_ref(handle);
- }
- }
-}
-
-static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
- unsigned int nr)
-{
- if (mem_alloc_profiling_enabled()) {
- union pgtag_ref_handle handle;
- union codetag_ref ref;
-
- if (get_page_tag_ref(page, &ref, &handle)) {
- alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr);
- update_page_tag_ref(handle, &ref);
- put_page_tag_ref(handle);
- }
- }
-}
+/* Should be called only if mem_alloc_profiling_enabled() */
+void __clear_page_tag_ref(struct page *page);
-static inline void pgalloc_tag_sub(struct page *page, unsigned int nr)
+static inline void clear_page_tag_ref(struct page *page)
{
- if (mem_alloc_profiling_enabled()) {
- union pgtag_ref_handle handle;
- union codetag_ref ref;
-
- if (get_page_tag_ref(page, &ref, &handle)) {
- alloc_tag_sub(&ref, PAGE_SIZE * nr);
- update_page_tag_ref(handle, &ref);
- put_page_tag_ref(handle);
- }
- }
+ if (mem_alloc_profiling_enabled())
+ __clear_page_tag_ref(page);
}
-static inline struct alloc_tag *pgalloc_tag_get(struct page *page)
+/* Should be called only if mem_alloc_profiling_enabled() */
+static inline struct alloc_tag *__pgalloc_tag_get(struct page *page)
{
struct alloc_tag *tag = NULL;
-
- if (mem_alloc_profiling_enabled()) {
- union pgtag_ref_handle handle;
- union codetag_ref ref;
-
- if (get_page_tag_ref(page, &ref, &handle)) {
- alloc_tag_sub_check(&ref);
- if (ref.ct)
- tag = ct_to_alloc_tag(ref.ct);
- put_page_tag_ref(handle);
- }
+ union pgtag_ref_handle handle;
+ union codetag_ref ref;
+
+ if (get_page_tag_ref(page, &ref, &handle)) {
+ alloc_tag_sub_check(&ref);
+ if (ref.ct)
+ tag = ct_to_alloc_tag(ref.ct);
+ put_page_tag_ref(handle);
}
return tag;
}
-static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr)
-{
- if (mem_alloc_profiling_enabled() && tag)
- this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr);
-}
-
void pgalloc_tag_split(struct folio *folio, int old_order, int new_order);
void pgalloc_tag_swap(struct folio *new, struct folio *old);
@@ -238,11 +196,6 @@ void __init alloc_tag_sec_init(void);
#else /* CONFIG_MEM_ALLOC_PROFILING */
static inline void clear_page_tag_ref(struct page *page) {}
-static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
- unsigned int nr) {}
-static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}
-static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; }
-static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {}
static inline void alloc_tag_sec_init(void) {}
static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {}
static inline void pgalloc_tag_swap(struct folio *new, struct folio *old) {}
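The rework above is the usual static-key split, in miniature (names below are hypothetical, not from this patch): the header keeps only the mem_alloc_profiling_enabled() test inline, so the disabled case costs one patched branch in every caller, while the cold body is emitted once, out of line:

/* header: */
void __profile_page(struct page *page);		/* defined once in a .c file */

static inline void profile_page(struct page *page)
{
	if (mem_alloc_profiling_enabled())	/* static branch, usually folded */
		__profile_page(page);
}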
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 4c107e17c547..e2b705c14945 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -222,10 +222,14 @@ static inline int pmd_dirty(pmd_t pmd)
* hazard could result in the direct mode hypervisor case, since the actual
* write to the page tables may not yet have taken place, so reads through
* a raw PTE pointer after it has been modified are not guaranteed to be
- * up to date. This mode can only be entered and left under the protection of
- * the page table locks for all page tables which may be modified. In the UP
- * case, this is required so that preemption is disabled, and in the SMP case,
- * it must synchronize the delayed page table writes properly on other CPUs.
+ * up to date.
+ *
+ * In the general case, no lock is guaranteed to be held between entry and exit
+ * of the lazy mode. So the implementation must assume preemption may be enabled
+ * and cpu migration is possible; it must take steps to be robust against this.
+ * (In practice, for user PTE updates, the appropriate page table lock(s) are
+ * held, but for kernel PTE updates, no lock is held). Nesting is not permitted
+ * and the mode cannot be used in interrupt context.
*/
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode() do {} while (0)
@@ -287,7 +291,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
{
page_table_check_ptes_set(mm, ptep, pte, nr);
- arch_enter_lazy_mmu_mode();
for (;;) {
set_pte(ptep, pte);
if (--nr == 0)
@@ -295,7 +298,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
ptep++;
pte = pte_next_pfn(pte);
}
- arch_leave_lazy_mmu_mode();
}
#endif
#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
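Since set_ptes() no longer enters lazy MMU mode itself, batching is now the caller's job. A sketch under the rules in the updated comment (hypothetical helper; for user mappings the caller is assumed to hold the page table lock, and the mode is never nested or used from interrupt context):

static void demo_set_pte_range(struct mm_struct *mm, unsigned long addr,
			       pte_t *ptep, pte_t pte, unsigned int nr)
{
	arch_enter_lazy_mmu_mode();
	set_ptes(mm, addr, ptep, pte, nr);
	arch_leave_lazy_mmu_mode();
}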
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 03cd5bae92d3..e63e6e70e860 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -227,8 +227,6 @@ int phy_pm_runtime_get(struct phy *phy);
int phy_pm_runtime_get_sync(struct phy *phy);
int phy_pm_runtime_put(struct phy *phy);
int phy_pm_runtime_put_sync(struct phy *phy);
-void phy_pm_runtime_allow(struct phy *phy);
-void phy_pm_runtime_forbid(struct phy *phy);
int phy_init(struct phy *phy);
int phy_exit(struct phy *phy);
int phy_power_on(struct phy *phy);
@@ -321,16 +319,6 @@ static inline int phy_pm_runtime_put_sync(struct phy *phy)
return -ENOSYS;
}
-static inline void phy_pm_runtime_allow(struct phy *phy)
-{
- return;
-}
-
-static inline void phy_pm_runtime_forbid(struct phy *phy)
-{
- return;
-}
-
static inline int phy_init(struct phy *phy)
{
if (!phy)
diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
index ecf290a0c98f..1f4e4f2b89bb 100644
--- a/include/linux/platform_data/cros_ec_commands.h
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -5046,6 +5046,7 @@ struct ec_response_pd_status {
#define PD_EVENT_DATA_SWAP BIT(3)
#define PD_EVENT_TYPEC BIT(4)
#define PD_EVENT_PPM BIT(5)
+#define PD_EVENT_INIT BIT(6)
struct ec_response_host_event_status {
uint32_t status; /* PD MCU host event status */
diff --git a/include/linux/pps_gen_kernel.h b/include/linux/pps_gen_kernel.h
index 022ea0ac4440..6214c8aa2e02 100644
--- a/include/linux/pps_gen_kernel.h
+++ b/include/linux/pps_gen_kernel.h
@@ -43,7 +43,7 @@ struct pps_gen_source_info {
/* The main struct */
struct pps_gen_device {
- struct pps_gen_source_info info; /* PSS generator info */
+ const struct pps_gen_source_info *info; /* PPS generator info */
 bool enabled; /* PPS generator status */
unsigned int event;
@@ -70,7 +70,7 @@ extern const struct attribute_group *pps_gen_groups[];
*/
extern struct pps_gen_device *pps_gen_register_source(
- struct pps_gen_source_info *info);
+ const struct pps_gen_source_info *info);
extern void pps_gen_unregister_source(struct pps_gen_device *pps_gen);
extern void pps_gen_event(struct pps_gen_device *pps_gen,
unsigned int event, void *data);
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index f8159f8a7d73..120536f4c6eb 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -132,7 +132,7 @@ static inline void rcu_sysrq_end(void) { }
#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
void rcu_irq_work_resched(void);
#else
-static inline void rcu_irq_work_resched(void) { }
+static __always_inline void rcu_irq_work_resched(void) { }
#endif
#ifdef CONFIG_RCU_NOCB_CPU
diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h
index 27343424225c..9ad134a04b41 100644
--- a/include/linux/rcuwait.h
+++ b/include/linux/rcuwait.h
@@ -4,18 +4,7 @@
#include <linux/rcupdate.h>
#include <linux/sched/signal.h>
-
-/*
- * rcuwait provides a way of blocking and waking up a single
- * task in an rcu-safe manner.
- *
- * The only time @task is non-nil is when a user is blocked (or
- * checking if it needs to) on a condition, and reset as soon as we
- * know that the condition has succeeded and are awoken.
- */
-struct rcuwait {
- struct task_struct __rcu *task;
-};
+#include <linux/types.h>
#define __RCUWAIT_INITIALIZER(name) \
{ .task = NULL, }
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index abcdde4df697..aa08c3bbbf59 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -177,16 +177,38 @@ void ctrl_alt_del(void);
extern void orderly_poweroff(bool force);
extern void orderly_reboot(void);
-void __hw_protection_shutdown(const char *reason, int ms_until_forced, bool shutdown);
-static inline void hw_protection_reboot(const char *reason, int ms_until_forced)
-{
- __hw_protection_shutdown(reason, ms_until_forced, false);
-}
+/**
+ * enum hw_protection_action - Hardware protection action
+ *
+ * @HWPROT_ACT_DEFAULT:
+ * The default action should be taken. This is HWPROT_ACT_SHUTDOWN
+ * by default, but can be overridden.
+ * @HWPROT_ACT_SHUTDOWN:
+ * The system should be shut down (powered off) for HW protection.
+ * @HWPROT_ACT_REBOOT:
+ * The system should be rebooted for HW protection.
+ */
+enum hw_protection_action { HWPROT_ACT_DEFAULT, HWPROT_ACT_SHUTDOWN, HWPROT_ACT_REBOOT };
-static inline void hw_protection_shutdown(const char *reason, int ms_until_forced)
+void __hw_protection_trigger(const char *reason, int ms_until_forced,
+ enum hw_protection_action action);
+
+/**
+ * hw_protection_trigger - Trigger default emergency system hardware protection action
+ *
+ * @reason: Reason of emergency shutdown or reboot to be printed.
+ * @ms_until_forced: Time to wait for orderly shutdown or reboot before
+ * triggering it. Negative value disables the forced
+ * shutdown or reboot.
+ *
+ * Initiate an emergency system shutdown or reboot in order to protect
+ * hardware from further damage. The exact action taken is controllable at
+ * runtime and defaults to shutdown.
+ */
+static inline void hw_protection_trigger(const char *reason, int ms_until_forced)
{
- __hw_protection_shutdown(reason, ms_until_forced, true);
+ __hw_protection_trigger(reason, ms_until_forced, HWPROT_ACT_DEFAULT);
}
/*
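A usage sketch for the new entry point (hypothetical caller): a thermal driver requesting the runtime-configured protection action, with the shutdown or reboot forced after five seconds if the orderly path stalls:

static void demo_critical_overtemp(void)
{
	hw_protection_trigger("critical temperature reached", 5000);
}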
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 35f039ecb272..80dc023ac2bf 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -87,6 +87,15 @@
* The decrements dec_and_test() and sub_and_test() also provide acquire
* ordering on success.
*
+ * refcount_{add|inc}_not_zero_acquire() and refcount_set_release() provide
+ * acquire and release ordering for cases when the memory occupied by the
+ * object might be reused to store another object. This is important for the
+ * cases where secondary validation is required to detect such reuse, e.g.
+ * SLAB_TYPESAFE_BY_RCU. The secondary validation checks have to happen after
+ * the refcount is taken, hence acquire ordering is necessary. Similarly, when the
+ * object is initialized, all stores to its attributes should be visible before
+ * the refcount is set, otherwise a stale attribute value might be used by
+ * another task which succeeds in taking a refcount to the new object.
*/
#ifndef _LINUX_REFCOUNT_H
@@ -126,6 +135,31 @@ static inline void refcount_set(refcount_t *r, int n)
}
/**
+ * refcount_set_release - set a refcount's value with release ordering
+ * @r: the refcount
+ * @n: value to which the refcount will be set
+ *
+ * This function should be used when memory occupied by the object might be
+ * reused to store another object -- consider SLAB_TYPESAFE_BY_RCU.
+ *
+ * Provides release memory ordering which will order previous memory operations
+ * against this store. This ensures all updates to this object are visible
+ * once the refcount is set and stale values from the object previously
+ * occupying this memory are overwritten with new ones.
+ *
+ * This function should be called only after the new object is fully
+ * initialized. After this call the object should be considered visible to
+ * other tasks even if it was not yet added to an object collection normally
+ * used to discover it. This is because other tasks might have discovered the
+ * object previously occupying the same memory and, after memory reuse, they
+ * can succeed in taking a refcount to the new object and start using it.
+ */
+static inline void refcount_set_release(refcount_t *r, int n)
+{
+ atomic_set_release(&r->refs, n);
+}
+
+/**
* refcount_read - get a refcount's value
* @r: the refcount
*
@@ -178,6 +212,71 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
return __refcount_add_not_zero(i, r, NULL);
}
+static inline __must_check __signed_wrap
+bool __refcount_add_not_zero_limited_acquire(int i, refcount_t *r, int *oldp,
+ int limit)
+{
+ int old = refcount_read(r);
+
+ do {
+ if (!old)
+ break;
+
+ if (i > limit - old) {
+ if (oldp)
+ *oldp = old;
+ return false;
+ }
+ } while (!atomic_try_cmpxchg_acquire(&r->refs, &old, old + i));
+
+ if (oldp)
+ *oldp = old;
+
+ if (unlikely(old < 0 || old + i < 0))
+ refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);
+
+ return old;
+}
+
+static inline __must_check bool
+__refcount_inc_not_zero_limited_acquire(refcount_t *r, int *oldp, int limit)
+{
+ return __refcount_add_not_zero_limited_acquire(1, r, oldp, limit);
+}
+
+static inline __must_check __signed_wrap
+bool __refcount_add_not_zero_acquire(int i, refcount_t *r, int *oldp)
+{
+ return __refcount_add_not_zero_limited_acquire(i, r, oldp, INT_MAX);
+}
+
+/**
+ * refcount_add_not_zero_acquire - add a value to a refcount with acquire ordering unless it is 0
+ *
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Will saturate at REFCOUNT_SATURATED and WARN.
+ *
+ * This function should be used when memory occupied by the object might be
+ * reused to store another object -- consider SLAB_TYPESAFE_BY_RCU.
+ *
+ * Provides acquire memory ordering on success; it is assumed the caller has
+ * guaranteed the object memory to be stable (RCU, etc.). It does provide a
+ * control dependency and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc_not_zero_acquire() should instead be used to increment a
+ * reference count.
+ *
+ * Return: false if the passed refcount is 0, true otherwise
+ */
+static inline __must_check bool refcount_add_not_zero_acquire(int i, refcount_t *r)
+{
+ return __refcount_add_not_zero_acquire(i, r, NULL);
+}
+
static inline __signed_wrap
void __refcount_add(int i, refcount_t *r, int *oldp)
{
@@ -236,6 +335,32 @@ static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
return __refcount_inc_not_zero(r, NULL);
}
+static inline __must_check bool __refcount_inc_not_zero_acquire(refcount_t *r, int *oldp)
+{
+ return __refcount_add_not_zero_acquire(1, r, oldp);
+}
+
+/**
+ * refcount_inc_not_zero_acquire - increment a refcount with acquire ordering unless it is 0
+ * @r: the refcount to increment
+ *
+ * Similar to refcount_inc_not_zero(), but provides acquire memory ordering on
+ * success.
+ *
+ * This function should be used when memory occupied by the object might be
+ * reused to store another object -- consider SLAB_TYPESAFE_BY_RCU.
+ *
+ * Provides acquire memory ordering on success; it is assumed the caller has
+ * guaranteed the object memory to be stable (RCU, etc.). It does provide a
+ * control dependency and thereby orders future stores. See the comment on top.
+ *
+ * Return: true if the increment was successful, false otherwise
+ */
+static inline __must_check bool refcount_inc_not_zero_acquire(refcount_t *r)
+{
+ return __refcount_inc_not_zero_acquire(r, NULL);
+}
+
static inline void __refcount_inc(refcount_t *r, int *oldp)
{
__refcount_add(1, r, oldp);
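A minimal SLAB_TYPESAFE_BY_RCU pairing built on the helpers added above; struct obj, obj_cache and table_find() are hypothetical. The acquire on lookup orders the identity re-check after the refcount is taken; the release on creation publishes all fields before the object becomes reachable:

struct obj {
	refcount_t	ref;
	unsigned long	key;
};

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->ref))
		kmem_cache_free(obj_cache, o);
}

static struct obj *obj_lookup(unsigned long key)
{
	struct obj *o;

	rcu_read_lock();
	o = table_find(key);				/* hypothetical */
	if (o && refcount_inc_not_zero_acquire(&o->ref)) {
		if (o->key != key) {	/* memory was reused: back off */
			obj_put(o);
			o = NULL;
		}
	} else {
		o = NULL;
	}
	rcu_read_unlock();
	return o;
}

static struct obj *obj_new(unsigned long key)
{
	struct obj *o = kmem_cache_alloc(obj_cache, GFP_KERNEL);

	if (!o)
		return NULL;
	o->key = key;				/* init everything first... */
	refcount_set_release(&o->ref, 1);	/* ...then publish */
	return o;
}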
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 8463a128e2f4..6c85b28ea30b 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -1259,7 +1259,7 @@ static inline int rhashtable_replace_fast(
static inline void rhltable_walk_enter(struct rhltable *hlt,
struct rhashtable_iter *iter)
{
- return rhashtable_walk_enter(&hlt->ht, iter);
+ rhashtable_walk_enter(&hlt->ht, iter);
}
/**
@@ -1275,12 +1275,12 @@ static inline void rhltable_free_and_destroy(struct rhltable *hlt,
void *arg),
void *arg)
{
- return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
+ rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
}
static inline void rhltable_destroy(struct rhltable *hlt)
{
- return rhltable_free_and_destroy(hlt, NULL, NULL);
+ rhltable_free_and_destroy(hlt, NULL, NULL);
}
#endif /* _LINUX_RHASHTABLE_H */
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 683a04088f3f..6b82b618846e 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -13,6 +13,7 @@
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
+#include <linux/bit_spinlock.h>
/*
* The anon_vma heads a list of private "related" vmas, to scan if
@@ -173,6 +174,214 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
struct anon_vma *folio_get_anon_vma(const struct folio *folio);
+#ifdef CONFIG_MM_ID
+static __always_inline void folio_lock_large_mapcount(struct folio *folio)
+{
+ bit_spin_lock(FOLIO_MM_IDS_LOCK_BITNUM, &folio->_mm_ids);
+}
+
+static __always_inline void folio_unlock_large_mapcount(struct folio *folio)
+{
+ __bit_spin_unlock(FOLIO_MM_IDS_LOCK_BITNUM, &folio->_mm_ids);
+}
+
+static inline unsigned int folio_mm_id(const struct folio *folio, int idx)
+{
+ VM_WARN_ON_ONCE(idx != 0 && idx != 1);
+ return folio->_mm_id[idx] & MM_ID_MASK;
+}
+
+static inline void folio_set_mm_id(struct folio *folio, int idx, mm_id_t id)
+{
+ VM_WARN_ON_ONCE(idx != 0 && idx != 1);
+ folio->_mm_id[idx] &= ~MM_ID_MASK;
+ folio->_mm_id[idx] |= id;
+}
+
+static inline void __folio_large_mapcount_sanity_checks(const struct folio *folio,
+ int diff, mm_id_t mm_id)
+{
+ VM_WARN_ON_ONCE(!folio_test_large(folio) || folio_test_hugetlb(folio));
+ VM_WARN_ON_ONCE(diff <= 0);
+ VM_WARN_ON_ONCE(mm_id < MM_ID_MIN || mm_id > MM_ID_MAX);
+
+ /*
+ * Make sure we can detect at least one complete PTE mapping of the
+ * folio in a single MM as "exclusively mapped". This is primarily
+ * a check on 32bit, where we currently reduce the size of the per-MM
+ * mapcount to a short.
+ */
+ VM_WARN_ON_ONCE(diff > folio_large_nr_pages(folio));
+ VM_WARN_ON_ONCE(folio_large_nr_pages(folio) - 1 > MM_ID_MAPCOUNT_MAX);
+
+ VM_WARN_ON_ONCE(folio_mm_id(folio, 0) == MM_ID_DUMMY &&
+ folio->_mm_id_mapcount[0] != -1);
+ VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != MM_ID_DUMMY &&
+ folio->_mm_id_mapcount[0] < 0);
+ VM_WARN_ON_ONCE(folio_mm_id(folio, 1) == MM_ID_DUMMY &&
+ folio->_mm_id_mapcount[1] != -1);
+ VM_WARN_ON_ONCE(folio_mm_id(folio, 1) != MM_ID_DUMMY &&
+ folio->_mm_id_mapcount[1] < 0);
+ VM_WARN_ON_ONCE(!folio_mapped(folio) &&
+ folio_test_large_maybe_mapped_shared(folio));
+}
+
+static __always_inline void folio_set_large_mapcount(struct folio *folio,
+ int mapcount, struct vm_area_struct *vma)
+{
+ __folio_large_mapcount_sanity_checks(folio, mapcount, vma->vm_mm->mm_id);
+
+ VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != MM_ID_DUMMY);
+ VM_WARN_ON_ONCE(folio_mm_id(folio, 1) != MM_ID_DUMMY);
+
+ /* Note: mapcounts start at -1. */
+ atomic_set(&folio->_large_mapcount, mapcount - 1);
+ folio->_mm_id_mapcount[0] = mapcount - 1;
+ folio_set_mm_id(folio, 0, vma->vm_mm->mm_id);
+}
+
+static __always_inline int folio_add_return_large_mapcount(struct folio *folio,
+ int diff, struct vm_area_struct *vma)
+{
+ const mm_id_t mm_id = vma->vm_mm->mm_id;
+ int new_mapcount_val;
+
+ folio_lock_large_mapcount(folio);
+ __folio_large_mapcount_sanity_checks(folio, diff, mm_id);
+
+ new_mapcount_val = atomic_read(&folio->_large_mapcount) + diff;
+ atomic_set(&folio->_large_mapcount, new_mapcount_val);
+
+ /*
+ * If a folio is mapped more than once into an MM on 32bit, we
+ * can in theory overflow the per-MM mapcount (although only for
+ * fairly large folios), turning it negative. In that case, just
+ * free up the slot and mark the folio "mapped shared", otherwise
+ * we might be in trouble when unmapping pages later.
+ */
+ if (folio_mm_id(folio, 0) == mm_id) {
+ folio->_mm_id_mapcount[0] += diff;
+ if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio->_mm_id_mapcount[0] < 0)) {
+ folio->_mm_id_mapcount[0] = -1;
+ folio_set_mm_id(folio, 0, MM_ID_DUMMY);
+ folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT;
+ }
+ } else if (folio_mm_id(folio, 1) == mm_id) {
+ folio->_mm_id_mapcount[1] += diff;
+ if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio->_mm_id_mapcount[1] < 0)) {
+ folio->_mm_id_mapcount[1] = -1;
+ folio_set_mm_id(folio, 1, MM_ID_DUMMY);
+ folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT;
+ }
+ } else if (folio_mm_id(folio, 0) == MM_ID_DUMMY) {
+ folio_set_mm_id(folio, 0, mm_id);
+ folio->_mm_id_mapcount[0] = diff - 1;
+ /* We might have other mappings already. */
+ if (new_mapcount_val != diff - 1)
+ folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT;
+ } else if (folio_mm_id(folio, 1) == MM_ID_DUMMY) {
+ folio_set_mm_id(folio, 1, mm_id);
+ folio->_mm_id_mapcount[1] = diff - 1;
+ /* Slot 0 certainly has mappings as well. */
+ folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT;
+ }
+ folio_unlock_large_mapcount(folio);
+ return new_mapcount_val + 1;
+}
+#define folio_add_large_mapcount folio_add_return_large_mapcount
+
+static __always_inline int folio_sub_return_large_mapcount(struct folio *folio,
+ int diff, struct vm_area_struct *vma)
+{
+ const mm_id_t mm_id = vma->vm_mm->mm_id;
+ int new_mapcount_val;
+
+ folio_lock_large_mapcount(folio);
+ __folio_large_mapcount_sanity_checks(folio, diff, mm_id);
+
+ new_mapcount_val = atomic_read(&folio->_large_mapcount) - diff;
+ atomic_set(&folio->_large_mapcount, new_mapcount_val);
+
+ /*
+ * There are valid corner cases where we might underflow a per-MM
+ * mapcount (some mappings added when no slot was free, some mappings
+ * added once a slot was free), so we always set it to -1 once we go
+ * negative.
+ */
+ if (folio_mm_id(folio, 0) == mm_id) {
+ folio->_mm_id_mapcount[0] -= diff;
+ if (folio->_mm_id_mapcount[0] >= 0)
+ goto out;
+ folio->_mm_id_mapcount[0] = -1;
+ folio_set_mm_id(folio, 0, MM_ID_DUMMY);
+ } else if (folio_mm_id(folio, 1) == mm_id) {
+ folio->_mm_id_mapcount[1] -= diff;
+ if (folio->_mm_id_mapcount[1] >= 0)
+ goto out;
+ folio->_mm_id_mapcount[1] = -1;
+ folio_set_mm_id(folio, 1, MM_ID_DUMMY);
+ }
+
+ /*
+ * If one MM slot owns all mappings, the folio is mapped exclusively.
+ * Note that if the folio is now unmapped (new_mapcount_val == -1), both
+ * slots must be free (mapcount == -1), and we'll also mark it as
+ * exclusive.
+ */
+ if (folio->_mm_id_mapcount[0] == new_mapcount_val ||
+ folio->_mm_id_mapcount[1] == new_mapcount_val)
+ folio->_mm_ids &= ~FOLIO_MM_IDS_SHARED_BIT;
+out:
+ folio_unlock_large_mapcount(folio);
+ return new_mapcount_val + 1;
+}
+#define folio_sub_large_mapcount folio_sub_return_large_mapcount
+#else /* !CONFIG_MM_ID */
+/*
+ * See __folio_rmap_sanity_checks(), we might map large folios even without
+ * CONFIG_TRANSPARENT_HUGEPAGE. We'll keep that working for now.
+ */
+static inline void folio_set_large_mapcount(struct folio *folio, int mapcount,
+ struct vm_area_struct *vma)
+{
+ /* Note: mapcounts start at -1. */
+ atomic_set(&folio->_large_mapcount, mapcount - 1);
+}
+
+static inline void folio_add_large_mapcount(struct folio *folio,
+ int diff, struct vm_area_struct *vma)
+{
+ atomic_add(diff, &folio->_large_mapcount);
+}
+
+static inline int folio_add_return_large_mapcount(struct folio *folio,
+ int diff, struct vm_area_struct *vma)
+{
+ BUILD_BUG();
+}
+
+static inline void folio_sub_large_mapcount(struct folio *folio,
+ int diff, struct vm_area_struct *vma)
+{
+ atomic_sub(diff, &folio->_large_mapcount);
+}
+
+static inline int folio_sub_return_large_mapcount(struct folio *folio,
+ int diff, struct vm_area_struct *vma)
+{
+ BUILD_BUG();
+}
+#endif /* CONFIG_MM_ID */
+
+#define folio_inc_large_mapcount(folio, vma) \
+ folio_add_large_mapcount(folio, 1, vma)
+#define folio_inc_return_large_mapcount(folio, vma) \
+ folio_add_return_large_mapcount(folio, 1, vma)
+#define folio_dec_large_mapcount(folio, vma) \
+ folio_sub_large_mapcount(folio, 1, vma)
+#define folio_dec_return_large_mapcount(folio, vma) \
+ folio_sub_return_large_mapcount(folio, 1, vma)
+
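An illustrative sequence for the per-MM slot accounting above (not from this patch; locking and the surrounding rmap calls are elided, and the folio is assumed large and non-hugetlb). One MM keeps the folio "exclusive"; a second MM occupies the other slot and sets the shared bit:

static void demo_mm_id_tracking(struct folio *folio,
				struct vm_area_struct *vma_a,	/* mm A */
				struct vm_area_struct *vma_b)	/* mm B */
{
	/* first mapping: slot 0 takes mm A, folio remains exclusive */
	folio_set_large_mapcount(folio, 1, vma_a);

	/* more mappings from mm A still look exclusive */
	folio_add_large_mapcount(folio, 1, vma_a);

	/* a mapping from mm B takes slot 1 and marks the folio shared */
	folio_add_large_mapcount(folio, 1, vma_b);
	WARN_ON(!folio_test_large_maybe_mapped_shared(folio));
}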
/* RMAP flags, currently only relevant for some anon rmap operations. */
typedef int __bitwise rmap_t;
@@ -192,6 +401,7 @@ typedef int __bitwise rmap_t;
enum rmap_level {
RMAP_LEVEL_PTE = 0,
RMAP_LEVEL_PMD,
+ RMAP_LEVEL_PUD,
};
static inline void __folio_rmap_sanity_checks(const struct folio *folio,
@@ -228,6 +438,14 @@ static inline void __folio_rmap_sanity_checks(const struct folio *folio,
VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio);
VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio);
break;
+ case RMAP_LEVEL_PUD:
+ /*
+ * Assume that we are creating a single "entire" mapping of the
+ * folio.
+ */
+ VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PUD_NR, folio);
+ VM_WARN_ON_FOLIO(nr_pages != HPAGE_PUD_NR, folio);
+ break;
default:
VM_WARN_ON_ONCE(true);
}
@@ -251,12 +469,16 @@ void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages,
folio_add_file_rmap_ptes(folio, page, 1, vma)
void folio_add_file_rmap_pmd(struct folio *, struct page *,
struct vm_area_struct *);
+void folio_add_file_rmap_pud(struct folio *, struct page *,
+ struct vm_area_struct *);
void folio_remove_rmap_ptes(struct folio *, struct page *, int nr_pages,
struct vm_area_struct *);
#define folio_remove_rmap_pte(folio, page, vma) \
folio_remove_rmap_ptes(folio, page, 1, vma)
void folio_remove_rmap_pmd(struct folio *, struct page *,
struct vm_area_struct *);
+void folio_remove_rmap_pud(struct folio *, struct page *,
+ struct vm_area_struct *);
void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address, rmap_t flags);
@@ -322,7 +544,8 @@ static inline void hugetlb_remove_rmap(struct folio *folio)
}
static __always_inline void __folio_dup_file_rmap(struct folio *folio,
- struct page *page, int nr_pages, enum rmap_level level)
+ struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
+ enum rmap_level level)
{
const int orig_nr_pages = nr_pages;
@@ -335,14 +558,17 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio,
break;
}
- do {
- atomic_inc(&page->_mapcount);
- } while (page++, --nr_pages > 0);
- atomic_add(orig_nr_pages, &folio->_large_mapcount);
+ if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) {
+ do {
+ atomic_inc(&page->_mapcount);
+ } while (page++, --nr_pages > 0);
+ }
+ folio_add_large_mapcount(folio, orig_nr_pages, dst_vma);
break;
case RMAP_LEVEL_PMD:
+ case RMAP_LEVEL_PUD:
atomic_inc(&folio->_entire_mapcount);
- atomic_inc(&folio->_large_mapcount);
+ folio_inc_large_mapcount(folio, dst_vma);
break;
}
}
@@ -352,45 +578,47 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio,
* @folio: The folio to duplicate the mappings of
* @page: The first page to duplicate the mappings of
* @nr_pages: The number of pages of which the mapping will be duplicated
+ * @dst_vma: The destination vm area
*
* The page range of the folio is defined by [page, page + nr_pages)
*
* The caller needs to hold the page table lock.
*/
static inline void folio_dup_file_rmap_ptes(struct folio *folio,
- struct page *page, int nr_pages)
+ struct page *page, int nr_pages, struct vm_area_struct *dst_vma)
{
- __folio_dup_file_rmap(folio, page, nr_pages, RMAP_LEVEL_PTE);
+ __folio_dup_file_rmap(folio, page, nr_pages, dst_vma, RMAP_LEVEL_PTE);
}
static __always_inline void folio_dup_file_rmap_pte(struct folio *folio,
- struct page *page)
+ struct page *page, struct vm_area_struct *dst_vma)
{
- __folio_dup_file_rmap(folio, page, 1, RMAP_LEVEL_PTE);
+ __folio_dup_file_rmap(folio, page, 1, dst_vma, RMAP_LEVEL_PTE);
}
/**
* folio_dup_file_rmap_pmd - duplicate a PMD mapping of a page range of a folio
* @folio: The folio to duplicate the mapping of
* @page: The first page to duplicate the mapping of
+ * @dst_vma: The destination vm area
*
* The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
*
* The caller needs to hold the page table lock.
*/
static inline void folio_dup_file_rmap_pmd(struct folio *folio,
- struct page *page)
+ struct page *page, struct vm_area_struct *dst_vma)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- __folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, RMAP_LEVEL_PTE);
+ __folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, dst_vma, RMAP_LEVEL_PTE);
#else
WARN_ON_ONCE(true);
#endif
}
static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
- struct page *page, int nr_pages, struct vm_area_struct *src_vma,
- enum rmap_level level)
+ struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma, enum rmap_level level)
{
const int orig_nr_pages = nr_pages;
bool maybe_pinned;
@@ -432,18 +660,20 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
do {
if (PageAnonExclusive(page))
ClearPageAnonExclusive(page);
- atomic_inc(&page->_mapcount);
+ if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
+ atomic_inc(&page->_mapcount);
} while (page++, --nr_pages > 0);
- atomic_add(orig_nr_pages, &folio->_large_mapcount);
+ folio_add_large_mapcount(folio, orig_nr_pages, dst_vma);
break;
case RMAP_LEVEL_PMD:
+ case RMAP_LEVEL_PUD:
if (PageAnonExclusive(page)) {
if (unlikely(maybe_pinned))
return -EBUSY;
ClearPageAnonExclusive(page);
}
atomic_inc(&folio->_entire_mapcount);
- atomic_inc(&folio->_large_mapcount);
+ folio_inc_large_mapcount(folio, dst_vma);
break;
}
return 0;
@@ -455,6 +685,7 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
* @folio: The folio to duplicate the mappings of
* @page: The first page to duplicate the mappings of
* @nr_pages: The number of pages of which the mapping will be duplicated
+ * @dst_vma: The destination vm area
* @src_vma: The vm area from which the mappings are duplicated
*
* The page range of the folio is defined by [page, page + nr_pages)
@@ -473,16 +704,18 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
* Returns 0 if duplicating the mappings succeeded. Returns -EBUSY otherwise.
*/
static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio,
- struct page *page, int nr_pages, struct vm_area_struct *src_vma)
+ struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma)
{
- return __folio_try_dup_anon_rmap(folio, page, nr_pages, src_vma,
- RMAP_LEVEL_PTE);
+ return __folio_try_dup_anon_rmap(folio, page, nr_pages, dst_vma,
+ src_vma, RMAP_LEVEL_PTE);
}
static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio,
- struct page *page, struct vm_area_struct *src_vma)
+ struct page *page, struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma)
{
- return __folio_try_dup_anon_rmap(folio, page, 1, src_vma,
+ return __folio_try_dup_anon_rmap(folio, page, 1, dst_vma, src_vma,
RMAP_LEVEL_PTE);
}
@@ -491,6 +724,7 @@ static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio,
* of a folio
* @folio: The folio to duplicate the mapping of
* @page: The first page to duplicate the mapping of
+ * @dst_vma: The destination vm area
* @src_vma: The vm area from which the mapping is duplicated
*
* The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
@@ -509,11 +743,12 @@ static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio,
* Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise.
*/
static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
- struct page *page, struct vm_area_struct *src_vma)
+ struct page *page, struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, src_vma,
- RMAP_LEVEL_PMD);
+ return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, dst_vma,
+ src_vma, RMAP_LEVEL_PMD);
#else
WARN_ON_ONCE(true);
return -EBUSY;
@@ -663,9 +898,8 @@ int folio_referenced(struct folio *, int is_locked,
void try_to_migrate(struct folio *folio, enum ttu_flags flags);
void try_to_unmap(struct folio *, enum ttu_flags flags);
-int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
- unsigned long end, struct page **pages,
- void *arg);
+struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr,
+ void *owner, struct folio **foliop);
/* Avoid racy checks */
#define PVMW_SYNC (1 << 0)
@@ -739,6 +973,9 @@ unsigned long page_address_in_vma(const struct folio *folio,
*/
int folio_mkclean(struct folio *);
+int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff,
+ unsigned long pfn, unsigned long nr_pages);
+
int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
struct vm_area_struct *vma);
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 3f4d315aaec9..95da051fb155 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -170,6 +170,7 @@ struct rtc_device {
/* useful timestamps */
#define RTC_TIMESTAMP_BEGIN_0000 -62167219200ULL /* 0000-01-01 00:00:00 */
#define RTC_TIMESTAMP_BEGIN_1900 -2208988800LL /* 1900-01-01 00:00:00 */
+#define RTC_TIMESTAMP_EPOCH_GPS 315964800LL /* 1980-01-06 00:00:00 */
#define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */
#define RTC_TIMESTAMP_END_2063 2966371199LL /* 2063-12-31 23:59:59 */
#define RTC_TIMESTAMP_END_2079 3471292799LL /* 2079-12-31 23:59:59 */
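A worked check of the new constant: 1980-01-06 lies 3657 days after the Unix epoch (ten years, two of them leap, plus five days), and 3657 * 86400 = 315964800. Ignoring the GPS/UTC leap-second offset, which a driver would apply separately, the conversion is a plain subtraction:

static inline time64_t demo_unix_to_gps(time64_t unix_time)
{
	return unix_time - RTC_TIMESTAMP_EPOCH_GPS;
}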
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 56ddeb37b5cd..f96ac1982893 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1239,6 +1239,10 @@ struct task_struct {
struct mutex_waiter *blocked_on;
#endif
+#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
+ struct mutex *blocker_mutex;
+#endif
+
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
int non_block_count;
#endif
diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
index fb1e295e7e63..166b19af956f 100644
--- a/include/linux/sched/smt.h
+++ b/include/linux/sched/smt.h
@@ -12,7 +12,7 @@ static __always_inline bool sched_smt_active(void)
return static_branch_likely(&sched_smt_present);
}
#else
-static inline bool sched_smt_active(void) { return false; }
+static __always_inline bool sched_smt_active(void) { return false; }
#endif
void arch_smt_update(void);
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
index fe41da005970..52791e070506 100644
--- a/include/linux/seq_buf.h
+++ b/include/linux/seq_buf.h
@@ -167,8 +167,8 @@ extern int seq_buf_hex_dump(struct seq_buf *s, const char *prefix_str,
const void *buf, size_t len, bool ascii);
#ifdef CONFIG_BINARY_PRINTF
-extern int
-seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary);
+__printf(2, 0)
+int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary);
#endif
void seq_buf_do_printk(struct seq_buf *s, const char *lvl);
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 2fb266ea69fa..d6ebf0596510 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -181,6 +181,7 @@ int seq_open_private(struct file *, const struct seq_operations *, int);
int seq_release_private(struct inode *, struct file *);
#ifdef CONFIG_BINARY_PRINTF
+__printf(2, 0)
void seq_bprintf(struct seq_file *m, const char *f, const u32 *binary);
#endif
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index ff78efc1f60d..34562eb99931 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -84,7 +84,6 @@ enum serdev_parity {
struct serdev_controller_ops {
ssize_t (*write_buf)(struct serdev_controller *, const u8 *, size_t);
void (*write_flush)(struct serdev_controller *);
- int (*write_room)(struct serdev_controller *);
int (*open)(struct serdev_controller *);
void (*close)(struct serdev_controller *);
void (*set_flow_control)(struct serdev_controller *, bool);
@@ -212,7 +211,6 @@ int serdev_device_break_ctl(struct serdev_device *serdev, int break_state);
void serdev_device_write_wakeup(struct serdev_device *);
ssize_t serdev_device_write(struct serdev_device *, const u8 *, size_t, long);
void serdev_device_write_flush(struct serdev_device *);
-int serdev_device_write_room(struct serdev_device *);
/*
* serdev device driver functions
@@ -273,10 +271,6 @@ static inline ssize_t serdev_device_write(struct serdev_device *sdev,
return -ENODEV;
}
static inline void serdev_device_write_flush(struct serdev_device *sdev) {}
-static inline int serdev_device_write_room(struct serdev_device *sdev)
-{
- return 0;
-}
#define serdev_device_driver_register(x)
#define serdev_device_driver_unregister(x)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 98e07e9e9e58..d5a8ab98035c 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -137,6 +137,15 @@ enum _slab_flag_bits {
* rcu_read_lock before reading the address, then rcu_read_unlock after
* taking the spinlock within the structure expected at that address.
*
+ * Note that the object identity check has to be done *after* acquiring a
+ * reference; therefore the user has to ensure proper ordering for loads.
+ * Similarly, when initializing objects allocated with SLAB_TYPESAFE_BY_RCU,
+ * the newly allocated object has to be fully initialized *before* its
+ * refcount gets initialized, and proper ordering for stores is required.
+ * refcount_{add|inc}_not_zero_acquire() and refcount_set_release() are
+ * designed with the proper fences required for reference counting objects
+ * allocated with SLAB_TYPESAFE_BY_RCU.
+ *
* Note that it is not possible to acquire a lock within a structure
* allocated with SLAB_TYPESAFE_BY_RCU without first acquiring a reference
* as described above. The reason is that SLAB_TYPESAFE_BY_RCU pages
@@ -236,12 +245,6 @@ enum _slab_flag_bits {
#endif
/*
- * freeptr_t represents a SLUB freelist pointer, which might be encoded
- * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
- */
-typedef struct { unsigned long v; } freeptr_t;
-
-/*
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
*
* Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
diff --git a/include/linux/sort.h b/include/linux/sort.h
index e163287ac6c1..8e5603b10941 100644
--- a/include/linux/sort.h
+++ b/include/linux/sort.h
@@ -13,4 +13,15 @@ void sort(void *base, size_t num, size_t size,
cmp_func_t cmp_func,
swap_func_t swap_func);
+/* Versions that periodically call cond_resched(): */
+
+void sort_r_nonatomic(void *base, size_t num, size_t size,
+ cmp_r_func_t cmp_func,
+ swap_r_func_t swap_func,
+ const void *priv);
+
+void sort_nonatomic(void *base, size_t num, size_t size,
+ cmp_func_t cmp_func,
+ swap_func_t swap_func);
+
#endif
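A usage sketch for the variants added above: they behave like sort()/sort_r() but may call cond_resched(), so they are only safe in process context with no locks held (helper names are illustrative; a NULL swap_func selects the generic swap):

static int demo_cmp_u32(const void *a, const void *b)
{
	u32 x = *(const u32 *)a, y = *(const u32 *)b;

	return x < y ? -1 : x > y;
}

static void demo_sort_big_array(u32 *arr, size_t n)
{
	sort_nonatomic(arr, n, sizeof(*arr), demo_cmp_u32, NULL);
}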
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index 2d6c30317792..2362f621d94c 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -150,12 +150,14 @@ enum sdw_dpn_pkg_mode {
*
* @SDW_STREAM_PCM: PCM data stream
* @SDW_STREAM_PDM: PDM data stream
+ * @SDW_STREAM_BPT: BPT data stream
*
* spec doesn't define this, but is used in implementation
*/
enum sdw_stream_type {
SDW_STREAM_PCM = 0,
SDW_STREAM_PDM = 1,
+ SDW_STREAM_BPT = 2,
};
/**
@@ -822,6 +824,15 @@ struct sdw_defer {
struct completion complete;
};
+/*
+ * Add a practical limit to BPT transfer sizes. BPT is typically used
+ * to transfer firmware, and larger firmware transfers will increase
+ * the cold latency beyond typical OS or user requirements.
+ */
+#define SDW_BPT_MSG_MAX_BYTES (1024 * 1024)
+
+struct sdw_bpt_msg;
+
/**
* struct sdw_master_ops - Master driver ops
* @read_prop: Read Master properties
@@ -837,6 +848,10 @@ struct sdw_defer {
* @get_device_num: Callback for vendor-specific device_number allocation
* @put_device_num: Callback for vendor-specific device_number release
* @new_peripheral_assigned: Callback to handle enumeration of new peripheral.
+ * @bpt_send_async: reserve resources for BPT stream and send message
+ * using BTP protocol
+ * @bpt_wait: wait for message completion using BTP protocol
+ * and release resources
*/
struct sdw_master_ops {
int (*read_prop)(struct sdw_bus *bus);
@@ -853,6 +868,9 @@ struct sdw_master_ops {
void (*new_peripheral_assigned)(struct sdw_bus *bus,
struct sdw_slave *slave,
int dev_num);
+ int (*bpt_send_async)(struct sdw_bus *bus, struct sdw_slave *slave,
+ struct sdw_bpt_msg *msg);
+ int (*bpt_wait)(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg);
};
int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
@@ -879,7 +897,7 @@ struct sdw_port_config {
* @ch_count: Channel count of the stream
* @bps: Number of bits per audio sample
* @direction: Data direction
- * @type: Stream type PCM or PDM
+ * @type: Stream type PCM, PDM or BPT
*/
struct sdw_stream_config {
unsigned int frame_rate;
@@ -929,7 +947,7 @@ struct sdw_stream_params {
* @name: SoundWire stream name
* @params: Stream parameters
* @state: Current state of the stream
- * @type: Stream type PCM or PDM
+ * @type: Stream type PCM, PDM or BPT
* @m_rt_count: Count of Master runtime(s) in this stream
* @master_list: List of Master runtime(s) in this stream.
* master_list can contain only one m_rt per Master instance
@@ -959,6 +977,9 @@ struct sdw_stream_runtime {
* @defer_msg: Defer message
* @params: Current bus parameters
* @stream_refcount: number of streams currently using this bus
+ * @bpt_stream_refcount: number of BPT streams currently using this bus (should
+ * be zero or one; multiple streams per link are not supported).
+ * @bpt_stream: pointer stored to handle BPT streams.
* @ops: Master callback ops
* @port_ops: Master port callback ops
* @prop: Master properties
@@ -996,6 +1017,8 @@ struct sdw_bus {
struct sdw_defer defer_msg;
struct sdw_bus_params params;
int stream_refcount;
+ int bpt_stream_refcount;
+ struct sdw_stream_runtime *bpt_stream;
const struct sdw_master_ops *ops;
const struct sdw_master_port_ops *port_ops;
struct sdw_master_prop prop;
@@ -1017,7 +1040,7 @@ struct sdw_bus {
unsigned int lane_used_bandwidth[SDW_MAX_LANES];
};
-struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name);
+struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name, enum sdw_stream_type type);
void sdw_release_stream(struct sdw_stream_runtime *stream);
int sdw_compute_params(struct sdw_bus *bus, struct sdw_stream_runtime *stream);
@@ -1043,6 +1066,10 @@ int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id);
void sdw_extract_slave_id(struct sdw_bus *bus, u64 addr, struct sdw_slave_id *id);
bool is_clock_scaling_supported_by_slave(struct sdw_slave *slave);
+int sdw_bpt_send_async(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg);
+int sdw_bpt_wait(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg);
+int sdw_bpt_send_sync(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg);
+
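A hedged sketch of a firmware download over BPT using the new entry points (the caller and the sdw_bpt_msg setup are hypothetical; the message layout is not part of this hunk). sdw_bpt_send_sync() is assumed to pair the asynchronous send with the wait, as done explicitly here:

static int demo_bpt_transfer(struct sdw_bus *bus, struct sdw_slave *slave,
			     struct sdw_bpt_msg *msg)
{
	int ret;

	ret = sdw_bpt_send_async(bus, slave, msg);	/* reserve + start */
	if (ret)
		return ret;
	return sdw_bpt_wait(bus, slave, msg);		/* complete + release */
}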
#if IS_ENABLED(CONFIG_SOUNDWIRE)
int sdw_stream_add_slave(struct sdw_slave *slave,
diff --git a/include/linux/soundwire/sdw_amd.h b/include/linux/soundwire/sdw_amd.h
index 799f8578137b..6b839987f14c 100644
--- a/include/linux/soundwire/sdw_amd.h
+++ b/include/linux/soundwire/sdw_amd.h
@@ -28,6 +28,8 @@
#define ACP_SDW1 1
#define AMD_SDW_MAX_MANAGER_COUNT 2
#define ACP63_PCI_REV_ID 0x63
+#define ACP70_PCI_REV_ID 0x70
+#define ACP71_PCI_REV_ID 0x71
struct acp_sdw_pdata {
u16 instance;
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index 580086417e4b..493d9de4e472 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -436,6 +436,10 @@ struct sdw_intel_hw_ops {
bool (*sync_check_cmdsync_unlocked)(struct sdw_intel *sdw);
void (*program_sdi)(struct sdw_intel *sdw, int dev_num);
+
+ int (*bpt_send_async)(struct sdw_intel *sdw, struct sdw_slave *slave,
+ struct sdw_bpt_msg *msg);
+ int (*bpt_wait)(struct sdw_intel *sdw, struct sdw_slave *slave, struct sdw_bpt_msg *msg);
};
extern const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops;
diff --git a/include/linux/string.h b/include/linux/string.h
index 0403a4ca4c11..01621ad0f598 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -336,8 +336,8 @@ int __sysfs_match_string(const char * const *array, size_t n, const char *s);
#define sysfs_match_string(_a, _s) __sysfs_match_string(_a, ARRAY_SIZE(_a), _s)
#ifdef CONFIG_BINARY_PRINTF
-int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
-int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
+__printf(3, 0) int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
+__printf(3, 0) int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
#endif
extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index fec976e58174..f8b406b0a1af 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -64,7 +64,9 @@ struct rpc_clnt {
cl_noretranstimeo: 1,/* No retransmit timeouts */
cl_autobind : 1,/* use getport() */
cl_chatty : 1,/* be verbose */
- cl_shutdown : 1;/* rpc immediate -EIO */
+ cl_shutdown : 1,/* rpc immediate -EIO */
+ cl_netunreach_fatal : 1;
+ /* Treat ENETUNREACH errors as fatal */
struct xprtsec_parms cl_xprtsec; /* transport security policy */
struct rpc_rtt * cl_rtt; /* RTO estimator data */
@@ -175,6 +177,7 @@ struct rpc_add_xprt_test {
#define RPC_CLNT_CREATE_SOFTERR (1UL << 10)
#define RPC_CLNT_CREATE_REUSEPORT (1UL << 11)
#define RPC_CLNT_CREATE_CONNECTED (1UL << 12)
+#define RPC_CLNT_CREATE_NETUNREACH_FATAL (1UL << 13)
struct rpc_clnt *rpc_create(struct rpc_create_args *args);
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index eac57914dcf3..ccba79ebf893 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -134,6 +134,7 @@ struct rpc_task_setup {
#define RPC_TASK_MOVEABLE 0x0004 /* nfs4.1+ rpc tasks */
#define RPC_TASK_NULLCREDS 0x0010 /* Use AUTH_NULL credential */
#define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */
+#define RPC_TASK_NETUNREACH_FATAL 0x0040 /* ENETUNREACH is fatal */
#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
#define RPC_TASK_NO_ROUND_ROBIN 0x0100 /* send requests on "main" xprt */
#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h
index e411368cdacf..e4db5022fe92 100644
--- a/include/linux/sunrpc/xprtmultipath.h
+++ b/include/linux/sunrpc/xprtmultipath.h
@@ -56,6 +56,7 @@ extern void rpc_xprt_switch_add_xprt(struct rpc_xprt_switch *xps,
struct rpc_xprt *xprt);
extern void rpc_xprt_switch_remove_xprt(struct rpc_xprt_switch *xps,
struct rpc_xprt *xprt, bool offline);
+extern struct rpc_xprt *rpc_xprt_switch_get_main_xprt(struct rpc_xprt_switch *xps);
extern void xprt_iter_init(struct rpc_xprt_iter *xpi,
struct rpc_xprt_switch *xps);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index a98c757400fe..db46b25a65ae 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -24,7 +24,6 @@ struct pagevec;
#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK 0x7fff
-#define SWAP_FLAG_PRIO_SHIFT 0
#define SWAP_FLAG_DISCARD 0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE 0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */
@@ -74,14 +73,13 @@ static inline int current_is_kswapd(void)
* to a special SWP_DEVICE_{READ|WRITE} entry.
*
* When a page is mapped by the device for exclusive access we set the CPU page
- * table entries to special SWP_DEVICE_EXCLUSIVE_* entries.
+ * table entries to a special SWP_DEVICE_EXCLUSIVE entry.
*/
#ifdef CONFIG_DEVICE_PRIVATE
-#define SWP_DEVICE_NUM 4
+#define SWP_DEVICE_NUM 3
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
-#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
-#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3)
+#define SWP_DEVICE_EXCLUSIVE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#else
#define SWP_DEVICE_NUM 0
#endif
@@ -286,12 +284,10 @@ enum swap_cluster_flags {
#endif
/*
- * We assign a cluster to each CPU, so each CPU can allocate swap entry from
- * its own cluster and swapout sequentially. The purpose is to optimize swapout
- * throughput.
+ * We keep using the same cluster for rotational devices so IO will be
+ * sequential. The purpose is to optimize swap throughput on these devices.
*/
-struct percpu_cluster {
- local_lock_t lock; /* Protect the percpu_cluster above */
+struct swap_sequential_cluster {
unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */
};
@@ -317,8 +313,7 @@ struct swap_info_struct {
atomic_long_t frag_cluster_nr[SWAP_NR_ORDERS];
unsigned int pages; /* total of usable pages of swap */
atomic_long_t inuse_pages; /* number of those currently in use */
- struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
- struct percpu_cluster *global_cluster; /* Use one global cluster for rotating device */
+ struct swap_sequential_cluster *global_cluster; /* Use one global cluster for rotating device */
spinlock_t global_cluster_lock; /* Serialize usage of global cluster */
struct rb_root swap_extent_root;/* root of the swap extent rbtree */
struct block_device *bdev; /* swap device or bdev of swap file */
@@ -461,7 +456,6 @@ void free_pages_and_swap_cache(struct encoded_page **, int);
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
-extern bool has_usable_swap(void);
/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
@@ -475,24 +469,22 @@ static inline long get_nr_swap_pages(void)
}
extern void si_swapinfo(struct sysinfo *);
-swp_entry_t folio_alloc_swap(struct folio *folio);
+int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask);
bool folio_free_swap(struct folio *folio);
void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
-extern int get_swap_pages(int n, swp_entry_t swp_entries[], int order);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t, int);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t entry, int nr);
extern void swap_free_nr(swp_entry_t entry, int nr_pages);
-extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern void free_swap_and_cache_nr(swp_entry_t entry, int nr);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
-extern int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry);
+extern bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
struct swap_info_struct *swp_swap_info(swp_entry_t entry);
struct backing_dev_info;
@@ -575,9 +567,9 @@ static inline int __swap_count(swp_entry_t entry)
return 0;
}
-static inline int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
+static inline bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry)
{
- return 0;
+ return false;
}
static inline int swp_swapcount(swp_entry_t entry)
@@ -585,11 +577,9 @@ static inline int swp_swapcount(swp_entry_t entry)
return 0;
}
-static inline swp_entry_t folio_alloc_swap(struct folio *folio)
+static inline int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask)
{
- swp_entry_t entry;
- entry.val = 0;
- return entry;
+ return -EINVAL;
}
static inline bool folio_free_swap(struct folio *folio)
@@ -650,7 +640,6 @@ static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
#endif
#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
-void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
swp_entry_t entry)
@@ -671,10 +660,6 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_p
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct folio *folio);
#else
-static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
-{
-}
-
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
swp_entry_t entry)
{
diff --git a/include/linux/swap_slots.h b/include/linux/swap_slots.h
deleted file mode 100644
index 840aec3523b2..000000000000
--- a/include/linux/swap_slots.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_SWAP_SLOTS_H
-#define _LINUX_SWAP_SLOTS_H
-
-#include <linux/swap.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-
-#define SWAP_SLOTS_CACHE_SIZE SWAP_BATCH
-#define THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE (5*SWAP_SLOTS_CACHE_SIZE)
-#define THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE (2*SWAP_SLOTS_CACHE_SIZE)
-
-struct swap_slots_cache {
- bool lock_initialized;
- struct mutex alloc_lock; /* protects slots, nr, cur */
- swp_entry_t *slots;
- int nr;
- int cur;
- int n_ret;
-};
-
-void disable_swap_slots_cache_lock(void);
-void reenable_swap_slots_cache_unlock(void);
-void enable_swap_slots_cache(void);
-
-extern bool swap_slot_cache_enabled;
-
-#endif /* _LINUX_SWAP_SLOTS_H */
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 96f26e29fefe..64ea151a7ae3 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -186,26 +186,16 @@ static inline bool is_writable_device_private_entry(swp_entry_t entry)
return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}
-static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
+static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
{
- return swp_entry(SWP_DEVICE_EXCLUSIVE_READ, offset);
-}
-
-static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
-{
- return swp_entry(SWP_DEVICE_EXCLUSIVE_WRITE, offset);
+ return swp_entry(SWP_DEVICE_EXCLUSIVE, offset);
}
static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
- return swp_type(entry) == SWP_DEVICE_EXCLUSIVE_READ ||
- swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE;
+ return swp_type(entry) == SWP_DEVICE_EXCLUSIVE;
}
-static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
-{
- return unlikely(swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE);
-}
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
@@ -227,12 +217,7 @@ static inline bool is_writable_device_private_entry(swp_entry_t entry)
return false;
}
-static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
-{
- return swp_entry(0, 0);
-}
-
-static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
+static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
{
return swp_entry(0, 0);
}
@@ -242,10 +227,6 @@ static inline bool is_device_exclusive_entry(swp_entry_t entry)
return false;
}
-static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
-{
- return false;
-}
#endif /* CONFIG_DEVICE_PRIVATE */
#ifdef CONFIG_MIGRATION
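With the read/write split removed, one entry type covers all
device-exclusive mappings. A hedged sketch of the resulting pattern --
the helper is hypothetical, and the assumption is that writability is
now tracked in the page table entry rather than in the swap type:

    #include <linux/swapops.h>

    static swp_entry_t example_make_exclusive(pgoff_t offset)
    {
            swp_entry_t entry = make_device_exclusive_entry(offset);

            /* a single predicate now recognizes the entry */
            WARN_ON(!is_device_exclusive_entry(entry));
            return entry;
    }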
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 69f9bedd0ee8..0b5ed6821080 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -86,8 +86,6 @@ struct thermal_trip {
#define THERMAL_TRIP_PRIV_TO_INT(_val_) (uintptr_t)(_val_)
#define THERMAL_INT_TO_TRIP_PRIV(_val_) (void *)(uintptr_t)(_val_)
-struct thermal_zone_device;
-
struct cooling_spec {
unsigned long upper; /* Highest cooling state */
unsigned long lower; /* Lowest cooling state */
diff --git a/include/linux/trace.h b/include/linux/trace.h
index fdcd76b7be83..7eaad857dee0 100644
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -72,8 +72,8 @@ static inline int unregister_ftrace_export(struct trace_export *export)
static inline void trace_printk_init_buffers(void)
{
}
-static inline int trace_array_printk(struct trace_array *tr, unsigned long ip,
- const char *fmt, ...)
+static inline __printf(3, 4)
+int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...)
{
return 0;
}
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 1ef95c0287f0..a93ed5ac3226 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -88,8 +88,8 @@ extern __printf(2, 3)
void trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern __printf(2, 0)
void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args);
-extern void
-trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
+extern __printf(2, 0)
+void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
extern int trace_print_seq(struct seq_file *m, struct trace_seq *s);
extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
int cnt);
@@ -113,8 +113,8 @@ static inline __printf(2, 3)
void trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
}
-static inline void
-trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
+static inline __printf(2, 0)
+void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
{
}
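The __printf(2, 0) annotations added above let the compiler type-check
format strings even for helpers that forward a va_list. A small sketch
of the idiom (the helper name is made up):

    #include <linux/trace_seq.h>
    #include <linux/stdarg.h>

    /* __printf(fmt_pos, 0): check the format at call sites; the 0 says
     * the function takes a va_list rather than variadic arguments. */
    static __printf(2, 0)
    void example_vnote(struct trace_seq *s, const char *fmt, va_list args)
    {
            trace_seq_vprintf(s, fmt, args);
    }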
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 2372f9357240..0a46e4054dec 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -239,7 +239,6 @@ struct tty_struct {
struct list_head tty_files;
-#define N_TTY_BUF_SIZE 4096
struct work_struct SAK_work;
} __randomize_layout;
@@ -251,7 +250,7 @@ struct tty_file_private {
};
/**
- * DOC: TTY Struct Flags
+ * enum tty_struct_flags - TTY Struct Flags
*
* These bits are used in the :c:member:`tty_struct.flags` field.
*
@@ -260,62 +259,64 @@ struct tty_file_private {
* tty->write. Thus, you must use the inline functions set_bit() and
* clear_bit() to make things atomic.
*
- * TTY_THROTTLED
+ * @TTY_THROTTLED:
* Driver input is throttled. The ldisc should call
* :c:member:`tty_driver.unthrottle()` in order to resume reception when
* it is ready to process more data (at threshold min).
*
- * TTY_IO_ERROR
+ * @TTY_IO_ERROR:
* If set, causes all subsequent userspace read/write calls on the tty to
* fail, returning -%EIO. (May be no ldisc too.)
*
- * TTY_OTHER_CLOSED
+ * @TTY_OTHER_CLOSED:
* Device is a pty and the other side has closed.
*
- * TTY_EXCLUSIVE
+ * @TTY_EXCLUSIVE:
* Exclusive open mode (a single opener).
*
- * TTY_DO_WRITE_WAKEUP
+ * @TTY_DO_WRITE_WAKEUP:
* If set, causes the driver to call the
* :c:member:`tty_ldisc_ops.write_wakeup()` method in order to resume
* transmission when it can accept more data to transmit.
*
- * TTY_LDISC_OPEN
+ * @TTY_LDISC_OPEN:
* Indicates that a line discipline is open. For debugging purposes only.
*
- * TTY_PTY_LOCK
+ * @TTY_PTY_LOCK:
* A flag private to pty code to implement %TIOCSPTLCK/%TIOCGPTLCK logic.
*
- * TTY_NO_WRITE_SPLIT
+ * @TTY_NO_WRITE_SPLIT:
* Prevent driver from splitting up writes into smaller chunks (preserve
* write boundaries to driver).
*
- * TTY_HUPPED
+ * @TTY_HUPPED:
* The TTY was hung up. This is set post :c:member:`tty_driver.hangup()`.
*
- * TTY_HUPPING
+ * @TTY_HUPPING:
* The TTY is in the process of hanging up to abort potential readers.
*
- * TTY_LDISC_CHANGING
+ * @TTY_LDISC_CHANGING:
* Line discipline for this TTY is being changed. I/O should not block
* when this is set. Use tty_io_nonblock() to check.
*
- * TTY_LDISC_HALTED
+ * @TTY_LDISC_HALTED:
* Line discipline for this TTY was stopped. No work should be queued to
* this ldisc.
*/
-#define TTY_THROTTLED 0
-#define TTY_IO_ERROR 1
-#define TTY_OTHER_CLOSED 2
-#define TTY_EXCLUSIVE 3
-#define TTY_DO_WRITE_WAKEUP 5
-#define TTY_LDISC_OPEN 11
-#define TTY_PTY_LOCK 16
-#define TTY_NO_WRITE_SPLIT 17
-#define TTY_HUPPED 18
-#define TTY_HUPPING 19
-#define TTY_LDISC_CHANGING 20
-#define TTY_LDISC_HALTED 22
+enum tty_struct_flags {
+ TTY_THROTTLED,
+ TTY_IO_ERROR,
+ TTY_OTHER_CLOSED,
+ TTY_EXCLUSIVE,
+ TTY_DO_WRITE_WAKEUP,
+ TTY_LDISC_OPEN,
+ TTY_PTY_LOCK,
+ TTY_NO_WRITE_SPLIT,
+ TTY_HUPPED,
+ TTY_HUPPING,
+ TTY_LDISC_CHANGING,
+ TTY_LDISC_HALTED,
+};
static inline bool tty_io_nonblock(struct tty_struct *tty, struct file *file)
{
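The switch from #defines to enum tty_struct_flags also packs the
previously sparse bit numbers into a contiguous range; since the values
are only ever used as bit indices into tty->flags, the atomic bitops
prescribed by the comment block keep working unchanged. An illustrative
check (the helper is hypothetical):

    #include <linux/tty.h>

    static bool example_tty_is_throttled(struct tty_struct *tty)
    {
            /* enum values are bit numbers, exactly as the #defines were */
            return test_bit(TTY_THROTTLED, &tty->flags);
    }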
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index d4cdc089f6c3..188ee9b768eb 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -17,6 +17,92 @@ struct serial_icounter_struct;
struct serial_struct;
/**
+ * enum tty_driver_flag -- TTY Driver Flags
+ *
+ * These are flags passed to tty_alloc_driver().
+ *
+ * @TTY_DRIVER_INSTALLED:
+ * Whether this driver was successfully installed. This is a tty internal
+ * flag. Do not touch.
+ *
+ * @TTY_DRIVER_RESET_TERMIOS:
+ * Requests the tty layer to reset the termios setting when the last
+ * process has closed the device. Used for PTYs, in particular.
+ *
+ * @TTY_DRIVER_REAL_RAW:
+ * Indicates that the driver will guarantee not to set any special
+ * character handling flags if this is set for the tty:
+ *
+ * ``(IGNBRK || (!BRKINT && !PARMRK)) && (IGNPAR || !INPCK)``
+ *
+ * That is, if there is no reason for the driver to
+ * send notifications of parity and break characters up to the line
+ * driver, it won't do so. This allows the line driver to optimize for
+ * this case if this flag is set. (Note that there is also a promise, if
+ * the above case is true, not to signal overruns, either.)
+ *
+ * @TTY_DRIVER_DYNAMIC_DEV:
+ * The individual tty devices need to be registered with a call to
+ * tty_register_device() when the device is found in the system and
+ * unregistered with a call to tty_unregister_device() so the devices
+ * show up properly in sysfs. If not set, all &tty_driver.num entries
+ * will be created by the tty core in sysfs when tty_register_driver() is
+ * called. This is to be used by drivers that have tty devices that can
+ * appear and disappear while the main tty driver is registered with the
+ * tty core.
+ *
+ * @TTY_DRIVER_DEVPTS_MEM:
+ * Don't use the standard arrays (&tty_driver.ttys and
+ * &tty_driver.termios), instead use dynamic memory keyed through the
+ * devpts filesystem. This is only applicable to the PTY driver.
+ *
+ * @TTY_DRIVER_HARDWARE_BREAK:
+ * Hardware handles break signals. Pass the requested timeout to the
+ * &tty_operations.break_ctl instead of using a simple on/off interface.
+ *
+ * @TTY_DRIVER_DYNAMIC_ALLOC:
+ * Do not allocate the per-line structures for this driver
+ * (&tty_driver.ports), as that would waste memory; the driver takes
+ * care of them itself. This is only applicable to the PTY driver.
+ *
+ * @TTY_DRIVER_UNNUMBERED_NODE:
+ * Do not create numbered ``/dev`` nodes. For example, create
+ * ``/dev/ttyprintk`` and not ``/dev/ttyprintk0``. Applicable only when a
+ * driver for a single tty device is being allocated.
+ */
+enum tty_driver_flag {
+ TTY_DRIVER_INSTALLED = BIT(0),
+ TTY_DRIVER_RESET_TERMIOS = BIT(1),
+ TTY_DRIVER_REAL_RAW = BIT(2),
+ TTY_DRIVER_DYNAMIC_DEV = BIT(3),
+ TTY_DRIVER_DEVPTS_MEM = BIT(4),
+ TTY_DRIVER_HARDWARE_BREAK = BIT(5),
+ TTY_DRIVER_DYNAMIC_ALLOC = BIT(6),
+ TTY_DRIVER_UNNUMBERED_NODE = BIT(7),
+};
+
+enum tty_driver_type {
+ TTY_DRIVER_TYPE_SYSTEM,
+ TTY_DRIVER_TYPE_CONSOLE,
+ TTY_DRIVER_TYPE_SERIAL,
+ TTY_DRIVER_TYPE_PTY,
+ TTY_DRIVER_TYPE_SCC,
+ TTY_DRIVER_TYPE_SYSCONS,
+};
+
+enum tty_driver_subtype {
+ SYSTEM_TYPE_TTY = 1,
+ SYSTEM_TYPE_CONSOLE,
+ SYSTEM_TYPE_SYSCONS,
+ SYSTEM_TYPE_SYSPTMX,
+
+ PTY_TYPE_MASTER = 1,
+ PTY_TYPE_SLAVE,
+
+ SERIAL_TYPE_NORMAL = 1,
+};
+
+/**
* struct tty_operations -- interface between driver and tty
*
* @lookup: ``struct tty_struct *()(struct tty_driver *self, struct file *,
@@ -414,8 +500,8 @@ struct tty_operations {
* @major: major /dev device number (zero for autoassignment)
* @minor_start: the first minor /dev device number
* @num: number of devices allocated
- * @type: type of tty driver (%TTY_DRIVER_TYPE_)
- * @subtype: subtype of tty driver (%SYSTEM_TYPE_, %PTY_TYPE_, %SERIAL_TYPE_)
+ * @type: type of tty driver (enum tty_driver_type)
+ * @subtype: subtype of tty driver (enum tty_driver_subtype)
* @init_termios: termios to set to each tty initially (e.g. %tty_std_termios)
* @flags: tty driver flags (%TTY_DRIVER_)
* @proc_entry: proc fs entry, used internally
@@ -447,8 +533,8 @@ struct tty_driver {
int major;
int minor_start;
unsigned int num;
- short type;
- short subtype;
+ enum tty_driver_type type;
+ enum tty_driver_subtype subtype;
struct ktermios init_termios;
unsigned long flags;
struct proc_dir_entry *proc_entry;
@@ -478,7 +564,13 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line);
void tty_driver_kref_put(struct tty_driver *driver);
-/* Use TTY_DRIVER_* flags below */
+/**
+ * tty_alloc_driver - allocate tty driver
+ * @lines: count of lines this driver can handle at most
+ * @flags: a bitwise OR of enum tty_driver_flag values, stored in driver->flags
+ *
+ * Returns: struct tty_driver or a PTR-encoded error (use IS_ERR() and friends).
+ */
#define tty_alloc_driver(lines, flags) \
__tty_alloc_driver(lines, THIS_MODULE, flags)
@@ -494,84 +586,6 @@ static inline void tty_set_operations(struct tty_driver *driver,
driver->ops = op;
}
-/**
- * DOC: TTY Driver Flags
- *
- * TTY_DRIVER_RESET_TERMIOS
- * Requests the tty layer to reset the termios setting when the last
- * process has closed the device. Used for PTYs, in particular.
- *
- * TTY_DRIVER_REAL_RAW
- * Indicates that the driver will guarantee not to set any special
- * character handling flags if this is set for the tty:
- *
- * ``(IGNBRK || (!BRKINT && !PARMRK)) && (IGNPAR || !INPCK)``
- *
- * That is, if there is no reason for the driver to
- * send notifications of parity and break characters up to the line
- * driver, it won't do so. This allows the line driver to optimize for
- * this case if this flag is set. (Note that there is also a promise, if
- * the above case is true, not to signal overruns, either.)
- *
- * TTY_DRIVER_DYNAMIC_DEV
- * The individual tty devices need to be registered with a call to
- * tty_register_device() when the device is found in the system and
- * unregistered with a call to tty_unregister_device() so the devices will
- * be show up properly in sysfs. If not set, all &tty_driver.num entries
- * will be created by the tty core in sysfs when tty_register_driver() is
- * called. This is to be used by drivers that have tty devices that can
- * appear and disappear while the main tty driver is registered with the
- * tty core.
- *
- * TTY_DRIVER_DEVPTS_MEM
- * Don't use the standard arrays (&tty_driver.ttys and
- * &tty_driver.termios), instead use dynamic memory keyed through the
- * devpts filesystem. This is only applicable to the PTY driver.
- *
- * TTY_DRIVER_HARDWARE_BREAK
- * Hardware handles break signals. Pass the requested timeout to the
- * &tty_operations.break_ctl instead of using a simple on/off interface.
- *
- * TTY_DRIVER_DYNAMIC_ALLOC
- * Do not allocate structures which are needed per line for this driver
- * (&tty_driver.ports) as it would waste memory. The driver will take
- * care. This is only applicable to the PTY driver.
- *
- * TTY_DRIVER_UNNUMBERED_NODE
- * Do not create numbered ``/dev`` nodes. For example, create
- * ``/dev/ttyprintk`` and not ``/dev/ttyprintk0``. Applicable only when a
- * driver for a single tty device is being allocated.
- */
-#define TTY_DRIVER_INSTALLED 0x0001
-#define TTY_DRIVER_RESET_TERMIOS 0x0002
-#define TTY_DRIVER_REAL_RAW 0x0004
-#define TTY_DRIVER_DYNAMIC_DEV 0x0008
-#define TTY_DRIVER_DEVPTS_MEM 0x0010
-#define TTY_DRIVER_HARDWARE_BREAK 0x0020
-#define TTY_DRIVER_DYNAMIC_ALLOC 0x0040
-#define TTY_DRIVER_UNNUMBERED_NODE 0x0080
-
-/* tty driver types */
-#define TTY_DRIVER_TYPE_SYSTEM 0x0001
-#define TTY_DRIVER_TYPE_CONSOLE 0x0002
-#define TTY_DRIVER_TYPE_SERIAL 0x0003
-#define TTY_DRIVER_TYPE_PTY 0x0004
-#define TTY_DRIVER_TYPE_SCC 0x0005 /* scc driver */
-#define TTY_DRIVER_TYPE_SYSCONS 0x0006
-
-/* system subtypes (magic, used by tty_io.c) */
-#define SYSTEM_TYPE_TTY 0x0001
-#define SYSTEM_TYPE_CONSOLE 0x0002
-#define SYSTEM_TYPE_SYSCONS 0x0003
-#define SYSTEM_TYPE_SYSPTMX 0x0004
-
-/* pty subtypes (magic, used by tty_io.c) */
-#define PTY_TYPE_MASTER 0x0001
-#define PTY_TYPE_SLAVE 0x0002
-
-/* serial subtype definitions */
-#define SERIAL_TYPE_NORMAL 1
-
int tty_register_driver(struct tty_driver *driver);
void tty_unregister_driver(struct tty_driver *driver);
struct device *tty_register_device(struct tty_driver *driver, unsigned index,
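The new kernel-doc above spells out that tty_alloc_driver() returns a
PTR-encoded error rather than NULL. A minimal probe-style sketch; the
line count and flag choice are placeholders:

    #include <linux/tty_driver.h>

    static struct tty_driver *example_alloc(void)
    {
            struct tty_driver *drv;

            drv = tty_alloc_driver(4, TTY_DRIVER_REAL_RAW |
                                       TTY_DRIVER_DYNAMIC_DEV);
            if (IS_ERR(drv))
                    return drv;      /* propagate the PTR-encoded error */

            drv->type = TTY_DRIVER_TYPE_SERIAL;
            drv->subtype = SERIAL_TYPE_NORMAL;
            return drv;
    }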
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index af01e89074b2..c5cccc3fc1e8 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -39,7 +39,6 @@ do { \
int ldsem_down_read(struct ld_semaphore *sem, long timeout);
int ldsem_down_read_trylock(struct ld_semaphore *sem);
int ldsem_down_write(struct ld_semaphore *sem, long timeout);
-int ldsem_down_write_trylock(struct ld_semaphore *sem);
void ldsem_up_read(struct ld_semaphore *sem);
void ldsem_up_write(struct ld_semaphore *sem);
diff --git a/include/linux/types.h b/include/linux/types.h
index 1c509ce8f7f6..49b79c8bb1a9 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -92,6 +92,7 @@ typedef unsigned char unchar;
typedef unsigned short ushort;
typedef unsigned int uint;
typedef unsigned long ulong;
+typedef unsigned long long ullong;
#ifndef __BIT_TYPES_DEFINED__
#define __BIT_TYPES_DEFINED__
@@ -248,5 +249,17 @@ typedef void (*swap_func_t)(void *a, void *b, int size);
typedef int (*cmp_r_func_t)(const void *a, const void *b, const void *priv);
typedef int (*cmp_func_t)(const void *a, const void *b);
+/*
+ * rcuwait provides a way of blocking and waking up a single
+ * task in an rcu-safe manner.
+ *
+ * The only time @task is non-nil is when a user is blocked (or
+ * checking if it needs to) on a condition, and reset as soon as we
+ * know that the condition has succeeded and are awoken.
+ */
+struct rcuwait {
+ struct task_struct __rcu *task;
+};
+
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_TYPES_H */
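struct rcuwait itself moves into types.h above; the wait/wake helpers
live in <linux/rcuwait.h>. A hedged sketch of the single-waiter pattern
those helpers implement (names prefixed example_ are made up):

    #include <linux/rcuwait.h>

    static struct rcuwait example_w;        /* or rcuwait_init() at runtime */
    static bool example_done;

    /* Waiter: sleeps until the condition is observed true. */
    static void example_wait(void)
    {
            rcuwait_wait_event(&example_w, READ_ONCE(example_done),
                               TASK_UNINTERRUPTIBLE);
    }

    /* Waker: publish the condition, then wake the (single) waiter. */
    static void example_wake(void)
    {
            WRITE_ONCE(example_done, true);
            rcuwait_wake_up(&example_w);
    }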
diff --git a/include/linux/usb.h b/include/linux/usb.h
index cfa8005e24f9..b46738701f8d 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -51,6 +51,7 @@ struct ep_device;
* @desc: descriptor for this endpoint, wMaxPacketSize in native byteorder
* @ss_ep_comp: SuperSpeed companion descriptor for this endpoint
* @ssp_isoc_ep_comp: SuperSpeedPlus isoc companion descriptor for this endpoint
+ * @eusb2_isoc_ep_comp: eUSB2 isoc companion descriptor for this endpoint
* @urb_list: urbs queued to this endpoint; maintained by usbcore
* @hcpriv: for use by HCD; typically holds hardware dma queue head (QH)
* with one or more transfer descriptors (TDs) per urb
@@ -64,9 +65,10 @@ struct ep_device;
* descriptor within an active interface in a given USB configuration.
*/
struct usb_host_endpoint {
- struct usb_endpoint_descriptor desc;
- struct usb_ss_ep_comp_descriptor ss_ep_comp;
- struct usb_ssp_isoc_ep_comp_descriptor ssp_isoc_ep_comp;
+ struct usb_endpoint_descriptor desc;
+ struct usb_ss_ep_comp_descriptor ss_ep_comp;
+ struct usb_ssp_isoc_ep_comp_descriptor ssp_isoc_ep_comp;
+ struct usb_eusb2_isoc_ep_comp_descriptor eusb2_isoc_ep_comp;
struct list_head urb_list;
void *hcpriv;
struct ep_device *ep_dev; /* For sysfs info */
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index 3963e55e88a3..fbdef950f06c 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -61,7 +61,7 @@ struct musb_hdrc_eps_bits {
};
struct musb_hdrc_config {
- struct musb_fifo_cfg *fifo_cfg; /* board fifo configuration */
+ const struct musb_fifo_cfg *fifo_cfg; /* board fifo configuration */
unsigned fifo_cfg_size; /* size of the fifo configuration */
/* MUSB configuration-specific details */
diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h
index 5050f502c1ed..4b651065738a 100644
--- a/include/linux/usb/ulpi.h
+++ b/include/linux/usb/ulpi.h
@@ -49,19 +49,10 @@
/*-------------------------------------------------------------------------*/
#if IS_ENABLED(CONFIG_USB_ULPI)
-struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops,
- unsigned int flags);
-
struct usb_phy *devm_otg_ulpi_create(struct device *dev,
struct usb_phy_io_ops *ops,
unsigned int flags);
#else
-static inline struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops,
- unsigned int flags)
-{
- return NULL;
-}
-
static inline struct usb_phy *devm_otg_ulpi_create(struct device *dev,
struct usb_phy_io_ops *ops,
unsigned int flags)
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 7183e5aca282..a0bb6d012137 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -5,8 +5,10 @@
#include <linux/kref.h>
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
+#include <linux/rculist_nulls.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
+#include <linux/rcuref.h>
#include <linux/rwsem.h>
#include <linux/sysctl.h>
#include <linux/err.h>
@@ -115,10 +117,11 @@ struct user_namespace {
} __randomize_layout;
struct ucounts {
- struct hlist_node node;
+ struct hlist_nulls_node node;
struct user_namespace *ns;
kuid_t uid;
- atomic_t count;
+ struct rcu_head rcu;
+ rcuref_t count;
atomic_long_t ucount[UCOUNT_COUNTS];
atomic_long_t rlimit[UCOUNT_RLIMIT_COUNTS];
};
@@ -131,9 +134,15 @@ void retire_userns_sysctls(struct user_namespace *ns);
struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid, enum ucount_type type);
void dec_ucount(struct ucounts *ucounts, enum ucount_type type);
struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid);
-struct ucounts * __must_check get_ucounts(struct ucounts *ucounts);
void put_ucounts(struct ucounts *ucounts);
+static inline struct ucounts * __must_check get_ucounts(struct ucounts *ucounts)
+{
+ if (rcuref_get(&ucounts->count))
+ return ucounts;
+ return NULL;
+}
+
static inline long get_rlimit_value(struct ucounts *ucounts, enum rlimit_type type)
{
return atomic_long_read(&ucounts->rlimit[type]);
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 000a6cab2d31..707b00772ce1 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -67,6 +67,7 @@ struct vfio_device {
struct inode *inode;
#if IS_ENABLED(CONFIG_IOMMUFD)
struct iommufd_device *iommufd_device;
+ struct ida pasids;
u8 iommufd_attached:1;
#endif
u8 cdev_opened:1;
@@ -91,6 +92,8 @@ struct vfio_device {
* bound iommufd. Undo in unbind_iommufd if @detach_ioas is not
* called.
* @detach_ioas: Opposite of attach_ioas
+ * @pasid_attach_ioas: The pasid variation of attach_ioas
+ * @pasid_detach_ioas: Opposite of pasid_attach_ioas
* @open_device: Called when the first file descriptor is opened for this device
* @close_device: Opposite of open_device
* @read: Perform read(2) on device file descriptor
@@ -115,6 +118,9 @@ struct vfio_device_ops {
void (*unbind_iommufd)(struct vfio_device *vdev);
int (*attach_ioas)(struct vfio_device *vdev, u32 *pt_id);
void (*detach_ioas)(struct vfio_device *vdev);
+ int (*pasid_attach_ioas)(struct vfio_device *vdev, u32 pasid,
+ u32 *pt_id);
+ void (*pasid_detach_ioas)(struct vfio_device *vdev, u32 pasid);
int (*open_device)(struct vfio_device *vdev);
void (*close_device)(struct vfio_device *vdev);
ssize_t (*read)(struct vfio_device *vdev, char __user *buf,
@@ -139,6 +145,10 @@ int vfio_iommufd_physical_bind(struct vfio_device *vdev,
void vfio_iommufd_physical_unbind(struct vfio_device *vdev);
int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id);
void vfio_iommufd_physical_detach_ioas(struct vfio_device *vdev);
+int vfio_iommufd_physical_pasid_attach_ioas(struct vfio_device *vdev,
+ u32 pasid, u32 *pt_id);
+void vfio_iommufd_physical_pasid_detach_ioas(struct vfio_device *vdev,
+ u32 pasid);
int vfio_iommufd_emulated_bind(struct vfio_device *vdev,
struct iommufd_ctx *ictx, u32 *out_device_id);
void vfio_iommufd_emulated_unbind(struct vfio_device *vdev);
@@ -166,6 +176,10 @@ vfio_iommufd_get_dev_id(struct vfio_device *vdev, struct iommufd_ctx *ictx)
((int (*)(struct vfio_device *vdev, u32 *pt_id)) NULL)
#define vfio_iommufd_physical_detach_ioas \
((void (*)(struct vfio_device *vdev)) NULL)
+#define vfio_iommufd_physical_pasid_attach_ioas \
+ ((int (*)(struct vfio_device *vdev, u32 pasid, u32 *pt_id)) NULL)
+#define vfio_iommufd_physical_pasid_detach_ioas \
+ ((void (*)(struct vfio_device *vdev, u32 pasid)) NULL)
#define vfio_iommufd_emulated_bind \
((int (*)(struct vfio_device *vdev, struct iommufd_ctx *ictx, \
u32 *out_device_id)) NULL)
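A sketch of how a physical VFIO driver would opt into the new PASID
attach path, mirroring the existing attach_ioas/detach_ioas wiring (the
ops table below is illustrative, not from the patch):

    #include <linux/vfio.h>

    static const struct vfio_device_ops example_ops = {
            .name                   = "example-vfio-pci",
            .bind_iommufd           = vfio_iommufd_physical_bind,
            .unbind_iommufd         = vfio_iommufd_physical_unbind,
            .attach_ioas            = vfio_iommufd_physical_attach_ioas,
            .detach_ioas            = vfio_iommufd_physical_detach_ioas,
            .pasid_attach_ioas      = vfio_iommufd_physical_pasid_attach_ioas,
            .pasid_detach_ioas      = vfio_iommufd_physical_pasid_detach_ioas,
    };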
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 5a37cb2b6f93..9e15a088ba38 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -41,9 +41,11 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGSTEAL_KSWAPD,
PGSTEAL_DIRECT,
PGSTEAL_KHUGEPAGED,
+ PGSTEAL_PROACTIVE,
PGSCAN_KSWAPD,
PGSCAN_DIRECT,
PGSCAN_KHUGEPAGED,
+ PGSCAN_PROACTIVE,
PGSCAN_DIRECT_THROTTLE,
PGSCAN_ANON,
PGSCAN_FILE,
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 4751e3ecc467..b2ccb6845595 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -504,7 +504,7 @@ static inline const char *node_stat_name(enum node_stat_item item)
static inline const char *lru_list_name(enum lru_list lru)
{
- return node_stat_name(NR_LRU_BASE + (enum node_stat_item)lru) + 3; // skip "nr_"
+ return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
}
#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index caf4f0b12235..eda4b62511f7 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -313,6 +313,30 @@ static inline void cgroup_writeback_umount(struct super_block *sb)
/*
* mm/page-writeback.c
*/
+/* consolidated parameters for balance_dirty_pages() and its subroutines */
+struct dirty_throttle_control {
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct wb_domain *dom;
+ struct dirty_throttle_control *gdtc; /* only set in memcg dtc's */
+#endif
+ struct bdi_writeback *wb;
+ struct fprop_local_percpu *wb_completions;
+
+ unsigned long avail; /* dirtyable */
+ unsigned long dirty; /* file_dirty + write + nfs */
+ unsigned long thresh; /* dirty threshold */
+ unsigned long bg_thresh; /* dirty background threshold */
+ unsigned long limit; /* hard dirty limit */
+
+ unsigned long wb_dirty; /* per-wb counterparts */
+ unsigned long wb_thresh;
+ unsigned long wb_bg_thresh;
+
+ unsigned long pos_ratio;
+ bool freerun;
+ bool dirty_exceeded;
+};
+
void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
void laptop_mode_timer_fn(struct timer_list *t);
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 0b618ec04115..78eede109b1a 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -1555,6 +1555,8 @@ int xa_get_order(struct xarray *, unsigned long index);
int xas_get_order(struct xa_state *xas);
void xas_split(struct xa_state *, void *entry, unsigned int order);
void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t);
+void xas_try_split(struct xa_state *xas, void *entry, unsigned int order);
+unsigned int xas_try_split_min_order(unsigned int order);
#else
static inline int xa_get_order(struct xarray *xa, unsigned long index)
{
@@ -1576,6 +1578,17 @@ static inline void xas_split_alloc(struct xa_state *xas, void *entry,
unsigned int order, gfp_t gfp)
{
}
+
+static inline void xas_try_split(struct xa_state *xas, void *entry,
+ unsigned int order)
+{
+}
+
+static inline unsigned int xas_try_split_min_order(unsigned int order)
+{
+ return 0;
+}
+
#endif
/**
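A hedged sketch of how the two new split helpers might combine: split a
high-order entry stepwise, letting xas_try_split_min_order() pick each
intermediate order so that a single step needs at most one new node.
The calling convention (target order set in the xa_state, current order
passed as the argument) is an assumption here:

    #include <linux/xarray.h>

    static void example_split(struct xa_state *xas, void *entry,
                              unsigned int from, unsigned int to)
    {
            unsigned int cur = from;

            while (cur > to) {
                    unsigned int next = max(xas_try_split_min_order(cur), to);

                    xas_set_order(xas, xas->xa_index, next);
                    xas_try_split(xas, entry, cur);
                    if (xas_error(xas))
                            break;
                    cur = next;
            }
    }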
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index a67d62b79698..52f30e526607 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -4,9 +4,8 @@
*
* Copyright (C) 2014 Dan Streetman
*
- * This is a common frontend for the zbud and zsmalloc memory
- * storage pool implementations. Typically, this is used to
- * store compressed memory.
+ * This is a common frontend for the zswap compressed memory storage
+ * implementations.
*/
#ifndef _ZPOOL_H_
@@ -14,25 +13,6 @@
struct zpool;
-/*
- * Control how a handle is mapped. It will be ignored if the
- * implementation does not support it. Its use is optional.
- * Note that this does not refer to memory protection, it
- * refers to how the memory will be copied in/out if copying
- * is necessary during mapping; read-write is the safest as
- * it copies the existing memory in on map, and copies the
- * changed memory back out on unmap. Write-only does not copy
- * in the memory and should only be used for initialization.
- * If in doubt, use ZPOOL_MM_DEFAULT which is read-write.
- */
-enum zpool_mapmode {
- ZPOOL_MM_RW, /* normal read-write mapping */
- ZPOOL_MM_RO, /* read-only (no copy-out at unmap time) */
- ZPOOL_MM_WO, /* write-only (no copy-in at map time) */
-
- ZPOOL_MM_DEFAULT = ZPOOL_MM_RW
-};
-
bool zpool_has_pool(char *type);
struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp);
@@ -41,17 +21,19 @@ const char *zpool_get_type(struct zpool *pool);
void zpool_destroy_pool(struct zpool *pool);
-bool zpool_malloc_support_movable(struct zpool *pool);
-
int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
unsigned long *handle);
void zpool_free(struct zpool *pool, unsigned long handle);
-void *zpool_map_handle(struct zpool *pool, unsigned long handle,
- enum zpool_mapmode mm);
+void *zpool_obj_read_begin(struct zpool *zpool, unsigned long handle,
+ void *local_copy);
+
+void zpool_obj_read_end(struct zpool *zpool, unsigned long handle,
+ void *handle_mem);
-void zpool_unmap_handle(struct zpool *pool, unsigned long handle);
+void zpool_obj_write(struct zpool *zpool, unsigned long handle,
+ void *handle_mem, size_t mem_len);
u64 zpool_get_total_pages(struct zpool *pool);
@@ -81,15 +63,16 @@ struct zpool_driver {
void *(*create)(const char *name, gfp_t gfp);
void (*destroy)(void *pool);
- bool malloc_support_movable;
int (*malloc)(void *pool, size_t size, gfp_t gfp,
unsigned long *handle);
void (*free)(void *pool, unsigned long handle);
- bool sleep_mapped;
- void *(*map)(void *pool, unsigned long handle,
- enum zpool_mapmode mm);
- void (*unmap)(void *pool, unsigned long handle);
+ void *(*obj_read_begin)(void *pool, unsigned long handle,
+ void *local_copy);
+ void (*obj_read_end)(void *pool, unsigned long handle,
+ void *handle_mem);
+ void (*obj_write)(void *pool, unsigned long handle,
+ void *handle_mem, size_t mem_len);
u64 (*total_pages)(void *pool);
};
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index a48cd0ffe57d..c26baf9fb331 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -16,23 +16,6 @@
#include <linux/types.h>
-/*
- * zsmalloc mapping modes
- *
- * NOTE: These only make a difference when a mapped object spans pages.
- */
-enum zs_mapmode {
- ZS_MM_RW, /* normal read-write mapping */
- ZS_MM_RO, /* read-only (no copy-out at unmap time) */
- ZS_MM_WO /* write-only (no copy-in at map time) */
- /*
- * NOTE: ZS_MM_WO should only be used for initializing new
- * (uninitialized) allocations. Partial writes to already
- * initialized allocations should use ZS_MM_RW to preserve the
- * existing data.
- */
-};
-
struct zs_pool_stats {
/* How many pages were migrated (freed) */
atomic_long_t pages_compacted;
@@ -48,14 +31,18 @@ void zs_free(struct zs_pool *pool, unsigned long obj);
size_t zs_huge_class_size(struct zs_pool *pool);
-void *zs_map_object(struct zs_pool *pool, unsigned long handle,
- enum zs_mapmode mm);
-void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
-
unsigned long zs_get_total_pages(struct zs_pool *pool);
unsigned long zs_compact(struct zs_pool *pool);
unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size);
void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
+
+void *zs_obj_read_begin(struct zs_pool *pool, unsigned long handle,
+ void *local_copy);
+void zs_obj_read_end(struct zs_pool *pool, unsigned long handle,
+ void *handle_mem);
+void zs_obj_write(struct zs_pool *pool, unsigned long handle,
+ void *handle_mem, size_t mem_len);
+
#endif
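The map/unmap pair gives way to an explicit read window plus a plain
write. A hedged sketch of the presumed read pattern: the pool hands
back either a direct pointer or the caller-supplied bounce buffer (for
objects spanning a page boundary), so the copy-out is conditional:

    #include <linux/zsmalloc.h>
    #include <linux/string.h>

    static void example_read_obj(struct zs_pool *pool, unsigned long handle,
                                 void *buf, size_t len)
    {
            void *src = zs_obj_read_begin(pool, handle, buf);

            if (src != buf)         /* direct mapping: copy out ourselves */
                    memcpy(buf, src, len);
            zs_obj_read_end(pool, handle, src);
    }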
diff --git a/include/linux/zswap.h b/include/linux/zswap.h
index d961ead91bf1..30c193a1207e 100644
--- a/include/linux/zswap.h
+++ b/include/linux/zswap.h
@@ -26,7 +26,7 @@ struct zswap_lruvec_state {
unsigned long zswap_total_pages(void);
bool zswap_store(struct folio *folio);
-bool zswap_load(struct folio *folio);
+int zswap_load(struct folio *folio);
void zswap_invalidate(swp_entry_t swp);
int zswap_swapon(int type, unsigned long nr_pages);
void zswap_swapoff(int type);
@@ -44,9 +44,9 @@ static inline bool zswap_store(struct folio *folio)
return false;
}
-static inline bool zswap_load(struct folio *folio)
+static inline int zswap_load(struct folio *folio)
{
- return false;
+ return -ENOENT;
}
static inline void zswap_invalidate(swp_entry_t swp) {}
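zswap_load() now returns a tri-state int instead of a bool, as the
-ENOENT stub above shows. A sketch of a caller distinguishing the
cases; any return convention beyond -ENOENT is an assumption:

    #include <linux/zswap.h>

    static int example_swap_read(struct folio *folio)
    {
            int ret = zswap_load(folio);

            if (ret == -ENOENT)     /* not in zswap: read the swap device */
                    return 1;
            return ret;             /* 0: loaded in place; else hard error */
    }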
diff --git a/include/misc/cxl-base.h b/include/misc/cxl-base.h
deleted file mode 100644
index 2251da7f32d9..000000000000
--- a/include/misc/cxl-base.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#ifndef _MISC_CXL_BASE_H
-#define _MISC_CXL_BASE_H
-
-#ifdef CONFIG_CXL_BASE
-
-#define CXL_IRQ_RANGES 4
-
-struct cxl_irq_ranges {
- irq_hw_number_t offset[CXL_IRQ_RANGES];
- irq_hw_number_t range[CXL_IRQ_RANGES];
-};
-
-extern atomic_t cxl_use_count;
-
-static inline bool cxl_ctx_in_use(void)
-{
- return (atomic_read(&cxl_use_count) != 0);
-}
-
-static inline void cxl_ctx_get(void)
-{
- atomic_inc(&cxl_use_count);
-}
-
-static inline void cxl_ctx_put(void)
-{
- atomic_dec(&cxl_use_count);
-}
-
-struct cxl_afu *cxl_afu_get(struct cxl_afu *afu);
-void cxl_afu_put(struct cxl_afu *afu);
-void cxl_slbia(struct mm_struct *mm);
-
-#else /* CONFIG_CXL_BASE */
-
-static inline bool cxl_ctx_in_use(void) { return false; }
-static inline struct cxl_afu *cxl_afu_get(struct cxl_afu *afu) { return NULL; }
-static inline void cxl_afu_put(struct cxl_afu *afu) {}
-static inline void cxl_slbia(struct mm_struct *mm) {}
-
-#endif /* CONFIG_CXL_BASE */
-
-#endif
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
deleted file mode 100644
index d8044299d654..000000000000
--- a/include/misc/cxl.h
+++ /dev/null
@@ -1,265 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2015 IBM Corp.
- */
-
-#ifndef _MISC_CXL_H
-#define _MISC_CXL_H
-
-#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/interrupt.h>
-#include <uapi/misc/cxl.h>
-
-/*
- * This documents the in kernel API for driver to use CXL. It allows kernel
- * drivers to bind to AFUs using an AFU configuration record exposed as a PCI
- * configuration record.
- *
- * This API enables control over AFU and contexts which can't be part of the
- * generic PCI API. This API is agnostic to the actual AFU.
- */
-
-/* Get the AFU associated with a pci_dev */
-struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev);
-
-/* Get the AFU conf record number associated with a pci_dev */
-unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev);
-
-
-/*
- * Context lifetime overview:
- *
- * An AFU context may be inited and then started and stopped multiple times
- * before it's released. ie.
- * - cxl_dev_context_init()
- * - cxl_start_context()
- * - cxl_stop_context()
- * - cxl_start_context()
- * - cxl_stop_context()
- * ...repeat...
- * - cxl_release_context()
- * Once released, a context can't be started again.
- *
- * One context is inited by the cxl driver for every pci_dev. This is to be
- * used as a default kernel context. cxl_get_context() will get this
- * context. This context will be released by PCI hot unplug, so doesn't need to
- * be released explicitly by drivers.
- *
- * Additional kernel contexts may be inited using cxl_dev_context_init().
- * These must be released using cxl_context_detach().
- *
- * Once a context has been inited, IRQs may be configured. Firstly these IRQs
- * must be allocated (cxl_allocate_afu_irqs()), then individually mapped to
- * specific handlers (cxl_map_afu_irq()).
- *
- * These IRQs can be unmapped (cxl_unmap_afu_irq()) and finally released
- * (cxl_free_afu_irqs()).
- *
- * The AFU can be reset (cxl_afu_reset()). This will cause the PSL/AFU
- * hardware to lose track of all contexts. It's upto the caller of
- * cxl_afu_reset() to restart these contexts.
- */
-
-/*
- * On pci_enabled_device(), the cxl driver will init a single cxl context for
- * use by the driver. It doesn't start this context (as that will likely
- * generate DMA traffic for most AFUs).
- *
- * This gets the default context associated with this pci_dev. This context
- * doesn't need to be released as this will be done by the PCI subsystem on hot
- * unplug.
- */
-struct cxl_context *cxl_get_context(struct pci_dev *dev);
-/*
- * Allocate and initalise a context associated with a AFU PCI device. This
- * doesn't start the context in the AFU.
- */
-struct cxl_context *cxl_dev_context_init(struct pci_dev *dev);
-/*
- * Release and free a context. Context should be stopped before calling.
- */
-int cxl_release_context(struct cxl_context *ctx);
-
-/*
- * Set and get private data associated with a context. Allows drivers to have a
- * back pointer to some useful structure.
- */
-int cxl_set_priv(struct cxl_context *ctx, void *priv);
-void *cxl_get_priv(struct cxl_context *ctx);
-
-/*
- * Allocate AFU interrupts for this context. num=0 will allocate the default
- * for this AFU as given in the AFU descriptor. This number doesn't include the
- * interrupt 0 (CAIA defines AFU IRQ 0 for page faults). Each interrupt to be
- * used must map a handler with cxl_map_afu_irq.
- */
-int cxl_allocate_afu_irqs(struct cxl_context *cxl, int num);
-/* Free allocated interrupts */
-void cxl_free_afu_irqs(struct cxl_context *cxl);
-
-/*
- * Map a handler for an AFU interrupt associated with a particular context. AFU
- * IRQS numbers start from 1 (CAIA defines AFU IRQ 0 for page faults). cookie
- * is private data is that will be provided to the interrupt handler.
- */
-int cxl_map_afu_irq(struct cxl_context *cxl, int num,
- irq_handler_t handler, void *cookie, char *name);
-/* unmap mapped IRQ handlers */
-void cxl_unmap_afu_irq(struct cxl_context *cxl, int num, void *cookie);
-
-/*
- * Start work on the AFU. This starts an cxl context and associates it with a
- * task. task == NULL will make it a kernel context.
- */
-int cxl_start_context(struct cxl_context *ctx, u64 wed,
- struct task_struct *task);
-/*
- * Stop a context and remove it from the PSL
- */
-int cxl_stop_context(struct cxl_context *ctx);
-
-/* Reset the AFU */
-int cxl_afu_reset(struct cxl_context *ctx);
-
-/*
- * Set a context as a master context.
- * This sets the default problem space area mapped as the full space, rather
- * than just the per context area (for slaves).
- */
-void cxl_set_master(struct cxl_context *ctx);
-
-/*
- * Map and unmap the AFU Problem Space area. The amount and location mapped
- * depends on if this context is a master or slave.
- */
-void __iomem *cxl_psa_map(struct cxl_context *ctx);
-void cxl_psa_unmap(void __iomem *addr);
-
-/* Get the process element for this context */
-int cxl_process_element(struct cxl_context *ctx);
-
-/*
- * These calls allow drivers to create their own file descriptors and make them
- * identical to the cxl file descriptor user API. An example use case:
- *
- * struct file_operations cxl_my_fops = {};
- * ......
- * // Init the context
- * ctx = cxl_dev_context_init(dev);
- * if (IS_ERR(ctx))
- * return PTR_ERR(ctx);
- * // Create and attach a new file descriptor to my file ops
- * file = cxl_get_fd(ctx, &cxl_my_fops, &fd);
- * // Start context
- * rc = cxl_start_work(ctx, &work.work);
- * if (rc) {
- * fput(file);
- * put_unused_fd(fd);
- * return -ENODEV;
- * }
- * // No error paths after installing the fd
- * fd_install(fd, file);
- * return fd;
- *
- * This inits a context, and gets a file descriptor and associates some file
- * ops to that file descriptor. If the file ops are blank, the cxl driver will
- * fill them in with the default ones that mimic the standard user API. Once
- * completed, the file descriptor can be installed. Once the file descriptor is
- * installed, it's visible to the user so no errors must occur past this point.
- *
- * If cxl_fd_release() file op call is installed, the context will be stopped
- * and released when the fd is released. Hence the driver won't need to manage
- * this itself.
- */
-
-/*
- * Take a context and associate it with my file ops. Returns the associated
- * file and file descriptor. Any file ops which are blank are filled in by the
- * cxl driver with the default ops to mimic the standard API.
- */
-struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
- int *fd);
-/* Get the context associated with this file */
-struct cxl_context *cxl_fops_get_context(struct file *file);
-/*
- * Start a context associated a struct cxl_ioctl_start_work used by the
- * standard cxl user API.
- */
-int cxl_start_work(struct cxl_context *ctx,
- struct cxl_ioctl_start_work *work);
-/*
- * Export all the existing fops so drivers can use them
- */
-int cxl_fd_open(struct inode *inode, struct file *file);
-int cxl_fd_release(struct inode *inode, struct file *file);
-long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm);
-__poll_t cxl_fd_poll(struct file *file, struct poll_table_struct *poll);
-ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
- loff_t *off);
-
-/*
- * For EEH, a driver may want to assert a PERST will reload the same image
- * from flash into the FPGA.
- *
- * This is a property of the entire adapter, not a single AFU, so drivers
- * should set this property with care!
- */
-void cxl_perst_reloads_same_image(struct cxl_afu *afu,
- bool perst_reloads_same_image);
-
-/*
- * Read the VPD for the card where the AFU resides
- */
-ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count);
-
-/*
- * AFU driver ops allow an AFU driver to create their own events to pass to
- * userspace through the file descriptor as a simpler alternative to overriding
- * the read() and poll() calls that works with the generic cxl events. These
- * events are given priority over the generic cxl events, so they will be
- * delivered first if multiple types of events are pending.
- *
- * The AFU driver must call cxl_context_events_pending() to notify the cxl
- * driver that new events are ready to be delivered for a specific context.
- * cxl_context_events_pending() will adjust the current count of AFU driver
- * events for this context, and wake up anyone waiting on the context wait
- * queue.
- *
- * The cxl driver will then call fetch_event() to get a structure defining
- * the size and address of the driver specific event data. The cxl driver
- * will build a cxl header with type and process_element fields filled in,
- * and header.size set to sizeof(struct cxl_event_header) + data_size.
- * The total size of the event is limited to CXL_READ_MIN_SIZE (4K).
- *
- * fetch_event() is called with a spin lock held, so it must not sleep.
- *
- * The cxl driver will then deliver the event to userspace, and finally
- * call event_delivered() to return the status of the operation, identified
- * by cxl context and AFU driver event data pointers.
- * 0 Success
- * -EFAULT copy_to_user() has failed
- * -EINVAL Event data pointer is NULL, or event size is greater than
- * CXL_READ_MIN_SIZE.
- */
-struct cxl_afu_driver_ops {
- struct cxl_event_afu_driver_reserved *(*fetch_event) (
- struct cxl_context *ctx);
- void (*event_delivered) (struct cxl_context *ctx,
- struct cxl_event_afu_driver_reserved *event,
- int rc);
-};
-
-/*
- * Associate the above driver ops with a specific context.
- * Reset the current count of AFU driver events.
- */
-void cxl_set_driver_ops(struct cxl_context *ctx,
- struct cxl_afu_driver_ops *ops);
-
-/* Notify cxl driver that new events are ready to be delivered for context */
-void cxl_context_events_pending(struct cxl_context *ctx,
- unsigned int new_events);
-
-#endif /* _MISC_CXL_H */
diff --git a/include/misc/cxllib.h b/include/misc/cxllib.h
deleted file mode 100644
index eacc417288fc..000000000000
--- a/include/misc/cxllib.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2017 IBM Corp.
- */
-
-#ifndef _MISC_CXLLIB_H
-#define _MISC_CXLLIB_H
-
-#include <linux/pci.h>
-#include <asm/reg.h>
-
-/*
- * cxl driver exports a in-kernel 'library' API which can be called by
- * other drivers to help interacting with an IBM XSL.
- */
-
-/*
- * tells whether capi is supported on the PCIe slot where the
- * device is seated
- *
- * Input:
- * dev: device whose slot needs to be checked
- * flags: 0 for the time being
- */
-bool cxllib_slot_is_supported(struct pci_dev *dev, unsigned long flags);
-
-
-/*
- * Returns the configuration parameters to be used by the XSL or device
- *
- * Input:
- * dev: device, used to find PHB
- * Output:
- * struct cxllib_xsl_config:
- * version
- * capi BAR address, i.e. 0x2000000000000-0x2FFFFFFFFFFFF
- * capi BAR size
- * data send control (XSL_DSNCTL)
- * dummy read address (XSL_DRA)
- */
-#define CXL_XSL_CONFIG_VERSION1 1
-struct cxllib_xsl_config {
- u32 version; /* format version for register encoding */
- u32 log_bar_size;/* log size of the capi_window */
- u64 bar_addr; /* address of the start of capi window */
- u64 dsnctl; /* matches definition of XSL_DSNCTL */
- u64 dra; /* real address that can be used for dummy read */
-};
-
-int cxllib_get_xsl_config(struct pci_dev *dev, struct cxllib_xsl_config *cfg);
-
-
-/*
- * Activate capi for the pci host bridge associated with the device.
- * Can be extended to deactivate once we know how to do it.
- * Device must be ready to accept messages from the CAPP unit and
- * respond accordingly (TLB invalidates, ...)
- *
- * PHB is switched to capi mode through calls to skiboot.
- * CAPP snooping is activated
- *
- * Input:
- * dev: device whose PHB should switch mode
- * mode: mode to switch to i.e. CAPI or PCI
- * flags: options related to the mode
- */
-enum cxllib_mode {
- CXL_MODE_CXL,
- CXL_MODE_PCI,
-};
-
-#define CXL_MODE_NO_DMA 0
-#define CXL_MODE_DMA_TVT0 1
-#define CXL_MODE_DMA_TVT1 2
-
-int cxllib_switch_phb_mode(struct pci_dev *dev, enum cxllib_mode mode,
- unsigned long flags);
-
-
-/*
- * Set the device for capi DMA.
- * Define its dma_ops and dma offset so that allocations will be using TVT#1
- *
- * Input:
- * dev: device to set
- * flags: options. CXL_MODE_DMA_TVT1 should be used
- */
-int cxllib_set_device_dma(struct pci_dev *dev, unsigned long flags);
-
-
-/*
- * Get the Process Element structure for the given thread
- *
- * Input:
- * task: task_struct for the context of the translation
- * translation_mode: whether addresses should be translated
- * Output:
- * attr: attributes to fill up the Process Element structure from CAIA
- */
-struct cxllib_pe_attributes {
- u64 sr;
- u32 lpid;
- u32 tid;
- u32 pid;
-};
-#define CXL_TRANSLATED_MODE 0
-#define CXL_REAL_MODE 1
-
-int cxllib_get_PE_attributes(struct task_struct *task,
- unsigned long translation_mode, struct cxllib_pe_attributes *attr);
-
-
-/*
- * Handle memory fault.
- * Fault in all the pages of the specified buffer for the permissions
- * provided in ‘flags’
- *
- * Shouldn't be called from interrupt context
- *
- * Input:
- * mm: struct mm for the thread faulting the pages
- * addr: base address of the buffer to page in
- * size: size of the buffer to page in
- * flags: permission requested (DSISR_ISSTORE...)
- */
-int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags);
-
-
-#endif /* _MISC_CXLLIB_H */
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 468a67836e2f..4cb4326dfebe 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -159,7 +159,7 @@ struct linux_tls_mib {
#define __SNMP_ADD_STATS64(mib, field, addend) \
do { \
- __typeof__(*mib) *ptr = raw_cpu_ptr(mib); \
+ TYPEOF_UNQUAL(*mib) *ptr = raw_cpu_ptr(mib); \
u64_stats_update_begin(&ptr->syncp); \
ptr->mibs[field] += addend; \
u64_stats_update_end(&ptr->syncp); \
@@ -176,8 +176,7 @@ struct linux_tls_mib {
#define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
#define __SNMP_UPD_PO_STATS64(mib, basefield, addend) \
do { \
- __typeof__(*mib) *ptr; \
- ptr = raw_cpu_ptr((mib)); \
+ TYPEOF_UNQUAL(*mib) *ptr = raw_cpu_ptr(mib); \
u64_stats_update_begin(&ptr->syncp); \
ptr->mibs[basefield##PKTS]++; \
ptr->mibs[basefield##OCTETS] += addend; \
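TYPEOF_UNQUAL() behaves like __typeof__ but drops qualifiers, so the
local pointer loses the __percpu address-space marker that
raw_cpu_ptr() has already resolved. A small sketch of the same idiom
(helper name made up; the caller is assumed to have preemption
disabled):

    #include <net/snmp.h>

    static unsigned long example_first_mib(struct linux_tls_mib __percpu *mib)
    {
            TYPEOF_UNQUAL(*mib) *ptr = raw_cpu_ptr(mib);

            return ptr->mibs[0];
    }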
diff --git a/include/net/tcp.h b/include/net/tcp.h
index df04dc09c519..4450c384ef17 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -779,7 +779,6 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);
-void tcp_sock_rfree(struct sk_buff *skb);
/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
@@ -2899,18 +2898,4 @@ enum skb_drop_reason tcp_inbound_hash(struct sock *sk,
const void *saddr, const void *daddr,
int family, int dif, int sdif);
-/* version of skb_set_owner_r() avoiding one atomic_add() */
-static inline void tcp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
-{
- skb_orphan(skb);
- skb->sk = sk;
- skb->destructor = tcp_sock_rfree;
-
- sock_owned_by_me(sk);
- atomic_set(&sk->sk_rmem_alloc,
- atomic_read(&sk->sk_rmem_alloc) + skb->truesize);
-
- sk_forward_alloc_add(sk, -skb->truesize);
-}
-
#endif /* _TCP_H */
diff --git a/include/sound/hda-sdw-bpt.h b/include/sound/hda-sdw-bpt.h
new file mode 100644
index 000000000000..f649549b75d5
--- /dev/null
+++ b/include/sound/hda-sdw-bpt.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2025 Intel Corporation.
+ */
+
+#ifndef __HDA_SDW_BPT_H
+#define __HDA_SDW_BPT_H
+
+#include <linux/device.h>
+
+struct hdac_ext_stream;
+struct snd_dma_buffer;
+
+#if IS_ENABLED(CONFIG_SND_SOF_SOF_HDA_SDW_BPT)
+int hda_sdw_bpt_open(struct device *dev, int link_id, struct hdac_ext_stream **bpt_tx_stream,
+ struct snd_dma_buffer *dmab_tx_bdl, u32 bpt_tx_num_bytes,
+ u32 tx_dma_bandwidth, struct hdac_ext_stream **bpt_rx_stream,
+ struct snd_dma_buffer *dmab_rx_bdl, u32 bpt_rx_num_bytes,
+ u32 rx_dma_bandwidth);
+
+int hda_sdw_bpt_send_async(struct device *dev, struct hdac_ext_stream *bpt_tx_stream,
+ struct hdac_ext_stream *bpt_rx_stream);
+
+int hda_sdw_bpt_wait(struct device *dev, struct hdac_ext_stream *bpt_tx_stream,
+ struct hdac_ext_stream *bpt_rx_stream);
+
+int hda_sdw_bpt_close(struct device *dev, struct hdac_ext_stream *bpt_tx_stream,
+ struct snd_dma_buffer *dmab_tx_bdl, struct hdac_ext_stream *bpt_rx_stream,
+ struct snd_dma_buffer *dmab_rx_bdl);
+#else
+static inline int hda_sdw_bpt_open(struct device *dev, int link_id,
+ struct hdac_ext_stream **bpt_tx_stream,
+ struct snd_dma_buffer *dmab_tx_bdl, u32 bpt_tx_num_bytes,
+ u32 tx_dma_bandwidth, struct hdac_ext_stream **bpt_rx_stream,
+ struct snd_dma_buffer *dmab_rx_bdl, u32 bpt_rx_num_bytes,
+ u32 rx_dma_bandwidth)
+{
+ WARN_ONCE(1, "SoundWire BPT is disabled");
+ return -EOPNOTSUPP;
+}
+
+static inline int hda_sdw_bpt_send_async(struct device *dev, struct hdac_ext_stream *bpt_tx_stream,
+ struct hdac_ext_stream *bpt_rx_stream)
+{
+ WARN_ONCE(1, "SoundWire BPT is disabled");
+ return -EOPNOTSUPP;
+}
+
+static inline int hda_sdw_bpt_wait(struct device *dev, struct hdac_ext_stream *bpt_tx_stream,
+ struct hdac_ext_stream *bpt_rx_stream)
+{
+ WARN_ONCE(1, "SoundWire BPT is disabled");
+ return -EOPNOTSUPP;
+}
+
+static inline int hda_sdw_bpt_close(struct device *dev, struct hdac_ext_stream *bpt_tx_stream,
+ struct snd_dma_buffer *dmab_tx_bdl,
+ struct hdac_ext_stream *bpt_rx_stream,
+ struct snd_dma_buffer *dmab_rx_bdl)
+{
+ WARN_ONCE(1, "SoundWire BPT is disabled");
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif /* __HDA_SDW_BPT_H */
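The new header implies a simple open, send_async, wait, close sequence
per BPT transfer. An illustrative sketch; the DMA bandwidth values and
buffer management are placeholders, not taken from the header:

    #include <sound/hda-sdw-bpt.h>

    static int example_bpt_xfer(struct device *dev, int link_id,
                                struct snd_dma_buffer *tx_bdl, u32 tx_bytes,
                                struct snd_dma_buffer *rx_bdl, u32 rx_bytes)
    {
            struct hdac_ext_stream *tx, *rx;
            int ret;

            ret = hda_sdw_bpt_open(dev, link_id, &tx, tx_bdl, tx_bytes, 0,
                                   &rx, rx_bdl, rx_bytes, 0);
            if (ret)
                    return ret;

            ret = hda_sdw_bpt_send_async(dev, tx, rx);
            if (!ret)
                    ret = hda_sdw_bpt_wait(dev, tx, rx);

            hda_sdw_bpt_close(dev, tx, tx_bdl, rx, rx_bdl);
            return ret;
    }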
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index b37eb0a7060f..f74925a6cf69 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -342,6 +342,84 @@ TRACE_EVENT(mm_alloc_contig_migrate_range_info,
__entry->nr_mapped)
);
+TRACE_EVENT(mm_setup_per_zone_wmarks,
+
+ TP_PROTO(struct zone *zone),
+
+ TP_ARGS(zone),
+
+ TP_STRUCT__entry(
+ __field(int, node_id)
+ __string(name, zone->name)
+ __field(unsigned long, watermark_min)
+ __field(unsigned long, watermark_low)
+ __field(unsigned long, watermark_high)
+ __field(unsigned long, watermark_promo)
+ ),
+
+ TP_fast_assign(
+ __entry->node_id = zone->zone_pgdat->node_id;
+ __assign_str(name);
+ __entry->watermark_min = zone->_watermark[WMARK_MIN];
+ __entry->watermark_low = zone->_watermark[WMARK_LOW];
+ __entry->watermark_high = zone->_watermark[WMARK_HIGH];
+ __entry->watermark_promo = zone->_watermark[WMARK_PROMO];
+ ),
+
+ TP_printk("node_id=%d zone name=%s watermark min=%lu low=%lu high=%lu promo=%lu",
+ __entry->node_id,
+ __get_str(name),
+ __entry->watermark_min,
+ __entry->watermark_low,
+ __entry->watermark_high,
+ __entry->watermark_promo)
+);
+
+TRACE_EVENT(mm_setup_per_zone_lowmem_reserve,
+
+ TP_PROTO(struct zone *zone, struct zone *upper_zone, long lowmem_reserve),
+
+ TP_ARGS(zone, upper_zone, lowmem_reserve),
+
+ TP_STRUCT__entry(
+ __field(int, node_id)
+ __string(name, zone->name)
+ __string(upper_name, upper_zone->name)
+ __field(long, lowmem_reserve)
+ ),
+
+ TP_fast_assign(
+ __entry->node_id = zone->zone_pgdat->node_id;
+ __assign_str(name);
+ __assign_str(upper_name);
+ __entry->lowmem_reserve = lowmem_reserve;
+ ),
+
+ TP_printk("node_id=%d zone name=%s upper_zone name=%s lowmem_reserve_pages=%ld",
+ __entry->node_id,
+ __get_str(name),
+ __get_str(upper_name),
+ __entry->lowmem_reserve)
+);
+
+TRACE_EVENT(mm_calculate_totalreserve_pages,
+
+ TP_PROTO(unsigned long totalreserve_pages),
+
+ TP_ARGS(totalreserve_pages),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, totalreserve_pages)
+ ),
+
+ TP_fast_assign(
+ __entry->totalreserve_pages = totalreserve_pages;
+ ),
+
+ TP_printk("totalreserve_pages=%lu", __entry->totalreserve_pages)
+);
+
/*
* Required for uniquely and securely identifying mm in rss_stat tracepoint.
*/
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 851841336ee6..5d331383047b 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -343,6 +343,7 @@ TRACE_EVENT(rpc_request,
{ RPC_TASK_MOVEABLE, "MOVEABLE" }, \
{ RPC_TASK_NULLCREDS, "NULLCREDS" }, \
{ RPC_CALL_MAJORSEEN, "MAJORSEEN" }, \
{ RPC_TASK_NETUNREACH_FATAL, "NETUNREACH_FATAL" }, \
{ RPC_TASK_DYNAMIC, "DYNAMIC" }, \
{ RPC_TASK_NO_ROUND_ROBIN, "NO_ROUND_ROBIN" }, \
{ RPC_TASK_SOFT, "SOFT" }, \
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index a261e86e61fa..0ff388131fc9 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -629,11 +629,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
TRACE_EVENT(balance_dirty_pages,
TP_PROTO(struct bdi_writeback *wb,
- unsigned long thresh,
- unsigned long bg_thresh,
- unsigned long dirty,
- unsigned long bdi_thresh,
- unsigned long bdi_dirty,
+ struct dirty_throttle_control *dtc,
unsigned long dirty_ratelimit,
unsigned long task_ratelimit,
unsigned long dirtied,
@@ -641,7 +637,7 @@ TRACE_EVENT(balance_dirty_pages,
long pause,
unsigned long start_time),
- TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
+ TP_ARGS(wb, dtc,
dirty_ratelimit, task_ratelimit,
dirtied, period, pause, start_time),
@@ -650,8 +646,8 @@ TRACE_EVENT(balance_dirty_pages,
__field(unsigned long, limit)
__field(unsigned long, setpoint)
__field(unsigned long, dirty)
- __field(unsigned long, bdi_setpoint)
- __field(unsigned long, bdi_dirty)
+ __field(unsigned long, wb_setpoint)
+ __field(unsigned long, wb_dirty)
__field(unsigned long, dirty_ratelimit)
__field(unsigned long, task_ratelimit)
__field(unsigned int, dirtied)
@@ -664,16 +660,15 @@ TRACE_EVENT(balance_dirty_pages,
),
TP_fast_assign(
- unsigned long freerun = (thresh + bg_thresh) / 2;
+ unsigned long freerun = (dtc->thresh + dtc->bg_thresh) / 2;
strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
- __entry->limit = global_wb_domain.dirty_limit;
- __entry->setpoint = (global_wb_domain.dirty_limit +
- freerun) / 2;
- __entry->dirty = dirty;
- __entry->bdi_setpoint = __entry->setpoint *
- bdi_thresh / (thresh + 1);
- __entry->bdi_dirty = bdi_dirty;
+ __entry->limit = dtc->limit;
+ __entry->setpoint = (dtc->limit + freerun) / 2;
+ __entry->dirty = dtc->dirty;
+ __entry->wb_setpoint = __entry->setpoint *
+ dtc->wb_thresh / (dtc->thresh + 1);
+ __entry->wb_dirty = dtc->wb_dirty;
__entry->dirty_ratelimit = KBps(dirty_ratelimit);
__entry->task_ratelimit = KBps(task_ratelimit);
__entry->dirtied = dirtied;
@@ -689,7 +684,7 @@ TRACE_EVENT(balance_dirty_pages,
TP_printk("bdi %s: "
"limit=%lu setpoint=%lu dirty=%lu "
- "bdi_setpoint=%lu bdi_dirty=%lu "
+ "wb_setpoint=%lu wb_dirty=%lu "
"dirty_ratelimit=%lu task_ratelimit=%lu "
"dirtied=%u dirtied_pause=%u "
"paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu",
@@ -697,8 +692,8 @@ TRACE_EVENT(balance_dirty_pages,
__entry->limit,
__entry->setpoint,
__entry->dirty,
- __entry->bdi_setpoint,
- __entry->bdi_dirty,
+ __entry->wb_setpoint,
+ __entry->wb_dirty,
__entry->dirty_ratelimit,
__entry->task_ratelimit,
__entry->dirtied,
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
index 5bb906098697..2e21b5594f81 100644
--- a/include/uapi/linux/capability.h
+++ b/include/uapi/linux/capability.h
@@ -275,6 +275,7 @@ struct vfs_ns_cap_data {
/* Allow setting encryption key on loopback filesystem */
/* Allow setting zone reclaim policy */
/* Allow everything under CAP_BPF and CAP_PERFMON for backward compatibility */
+/* Allow setting hardware protection emergency action */
#define CAP_SYS_ADMIN 21
diff --git a/include/uapi/linux/counter.h b/include/uapi/linux/counter.h
index 008a691c254b..350b45d616bb 100644
--- a/include/uapi/linux/counter.h
+++ b/include/uapi/linux/counter.h
@@ -65,6 +65,8 @@ enum counter_event_type {
COUNTER_EVENT_CHANGE_OF_STATE,
/* Count value captured */
COUNTER_EVENT_CAPTURE,
+ /* Direction change detected */
+ COUNTER_EVENT_DIRECTION_CHANGE,
};
/**
diff --git a/include/uapi/linux/counter/microchip-tcb-capture.h b/include/uapi/linux/counter/microchip-tcb-capture.h
new file mode 100644
index 000000000000..136e2faa7730
--- /dev/null
+++ b/include/uapi/linux/counter/microchip-tcb-capture.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Channel numbers used by the microchip-tcb-capture driver
+ * Copyright (C) 2025 Bence Csókás
+ */
+#ifndef _UAPI_COUNTER_MCHP_TCB_H_
+#define _UAPI_COUNTER_MCHP_TCB_H_
+
+/*
+ * The driver defines the following components:
+ *
+ * Count 0
+ * \__ Synapse 0 -- Signal 0 (Channel A, i.e. TIOA)
+ * \__ Synapse 1 -- Signal 1 (Channel B, i.e. TIOB)
+ * \__ Extension capture0 (RA register)
+ * \__ Extension capture1 (RB register)
+ *
+ * It also supports the following events:
+ *
+ * Channel 0:
+ * - CV register changed
+ * - CV overflowed
+ * - RA captured
+ * Channel 1:
+ * - RB captured
+ * Channel 2:
+ * - RC compare triggered
+ */
+
+/* Capture extensions */
+#define COUNTER_MCHP_EXCAP_RA 0
+#define COUNTER_MCHP_EXCAP_RB 1
+
+/* Event channels */
+#define COUNTER_MCHP_EVCHN_CV 0
+#define COUNTER_MCHP_EVCHN_RA 0
+#define COUNTER_MCHP_EVCHN_RB 1
+#define COUNTER_MCHP_EVCHN_RC 2
+
+#endif /* _UAPI_COUNTER_MCHP_TCB_H_ */
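/*
 * A minimal userspace sketch (illustrative only, not part of this patch)
 * of pairing the macros above with the counter chrdev API to watch RA
 * capture events. The /dev/counter0 path and the capture0 extension id
 * are assumptions for the example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/counter.h>
#include <linux/counter/microchip-tcb-capture.h>

int main(void)
{
	struct counter_watch watch = {
		/* Extension capture0 (the RA register) on Count 0 */
		.component.type = COUNTER_COMPONENT_EXTENSION,
		.component.scope = COUNTER_SCOPE_COUNT,
		.component.parent = 0,
		.component.id = COUNTER_MCHP_EXCAP_RA,
		.event = COUNTER_EVENT_CAPTURE,
		.channel = COUNTER_MCHP_EVCHN_RA,
	};
	struct counter_event event;
	int fd = open("/dev/counter0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, COUNTER_ADD_WATCH_IOCTL, &watch) < 0 ||
	    ioctl(fd, COUNTER_ENABLE_EVENTS_IOCTL) < 0)
		return 1;
	while (read(fd, &event, sizeof(event)) == sizeof(event))
		printf("RA captured: %llu\n", (unsigned long long)event.value);
	close(fd);
	return 0;
}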
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 5e0eb41d967e..5ec43ecbceb7 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -229,6 +229,9 @@
* - FUSE_URING_IN_OUT_HEADER_SZ
* - FUSE_URING_OP_IN_OUT_SZ
* - enum fuse_uring_cmd
+ *
+ * 7.43
+ * - add FUSE_REQUEST_TIMEOUT
*/
#ifndef _LINUX_FUSE_H
@@ -264,7 +267,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 42
+#define FUSE_KERNEL_MINOR_VERSION 43
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -435,6 +438,8 @@ struct fuse_file_lock {
* of the request ID indicates resend requests
* FUSE_ALLOW_IDMAP: allow creation of idmapped mounts
* FUSE_OVER_IO_URING: Indicate that client supports io-uring
+ * FUSE_REQUEST_TIMEOUT: kernel supports timing out requests.
+ * init_out.request_timeout contains the timeout (in secs)
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -477,11 +482,11 @@ struct fuse_file_lock {
#define FUSE_PASSTHROUGH (1ULL << 37)
#define FUSE_NO_EXPORT_SUPPORT (1ULL << 38)
#define FUSE_HAS_RESEND (1ULL << 39)
-
/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */
#define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP
#define FUSE_ALLOW_IDMAP (1ULL << 40)
#define FUSE_OVER_IO_URING (1ULL << 41)
+#define FUSE_REQUEST_TIMEOUT (1ULL << 42)
/**
* CUSE INIT request/reply flags
@@ -909,7 +914,8 @@ struct fuse_init_out {
uint16_t map_alignment;
uint32_t flags2;
uint32_t max_stack_depth;
- uint32_t unused[6];
+ uint16_t request_timeout;
+ uint16_t unused[11];
};
#define CUSE_INIT_INFO_MAX 4096
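/*
 * A hedged sketch (not part of this patch) of how a FUSE daemon might opt
 * into kernel-enforced request timeouts in its INIT reply when the kernel
 * advertises FUSE_REQUEST_TIMEOUT (bit 42, i.e. bit 10 of flags2). The
 * negotiation details here are assumptions for illustration; assumes
 * <stdint.h> and <linux/fuse.h>.
 */
static void reply_init(const struct fuse_init_in *in,
		       struct fuse_init_out *out)
{
	uint64_t kernel_flags = in->flags | ((uint64_t)in->flags2 << 32);

	out->major = FUSE_KERNEL_VERSION;
	out->minor = FUSE_KERNEL_MINOR_VERSION;
	if (kernel_flags & FUSE_REQUEST_TIMEOUT) {
		out->flags |= FUSE_INIT_EXT;
		out->flags2 |= (uint32_t)(FUSE_REQUEST_TIMEOUT >> 32);
		out->request_timeout = 60; /* time out requests after 60 secs */
	}
}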
diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h
index 12886d4465e4..3eb0821af7a4 100644
--- a/include/uapi/linux/iio/types.h
+++ b/include/uapi/linux/iio/types.h
@@ -119,6 +119,7 @@ enum iio_event_type {
IIO_EV_TYPE_CHANGE,
IIO_EV_TYPE_MAG_REFERENCED,
IIO_EV_TYPE_GESTURE,
+ IIO_EV_TYPE_FAULT,
};
enum iio_event_direction {
@@ -128,6 +129,7 @@ enum iio_event_direction {
IIO_EV_DIR_NONE,
IIO_EV_DIR_SINGLETAP,
IIO_EV_DIR_DOUBLETAP,
+ IIO_EV_DIR_FAULT_OPENWIRE,
};
#endif /* _UAPI_IIO_TYPES_H_ */
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index 78747b24bd0f..f29b6c44655e 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -55,6 +55,7 @@ enum {
IOMMUFD_CMD_VIOMMU_ALLOC = 0x90,
IOMMUFD_CMD_VDEVICE_ALLOC = 0x91,
IOMMUFD_CMD_IOAS_CHANGE_PROCESS = 0x92,
+ IOMMUFD_CMD_VEVENTQ_ALLOC = 0x93,
};
/**
@@ -392,6 +393,9 @@ struct iommu_vfio_ioas {
* Any domain attached to the non-PASID part of the
* device must also be flagged, otherwise attaching a
 * PASID will be blocked.
+ * For users that want to attach a PASID, an ioas is
+ * not recommended for either the non-PASID part or
+ * the PASID part of the device.
* If IOMMU does not support PASID it will return
* error (-EOPNOTSUPP).
*/
@@ -608,9 +612,17 @@ enum iommu_hw_info_type {
* IOMMU_HWPT_GET_DIRTY_BITMAP
* IOMMU_HWPT_SET_DIRTY_TRACKING
*
+ * @IOMMU_HW_CAP_PCI_PASID_EXEC: Execute Permission Supported; user should
+ * ignore it when struct
+ * iommu_hw_info::out_max_pasid_log2 is zero.
+ * @IOMMU_HW_CAP_PCI_PASID_PRIV: Privileged Mode Supported; user should
+ * ignore it when struct
+ * iommu_hw_info::out_max_pasid_log2 is zero.
*/
enum iommufd_hw_capabilities {
IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 0,
+ IOMMU_HW_CAP_PCI_PASID_EXEC = 1 << 1,
+ IOMMU_HW_CAP_PCI_PASID_PRIV = 1 << 2,
};
/**
@@ -626,6 +638,9 @@ enum iommufd_hw_capabilities {
* iommu_hw_info_type.
* @out_capabilities: Output the generic iommu capability info type as defined
* in the enum iommu_hw_capabilities.
+ * @out_max_pasid_log2: Output the width of PASIDs. 0 means no PASID support.
+ * PCI devices should consult out_capabilities to check
+ * whether a specific PASID capability is supported.
* @__reserved: Must be 0
*
* Query an iommu type specific hardware information data from an iommu behind
@@ -649,7 +664,8 @@ struct iommu_hw_info {
__u32 data_len;
__aligned_u64 data_uptr;
__u32 out_data_type;
- __u32 __reserved;
+ __u8 out_max_pasid_log2;
+ __u8 __reserved[3];
__aligned_u64 out_capabilities;
};
#define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO)
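/*
 * A minimal sketch (illustrative, not part of this patch) of querying the
 * new PASID fields via IOMMU_GET_HW_INFO; the iommufd fd and device id are
 * assumed to exist, error handling is trimmed, and <sys/ioctl.h> plus
 * <linux/iommufd.h> are assumed.
 */
static int pasid_width(int iommufd, __u32 dev_id)
{
	struct iommu_hw_info info = {
		.size = sizeof(info),
		.dev_id = dev_id,
	};

	if (ioctl(iommufd, IOMMU_GET_HW_INFO, &info) < 0)
		return -1;
	if (!info.out_max_pasid_log2)
		return 0; /* no PASID support; EXEC/PRIV bits are meaningless */
	if (info.out_capabilities & IOMMU_HW_CAP_PCI_PASID_EXEC)
		; /* execute permission is usable with PASIDs */
	return info.out_max_pasid_log2;
}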
@@ -1014,4 +1030,115 @@ struct iommu_ioas_change_process {
#define IOMMU_IOAS_CHANGE_PROCESS \
_IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_CHANGE_PROCESS)
+/**
+ * enum iommu_veventq_flag - flag for struct iommufd_vevent_header
+ * @IOMMU_VEVENTQ_FLAG_LOST_EVENTS: vEVENTQ has lost vEVENTs
+ */
+enum iommu_veventq_flag {
+ IOMMU_VEVENTQ_FLAG_LOST_EVENTS = (1U << 0),
+};
+
+/**
+ * struct iommufd_vevent_header - Virtual Event Header for a vEVENTQ Status
+ * @flags: Combination of enum iommu_veventq_flag
+ * @sequence: The sequence index of a vEVENT in the vEVENTQ, with a range of
+ * [0, INT_MAX], where the index following INT_MAX wraps back to 0
+ *
+ * Each iommufd_vevent_header reports the sequence index of the vEVENT that follows it:
+ *
+ * +----------------------+-------+----------------------+-------+---+-------+
+ * | header0 {sequence=0} | data0 | header1 {sequence=1} | data1 |...| dataN |
+ * +----------------------+-------+----------------------+-------+---+-------+
+ *
+ * This sequence index is expected to increase monotonically from the sequence
+ * index of the previous vEVENT. If two adjacent sequence indexes have a delta
+ * larger than 1, then delta - 1 vEVENTs have been lost, e.g. two lost vEVENTs:
+ *
+ * +-----+----------------------+-------+----------------------+-------+-----+
+ * | ... | header3 {sequence=3} | data3 | header6 {sequence=6} | data6 | ... |
+ * +-----+----------------------+-------+----------------------+-------+-----+
+ *
+ * If a vEVENT is lost at the tail of the vEVENTQ and no following vEVENT
+ * provides the next sequence index, an IOMMU_VEVENTQ_FLAG_LOST_EVENTS header
+ * is added to the tail, and no data follows this header:
+ *
+ * +--+----------------------+-------+-----------------------------------------+
+ * |..| header3 {sequence=3} | data3 | header4 {flags=LOST_EVENTS, sequence=4} |
+ * +--+----------------------+-------+-----------------------------------------+
+ */
+struct iommufd_vevent_header {
+ __u32 flags;
+ __u32 sequence;
+};
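/*
 * A hedged userspace sketch (not part of this patch) of the lost-event
 * accounting described above: given two adjacent headers, the number of
 * lost vEVENTs is delta - 1, with the index wrapping from INT_MAX back
 * to 0 (i.e. sequence arithmetic is modulo 2^31); __u32 comes from
 * <linux/types.h>.
 */
static unsigned int lost_between(__u32 prev_seq, __u32 seq)
{
	/* sequence space is [0, INT_MAX], so reduce the delta modulo 2^31 */
	__u32 delta = (seq - prev_seq) & 0x7fffffff;

	return delta ? delta - 1 : 0;
}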
+
+/**
+ * enum iommu_veventq_type - Virtual Event Queue Type
+ * @IOMMU_VEVENTQ_TYPE_DEFAULT: Reserved for future use
+ * @IOMMU_VEVENTQ_TYPE_ARM_SMMUV3: ARM SMMUv3 Virtual Event Queue
+ */
+enum iommu_veventq_type {
+ IOMMU_VEVENTQ_TYPE_DEFAULT = 0,
+ IOMMU_VEVENTQ_TYPE_ARM_SMMUV3 = 1,
+};
+
+/**
+ * struct iommu_vevent_arm_smmuv3 - ARM SMMUv3 Virtual Event
+ * (IOMMU_VEVENTQ_TYPE_ARM_SMMUV3)
+ * @evt: 256-bit ARM SMMUv3 Event record, little-endian.
+ * Reported event records: (Refer to "7.3 Event records" in SMMUv3 HW Spec)
+ * - 0x04 C_BAD_STE
+ * - 0x06 F_STREAM_DISABLED
+ * - 0x08 C_BAD_SUBSTREAMID
+ * - 0x0a C_BAD_CD
+ * - 0x10 F_TRANSLATION
+ * - 0x11 F_ADDR_SIZE
+ * - 0x12 F_ACCESS
+ * - 0x13 F_PERMISSION
+ *
+ * StreamID field reports a virtual device ID. To receive a virtual event for a
+ * device, a vDEVICE must be allocated via IOMMU_VDEVICE_ALLOC.
+ */
+struct iommu_vevent_arm_smmuv3 {
+ __aligned_le64 evt[4];
+};
+
+/**
+ * struct iommu_veventq_alloc - ioctl(IOMMU_VEVENTQ_ALLOC)
+ * @size: sizeof(struct iommu_veventq_alloc)
+ * @flags: Must be 0
+ * @viommu_id: virtual IOMMU ID to associate the vEVENTQ with
+ * @type: Type of the vEVENTQ. Must be defined in enum iommu_veventq_type
+ * @veventq_depth: Maximum number of events in the vEVENTQ
+ * @out_veventq_id: The ID of the new vEVENTQ
+ * @out_veventq_fd: The fd of the new vEVENTQ. User space must close the
+ * successfully returned fd after using it
+ * @__reserved: Must be 0
+ *
+ * Explicitly allocate a virtual event queue interface for a vIOMMU. A vIOMMU
+ * can have multiple FDs for different types, but is confined to one per @type.
+ * User space should open the @out_veventq_fd to read vEVENTs out of a vEVENTQ
+ * when they are available. A vEVENTQ will lose events to overflow if the
+ * number of queued vEVENTs hits @veventq_depth.
+ *
+ * Each vEVENT in a vEVENTQ encloses a struct iommufd_vevent_header followed by
+ * a type-specific data structure, in a normal case:
+ *
+ * +-+---------+-------+---------+-------+-----+---------+-------+-+
+ * | | header0 | data0 | header1 | data1 | ... | headerN | dataN | |
+ * +-+---------+-------+---------+-------+-----+---------+-------+-+
+ *
+ * unless a trailing IOMMU_VEVENTQ_FLAG_LOST_EVENTS header is logged (refer to
+ * struct iommufd_vevent_header).
+ */
+struct iommu_veventq_alloc {
+ __u32 size;
+ __u32 flags;
+ __u32 viommu_id;
+ __u32 type;
+ __u32 veventq_depth;
+ __u32 out_veventq_id;
+ __u32 out_veventq_fd;
+ __u32 __reserved;
+};
+#define IOMMU_VEVENTQ_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VEVENTQ_ALLOC)
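/*
 * A minimal allocation sketch (illustrative, not part of this patch):
 * create a vEVENTQ on an existing vIOMMU and hand back the fd to read
 * vEVENTs from. The depth of 64 is an assumption for the example;
 * assumes <sys/ioctl.h> and <linux/iommufd.h>.
 */
static int alloc_veventq(int iommufd, __u32 viommu_id)
{
	struct iommu_veventq_alloc alloc = {
		.size = sizeof(alloc),
		.viommu_id = viommu_id,
		.type = IOMMU_VEVENTQ_TYPE_ARM_SMMUV3,
		.veventq_depth = 64,
	};

	if (ioctl(iommufd, IOMMU_VEVENTQ_ALLOC, &alloc) < 0)
		return -1;
	/* the caller reads vEVENTs from, and eventually closes, this fd */
	return alloc.out_veventq_fd;
}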
#endif
diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h
index 7255b36b5cf6..583b86681c93 100644
--- a/include/uapi/linux/ublk_cmd.h
+++ b/include/uapi/linux/ublk_cmd.h
@@ -410,6 +410,29 @@ struct ublk_param_dma_align {
__u8 pad[4];
};
+#define UBLK_MIN_SEGMENT_SIZE 4096
+/*
+ * If any one of the three segment parameters is set to 0, the behavior is
+ * undefined.
+ */
+struct ublk_param_segment {
+ /*
+ * seg_boundary_mask + 1 needs to be power_of_2(), and the sum has
+ * to be >= UBLK_MIN_SEGMENT_SIZE(4096)
+ */
+ __u64 seg_boundary_mask;
+
+ /*
+ * max_segment_size may be overridden by virt_boundary_mask, so be
+ * careful when setting both.
+ *
+ * max_segment_size has to be >= UBLK_MIN_SEGMENT_SIZE(4096)
+ */
+ __u32 max_segment_size;
+ __u16 max_segments;
+ __u8 pad[2];
+};
+
struct ublk_params {
/*
* Total length of parameters, userspace has to set 'len' for both
@@ -423,6 +446,7 @@ struct ublk_params {
#define UBLK_PARAM_TYPE_DEVT (1 << 2)
#define UBLK_PARAM_TYPE_ZONED (1 << 3)
#define UBLK_PARAM_TYPE_DMA_ALIGN (1 << 4)
+#define UBLK_PARAM_TYPE_SEGMENT (1 << 5)
__u32 types; /* types of parameter included */
struct ublk_param_basic basic;
@@ -430,6 +454,7 @@ struct ublk_params {
struct ublk_param_devt devt;
struct ublk_param_zoned zoned;
struct ublk_param_dma_align dma;
+ struct ublk_param_segment seg;
};
#endif
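/*
 * A hedged sketch (not part of this patch) of a ublk server filling the
 * new segment parameters; the concrete limits here are examples only and
 * <linux/ublk_cmd.h> is assumed.
 */
static void set_seg_params(struct ublk_params *p)
{
	p->types |= UBLK_PARAM_TYPE_SEGMENT;
	/* 64K boundary: mask + 1 is a power of 2 and >= 4096 */
	p->seg.seg_boundary_mask = 0xffff;
	p->seg.max_segment_size = 65536; /* >= UBLK_MIN_SEGMENT_SIZE */
	p->seg.max_segments = 128;
}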
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 052290652046..8003243a4937 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -253,6 +253,9 @@ struct usb_ctrlrequest {
#define USB_DT_BOS 0x0f
#define USB_DT_DEVICE_CAPABILITY 0x10
#define USB_DT_WIRELESS_ENDPOINT_COMP 0x11
+/* From the eUSB2 spec */
+#define USB_DT_EUSB2_ISOC_ENDPOINT_COMP 0x12
+/* From Wireless USB spec */
#define USB_DT_WIRE_ADAPTER 0x21
/* From USB Device Firmware Upgrade Specification, Revision 1.1 */
#define USB_DT_DFU_FUNCTIONAL 0x21
@@ -676,6 +679,18 @@ static inline int usb_endpoint_interrupt_type(
/*-------------------------------------------------------------------------*/
+/* USB_DT_EUSB2_ISOC_ENDPOINT_COMP: eUSB2 Isoch Endpoint Companion descriptor */
+struct usb_eusb2_isoc_ep_comp_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __le16 wMaxPacketSize;
+ __le32 dwBytesPerInterval;
+} __attribute__ ((packed));
+
+#define USB_DT_EUSB2_ISOC_EP_COMP_SIZE 8
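/*
 * A hedged parsing sketch (not part of this patch): walk the extra
 * descriptor bytes following an endpoint and pick out the eUSB2 isoc
 * companion, the usual pattern for companion descriptors; assumes
 * <linux/usb/ch9.h> for struct usb_descriptor_header.
 */
static struct usb_eusb2_isoc_ep_comp_descriptor *
find_eusb2_isoc_comp(unsigned char *buf, int len)
{
	while (len >= 2) {
		struct usb_descriptor_header *h = (void *)buf;

		if (h->bLength < 2 || h->bLength > len)
			break;
		if (h->bDescriptorType == USB_DT_EUSB2_ISOC_ENDPOINT_COMP &&
		    h->bLength >= USB_DT_EUSB2_ISOC_EP_COMP_SIZE)
			return (void *)buf;
		buf += h->bLength;
		len -= h->bLength;
	}
	return NULL;
}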
+
+/*-------------------------------------------------------------------------*/
+
/* USB_DT_SSP_ISOC_ENDPOINT_COMP: SuperSpeedPlus Isochronous Endpoint Companion
* descriptor
*/
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index a2d3e1ac6239..5764f315137f 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -932,29 +932,34 @@ struct vfio_device_bind_iommufd {
* VFIO_DEVICE_ATTACH_IOMMUFD_PT - _IOW(VFIO_TYPE, VFIO_BASE + 19,
* struct vfio_device_attach_iommufd_pt)
* @argsz: User filled size of this data.
- * @flags: Must be 0.
+ * @flags: Flags for attach.
* @pt_id: Input the target id which can represent an ioas or a hwpt
* allocated via iommufd subsystem.
* Output the input ioas id or the attached hwpt id which could
* be the specified hwpt itself or a hwpt automatically created
* for the specified ioas by kernel during the attachment.
+ * @pasid: The pasid to be attached, only meaningful when
+ * VFIO_DEVICE_ATTACH_PASID is set in @flags
*
* Associate the device with an address space within the bound iommufd.
* Undo by VFIO_DEVICE_DETACH_IOMMUFD_PT or device fd close. This is only
* allowed on cdev fds.
*
- * If a vfio device is currently attached to a valid hw_pagetable, without doing
- * a VFIO_DEVICE_DETACH_IOMMUFD_PT, a second VFIO_DEVICE_ATTACH_IOMMUFD_PT ioctl
- * passing in another hw_pagetable (hwpt) id is allowed. This action, also known
- * as a hw_pagetable replacement, will replace the device's currently attached
- * hw_pagetable with a new hw_pagetable corresponding to the given pt_id.
+ * If a vfio device or a pasid of this device is currently attached to a valid
+ * hw_pagetable (hwpt), without doing a VFIO_DEVICE_DETACH_IOMMUFD_PT, a second
+ * VFIO_DEVICE_ATTACH_IOMMUFD_PT ioctl passing in another hwpt id is allowed.
+ * This action, also known as a hw_pagetable replacement, will replace the
+ * currently attached hwpt of the device or the pasid of this device with a new
+ * hwpt corresponding to the given pt_id.
*
* Return: 0 on success, -errno on failure.
*/
struct vfio_device_attach_iommufd_pt {
__u32 argsz;
__u32 flags;
+#define VFIO_DEVICE_ATTACH_PASID (1 << 0)
__u32 pt_id;
+ __u32 pasid;
};
#define VFIO_DEVICE_ATTACH_IOMMUFD_PT _IO(VFIO_TYPE, VFIO_BASE + 19)
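/*
 * A minimal cdev sketch (illustrative, not part of this patch): attach a
 * specific PASID of a device to a hwpt. device_fd, hwpt_id and pasid are
 * assumed to come from earlier vfio/iommufd setup; assumes <sys/ioctl.h>
 * and <linux/vfio.h>.
 */
static int attach_pasid(int device_fd, __u32 hwpt_id, __u32 pasid)
{
	struct vfio_device_attach_iommufd_pt attach = {
		.argsz = sizeof(attach),
		.flags = VFIO_DEVICE_ATTACH_PASID,
		.pt_id = hwpt_id,
		.pasid = pasid,
	};

	return ioctl(device_fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach);
}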
@@ -963,17 +968,21 @@ struct vfio_device_attach_iommufd_pt {
* VFIO_DEVICE_DETACH_IOMMUFD_PT - _IOW(VFIO_TYPE, VFIO_BASE + 20,
* struct vfio_device_detach_iommufd_pt)
* @argsz: User filled size of this data.
- * @flags: Must be 0.
+ * @flags: Flags for detach.
+ * @pasid: The pasid to be detached, only meaningful when
+ * VFIO_DEVICE_DETACH_PASID is set in @flags
*
- * Remove the association of the device and its current associated address
- * space. After it, the device should be in a blocking DMA state. This is only
- * allowed on cdev fds.
+ * Remove the association of the device or a pasid of the device and its current
+ * associated address space. After it, the device or the pasid should be in a
+ * blocking DMA state. This is only allowed on cdev fds.
*
* Return: 0 on success, -errno on failure.
*/
struct vfio_device_detach_iommufd_pt {
__u32 argsz;
__u32 flags;
+#define VFIO_DEVICE_DETACH_PASID (1 << 0)
+ __u32 pasid;
};
#define VFIO_DEVICE_DETACH_IOMMUFD_PT _IO(VFIO_TYPE, VFIO_BASE + 20)
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h
deleted file mode 100644
index 56376d3907d8..000000000000
--- a/include/uapi/misc/cxl.h
+++ /dev/null
@@ -1,156 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-/*
- * Copyright 2014 IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _UAPI_MISC_CXL_H
-#define _UAPI_MISC_CXL_H
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-
-struct cxl_ioctl_start_work {
- __u64 flags;
- __u64 work_element_descriptor;
- __u64 amr;
- __s16 num_interrupts;
- __u16 tid;
- __s32 reserved1;
- __u64 reserved2;
- __u64 reserved3;
- __u64 reserved4;
- __u64 reserved5;
-};
-
-#define CXL_START_WORK_AMR 0x0000000000000001ULL
-#define CXL_START_WORK_NUM_IRQS 0x0000000000000002ULL
-#define CXL_START_WORK_ERR_FF 0x0000000000000004ULL
-#define CXL_START_WORK_TID 0x0000000000000008ULL
-#define CXL_START_WORK_ALL (CXL_START_WORK_AMR |\
- CXL_START_WORK_NUM_IRQS |\
- CXL_START_WORK_ERR_FF |\
- CXL_START_WORK_TID)
-
-
-/* Possible modes that an afu can be in */
-#define CXL_MODE_DEDICATED 0x1
-#define CXL_MODE_DIRECTED 0x2
-
-/* possible flags for the cxl_afu_id flags field */
-#define CXL_AFUID_FLAG_SLAVE 0x1 /* In directed-mode afu is in slave mode */
-
-struct cxl_afu_id {
- __u64 flags; /* One of CXL_AFUID_FLAG_X */
- __u32 card_id;
- __u32 afu_offset;
- __u32 afu_mode; /* one of the CXL_MODE_X */
- __u32 reserved1;
- __u64 reserved2;
- __u64 reserved3;
- __u64 reserved4;
- __u64 reserved5;
- __u64 reserved6;
-};
-
-/* base adapter image header is included in the image */
-#define CXL_AI_NEED_HEADER 0x0000000000000001ULL
-#define CXL_AI_ALL CXL_AI_NEED_HEADER
-
-#define CXL_AI_HEADER_SIZE 128
-#define CXL_AI_BUFFER_SIZE 4096
-#define CXL_AI_MAX_ENTRIES 256
-#define CXL_AI_MAX_CHUNK_SIZE (CXL_AI_BUFFER_SIZE * CXL_AI_MAX_ENTRIES)
-
-struct cxl_adapter_image {
- __u64 flags;
- __u64 data;
- __u64 len_data;
- __u64 len_image;
- __u64 reserved1;
- __u64 reserved2;
- __u64 reserved3;
- __u64 reserved4;
-};
-
-/* ioctl numbers */
-#define CXL_MAGIC 0xCA
-/* AFU devices */
-#define CXL_IOCTL_START_WORK _IOW(CXL_MAGIC, 0x00, struct cxl_ioctl_start_work)
-#define CXL_IOCTL_GET_PROCESS_ELEMENT _IOR(CXL_MAGIC, 0x01, __u32)
-#define CXL_IOCTL_GET_AFU_ID _IOR(CXL_MAGIC, 0x02, struct cxl_afu_id)
-/* adapter devices */
-#define CXL_IOCTL_DOWNLOAD_IMAGE _IOW(CXL_MAGIC, 0x0A, struct cxl_adapter_image)
-#define CXL_IOCTL_VALIDATE_IMAGE _IOW(CXL_MAGIC, 0x0B, struct cxl_adapter_image)
-
-#define CXL_READ_MIN_SIZE 0x1000 /* 4K */
-
-/* Events from read() */
-enum cxl_event_type {
- CXL_EVENT_RESERVED = 0,
- CXL_EVENT_AFU_INTERRUPT = 1,
- CXL_EVENT_DATA_STORAGE = 2,
- CXL_EVENT_AFU_ERROR = 3,
- CXL_EVENT_AFU_DRIVER = 4,
-};
-
-struct cxl_event_header {
- __u16 type;
- __u16 size;
- __u16 process_element;
- __u16 reserved1;
-};
-
-struct cxl_event_afu_interrupt {
- __u16 flags;
- __u16 irq; /* Raised AFU interrupt number */
- __u32 reserved1;
-};
-
-struct cxl_event_data_storage {
- __u16 flags;
- __u16 reserved1;
- __u32 reserved2;
- __u64 addr;
- __u64 dsisr;
- __u64 reserved3;
-};
-
-struct cxl_event_afu_error {
- __u16 flags;
- __u16 reserved1;
- __u32 reserved2;
- __u64 error;
-};
-
-struct cxl_event_afu_driver_reserved {
- /*
- * Defines the buffer passed to the cxl driver by the AFU driver.
- *
- * This is not ABI since the event header.size passed to the user for
- * existing events is set in the read call to sizeof(cxl_event_header)
- * + sizeof(whatever event is being dispatched) and the user is already
- * required to use a 4K buffer on the read call.
- *
- * Of course the contents will be ABI, but that's up the AFU driver.
- */
- __u32 data_size;
- __u8 data[];
-};
-
-struct cxl_event {
- struct cxl_event_header header;
- union {
- struct cxl_event_afu_interrupt irq;
- struct cxl_event_data_storage fault;
- struct cxl_event_afu_error afu_error;
- struct cxl_event_afu_driver_reserved afu_driver_event;
- };
-};
-
-#endif /* _UAPI_MISC_CXL_H */
diff --git a/init/Kconfig b/init/Kconfig
index 681f38ee68db..ede5a43029a9 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -132,6 +132,11 @@ config CC_HAS_COUNTED_BY
config CC_HAS_MULTIDIMENSIONAL_NONSTRING
def_bool $(success,echo 'char tag[][4] __attribute__((__nonstring__)) = { };' | $(CC) $(CLANG_FLAGS) -x c - -c -o /dev/null -Werror)
+config LD_CAN_USE_KEEP_IN_OVERLAY
+ # ld.lld prior to 21.0.0 did not support KEEP within an overlay description
+ # https://github.com/llvm/llvm-project/pull/130661
+ def_bool LD_IS_BFD || LLD_VERSION >= 210000
+
config RUSTC_HAS_COERCE_POINTEE
def_bool RUSTC_VERSION >= 108400
@@ -1888,6 +1893,28 @@ config ARCH_HAS_MEMBARRIER_CALLBACKS
config ARCH_HAS_MEMBARRIER_SYNC_CORE
bool
+config ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
+ bool
+ help
+ Control MSEAL_SYSTEM_MAPPINGS access based on architecture.
+
+ A 64-bit kernel is required for the memory sealing feature.
+ No specific hardware features from the CPU are needed.
+
+ To enable this feature, an architecture needs to update its
+ special-mapping calls to include the sealing flag and confirm
+ that it doesn't unmap/remap system mappings during the
+ lifetime of the process. The existence of this flag for an architecture
+ implies that it does not require the remapping of the system
+ mappings during process lifetime, so sealing these mappings is safe
+ from a kernel perspective.
+
+ After the architecture enables this, a distribution can set
+ CONFIG_MSEAL_SYSTEM_MAPPINGS to manage access to the feature.
+
+ For complete descriptions of memory sealing, please see
+ Documentation/userspace-api/mseal.rst
+
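/*
 * A sketch (not part of this patch) of the per-architecture update the
 * help text above describes: special mappings gain the sealing flag, as
 * the uprobes xol area does later in this series. vdso_mapping here is
 * a hypothetical struct vm_special_mapping for illustration.
 */
static int arch_map_vdso(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|
				       VM_SEALED_SYSMAP,
				       &vdso_mapping);
	return PTR_ERR_OR_ZERO(vma);
}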
config HAVE_PERF_EVENTS
bool
help
diff --git a/io_uring/Kconfig b/io_uring/Kconfig
index 9e2a4beba1ef..4b949c42c0bf 100644
--- a/io_uring/Kconfig
+++ b/io_uring/Kconfig
@@ -5,6 +5,7 @@
config IO_URING_ZCRX
def_bool y
+ depends on IO_URING
depends on PAGE_POOL
depends on INET
depends on NET_RX_BUSY_POLL
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 3ba49c628337..c6209fe44cb1 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1141,10 +1141,9 @@ void tctx_task_work(struct callback_head *cb)
WARN_ON_ONCE(ret);
}
-static inline void io_req_local_work_add(struct io_kiocb *req,
- struct io_ring_ctx *ctx,
- unsigned flags)
+static void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
{
+ struct io_ring_ctx *ctx = req->ctx;
unsigned nr_wait, nr_tw, nr_tw_prev;
struct llist_node *head;
@@ -1239,17 +1238,16 @@ static void io_req_normal_work_add(struct io_kiocb *req)
void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
{
if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)
- io_req_local_work_add(req, req->ctx, flags);
+ io_req_local_work_add(req, flags);
else
io_req_normal_work_add(req);
}
-void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
- unsigned flags)
+void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags)
{
- if (WARN_ON_ONCE(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN)))
+ if (WARN_ON_ONCE(!(req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)))
return;
- io_req_local_work_add(req, ctx, flags);
+ __io_req_task_work_add(req, flags);
}
static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
@@ -1645,6 +1643,8 @@ io_req_flags_t io_file_get_flags(struct file *file)
{
io_req_flags_t res = 0;
+ BUILD_BUG_ON(REQ_F_ISREG_BIT != REQ_F_SUPPORT_NOWAIT_BIT + 1);
+
if (S_ISREG(file_inode(file)->i_mode))
res |= REQ_F_ISREG;
if ((file->f_flags & O_NONBLOCK) || (file->f_mode & FMODE_NOWAIT))
@@ -1796,7 +1796,7 @@ struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
struct io_kiocb *nxt = NULL;
- if (req_ref_put_and_test(req)) {
+ if (req_ref_put_and_test_atomic(req)) {
if (req->flags & IO_REQ_LINK_FLAGS)
nxt = io_req_find_next(req);
io_free_req(req);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 87f883130286..e4050b2d0821 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -89,8 +89,7 @@ struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
unsigned issue_flags);
void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
-void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
- unsigned flags);
+void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags);
void io_req_task_queue(struct io_kiocb *req);
void io_req_task_complete(struct io_kiocb *req, io_tw_token_t tw);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index 0bbcbbcdebfd..50a958e9c921 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -38,8 +38,8 @@ static void io_double_unlock_ctx(struct io_ring_ctx *octx)
mutex_unlock(&octx->uring_lock);
}
-static int io_double_lock_ctx(struct io_ring_ctx *octx,
- unsigned int issue_flags)
+static int io_lock_external_ctx(struct io_ring_ctx *octx,
+ unsigned int issue_flags)
{
/*
* To ensure proper ordering between the two ctxs, we can only
@@ -93,13 +93,14 @@ static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
kmem_cache_free(req_cachep, req);
return -EOWNERDEAD;
}
+ req->opcode = IORING_OP_NOP;
req->cqe.user_data = user_data;
io_req_set_res(req, res, cflags);
percpu_ref_get(&ctx->refs);
req->ctx = ctx;
req->tctx = NULL;
req->io_task_work.func = io_msg_tw_complete;
- io_req_task_work_add_remote(req, ctx, IOU_F_TWQ_LAZY_WAKE);
+ io_req_task_work_add_remote(req, IOU_F_TWQ_LAZY_WAKE);
return 0;
}
@@ -154,7 +155,7 @@ static int __io_msg_ring_data(struct io_ring_ctx *target_ctx,
ret = -EOVERFLOW;
if (target_ctx->flags & IORING_SETUP_IOPOLL) {
- if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
+ if (unlikely(io_lock_external_ctx(target_ctx, issue_flags)))
return -EAGAIN;
}
if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, flags))
@@ -199,7 +200,7 @@ static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flag
struct file *src_file = msg->src_file;
int ret;
- if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
+ if (unlikely(io_lock_external_ctx(target_ctx, issue_flags)))
return -EAGAIN;
ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd);
diff --git a/io_uring/net.c b/io_uring/net.c
index 8944eb679024..24040bc3916a 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -97,6 +97,11 @@ struct io_recvzc {
struct io_zcrx_ifq *ifq;
};
+static int io_sg_from_iter_iovec(struct sk_buff *skb,
+ struct iov_iter *from, size_t length);
+static int io_sg_from_iter(struct sk_buff *skb,
+ struct iov_iter *from, size_t length);
+
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
@@ -176,16 +181,6 @@ static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)
return hdr;
}
-/* assign new iovec to kmsg, if we need to */
-static void io_net_vec_assign(struct io_kiocb *req, struct io_async_msghdr *kmsg,
- struct iovec *iov)
-{
- if (iov) {
- req->flags |= REQ_F_NEED_CLEANUP;
- io_vec_reset_iovec(&kmsg->vec, iov, kmsg->msg.msg_iter.nr_segs);
- }
-}
-
static inline void io_mshot_prep_retry(struct io_kiocb *req,
struct io_async_msghdr *kmsg)
{
@@ -217,7 +212,11 @@ static int io_net_import_vec(struct io_kiocb *req, struct io_async_msghdr *iomsg
&iomsg->msg.msg_iter, io_is_compat(req->ctx));
if (unlikely(ret < 0))
return ret;
- io_net_vec_assign(req, iomsg, iov);
+
+ if (iov) {
+ req->flags |= REQ_F_NEED_CLEANUP;
+ io_vec_reset_iovec(&iomsg->vec, iov, iomsg->msg.msg_iter.nr_segs);
+ }
return 0;
}
@@ -325,25 +324,6 @@ static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
return 0;
}
-static int io_sendmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg)
-{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct user_msghdr msg;
- int ret;
-
- ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE, NULL);
- if (unlikely(ret))
- return ret;
-
- if (!(req->flags & REQ_F_BUFFER_SELECT))
- ret = io_net_import_vec(req, iomsg, msg.msg_iov, msg.msg_iovlen,
- ITER_SOURCE);
- /* save msg_control as sys_sendmsg() overwrites it */
- sr->msg_control = iomsg->msg.msg_control_user;
- return ret;
-}
-
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
struct io_async_msghdr *io = req->async_data;
@@ -379,6 +359,8 @@ static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
kmsg->msg.msg_name = &kmsg->addr;
kmsg->msg.msg_namelen = addr_len;
}
+ if (sr->flags & IORING_RECVSEND_FIXED_BUF)
+ return 0;
if (!io_do_buffer_select(req)) {
ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
&kmsg->msg.msg_iter);
@@ -392,31 +374,24 @@ static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *kmsg = req->async_data;
-
- sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
-
- return io_sendmsg_copy_hdr(req, kmsg);
-}
-
-static int io_sendmsg_zc_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct io_async_msghdr *kmsg = req->async_data;
struct user_msghdr msg;
int ret;
- if (!(sr->flags & IORING_RECVSEND_FIXED_BUF))
- return io_sendmsg_setup(req, sqe);
-
sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
-
ret = io_msg_copy_hdr(req, kmsg, &msg, ITER_SOURCE, NULL);
if (unlikely(ret))
return ret;
+ /* save msg_control as sys_sendmsg() overwrites it */
sr->msg_control = kmsg->msg.msg_control_user;
- kmsg->msg.msg_iter.nr_segs = msg.msg_iovlen;
- return io_prep_reg_iovec(req, &kmsg->vec, msg.msg_iov, msg.msg_iovlen);
+ if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
+ kmsg->msg.msg_iter.nr_segs = msg.msg_iovlen;
+ return io_prep_reg_iovec(req, &kmsg->vec, msg.msg_iov,
+ msg.msg_iovlen);
+ }
+ if (req->flags & REQ_F_BUFFER_SELECT)
+ return 0;
+ return io_net_import_vec(req, kmsg, msg.msg_iov, msg.msg_iovlen, ITER_SOURCE);
}
#define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE)
@@ -427,12 +402,6 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
sr->done_io = 0;
sr->retry = false;
-
- if (req->opcode != IORING_OP_SEND) {
- if (sqe->addr2 || sqe->file_index)
- return -EINVAL;
- }
-
sr->len = READ_ONCE(sqe->len);
sr->flags = READ_ONCE(sqe->ioprio);
if (sr->flags & ~SENDMSG_FLAGS)
@@ -458,6 +427,8 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -ENOMEM;
if (req->opcode != IORING_OP_SENDMSG)
return io_send_setup(req, sqe);
+ if (unlikely(sqe->addr2 || sqe->file_index))
+ return -EINVAL;
return io_sendmsg_setup(req, sqe);
}
@@ -1302,11 +1273,12 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_ring_ctx *ctx = req->ctx;
+ struct io_async_msghdr *iomsg;
struct io_kiocb *notif;
+ int ret;
zc->done_io = 0;
zc->retry = false;
- req->flags |= REQ_F_POLL_NO_LAZY;
if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
return -EINVAL;
@@ -1320,7 +1292,7 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
notif->cqe.user_data = req->cqe.user_data;
notif->cqe.res = 0;
notif->cqe.flags = IORING_CQE_F_NOTIF;
- req->flags |= REQ_F_NEED_CLEANUP;
+ req->flags |= REQ_F_NEED_CLEANUP | REQ_F_POLL_NO_LAZY;
zc->flags = READ_ONCE(sqe->ioprio);
if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
@@ -1335,11 +1307,6 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
}
}
- if (req->opcode != IORING_OP_SEND_ZC) {
- if (unlikely(sqe->addr2 || sqe->file_index))
- return -EINVAL;
- }
-
zc->len = READ_ONCE(sqe->len);
zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL | MSG_ZEROCOPY;
req->buf_index = READ_ONCE(sqe->buf_index);
@@ -1349,13 +1316,28 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (io_is_compat(req->ctx))
zc->msg_flags |= MSG_CMSG_COMPAT;
- if (unlikely(!io_msg_alloc_async(req)))
+ iomsg = io_msg_alloc_async(req);
+ if (unlikely(!iomsg))
return -ENOMEM;
+
if (req->opcode == IORING_OP_SEND_ZC) {
- req->flags |= REQ_F_IMPORT_BUFFER;
- return io_send_setup(req, sqe);
+ if (zc->flags & IORING_RECVSEND_FIXED_BUF)
+ req->flags |= REQ_F_IMPORT_BUFFER;
+ ret = io_send_setup(req, sqe);
+ } else {
+ if (unlikely(sqe->addr2 || sqe->file_index))
+ return -EINVAL;
+ ret = io_sendmsg_setup(req, sqe);
+ }
+ if (unlikely(ret))
+ return ret;
+
+ if (!(zc->flags & IORING_RECVSEND_FIXED_BUF)) {
+ iomsg->msg.sg_from_iter = io_sg_from_iter_iovec;
+ return io_notif_account_mem(zc->notif, iomsg->msg.msg_iter.count);
}
- return io_sendmsg_zc_setup(req, sqe);
+ iomsg->msg.sg_from_iter = io_sg_from_iter;
+ return 0;
}
static int io_sg_from_iter_iovec(struct sk_buff *skb,
@@ -1412,27 +1394,13 @@ static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *kmsg = req->async_data;
- int ret;
- if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
- sr->notif->buf_index = req->buf_index;
- ret = io_import_reg_buf(sr->notif, &kmsg->msg.msg_iter,
- (u64)(uintptr_t)sr->buf, sr->len,
- ITER_SOURCE, issue_flags);
- if (unlikely(ret))
- return ret;
- kmsg->msg.sg_from_iter = io_sg_from_iter;
- } else {
- ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
- if (unlikely(ret))
- return ret;
- ret = io_notif_account_mem(sr->notif, sr->len);
- if (unlikely(ret))
- return ret;
- kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
- }
+ WARN_ON_ONCE(!(sr->flags & IORING_RECVSEND_FIXED_BUF));
- return ret;
+ sr->notif->buf_index = req->buf_index;
+ return io_import_reg_buf(sr->notif, &kmsg->msg.msg_iter,
+ (u64)(uintptr_t)sr->buf, sr->len,
+ ITER_SOURCE, issue_flags);
}
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
@@ -1513,8 +1481,6 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
unsigned flags;
int ret, min_ret = 0;
- kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
-
if (req->flags & REQ_F_IMPORT_BUFFER) {
unsigned uvec_segs = kmsg->msg.msg_iter.nr_segs;
int ret;
@@ -1523,7 +1489,6 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
&kmsg->vec, uvec_segs, issue_flags);
if (unlikely(ret))
return ret;
- kmsg->msg.sg_from_iter = io_sg_from_iter;
req->flags &= ~REQ_F_IMPORT_BUFFER;
}
diff --git a/io_uring/refs.h b/io_uring/refs.h
index 63982ead9f7d..0d928d87c4ed 100644
--- a/io_uring/refs.h
+++ b/io_uring/refs.h
@@ -17,6 +17,13 @@ static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
return atomic_inc_not_zero(&req->refs);
}
+static inline bool req_ref_put_and_test_atomic(struct io_kiocb *req)
+{
+ WARN_ON_ONCE(!(data_race(req->flags) & REQ_F_REFCOUNT));
+ WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
+ return atomic_dec_and_test(&req->refs);
+}
+
static inline bool req_ref_put_and_test(struct io_kiocb *req)
{
if (likely(!(req->flags & REQ_F_REFCOUNT)))
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 3f195e24777e..5e64a8bb30a4 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -1002,20 +1002,33 @@ unlock:
}
EXPORT_SYMBOL_GPL(io_buffer_unregister_bvec);
-static int io_import_fixed(int ddir, struct iov_iter *iter,
- struct io_mapped_ubuf *imu,
- u64 buf_addr, size_t len)
+static int validate_fixed_range(u64 buf_addr, size_t len,
+ const struct io_mapped_ubuf *imu)
{
u64 buf_end;
- size_t offset;
- if (WARN_ON_ONCE(!imu))
- return -EFAULT;
if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
return -EFAULT;
/* not inside the mapped region */
if (unlikely(buf_addr < imu->ubuf || buf_end > (imu->ubuf + imu->len)))
return -EFAULT;
+ if (unlikely(len > MAX_RW_COUNT))
+ return -EFAULT;
+ return 0;
+}
+
+static int io_import_fixed(int ddir, struct iov_iter *iter,
+ struct io_mapped_ubuf *imu,
+ u64 buf_addr, size_t len)
+{
+ size_t offset;
+ int ret;
+
+ if (WARN_ON_ONCE(!imu))
+ return -EFAULT;
+ ret = validate_fixed_range(buf_addr, len, imu);
+ if (unlikely(ret))
+ return ret;
if (!(imu->dir & (1 << ddir)))
return -EFAULT;
@@ -1305,12 +1318,12 @@ static int io_vec_fill_bvec(int ddir, struct iov_iter *iter,
u64 buf_addr = (u64)(uintptr_t)iovec[iov_idx].iov_base;
struct bio_vec *src_bvec;
size_t offset;
- u64 buf_end;
+ int ret;
+
+ ret = validate_fixed_range(buf_addr, iov_len, imu);
+ if (unlikely(ret))
+ return ret;
- if (unlikely(check_add_overflow(buf_addr, (u64)iov_len, &buf_end)))
- return -EFAULT;
- if (unlikely(buf_addr < imu->ubuf || buf_end > (imu->ubuf + imu->len)))
- return -EFAULT;
if (unlikely(!iov_len))
return -EFAULT;
if (unlikely(check_add_overflow(total_len, iov_len, &total_len)))
@@ -1349,6 +1362,82 @@ static int io_estimate_bvec_size(struct iovec *iov, unsigned nr_iovs,
return max_segs;
}
+static int io_vec_fill_kern_bvec(int ddir, struct iov_iter *iter,
+ struct io_mapped_ubuf *imu,
+ struct iovec *iovec, unsigned nr_iovs,
+ struct iou_vec *vec)
+{
+ const struct bio_vec *src_bvec = imu->bvec;
+ struct bio_vec *res_bvec = vec->bvec;
+ unsigned res_idx = 0;
+ size_t total_len = 0;
+ unsigned iov_idx;
+
+ for (iov_idx = 0; iov_idx < nr_iovs; iov_idx++) {
+ size_t offset = (size_t)(uintptr_t)iovec[iov_idx].iov_base;
+ size_t iov_len = iovec[iov_idx].iov_len;
+ struct bvec_iter bi = {
+ .bi_size = offset + iov_len,
+ };
+ struct bio_vec bv;
+
+ bvec_iter_advance(src_bvec, &bi, offset);
+ for_each_mp_bvec(bv, src_bvec, bi, bi)
+ res_bvec[res_idx++] = bv;
+ total_len += iov_len;
+ }
+ iov_iter_bvec(iter, ddir, res_bvec, res_idx, total_len);
+ return 0;
+}
+
+static int iov_kern_bvec_size(const struct iovec *iov,
+ const struct io_mapped_ubuf *imu,
+ unsigned int *nr_seg)
+{
+ size_t offset = (size_t)(uintptr_t)iov->iov_base;
+ const struct bio_vec *bvec = imu->bvec;
+ int start = 0, i = 0;
+ size_t off = 0;
+ int ret;
+
+ ret = validate_fixed_range(offset, iov->iov_len, imu);
+ if (unlikely(ret))
+ return ret;
+
+ for (i = 0; off < offset + iov->iov_len && i < imu->nr_bvecs;
+ off += bvec[i].bv_len, i++) {
+ if (offset >= off && offset < off + bvec[i].bv_len)
+ start = i;
+ }
+ *nr_seg = i - start;
+ return 0;
+}
+
+static int io_kern_bvec_size(struct iovec *iov, unsigned nr_iovs,
+ struct io_mapped_ubuf *imu, unsigned *nr_segs)
+{
+ unsigned max_segs = 0;
+ size_t total_len = 0;
+ unsigned i;
+ int ret;
+
+ *nr_segs = 0;
+ for (i = 0; i < nr_iovs; i++) {
+ if (unlikely(!iov[i].iov_len))
+ return -EFAULT;
+ if (unlikely(check_add_overflow(total_len, iov[i].iov_len,
+ &total_len)))
+ return -EOVERFLOW;
+ ret = iov_kern_bvec_size(&iov[i], imu, &max_segs);
+ if (unlikely(ret))
+ return ret;
+ *nr_segs += max_segs;
+ }
+ if (total_len > MAX_RW_COUNT)
+ return -EINVAL;
+ return 0;
+}
+
int io_import_reg_vec(int ddir, struct iov_iter *iter,
struct io_kiocb *req, struct iou_vec *vec,
unsigned nr_iovs, unsigned issue_flags)
@@ -1363,14 +1452,20 @@ int io_import_reg_vec(int ddir, struct iov_iter *iter,
if (!node)
return -EFAULT;
imu = node->buf;
- if (imu->is_kbuf)
- return -EOPNOTSUPP;
if (!(imu->dir & (1 << ddir)))
return -EFAULT;
iovec_off = vec->nr - nr_iovs;
iov = vec->iovec + iovec_off;
- nr_segs = io_estimate_bvec_size(iov, nr_iovs, imu);
+
+ if (imu->is_kbuf) {
+ int ret = io_kern_bvec_size(iov, nr_iovs, imu, &nr_segs);
+
+ if (unlikely(ret))
+ return ret;
+ } else {
+ nr_segs = io_estimate_bvec_size(iov, nr_iovs, imu);
+ }
if (sizeof(struct bio_vec) > sizeof(struct iovec)) {
size_t bvec_bytes;
@@ -1397,6 +1492,9 @@ int io_import_reg_vec(int ddir, struct iov_iter *iter,
req->flags |= REQ_F_NEED_CLEANUP;
}
+ if (imu->is_kbuf)
+ return io_vec_fill_kern_bvec(ddir, iter, imu, iov, nr_iovs, vec);
+
return io_vec_fill_bvec(ddir, iter, imu, iov, nr_iovs, vec);
}
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index f2cfc371f3d0..a9ea7d29cdd9 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -205,8 +205,8 @@ static int io_uring_cmd_prep_setup(struct io_kiocb *req,
* that it doesn't read in per-op data, play it safe and ensure that
* any SQE data is stable beyond prep. This can later get relaxed.
*/
- memcpy(ac->data.sqes, sqe, uring_sqe_size(req->ctx));
- ioucmd->sqe = ac->data.sqes;
+ memcpy(ac->sqes, sqe, uring_sqe_size(req->ctx));
+ ioucmd->sqe = ac->sqes;
return 0;
}
@@ -307,17 +307,18 @@ static inline int io_uring_cmd_getsockopt(struct socket *sock,
struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
+ const struct io_uring_sqe *sqe = cmd->sqe;
bool compat = !!(issue_flags & IO_URING_F_COMPAT);
int optlen, optname, level, err;
void __user *optval;
- level = READ_ONCE(cmd->sqe->level);
+ level = READ_ONCE(sqe->level);
if (level != SOL_SOCKET)
return -EOPNOTSUPP;
- optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
- optname = READ_ONCE(cmd->sqe->optname);
- optlen = READ_ONCE(cmd->sqe->optlen);
+ optval = u64_to_user_ptr(READ_ONCE(sqe->optval));
+ optname = READ_ONCE(sqe->optname);
+ optlen = READ_ONCE(sqe->optlen);
err = do_sock_getsockopt(sock, compat, level, optname,
USER_SOCKPTR(optval),
@@ -333,15 +334,16 @@ static inline int io_uring_cmd_setsockopt(struct socket *sock,
struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
+ const struct io_uring_sqe *sqe = cmd->sqe;
bool compat = !!(issue_flags & IO_URING_F_COMPAT);
int optname, optlen, level;
void __user *optval;
sockptr_t optval_s;
- optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
- optname = READ_ONCE(cmd->sqe->optname);
- optlen = READ_ONCE(cmd->sqe->optlen);
- level = READ_ONCE(cmd->sqe->level);
+ optval = u64_to_user_ptr(READ_ONCE(sqe->optval));
+ optname = READ_ONCE(sqe->optname);
+ optlen = READ_ONCE(sqe->optlen);
+ level = READ_ONCE(sqe->level);
optval_s = USER_SOCKPTR(optval);
return do_sock_setsockopt(sock, compat, level, optname, optval_s,
diff --git a/io_uring/uring_cmd.h b/io_uring/uring_cmd.h
index 14e525255854..b04686b6b5d2 100644
--- a/io_uring/uring_cmd.h
+++ b/io_uring/uring_cmd.h
@@ -6,6 +6,7 @@
struct io_async_cmd {
struct io_uring_cmd_data data;
struct iou_vec vec;
+ struct io_uring_sqe sqes[2];
};
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags);
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 9c95b5b6ec4e..80d4a6f71d29 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -818,6 +818,14 @@ io_zcrx_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
int ret = 0;
len = min_t(size_t, len, desc->count);
+ /*
+ * __tcp_read_sock() always calls io_zcrx_recv_skb one last time, even
+ * if desc->count is already 0. This is caused by the if (offset + 1 !=
+ * skb->len) check. Return early in this case to break out of
+ * __tcp_read_sock().
+ */
+ if (!len)
+ return 0;
if (unlikely(args->nr_skbs++ > IO_SKBS_PER_CALL_LIMIT))
return -EAGAIN;
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 38ef6d06888e..ce1435cb08b1 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -30,7 +30,7 @@ choice
250 Hz is a good compromise choice allowing server performance
while also showing good interactive responsiveness even
on SMP and NUMA systems. If you are going to be using NTSC video
- or multimedia, selected 300Hz instead.
+ or multimedia, select 300Hz instead.
config HZ_300
bool "300 HZ"
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 11ea8d24ac72..fa24c032ed6f 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -851,7 +851,7 @@ static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent
if (kernfs_type(kn) != KERNFS_DIR)
return -ENOTDIR;
- if (kn->parent != new_parent)
+ if (rcu_access_pointer(kn->__parent) != new_parent)
return -EIO;
/*
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index f231fe3a0744..ac2db99941ca 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -633,9 +633,22 @@ int cgroup_task_count(const struct cgroup *cgrp)
return count;
}
+static struct cgroup *kn_priv(struct kernfs_node *kn)
+{
+ struct kernfs_node *parent;
+ /*
+ * The parent cannot be replaced due to KERNFS_ROOT_INVARIANT_PARENT.
+ * Therefore it is always safe to dereference this pointer outside of
+ * an RCU section.
+ */
+ parent = rcu_dereference_check(kn->__parent,
+ kernfs_root_flags(kn) & KERNFS_ROOT_INVARIANT_PARENT);
+ return parent->priv;
+}
+
struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
{
- struct cgroup *cgrp = of->kn->parent->priv;
+ struct cgroup *cgrp = kn_priv(of->kn);
struct cftype *cft = of_cft(of);
/*
@@ -1612,7 +1625,7 @@ void cgroup_kn_unlock(struct kernfs_node *kn)
if (kernfs_type(kn) == KERNFS_DIR)
cgrp = kn->priv;
else
- cgrp = kn->parent->priv;
+ cgrp = kn_priv(kn);
cgroup_unlock();
@@ -1644,7 +1657,7 @@ struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline)
if (kernfs_type(kn) == KERNFS_DIR)
cgrp = kn->priv;
else
- cgrp = kn->parent->priv;
+ cgrp = kn_priv(kn);
/*
* We're gonna grab cgroup_mutex which nests outside kernfs
@@ -2118,7 +2131,8 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
root->kf_root = kernfs_create_root(kf_sops,
KERNFS_ROOT_CREATE_DEACTIVATED |
KERNFS_ROOT_SUPPORT_EXPORTOP |
- KERNFS_ROOT_SUPPORT_USER_XATTR,
+ KERNFS_ROOT_SUPPORT_USER_XATTR |
+ KERNFS_ROOT_INVARIANT_PARENT,
root_cgrp);
if (IS_ERR(root->kf_root)) {
ret = PTR_ERR(root->kf_root);
@@ -4115,7 +4129,7 @@ static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off)
{
struct cgroup_file_ctx *ctx = of->priv;
- struct cgroup *cgrp = of->kn->parent->priv;
+ struct cgroup *cgrp = kn_priv(of->kn);
struct cftype *cft = of_cft(of);
struct cgroup_subsys_state *css;
int ret;
diff --git a/kernel/configs/debug.config b/kernel/configs/debug.config
index 20552f163930..8aafd050b754 100644
--- a/kernel/configs/debug.config
+++ b/kernel/configs/debug.config
@@ -73,7 +73,6 @@ CONFIG_DEBUG_VM=y
CONFIG_DEBUG_VM_PGFLAGS=y
CONFIG_DEBUG_VM_RB=y
CONFIG_DEBUG_VM_VMACACHE=y
-CONFIG_GENERIC_PTDUMP=y
CONFIG_KASAN=y
CONFIG_KASAN_GENERIC=y
CONFIG_KASAN_INLINE=y
diff --git a/kernel/crash_reserve.c b/kernel/crash_reserve.c
index a620fb4b2116..aff7c0fdbefa 100644
--- a/kernel/crash_reserve.c
+++ b/kernel/crash_reserve.c
@@ -375,11 +375,10 @@ static int __init reserve_crashkernel_low(unsigned long long low_size)
return 0;
}
-void __init reserve_crashkernel_generic(char *cmdline,
- unsigned long long crash_size,
- unsigned long long crash_base,
- unsigned long long crash_low_size,
- bool high)
+void __init reserve_crashkernel_generic(unsigned long long crash_size,
+ unsigned long long crash_base,
+ unsigned long long crash_low_size,
+ bool high)
{
unsigned long long search_end = CRASH_ADDR_LOW_MAX, search_base = 0;
bool fixed_base = false;
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index ce1bb2301c06..0b9495187fba 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -837,10 +837,6 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
{
struct kgdb_state kgdb_var;
struct kgdb_state *ks = &kgdb_var;
- int ret = 0;
-
- if (arch_kgdb_ops.enable_nmi)
- arch_kgdb_ops.enable_nmi(0);
/*
* Avoid entering the debugger if we were triggered due to an oops
* but panic_timeout indicates the system should automatically
@@ -858,15 +854,11 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
ks->linux_regs = regs;
if (kgdb_reenter_check(ks))
- goto out; /* Ouch, double exception ! */
+ return 0; /* Ouch, double exception ! */
if (kgdb_info[ks->cpu].enter_kgdb != 0)
- goto out;
+ return 0;
- ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
-out:
- if (arch_kgdb_ops.enable_nmi)
- arch_kgdb_ops.enable_nmi(1);
- return ret;
+ return kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
}
NOKPROBE_SYMBOL(kgdb_handle_exception);
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 6a77f1c779c4..9b11b10b120c 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -334,7 +334,7 @@ poll_again:
*cp = '\0';
p_tmp = strrchr(buffer, ' ');
p_tmp = (p_tmp ? p_tmp + 1 : buffer);
- strscpy(tmpbuffer, p_tmp, sizeof(tmpbuffer));
+ strscpy(tmpbuffer, p_tmp);
*cp = tmp;
len = strlen(tmpbuffer);
@@ -452,7 +452,7 @@ poll_again:
char *kdb_getstr(char *buffer, size_t bufsize, const char *prompt)
{
if (prompt && kdb_prompt_str != prompt)
- strscpy(kdb_prompt_str, prompt, CMD_BUFLEN);
+ strscpy(kdb_prompt_str, prompt);
kdb_printf("%s", kdb_prompt_str);
kdb_nextline = 1; /* Prompt and input resets line number */
return kdb_read(buffer, bufsize);
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 5f4be507d79f..7a4d2d4689a5 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -25,7 +25,6 @@
#include <linux/smp.h>
#include <linux/utsname.h>
#include <linux/vmalloc.h>
-#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/init.h>
@@ -105,7 +104,7 @@ static kdbmsg_t kdbmsgs[] = {
KDBMSG(NOENVVALUE, "Environment variable should have value"),
KDBMSG(NOTIMP, "Command not implemented"),
KDBMSG(ENVFULL, "Environment full"),
- KDBMSG(ENVBUFFULL, "Environment buffer full"),
+ KDBMSG(KMALLOCFAILED, "Failed to allocate memory"),
KDBMSG(TOOMANYBPT, "Too many breakpoints defined"),
#ifdef CONFIG_CPU_XSCALE
KDBMSG(TOOMANYDBREGS, "More breakpoints than ibcr registers defined"),
@@ -130,13 +129,9 @@ static const int __nkdb_err = ARRAY_SIZE(kdbmsgs);
/*
- * Initial environment. This is all kept static and local to
- * this file. We don't want to rely on the memory allocation
- * mechanisms in the kernel, so we use a very limited allocate-only
- * heap for new and altered environment variables. The entire
- * environment is limited to a fixed number of entries (add more
- * to __env[] if required) and a fixed amount of heap (add more to
- * KDB_ENVBUFSIZE if required).
+ * Initial environment. This is all kept static and local to this file.
+ * The entire environment is limited to a fixed number of entries
+ * (add more to __env[] if required).
*/
static char *__env[31] = {
@@ -259,35 +254,6 @@ char *kdbgetenv(const char *match)
}
/*
- * kdballocenv - This function is used to allocate bytes for
- * environment entries.
- * Parameters:
- * bytes The number of bytes to allocate in the static buffer.
- * Returns:
- * A pointer to the allocated space in the buffer on success.
- * NULL if bytes > size available in the envbuffer.
- * Remarks:
- * We use a static environment buffer (envbuffer) to hold the values
- * of dynamically generated environment variables (see kdb_set). Buffer
- * space once allocated is never free'd, so over time, the amount of space
- * (currently 512 bytes) will be exhausted if env variables are changed
- * frequently.
- */
-static char *kdballocenv(size_t bytes)
-{
-#define KDB_ENVBUFSIZE 512
- static char envbuffer[KDB_ENVBUFSIZE];
- static int envbufsize;
- char *ep = NULL;
-
- if ((KDB_ENVBUFSIZE - envbufsize) >= bytes) {
- ep = &envbuffer[envbufsize];
- envbufsize += bytes;
- }
- return ep;
-}
-
-/*
* kdbgetulenv - This function will return the value of an unsigned
* long-valued environment variable.
* Parameters:
@@ -348,9 +314,9 @@ static int kdb_setenv(const char *var, const char *val)
varlen = strlen(var);
vallen = strlen(val);
- ep = kdballocenv(varlen + vallen + 2);
- if (ep == (char *)0)
- return KDB_ENVBUFFULL;
+ ep = kmalloc(varlen + vallen + 2, GFP_KDB);
+ if (!ep)
+ return KDB_KMALLOCFAILED;
sprintf(ep, "%s=%s", var, val);
@@ -359,6 +325,7 @@ static int kdb_setenv(const char *var, const char *val)
&& ((strncmp(__env[i], var, varlen) == 0)
&& ((__env[i][varlen] == '\0')
|| (__env[i][varlen] == '=')))) {
+ kfree_const(__env[i]);
__env[i] = ep;
return 0;
}
@@ -2119,32 +2086,6 @@ static int kdb_dmesg(int argc, const char **argv)
return 0;
}
#endif /* CONFIG_PRINTK */
-
-/* Make sure we balance enable/disable calls, must disable first. */
-static atomic_t kdb_nmi_disabled;
-
-static int kdb_disable_nmi(int argc, const char *argv[])
-{
- if (atomic_read(&kdb_nmi_disabled))
- return 0;
- atomic_set(&kdb_nmi_disabled, 1);
- arch_kgdb_ops.enable_nmi(0);
- return 0;
-}
-
-static int kdb_param_enable_nmi(const char *val, const struct kernel_param *kp)
-{
- if (!atomic_add_unless(&kdb_nmi_disabled, -1, 0))
- return -EINVAL;
- arch_kgdb_ops.enable_nmi(1);
- return 0;
-}
-
-static const struct kernel_param_ops kdb_param_ops_enable_nmi = {
- .set = kdb_param_enable_nmi,
-};
-module_param_cb(enable_nmi, &kdb_param_ops_enable_nmi, NULL, 0600);
-
/*
* kdb_cpu - This function implements the 'cpu' command.
* cpu [<cpunum>]
@@ -2836,20 +2777,10 @@ static kdbtab_t maintab[] = {
},
};
-static kdbtab_t nmicmd = {
- .name = "disable_nmi",
- .func = kdb_disable_nmi,
- .usage = "",
- .help = "Disable NMI entry to KDB",
- .flags = KDB_ENABLE_ALWAYS_SAFE,
-};
-
/* Initialize the kdb command table. */
static void __init kdb_inittab(void)
{
kdb_register_table(maintab, ARRAY_SIZE(maintab));
- if (arch_kgdb_ops.enable_nmi)
- kdb_register_table(&nmicmd, 1);
}
/* Execute any commands defined in kdb_cmds. */
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 70c84b9d7be3..615b4e6d22c7 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -173,6 +173,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
int err;
struct mmu_notifier_range range;
+ pte_t pte;
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
addr + PAGE_SIZE);
@@ -192,6 +193,16 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
if (!page_vma_mapped_walk(&pvmw))
goto unlock;
VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
+ pte = ptep_get(pvmw.pte);
+
+ /*
+ * Handle PFN swap PTES, such as device-exclusive ones, that actually
+ * map pages: simply trigger GUP again to fix it up.
+ */
+ if (unlikely(!pte_present(pte))) {
+ page_vma_mapped_walk_done(&pvmw);
+ goto unlock;
+ }
if (new_page) {
folio_get(new_folio);
@@ -206,7 +217,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
inc_mm_counter(mm, MM_ANONPAGES);
}
- flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte)));
+ flush_cache_page(vma, addr, pte_pfn(pte));
ptep_clear_flush(vma, addr, pvmw.pte);
if (new_page)
set_pte_at(mm, addr, pvmw.pte,
@@ -1692,7 +1703,8 @@ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
}
vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
- VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
+ VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO|
+ VM_SEALED_SYSMAP,
&xol_mapping);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
diff --git a/kernel/exit.c b/kernel/exit.c
index c2e6c7b7779f..1b51dc099f1e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -268,6 +268,9 @@ repeat:
leader = p->group_leader;
if (leader != p && thread_group_empty(leader)
&& leader->exit_state == EXIT_ZOMBIE) {
+ /* for pidfs_exit() and do_notify_parent() */
+ if (leader->signal->flags & SIGNAL_GROUP_EXIT)
+ leader->exit_code = leader->signal->group_exit_code;
/*
* If we were the last child thread and the leader has
* exited already, and the leader's parent ignores SIGCHLD,
@@ -756,12 +759,6 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
kill_orphaned_pgrp(tsk->group_leader, NULL);
tsk->exit_state = EXIT_ZOMBIE;
- /*
- * Ignore thread-group leaders that exited before all
- * subthreads did.
- */
- if (!delay_group_leader(tsk))
- do_notify_pidfd(tsk);
if (unlikely(tsk->ptrace)) {
int sig = thread_group_leader(tsk) &&
@@ -774,6 +771,8 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
do_notify_parent(tsk, tsk->exit_signal);
} else {
autoreap = true;
+ /* untraced sub-thread */
+ do_notify_pidfd(tsk);
}
if (autoreap) {
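
The exit.c and signal.c changes in this series move the do_notify_pidfd() call sites so pidfd pollers are woken exactly once per exit, including for delayed thread-group leaders. From userspace the observable contract is unchanged: a pidfd becomes readable when the task exits. A hedged userspace sketch (pidfd_open lacks a glibc wrapper on older systems, hence the raw syscall):

	#define _GNU_SOURCE
	#include <poll.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int wait_for_exit(pid_t pid)
	{
		int pidfd = syscall(SYS_pidfd_open, pid, 0);
		struct pollfd pfd = { .fd = pidfd, .events = POLLIN };

		if (pidfd < 0)
			return -1;
		poll(&pfd, 1, -1);	/* POLLIN once the task has exited */
		close(pidfd);
		return 0;
	}
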
diff --git a/kernel/fork.c b/kernel/fork.c
index 1b659b07ecd5..c4b26cd8998b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -311,11 +311,9 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
* so memcg accounting is performed manually on assigning/releasing
* stacks to tasks. Drop __GFP_ACCOUNT.
*/
- stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
- VMALLOC_START, VMALLOC_END,
+ stack = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN,
THREADINFO_GFP & ~__GFP_ACCOUNT,
- PAGE_KERNEL,
- 0, node, __builtin_return_address(0));
+ node, __builtin_return_address(0));
if (!stack)
return -ENOMEM;
@@ -436,35 +434,6 @@ static struct kmem_cache *vm_area_cachep;
/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
-#ifdef CONFIG_PER_VMA_LOCK
-
-/* SLAB cache for vm_area_struct.lock */
-static struct kmem_cache *vma_lock_cachep;
-
-static bool vma_lock_alloc(struct vm_area_struct *vma)
-{
- vma->vm_lock = kmem_cache_alloc(vma_lock_cachep, GFP_KERNEL);
- if (!vma->vm_lock)
- return false;
-
- init_rwsem(&vma->vm_lock->lock);
- vma->vm_lock_seq = UINT_MAX;
-
- return true;
-}
-
-static inline void vma_lock_free(struct vm_area_struct *vma)
-{
- kmem_cache_free(vma_lock_cachep, vma->vm_lock);
-}
-
-#else /* CONFIG_PER_VMA_LOCK */
-
-static inline bool vma_lock_alloc(struct vm_area_struct *vma) { return true; }
-static inline void vma_lock_free(struct vm_area_struct *vma) {}
-
-#endif /* CONFIG_PER_VMA_LOCK */
-
struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
struct vm_area_struct *vma;
@@ -474,14 +443,46 @@ struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
return NULL;
vma_init(vma, mm);
- if (!vma_lock_alloc(vma)) {
- kmem_cache_free(vm_area_cachep, vma);
- return NULL;
- }
return vma;
}
+static void vm_area_init_from(const struct vm_area_struct *src,
+ struct vm_area_struct *dest)
+{
+ dest->vm_mm = src->vm_mm;
+ dest->vm_ops = src->vm_ops;
+ dest->vm_start = src->vm_start;
+ dest->vm_end = src->vm_end;
+ dest->anon_vma = src->anon_vma;
+ dest->vm_pgoff = src->vm_pgoff;
+ dest->vm_file = src->vm_file;
+ dest->vm_private_data = src->vm_private_data;
+ vm_flags_init(dest, src->vm_flags);
+ memcpy(&dest->vm_page_prot, &src->vm_page_prot,
+ sizeof(dest->vm_page_prot));
+ /*
+ * src->shared.rb may be modified concurrently when called from
+ * dup_mmap(), but the clone will reinitialize it.
+ */
+ data_race(memcpy(&dest->shared, &src->shared, sizeof(dest->shared)));
+ memcpy(&dest->vm_userfaultfd_ctx, &src->vm_userfaultfd_ctx,
+ sizeof(dest->vm_userfaultfd_ctx));
+#ifdef CONFIG_ANON_VMA_NAME
+ dest->anon_name = src->anon_name;
+#endif
+#ifdef CONFIG_SWAP
+ memcpy(&dest->swap_readahead_info, &src->swap_readahead_info,
+ sizeof(dest->swap_readahead_info));
+#endif
+#ifndef CONFIG_MMU
+ dest->vm_region = src->vm_region;
+#endif
+#ifdef CONFIG_NUMA
+ dest->vm_policy = src->vm_policy;
+#endif
+}
+
struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
{
struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
@@ -491,15 +492,8 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
ASSERT_EXCLUSIVE_WRITER(orig->vm_flags);
ASSERT_EXCLUSIVE_WRITER(orig->vm_file);
- /*
- * orig->shared.rb may be modified concurrently, but the clone
- * will be reinitialized.
- */
- data_race(memcpy(new, orig, sizeof(*new)));
- if (!vma_lock_alloc(new)) {
- kmem_cache_free(vm_area_cachep, new);
- return NULL;
- }
+ vm_area_init_from(orig, new);
+ vma_lock_init(new, true);
INIT_LIST_HEAD(&new->anon_vma_chain);
vma_numab_state_init(new);
dup_anon_vma_name(orig, new);
@@ -511,35 +505,15 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
return new;
}
-void __vm_area_free(struct vm_area_struct *vma)
+void vm_area_free(struct vm_area_struct *vma)
{
+ /* The vma should be detached while being destroyed. */
+ vma_assert_detached(vma);
vma_numab_state_free(vma);
free_anon_vma_name(vma);
- vma_lock_free(vma);
kmem_cache_free(vm_area_cachep, vma);
}
-#ifdef CONFIG_PER_VMA_LOCK
-static void vm_area_free_rcu_cb(struct rcu_head *head)
-{
- struct vm_area_struct *vma = container_of(head, struct vm_area_struct,
- vm_rcu);
-
- /* The vma should not be locked while being destroyed. */
- VM_BUG_ON_VMA(rwsem_is_locked(&vma->vm_lock->lock), vma);
- __vm_area_free(vma);
-}
-#endif
-
-void vm_area_free(struct vm_area_struct *vma)
-{
-#ifdef CONFIG_PER_VMA_LOCK
- call_rcu(&vma->vm_rcu, vm_area_free_rcu_cb);
-#else
- __vm_area_free(vma);
-#endif
-}
-
static void account_kernel_stack(struct task_struct *tsk, int account)
{
if (IS_ENABLED(CONFIG_VMAP_STACK)) {
@@ -830,6 +804,36 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */
+#ifdef CONFIG_MM_ID
+static DEFINE_IDA(mm_ida);
+
+static inline int mm_alloc_id(struct mm_struct *mm)
+{
+ int ret;
+
+ ret = ida_alloc_range(&mm_ida, MM_ID_MIN, MM_ID_MAX, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ mm->mm_id = ret;
+ return 0;
+}
+
+static inline void mm_free_id(struct mm_struct *mm)
+{
+ const mm_id_t id = mm->mm_id;
+
+ mm->mm_id = MM_ID_DUMMY;
+ if (id == MM_ID_DUMMY)
+ return;
+ if (WARN_ON_ONCE(id < MM_ID_MIN || id > MM_ID_MAX))
+ return;
+ ida_free(&mm_ida, id);
+}
+#else /* !CONFIG_MM_ID */
+static inline int mm_alloc_id(struct mm_struct *mm) { return 0; }
+static inline void mm_free_id(struct mm_struct *mm) {}
+#endif /* CONFIG_MM_ID */
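
mm_alloc_id()/mm_free_id() are a thin wrapper around the IDA allocator; the same shape works for any bounded integer ID space. A hedged generic sketch (MY_ID_MIN/MY_ID_MAX are invented bounds, not kernel constants):

	#include <linux/idr.h>

	static DEFINE_IDA(my_ida);
	#define MY_ID_MIN	1
	#define MY_ID_MAX	255

	static int my_alloc_id(int *out)
	{
		int id = ida_alloc_range(&my_ida, MY_ID_MIN, MY_ID_MAX,
					 GFP_KERNEL);

		if (id < 0)
			return id;	/* -ENOSPC once the range is full */
		*out = id;
		return 0;
	}

	static void my_free_id(int id)
	{
		ida_free(&my_ida, id);
	}
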
+
static void check_mm(struct mm_struct *mm)
{
int i;
@@ -933,6 +937,7 @@ void __mmdrop(struct mm_struct *mm)
WARN_ON_ONCE(mm == current->active_mm);
mm_free_pgd(mm);
+ mm_free_id(mm);
destroy_context(mm);
mmu_notifier_subscriptions_destroy(mm);
check_mm(mm);
@@ -1267,6 +1272,15 @@ static void mm_init_uprobes_state(struct mm_struct *mm)
#endif
}
+static void mmap_init_lock(struct mm_struct *mm)
+{
+ init_rwsem(&mm->mmap_lock);
+ mm_lock_seqcount_init(mm);
+#ifdef CONFIG_PER_VMA_LOCK
+ rcuwait_init(&mm->vma_writer_wait);
+#endif
+}
+
static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
struct user_namespace *user_ns)
{
@@ -1308,6 +1322,9 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
if (mm_alloc_pgd(mm))
goto fail_nopgd;
+ if (mm_alloc_id(mm))
+ goto fail_noid;
+
if (init_new_context(p, mm))
goto fail_nocontext;
@@ -1327,6 +1344,8 @@ fail_pcpu:
fail_cid:
destroy_context(mm);
fail_nocontext:
+ mm_free_id(mm);
+fail_noid:
mm_free_pgd(mm);
fail_nopgd:
free_mm(mm);
@@ -1563,6 +1582,17 @@ struct mm_struct *get_task_mm(struct task_struct *task)
}
EXPORT_SYMBOL_GPL(get_task_mm);
+static bool may_access_mm(struct mm_struct *mm, struct task_struct *task, unsigned int mode)
+{
+ if (mm == current->mm)
+ return true;
+ if (ptrace_may_access(task, mode))
+ return true;
+ if ((mode & PTRACE_MODE_READ) && perfmon_capable())
+ return true;
+ return false;
+}
+
struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
{
struct mm_struct *mm;
@@ -1575,7 +1605,7 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
mm = get_task_mm(task);
if (!mm) {
mm = ERR_PTR(-ESRCH);
- } else if (mm != current->mm && !ptrace_may_access(task, mode)) {
+ } else if (!may_access_mm(mm, task, mode)) {
mmput(mm);
mm = ERR_PTR(-EACCES);
}
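
With may_access_mm(), a PTRACE_MODE_READ caller that is perfmon_capable() (a profiler, say) can obtain another task's mm without full ptrace rights. The caller-side pattern is unchanged; a hedged sketch:

	struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);

	if (IS_ERR(mm))
		return PTR_ERR(mm);	/* -ESRCH or -EACCES */
	/* ... inspect the mm, e.g. via access_remote_vm() ... */
	mmput(mm);
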
@@ -3183,6 +3213,11 @@ void __init mm_cache_init(void)
void __init proc_caches_init(void)
{
+ struct kmem_cache_args args = {
+ .use_freeptr_offset = true,
+ .freeptr_offset = offsetof(struct vm_area_struct, vm_freeptr),
+ };
+
sighand_cachep = kmem_cache_create("sighand_cache",
sizeof(struct sighand_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
@@ -3199,11 +3234,10 @@ void __init proc_caches_init(void)
sizeof(struct fs_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
NULL);
-
- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
-#ifdef CONFIG_PER_VMA_LOCK
- vma_lock_cachep = KMEM_CACHE(vma_lock, SLAB_PANIC|SLAB_ACCOUNT);
-#endif
+ vm_area_cachep = kmem_cache_create("vm_area_struct",
+ sizeof(struct vm_area_struct), &args,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
+ SLAB_ACCOUNT);
mmap_init();
nsproxy_cache_init();
}
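
Backing vm_area_struct with SLAB_TYPESAFE_BY_RCU plus an in-object free pointer (vm_freeptr) is why vm_area_dup() now performs a memberwise vm_area_init_from() rather than memcpy(): a whole-struct copy could trample state that must stay valid for concurrent RCU readers. The usual reader discipline for such caches, as a hedged generic sketch (lookup(), try_get_ref() and put_ref() are hypothetical helpers):

	rcu_read_lock();
	obj = lookup(key);			/* may be freed and reused */
	if (obj && try_get_ref(obj)) {		/* stabilize the object */
		if (obj->key != key) {		/* recycled for another key? */
			put_ref(obj);
			obj = NULL;
		}
	}
	rcu_read_unlock();
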
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 04efa7a6e69b..dc898ec93463 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -93,6 +93,43 @@ static struct notifier_block panic_block = {
.notifier_call = hung_task_panic,
};
+
+#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
+static void debug_show_blocker(struct task_struct *task)
+{
+ struct task_struct *g, *t;
+ unsigned long owner;
+ struct mutex *lock;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "No rcu lock held");
+
+ lock = READ_ONCE(task->blocker_mutex);
+ if (!lock)
+ return;
+
+ owner = mutex_get_owner(lock);
+ if (unlikely(!owner)) {
+ pr_err("INFO: task %s:%d is blocked on a mutex, but the owner is not found.\n",
+ task->comm, task->pid);
+ return;
+ }
+
+ /* Ensure the owner information is correct. */
+ for_each_process_thread(g, t) {
+ if ((unsigned long)t == owner) {
+ pr_err("INFO: task %s:%d is blocked on a mutex likely owned by task %s:%d.\n",
+ task->comm, task->pid, t->comm, t->pid);
+ sched_show_task(t);
+ return;
+ }
+ }
+}
+#else
+static inline void debug_show_blocker(struct task_struct *task)
+{
+}
+#endif
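
To see debug_show_blocker() fire, it is enough to park one kthread on a mutex that another holds past hung_task_timeout_secs. A hedged test-module sketch (not an upstream selftest; thread start order is racy and a real test would synchronize it):

	#include <linux/delay.h>
	#include <linux/kthread.h>
	#include <linux/module.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(test_lock);

	static int holder_fn(void *unused)
	{
		mutex_lock(&test_lock);
		ssleep(600);		/* hold well past the hung task timeout */
		mutex_unlock(&test_lock);
		return 0;
	}

	static int waiter_fn(void *unused)
	{
		mutex_lock(&test_lock);	/* sleeps uninterruptibly -> "hung" */
		mutex_unlock(&test_lock);
		return 0;
	}

	static int __init repro_init(void)
	{
		kthread_run(holder_fn, NULL, "blocker");
		kthread_run(waiter_fn, NULL, "hung-waiter");
		return 0;
	}
	module_init(repro_init);
	MODULE_LICENSE("GPL");
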
+
static void check_hung_task(struct task_struct *t, unsigned long timeout)
{
unsigned long switch_count = t->nvcsw + t->nivcsw;
@@ -152,6 +189,7 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
" disables this message.\n");
sched_show_task(t);
+ debug_show_blocker(t);
hung_task_show_lock = true;
if (sysctl_hung_task_all_cpu_backtrace)
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index c22ad51c4317..3e62b944c883 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -210,6 +210,16 @@ int sanity_check_segment_list(struct kimage *image)
}
#endif
+ /*
+ * The destination addresses are searched from system RAM rather than
+ * being allocated from the buddy allocator, so they are not guaranteed
+ * to be accepted by the current kernel. Accept the destination
+ * addresses before kexec swaps their content with the segments' source
+ * pages to avoid accessing memory before it is accepted.
+ */
+ for (i = 0; i < nr_segments; i++)
+ accept_memory(image->segment[i].mem, image->segment[i].memsz);
+
return 0;
}
diff --git a/kernel/kexec_elf.c b/kernel/kexec_elf.c
index d3689632e8b9..3a5c25b2adc9 100644
--- a/kernel/kexec_elf.c
+++ b/kernel/kexec_elf.c
@@ -390,7 +390,7 @@ int kexec_elf_load(struct kimage *image, struct elfhdr *ehdr,
struct kexec_buf *kbuf,
unsigned long *lowest_load_addr)
{
- unsigned long lowest_addr = UINT_MAX;
+ unsigned long lowest_addr = ULONG_MAX;
int ret;
size_t i;
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index 3eedb8c226ad..fba686487e3b 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -464,6 +464,12 @@ static int locate_mem_hole_top_down(unsigned long start, unsigned long end,
continue;
}
+ /* Make sure this does not conflict with exclude range */
+ if (arch_check_excluded_range(image, temp_start, temp_end)) {
+ temp_start = temp_start - PAGE_SIZE;
+ continue;
+ }
+
/* We found a suitable memory range */
break;
} while (1);
@@ -498,6 +504,12 @@ static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
continue;
}
+ /* Make sure this does not conflict with exclude range */
+ if (arch_check_excluded_range(image, temp_start, temp_end)) {
+ temp_start = temp_start + PAGE_SIZE;
+ continue;
+ }
+
/* We found a suitable memory range */
break;
} while (1);
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 19b636f60a24..555e2b3a665a 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -72,6 +72,14 @@ static inline unsigned long __owner_flags(unsigned long owner)
return owner & MUTEX_FLAGS;
}
+/* Do not use the return value as a pointer directly. */
+unsigned long mutex_get_owner(struct mutex *lock)
+{
+ unsigned long owner = atomic_long_read(&lock->owner);
+
+ return (unsigned long)__owner_task(owner);
+}
+
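
mutex_get_owner() exists because lock->owner is not a clean pointer: the task_struct address is packed with low-order flag bits, and the owner may exit at any moment, so callers such as debug_show_blocker() may only compare the value against task pointers, never dereference it. The layout, as I read kernel/locking/mutex.h (the flag values are an assumption, not part of this patch):

	#define MUTEX_FLAG_WAITERS	0x01	/* assumed values */
	#define MUTEX_FLAG_HANDOFF	0x02
	#define MUTEX_FLAG_PICKUP	0x04
	#define MUTEX_FLAGS		0x07

	static inline struct task_struct *__owner_task(unsigned long owner)
	{
		return (struct task_struct *)(owner & ~MUTEX_FLAGS);
	}
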
/*
* Returns: __mutex_owner(lock) on failure or NULL on success.
*/
@@ -182,6 +190,9 @@ static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct list_head *list)
{
+#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
+ WRITE_ONCE(current->blocker_mutex, lock);
+#endif
debug_mutex_add_waiter(lock, waiter, current);
list_add_tail(&waiter->list, list);
@@ -197,6 +208,9 @@ __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
__mutex_clear_flag(lock, MUTEX_FLAGS);
debug_mutex_remove_waiter(lock, waiter, current);
+#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
+ WRITE_ONCE(current->blocker_mutex, NULL);
+#endif
}
/*
diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
index 6083883c4fe0..d6964fc29f51 100644
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -184,7 +184,7 @@ EXPORT_SYMBOL_GPL(__percpu_down_read);
#define per_cpu_sum(var) \
({ \
- typeof(var) __sum = 0; \
+ TYPEOF_UNQUAL(var) __sum = 0; \
int cpu; \
compiletime_assert_atomic_type(__sum); \
for_each_possible_cpu(cpu) \
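
The per_cpu_sum() fix matters because typeof() preserves qualifiers: when var is const- or address-space-qualified (as percpu variables are under some compilers), the accumulator inherits the qualifier and the later `__sum += ...` no longer compiles. TYPEOF_UNQUAL() strips the qualifiers. A hedged illustration:

	const int sample = 4;

	typeof(sample) a = 0;		/* const int: "a += sample" is an error */
	TYPEOF_UNQUAL(sample) b = 0;	/* plain int: accumulation works */
	b += sample;
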
diff --git a/kernel/panic.c b/kernel/panic.c
index 0c55eec9e874..a3889f38153d 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -833,9 +833,15 @@ device_initcall(register_warn_debugfs);
*/
__visible noinstr void __stack_chk_fail(void)
{
+ unsigned long flags;
+
instrumentation_begin();
+ flags = user_access_save();
+
panic("stack-protector: Kernel stack is corrupted in: %pB",
__builtin_return_address(0));
+
+ user_access_restore(flags);
instrumentation_end();
}
EXPORT_SYMBOL(__stack_chk_fail);
diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
index aa42de4d2768..4d9b21f69eaa 100644
--- a/kernel/rcu/Kconfig
+++ b/kernel/rcu/Kconfig
@@ -68,6 +68,8 @@ config TREE_SRCU
config FORCE_NEED_SRCU_NMI_SAFE
bool "Force selection of NEED_SRCU_NMI_SAFE"
depends on !TINY_SRCU
+ depends on RCU_EXPERT
+ depends on ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
select NEED_SRCU_NMI_SAFE
default n
help
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 41ab9e1ba357..ec087827c85c 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -36,6 +36,8 @@ enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
EXPORT_SYMBOL_GPL(reboot_mode);
enum reboot_mode panic_reboot_mode = REBOOT_UNDEFINED;
+static enum hw_protection_action hw_protection_action = HWPROT_ACT_SHUTDOWN;
+
/*
* This variable is used privately to keep track of whether or not
* reboot_type is still set to its default value (i.e., reboot= hasn't
@@ -229,6 +231,9 @@ EXPORT_SYMBOL(unregister_restart_handler);
/**
* do_kernel_restart - Execute kernel restart handler call chain
*
+ * @cmd: pointer to buffer containing command to execute for restart
+ * or %NULL
+ *
* Calls functions registered with register_restart_handler.
*
* Expected to be called from machine_restart as last step of the restart
@@ -933,61 +938,86 @@ void orderly_reboot(void)
}
EXPORT_SYMBOL_GPL(orderly_reboot);
+static const char *hw_protection_action_str(enum hw_protection_action action)
+{
+ switch (action) {
+ case HWPROT_ACT_SHUTDOWN:
+ return "shutdown";
+ case HWPROT_ACT_REBOOT:
+ return "reboot";
+ default:
+ return "undefined";
+ }
+}
+
+static enum hw_protection_action hw_failure_emergency_action;
+
/**
- * hw_failure_emergency_poweroff_func - emergency poweroff work after a known delay
- * @work: work_struct associated with the emergency poweroff function
+ * hw_failure_emergency_action_func - emergency action work after a known delay
+ * @work: work_struct associated with the emergency action function
*
* This function is called in very critical situations to force
- * a kernel poweroff after a configurable timeout value.
+ * a kernel poweroff or reboot after a configurable timeout value.
*/
-static void hw_failure_emergency_poweroff_func(struct work_struct *work)
+static void hw_failure_emergency_action_func(struct work_struct *work)
{
+ const char *action_str = hw_protection_action_str(hw_failure_emergency_action);
+
+ pr_emerg("Hardware protection timed-out. Trying forced %s\n",
+ action_str);
+
/*
- * We have reached here after the emergency shutdown waiting period has
- * expired. This means orderly_poweroff has not been able to shut off
- * the system for some reason.
+ * We have reached here after the emergency action waiting period has
+ * expired. This means orderly_poweroff/reboot has not been able to
+ * shut off the system for some reason.
*
- * Try to shut down the system immediately using kernel_power_off
- * if populated
+ * Try to shut off the system immediately if possible
*/
- pr_emerg("Hardware protection timed-out. Trying forced poweroff\n");
- kernel_power_off();
+
+ if (hw_failure_emergency_action == HWPROT_ACT_REBOOT)
+ kernel_restart(NULL);
+ else
+ kernel_power_off();
/*
* Worst of the worst case trigger emergency restart
*/
- pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n");
+ pr_emerg("Hardware protection %s failed. Trying emergency restart\n",
+ action_str);
emergency_restart();
}
-static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work,
- hw_failure_emergency_poweroff_func);
+static DECLARE_DELAYED_WORK(hw_failure_emergency_action_work,
+ hw_failure_emergency_action_func);
/**
- * hw_failure_emergency_poweroff - Trigger an emergency system poweroff
+ * hw_failure_emergency_schedule - Schedule an emergency system shutdown or reboot
+ *
+ * @action: The hardware protection action to be taken
+ * @action_delay_ms: Time in milliseconds to elapse before triggering action
*
* This may be called from any critical situation to trigger a system shutdown
- * after a given period of time. If time is negative this is not scheduled.
+ * or reboot after a given period of time.
+ * If time is negative this is not scheduled.
*/
-static void hw_failure_emergency_poweroff(int poweroff_delay_ms)
+static void hw_failure_emergency_schedule(enum hw_protection_action action,
+ int action_delay_ms)
{
- if (poweroff_delay_ms <= 0)
+ if (action_delay_ms <= 0)
return;
- schedule_delayed_work(&hw_failure_emergency_poweroff_work,
- msecs_to_jiffies(poweroff_delay_ms));
+ hw_failure_emergency_action = action;
+ schedule_delayed_work(&hw_failure_emergency_action_work,
+ msecs_to_jiffies(action_delay_ms));
}
/**
- * __hw_protection_shutdown - Trigger an emergency system shutdown or reboot
+ * __hw_protection_trigger - Trigger an emergency system shutdown or reboot
*
* @reason: Reason of emergency shutdown or reboot to be printed.
* @ms_until_forced: Time to wait for orderly shutdown or reboot before
* triggering it. Negative value disables the forced
* shutdown or reboot.
- * @shutdown: If true, indicates that a shutdown will happen
- * after the critical tempeature is reached.
- * If false, indicates that a reboot will happen
- * after the critical tempeature is reached.
+ * @action: The hardware protection action to be taken.
*
* Initiate an emergency system shutdown or reboot in order to protect
* hardware from further damage. Usage examples include a thermal protection.
@@ -995,11 +1025,16 @@ static void hw_failure_emergency_poweroff(int poweroff_delay_ms)
* pending even if the previous request has given a large timeout for forced
* shutdown/reboot.
*/
-void __hw_protection_shutdown(const char *reason, int ms_until_forced, bool shutdown)
+void __hw_protection_trigger(const char *reason, int ms_until_forced,
+ enum hw_protection_action action)
{
static atomic_t allow_proceed = ATOMIC_INIT(1);
- pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason);
+ if (action == HWPROT_ACT_DEFAULT)
+ action = hw_protection_action;
+
+ pr_emerg("HARDWARE PROTECTION %s (%s)\n",
+ hw_protection_action_str(action), reason);
/* Shutdown should be initiated only once. */
if (!atomic_dec_and_test(&allow_proceed))
@@ -1009,13 +1044,55 @@ void __hw_protection_shutdown(const char *reason, int ms_until_forced, bool shut
* Queue a backup emergency shutdown in the event of
* orderly_poweroff failure
*/
- hw_failure_emergency_poweroff(ms_until_forced);
- if (shutdown)
+ hw_failure_emergency_schedule(action, ms_until_forced);
+ if (action == HWPROT_ACT_REBOOT)
+ orderly_reboot();
+ else
orderly_poweroff(true);
+}
+EXPORT_SYMBOL_GPL(__hw_protection_trigger);
+
+static bool hw_protection_action_parse(const char *str,
+ enum hw_protection_action *action)
+{
+ if (sysfs_streq(str, "shutdown"))
+ *action = HWPROT_ACT_SHUTDOWN;
+ else if (sysfs_streq(str, "reboot"))
+ *action = HWPROT_ACT_REBOOT;
else
- orderly_reboot();
+ return false;
+
+ return true;
+}
+
+static int __init hw_protection_setup(char *str)
+{
+ hw_protection_action_parse(str, &hw_protection_action);
+ return 1;
+}
+__setup("hw_protection=", hw_protection_setup);
+
+#ifdef CONFIG_SYSFS
+static ssize_t hw_protection_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%s\n",
+ hw_protection_action_str(hw_protection_action));
+}
+static ssize_t hw_protection_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf,
+ size_t count)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!hw_protection_action_parse(buf, &hw_protection_action))
+ return -EINVAL;
+
+ return count;
}
-EXPORT_SYMBOL_GPL(__hw_protection_shutdown);
+static struct kobj_attribute hw_protection_attr = __ATTR_RW(hw_protection);
+#endif
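
Driver-side, the single entry point is now __hw_protection_trigger(); passing HWPROT_ACT_DEFAULT defers to the system-wide default that administrators pick with hw_protection= on the command line or the new /sys/kernel/reboot/hw_protection attribute. A hedged caller sketch (the reason string and delay are illustrative):

	/* e.g. from a thermal driver's critical trip handler */
	__hw_protection_trigger("critical temperature reached", 5000,
				HWPROT_ACT_DEFAULT);

The 5000 ms argument arms the delayed-work fallback above, which forces the action if the orderly shutdown/reboot path stalls.
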
static int __init reboot_setup(char *str)
{
@@ -1276,6 +1353,7 @@ static struct kobj_attribute reboot_cpu_attr = __ATTR_RW(cpu);
#endif
static struct attribute *reboot_attrs[] = {
+ &hw_protection_attr.attr,
&reboot_mode_attr.attr,
#ifdef CONFIG_X86
&reboot_force_attr.attr,
diff --git a/kernel/relay.c b/kernel/relay.c
index a8ae436dc77e..5ac7e711e4b6 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -351,10 +351,9 @@ static struct dentry *relay_create_buf_file(struct rchan *chan,
struct dentry *dentry;
char *tmpname;
- tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
+ tmpname = kasprintf(GFP_KERNEL, "%s%d", chan->base_filename, cpu);
if (!tmpname)
return NULL;
- snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);
/* Create file in fs */
dentry = chan->cb->create_buf_file(tmpname, chan->parent,
diff --git a/kernel/resource.c b/kernel/resource.c
index 12004452d999..8d3e6ed0bdc1 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -561,8 +561,7 @@ static int __region_intersects(struct resource *parent, resource_size_t start,
struct resource res, o;
bool covered;
- res.start = start;
- res.end = start + size - 1;
+ res = DEFINE_RES(start, size, 0);
for (p = parent->child; p ; p = p->sibling) {
if (!resource_intersection(p, &res, &o))
@@ -1714,18 +1713,13 @@ static int __init reserve_setup(char *str)
* I/O port space; otherwise assume it's memory.
*/
if (io_start < 0x10000) {
- res->flags = IORESOURCE_IO;
+ *res = DEFINE_RES_IO_NAMED(io_start, io_num, "reserved");
parent = &ioport_resource;
} else {
- res->flags = IORESOURCE_MEM;
+ *res = DEFINE_RES_MEM_NAMED(io_start, io_num, "reserved");
parent = &iomem_resource;
}
- res->name = "reserved";
- res->start = io_start;
- res->end = io_start + io_num - 1;
res->flags |= IORESOURCE_BUSY;
- res->desc = IORES_DESC_NONE;
- res->child = NULL;
if (request_resource(parent, res) == 0)
reserved = x+1;
}
@@ -1975,11 +1969,7 @@ get_free_mem_region(struct device *dev, struct resource *base,
*/
revoke_iomem(res);
} else {
- res->start = addr;
- res->end = addr + size - 1;
- res->name = name;
- res->desc = desc;
- res->flags = IORESOURCE_MEM;
+ *res = DEFINE_RES_NAMED_DESC(addr, size, name, IORESOURCE_MEM, desc);
/*
* Only succeed if the resource hosts an exclusive
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 21575d39c376..66bcd40a28ca 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -4171,8 +4171,8 @@ static struct scx_dispatch_q *create_dsq(u64 dsq_id, int node)
init_dsq(dsq, dsq_id);
- ret = rhashtable_insert_fast(&dsq_hash, &dsq->hash_node,
- dsq_hash_params);
+ ret = rhashtable_lookup_insert_fast(&dsq_hash, &dsq->hash_node,
+ dsq_hash_params);
if (ret) {
kfree(dsq);
return ERR_PTR(ret);
@@ -5361,6 +5361,8 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
*/
cpus_read_lock();
+ scx_idle_enable(ops);
+
if (scx_ops.init) {
ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init);
if (ret) {
@@ -5427,8 +5429,6 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
if (scx_ops.cpu_acquire || scx_ops.cpu_release)
static_branch_enable(&scx_ops_cpu_preempt);
- scx_idle_enable(ops);
-
/*
* Lock out forks, cgroup on/offlining and moves before opening the
* floodgate so that they don't wander into the operations prematurely.
diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c
index 52c36a70a3d0..cb343ca889e0 100644
--- a/kernel/sched/ext_idle.c
+++ b/kernel/sched/ext_idle.c
@@ -544,7 +544,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
* core.
*/
if (flags & SCX_PICK_IDLE_CORE) {
- cpu = prev_cpu;
+ cpu = -EBUSY;
goto out_unlock;
}
}
@@ -584,8 +584,6 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
* increasing distance.
*/
cpu = scx_pick_idle_cpu(p->cpus_ptr, node, flags);
- if (cpu >= 0)
- goto out_unlock;
out_unlock:
rcu_read_unlock();
@@ -723,14 +721,14 @@ static void reset_idle_masks(struct sched_ext_ops *ops)
void scx_idle_enable(struct sched_ext_ops *ops)
{
if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))
- static_branch_enable(&scx_builtin_idle_enabled);
+ static_branch_enable_cpuslocked(&scx_builtin_idle_enabled);
else
- static_branch_disable(&scx_builtin_idle_enabled);
+ static_branch_disable_cpuslocked(&scx_builtin_idle_enabled);
if (ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)
- static_branch_enable(&scx_builtin_idle_per_node);
+ static_branch_enable_cpuslocked(&scx_builtin_idle_per_node);
else
- static_branch_disable(&scx_builtin_idle_per_node);
+ static_branch_disable_cpuslocked(&scx_builtin_idle_per_node);
#ifdef CONFIG_SMP
reset_idle_masks(ops);
diff --git a/kernel/signal.c b/kernel/signal.c
index 86ba66d95da5..f8859faa26c5 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -176,9 +176,10 @@ static bool recalc_sigpending_tsk(struct task_struct *t)
void recalc_sigpending(void)
{
- if (!recalc_sigpending_tsk(current) && !freezing(current))
- clear_thread_flag(TIF_SIGPENDING);
-
+ if (!recalc_sigpending_tsk(current) && !freezing(current)) {
+ if (unlikely(test_thread_flag(TIF_SIGPENDING)))
+ clear_thread_flag(TIF_SIGPENDING);
+ }
}
EXPORT_SYMBOL(recalc_sigpending);
@@ -2179,11 +2180,9 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
WARN_ON_ONCE(!tsk->ptrace &&
(tsk->group_leader != tsk || !thread_group_empty(tsk)));
- /*
- * Notify for thread-group leaders without subthreads.
- */
- if (thread_group_empty(tsk))
- do_notify_pidfd(tsk);
+
+ /* ptraced, or group-leader without sub-threads */
+ do_notify_pidfd(tsk);
if (sig != SIGCHLD) {
/*
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 033fba0633cf..a3f35c7d83b6 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -265,8 +265,7 @@ config FUNCTION_GRAPH_RETADDR
config FUNCTION_TRACE_ARGS
bool
- depends on HAVE_FUNCTION_ARG_ACCESS_API
- depends on DEBUG_INFO_BTF
+ depends on PROBE_EVENTS_BTF_ARGS
default y
help
If supported with function argument access API and BTF, then
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 92015de6203d..1a48aedb5255 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -6855,6 +6855,7 @@ ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
}
}
}
+ cond_resched();
} while_for_each_ftrace_rec();
return fail ? -EINVAL : 0;
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d8d7b28e2c2f..c0f877d39a24 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -6016,7 +6016,7 @@ static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
meta->read = cpu_buffer->read;
/* Some archs do not have data cache coherency between kernel and user-space */
- flush_dcache_folio(virt_to_folio(cpu_buffer->meta_page));
+ flush_kernel_vmap_range(cpu_buffer->meta_page, PAGE_SIZE);
}
static void
@@ -7319,7 +7319,8 @@ consume:
out:
/* Some archs do not have data cache coherency between kernel and user-space */
- flush_dcache_folio(virt_to_folio(cpu_buffer->reader_page->page));
+ flush_kernel_vmap_range(cpu_buffer->reader_page->page,
+ buffer->subbuf_size + BUF_PAGE_HDR_SIZE);
rb_update_meta_page(cpu_buffer);
diff --git a/kernel/trace/rv/rv.c b/kernel/trace/rv/rv.c
index 50344aa9f7f9..968c5c3b0246 100644
--- a/kernel/trace/rv/rv.c
+++ b/kernel/trace/rv/rv.c
@@ -809,7 +809,8 @@ int rv_register_monitor(struct rv_monitor *monitor, struct rv_monitor *parent)
if (p && rv_is_nested_monitor(p)) {
pr_info("Parent monitor %s is already nested, cannot nest further\n",
parent->name);
- return -EINVAL;
+ retval = -EINVAL;
+ goto out_unlock;
}
r = kzalloc(sizeof(struct rv_monitor_def), GFP_KERNEL);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 103b193875b3..b581e388a9d9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -50,6 +50,7 @@
#include <linux/irq_work.h>
#include <linux/workqueue.h>
#include <linux/sort.h>
+#include <linux/io.h> /* vmap_page_range() */
#include <asm/setup.h> /* COMMAND_LINE_SIZE */
@@ -3341,10 +3342,9 @@ out_nobuffer:
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
-__printf(3, 0)
-static int
-__trace_array_vprintk(struct trace_buffer *buffer,
- unsigned long ip, const char *fmt, va_list args)
+static __printf(3, 0)
+int __trace_array_vprintk(struct trace_buffer *buffer,
+ unsigned long ip, const char *fmt, va_list args)
{
struct ring_buffer_event *event;
int len = 0, size;
@@ -3394,7 +3394,6 @@ out_nobuffer:
return len;
}
-__printf(3, 0)
int trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args)
{
@@ -3424,7 +3423,6 @@ int trace_array_vprintk(struct trace_array *tr,
* Note, trace_array_init_printk() must be called on @tr before this
* can be used.
*/
-__printf(3, 0)
int trace_array_printk(struct trace_array *tr,
unsigned long ip, const char *fmt, ...)
{
@@ -3469,7 +3467,6 @@ int trace_array_init_printk(struct trace_array *tr)
}
EXPORT_SYMBOL_GPL(trace_array_init_printk);
-__printf(3, 4)
int trace_array_printk_buf(struct trace_buffer *buffer,
unsigned long ip, const char *fmt, ...)
{
@@ -3485,7 +3482,6 @@ int trace_array_printk_buf(struct trace_buffer *buffer,
return ret;
}
-__printf(2, 0)
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
return trace_array_vprintk(printk_trace, ip, fmt, args);
@@ -8505,6 +8501,10 @@ static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)
struct trace_iterator *iter = &info->iter;
int ret = 0;
+ /* A memmap'ed buffer is not supported for user space mmap */
+ if (iter->tr->flags & TRACE_ARRAY_FL_MEMMAP)
+ return -ENODEV;
+
/* Currently the boot mapped buffer is not supported for mmap */
if (iter->tr->flags & TRACE_ARRAY_FL_BOOT)
return -ENODEV;
@@ -9609,13 +9609,11 @@ static void free_trace_buffers(struct trace_array *tr)
return;
free_trace_buffer(&tr->array_buffer);
+ kfree(tr->module_delta);
#ifdef CONFIG_TRACER_MAX_TRACE
free_trace_buffer(&tr->max_buffer);
#endif
-
- if (tr->range_addr_start)
- vunmap((void *)tr->range_addr_start);
}
static void init_trace_flags_index(struct trace_array *tr)
@@ -9808,29 +9806,27 @@ static int instance_mkdir(const char *name)
return ret;
}
-static u64 map_pages(u64 start, u64 size)
+static u64 map_pages(unsigned long start, unsigned long size)
{
- struct page **pages;
- phys_addr_t page_start;
- unsigned int page_count;
- unsigned int i;
- void *vaddr;
-
- page_count = DIV_ROUND_UP(size, PAGE_SIZE);
+ unsigned long vmap_start, vmap_end;
+ struct vm_struct *area;
+ int ret;
- page_start = start;
- pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
- if (!pages)
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
return 0;
- for (i = 0; i < page_count; i++) {
- phys_addr_t addr = page_start + i * PAGE_SIZE;
- pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
+ vmap_start = (unsigned long) area->addr;
+ vmap_end = vmap_start + size;
+
+ ret = vmap_page_range(vmap_start, vmap_end,
+ start, pgprot_nx(PAGE_KERNEL));
+ if (ret < 0) {
+ free_vm_area(area);
+ return 0;
}
- vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
- kfree(pages);
- return (u64)(unsigned long)vaddr;
+ return (u64)vmap_start;
}
/**
@@ -10709,6 +10705,7 @@ static inline void do_allocate_snapshot(const char *name) { }
__init static void enable_instances(void)
{
struct trace_array *tr;
+ bool memmap_area = false;
char *curr_str;
char *name;
char *str;
@@ -10777,6 +10774,7 @@ __init static void enable_instances(void)
name);
continue;
}
+ memmap_area = true;
} else if (tok) {
if (!reserve_mem_find_by_name(tok, &start, &size)) {
start = 0;
@@ -10787,7 +10785,20 @@ __init static void enable_instances(void)
}
if (start) {
- addr = map_pages(start, size);
+ /* Start and size must be page aligned */
+ if (start & ~PAGE_MASK) {
+ pr_warn("Tracing: mapping start addr %pa is not page aligned\n", &start);
+ continue;
+ }
+ if (size & ~PAGE_MASK) {
+ pr_warn("Tracing: mapping size %pa is not page aligned\n", &size);
+ continue;
+ }
+
+ if (memmap_area)
+ addr = map_pages(start, size);
+ else
+ addr = (unsigned long)phys_to_virt(start);
if (addr) {
pr_info("Tracing: mapped boot instance %s at physical memory %pa of size 0x%lx\n",
name, &start, (unsigned long)size);
@@ -10814,10 +10825,13 @@ __init static void enable_instances(void)
update_printk_trace(tr);
/*
- * If start is set, then this is a mapped buffer, and
- * cannot be deleted by user space, so keep the reference
- * to it.
+	 * memmap'd buffers cannot be freed.
*/
+ if (memmap_area) {
+ tr->flags |= TRACE_ARRAY_FL_MEMMAP;
+ tr->ref++;
+ }
+
if (start) {
tr->flags |= TRACE_ARRAY_FL_BOOT | TRACE_ARRAY_FL_LAST_BOOT;
tr->range_name = no_free_ptr(rname);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index c20f6bcc200a..79be1995db44 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -447,6 +447,7 @@ enum {
TRACE_ARRAY_FL_BOOT = BIT(1),
TRACE_ARRAY_FL_LAST_BOOT = BIT(2),
TRACE_ARRAY_FL_MOD_INIT = BIT(3),
+ TRACE_ARRAY_FL_MEMMAP = BIT(4),
};
#ifdef CONFIG_MODULES
@@ -852,13 +853,15 @@ static inline void __init disable_tracing_selftest(const char *reason)
extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(u64 nsec);
-extern int
-trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
-extern int
-trace_vprintk(unsigned long ip, const char *fmt, va_list args);
-extern int
-trace_array_vprintk(struct trace_array *tr,
- unsigned long ip, const char *fmt, va_list args);
+
+__printf(2, 0)
+int trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
+__printf(2, 0)
+int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
+__printf(3, 0)
+int trace_array_vprintk(struct trace_array *tr,
+ unsigned long ip, const char *fmt, va_list args);
+__printf(3, 4)
int trace_array_printk_buf(struct trace_buffer *buffer,
unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 8638b7f7ff85..069e92856bda 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -470,6 +470,7 @@ static void test_event_printk(struct trace_event_call *call)
case '%':
continue;
case 'p':
+ do_pointer:
/* Find dereferencing fields */
switch (fmt[i + 1]) {
case 'B': case 'R': case 'r':
@@ -498,6 +499,12 @@ static void test_event_printk(struct trace_event_call *call)
continue;
if (fmt[i + j] == '*') {
star = true;
+ /* Handle %*pbl case */
+ if (!j && fmt[i + 1] == 'p') {
+ arg++;
+ i++;
+ goto do_pointer;
+ }
continue;
}
if ((fmt[i + j] == 's')) {
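
The parser fix handles formats such as "%*pbl", where the '*' consumes an int argument (the number of bits) before the %p conversion itself, so test_event_printk() must advance one extra argument and then fall through to the pointer rules. What such a format looks like at a call site (standard vsprintf bitmap output, shown for illustration):

	DECLARE_BITMAP(mask, 64);

	bitmap_zero(mask, 64);
	bitmap_set(mask, 0, 4);
	pr_info("mask: %*pbl\n", 64, mask);	/* prints "mask: 0-3" */
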
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 86c5f1c0bad9..8686e329b8f2 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -11,11 +11,14 @@
struct ucounts init_ucounts = {
.ns = &init_user_ns,
.uid = GLOBAL_ROOT_UID,
- .count = ATOMIC_INIT(1),
+ .count = RCUREF_INIT(1),
};
#define UCOUNTS_HASHTABLE_BITS 10
-static struct hlist_head ucounts_hashtable[(1 << UCOUNTS_HASHTABLE_BITS)];
+#define UCOUNTS_HASHTABLE_ENTRIES (1 << UCOUNTS_HASHTABLE_BITS)
+static struct hlist_nulls_head ucounts_hashtable[UCOUNTS_HASHTABLE_ENTRIES] = {
+ [0 ... UCOUNTS_HASHTABLE_ENTRIES - 1] = HLIST_NULLS_HEAD_INIT(0)
+};
static DEFINE_SPINLOCK(ucounts_lock);
#define ucounts_hashfn(ns, uid) \
@@ -24,7 +27,6 @@ static DEFINE_SPINLOCK(ucounts_lock);
#define ucounts_hashentry(ns, uid) \
(ucounts_hashtable + ucounts_hashfn(ns, uid))
-
#ifdef CONFIG_SYSCTL
static struct ctl_table_set *
set_lookup(struct ctl_table_root *root)
@@ -127,88 +129,73 @@ void retire_userns_sysctls(struct user_namespace *ns)
#endif
}
-static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid, struct hlist_head *hashent)
+static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid,
+ struct hlist_nulls_head *hashent)
{
struct ucounts *ucounts;
+ struct hlist_nulls_node *pos;
- hlist_for_each_entry(ucounts, hashent, node) {
- if (uid_eq(ucounts->uid, uid) && (ucounts->ns == ns))
- return ucounts;
+ guard(rcu)();
+ hlist_nulls_for_each_entry_rcu(ucounts, pos, hashent, node) {
+ if (uid_eq(ucounts->uid, uid) && (ucounts->ns == ns)) {
+ if (rcuref_get(&ucounts->count))
+ return ucounts;
+ }
}
return NULL;
}
static void hlist_add_ucounts(struct ucounts *ucounts)
{
- struct hlist_head *hashent = ucounts_hashentry(ucounts->ns, ucounts->uid);
+ struct hlist_nulls_head *hashent = ucounts_hashentry(ucounts->ns, ucounts->uid);
+
spin_lock_irq(&ucounts_lock);
- hlist_add_head(&ucounts->node, hashent);
+ hlist_nulls_add_head_rcu(&ucounts->node, hashent);
spin_unlock_irq(&ucounts_lock);
}
-static inline bool get_ucounts_or_wrap(struct ucounts *ucounts)
+struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
{
- /* Returns true on a successful get, false if the count wraps. */
- return !atomic_add_negative(1, &ucounts->count);
-}
+ struct hlist_nulls_head *hashent = ucounts_hashentry(ns, uid);
+ struct ucounts *ucounts, *new;
-struct ucounts *get_ucounts(struct ucounts *ucounts)
-{
- if (!get_ucounts_or_wrap(ucounts)) {
- put_ucounts(ucounts);
- ucounts = NULL;
- }
- return ucounts;
-}
+ ucounts = find_ucounts(ns, uid, hashent);
+ if (ucounts)
+ return ucounts;
-struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
-{
- struct hlist_head *hashent = ucounts_hashentry(ns, uid);
- bool wrapped;
- struct ucounts *ucounts, *new = NULL;
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ new->ns = ns;
+ new->uid = uid;
+ rcuref_init(&new->count, 1);
spin_lock_irq(&ucounts_lock);
ucounts = find_ucounts(ns, uid, hashent);
- if (!ucounts) {
+ if (ucounts) {
spin_unlock_irq(&ucounts_lock);
-
- new = kzalloc(sizeof(*new), GFP_KERNEL);
- if (!new)
- return NULL;
-
- new->ns = ns;
- new->uid = uid;
- atomic_set(&new->count, 1);
-
- spin_lock_irq(&ucounts_lock);
- ucounts = find_ucounts(ns, uid, hashent);
- if (!ucounts) {
- hlist_add_head(&new->node, hashent);
- get_user_ns(new->ns);
- spin_unlock_irq(&ucounts_lock);
- return new;
- }
+ kfree(new);
+ return ucounts;
}
- wrapped = !get_ucounts_or_wrap(ucounts);
+ hlist_nulls_add_head_rcu(&new->node, hashent);
+ get_user_ns(new->ns);
spin_unlock_irq(&ucounts_lock);
- kfree(new);
- if (wrapped) {
- put_ucounts(ucounts);
- return NULL;
- }
- return ucounts;
+ return new;
}
void put_ucounts(struct ucounts *ucounts)
{
unsigned long flags;
- if (atomic_dec_and_lock_irqsave(&ucounts->count, &ucounts_lock, flags)) {
- hlist_del_init(&ucounts->node);
+ if (rcuref_put(&ucounts->count)) {
+ spin_lock_irqsave(&ucounts_lock, flags);
+ hlist_nulls_del_rcu(&ucounts->node);
spin_unlock_irqrestore(&ucounts_lock, flags);
+
put_user_ns(ucounts->ns);
- kfree(ucounts);
+ kfree_rcu(ucounts, rcu);
}
}
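
The ucounts rework swaps an atomic counter plus spinlocked hash for RCU: lookups walk an hlist_nulls chain locklessly, rcuref_get() refuses to resurrect an entry whose count already hit zero, and kfree_rcu() keeps the memory valid until readers drain. The lookup half of that discipline, condensed into a hedged generic sketch (match() is a hypothetical predicate):

	rcu_read_lock();
	hlist_nulls_for_each_entry_rcu(uc, pos, head, node) {
		if (match(uc) && rcuref_get(&uc->count)) {
			rcu_read_unlock();
			return uc;	/* reference held by the caller */
		}
	}
	rcu_read_unlock();
	return NULL;
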
diff --git a/kernel/watchdog_perf.c b/kernel/watchdog_perf.c
index a78ff092d636..75af12ff774e 100644
--- a/kernel/watchdog_perf.c
+++ b/kernel/watchdog_perf.c
@@ -269,12 +269,10 @@ void __init hardlockup_config_perf_event(const char *str)
} else {
unsigned int len = comma - str;
- if (len >= sizeof(buf))
+ if (len > sizeof(buf))
return;
- if (strscpy(buf, str, sizeof(buf)) < 0)
- return;
- buf[len] = 0;
+ strscpy(buf, str, len);
if (kstrtoull(buf, 16, &config))
return;
}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0ffd5526bd46..df9587aa5c5e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1280,6 +1280,17 @@ config BOOTPARAM_HUNG_TASK_PANIC
Say N if unsure.
+config DETECT_HUNG_TASK_BLOCKER
+ bool "Dump Hung Tasks Blocker"
+ depends on DETECT_HUNG_TASK
+ depends on !PREEMPT_RT
+ default y
+ help
+	  Say Y here to show the stacktrace of the task that holds the
+	  mutex a "hung task" is blocked on. This adds a small amount of
+	  overhead, but makes it easy to identify the suspicious task when
+	  the hang comes from waiting on a mutex.
+
config WQ_WATCHDOG
bool "Detect Workqueue Stalls"
depends on DEBUG_KERNEL
@@ -2502,13 +2513,20 @@ config TEST_IDA
tristate "Perform selftest on IDA functions"
config TEST_MISC_MINOR
- tristate "Basic misc minor Kunit test" if !KUNIT_ALL_TESTS
+ tristate "miscdevice KUnit test" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
help
- Kunit test for the misc minor.
- It tests misc minor functions for dynamic and misc dynamic minor.
- This include misc_xxx functions
+	  KUnit test for the miscdevice API, specifically its behavior with
+	  respect to static and dynamic minor numbers.
+
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (https://testanything.org/). Only useful for kernel devs
+ running the KUnit test harness, and not intended for inclusion into a
+ production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
If unsure, say N.
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 19b45617bdcf..1d893e313614 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -174,7 +174,7 @@ void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
if (!mem_alloc_profiling_enabled())
return;
- tag = pgalloc_tag_get(&folio->page);
+ tag = __pgalloc_tag_get(&folio->page);
if (!tag)
return;
@@ -200,10 +200,10 @@ void pgalloc_tag_swap(struct folio *new, struct folio *old)
if (!mem_alloc_profiling_enabled())
return;
- tag_old = pgalloc_tag_get(&old->page);
+ tag_old = __pgalloc_tag_get(&old->page);
if (!tag_old)
return;
- tag_new = pgalloc_tag_get(&new->page);
+ tag_new = __pgalloc_tag_get(&new->page);
if (!tag_new)
return;
diff --git a/lib/idr.c b/lib/idr.c
index da36054c3ca0..e2adc457abb4 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -477,6 +477,73 @@ nospc:
EXPORT_SYMBOL(ida_alloc_range);
/**
+ * ida_find_first_range - Get the lowest used ID.
+ * @ida: IDA handle.
+ * @min: Lowest ID to get.
+ * @max: Highest ID to get.
+ *
+ * Get the lowest used ID between @min and @max, inclusive. The returned
+ * ID will not exceed %INT_MAX, even if @max is larger.
+ *
+ * Context: Any context. Takes and releases the xa_lock.
+ * Return: The lowest used ID, or a negative errno if no used ID is found.
+ */
+int ida_find_first_range(struct ida *ida, unsigned int min, unsigned int max)
+{
+ unsigned long index = min / IDA_BITMAP_BITS;
+ unsigned int offset = min % IDA_BITMAP_BITS;
+ unsigned long *addr, size, bit;
+ unsigned long tmp = 0;
+ unsigned long flags;
+ void *entry;
+ int ret;
+
+ if ((int)min < 0)
+ return -EINVAL;
+ if ((int)max < 0)
+ max = INT_MAX;
+
+ xa_lock_irqsave(&ida->xa, flags);
+
+ entry = xa_find(&ida->xa, &index, max / IDA_BITMAP_BITS, XA_PRESENT);
+ if (!entry) {
+ ret = -ENOENT;
+ goto err_unlock;
+ }
+
+ if (index > min / IDA_BITMAP_BITS)
+ offset = 0;
+ if (index * IDA_BITMAP_BITS + offset > max) {
+ ret = -ENOENT;
+ goto err_unlock;
+ }
+
+ if (xa_is_value(entry)) {
+ tmp = xa_to_value(entry);
+ addr = &tmp;
+ size = BITS_PER_XA_VALUE;
+ } else {
+ addr = ((struct ida_bitmap *)entry)->bitmap;
+ size = IDA_BITMAP_BITS;
+ }
+
+ bit = find_next_bit(addr, size, offset);
+
+ xa_unlock_irqrestore(&ida->xa, flags);
+
+ if (bit == size ||
+ index * IDA_BITMAP_BITS + bit > max)
+ return -ENOENT;
+
+ return index * IDA_BITMAP_BITS + bit;
+
+err_unlock:
+ xa_unlock_irqrestore(&ida->xa, flags);
+ return ret;
+}
+EXPORT_SYMBOL(ida_find_first_range);
+
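
A hedged usage sketch for the new helper (one ID allocated, so the first used ID in [0, INT_MAX] is the one just handed out):

	static DEFINE_IDA(ida);

	static int lowest_used(void)
	{
		int id = ida_alloc_range(&ida, 10, 20, GFP_KERNEL);

		if (id < 0)
			return id;
		/* returns 10 here; -ENOENT once everything is freed again */
		return ida_find_first_range(&ida, 0, INT_MAX);
	}
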
+/**
* ida_free() - Release an allocated ID.
* @ida: IDA handle.
* @id: Previously allocated ID.
diff --git a/lib/interval_tree.c b/lib/interval_tree.c
index 3412737ff365..324766e9bf63 100644
--- a/lib/interval_tree.c
+++ b/lib/interval_tree.c
@@ -20,9 +20,15 @@ EXPORT_SYMBOL_GPL(interval_tree_iter_next);
/*
* Roll nodes[1] into nodes[0] by advancing nodes[1] to the end of a contiguous
* span of nodes. This makes nodes[0]->last the end of that contiguous used span
- * indexes that started at the original nodes[1]->start. nodes[1] is now the
- * first node starting the next used span. A hole span is between nodes[0]->last
- * and nodes[1]->start. nodes[1] must be !NULL.
+ * of indexes that started at the original nodes[1]->start.
+ *
+ * If there is an interior hole, nodes[1] is now the first node starting the
+ * next used span. A hole span is between nodes[0]->last and nodes[1]->start.
+ *
+ * If there is a trailing hole, nodes[1] is now NULL. A hole span is between
+ * nodes[0]->last and last_index.
+ *
+ * If the contiguous used range spans to last_index, nodes[1] is set to NULL.
*/
static void
interval_tree_span_iter_next_gap(struct interval_tree_span_iter *state)
diff --git a/lib/interval_tree_test.c b/lib/interval_tree_test.c
index 837064b83a6c..5fd62656f42e 100644
--- a/lib/interval_tree_test.c
+++ b/lib/interval_tree_test.c
@@ -5,6 +5,8 @@
#include <linux/prandom.h>
#include <linux/slab.h>
#include <asm/timex.h>
+#include <linux/bitmap.h>
+#include <linux/maple_tree.h>
#define __param(type, name, init, msg) \
static type name = init; \
@@ -19,6 +21,7 @@ __param(int, search_loops, 1000, "Number of iterations searching the tree");
__param(bool, search_all, false, "Searches will iterate all nodes in the tree");
__param(uint, max_endpoint, ~0, "Largest value for the interval's endpoint");
+__param(ullong, seed, 3141592653589793238ULL, "Random seed");
static struct rb_root_cached root = RB_ROOT_CACHED;
static struct interval_tree_node *nodes = NULL;
@@ -59,26 +62,13 @@ static void init(void)
queries[i] = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
}
-static int interval_tree_test_init(void)
+static int basic_check(void)
{
int i, j;
- unsigned long results;
cycles_t time1, time2, time;
- nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node),
- GFP_KERNEL);
- if (!nodes)
- return -ENOMEM;
-
- queries = kmalloc_array(nsearches, sizeof(int), GFP_KERNEL);
- if (!queries) {
- kfree(nodes);
- return -ENOMEM;
- }
-
printk(KERN_ALERT "interval tree insert/remove");
- prandom_seed_state(&rnd, 3141592653589793238ULL);
init();
time1 = get_cycles();
@@ -96,8 +86,19 @@ static int interval_tree_test_init(void)
time = div_u64(time, perf_loops);
printk(" -> %llu cycles\n", (unsigned long long)time);
+ return 0;
+}
+
+static int search_check(void)
+{
+ int i, j;
+ unsigned long results;
+ cycles_t time1, time2, time;
+
printk(KERN_ALERT "interval tree search");
+ init();
+
for (j = 0; j < nnodes; j++)
interval_tree_insert(nodes + j, &root);
@@ -120,6 +121,214 @@ static int interval_tree_test_init(void)
printk(" -> %llu cycles (%lu results)\n",
(unsigned long long)time, results);
+ for (j = 0; j < nnodes; j++)
+ interval_tree_remove(nodes + j, &root);
+
+ return 0;
+}
+
+static int intersection_range_check(void)
+{
+ int i, j, k;
+ unsigned long start, last;
+ struct interval_tree_node *node;
+ unsigned long *intxn1;
+ unsigned long *intxn2;
+
+ printk(KERN_ALERT "interval tree iteration\n");
+
+ intxn1 = bitmap_alloc(nnodes, GFP_KERNEL);
+ if (!intxn1) {
+ WARN_ON_ONCE("Failed to allocate intxn1\n");
+ return -ENOMEM;
+ }
+
+ intxn2 = bitmap_alloc(nnodes, GFP_KERNEL);
+ if (!intxn2) {
+ WARN_ON_ONCE("Failed to allocate intxn2\n");
+ bitmap_free(intxn1);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < search_loops; i++) {
+ /* Initialize interval tree for each round */
+ init();
+ for (j = 0; j < nnodes; j++)
+ interval_tree_insert(nodes + j, &root);
+
+ /* Let's try nsearches different ranges */
+ for (k = 0; k < nsearches; k++) {
+ /* Try whole range once */
+ if (!k) {
+ start = 0UL;
+ last = ULONG_MAX;
+ } else {
+ last = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
+ start = (prandom_u32_state(&rnd) >> 4) % last;
+ }
+
+ /* Walk nodes to mark intersection nodes */
+ bitmap_zero(intxn1, nnodes);
+ for (j = 0; j < nnodes; j++) {
+ node = nodes + j;
+
+ if (start <= node->last && last >= node->start)
+ bitmap_set(intxn1, j, 1);
+ }
+
+ /* Iterate tree to clear intersection nodes */
+ bitmap_zero(intxn2, nnodes);
+ for (node = interval_tree_iter_first(&root, start, last); node;
+ node = interval_tree_iter_next(node, start, last))
+ bitmap_set(intxn2, node - nodes, 1);
+
+ WARN_ON_ONCE(!bitmap_equal(intxn1, intxn2, nnodes));
+ }
+
+ for (j = 0; j < nnodes; j++)
+ interval_tree_remove(nodes + j, &root);
+ }
+
+ bitmap_free(intxn1);
+ bitmap_free(intxn2);
+ return 0;
+}
+
+#ifdef CONFIG_INTERVAL_TREE_SPAN_ITER
+/*
+ * Helper function to get the span at the current position from the maple
+ * tree's point of view.
+ */
+static void mas_cur_span(struct ma_state *mas, struct interval_tree_span_iter *state)
+{
+ unsigned long cur_start;
+ unsigned long cur_last;
+ int is_hole;
+
+ if (mas->status == ma_overflow)
+ return;
+
+ /* walk to current position */
+ state->is_hole = mas_walk(mas) ? 0 : 1;
+
+ cur_start = mas->index < state->first_index ?
+ state->first_index : mas->index;
+
+ /* whether we have followers */
+ do {
+
+ cur_last = mas->last > state->last_index ?
+ state->last_index : mas->last;
+
+ is_hole = mas_next_range(mas, state->last_index) ? 0 : 1;
+
+ } while (mas->status != ma_overflow && is_hole == state->is_hole);
+
+ if (state->is_hole) {
+ state->start_hole = cur_start;
+ state->last_hole = cur_last;
+ } else {
+ state->start_used = cur_start;
+ state->last_used = cur_last;
+ }
+
+ /* advance position for next round */
+ if (mas->status != ma_overflow)
+ mas_set(mas, cur_last + 1);
+}
+
+static int span_iteration_check(void)
+{
+ int i, j, k;
+ unsigned long start, last;
+ struct interval_tree_span_iter span, mas_span;
+
+ DEFINE_MTREE(tree);
+
+ MA_STATE(mas, &tree, 0, 0);
+
+ printk(KERN_ALERT "interval tree span iteration\n");
+
+ for (i = 0; i < search_loops; i++) {
+ /* Initialize interval tree for each round */
+ init();
+ for (j = 0; j < nnodes; j++)
+ interval_tree_insert(nodes + j, &root);
+
+ /* Put all the range into maple tree */
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ mt_set_in_rcu(&tree);
+
+ for (j = 0; j < nnodes; j++)
+ WARN_ON_ONCE(mtree_store_range(&tree, nodes[j].start,
+ nodes[j].last, nodes + j, GFP_KERNEL));
+
+ /* Let's try nsearches different ranges */
+ for (k = 0; k < nsearches; k++) {
+ /* Try whole range once */
+ if (!k) {
+ start = 0UL;
+ last = ULONG_MAX;
+ } else {
+ last = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
+ start = (prandom_u32_state(&rnd) >> 4) % last;
+ }
+
+ mas_span.first_index = start;
+ mas_span.last_index = last;
+ mas_span.is_hole = -1;
+ mas_set(&mas, start);
+
+ interval_tree_for_each_span(&span, &root, start, last) {
+ mas_cur_span(&mas, &mas_span);
+
+ WARN_ON_ONCE(span.is_hole != mas_span.is_hole);
+
+ if (span.is_hole) {
+ WARN_ON_ONCE(span.start_hole != mas_span.start_hole);
+ WARN_ON_ONCE(span.last_hole != mas_span.last_hole);
+ } else {
+ WARN_ON_ONCE(span.start_used != mas_span.start_used);
+ WARN_ON_ONCE(span.last_used != mas_span.last_used);
+ }
+ }
+
+ }
+
+ WARN_ON_ONCE(mas.status != ma_overflow);
+
+ /* Cleanup maple tree for each round */
+ mtree_destroy(&tree);
+ /* Cleanup interval tree for each round */
+ for (j = 0; j < nnodes; j++)
+ interval_tree_remove(nodes + j, &root);
+ }
+ return 0;
+}
+#else
+static inline int span_iteration_check(void) { return 0; }
+#endif
+
+static int interval_tree_test_init(void)
+{
+ nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node),
+ GFP_KERNEL);
+ if (!nodes)
+ return -ENOMEM;
+
+ queries = kmalloc_array(nsearches, sizeof(int), GFP_KERNEL);
+ if (!queries) {
+ kfree(nodes);
+ return -ENOMEM;
+ }
+
+ prandom_seed_state(&rnd, seed);
+
+ basic_check();
+ search_check();
+ intersection_range_check();
+ span_iteration_check();
+
kfree(queries);
kfree(nodes);
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index f7153ade1be5..d0bea23fa4bc 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -584,13 +584,10 @@ static __always_inline bool ma_dead_node(const struct maple_node *node)
*/
static __always_inline bool mte_dead_node(const struct maple_enode *enode)
{
- struct maple_node *parent, *node;
+ struct maple_node *node;
node = mte_to_node(enode);
- /* Do not reorder reads from the node prior to the parent check */
- smp_rmb();
- parent = mte_parent(enode);
- return (parent == node);
+ return ma_dead_node(node);
}
/*
@@ -1245,7 +1242,6 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
if (mas->mas_flags & MA_STATE_PREALLOC) {
if (allocated)
return;
- BUG_ON(!allocated);
WARN_ON(!allocated);
}
@@ -1353,7 +1349,7 @@ static void mas_node_count(struct ma_state *mas, int count)
* mas_start() - Sets up maple state for operations.
* @mas: The maple state.
*
- * If mas->status == mas_start, then set the min, max and depth to
+ * If mas->status == ma_start, then set the min, max and depth to
* defaults.
*
* Return:
diff --git a/lib/min_heap.c b/lib/min_heap.c
index 4485372ff3b1..96f01a4c5fb6 100644
--- a/lib/min_heap.c
+++ b/lib/min_heap.c
@@ -2,7 +2,7 @@
#include <linux/export.h>
#include <linux/min_heap.h>
-void __min_heap_init(min_heap_char *heap, void *data, int size)
+void __min_heap_init(min_heap_char *heap, void *data, size_t size)
{
__min_heap_init_inline(heap, data, size);
}
@@ -20,7 +20,7 @@ bool __min_heap_full(min_heap_char *heap)
}
EXPORT_SYMBOL(__min_heap_full);
-void __min_heap_sift_down(min_heap_char *heap, int pos, size_t elem_size,
+void __min_heap_sift_down(min_heap_char *heap, size_t pos, size_t elem_size,
const struct min_heap_callbacks *func, void *args)
{
__min_heap_sift_down_inline(heap, pos, elem_size, func, args);
diff --git a/lib/plist.c b/lib/plist.c
index c6bce1226874..330febb4bd7d 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -171,12 +171,24 @@ void plist_requeue(struct plist_node *node, struct plist_head *head)
plist_del(node, head);
+ /*
+ * After plist_del(), iter is the replacement of the node. If the node
+ * was on prio_list, take shortcut to find node_next instead of looping.
+ */
+ if (!list_empty(&iter->prio_list)) {
+ iter = list_entry(iter->prio_list.next, struct plist_node,
+ prio_list);
+ node_next = &iter->node_list;
+ goto queue;
+ }
+
plist_for_each_continue(iter, head) {
if (node->prio != iter->prio) {
node_next = &iter->node_list;
break;
}
}
+queue:
list_add_tail(&node->node_list, node_next);
plist_check_head(head);
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 8655a76d29a1..690cede46ac2 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -14,6 +14,7 @@
__param(int, nnodes, 100, "Number of nodes in the rb-tree");
__param(int, perf_loops, 1000, "Number of iterations modifying the rb-tree");
__param(int, check_loops, 100, "Number of iterations modifying and verifying the rb-tree");
+__param(ullong, seed, 3141592653589793238ULL, "Random seed");
struct test_node {
u32 key;
@@ -239,19 +240,14 @@ static void check_augmented(int nr_nodes)
}
}
-static int __init rbtree_test_init(void)
+static int basic_check(void)
{
int i, j;
cycles_t time1, time2, time;
struct rb_node *node;
- nodes = kmalloc_array(nnodes, sizeof(*nodes), GFP_KERNEL);
- if (!nodes)
- return -ENOMEM;
-
printk(KERN_ALERT "rbtree testing");
- prandom_seed_state(&rnd, 3141592653589793238ULL);
init();
time1 = get_cycles();
@@ -343,6 +339,14 @@ static int __init rbtree_test_init(void)
check(0);
}
+ return 0;
+}
+
+static int augmented_check(void)
+{
+ int i, j;
+ cycles_t time1, time2, time;
+
printk(KERN_ALERT "augmented rbtree testing");
init();
@@ -390,6 +394,20 @@ static int __init rbtree_test_init(void)
check_augmented(0);
}
+ return 0;
+}
+
+static int __init rbtree_test_init(void)
+{
+ nodes = kmalloc_array(nnodes, sizeof(*nodes), GFP_KERNEL);
+ if (!nodes)
+ return -ENOMEM;
+
+ prandom_seed_state(&rnd, seed);
+
+ basic_check();
+ augmented_check();
+
kfree(nodes);
return -EAGAIN; /* Fail will directly unload the module */
diff --git a/lib/sg_split.c b/lib/sg_split.c
index 60a0babebf2e..0f89aab5c671 100644
--- a/lib/sg_split.c
+++ b/lib/sg_split.c
@@ -88,8 +88,6 @@ static void sg_split_phys(struct sg_splitter *splitters, const int nb_splits)
if (!j) {
out_sg->offset += split->skip_sg0;
out_sg->length -= split->skip_sg0;
- } else {
- out_sg->offset = 0;
}
sg_dma_address(out_sg) = 0;
sg_dma_len(out_sg) = 0;
diff --git a/lib/sort.c b/lib/sort.c
index 8e73dc55476b..52363995ccc5 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -186,36 +186,13 @@ static size_t parent(size_t i, unsigned int lsbit, size_t size)
return i / 2;
}
-/**
- * sort_r - sort an array of elements
- * @base: pointer to data to sort
- * @num: number of elements
- * @size: size of each element
- * @cmp_func: pointer to comparison function
- * @swap_func: pointer to swap function or NULL
- * @priv: third argument passed to comparison function
- *
- * This function does a heapsort on the given array. You may provide
- * a swap_func function if you need to do something more than a memory
- * copy (e.g. fix up pointers or auxiliary data), but the built-in swap
- * avoids a slow retpoline and so is significantly faster.
- *
- * The comparison function must adhere to specific mathematical
- * properties to ensure correct and stable sorting:
- * - Antisymmetry: cmp_func(a, b) must return the opposite sign of
- * cmp_func(b, a).
- * - Transitivity: if cmp_func(a, b) <= 0 and cmp_func(b, c) <= 0, then
- * cmp_func(a, c) <= 0.
- *
- * Sorting time is O(n log n) both on average and worst-case. While
- * quicksort is slightly faster on average, it suffers from exploitable
- * O(n*n) worst-case behavior and extra memory requirements that make
- * it less suitable for kernel use.
- */
-void sort_r(void *base, size_t num, size_t size,
- cmp_r_func_t cmp_func,
- swap_r_func_t swap_func,
- const void *priv)
+#include <linux/sched.h>
+
+static void __sort_r(void *base, size_t num, size_t size,
+ cmp_r_func_t cmp_func,
+ swap_r_func_t swap_func,
+ const void *priv,
+ bool may_schedule)
{
/* pre-scale counters for performance */
size_t n = num * size, a = (num/2) * size;
@@ -286,6 +263,9 @@ void sort_r(void *base, size_t num, size_t size,
b = parent(b, lsbit, size);
do_swap(base + b, base + c, size, swap_func, priv);
}
+
+ if (may_schedule)
+ cond_resched();
}
n -= size;
@@ -293,8 +273,63 @@ void sort_r(void *base, size_t num, size_t size,
if (n == size * 2 && do_cmp(base, base + size, cmp_func, priv) > 0)
do_swap(base, base + size, size, swap_func, priv);
}
+
+/**
+ * sort_r - sort an array of elements
+ * @base: pointer to data to sort
+ * @num: number of elements
+ * @size: size of each element
+ * @cmp_func: pointer to comparison function
+ * @swap_func: pointer to swap function or NULL
+ * @priv: third argument passed to comparison function
+ *
+ * This function does a heapsort on the given array. You may provide
+ * a swap_func function if you need to do something more than a memory
+ * copy (e.g. fix up pointers or auxiliary data), but the built-in swap
+ * avoids a slow retpoline and so is significantly faster.
+ *
+ * The comparison function must adhere to specific mathematical
+ * properties to ensure correct and stable sorting:
+ * - Antisymmetry: cmp_func(a, b) must return the opposite sign of
+ * cmp_func(b, a).
+ * - Transitivity: if cmp_func(a, b) <= 0 and cmp_func(b, c) <= 0, then
+ * cmp_func(a, c) <= 0.
+ *
+ * Sorting time is O(n log n) both on average and worst-case. While
+ * quicksort is slightly faster on average, it suffers from exploitable
+ * O(n*n) worst-case behavior and extra memory requirements that make
+ * it less suitable for kernel use.
+ */
+void sort_r(void *base, size_t num, size_t size,
+ cmp_r_func_t cmp_func,
+ swap_r_func_t swap_func,
+ const void *priv)
+{
+ __sort_r(base, num, size, cmp_func, swap_func, priv, false);
+}
EXPORT_SYMBOL(sort_r);
+/**
+ * sort_r_nonatomic - sort an array of elements, with cond_resched
+ * @base: pointer to data to sort
+ * @num: number of elements
+ * @size: size of each element
+ * @cmp_func: pointer to comparison function
+ * @swap_func: pointer to swap function or NULL
+ * @priv: third argument passed to comparison function
+ *
+ * Same as sort_r, but preferred for larger arrays as it does a periodic
+ * cond_resched().
+ */
+void sort_r_nonatomic(void *base, size_t num, size_t size,
+ cmp_r_func_t cmp_func,
+ swap_r_func_t swap_func,
+ const void *priv)
+{
+ __sort_r(base, num, size, cmp_func, swap_func, priv, true);
+}
+EXPORT_SYMBOL(sort_r_nonatomic);
+
void sort(void *base, size_t num, size_t size,
cmp_func_t cmp_func,
swap_func_t swap_func)
@@ -304,6 +339,19 @@ void sort(void *base, size_t num, size_t size,
.swap = swap_func,
};
- return sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w);
+ return __sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w, false);
}
EXPORT_SYMBOL(sort);
+
+void sort_nonatomic(void *base, size_t num, size_t size,
+ cmp_func_t cmp_func,
+ swap_func_t swap_func)
+{
+ struct wrapper w = {
+ .cmp = cmp_func,
+ .swap = swap_func,
+ };
+
+ return __sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w, true);
+}
+EXPORT_SYMBOL(sort_nonatomic);
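As a caller-side illustration (a sketch, not from the patch; sort_samples and its arguments are hypothetical), the new _nonatomic variants are intended for process context, where sorting a large array could otherwise hog the CPU; plain sort()/sort_r() remain safe in atomic context because they never call cond_resched():

#include <linux/sort.h>

static int cmp_int(const void *a, const void *b)
{
        const int x = *(const int *)a, y = *(const int *)b;

        return (x > y) - (x < y);
}

/* Process context, possibly millions of elements: may reschedule. */
static void sort_samples(int *values, size_t nvalues)
{
        sort_nonatomic(values, nvalues, sizeof(*values), cmp_int, NULL);
}

Passing NULL for swap_func keeps the built-in swap, which the kernel-doc above notes avoids a slow retpolined callback.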
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 056f2e411d7b..5b144bc5c4ec 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -195,7 +195,8 @@ static int dmirror_fops_release(struct inode *inode, struct file *filp)
static struct dmirror_chunk *dmirror_page_to_chunk(struct page *page)
{
- return container_of(page->pgmap, struct dmirror_chunk, pagemap);
+ return container_of(page_pgmap(page), struct dmirror_chunk,
+ pagemap);
}
static struct dmirror_device *dmirror_page_to_device(struct page *page)
@@ -706,34 +707,23 @@ static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start,
return 0;
}
-static int dmirror_atomic_map(unsigned long start, unsigned long end,
- struct page **pages, struct dmirror *dmirror)
+static int dmirror_atomic_map(unsigned long addr, struct page *page,
+ struct dmirror *dmirror)
{
- unsigned long pfn, mapped = 0;
- int i;
+ void *entry;
/* Map the migrated pages into the device's page tables. */
mutex_lock(&dmirror->mutex);
- for (i = 0, pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++, i++) {
- void *entry;
-
- if (!pages[i])
- continue;
-
- entry = pages[i];
- entry = xa_tag_pointer(entry, DPT_XA_TAG_ATOMIC);
- entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
- if (xa_is_err(entry)) {
- mutex_unlock(&dmirror->mutex);
- return xa_err(entry);
- }
-
- mapped++;
+ entry = xa_tag_pointer(page, DPT_XA_TAG_ATOMIC);
+ entry = xa_store(&dmirror->pt, addr >> PAGE_SHIFT, entry, GFP_ATOMIC);
+ if (xa_is_err(entry)) {
+ mutex_unlock(&dmirror->mutex);
+ return xa_err(entry);
}
mutex_unlock(&dmirror->mutex);
- return mapped;
+ return 0;
}
static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
@@ -780,10 +770,8 @@ static int dmirror_exclusive(struct dmirror *dmirror,
unsigned long start, end, addr;
unsigned long size = cmd->npages << PAGE_SHIFT;
struct mm_struct *mm = dmirror->notifier.mm;
- struct page *pages[64];
struct dmirror_bounce bounce;
- unsigned long next;
- int ret;
+ int ret = 0;
start = cmd->addr;
end = start + size;
@@ -795,36 +783,26 @@ static int dmirror_exclusive(struct dmirror *dmirror,
return -EINVAL;
mmap_read_lock(mm);
- for (addr = start; addr < end; addr = next) {
- unsigned long mapped = 0;
- int i;
-
- next = min(end, addr + (ARRAY_SIZE(pages) << PAGE_SHIFT));
+ for (addr = start; !ret && addr < end; addr += PAGE_SIZE) {
+ struct folio *folio;
+ struct page *page;
- ret = make_device_exclusive_range(mm, addr, next, pages, NULL);
- /*
- * Do dmirror_atomic_map() iff all pages are marked for
- * exclusive access to avoid accessing uninitialized
- * fields of pages.
- */
- if (ret == (next - addr) >> PAGE_SHIFT)
- mapped = dmirror_atomic_map(addr, next, pages, dmirror);
- for (i = 0; i < ret; i++) {
- if (pages[i]) {
- unlock_page(pages[i]);
- put_page(pages[i]);
- }
+ page = make_device_exclusive(mm, addr, NULL, &folio);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ break;
}
- if (addr + (mapped << PAGE_SHIFT) < next) {
- mmap_read_unlock(mm);
- mmput(mm);
- return -EBUSY;
- }
+ ret = dmirror_atomic_map(addr, page, dmirror);
+ folio_unlock(folio);
+ folio_put(folio);
}
mmap_read_unlock(mm);
mmput(mm);
+ if (ret)
+ return ret;
+
/* Return the migrated data for verification. */
ret = dmirror_bounce_init(&bounce, start, size);
if (ret)
diff --git a/lib/test_ida.c b/lib/test_ida.c
index c80155a1956d..63078f8dc13f 100644
--- a/lib/test_ida.c
+++ b/lib/test_ida.c
@@ -189,6 +189,75 @@ static void ida_check_bad_free(struct ida *ida)
IDA_BUG_ON(ida, !ida_is_empty(ida));
}
+/*
+ * Check ida_find_first_range() and variants.
+ */
+static void ida_check_find_first(struct ida *ida)
+{
+ /* IDA is empty; none of the below should exist */
+ IDA_BUG_ON(ida, ida_exists(ida, 0));
+ IDA_BUG_ON(ida, ida_exists(ida, 3));
+ IDA_BUG_ON(ida, ida_exists(ida, 63));
+ IDA_BUG_ON(ida, ida_exists(ida, 1023));
+ IDA_BUG_ON(ida, ida_exists(ida, (1 << 20) - 1));
+
+ /* IDA contains a single value entry */
+ IDA_BUG_ON(ida, ida_alloc_min(ida, 3, GFP_KERNEL) != 3);
+ IDA_BUG_ON(ida, ida_exists(ida, 0));
+ IDA_BUG_ON(ida, !ida_exists(ida, 3));
+ IDA_BUG_ON(ida, ida_exists(ida, 63));
+ IDA_BUG_ON(ida, ida_exists(ida, 1023));
+ IDA_BUG_ON(ida, ida_exists(ida, (1 << 20) - 1));
+
+ IDA_BUG_ON(ida, ida_alloc_min(ida, 63, GFP_KERNEL) != 63);
+ IDA_BUG_ON(ida, ida_exists(ida, 0));
+ IDA_BUG_ON(ida, !ida_exists(ida, 3));
+ IDA_BUG_ON(ida, !ida_exists(ida, 63));
+ IDA_BUG_ON(ida, ida_exists(ida, 1023));
+ IDA_BUG_ON(ida, ida_exists(ida, (1 << 20) - 1));
+
+ /* IDA contains a single bitmap */
+ IDA_BUG_ON(ida, ida_alloc_min(ida, 1023, GFP_KERNEL) != 1023);
+ IDA_BUG_ON(ida, ida_exists(ida, 0));
+ IDA_BUG_ON(ida, !ida_exists(ida, 3));
+ IDA_BUG_ON(ida, !ida_exists(ida, 63));
+ IDA_BUG_ON(ida, !ida_exists(ida, 1023));
+ IDA_BUG_ON(ida, ida_exists(ida, (1 << 20) - 1));
+
+ /* IDA contains a tree */
+ IDA_BUG_ON(ida, ida_alloc_min(ida, (1 << 20) - 1, GFP_KERNEL) != (1 << 20) - 1);
+ IDA_BUG_ON(ida, ida_exists(ida, 0));
+ IDA_BUG_ON(ida, !ida_exists(ida, 3));
+ IDA_BUG_ON(ida, !ida_exists(ida, 63));
+ IDA_BUG_ON(ida, !ida_exists(ida, 1023));
+ IDA_BUG_ON(ida, !ida_exists(ida, (1 << 20) - 1));
+
+ /* Now try to find first */
+ IDA_BUG_ON(ida, ida_find_first(ida) != 3);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, -1, 2) != -EINVAL);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 0, 2) != -ENOENT); // no used ID
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 0, 3) != 3);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 1, 3) != 3);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 3, 3) != 3);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 2, 4) != 3);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 4, 3) != -ENOENT); // min > max, fail
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 4, 60) != -ENOENT); // no used ID
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 4, 64) != 63);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 63, 63) != 63);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 64, 1026) != 1023);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 1023, 1023) != 1023);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 1023, (1 << 20) - 1) != 1023);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, 1024, (1 << 20) - 1) != (1 << 20) - 1);
+ IDA_BUG_ON(ida, ida_find_first_range(ida, (1 << 20), INT_MAX) != -ENOENT);
+
+ ida_free(ida, 3);
+ ida_free(ida, 63);
+ ida_free(ida, 1023);
+ ida_free(ida, (1 << 20) - 1);
+
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
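A usage sketch of the interface exercised by this test (hypothetical caller code, not from the patch): ida_find_first_range() returns the lowest allocated ID in [min, max], or a negative errno such as -ENOENT, which makes iterating allocated IDs in order straightforward:

#include <linux/idr.h>

/* Print every allocated ID below 1024, lowest first. */
static void walk_ids(struct ida *ida)
{
        int id;

        for (id = ida_find_first_range(ida, 0, 1023); id >= 0;
             id = ida_find_first_range(ida, id + 1, 1023))
                pr_info("id %d is allocated\n", id);
}

When id reaches 1023, the next call has min > max and returns -ENOENT, ending the loop — the same behaviour the test above checks.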
static DEFINE_IDA(ida);
static int ida_checks(void)
@@ -202,6 +271,7 @@ static int ida_checks(void)
ida_check_max(&ida);
ida_check_conv(&ida);
ida_check_bad_free(&ida);
+ ida_check_find_first(&ida);
printk("IDA: %u of %u tests passed\n", tests_passed, tests_run);
return (tests_run != tests_passed) ? 0 : -EINVAL;
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 0e865bab4a10..080a39d22e73 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -1858,6 +1858,54 @@ static void check_split_1(struct xarray *xa, unsigned long index,
xa_destroy(xa);
}
+static void check_split_2(struct xarray *xa, unsigned long index,
+ unsigned int order, unsigned int new_order)
+{
+ XA_STATE_ORDER(xas, xa, index, new_order);
+ unsigned int i, found;
+ void *entry;
+
+ xa_store_order(xa, index, order, xa, GFP_KERNEL);
+ xa_set_mark(xa, index, XA_MARK_1);
+
+ /* allocate a node for xas_try_split() */
+ xas_set_err(&xas, -ENOMEM);
+ XA_BUG_ON(xa, !xas_nomem(&xas, GFP_KERNEL));
+
+ xas_lock(&xas);
+ xas_try_split(&xas, xa, order);
+ if (((new_order / XA_CHUNK_SHIFT) < (order / XA_CHUNK_SHIFT)) &&
+ new_order < order - 1) {
+ XA_BUG_ON(xa, !xas_error(&xas) || xas_error(&xas) != -EINVAL);
+ xas_unlock(&xas);
+ goto out;
+ }
+ for (i = 0; i < (1 << order); i += (1 << new_order))
+ __xa_store(xa, index + i, xa_mk_index(index + i), 0);
+ xas_unlock(&xas);
+
+ for (i = 0; i < (1 << order); i++) {
+ unsigned int val = index + (i & ~((1 << new_order) - 1));
+ XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
+ }
+
+ xa_set_mark(xa, index, XA_MARK_0);
+ XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
+
+ xas_set_order(&xas, index, 0);
+ found = 0;
+ rcu_read_lock();
+ xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_1) {
+ found++;
+ XA_BUG_ON(xa, xa_is_internal(entry));
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, found != 1 << (order - new_order));
+out:
+ xas_destroy(&xas);
+ xa_destroy(xa);
+}
+
static noinline void check_split(struct xarray *xa)
{
unsigned int order, new_order;
@@ -1869,6 +1917,10 @@ static noinline void check_split(struct xarray *xa)
check_split_1(xa, 0, order, new_order);
check_split_1(xa, 1UL << order, order, new_order);
check_split_1(xa, 3UL << order, order, new_order);
+
+ check_split_2(xa, 0, order, new_order);
+ check_split_2(xa, 1UL << order, order, new_order);
+ check_split_2(xa, 3UL << order, order, new_order);
}
}
}
diff --git a/lib/vdso/datastore.c b/lib/vdso/datastore.c
index c715e217ec65..3693c6caf2c4 100644
--- a/lib/vdso/datastore.c
+++ b/lib/vdso/datastore.c
@@ -99,7 +99,8 @@ const struct vm_special_mapping vdso_vvar_mapping = {
struct vm_area_struct *vdso_install_vvar_mapping(struct mm_struct *mm, unsigned long addr)
{
return _install_special_mapping(mm, addr, VDSO_NR_PAGES * PAGE_SIZE,
- VM_READ | VM_MAYREAD | VM_IO | VM_DONTDUMP | VM_PFNMAP,
+ VM_READ | VM_MAYREAD | VM_IO | VM_DONTDUMP |
+ VM_PFNMAP | VM_SEALED_SYSMAP,
&vdso_vvar_mapping);
}
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index abf40eb36c49..01699852f30c 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1699,8 +1699,12 @@ char *escaped_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
return buf;
}
+#pragma GCC diagnostic push
+#ifndef __clang__
+#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
+#endif
static char *va_format(char *buf, char *end, struct va_format *va_fmt,
- struct printf_spec spec, const char *fmt)
+ struct printf_spec spec)
{
va_list va;
@@ -1713,6 +1717,7 @@ static char *va_format(char *buf, char *end, struct va_format *va_fmt,
return buf;
}
+#pragma GCC diagnostic pop
static noinline_for_stack
char *uuid_string(char *buf, char *end, const u8 *addr,
@@ -2466,7 +2471,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'U':
return uuid_string(buf, end, ptr, spec, fmt);
case 'V':
- return va_format(buf, end, ptr, spec, fmt);
+ return va_format(buf, end, ptr, spec);
case 'K':
return restricted_pointer(buf, end, ptr, spec);
case 'N':
diff --git a/lib/xarray.c b/lib/xarray.c
index 116e9286c64e..9644b18af18d 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -278,6 +278,7 @@ void xas_destroy(struct xa_state *xas)
xas->xa_alloc = node = next;
}
}
+EXPORT_SYMBOL_GPL(xas_destroy);
/**
* xas_nomem() - Allocate memory if needed.
@@ -1007,6 +1008,26 @@ static void node_set_marks(struct xa_node *node, unsigned int offset,
}
}
+static void __xas_init_node_for_split(struct xa_state *xas,
+ struct xa_node *node, void *entry)
+{
+ unsigned int i;
+ void *sibling = NULL;
+ unsigned int mask = xas->xa_sibs;
+
+ if (!node)
+ return;
+ node->array = xas->xa;
+ for (i = 0; i < XA_CHUNK_SIZE; i++) {
+ if ((i & mask) == 0) {
+ RCU_INIT_POINTER(node->slots[i], entry);
+ sibling = xa_mk_sibling(i);
+ } else {
+ RCU_INIT_POINTER(node->slots[i], sibling);
+ }
+ }
+}
+
/**
* xas_split_alloc() - Allocate memory for splitting an entry.
* @xas: XArray operation state.
@@ -1025,7 +1046,6 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
gfp_t gfp)
{
unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
- unsigned int mask = xas->xa_sibs;
/* XXX: no support for splitting really large entries yet */
if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT <= order))
@@ -1034,22 +1054,13 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
return;
do {
- unsigned int i;
- void *sibling = NULL;
struct xa_node *node;
node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
if (!node)
goto nomem;
- node->array = xas->xa;
- for (i = 0; i < XA_CHUNK_SIZE; i++) {
- if ((i & mask) == 0) {
- RCU_INIT_POINTER(node->slots[i], entry);
- sibling = xa_mk_sibling(i);
- } else {
- RCU_INIT_POINTER(node->slots[i], sibling);
- }
- }
+
+ __xas_init_node_for_split(xas, node, entry);
RCU_INIT_POINTER(node->parent, xas->xa_alloc);
xas->xa_alloc = node;
} while (sibs-- > 0);
@@ -1122,6 +1133,128 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
xas_update(xas, node);
}
EXPORT_SYMBOL_GPL(xas_split);
+
+/**
+ * xas_try_split_min_order() - Minimal split order xas_try_split() can accept
+ * @order: Current entry order.
+ *
+ * xas_try_split() can split a multi-index entry into entries smaller than
+ * @order - 1 if no new xa_node is needed. This function returns the minimal
+ * order xas_try_split() supports.
+ *
+ * Return: the minimal order xas_try_split() supports
+ *
+ * Context: Any context.
+ */
+unsigned int xas_try_split_min_order(unsigned int order)
+{
+ if (order % XA_CHUNK_SHIFT == 0)
+ return order == 0 ? 0 : order - 1;
+
+ return order - (order % XA_CHUNK_SHIFT);
+}
+EXPORT_SYMBOL_GPL(xas_try_split_min_order);
+
+/**
+ * xas_try_split() - Try to split a multi-index entry.
+ * @xas: XArray operation state.
+ * @entry: New entry to store in the array.
+ * @order: Current entry order.
+ *
+ * The size of the new entries is set in @xas. The value in @entry is
+ * copied to all the replacement entries. If exactly one new xa_node is
+ * needed and xas->xa_alloc is NULL, the function allocates it with
+ * GFP_NOWAIT. If more than one new xa_node is needed, it fails with -EINVAL.
+ *
+ * NOTE: use xas_try_split_min_order() to get the next split order instead of
+ * @order - 1 if you want to minimize xas_try_split() calls.
+ *
+ * Context: Any context. The caller should hold the xa_lock.
+ */
+void xas_try_split(struct xa_state *xas, void *entry, unsigned int order)
+{
+ unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
+ unsigned int offset, marks;
+ struct xa_node *node;
+ void *curr = xas_load(xas);
+ int values = 0;
+ gfp_t gfp = GFP_NOWAIT;
+
+ node = xas->xa_node;
+ if (xas_top(node))
+ return;
+
+ if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
+ gfp |= __GFP_ACCOUNT;
+
+ marks = node_get_marks(node, xas->xa_offset);
+
+ offset = xas->xa_offset + sibs;
+
+ if (xas->xa_shift < node->shift) {
+ struct xa_node *child = xas->xa_alloc;
+ unsigned int expected_sibs =
+ (1 << ((order - 1) % XA_CHUNK_SHIFT)) - 1;
+
+ /*
+ * No support for splitting sibling entries
+ * (horizontally) or cascade splits (vertically); either
+ * would require two or more new xa_nodes, and if one
+ * xa_node allocation failed, it would be hard to free
+ * the prior allocations.
+ */
+ if (sibs || xas->xa_sibs != expected_sibs) {
+ xas_destroy(xas);
+ xas_set_err(xas, -EINVAL);
+ return;
+ }
+
+ if (!child) {
+ child = kmem_cache_alloc_lru(radix_tree_node_cachep,
+ xas->xa_lru, gfp);
+ if (!child) {
+ xas_destroy(xas);
+ xas_set_err(xas, -ENOMEM);
+ return;
+ }
+ RCU_INIT_POINTER(child->parent, xas->xa_alloc);
+ }
+ __xas_init_node_for_split(xas, child, entry);
+
+ xas->xa_alloc = rcu_dereference_raw(child->parent);
+ child->shift = node->shift - XA_CHUNK_SHIFT;
+ child->offset = offset;
+ child->count = XA_CHUNK_SIZE;
+ child->nr_values = xa_is_value(entry) ?
+ XA_CHUNK_SIZE : 0;
+ RCU_INIT_POINTER(child->parent, node);
+ node_set_marks(node, offset, child, xas->xa_sibs,
+ marks);
+ rcu_assign_pointer(node->slots[offset],
+ xa_mk_node(child));
+ if (xa_is_value(curr))
+ values--;
+ xas_update(xas, child);
+
+ } else {
+ do {
+ unsigned int canon = offset - xas->xa_sibs;
+
+ node_set_marks(node, canon, NULL, 0, marks);
+ rcu_assign_pointer(node->slots[canon], entry);
+ while (offset > canon)
+ rcu_assign_pointer(node->slots[offset--],
+ xa_mk_sibling(canon));
+ values += (xa_is_value(entry) - xa_is_value(curr)) *
+ (xas->xa_sibs + 1);
+ } while (offset-- > xas->xa_offset);
+ }
+
+ node->nr_values += values;
+ xas_update(xas, node);
+}
+EXPORT_SYMBOL_GPL(xas_try_split);
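To make the calling pattern concrete, a sketch (not from the patch) of splitting an entry down to order 0 in the fewest calls, following the NOTE above; it assumes @xas already points at a present order-9 entry at @index, that the xa_lock is held, and that the identifiers are placeholders:

unsigned int order = 9;         /* current order of the entry */

while (order > 0) {
        unsigned int new_order = xas_try_split_min_order(order);

        xas_set_order(&xas, index, new_order);
        xas_try_split(&xas, entry, order);
        if (xas_error(&xas))
                break;          /* e.g. -ENOMEM under GFP_NOWAIT */
        order = new_order;
}

Each iteration needs at most one new xa_node, which is the invariant xas_try_split() is built around.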
#endif
/**
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c
index 3a1d8d34182e..8fb2a3e17c0e 100644
--- a/lib/zlib_deflate/deflate.c
+++ b/lib/zlib_deflate/deflate.c
@@ -151,9 +151,6 @@ static const config configuration_table[10] = {
* meaning.
*/
-#define EQUAL 0
-/* result of memcmp for equal strings */
-
/* ===========================================================================
* Update a hash value with the given input byte
* IN assertion: all calls to UPDATE_HASH are made with consecutive
@@ -713,8 +710,7 @@ static void check_match(
)
{
/* check that the match is indeed a match */
- if (memcmp((char *)s->window + match,
- (char *)s->window + start, length) != EQUAL) {
+ if (memcmp((char *)s->window + match, (char *)s->window + start, length)) {
fprintf(stderr, " start %u, match %u, length %d\n",
start, match, length);
do {
diff --git a/mm/Kconfig b/mm/Kconfig
index 0b7f4bb5cb80..d3fb3762887b 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -129,7 +129,6 @@ choice
prompt "Default allocator"
depends on ZSWAP
default ZSWAP_ZPOOL_DEFAULT_ZSMALLOC if MMU
- default ZSWAP_ZPOOL_DEFAULT_ZBUD
help
Selects the default allocator for the compressed cache for
swap pages.
@@ -140,21 +139,6 @@ choice
The selection made here can be overridden by using the kernel
command line 'zswap.zpool=' option.
-config ZSWAP_ZPOOL_DEFAULT_ZBUD
- bool "zbud"
- select ZBUD
- help
- Use the zbud allocator as the default allocator.
-
-config ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED
- bool "z3foldi (DEPRECATED)"
- select Z3FOLD_DEPRECATED
- help
- Use the z3fold allocator as the default allocator.
-
- Deprecated and scheduled for removal in a few cycles,
- see CONFIG_Z3FOLD_DEPRECATED.
-
config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
bool "zsmalloc"
select ZSMALLOC
@@ -165,40 +149,9 @@ endchoice
config ZSWAP_ZPOOL_DEFAULT
string
depends on ZSWAP
- default "zbud" if ZSWAP_ZPOOL_DEFAULT_ZBUD
- default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED
default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
default ""
-config ZBUD
- tristate "2:1 compression allocator (zbud)"
- depends on ZSWAP
- help
- A special purpose allocator for storing compressed pages.
- It is designed to store up to two compressed pages per physical
- page. While this design limits storage density, it has simple and
- deterministic reclaim properties that make it preferable to a higher
- density approach when reclaim will be used.
-
-config Z3FOLD_DEPRECATED
- tristate "3:1 compression allocator (z3fold) (DEPRECATED)"
- depends on ZSWAP
- help
- Deprecated and scheduled for removal in a few cycles. If you have
- a good reason for using Z3FOLD over ZSMALLOC, please contact
- linux-mm@kvack.org and the zswap maintainers.
-
- A special purpose allocator for storing compressed pages.
- It is designed to store up to three compressed pages per physical
- page. It is a ZBUD derivative so the simplicity and determinism are
- still there.
-
-config Z3FOLD
- tristate
- default y if Z3FOLD_DEPRECATED=y
- default m if Z3FOLD_DEPRECATED=m
- depends on Z3FOLD_DEPRECATED
-
config ZSMALLOC
tristate
prompt "N:1 compression allocator (zsmalloc)" if (ZSWAP || ZRAM)
@@ -493,6 +446,9 @@ config SPARSEMEM_VMEMMAP
SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
pfn_to_page and page_to_pfn operations. This is the most
efficient option when sufficient kernel resources are available.
+
+config SPARSEMEM_VMEMMAP_PREINIT
+ bool
#
# Select this config option from the architecture Kconfig, if it is preferred
# to enable the feature of HugeTLB/dev_dax vmemmap optimization.
@@ -503,6 +459,9 @@ config ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
config ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
bool
+config ARCH_WANT_HUGETLB_VMEMMAP_PREINIT
+ bool
+
config HAVE_MEMBLOCK_PHYS_MAP
bool
@@ -860,11 +819,15 @@ config ARCH_WANT_GENERAL_HUGETLB
config ARCH_WANTS_THP_SWAP
def_bool n
+config MM_ID
+ def_bool n
+
menuconfig TRANSPARENT_HUGEPAGE
bool "Transparent Hugepage Support"
depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT
select COMPACTION
select XARRAY_MULTI
+ select MM_ID
help
Transparent Hugepages allows the kernel to use huge pages and
huge tlb transparently to the applications whenever possible.
@@ -928,8 +891,25 @@ config READ_ONLY_THP_FOR_FS
support of file THPs will be developed in the next few release
cycles.
+config NO_PAGE_MAPCOUNT
+ bool "No per-page mapcount (EXPERIMENTAL)"
+ help
+ Do not maintain per-page mapcounts for pages part of larger
+ allocations, such as transparent huge pages.
+
+ When this config option is enabled, some interfaces that relied on
+ this information will rely on less-precise per-allocation information
+ instead: for example, using the average per-page mapcount in such
+ a large allocation instead of the per-page mapcount.
+
+ EXPERIMENTAL because the impact of some changes is still unclear.
+
endif # TRANSPARENT_HUGEPAGE
+# simple helper to make the code a bit easier to read
+config PAGE_MAPCOUNT
+ def_bool !NO_PAGE_MAPCOUNT
+
#
# The architecture supports pgtable leaves that is larger than PAGE_SIZE
#
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 41a58536531d..32b65073d0cc 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -186,8 +186,9 @@ config ARCH_HAS_DEBUG_WX
config DEBUG_WX
bool "Warn on W+X mappings at boot"
depends on ARCH_HAS_DEBUG_WX
+ depends on ARCH_HAS_PTDUMP
depends on MMU
- select PTDUMP_CORE
+ select PTDUMP
help
Generate a warning if any W+X mappings are found at boot.
@@ -212,18 +213,18 @@ config DEBUG_WX
If in doubt, say "Y".
-config GENERIC_PTDUMP
+config ARCH_HAS_PTDUMP
bool
-config PTDUMP_CORE
+config PTDUMP
bool
config PTDUMP_DEBUGFS
bool "Export kernel pagetable layout to userspace via debugfs"
depends on DEBUG_KERNEL
depends on DEBUG_FS
- depends on GENERIC_PTDUMP
- select PTDUMP_CORE
+ depends on ARCH_HAS_PTDUMP
+ select PTDUMP
help
Say Y here if you want to show the kernel pagetable layout in a
debugfs file. This information is only useful for kernel developers
diff --git a/mm/Makefile b/mm/Makefile
index 850386a67b3e..e7f6bbf8ae5f 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -75,10 +75,13 @@ ifdef CONFIG_MMU
obj-$(CONFIG_ADVISE_SYSCALLS) += madvise.o
endif
-obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o swap_slots.o
+obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o
obj-$(CONFIG_ZSWAP) += zswap.o
obj-$(CONFIG_HAS_DMA) += dmapool.o
obj-$(CONFIG_HUGETLBFS) += hugetlb.o
+ifdef CONFIG_CMA
+obj-$(CONFIG_HUGETLBFS) += hugetlb_cma.o
+endif
obj-$(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP) += hugetlb_vmemmap.o
obj-$(CONFIG_NUMA) += mempolicy.o
obj-$(CONFIG_SPARSEMEM) += sparse.o
@@ -113,9 +116,7 @@ obj-$(CONFIG_DEBUG_VM_PGTABLE) += debug_vm_pgtable.o
obj-$(CONFIG_PAGE_OWNER) += page_owner.o
obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
obj-$(CONFIG_ZPOOL) += zpool.o
-obj-$(CONFIG_ZBUD) += zbud.o
obj-$(CONFIG_ZSMALLOC) += zsmalloc.o
-obj-$(CONFIG_Z3FOLD) += z3fold.o
obj-$(CONFIG_GENERIC_EARLY_IOREMAP) += early_ioremap.o
obj-$(CONFIG_CMA) += cma.o
obj-$(CONFIG_NUMA) += numa.o
@@ -138,7 +139,7 @@ obj-$(CONFIG_ZONE_DEVICE) += memremap.o
obj-$(CONFIG_HMM_MIRROR) += hmm.o
obj-$(CONFIG_MEMFD_CREATE) += memfd.o
obj-$(CONFIG_MAPPING_DIRTY_HELPERS) += mapping_dirty_helpers.o
-obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
+obj-$(CONFIG_PTDUMP) += ptdump.o
obj-$(CONFIG_PAGE_REPORTING) += page_reporting.o
obj-$(CONFIG_IO_MAPPING) += io-mapping.o
obj-$(CONFIG_HAVE_BOOTMEM_INFO_NODE) += bootmem_info.o
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index 6597ebea8ae2..d3e00731e262 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -24,6 +24,7 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
balloon_page_insert(b_dev_info, page);
unlock_page(page);
__count_vm_event(BALLOON_INFLATE);
+ inc_node_page_state(page, NR_BALLOON_PAGES);
}
/**
@@ -103,6 +104,7 @@ size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
__count_vm_event(BALLOON_DEFLATE);
list_add(&page->lru, pages);
unlock_page(page);
+ dec_node_page_state(page, NR_BALLOON_PAGES);
n_pages++;
}
spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
diff --git a/mm/bootmem_info.c b/mm/bootmem_info.c
index 95f288169a38..b0e2a9fa641f 100644
--- a/mm/bootmem_info.c
+++ b/mm/bootmem_info.c
@@ -88,7 +88,9 @@ static void __init register_page_bootmem_info_section(unsigned long start_pfn)
memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
- register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);
+ if (!preinited_vmemmap_section(ms))
+ register_page_bootmem_memmap(section_nr, memmap,
+ PAGES_PER_SECTION);
usage = ms->usage;
page = virt_to_page(usage);
diff --git a/mm/cma.c b/mm/cma.c
index de5bc0c81fc2..b06d5fe73399 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -18,6 +18,7 @@
#include <linux/memblock.h>
#include <linux/err.h>
+#include <linux/list.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -33,11 +34,17 @@
struct cma cma_areas[MAX_CMA_AREAS];
unsigned int cma_area_count;
-static DEFINE_MUTEX(cma_mutex);
+
+static int __init __cma_declare_contiguous_nid(phys_addr_t base,
+ phys_addr_t size, phys_addr_t limit,
+ phys_addr_t alignment, unsigned int order_per_bit,
+ bool fixed, const char *name, struct cma **res_cma,
+ int nid);
phys_addr_t cma_get_base(const struct cma *cma)
{
- return PFN_PHYS(cma->base_pfn);
+ WARN_ON_ONCE(cma->nranges != 1);
+ return PFN_PHYS(cma->ranges[0].base_pfn);
}
unsigned long cma_get_size(const struct cma *cma)
@@ -63,9 +70,10 @@ static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
* The value returned is represented in order_per_bits.
*/
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
+ const struct cma_memrange *cmr,
unsigned int align_order)
{
- return (cma->base_pfn & ((1UL << align_order) - 1))
+ return (cmr->base_pfn & ((1UL << align_order) - 1))
>> cma->order_per_bit;
}
@@ -75,65 +83,122 @@ static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
-static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
- unsigned long count)
+static void cma_clear_bitmap(struct cma *cma, const struct cma_memrange *cmr,
+ unsigned long pfn, unsigned long count)
{
unsigned long bitmap_no, bitmap_count;
unsigned long flags;
- bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
+ bitmap_no = (pfn - cmr->base_pfn) >> cma->order_per_bit;
bitmap_count = cma_bitmap_pages_to_bits(cma, count);
spin_lock_irqsave(&cma->lock, flags);
- bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
+ bitmap_clear(cmr->bitmap, bitmap_no, bitmap_count);
+ cma->available_count += count;
spin_unlock_irqrestore(&cma->lock, flags);
}
-static void __init cma_activate_area(struct cma *cma)
+/*
+ * Check if a CMA area contains no ranges that intersect with
+ * multiple zones. Store the result in the flags in case
+ * this gets called more than once.
+ */
+bool cma_validate_zones(struct cma *cma)
{
- unsigned long base_pfn = cma->base_pfn, pfn;
- struct zone *zone;
-
- cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
- if (!cma->bitmap)
- goto out_error;
+ int r;
+ unsigned long base_pfn;
+ struct cma_memrange *cmr;
+ bool valid_bit_set;
/*
- * alloc_contig_range() requires the pfn range specified to be in the
- * same zone. Simplify by forcing the entire CMA resv range to be in the
- * same zone.
+ * If already validated, return result of previous check.
+ * Either the valid or invalid bit will be set if this
+ * check has already been done. If neither is set, the
+ * check has not been performed yet.
*/
- WARN_ON_ONCE(!pfn_valid(base_pfn));
- zone = page_zone(pfn_to_page(base_pfn));
- for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
- WARN_ON_ONCE(!pfn_valid(pfn));
- if (page_zone(pfn_to_page(pfn)) != zone)
- goto not_in_zone;
+ valid_bit_set = test_bit(CMA_ZONES_VALID, &cma->flags);
+ if (valid_bit_set || test_bit(CMA_ZONES_INVALID, &cma->flags))
+ return valid_bit_set;
+
+ for (r = 0; r < cma->nranges; r++) {
+ cmr = &cma->ranges[r];
+ base_pfn = cmr->base_pfn;
+
+ /*
+ * alloc_contig_range() requires the pfn range specified
+ * to be in the same zone. Simplify by forcing the entire
+ * CMA resv range to be in the same zone.
+ */
+ WARN_ON_ONCE(!pfn_valid(base_pfn));
+ if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count)) {
+ set_bit(CMA_ZONES_INVALID, &cma->flags);
+ return false;
+ }
+ }
+
+ set_bit(CMA_ZONES_VALID, &cma->flags);
+
+ return true;
+}
+
+static void __init cma_activate_area(struct cma *cma)
+{
+ unsigned long pfn, end_pfn;
+ int allocrange, r;
+ struct cma_memrange *cmr;
+ unsigned long bitmap_count, count;
+
+ for (allocrange = 0; allocrange < cma->nranges; allocrange++) {
+ cmr = &cma->ranges[allocrange];
+ cmr->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma, cmr),
+ GFP_KERNEL);
+ if (!cmr->bitmap)
+ goto cleanup;
}
- for (pfn = base_pfn; pfn < base_pfn + cma->count;
- pfn += pageblock_nr_pages)
- init_cma_reserved_pageblock(pfn_to_page(pfn));
+ if (!cma_validate_zones(cma))
+ goto cleanup;
+
+ for (r = 0; r < cma->nranges; r++) {
+ cmr = &cma->ranges[r];
+ if (cmr->early_pfn != cmr->base_pfn) {
+ count = cmr->early_pfn - cmr->base_pfn;
+ bitmap_count = cma_bitmap_pages_to_bits(cma, count);
+ bitmap_set(cmr->bitmap, 0, bitmap_count);
+ }
+
+ for (pfn = cmr->early_pfn; pfn < cmr->base_pfn + cmr->count;
+ pfn += pageblock_nr_pages)
+ init_cma_reserved_pageblock(pfn_to_page(pfn));
+ }
spin_lock_init(&cma->lock);
+ mutex_init(&cma->alloc_mutex);
+
#ifdef CONFIG_CMA_DEBUGFS
INIT_HLIST_HEAD(&cma->mem_head);
spin_lock_init(&cma->mem_head_lock);
#endif
+ set_bit(CMA_ACTIVATED, &cma->flags);
return;
-not_in_zone:
- bitmap_free(cma->bitmap);
-out_error:
+cleanup:
+ for (r = 0; r < allocrange; r++)
+ bitmap_free(cma->ranges[r].bitmap);
+
/* Expose all pages to the buddy, they are useless for CMA. */
- if (!cma->reserve_pages_on_error) {
- for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
- free_reserved_page(pfn_to_page(pfn));
+ if (!test_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags)) {
+ for (r = 0; r < allocrange; r++) {
+ cmr = &cma->ranges[r];
+ end_pfn = cmr->base_pfn + cmr->count;
+ for (pfn = cmr->early_pfn; pfn < end_pfn; pfn++)
+ free_reserved_page(pfn_to_page(pfn));
+ }
}
totalcma_pages -= cma->count;
- cma->count = 0;
+ cma->available_count = cma->count = 0;
pr_err("CMA area %s could not be activated\n", cma->name);
}
@@ -150,7 +215,44 @@ core_initcall(cma_init_reserved_areas);
void __init cma_reserve_pages_on_error(struct cma *cma)
{
- cma->reserve_pages_on_error = true;
+ set_bit(CMA_RESERVE_PAGES_ON_ERROR, &cma->flags);
+}
+
+static int __init cma_new_area(const char *name, phys_addr_t size,
+ unsigned int order_per_bit,
+ struct cma **res_cma)
+{
+ struct cma *cma;
+
+ if (cma_area_count == ARRAY_SIZE(cma_areas)) {
+ pr_err("Not enough slots for CMA reserved regions!\n");
+ return -ENOSPC;
+ }
+
+ /*
+ * Each reserved area must be initialised later, when more kernel
+ * subsystems (like slab allocator) are available.
+ */
+ cma = &cma_areas[cma_area_count];
+ cma_area_count++;
+
+ if (name)
+ snprintf(cma->name, CMA_MAX_NAME, "%s", name);
+ else
+ snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count);
+
+ cma->available_count = cma->count = size >> PAGE_SHIFT;
+ cma->order_per_bit = order_per_bit;
+ *res_cma = cma;
+ totalcma_pages += cma->count;
+
+ return 0;
+}
+
+static void __init cma_drop_area(struct cma *cma)
+{
+ totalcma_pages -= cma->count;
+ cma_area_count--;
}
/**
@@ -171,13 +273,9 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
struct cma **res_cma)
{
struct cma *cma;
+ int ret;
/* Sanity checks */
- if (cma_area_count == ARRAY_SIZE(cma_areas)) {
- pr_err("Not enough slots for CMA reserved regions!\n");
- return -ENOSPC;
- }
-
if (!size || !memblock_is_region_reserved(base, size))
return -EINVAL;
@@ -194,25 +292,264 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
return -EINVAL;
+ ret = cma_new_area(name, size, order_per_bit, &cma);
+ if (ret != 0)
+ return ret;
+
+ cma->ranges[0].base_pfn = PFN_DOWN(base);
+ cma->ranges[0].early_pfn = PFN_DOWN(base);
+ cma->ranges[0].count = cma->count;
+ cma->nranges = 1;
+ cma->nid = NUMA_NO_NODE;
+
+ *res_cma = cma;
+
+ return 0;
+}
+
+/*
+ * Structure used while walking physical memory ranges and finding out
+ * which one(s) to use for a CMA area.
+ */
+struct cma_init_memrange {
+ phys_addr_t base;
+ phys_addr_t size;
+ struct list_head list;
+};
+
+/*
+ * Work array used during CMA initialization.
+ */
+static struct cma_init_memrange memranges[CMA_MAX_RANGES] __initdata;
+
+static bool __init revsizecmp(struct cma_init_memrange *mlp,
+ struct cma_init_memrange *mrp)
+{
+ return mlp->size > mrp->size;
+}
+
+static bool __init basecmp(struct cma_init_memrange *mlp,
+ struct cma_init_memrange *mrp)
+{
+ return mlp->base < mrp->base;
+}
+
+/*
+ * Helper function to create sorted lists.
+ */
+static void __init list_insert_sorted(
+ struct list_head *ranges,
+ struct cma_init_memrange *mrp,
+ bool (*cmp)(struct cma_init_memrange *lh, struct cma_init_memrange *rh))
+{
+ struct list_head *mp;
+ struct cma_init_memrange *mlp;
+
+ if (list_empty(ranges))
+ list_add(&mrp->list, ranges);
+ else {
+ list_for_each(mp, ranges) {
+ mlp = list_entry(mp, struct cma_init_memrange, list);
+ if (cmp(mlp, mrp))
+ break;
+ }
+ __list_add(&mrp->list, mlp->list.prev, &mlp->list);
+ }
+}
+
+/*
+ * Create CMA areas with a total size of @total_size. A normal allocation
+ * for one area is tried first. If that fails, the biggest memblock
+ * ranges above 4G are selected, and allocated bottom up.
+ *
+ * The complexity here is not great, but this function will only be
+ * called during boot, and the lists operated on hold at most
+ * CMA_MAX_RANGES (currently 8) elements.
+ */
+int __init cma_declare_contiguous_multi(phys_addr_t total_size,
+ phys_addr_t align, unsigned int order_per_bit,
+ const char *name, struct cma **res_cma, int nid)
+{
+ phys_addr_t start, end;
+ phys_addr_t size, sizesum, sizeleft;
+ struct cma_init_memrange *mrp, *mlp, *failed;
+ struct cma_memrange *cmrp;
+ LIST_HEAD(ranges);
+ LIST_HEAD(final_ranges);
+ struct list_head *mp, *next;
+ int ret, nr = 1;
+ u64 i;
+ struct cma *cma;
+
/*
- * Each reserved area must be initialised later, when more kernel
- * subsystems (like slab allocator) are available.
+ * First, try it the normal way, producing just one range.
*/
- cma = &cma_areas[cma_area_count];
+ ret = __cma_declare_contiguous_nid(0, total_size, 0, align,
+ order_per_bit, false, name, res_cma, nid);
+ if (ret != -ENOMEM)
+ goto out;
- if (name)
- snprintf(cma->name, CMA_MAX_NAME, name);
- else
- snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count);
+ /*
+ * Couldn't find one range that fits our needs, so try multiple
+ * ranges.
+ *
+ * No need to do the alignment checks here, the call to
+ * cma_declare_contiguous_nid above would have caught
+ * any issues. With the checks, we know that:
+ *
+ * - @align is a power of 2
+ * - @align is >= pageblock alignment
+ * - @size is aligned to @align and to @order_per_bit
+ *
+ * So, as long as we create ranges that have a base
+ * aligned to @align, and a size that is aligned to
+ * both @align and @order_per_bit, things will work out.
+ */
+ nr = 0;
+ sizesum = 0;
+ failed = NULL;
- cma->base_pfn = PFN_DOWN(base);
- cma->count = size >> PAGE_SHIFT;
- cma->order_per_bit = order_per_bit;
+ ret = cma_new_area(name, total_size, order_per_bit, &cma);
+ if (ret != 0)
+ goto out;
+
+ align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);
+ /*
+ * Create a list of ranges above 4G, largest range first.
+ */
+ for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) {
+ if (upper_32_bits(start) == 0)
+ continue;
+
+ start = ALIGN(start, align);
+ if (start >= end)
+ continue;
+
+ end = ALIGN_DOWN(end, align);
+ if (end <= start)
+ continue;
+
+ size = end - start;
+ size = ALIGN_DOWN(size, (PAGE_SIZE << order_per_bit));
+ if (!size)
+ continue;
+ sizesum += size;
+
+ pr_debug("consider %016llx - %016llx\n", (u64)start, (u64)end);
+
+ /*
+ * If we have not yet used the maximum number of
+ * areas, grab a new one.
+ *
+ * If all slots are used, see whether this range is
+ * at least as large as the smallest one recorded so
+ * far; if it is, re-use the slot of the smallest element.
+ */
+ if (nr < CMA_MAX_RANGES)
+ mrp = &memranges[nr++];
+ else {
+ mrp = list_last_entry(&ranges,
+ struct cma_init_memrange, list);
+ if (size < mrp->size)
+ continue;
+ list_del(&mrp->list);
+ sizesum -= mrp->size;
+ pr_debug("deleted %016llx - %016llx from the list\n",
+ (u64)mrp->base, (u64)mrp->base + size);
+ }
+ mrp->base = start;
+ mrp->size = size;
+
+ /*
+ * Now do a sorted insert.
+ */
+ list_insert_sorted(&ranges, mrp, revsizecmp);
+ pr_debug("added %016llx - %016llx to the list\n",
+ (u64)mrp->base, (u64)mrp->base + size);
+ pr_debug("total size now %llu\n", (u64)sizesum);
+ }
+
+ /*
+ * There is not enough room in the CMA_MAX_RANGES largest
+ * ranges, so bail out.
+ */
+ if (sizesum < total_size) {
+ cma_drop_area(cma);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Found ranges that provide enough combined space.
+ * Now, sort them by address, smallest first, because we
+ * want to mimic a bottom-up memblock allocation.
+ */
+ sizesum = 0;
+ list_for_each_safe(mp, next, &ranges) {
+ mlp = list_entry(mp, struct cma_init_memrange, list);
+ list_del(mp);
+ list_insert_sorted(&final_ranges, mlp, basecmp);
+ sizesum += mlp->size;
+ if (sizesum >= total_size)
+ break;
+ }
+
+ /*
+ * Walk the final list, and add a CMA range for
+ * each range, possibly not using the last one fully.
+ */
+ nr = 0;
+ sizeleft = total_size;
+ list_for_each(mp, &final_ranges) {
+ mlp = list_entry(mp, struct cma_init_memrange, list);
+ size = min(sizeleft, mlp->size);
+ if (memblock_reserve(mlp->base, size)) {
+ /*
+ * Unexpected error. Could go on to
+ * the next one, but just abort to
+ * be safe.
+ */
+ failed = mlp;
+ break;
+ }
+
+ pr_debug("created region %d: %016llx - %016llx\n",
+ nr, (u64)mlp->base, (u64)mlp->base + size);
+ cmrp = &cma->ranges[nr++];
+ cmrp->base_pfn = PHYS_PFN(mlp->base);
+ cmrp->early_pfn = cmrp->base_pfn;
+ cmrp->count = size >> PAGE_SHIFT;
+
+ sizeleft -= size;
+ if (sizeleft == 0)
+ break;
+ }
+
+ if (failed) {
+ list_for_each(mp, &final_ranges) {
+ mlp = list_entry(mp, struct cma_init_memrange, list);
+ if (mlp == failed)
+ break;
+ memblock_phys_free(mlp->base, mlp->size);
+ }
+ cma_drop_area(cma);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cma->nranges = nr;
+ cma->nid = nid;
*res_cma = cma;
- cma_area_count++;
- totalcma_pages += cma->count;
- return 0;
+out:
+ if (ret != 0)
+ pr_err("Failed to reserve %lu MiB\n",
+ (unsigned long)total_size / SZ_1M);
+ else
+ pr_info("Reserved %lu MiB in %d range%s\n",
+ (unsigned long)total_size / SZ_1M, nr,
+ nr > 1 ? "s" : "");
+ return ret;
}
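Caller-side, the multi-range variant is a drop-in for cma_declare_contiguous_nid() when one physically contiguous block may not exist. A sketch (the sizes, alignment, and node below are illustrative, and the hugetlb-style naming is hypothetical):

#include <linux/cma.h>
#include <linux/sizes.h>

static struct cma *hugetlb_cma;

static void __init reserve_gigantic_cma(void)
{
        int ret;

        /* 16 GiB total on node 0, gigantic-page aligned; may be
         * satisfied by up to CMA_MAX_RANGES separate ranges. */
        ret = cma_declare_contiguous_multi(SZ_16G,
                        PAGE_SIZE << MAX_PAGE_ORDER, 0, "hugetlb",
                        &hugetlb_cma, 0);
        if (ret)
                pr_warn("CMA reservation failed: %d\n", ret);
}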
/**
@@ -241,6 +578,26 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
bool fixed, const char *name, struct cma **res_cma,
int nid)
{
+ int ret;
+
+ ret = __cma_declare_contiguous_nid(base, size, limit, alignment,
+ order_per_bit, fixed, name, res_cma, nid);
+ if (ret != 0)
+ pr_err("Failed to reserve %ld MiB\n",
+ (unsigned long)size / SZ_1M);
+ else
+ pr_info("Reserved %ld MiB at %pa\n",
+ (unsigned long)size / SZ_1M, &base);
+
+ return ret;
+}
+
+static int __init __cma_declare_contiguous_nid(phys_addr_t base,
+ phys_addr_t size, phys_addr_t limit,
+ phys_addr_t alignment, unsigned int order_per_bit,
+ bool fixed, const char *name, struct cma **res_cma,
+ int nid)
+{
phys_addr_t memblock_end = memblock_end_of_DRAM();
phys_addr_t highmem_start;
int ret;
@@ -272,10 +629,9 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
/* Sanitise input arguments. */
alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
if (fixed && base & (alignment - 1)) {
- ret = -EINVAL;
pr_err("Region at %pa must be aligned to %pa bytes\n",
&base, &alignment);
- goto err;
+ return -EINVAL;
}
base = ALIGN(base, alignment);
size = ALIGN(size, alignment);
@@ -293,10 +649,9 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
* low/high memory boundary.
*/
if (fixed && base < highmem_start && base + size > highmem_start) {
- ret = -EINVAL;
pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
&base, &highmem_start);
- goto err;
+ return -EINVAL;
}
/*
@@ -308,18 +663,16 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
limit = memblock_end;
if (base + size > limit) {
- ret = -EINVAL;
pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
&size, &base, &limit);
- goto err;
+ return -EINVAL;
}
/* Reserve memory */
if (fixed) {
if (memblock_is_region_reserved(base, size) ||
memblock_reserve(base, size) < 0) {
- ret = -EBUSY;
- goto err;
+ return -EBUSY;
}
} else {
phys_addr_t addr = 0;
@@ -356,10 +709,8 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
if (!addr) {
addr = memblock_alloc_range_nid(size, alignment, base,
limit, nid, true);
- if (!addr) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!addr)
+ return -ENOMEM;
}
/*
@@ -372,86 +723,89 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
if (ret)
- goto free_mem;
+ memblock_phys_free(base, size);
- pr_info("Reserved %ld MiB at %pa on node %d\n", (unsigned long)size / SZ_1M,
- &base, nid);
- return 0;
+ (*res_cma)->nid = nid;
-free_mem:
- memblock_phys_free(base, size);
-err:
- pr_err("Failed to reserve %ld MiB on node %d\n", (unsigned long)size / SZ_1M,
- nid);
return ret;
}
static void cma_debug_show_areas(struct cma *cma)
{
unsigned long next_zero_bit, next_set_bit, nr_zero;
- unsigned long start = 0;
- unsigned long nr_part, nr_total = 0;
- unsigned long nbits = cma_bitmap_maxno(cma);
+ unsigned long start;
+ unsigned long nr_part;
+ unsigned long nbits;
+ int r;
+ struct cma_memrange *cmr;
spin_lock_irq(&cma->lock);
pr_info("number of available pages: ");
- for (;;) {
- next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
- if (next_zero_bit >= nbits)
- break;
- next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
- nr_zero = next_set_bit - next_zero_bit;
- nr_part = nr_zero << cma->order_per_bit;
- pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
- next_zero_bit);
- nr_total += nr_part;
- start = next_zero_bit + nr_zero;
+ for (r = 0; r < cma->nranges; r++) {
+ cmr = &cma->ranges[r];
+
+ start = 0;
+ nbits = cma_bitmap_maxno(cma, cmr);
+
+ pr_info("range %d: ", r);
+ for (;;) {
+ next_zero_bit = find_next_zero_bit(cmr->bitmap,
+ nbits, start);
+ if (next_zero_bit >= nbits)
+ break;
+ next_set_bit = find_next_bit(cmr->bitmap, nbits,
+ next_zero_bit);
+ nr_zero = next_set_bit - next_zero_bit;
+ nr_part = nr_zero << cma->order_per_bit;
+ pr_cont("%s%lu@%lu", start ? "+" : "", nr_part,
+ next_zero_bit);
+ start = next_zero_bit + nr_zero;
+ }
+ pr_info("\n");
}
- pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
+ pr_cont("=> %lu free of %lu total pages\n", cma->available_count,
+ cma->count);
spin_unlock_irq(&cma->lock);
}
-static struct page *__cma_alloc(struct cma *cma, unsigned long count,
- unsigned int align, gfp_t gfp)
+static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr,
+ unsigned long count, unsigned int align,
+ struct page **pagep, gfp_t gfp)
{
unsigned long mask, offset;
unsigned long pfn = -1;
unsigned long start = 0;
unsigned long bitmap_maxno, bitmap_no, bitmap_count;
- unsigned long i;
+ int ret = -EBUSY;
struct page *page = NULL;
- int ret = -ENOMEM;
- const char *name = cma ? cma->name : NULL;
-
- trace_cma_alloc_start(name, count, align);
-
- if (!cma || !cma->count || !cma->bitmap)
- return page;
-
- pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__,
- (void *)cma, cma->name, count, align);
-
- if (!count)
- return page;
mask = cma_bitmap_aligned_mask(cma, align);
- offset = cma_bitmap_aligned_offset(cma, align);
- bitmap_maxno = cma_bitmap_maxno(cma);
+ offset = cma_bitmap_aligned_offset(cma, cmr, align);
+ bitmap_maxno = cma_bitmap_maxno(cma, cmr);
bitmap_count = cma_bitmap_pages_to_bits(cma, count);
if (bitmap_count > bitmap_maxno)
- return page;
+ goto out;
for (;;) {
spin_lock_irq(&cma->lock);
- bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
+ /*
+ * If the request is larger than the available number
+ * of pages, stop right away.
+ */
+ if (count > cma->available_count) {
+ spin_unlock_irq(&cma->lock);
+ break;
+ }
+ bitmap_no = bitmap_find_next_zero_area_off(cmr->bitmap,
bitmap_maxno, start, bitmap_count, mask,
offset);
if (bitmap_no >= bitmap_maxno) {
spin_unlock_irq(&cma->lock);
break;
}
- bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
+ bitmap_set(cmr->bitmap, bitmap_no, bitmap_count);
+ cma->available_count -= count;
/*
* It's safe to drop the lock here. We've marked this region for
* our exclusive use. If the migration fails we will take the
@@ -459,16 +813,16 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count,
*/
spin_unlock_irq(&cma->lock);
- pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
- mutex_lock(&cma_mutex);
+ pfn = cmr->base_pfn + (bitmap_no << cma->order_per_bit);
+ mutex_lock(&cma->alloc_mutex);
ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp);
- mutex_unlock(&cma_mutex);
+ mutex_unlock(&cma->alloc_mutex);
if (ret == 0) {
page = pfn_to_page(pfn);
break;
}
- cma_clear_bitmap(cma, pfn, count);
+ cma_clear_bitmap(cma, cmr, pfn, count);
if (ret != -EBUSY)
break;
@@ -480,6 +834,38 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count,
/* try again with a bit different memory target */
start = bitmap_no + mask + 1;
}
+out:
+ *pagep = page;
+ return ret;
+}
+
+static struct page *__cma_alloc(struct cma *cma, unsigned long count,
+ unsigned int align, gfp_t gfp)
+{
+ struct page *page = NULL;
+ int ret = -ENOMEM, r;
+ unsigned long i;
+ const char *name = cma ? cma->name : NULL;
+
+ trace_cma_alloc_start(name, count, align);
+
+ if (!cma || !cma->count)
+ return page;
+
+ pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__,
+ (void *)cma, cma->name, count, align);
+
+ if (!count)
+ return page;
+
+ for (r = 0; r < cma->nranges; r++) {
+ page = NULL;
+
+ ret = cma_range_alloc(cma, &cma->ranges[r], count, align,
+ &page, gfp);
+ if (ret != -EBUSY || page)
+ break;
+ }
/*
* CMA can allocate multiple page blocks, which results in different
@@ -498,7 +884,8 @@ static struct page *__cma_alloc(struct cma *cma, unsigned long count,
}
pr_debug("%s(): returned %p\n", __func__, page);
- trace_cma_alloc_finish(name, pfn, page, count, align, ret);
+ trace_cma_alloc_finish(name, page ? page_to_pfn(page) : 0,
+ page, count, align, ret);
if (page) {
count_vm_event(CMA_ALLOC_SUCCESS);
cma_sysfs_account_success_pages(cma, count);
@@ -541,20 +928,31 @@ struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
bool cma_pages_valid(struct cma *cma, const struct page *pages,
unsigned long count)
{
- unsigned long pfn;
+ unsigned long pfn, end;
+ int r;
+ struct cma_memrange *cmr;
+ bool ret;
- if (!cma || !pages)
+ if (!cma || !pages || count > cma->count)
return false;
pfn = page_to_pfn(pages);
+ ret = false;
- if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
- pr_debug("%s(page %p, count %lu)\n", __func__,
- (void *)pages, count);
- return false;
+ for (r = 0; r < cma->nranges; r++) {
+ cmr = &cma->ranges[r];
+ end = cmr->base_pfn + cmr->count;
+ if (pfn >= cmr->base_pfn && pfn < end) {
+ ret = pfn + count <= end;
+ break;
+ }
}
- return true;
+ if (!ret)
+ pr_debug("%s(page %p, count %lu)\n",
+ __func__, (void *)pages, count);
+
+ return ret;
}
/**
@@ -570,19 +968,32 @@ bool cma_pages_valid(struct cma *cma, const struct page *pages,
bool cma_release(struct cma *cma, const struct page *pages,
unsigned long count)
{
- unsigned long pfn;
+ struct cma_memrange *cmr;
+ unsigned long pfn, end_pfn;
+ int r;
+
+ pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
if (!cma_pages_valid(cma, pages, count))
return false;
- pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
-
pfn = page_to_pfn(pages);
+ end_pfn = pfn + count;
+
+ for (r = 0; r < cma->nranges; r++) {
+ cmr = &cma->ranges[r];
+ if (pfn >= cmr->base_pfn &&
+ pfn < (cmr->base_pfn + cmr->count)) {
+ VM_BUG_ON(end_pfn > cmr->base_pfn + cmr->count);
+ break;
+ }
+ }
- VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
+ if (r == cma->nranges)
+ return false;
free_contig_range(pfn, count);
- cma_clear_bitmap(cma, pfn, count);
+ cma_clear_bitmap(cma, cmr, pfn, count);
cma_sysfs_account_release_pages(cma, count);
trace_cma_release(cma->name, pfn, pages, count);
@@ -610,3 +1021,86 @@ int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
return 0;
}
+
+bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end)
+{
+ int r;
+ struct cma_memrange *cmr;
+ unsigned long rstart, rend;
+
+ for (r = 0; r < cma->nranges; r++) {
+ cmr = &cma->ranges[r];
+
+ rstart = PFN_PHYS(cmr->base_pfn);
+ rend = PFN_PHYS(cmr->base_pfn + cmr->count);
+ if (end < rstart)
+ continue;
+ if (start >= rend)
+ continue;
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Very basic function to reserve memory from a CMA area that has not
+ * yet been activated. This is expected to be called early, when the
+ * system is single-threaded, so there is no locking. The alignment
+ * checking is restrictive - only pageblock-aligned areas
+ * (CMA_MIN_ALIGNMENT_BYTES) may be reserved through this function.
+ * This keeps things simple, and is enough for the current use case.
+ *
+ * The CMA bitmaps have not yet been allocated, so just start
+ * reserving from the bottom up, using a PFN to keep track
+ * of what has been reserved. Unreserving is not possible.
+ *
+ * The caller is responsible for initializing the page structures
+ * in the area properly, since this just points to memblock-allocated
+ * memory. The caller should subsequently use init_cma_pageblock to
+ * set the migrate type and CMA stats for the pageblocks that were reserved.
+ *
+ * If the CMA area fails to activate later, memory obtained through
+ * this interface is not handed to the page allocator; this is
+ * the responsibility of the caller (e.g. like normal memblock-allocated
+ * memory).
+ */
+void __init *cma_reserve_early(struct cma *cma, unsigned long size)
+{
+ int r;
+ struct cma_memrange *cmr;
+ unsigned long available;
+ void *ret = NULL;
+
+ if (!cma || !cma->count)
+ return NULL;
+ /*
+ * Can only be called early in init.
+ */
+ if (test_bit(CMA_ACTIVATED, &cma->flags))
+ return NULL;
+
+ if (!IS_ALIGNED(size, CMA_MIN_ALIGNMENT_BYTES))
+ return NULL;
+
+ if (!IS_ALIGNED(size, (PAGE_SIZE << cma->order_per_bit)))
+ return NULL;
+
+ size >>= PAGE_SHIFT;
+
+ if (size > cma->available_count)
+ return NULL;
+
+ for (r = 0; r < cma->nranges; r++) {
+ cmr = &cma->ranges[r];
+ available = cmr->count - (cmr->early_pfn - cmr->base_pfn);
+ if (size <= available) {
+ ret = phys_to_virt(PFN_PHYS(cmr->early_pfn));
+ cmr->early_pfn += size;
+ cma->available_count -= size;
+ return ret;
+ }
+ }
+
+ return ret;
+}
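An early-boot sketch of this interface (hypothetical caller; my_cma stands for an area declared earlier but not yet activated), honouring the constraints spelled out in the comment above — the size must be pageblock- and order_per_bit-aligned, and the caller owns page-structure initialization for whatever it gets back:

#include <linux/cma.h>
#include <linux/memblock.h>

static void __init carve_early_buffer(struct cma *my_cma)
{
        void *buf;

        /* Runs before cma_init_reserved_areas(); cannot be undone. */
        buf = cma_reserve_early(my_cma, 4 * CMA_MIN_ALIGNMENT_BYTES);
        if (!buf)       /* fall back to plain memblock memory */
                buf = memblock_alloc(4 * CMA_MIN_ALIGNMENT_BYTES,
                                     PAGE_SIZE);
}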
diff --git a/mm/cma.h b/mm/cma.h
index 8485ef893e99..41a3ab0ec3de 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -10,18 +10,43 @@ struct cma_kobject {
struct cma *cma;
};
+/*
+ * Multi-range support. This can be useful if the size of the allocation
+ * is not expected to be larger than the alignment (like with hugetlb_cma),
+ * and the total amount of memory requested, while smaller than the total
+ * amount of memory available, is large enough that it doesn't fit in a
+ * single physical memory range because of memory holes.
+ *
+ * Fields:
+ * @base_pfn: base PFN of the range
+ * @early_pfn: first PFN not reserved through cma_reserve_early
+ * @count: size of the range, in pages
+ * @bitmap: bitmap of allocated (1 << order_per_bit)-sized chunks.
+ */
+struct cma_memrange {
+ unsigned long base_pfn;
+ unsigned long early_pfn;
+ unsigned long count;
+ unsigned long *bitmap;
+#ifdef CONFIG_CMA_DEBUGFS
+ struct debugfs_u32_array dfs_bitmap;
+#endif
+};
+#define CMA_MAX_RANGES 8
+
struct cma {
- unsigned long base_pfn;
unsigned long count;
- unsigned long *bitmap;
+ unsigned long available_count;
unsigned int order_per_bit; /* Order of pages represented by one bit */
spinlock_t lock;
+ struct mutex alloc_mutex;
#ifdef CONFIG_CMA_DEBUGFS
struct hlist_head mem_head;
spinlock_t mem_head_lock;
- struct debugfs_u32_array dfs_bitmap;
#endif
char name[CMA_MAX_NAME];
+ int nranges;
+ struct cma_memrange ranges[CMA_MAX_RANGES];
#ifdef CONFIG_CMA_SYSFS
/* the number of CMA page successful allocations */
atomic64_t nr_pages_succeeded;
@@ -32,15 +57,25 @@ struct cma {
/* kobject requires dynamic object */
struct cma_kobject *cma_kobj;
#endif
- bool reserve_pages_on_error;
+ unsigned long flags;
+ /* NUMA node (NUMA_NO_NODE if unspecified) */
+ int nid;
+};
+
+enum cma_flags {
+ CMA_RESERVE_PAGES_ON_ERROR,
+ CMA_ZONES_VALID,
+ CMA_ZONES_INVALID,
+ CMA_ACTIVATED,
};
extern struct cma cma_areas[MAX_CMA_AREAS];
extern unsigned int cma_area_count;
-static inline unsigned long cma_bitmap_maxno(struct cma *cma)
+static inline unsigned long cma_bitmap_maxno(struct cma *cma,
+ struct cma_memrange *cmr)
{
- return cma->count >> cma->order_per_bit;
+ return cmr->count >> cma->order_per_bit;
}
#ifdef CONFIG_CMA_SYSFS
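
With bitmaps now per range, each range's bitmap length follows from its own count rather than the whole area's. A small worked example of the sizing arithmetic behind cma_bitmap_maxno(); the count and order_per_bit values are assumed.

    #include <stdio.h>

    #define BITS_PER_LONG (8 * (int)sizeof(long))
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long count = 0x8000;   /* pages in one range: 128 MiB of 4K pages */
        unsigned int order_per_bit = 9; /* one bit per 2 MiB chunk, as hugetlb uses */
        unsigned long maxno = count >> order_per_bit;
        unsigned long words = DIV_ROUND_UP(maxno, BITS_PER_LONG);

        printf("bits=%lu words=%lu\n", maxno, words); /* bits=64 words=1 */
        return 0;
    }
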
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index 602fff89b15f..fdf899532ca0 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -34,13 +34,10 @@ DEFINE_DEBUGFS_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");
static int cma_used_get(void *data, u64 *val)
{
struct cma *cma = data;
- unsigned long used;
spin_lock_irq(&cma->lock);
- /* pages counter is smaller than sizeof(int) */
- used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
+ *val = cma->count - cma->available_count;
spin_unlock_irq(&cma->lock);
- *val = (u64)used << cma->order_per_bit;
return 0;
}
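
The new available_count bookkeeping turns the "used" figure into a constant-time subtraction instead of a bitmap_weight() scan over every bit of every range. A toy illustration, with the page counts assumed:

    #include <stdio.h>

    int main(void)
    {
        unsigned long count = 16384;          /* total CMA pages */
        unsigned long available_count = 16384;

        available_count -= 512;               /* cma_alloc() of 512 pages */
        available_count -= 128;               /* another allocation */
        available_count += 512;               /* cma_release() of the first */

        /* O(1), already in pages, so no order_per_bit shift needed either */
        printf("used=%lu\n", count - available_count); /* used=128 */
        return 0;
    }
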
@@ -49,17 +46,26 @@ DEFINE_DEBUGFS_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n");
static int cma_maxchunk_get(void *data, u64 *val)
{
struct cma *cma = data;
+ struct cma_memrange *cmr;
unsigned long maxchunk = 0;
- unsigned long start, end = 0;
- unsigned long bitmap_maxno = cma_bitmap_maxno(cma);
+ unsigned long start, end;
+ unsigned long bitmap_maxno;
+ int r;
spin_lock_irq(&cma->lock);
- for (;;) {
- start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
- if (start >= bitmap_maxno)
- break;
- end = find_next_bit(cma->bitmap, bitmap_maxno, start);
- maxchunk = max(end - start, maxchunk);
+ for (r = 0; r < cma->nranges; r++) {
+ cmr = &cma->ranges[r];
+ bitmap_maxno = cma_bitmap_maxno(cma, cmr);
+ end = 0;
+ for (;;) {
+ start = find_next_zero_bit(cmr->bitmap,
+ bitmap_maxno, end);
+ if (start >= bitmap_maxno)
+ break;
+ end = find_next_bit(cmr->bitmap, bitmap_maxno,
+ start);
+ maxchunk = max(end - start, maxchunk);
+ }
}
spin_unlock_irq(&cma->lock);
*val = (u64)maxchunk << cma->order_per_bit;
@@ -162,24 +168,41 @@ DEFINE_DEBUGFS_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");
static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry)
{
- struct dentry *tmp;
+ struct dentry *tmp, *dir, *rangedir;
+ int r;
+ char rdirname[12];
+ struct cma_memrange *cmr;
tmp = debugfs_create_dir(cma->name, root_dentry);
debugfs_create_file("alloc", 0200, tmp, cma, &cma_alloc_fops);
debugfs_create_file("free", 0200, tmp, cma, &cma_free_fops);
- debugfs_create_file("base_pfn", 0444, tmp,
- &cma->base_pfn, &cma_debugfs_fops);
debugfs_create_file("count", 0444, tmp, &cma->count, &cma_debugfs_fops);
debugfs_create_file("order_per_bit", 0444, tmp,
&cma->order_per_bit, &cma_debugfs_fops);
debugfs_create_file("used", 0444, tmp, cma, &cma_used_fops);
debugfs_create_file("maxchunk", 0444, tmp, cma, &cma_maxchunk_fops);
- cma->dfs_bitmap.array = (u32 *)cma->bitmap;
- cma->dfs_bitmap.n_elements = DIV_ROUND_UP(cma_bitmap_maxno(cma),
- BITS_PER_BYTE * sizeof(u32));
- debugfs_create_u32_array("bitmap", 0444, tmp, &cma->dfs_bitmap);
+ rangedir = debugfs_create_dir("ranges", tmp);
+ for (r = 0; r < cma->nranges; r++) {
+ cmr = &cma->ranges[r];
+ snprintf(rdirname, sizeof(rdirname), "%d", r);
+ dir = debugfs_create_dir(rdirname, rangedir);
+ debugfs_create_file("base_pfn", 0444, dir,
+ &cmr->base_pfn, &cma_debugfs_fops);
+ cmr->dfs_bitmap.array = (u32 *)cmr->bitmap;
+ cmr->dfs_bitmap.n_elements =
+ DIV_ROUND_UP(cma_bitmap_maxno(cma, cmr),
+ BITS_PER_BYTE * sizeof(u32));
+ debugfs_create_u32_array("bitmap", 0444, dir,
+ &cmr->dfs_bitmap);
+ }
+
+ /*
+ * Backward compatible symlinks to range 0 for base_pfn and bitmap.
+ */
+ debugfs_create_symlink("base_pfn", tmp, "ranges/0/base_pfn");
+ debugfs_create_symlink("bitmap", tmp, "ranges/0/bitmap");
}
static int __init cma_debugfs_init(void)
diff --git a/mm/cma_sysfs.c b/mm/cma_sysfs.c
index f50db3973171..97acd3e5a6a5 100644
--- a/mm/cma_sysfs.c
+++ b/mm/cma_sysfs.c
@@ -62,6 +62,24 @@ static ssize_t release_pages_success_show(struct kobject *kobj,
}
CMA_ATTR_RO(release_pages_success);
+static ssize_t total_pages_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct cma *cma = cma_from_kobj(kobj);
+
+ return sysfs_emit(buf, "%lu\n", cma->count);
+}
+CMA_ATTR_RO(total_pages);
+
+static ssize_t available_pages_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct cma *cma = cma_from_kobj(kobj);
+
+ return sysfs_emit(buf, "%lu\n", cma->available_count);
+}
+CMA_ATTR_RO(available_pages);
+
static void cma_kobj_release(struct kobject *kobj)
{
struct cma *cma = cma_from_kobj(kobj);
@@ -75,6 +93,8 @@ static struct attribute *cma_attrs[] = {
&alloc_pages_success_attr.attr,
&alloc_pages_fail_attr.attr,
&release_pages_success_attr.attr,
+ &total_pages_attr.attr,
+ &available_pages_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(cma);
diff --git a/mm/compaction.c b/mm/compaction.c
index a3203d97123e..139f00c0308a 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2328,11 +2328,27 @@ static enum compact_result __compact_finished(struct compact_control *cc)
if (!pageblock_aligned(cc->migrate_pfn))
return COMPACT_CONTINUE;
+ /*
+ * When defrag_mode is enabled, make kcompactd target
+ * watermarks in whole pageblocks. Because they can be stolen
+ * without polluting, no further fallback checks are needed.
+ */
+ if (defrag_mode && !cc->direct_compaction) {
+ if (__zone_watermark_ok(cc->zone, cc->order,
+ high_wmark_pages(cc->zone),
+ cc->highest_zoneidx, cc->alloc_flags,
+ zone_page_state(cc->zone,
+ NR_FREE_PAGES_BLOCKS)))
+ return COMPACT_SUCCESS;
+
+ return COMPACT_CONTINUE;
+ }
+
/* Direct compactor: Is a suitable page free? */
ret = COMPACT_NO_SUITABLE_PAGE;
for (order = cc->order; order < NR_PAGE_ORDERS; order++) {
struct free_area *area = &cc->zone->free_area[order];
- bool can_steal;
+ bool claim_block;
/* Job done if page is free of the right migratetype */
if (!free_area_empty(area, migratetype))
@@ -2349,7 +2365,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
* other migratetype buddy lists.
*/
if (find_suitable_fallback(area, order, migratetype,
- true, &can_steal) != -1)
+ true, &claim_block) != -1)
/*
* Movable pages are OK in any pageblock. If we are
* stealing for a non-movable allocation, make sure
@@ -2381,40 +2397,42 @@ static enum compact_result compact_finished(struct compact_control *cc)
}
static bool __compaction_suitable(struct zone *zone, int order,
- int highest_zoneidx,
- unsigned long wmark_target)
+ unsigned long watermark, int highest_zoneidx,
+ unsigned long free_pages)
{
- unsigned long watermark;
/*
* Watermarks for order-0 must be met for compaction to be able to
* isolate free pages for migration targets. This means that the
- * watermark and alloc_flags have to match, or be more pessimistic than
- * the check in __isolate_free_page(). We don't use the direct
- * compactor's alloc_flags, as they are not relevant for freepage
- * isolation. We however do use the direct compactor's highest_zoneidx
- * to skip over zones where lowmem reserves would prevent allocation
- * even if compaction succeeds.
- * For costly orders, we require low watermark instead of min for
- * compaction to proceed to increase its chances.
+ * watermark has to match, or be more pessimistic than the check in
+ * __isolate_free_page().
+ *
+ * For costly orders, we require a higher watermark for compaction to
+ * proceed to increase its chances.
+ *
+ * We use the direct compactor's highest_zoneidx to skip over zones
+ * where lowmem reserves would prevent allocation even if compaction
+ * succeeds.
+ *
* ALLOC_CMA is used, as pages in CMA pageblocks are considered
- * suitable migration targets
+ * suitable migration targets.
*/
- watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
- low_wmark_pages(zone) : min_wmark_pages(zone);
watermark += compact_gap(order);
+ if (order > PAGE_ALLOC_COSTLY_ORDER)
+ watermark += low_wmark_pages(zone) - min_wmark_pages(zone);
return __zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
- ALLOC_CMA, wmark_target);
+ ALLOC_CMA, free_pages);
}
/*
* compaction_suitable: Is this suitable to run compaction on this zone now?
*/
-bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx)
+bool compaction_suitable(struct zone *zone, int order, unsigned long watermark,
+ int highest_zoneidx)
{
enum compact_result compact_result;
bool suitable;
- suitable = __compaction_suitable(zone, order, highest_zoneidx,
+ suitable = __compaction_suitable(zone, order, watermark, highest_zoneidx,
zone_page_state(zone, NR_FREE_PAGES));
/*
* fragmentation index determines if allocation failures are due to
@@ -2452,6 +2470,7 @@ bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx)
return suitable;
}
+/* Used by direct reclaimers */
bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
int alloc_flags)
{
@@ -2474,8 +2493,8 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
*/
available = zone_reclaimable_pages(zone) / order;
available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
- if (__compaction_suitable(zone, order, ac->highest_zoneidx,
- available))
+ if (__compaction_suitable(zone, order, min_wmark_pages(zone),
+ ac->highest_zoneidx, available))
return true;
}
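
The reworked __compaction_suitable() takes the base watermark from the caller and layers the compaction headroom on top. A standalone sketch of that arithmetic; the watermark values are assumed, and compact_gap() is modeled as twice the allocation size, matching its kernel definition:

    #include <stdio.h>

    /* Headroom for both the migration source and destination pages. */
    static unsigned long compact_gap(unsigned int order)
    {
        return 2UL << order;
    }

    int main(void)
    {
        unsigned long min_wmark = 5000, low_wmark = 6250; /* assumed, in pages */
        unsigned int order = 9;                 /* 2 MiB THP on 4K pages */
        unsigned long wmark = min_wmark;        /* caller-supplied base */

        wmark += compact_gap(order);            /* +1024 pages */
        if (order > 3 /* PAGE_ALLOC_COSTLY_ORDER */)
            wmark += low_wmark - min_wmark;     /* costly orders: extra cushion */

        printf("effective order-0 watermark: %lu pages\n", wmark); /* 7274 */
        return 0;
    }
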
@@ -2492,13 +2511,19 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
static enum compact_result
compaction_suit_allocation_order(struct zone *zone, unsigned int order,
int highest_zoneidx, unsigned int alloc_flags,
- bool async)
+ bool async, bool kcompactd)
{
+ unsigned long free_pages;
unsigned long watermark;
+ if (kcompactd && defrag_mode)
+ free_pages = zone_page_state(zone, NR_FREE_PAGES_BLOCKS);
+ else
+ free_pages = zone_page_state(zone, NR_FREE_PAGES);
+
watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
- if (zone_watermark_ok(zone, order, watermark, highest_zoneidx,
- alloc_flags))
+ if (__zone_watermark_ok(zone, order, watermark, highest_zoneidx,
+ alloc_flags, free_pages))
return COMPACT_SUCCESS;
/*
@@ -2512,13 +2537,13 @@ compaction_suit_allocation_order(struct zone *zone, unsigned int order,
*/
if (order > PAGE_ALLOC_COSTLY_ORDER && async &&
!(alloc_flags & ALLOC_CMA)) {
- watermark = low_wmark_pages(zone) + compact_gap(order);
- if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
- 0, zone_page_state(zone, NR_FREE_PAGES)))
+ if (!__zone_watermark_ok(zone, 0, watermark + compact_gap(order),
+ highest_zoneidx, 0,
+ zone_page_state(zone, NR_FREE_PAGES)))
return COMPACT_SKIPPED;
}
- if (!compaction_suitable(zone, order, highest_zoneidx))
+ if (!compaction_suitable(zone, order, watermark, highest_zoneidx))
return COMPACT_SKIPPED;
return COMPACT_CONTINUE;
@@ -2554,7 +2579,8 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
ret = compaction_suit_allocation_order(cc->zone, cc->order,
cc->highest_zoneidx,
cc->alloc_flags,
- cc->mode == MIGRATE_ASYNC);
+ cc->mode == MIGRATE_ASYNC,
+ !cc->direct_compaction);
if (ret != COMPACT_CONTINUE)
return ret;
}
@@ -3048,6 +3074,8 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat)
struct zone *zone;
enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx;
enum compact_result ret;
+ unsigned int alloc_flags = defrag_mode ?
+ ALLOC_WMARK_HIGH : ALLOC_WMARK_MIN;
for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) {
zone = &pgdat->node_zones[zoneid];
@@ -3057,8 +3085,8 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat)
ret = compaction_suit_allocation_order(zone,
pgdat->kcompactd_max_order,
- highest_zoneidx, ALLOC_WMARK_MIN,
- false);
+ highest_zoneidx, alloc_flags,
+ false, true);
if (ret == COMPACT_CONTINUE)
return true;
}
@@ -3081,6 +3109,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
.mode = MIGRATE_SYNC_LIGHT,
.ignore_skip_hint = false,
.gfp_mask = GFP_KERNEL,
+ .alloc_flags = defrag_mode ? ALLOC_WMARK_HIGH : ALLOC_WMARK_MIN,
};
enum compact_result ret;
@@ -3099,8 +3128,8 @@ static void kcompactd_do_work(pg_data_t *pgdat)
continue;
ret = compaction_suit_allocation_order(zone,
- cc.order, zoneid, ALLOC_WMARK_MIN,
- false);
+ cc.order, zoneid, cc.alloc_flags,
+ false, true);
if (ret != COMPACT_CONTINUE)
continue;
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 384935ef4e65..f0c1676f0599 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -76,14 +76,13 @@ int damon_register_ops(struct damon_operations *ops)
if (ops->id >= NR_DAMON_OPS)
return -EINVAL;
+
mutex_lock(&damon_ops_lock);
/* Fail for already registered ops */
- if (__damon_is_registered_ops(ops->id)) {
+ if (__damon_is_registered_ops(ops->id))
err = -EINVAL;
- goto out;
- }
- damon_registered_ops[ops->id] = *ops;
-out:
+ else
+ damon_registered_ops[ops->id] = *ops;
mutex_unlock(&damon_ops_lock);
return err;
}
@@ -281,9 +280,31 @@ struct damos_filter *damos_new_filter(enum damos_filter_type type,
return filter;
}
+/**
+ * damos_filter_for_ops() - Return whether the filter is an ops-handled one.
+ * @type: type of the filter.
+ *
+ * Return: true if the filter of @type needs to be handled by ops layer, false
+ * otherwise.
+ */
+bool damos_filter_for_ops(enum damos_filter_type type)
+{
+ switch (type) {
+ case DAMOS_FILTER_TYPE_ADDR:
+ case DAMOS_FILTER_TYPE_TARGET:
+ return false;
+ default:
+ break;
+ }
+ return true;
+}
+
void damos_add_filter(struct damos *s, struct damos_filter *f)
{
- list_add_tail(&f->list, &s->filters);
+ if (damos_filter_for_ops(f->type))
+ list_add_tail(&f->list, &s->ops_filters);
+ else
+ list_add_tail(&f->list, &s->filters);
}
static void damos_del_filter(struct damos_filter *f)
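
damos_add_filter() now routes each filter by damos_filter_for_ops(): core-handled types match on scheme-level metadata (address, target), everything else needs page-level information only the ops layer has. A minimal userspace model of that split, using a reduced filter-type enum:

    #include <stdbool.h>
    #include <stdio.h>

    enum filter_type { FT_ADDR, FT_TARGET, FT_ANON, FT_MEMCG, FT_YOUNG };

    static bool filter_for_ops(enum filter_type t)
    {
        return t != FT_ADDR && t != FT_TARGET;
    }

    int main(void)
    {
        enum filter_type add[] = { FT_ANON, FT_ADDR, FT_YOUNG, FT_TARGET };
        int i;

        for (i = 0; i < 4; i++)
            printf("filter %d -> %s list\n", (int)add[i],
                   filter_for_ops(add[i]) ? "ops_filters" : "filters");
        return 0;
    }
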
@@ -375,6 +396,7 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
scheme->next_apply_sis = 0;
scheme->walk_completed = false;
INIT_LIST_HEAD(&scheme->filters);
+ INIT_LIST_HEAD(&scheme->ops_filters);
scheme->stat = (struct damos_stat){};
INIT_LIST_HEAD(&scheme->list);
@@ -502,7 +524,7 @@ struct damon_ctx *damon_new_ctx(void)
ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
ctx->passed_sample_intervals = 0;
- /* These will be set from kdamond_init_intervals_sis() */
+ /* These will be set from kdamond_init_ctx() */
ctx->next_aggregation_sis = 0;
ctx->next_ops_update_sis = 0;
@@ -580,11 +602,25 @@ static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
}
static void damon_update_monitoring_result(struct damon_region *r,
- struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
+ struct damon_attrs *old_attrs, struct damon_attrs *new_attrs,
+ bool aggregating)
{
- r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses,
- old_attrs, new_attrs);
- r->nr_accesses_bp = r->nr_accesses * 10000;
+ if (!aggregating) {
+ r->nr_accesses = damon_nr_accesses_for_new_attrs(
+ r->nr_accesses, old_attrs, new_attrs);
+ r->nr_accesses_bp = r->nr_accesses * 10000;
+ } else {
+ /*
+ * if this is called in the middle of the aggregation, reset
+ * the aggregations we made so far for this aggregation
+ * interval. In other words, make the status like
+ * kdamond_reset_aggregated() is called.
+ */
+ r->last_nr_accesses = damon_nr_accesses_for_new_attrs(
+ r->last_nr_accesses, old_attrs, new_attrs);
+ r->nr_accesses_bp = r->last_nr_accesses * 10000;
+ r->nr_accesses = 0;
+ }
r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
}
@@ -597,7 +633,7 @@ static void damon_update_monitoring_result(struct damon_region *r,
* ->nr_accesses and ->age of given damon_ctx's regions for new damon_attrs.
*/
static void damon_update_monitoring_results(struct damon_ctx *ctx,
- struct damon_attrs *new_attrs)
+ struct damon_attrs *new_attrs, bool aggregating)
{
struct damon_attrs *old_attrs = &ctx->attrs;
struct damon_target *t;
@@ -612,7 +648,26 @@ static void damon_update_monitoring_results(struct damon_ctx *ctx,
damon_for_each_target(t, ctx)
damon_for_each_region(r, t)
damon_update_monitoring_result(
- r, old_attrs, new_attrs);
+ r, old_attrs, new_attrs, aggregating);
+}
+
+/*
+ * damon_valid_intervals_goal() - return if the intervals goal of @attrs is
+ * valid.
+ */
+static bool damon_valid_intervals_goal(struct damon_attrs *attrs)
+{
+ struct damon_intervals_goal *goal = &attrs->intervals_goal;
+
+ /* tuning is disabled */
+ if (!goal->aggrs)
+ return true;
+ if (goal->min_sample_us > goal->max_sample_us)
+ return false;
+ if (attrs->sample_interval < goal->min_sample_us ||
+ goal->max_sample_us < attrs->sample_interval)
+ return false;
+ return true;
}
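
A compact model of the validity rules above: a zero aggrs disables tuning entirely, and otherwise the current sample interval must sit inside a well-ordered [min, max] window. The sample values are illustrative only.

    #include <stdbool.h>
    #include <stdio.h>

    struct goal { unsigned long access_bp, aggrs, min_us, max_us; };

    static bool goal_valid(struct goal g, unsigned long sample_us)
    {
        if (!g.aggrs)                 /* tuning disabled: anything goes */
            return true;
        if (g.min_us > g.max_us)
            return false;
        return g.min_us <= sample_us && sample_us <= g.max_us;
    }

    int main(void)
    {
        struct goal g = { 400, 3, 5000, 10000000 };

        printf("%d\n", goal_valid(g, 5000));   /* 1 */
        printf("%d\n", goal_valid(g, 1000));   /* 0: below min_sample_us */
        g.aggrs = 0;
        printf("%d\n", goal_valid(g, 1000));   /* 1: tuning disabled */
        return 0;
    }
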
/**
@@ -620,10 +675,10 @@ static void damon_update_monitoring_results(struct damon_ctx *ctx,
* @ctx: monitoring context
* @attrs: monitoring attributes
*
- * This function should be called while the kdamond is not running, or an
- * access check results aggregation is not ongoing (e.g., from
- * &struct damon_callback->after_aggregation or
- * &struct damon_callback->after_wmarks_check callbacks).
+ * This function should be called while the kdamond is not running, while an
+ * access check results aggregation is not ongoing (e.g., from &struct
+ * damon_callback->after_aggregation or &struct
+ * damon_callback->after_wmarks_check callbacks), or from damon_call().
*
* Every time interval is in micro-seconds.
*
@@ -634,6 +689,11 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
unsigned long sample_interval = attrs->sample_interval ?
attrs->sample_interval : 1;
struct damos *s;
+ bool aggregating = ctx->passed_sample_intervals <
+ ctx->next_aggregation_sis;
+
+ if (!damon_valid_intervals_goal(attrs))
+ return -EINVAL;
if (attrs->min_nr_regions < 3)
return -EINVAL;
@@ -642,12 +702,16 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
if (attrs->sample_interval > attrs->aggr_interval)
return -EINVAL;
+ /* Calls from outside the core don't set this. */
+ if (!attrs->aggr_samples)
+ attrs->aggr_samples = attrs->aggr_interval / sample_interval;
+
ctx->next_aggregation_sis = ctx->passed_sample_intervals +
attrs->aggr_interval / sample_interval;
ctx->next_ops_update_sis = ctx->passed_sample_intervals +
attrs->ops_update_interval / sample_interval;
- damon_update_monitoring_results(ctx, attrs);
+ damon_update_monitoring_results(ctx, attrs, aggregating);
ctx->attrs = *attrs;
damon_for_each_scheme(s, ctx)
@@ -777,6 +841,9 @@ static void damos_commit_filter_arg(
case DAMOS_FILTER_TYPE_TARGET:
dst->target_idx = src->target_idx;
break;
+ case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
+ dst->sz_range = src->sz_range;
+ break;
default:
break;
}
@@ -790,7 +857,7 @@ static void damos_commit_filter(
damos_commit_filter_arg(dst, src);
}
-static int damos_commit_filters(struct damos *dst, struct damos *src)
+static int damos_commit_core_filters(struct damos *dst, struct damos *src)
{
struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
int i = 0, j = 0;
@@ -818,6 +885,74 @@ static int damos_commit_filters(struct damos *dst, struct damos *src)
return 0;
}
+static int damos_commit_ops_filters(struct damos *dst, struct damos *src)
+{
+ struct damos_filter *dst_filter, *next, *src_filter, *new_filter;
+ int i = 0, j = 0;
+
+ damos_for_each_ops_filter_safe(dst_filter, next, dst) {
+ src_filter = damos_nth_filter(i++, src);
+ if (src_filter)
+ damos_commit_filter(dst_filter, src_filter);
+ else
+ damos_destroy_filter(dst_filter);
+ }
+
+ damos_for_each_ops_filter_safe(src_filter, next, src) {
+ if (j++ < i)
+ continue;
+
+ new_filter = damos_new_filter(
+ src_filter->type, src_filter->matching,
+ src_filter->allow);
+ if (!new_filter)
+ return -ENOMEM;
+ damos_commit_filter_arg(new_filter, src_filter);
+ damos_add_filter(dst, new_filter);
+ }
+ return 0;
+}
+
+/**
+ * damos_filters_default_reject() - decide whether to reject memory that didn't
+ * match any given filter.
+ * @filters: Given DAMOS filters of a group.
+ */
+static bool damos_filters_default_reject(struct list_head *filters)
+{
+ struct damos_filter *last_filter;
+
+ if (list_empty(filters))
+ return false;
+ last_filter = list_last_entry(filters, struct damos_filter, list);
+ return last_filter->allow;
+}
+
+static void damos_set_filters_default_reject(struct damos *s)
+{
+ if (!list_empty(&s->ops_filters))
+ s->core_filters_default_reject = false;
+ else
+ s->core_filters_default_reject =
+ damos_filters_default_reject(&s->filters);
+ s->ops_filters_default_reject =
+ damos_filters_default_reject(&s->ops_filters);
+}
+
+static int damos_commit_filters(struct damos *dst, struct damos *src)
+{
+ int err;
+
+ err = damos_commit_core_filters(dst, src);
+ if (err)
+ return err;
+ err = damos_commit_ops_filters(dst, src);
+ if (err)
+ return err;
+ damos_set_filters_default_reject(dst);
+ return 0;
+}
+
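
The default-reject rule depends only on the allow flag of the last filter in a group: a trailing allow filter implies unmatched memory is rejected, a trailing reject filter implies it passes, and an empty list rejects nothing. A self-contained demonstration:

    #include <stdbool.h>
    #include <stdio.h>

    static bool default_reject(const bool *allow, int nr)
    {
        if (nr == 0)
            return false;
        return allow[nr - 1];
    }

    int main(void)
    {
        bool a[] = { false, true };  /* reject X, then allow Y */

        printf("%d\n", default_reject(a, 2)); /* 1: only Y survives */
        printf("%d\n", default_reject(a, 1)); /* 0: everything but X survives */
        printf("%d\n", default_reject(a, 0)); /* 0: no filters */
        return 0;
    }
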
static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx)
{
struct damos *s;
@@ -1276,6 +1411,65 @@ static void kdamond_reset_aggregated(struct damon_ctx *c)
}
}
+static unsigned long damon_get_intervals_score(struct damon_ctx *c)
+{
+ struct damon_target *t;
+ struct damon_region *r;
+ unsigned long sz_region, max_access_events = 0, access_events = 0;
+ unsigned long target_access_events;
+ unsigned long goal_bp = c->attrs.intervals_goal.access_bp;
+
+ damon_for_each_target(t, c) {
+ damon_for_each_region(r, t) {
+ sz_region = damon_sz_region(r);
+ max_access_events += sz_region * c->attrs.aggr_samples;
+ access_events += sz_region * r->nr_accesses;
+ }
+ }
+ target_access_events = max_access_events * goal_bp / 10000;
+ return access_events * 10000 / target_access_events;
+}
+
+static unsigned long damon_feed_loop_next_input(unsigned long last_input,
+ unsigned long score);
+
+static unsigned long damon_get_intervals_adaptation_bp(struct damon_ctx *c)
+{
+ unsigned long score_bp, adaptation_bp;
+
+ score_bp = damon_get_intervals_score(c);
+ adaptation_bp = damon_feed_loop_next_input(100000000, score_bp) /
+ 10000;
+ /*
+ * adaptation_bp ranges from 1 to 20,000. Avoid too rapid reduction of
+ * the intervals by rescaling [1, 10,000] to [5,000, 10,000].
+ */
+ if (adaptation_bp <= 10000)
+ adaptation_bp = 5000 + adaptation_bp / 2;
+ return adaptation_bp;
+}
+
+static void kdamond_tune_intervals(struct damon_ctx *c)
+{
+ unsigned long adaptation_bp;
+ struct damon_attrs new_attrs;
+ struct damon_intervals_goal *goal;
+
+ adaptation_bp = damon_get_intervals_adaptation_bp(c);
+ if (adaptation_bp == 10000)
+ return;
+
+ new_attrs = c->attrs;
+ goal = &c->attrs.intervals_goal;
+ new_attrs.sample_interval = min(goal->max_sample_us,
+ c->attrs.sample_interval * adaptation_bp / 10000);
+ new_attrs.sample_interval = max(goal->min_sample_us,
+ new_attrs.sample_interval);
+ new_attrs.aggr_interval = new_attrs.sample_interval *
+ c->attrs.aggr_samples;
+ damon_set_attrs(c, &new_attrs);
+}
+
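
The tuning step scales the sample interval by adaptation_bp and clamps it to the goal's bounds; the aggregation interval then follows from aggr_samples. A standalone walk-through with an assumed feedback result (the feed-loop computation itself is not reproduced here):

    #include <stdio.h>

    static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
    static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

    int main(void)
    {
        /* Assume the feedback loop asked for a shrink to 40%. */
        unsigned long adaptation_bp = 4000;
        unsigned long sample_us = 5000, aggr_samples = 20;
        unsigned long min_us = 5000, max_us = 10000000;

        /* Rescale [1, 10000] -> [5000, 10000] so intervals never shrink
         * by more than half in one tuning step. */
        if (adaptation_bp <= 10000)
            adaptation_bp = 5000 + adaptation_bp / 2;   /* 7000 */

        sample_us = max_ul(min_us,
                    min_ul(max_us, sample_us * adaptation_bp / 10000));
        printf("sample=%luus aggr=%luus\n",
               sample_us, sample_us * aggr_samples); /* clamped to 5000us, 100000us */
        return 0;
    }
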
static void damon_split_region_at(struct damon_target *t,
struct damon_region *r, unsigned long sz_r);
@@ -1438,7 +1632,7 @@ static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t,
return !filter->allow;
}
}
- return false;
+ return s->core_filters_default_reject;
}
/*
@@ -1458,11 +1652,13 @@ static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t,
{
struct damos_walk_control *control;
- mutex_lock(&ctx->walk_control_lock);
+ if (s->walk_completed)
+ return;
+
control = ctx->walk_control;
- mutex_unlock(&ctx->walk_control_lock);
if (!control)
return;
+
control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed);
}
@@ -1482,9 +1678,7 @@ static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s)
struct damos *siter;
struct damos_walk_control *control;
- mutex_lock(&ctx->walk_control_lock);
control = ctx->walk_control;
- mutex_unlock(&ctx->walk_control_lock);
if (!control)
return;
@@ -1494,10 +1688,11 @@ static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s)
if (!siter->walk_completed)
return;
}
+ damon_for_each_scheme(siter, ctx)
+ siter->walk_completed = false;
+
complete(&control->completion);
- mutex_lock(&ctx->walk_control_lock);
ctx->walk_control = NULL;
- mutex_unlock(&ctx->walk_control_lock);
}
/*
@@ -1535,7 +1730,6 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
struct timespec64 begin, end;
unsigned long sz_applied = 0;
unsigned long sz_ops_filter_passed = 0;
- int err = 0;
/*
* We plan to support multiple context per kdamond, as DAMON sysfs
* implies with 'nr_contexts' file. Nevertheless, only single context
@@ -1575,14 +1769,10 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
if (damos_filter_out(c, t, r, s))
return;
ktime_get_coarse_ts64(&begin);
- if (c->callback.before_damos_apply)
- err = c->callback.before_damos_apply(c, t, r, s);
- if (!err) {
- trace_damos_before_apply(cidx, sidx, tidx, r,
- damon_nr_regions(t), do_trace);
- sz_applied = c->ops.apply_scheme(c, t, r, s,
- &sz_ops_filter_passed);
- }
+ trace_damos_before_apply(cidx, sidx, tidx, r,
+ damon_nr_regions(t), do_trace);
+ sz_applied = c->ops.apply_scheme(c, t, r, s,
+ &sz_ops_filter_passed);
damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed);
ktime_get_coarse_ts64(&end);
quota->total_charged_ns += timespec64_to_ns(&end) -
@@ -1844,6 +2034,7 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
if (!has_schemes_to_apply)
return;
+ mutex_lock(&c->walk_control_lock);
damon_for_each_target(t, c) {
damon_for_each_region_safe(r, next_r, t)
damon_do_apply_schemes(c, t, r);
@@ -1856,7 +2047,9 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
s->next_apply_sis = c->passed_sample_intervals +
(s->apply_interval_us ? s->apply_interval_us :
c->attrs.aggr_interval) / sample_interval;
+ s->last_applied = NULL;
}
+ mutex_unlock(&c->walk_control_lock);
}
/*
@@ -2169,7 +2362,7 @@ static int kdamond_wait_activation(struct damon_ctx *ctx)
return -EBUSY;
}
-static void kdamond_init_intervals_sis(struct damon_ctx *ctx)
+static void kdamond_init_ctx(struct damon_ctx *ctx)
{
unsigned long sample_interval = ctx->attrs.sample_interval ?
ctx->attrs.sample_interval : 1;
@@ -2180,11 +2373,14 @@ static void kdamond_init_intervals_sis(struct damon_ctx *ctx)
ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
sample_interval;
+ ctx->next_intervals_tune_sis = ctx->next_aggregation_sis *
+ ctx->attrs.intervals_goal.aggrs;
damon_for_each_scheme(scheme, ctx) {
apply_interval = scheme->apply_interval_us ?
scheme->apply_interval_us : ctx->attrs.aggr_interval;
scheme->next_apply_sis = apply_interval / sample_interval;
+ damos_set_filters_default_reject(scheme);
}
}
@@ -2202,12 +2398,10 @@ static int kdamond_fn(void *data)
pr_debug("kdamond (%d) starts\n", current->pid);
complete(&ctx->kdamond_started);
- kdamond_init_intervals_sis(ctx);
+ kdamond_init_ctx(ctx);
if (ctx->ops.init)
ctx->ops.init(ctx);
- if (ctx->callback.before_start && ctx->callback.before_start(ctx))
- goto done;
ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1,
sizeof(*ctx->regions_score_histogram), GFP_KERNEL);
if (!ctx->regions_score_histogram)
@@ -2232,10 +2426,6 @@ static int kdamond_fn(void *data)
if (ctx->ops.prepare_access_checks)
ctx->ops.prepare_access_checks(ctx);
- if (ctx->callback.after_sampling &&
- ctx->callback.after_sampling(ctx))
- break;
- kdamond_call(ctx, false);
kdamond_usleep(sample_interval);
ctx->passed_sample_intervals++;
@@ -2253,9 +2443,10 @@ static int kdamond_fn(void *data)
}
/*
- * do kdamond_apply_schemes() after kdamond_merge_regions() if
- * possible, to reduce overhead
+ * do kdamond_call() and kdamond_apply_schemes() after
+ * kdamond_merge_regions() if possible, to reduce overhead
*/
+ kdamond_call(ctx, false);
if (!list_empty(&ctx->schemes))
kdamond_apply_schemes(ctx);
else
@@ -2264,13 +2455,40 @@ static int kdamond_fn(void *data)
sample_interval = ctx->attrs.sample_interval ?
ctx->attrs.sample_interval : 1;
if (ctx->passed_sample_intervals >= next_aggregation_sis) {
+ if (ctx->attrs.intervals_goal.aggrs &&
+ ctx->passed_sample_intervals >=
+ ctx->next_intervals_tune_sis) {
+ /*
+ * ctx->next_aggregation_sis might be updated
+ * from kdamond_call(). In that case,
+ * damon_set_attrs(), which will be called from
+ * kdamond_tune_intervals(), may wrongly think
+ * this is in the middle of the current
+ * aggregation, and reset the aggregation
+ * information for all regions. Then, the
+ * following kdamond_reset_aggregated() call
+ * would make the region information invalid,
+ * particularly for ->nr_accesses_bp.
+ *
+ * Reset ->next_aggregation_sis to avoid that.
+ * It will be correctly updated after this if
+ * clause anyway.
+ */
+ ctx->next_aggregation_sis =
+ next_aggregation_sis;
+ ctx->next_intervals_tune_sis +=
+ ctx->attrs.aggr_samples *
+ ctx->attrs.intervals_goal.aggrs;
+ kdamond_tune_intervals(ctx);
+ sample_interval = ctx->attrs.sample_interval ?
+ ctx->attrs.sample_interval : 1;
+
+ }
ctx->next_aggregation_sis = next_aggregation_sis +
ctx->attrs.aggr_interval / sample_interval;
kdamond_reset_aggregated(ctx);
kdamond_split_regions(ctx);
- if (ctx->ops.reset_aggregated)
- ctx->ops.reset_aggregated(ctx);
}
if (ctx->passed_sample_intervals >= next_ops_update_sis) {
diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
index d25d99cb5f2b..0db1fc70c84d 100644
--- a/mm/damon/ops-common.c
+++ b/mm/damon/ops-common.c
@@ -9,6 +9,8 @@
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
#include "ops-common.h"
@@ -24,7 +26,7 @@ struct folio *damon_get_folio(unsigned long pfn)
struct page *page = pfn_to_online_page(pfn);
struct folio *folio;
- if (!page || PageTail(page))
+ if (!page)
return NULL;
folio = page_folio(page);
@@ -39,12 +41,29 @@ struct folio *damon_get_folio(unsigned long pfn)
void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr)
{
- struct folio *folio = damon_get_folio(pte_pfn(ptep_get(pte)));
+ pte_t pteval = ptep_get(pte);
+ struct folio *folio;
+ bool young = false;
+ unsigned long pfn;
+
+ if (likely(pte_present(pteval)))
+ pfn = pte_pfn(pteval);
+ else
+ pfn = swp_offset_pfn(pte_to_swp_entry(pteval));
+ folio = damon_get_folio(pfn);
if (!folio)
return;
- if (ptep_clear_young_notify(vma, addr, pte))
+ /*
+ * PFN swap PTEs, such as device-exclusive ones, that actually map pages
+ * are "old" from a CPU perspective. The MMU notifier takes care of any
+ * device aspects.
+ */
+ if (likely(pte_present(pteval)))
+ young |= ptep_test_and_clear_young(vma, addr, pte);
+ young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);
+ if (young)
folio_set_young(folio);
folio_set_idle(folio);
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index c834aa217835..1b70d3f36046 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -92,12 +92,20 @@ static bool damon_folio_young_one(struct folio *folio,
{
bool *accessed = arg;
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
+ pte_t pte;
*accessed = false;
while (page_vma_mapped_walk(&pvmw)) {
addr = pvmw.address;
if (pvmw.pte) {
- *accessed = pte_young(ptep_get(pvmw.pte)) ||
+ pte = ptep_get(pvmw.pte);
+
+ /*
+ * PFN swap PTEs, such as device-exclusive ones, that
+ * actually map pages are "old" from a CPU perspective.
+ * The MMU notifier takes care of any device aspects.
+ */
+ *accessed = (pte_present(pte) && pte_young(pte)) ||
!folio_test_idle(folio) ||
mmu_notifier_test_young(vma->vm_mm, addr);
} else {
@@ -203,11 +211,15 @@ static bool damos_pa_filter_match(struct damos_filter *filter,
{
bool matched = false;
struct mem_cgroup *memcg;
+ size_t folio_sz;
switch (filter->type) {
case DAMOS_FILTER_TYPE_ANON:
matched = folio_test_anon(folio);
break;
+ case DAMOS_FILTER_TYPE_ACTIVE:
+ matched = folio_test_active(folio);
+ break;
case DAMOS_FILTER_TYPE_MEMCG:
rcu_read_lock();
memcg = folio_memcg_check(folio);
@@ -222,6 +234,14 @@ static bool damos_pa_filter_match(struct damos_filter *filter,
if (matched)
damon_folio_mkold(folio);
break;
+ case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
+ folio_sz = folio_size(folio);
+ matched = filter->sz_range.min <= folio_sz &&
+ folio_sz <= filter->sz_range.max;
+ break;
+ case DAMOS_FILTER_TYPE_UNMAPPED:
+ matched = !folio_mapped(folio) || !folio_raw_mapping(folio);
+ break;
default:
break;
}
@@ -239,10 +259,21 @@ static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
if (scheme->core_filters_allowed)
return false;
- damos_for_each_filter(filter, scheme) {
+ damos_for_each_ops_filter(filter, scheme) {
if (damos_pa_filter_match(filter, folio))
return !filter->allow;
}
+ return scheme->ops_filters_default_reject;
+}
+
+static bool damon_pa_invalid_damos_folio(struct folio *folio, struct damos *s)
+{
+ if (!folio)
+ return true;
+ if (folio == s->last_applied) {
+ folio_put(folio);
+ return true;
+ }
return false;
}
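
A toy model of the new region walk: matched folios advance the cursor by their full size, so a large folio is visited once, while the folio recorded in ->last_applied by the previous apply is skipped so overlapping regions within one scheme pass do not process it twice. The folio layout below is assumed.

    #include <stdio.h>

    struct folio { unsigned long addr, size; };

    int main(void)
    {
        struct folio huge = { 0x000000, 0x200000 };   /* 2 MiB folio */
        struct folio small = { 0x200000, 0x1000 };    /* 4 KiB folio */
        struct folio *last_applied = &huge;           /* set by the previous region */
        unsigned long addr = 0, end = 0x201000;

        while (addr < end) {
            struct folio *f = addr < 0x200000 ? &huge : &small;

            if (f == last_applied) {
                addr += 0x1000;               /* PAGE_SIZE, as in the kernel */
                continue;
            }
            printf("apply at %#lx\n", f->addr); /* only 0x200000 is applied */
            addr += f->size;
            last_applied = f;
        }
        return 0;
    }
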
@@ -253,9 +284,10 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
LIST_HEAD(folio_list);
bool install_young_filter = true;
struct damos_filter *filter;
+ struct folio *folio;
/* check access in page level again by default */
- damos_for_each_filter(filter, s) {
+ damos_for_each_ops_filter(filter, s) {
if (filter->type == DAMOS_FILTER_TYPE_YOUNG) {
install_young_filter = false;
break;
@@ -269,11 +301,13 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
damos_add_filter(s, filter);
}
- for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
- struct folio *folio = damon_get_folio(PHYS_PFN(addr));
-
- if (!folio)
+ addr = r->ar.start;
+ while (addr < r->ar.end) {
+ folio = damon_get_folio(PHYS_PFN(addr));
+ if (damon_pa_invalid_damos_folio(folio, s)) {
+ addr += PAGE_SIZE;
continue;
+ }
if (damos_pa_filter_out(s, folio))
goto put_folio;
@@ -289,12 +323,14 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
else
list_add(&folio->lru, &folio_list);
put_folio:
+ addr += folio_size(folio);
folio_put(folio);
}
if (install_young_filter)
damos_destroy_filter(filter);
applied = reclaim_pages(&folio_list);
cond_resched();
+ s->last_applied = folio;
return applied * PAGE_SIZE;
}
@@ -303,12 +339,15 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
unsigned long *sz_filter_passed)
{
unsigned long addr, applied = 0;
+ struct folio *folio;
- for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
- struct folio *folio = damon_get_folio(PHYS_PFN(addr));
-
- if (!folio)
+ addr = r->ar.start;
+ while (addr < r->ar.end) {
+ folio = damon_get_folio(PHYS_PFN(addr));
+ if (damon_pa_invalid_damos_folio(folio, s)) {
+ addr += PAGE_SIZE;
continue;
+ }
if (damos_pa_filter_out(s, folio))
goto put_folio;
@@ -321,8 +360,10 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
folio_deactivate(folio);
applied += folio_nr_pages(folio);
put_folio:
+ addr += folio_size(folio);
folio_put(folio);
}
+ s->last_applied = folio;
return applied * PAGE_SIZE;
}
@@ -466,12 +507,15 @@ static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s,
{
unsigned long addr, applied;
LIST_HEAD(folio_list);
+ struct folio *folio;
- for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
- struct folio *folio = damon_get_folio(PHYS_PFN(addr));
-
- if (!folio)
+ addr = r->ar.start;
+ while (addr < r->ar.end) {
+ folio = damon_get_folio(PHYS_PFN(addr));
+ if (damon_pa_invalid_damos_folio(folio, s)) {
+ addr += PAGE_SIZE;
continue;
+ }
if (damos_pa_filter_out(s, folio))
goto put_folio;
@@ -482,10 +526,12 @@ static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s,
goto put_folio;
list_add(&folio->lru, &folio_list);
put_folio:
+ addr += folio_size(folio);
folio_put(folio);
}
applied = damon_pa_migrate_pages(&folio_list, s->target_nid);
cond_resched();
+ s->last_applied = folio;
return applied * PAGE_SIZE;
}
@@ -493,7 +539,7 @@ static bool damon_pa_scheme_has_filter(struct damos *s)
{
struct damos_filter *f;
- damos_for_each_filter(f, s)
+ damos_for_each_ops_filter(f, s)
return true;
return false;
}
@@ -503,15 +549,15 @@ static unsigned long damon_pa_stat(struct damon_region *r, struct damos *s,
{
unsigned long addr;
LIST_HEAD(folio_list);
+ struct folio *folio;
if (!damon_pa_scheme_has_filter(s))
return 0;
addr = r->ar.start;
while (addr < r->ar.end) {
- struct folio *folio = damon_get_folio(PHYS_PFN(addr));
-
- if (!folio) {
+ folio = damon_get_folio(PHYS_PFN(addr));
+ if (damon_pa_invalid_damos_folio(folio, s)) {
addr += PAGE_SIZE;
continue;
}
@@ -521,6 +567,7 @@ static unsigned long damon_pa_stat(struct damon_region *r, struct damos *s,
addr += folio_size(folio);
folio_put(folio);
}
+ s->last_applied = folio;
return 0;
}
@@ -577,7 +624,6 @@ static int __init damon_pa_initcall(void)
.update = NULL,
.prepare_access_checks = damon_pa_prepare_access_checks,
.check_accesses = damon_pa_check_accesses,
- .reset_aggregated = NULL,
.target_valid = NULL,
.cleanup = NULL,
.apply_scheme = damon_pa_apply_scheme,
diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
index 98f93ae9f59e..23b562df0839 100644
--- a/mm/damon/sysfs-schemes.c
+++ b/mm/damon/sysfs-schemes.c
@@ -309,26 +309,46 @@ static const struct kobj_type damon_sysfs_stats_ktype = {
* filter directory
*/
+/*
+ * enum damos_sysfs_filter_handle_layer - Layers handling filters of a dir.
+ */
+enum damos_sysfs_filter_handle_layer {
+ DAMOS_SYSFS_FILTER_HANDLE_LAYER_CORE,
+ DAMOS_SYSFS_FILTER_HANDLE_LAYER_OPS,
+ DAMOS_SYSFS_FILTER_HANDLE_LAYER_BOTH,
+};
+
struct damon_sysfs_scheme_filter {
struct kobject kobj;
+ enum damos_sysfs_filter_handle_layer handle_layer;
enum damos_filter_type type;
bool matching;
bool allow;
char *memcg_path;
struct damon_addr_range addr_range;
+ struct damon_size_range sz_range;
int target_idx;
};
-static struct damon_sysfs_scheme_filter *damon_sysfs_scheme_filter_alloc(void)
+static struct damon_sysfs_scheme_filter *damon_sysfs_scheme_filter_alloc(
+ enum damos_sysfs_filter_handle_layer layer)
{
- return kzalloc(sizeof(struct damon_sysfs_scheme_filter), GFP_KERNEL);
+ struct damon_sysfs_scheme_filter *filter;
+
+ filter = kzalloc(sizeof(struct damon_sysfs_scheme_filter), GFP_KERNEL);
+ if (filter)
+ filter->handle_layer = layer;
+ return filter;
}
/* Should match with enum damos_filter_type */
static const char * const damon_sysfs_scheme_filter_type_strs[] = {
"anon",
+ "active",
"memcg",
"young",
+ "hugepage_size",
+ "unmapped",
"addr",
"target",
};
@@ -343,6 +363,23 @@ static ssize_t type_show(struct kobject *kobj,
damon_sysfs_scheme_filter_type_strs[filter->type]);
}
+static bool damos_sysfs_scheme_filter_valid_type(
+ enum damos_sysfs_filter_handle_layer layer,
+ enum damos_filter_type type)
+{
+ switch (layer) {
+ case DAMOS_SYSFS_FILTER_HANDLE_LAYER_BOTH:
+ return true;
+ case DAMOS_SYSFS_FILTER_HANDLE_LAYER_CORE:
+ return !damos_filter_for_ops(type);
+ case DAMOS_SYSFS_FILTER_HANDLE_LAYER_OPS:
+ return damos_filter_for_ops(type);
+ default:
+ break;
+ }
+ return false;
+}
+
static ssize_t type_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
@@ -354,6 +391,9 @@ static ssize_t type_store(struct kobject *kobj,
for (type = 0; type < NR_DAMOS_FILTER_TYPES; type++) {
if (sysfs_streq(buf, damon_sysfs_scheme_filter_type_strs[
type])) {
+ if (!damos_sysfs_scheme_filter_valid_type(
+ filter->handle_layer, type))
+ break;
filter->type = type;
ret = count;
break;
@@ -473,6 +513,44 @@ static ssize_t addr_end_store(struct kobject *kobj,
return err ? err : count;
}
+static ssize_t min_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct damon_sysfs_scheme_filter *filter = container_of(kobj,
+ struct damon_sysfs_scheme_filter, kobj);
+
+ return sysfs_emit(buf, "%lu\n", filter->sz_range.min);
+}
+
+static ssize_t min_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct damon_sysfs_scheme_filter *filter = container_of(kobj,
+ struct damon_sysfs_scheme_filter, kobj);
+ int err = kstrtoul(buf, 0, &filter->sz_range.min);
+
+ return err ? err : count;
+}
+
+static ssize_t max_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct damon_sysfs_scheme_filter *filter = container_of(kobj,
+ struct damon_sysfs_scheme_filter, kobj);
+
+ return sysfs_emit(buf, "%lu\n", filter->sz_range.max);
+}
+
+static ssize_t max_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct damon_sysfs_scheme_filter *filter = container_of(kobj,
+ struct damon_sysfs_scheme_filter, kobj);
+ int err = kstrtoul(buf, 0, &filter->sz_range.max);
+
+ return err ? err : count;
+}
+
static ssize_t damon_target_idx_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -519,6 +597,12 @@ static struct kobj_attribute damon_sysfs_scheme_filter_addr_start_attr =
static struct kobj_attribute damon_sysfs_scheme_filter_addr_end_attr =
__ATTR_RW_MODE(addr_end, 0600);
+static struct kobj_attribute damon_sysfs_scheme_filter_min_attr =
+ __ATTR_RW_MODE(min, 0600);
+
+static struct kobj_attribute damon_sysfs_scheme_filter_max_attr =
+ __ATTR_RW_MODE(max, 0600);
+
static struct kobj_attribute damon_sysfs_scheme_filter_damon_target_idx_attr =
__ATTR_RW_MODE(damon_target_idx, 0600);
@@ -529,6 +613,8 @@ static struct attribute *damon_sysfs_scheme_filter_attrs[] = {
&damon_sysfs_scheme_filter_memcg_path_attr.attr,
&damon_sysfs_scheme_filter_addr_start_attr.attr,
&damon_sysfs_scheme_filter_addr_end_attr.attr,
+ &damon_sysfs_scheme_filter_min_attr.attr,
+ &damon_sysfs_scheme_filter_max_attr.attr,
&damon_sysfs_scheme_filter_damon_target_idx_attr.attr,
NULL,
};
@@ -546,14 +632,20 @@ static const struct kobj_type damon_sysfs_scheme_filter_ktype = {
struct damon_sysfs_scheme_filters {
struct kobject kobj;
+ enum damos_sysfs_filter_handle_layer handle_layer;
struct damon_sysfs_scheme_filter **filters_arr;
int nr;
};
static struct damon_sysfs_scheme_filters *
-damon_sysfs_scheme_filters_alloc(void)
+damon_sysfs_scheme_filters_alloc(enum damos_sysfs_filter_handle_layer layer)
{
- return kzalloc(sizeof(struct damon_sysfs_scheme_filters), GFP_KERNEL);
+ struct damon_sysfs_scheme_filters *filters;
+
+ filters = kzalloc(sizeof(struct damon_sysfs_scheme_filters), GFP_KERNEL);
+ if (filters)
+ filters->handle_layer = layer;
+ return filters;
}
static void damon_sysfs_scheme_filters_rm_dirs(
@@ -586,7 +678,8 @@ static int damon_sysfs_scheme_filters_add_dirs(
filters->filters_arr = filters_arr;
for (i = 0; i < nr_filters; i++) {
- filter = damon_sysfs_scheme_filter_alloc();
+ filter = damon_sysfs_scheme_filter_alloc(
+ filters->handle_layer);
if (!filter) {
damon_sysfs_scheme_filters_rm_dirs(filters);
return -ENOMEM;
@@ -1379,7 +1472,7 @@ static int damon_sysfs_access_pattern_add_range_dir(
if (!range)
return -ENOMEM;
err = kobject_init_and_add(&range->kobj, &damon_sysfs_ul_range_ktype,
- &access_pattern->kobj, name);
+ &access_pattern->kobj, "%s", name);
if (err)
kobject_put(&range->kobj);
else
@@ -1455,6 +1548,8 @@ struct damon_sysfs_scheme {
unsigned long apply_interval_us;
struct damon_sysfs_quotas *quotas;
struct damon_sysfs_watermarks *watermarks;
+ struct damon_sysfs_scheme_filters *core_filters;
+ struct damon_sysfs_scheme_filters *ops_filters;
struct damon_sysfs_scheme_filters *filters;
struct damon_sysfs_stats *stats;
struct damon_sysfs_scheme_regions *tried_regions;
@@ -1555,21 +1650,53 @@ static int damon_sysfs_scheme_set_watermarks(struct damon_sysfs_scheme *scheme)
return err;
}
-static int damon_sysfs_scheme_set_filters(struct damon_sysfs_scheme *scheme)
+static int damon_sysfs_scheme_set_filters(struct damon_sysfs_scheme *scheme,
+ enum damos_sysfs_filter_handle_layer layer, const char *name,
+ struct damon_sysfs_scheme_filters **filters_ptr)
{
struct damon_sysfs_scheme_filters *filters =
- damon_sysfs_scheme_filters_alloc();
+ damon_sysfs_scheme_filters_alloc(layer);
int err;
if (!filters)
return -ENOMEM;
err = kobject_init_and_add(&filters->kobj,
&damon_sysfs_scheme_filters_ktype, &scheme->kobj,
- "filters");
+ "%s", name);
if (err)
kobject_put(&filters->kobj);
else
- scheme->filters = filters;
+ *filters_ptr = filters;
+ return err;
+}
+
+static int damos_sysfs_set_filter_dirs(struct damon_sysfs_scheme *scheme)
+{
+ int err;
+
+ err = damon_sysfs_scheme_set_filters(scheme,
+ DAMOS_SYSFS_FILTER_HANDLE_LAYER_BOTH, "filters",
+ &scheme->filters);
+ if (err)
+ return err;
+ err = damon_sysfs_scheme_set_filters(scheme,
+ DAMOS_SYSFS_FILTER_HANDLE_LAYER_CORE, "core_filters",
+ &scheme->core_filters);
+ if (err)
+ goto put_filters_out;
+ err = damon_sysfs_scheme_set_filters(scheme,
+ DAMOS_SYSFS_FILTER_HANDLE_LAYER_OPS, "ops_filters",
+ &scheme->ops_filters);
+ if (err)
+ goto put_core_filters_out;
+ return 0;
+
+put_core_filters_out:
+ kobject_put(&scheme->core_filters->kobj);
+ scheme->core_filters = NULL;
+put_filters_out:
+ kobject_put(&scheme->filters->kobj);
+ scheme->filters = NULL;
return err;
}
@@ -1621,7 +1748,7 @@ static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme)
err = damon_sysfs_scheme_set_watermarks(scheme);
if (err)
goto put_quotas_access_pattern_out;
- err = damon_sysfs_scheme_set_filters(scheme);
+ err = damos_sysfs_set_filter_dirs(scheme);
if (err)
goto put_watermarks_quotas_access_pattern_out;
err = damon_sysfs_scheme_set_stats(scheme);
@@ -1636,6 +1763,10 @@ put_tried_regions_out:
kobject_put(&scheme->tried_regions->kobj);
scheme->tried_regions = NULL;
put_filters_watermarks_quotas_access_pattern_out:
+ kobject_put(&scheme->ops_filters->kobj);
+ scheme->ops_filters = NULL;
+ kobject_put(&scheme->core_filters->kobj);
+ scheme->core_filters = NULL;
kobject_put(&scheme->filters->kobj);
scheme->filters = NULL;
put_watermarks_quotas_access_pattern_out:
@@ -1659,6 +1790,10 @@ static void damon_sysfs_scheme_rm_dirs(struct damon_sysfs_scheme *scheme)
kobject_put(&scheme->watermarks->kobj);
damon_sysfs_scheme_filters_rm_dirs(scheme->filters);
kobject_put(&scheme->filters->kobj);
+ damon_sysfs_scheme_filters_rm_dirs(scheme->core_filters);
+ kobject_put(&scheme->core_filters->kobj);
+ damon_sysfs_scheme_filters_rm_dirs(scheme->ops_filters);
+ kobject_put(&scheme->ops_filters->kobj);
kobject_put(&scheme->stats->kobj);
damon_sysfs_scheme_regions_rm_dirs(scheme->tried_regions);
kobject_put(&scheme->tried_regions->kobj);
@@ -1953,6 +2088,13 @@ static int damon_sysfs_add_scheme_filters(struct damos *scheme,
filter->addr_range = sysfs_filter->addr_range;
} else if (filter->type == DAMOS_FILTER_TYPE_TARGET) {
filter->target_idx = sysfs_filter->target_idx;
+ } else if (filter->type == DAMOS_FILTER_TYPE_HUGEPAGE_SIZE) {
+ if (sysfs_filter->sz_range.min >
+ sysfs_filter->sz_range.max) {
+ damos_destroy_filter(filter);
+ return -EINVAL;
+ }
+ filter->sz_range = sysfs_filter->sz_range;
}
damos_add_filter(scheme, filter);
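
The hugepage_size filter is installed only when min <= max, and then matches folio sizes inclusively at both ends. A minimal model with assumed bounds:

    #include <stdbool.h>
    #include <stdio.h>

    struct size_range { unsigned long min, max; };

    static bool sz_match(struct size_range r, unsigned long folio_sz)
    {
        return r.min <= folio_sz && folio_sz <= r.max;
    }

    int main(void)
    {
        struct size_range r = { 1UL << 21, 1UL << 30 }; /* 2 MiB .. 1 GiB */

        if (r.min > r.max)
            return 1;                           /* mirrors the -EINVAL rejection */
        printf("%d\n", sz_match(r, 4096));      /* 0: base page */
        printf("%d\n", sz_match(r, 1UL << 21)); /* 1: 2 MiB THP */
        return 0;
    }
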
@@ -2048,8 +2190,6 @@ static struct damos *damon_sysfs_mk_scheme(
struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas;
struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights;
struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks;
- struct damon_sysfs_scheme_filters *sysfs_filters =
- sysfs_scheme->filters;
struct damos *scheme;
int err;
@@ -2089,7 +2229,17 @@ static struct damos *damon_sysfs_mk_scheme(
return NULL;
}
- err = damon_sysfs_add_scheme_filters(scheme, sysfs_filters);
+ err = damon_sysfs_add_scheme_filters(scheme, sysfs_scheme->core_filters);
+ if (err) {
+ damon_destroy_scheme(scheme);
+ return NULL;
+ }
+ err = damon_sysfs_add_scheme_filters(scheme, sysfs_scheme->ops_filters);
+ if (err) {
+ damon_destroy_scheme(scheme);
+ return NULL;
+ }
+ err = damon_sysfs_add_scheme_filters(scheme, sysfs_scheme->filters);
if (err) {
damon_destroy_scheme(scheme);
return NULL;
@@ -2192,7 +2342,6 @@ void damos_sysfs_populate_region_dir(struct damon_sysfs_schemes *sysfs_schemes,
}
}
-/* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock */
int damon_sysfs_schemes_clear_regions(
struct damon_sysfs_schemes *sysfs_schemes)
{
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index deeab04d3b46..1af6aff35d84 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -409,6 +409,164 @@ static const struct kobj_type damon_sysfs_targets_ktype = {
};
/*
+ * intervals goal directory
+ */
+
+struct damon_sysfs_intervals_goal {
+ struct kobject kobj;
+ unsigned long access_bp;
+ unsigned long aggrs;
+ unsigned long min_sample_us;
+ unsigned long max_sample_us;
+};
+
+static struct damon_sysfs_intervals_goal *damon_sysfs_intervals_goal_alloc(
+ unsigned long access_bp, unsigned long aggrs,
+ unsigned long min_sample_us, unsigned long max_sample_us)
+{
+ struct damon_sysfs_intervals_goal *goal = kmalloc(sizeof(*goal),
+ GFP_KERNEL);
+
+ if (!goal)
+ return NULL;
+
+ goal->kobj = (struct kobject){};
+ goal->access_bp = access_bp;
+ goal->aggrs = aggrs;
+ goal->min_sample_us = min_sample_us;
+ goal->max_sample_us = max_sample_us;
+ return goal;
+}
+
+static ssize_t access_bp_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct damon_sysfs_intervals_goal *goal = container_of(kobj,
+ struct damon_sysfs_intervals_goal, kobj);
+
+ return sysfs_emit(buf, "%lu\n", goal->access_bp);
+}
+
+static ssize_t access_bp_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct damon_sysfs_intervals_goal *goal = container_of(kobj,
+ struct damon_sysfs_intervals_goal, kobj);
+ unsigned long nr;
+ int err = kstrtoul(buf, 0, &nr);
+
+ if (err)
+ return err;
+
+ goal->access_bp = nr;
+ return count;
+}
+
+static ssize_t aggrs_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct damon_sysfs_intervals_goal *goal = container_of(kobj,
+ struct damon_sysfs_intervals_goal, kobj);
+
+ return sysfs_emit(buf, "%lu\n", goal->aggrs);
+}
+
+static ssize_t aggrs_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct damon_sysfs_intervals_goal *goal = container_of(kobj,
+ struct damon_sysfs_intervals_goal, kobj);
+ unsigned long nr;
+ int err = kstrtoul(buf, 0, &nr);
+
+ if (err)
+ return err;
+
+ goal->aggrs = nr;
+ return count;
+}
+
+static ssize_t min_sample_us_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct damon_sysfs_intervals_goal *goal = container_of(kobj,
+ struct damon_sysfs_intervals_goal, kobj);
+
+ return sysfs_emit(buf, "%lu\n", goal->min_sample_us);
+}
+
+static ssize_t min_sample_us_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct damon_sysfs_intervals_goal *goal = container_of(kobj,
+ struct damon_sysfs_intervals_goal, kobj);
+ unsigned long nr;
+ int err = kstrtoul(buf, 0, &nr);
+
+ if (err)
+ return err;
+
+ goal->min_sample_us = nr;
+ return count;
+}
+
+static ssize_t max_sample_us_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct damon_sysfs_intervals_goal *goal = container_of(kobj,
+ struct damon_sysfs_intervals_goal, kobj);
+
+ return sysfs_emit(buf, "%lu\n", goal->max_sample_us);
+}
+
+static ssize_t max_sample_us_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct damon_sysfs_intervals_goal *goal = container_of(kobj,
+ struct damon_sysfs_intervals_goal, kobj);
+ unsigned long nr;
+ int err = kstrtoul(buf, 0, &nr);
+
+ if (err)
+ return err;
+
+ goal->max_sample_us = nr;
+ return count;
+}
+
+static void damon_sysfs_intervals_goal_release(struct kobject *kobj)
+{
+ kfree(container_of(kobj, struct damon_sysfs_intervals_goal, kobj));
+}
+
+static struct kobj_attribute damon_sysfs_intervals_goal_access_bp_attr =
+ __ATTR_RW_MODE(access_bp, 0600);
+
+static struct kobj_attribute damon_sysfs_intervals_goal_aggrs_attr =
+ __ATTR_RW_MODE(aggrs, 0600);
+
+static struct kobj_attribute damon_sysfs_intervals_goal_min_sample_us_attr =
+ __ATTR_RW_MODE(min_sample_us, 0600);
+
+static struct kobj_attribute damon_sysfs_intervals_goal_max_sample_us_attr =
+ __ATTR_RW_MODE(max_sample_us, 0600);
+
+static struct attribute *damon_sysfs_intervals_goal_attrs[] = {
+ &damon_sysfs_intervals_goal_access_bp_attr.attr,
+ &damon_sysfs_intervals_goal_aggrs_attr.attr,
+ &damon_sysfs_intervals_goal_min_sample_us_attr.attr,
+ &damon_sysfs_intervals_goal_max_sample_us_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(damon_sysfs_intervals_goal);
+
+static const struct kobj_type damon_sysfs_intervals_goal_ktype = {
+ .release = damon_sysfs_intervals_goal_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_groups = damon_sysfs_intervals_goal_groups,
+};
+
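
A hypothetical userspace snippet driving the new files; the directory layout below assumes the usual /sys/kernel/mm/damon/admin hierarchy with kdamond and context index 0, and should be verified against the running kernel before relying on it.

    #include <stdio.h>

    static int write_attr(const char *path, const char *val)
    {
        FILE *f = fopen(path, "w");

        if (!f)
            return -1;
        fprintf(f, "%s\n", val);
        return fclose(f);
    }

    int main(void)
    {
        const char *dir = "/sys/kernel/mm/damon/admin/kdamonds/0/"
                          "contexts/0/monitoring_attrs/intervals/intervals_goal/";
        char path[256];

        snprintf(path, sizeof(path), "%saccess_bp", dir);
        write_attr(path, "400");       /* aim for 4% of max access events... */
        snprintf(path, sizeof(path), "%saggrs", dir);
        write_attr(path, "3");         /* ...per three aggregation intervals */
        snprintf(path, sizeof(path), "%smin_sample_us", dir);
        write_attr(path, "5000");
        snprintf(path, sizeof(path), "%smax_sample_us", dir);
        write_attr(path, "10000000");
        return 0;
    }
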
+/*
* intervals directory
*/
@@ -417,6 +575,7 @@ struct damon_sysfs_intervals {
unsigned long sample_us;
unsigned long aggr_us;
unsigned long update_us;
+ struct damon_sysfs_intervals_goal *intervals_goal;
};
static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc(
@@ -436,6 +595,32 @@ static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc(
return intervals;
}
+static int damon_sysfs_intervals_add_dirs(struct damon_sysfs_intervals *intervals)
+{
+ struct damon_sysfs_intervals_goal *goal;
+ int err;
+
+ goal = damon_sysfs_intervals_goal_alloc(0, 0, 0, 0);
+ if (!goal)
+ return -ENOMEM;
+
+ err = kobject_init_and_add(&goal->kobj,
+ &damon_sysfs_intervals_goal_ktype, &intervals->kobj,
+ "intervals_goal");
+ if (err) {
+ kobject_put(&goal->kobj);
+ intervals->intervals_goal = NULL;
+ return err;
+ }
+ intervals->intervals_goal = goal;
+ return 0;
+}
+
+static void damon_sysfs_intervals_rm_dirs(struct damon_sysfs_intervals *intervals)
+{
+ kobject_put(&intervals->intervals_goal->kobj);
+}
+
static ssize_t sample_us_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -571,6 +756,9 @@ static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs)
"intervals");
if (err)
goto put_intervals_out;
+ err = damon_sysfs_intervals_add_dirs(intervals);
+ if (err)
+ goto put_intervals_out;
attrs->intervals = intervals;
nr_regions_range = damon_sysfs_ul_range_alloc(10, 1000);
@@ -599,6 +787,7 @@ put_intervals_out:
static void damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs *attrs)
{
kobject_put(&attrs->nr_regions_range->kobj);
+ damon_sysfs_intervals_rm_dirs(attrs->intervals);
kobject_put(&attrs->intervals->kobj);
}
@@ -1025,6 +1214,11 @@ enum damon_sysfs_cmd {
*/
DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS,
/*
+ * @DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS: Update the tuned monitoring
+ * intevals.
+ */
+ DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS,
+ /*
* @NR_DAMON_SYSFS_CMDS: Total number of DAMON sysfs commands.
*/
NR_DAMON_SYSFS_CMDS,
@@ -1041,27 +1235,9 @@ static const char * const damon_sysfs_cmd_strs[] = {
"update_schemes_tried_regions",
"clear_schemes_tried_regions",
"update_schemes_effective_quotas",
+ "update_tuned_intervals",
};
-/*
- * struct damon_sysfs_cmd_request - A request to the DAMON callback.
- * @cmd: The command that needs to be handled by the callback.
- * @kdamond: The kobject wrapper that associated to the kdamond thread.
- *
- * This structure represents a sysfs command request that need to access some
- * DAMON context-internal data. Because DAMON context-internal data can be
- * safely accessed from DAMON callbacks without additional synchronization, the
- * request will be handled by the DAMON callback. None-``NULL`` @kdamond means
- * the request is valid.
- */
-struct damon_sysfs_cmd_request {
- enum damon_sysfs_cmd cmd;
- struct damon_sysfs_kdamond *kdamond;
-};
-
-/* Current DAMON callback request. Protected by damon_sysfs_lock. */
-static struct damon_sysfs_cmd_request damon_sysfs_cmd_request;
-
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
@@ -1084,11 +1260,18 @@ static int damon_sysfs_set_attrs(struct damon_ctx *ctx,
struct damon_sysfs_attrs *sys_attrs)
{
struct damon_sysfs_intervals *sys_intervals = sys_attrs->intervals;
+ struct damon_sysfs_intervals_goal *sys_goal =
+ sys_intervals->intervals_goal;
struct damon_sysfs_ul_range *sys_nr_regions =
sys_attrs->nr_regions_range;
struct damon_attrs attrs = {
.sample_interval = sys_intervals->sample_us,
.aggr_interval = sys_intervals->aggr_us,
+ .intervals_goal = {
+ .access_bp = sys_goal->access_bp,
+ .aggrs = sys_goal->aggrs,
+ .min_sample_us = sys_goal->min_sample_us,
+ .max_sample_us = sys_goal->max_sample_us},
.ops_update_interval = sys_intervals->update_us,
.min_nr_regions = sys_nr_regions->min,
.max_nr_regions = sys_nr_regions->max,
@@ -1247,11 +1430,12 @@ static struct damon_ctx *damon_sysfs_build_ctx(
* damon_sysfs_commit_input() - Commit user inputs to a running kdamond.
* @kdamond: The kobject wrapper for the associated kdamond.
*
- * If the sysfs input is wrong, the kdamond will be terminated.
+ * Returns an error if the sysfs input is wrong.
*/
-static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
+static int damon_sysfs_commit_input(void *data)
{
- struct damon_ctx *param_ctx;
+ struct damon_sysfs_kdamond *kdamond = data;
+ struct damon_ctx *param_ctx, *test_ctx;
int err;
if (!damon_sysfs_kdamond_running(kdamond))
@@ -1263,7 +1447,15 @@ static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
param_ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
if (IS_ERR(param_ctx))
return PTR_ERR(param_ctx);
+ test_ctx = damon_new_ctx();
+ err = damon_commit_ctx(test_ctx, param_ctx);
+ if (err) {
+ damon_sysfs_destroy_targets(test_ctx);
+ damon_destroy_ctx(test_ctx);
+ goto out;
+ }
err = damon_commit_ctx(kdamond->damon_ctx, param_ctx);
+out:
damon_sysfs_destroy_targets(param_ctx);
damon_destroy_ctx(param_ctx);
return err;
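
The hunk above validates the user's input by committing it to a throwaway test_ctx first, so a malformed commit can no longer corrupt the live context of a running kdamond. A minimal userspace sketch of the same validate-then-apply pattern (the struct and function names below are illustrative, not kernel API):

#include <stdio.h>
#include <string.h>

/* Hypothetical config object standing in for a DAMON context. */
struct config {
	unsigned long sample_us;
	unsigned long aggr_us;
};

/* Stand-in for damon_commit_ctx(): validate src, then copy into dst. */
static int commit_config(struct config *dst, const struct config *src)
{
	if (src->sample_us == 0 || src->aggr_us < src->sample_us)
		return -1;	/* invalid input; dst is left untouched */
	memcpy(dst, src, sizeof(*dst));
	return 0;
}

int main(void)
{
	struct config live = { .sample_us = 5000, .aggr_us = 100000 };
	struct config input = { .sample_us = 0, .aggr_us = 100 };
	struct config scratch = { 0 };

	/* Dry run against a scratch object first, as the patch does with
	 * test_ctx, so the live config is never half-updated. */
	if (commit_config(&scratch, &input)) {
		fprintf(stderr, "rejected bad input, live config intact\n");
		return 1;
	}
	return commit_config(&live, &input);
}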
@@ -1306,69 +1498,16 @@ static int damon_sysfs_upd_schemes_effective_quotas(void *data)
return 0;
}
-
-/*
- * damon_sysfs_cmd_request_callback() - DAMON callback for handling requests.
- * @c: The DAMON context of the callback.
- * @active: Whether @c is not deactivated due to watermarks.
- * @after_aggr: Whether this is called from after_aggregation() callback.
- *
- * This function is periodically called back from the kdamond thread for @c.
- * Then, it checks if there is a waiting DAMON sysfs request and handles it.
- */
-static int damon_sysfs_cmd_request_callback(struct damon_ctx *c, bool active,
- bool after_aggregation)
-{
- struct damon_sysfs_kdamond *kdamond;
- int err = 0;
-
- /* avoid deadlock due to concurrent state_store('off') */
- if (!mutex_trylock(&damon_sysfs_lock))
- return 0;
- kdamond = damon_sysfs_cmd_request.kdamond;
- if (!kdamond || kdamond->damon_ctx != c)
- goto out;
- switch (damon_sysfs_cmd_request.cmd) {
- case DAMON_SYSFS_CMD_COMMIT:
- if (!after_aggregation)
- goto out;
- err = damon_sysfs_commit_input(kdamond);
- break;
- default:
- break;
- }
- /* Mark the request as invalid now. */
- damon_sysfs_cmd_request.kdamond = NULL;
-out:
- mutex_unlock(&damon_sysfs_lock);
- return err;
-}
-
-static int damon_sysfs_after_wmarks_check(struct damon_ctx *c)
+static int damon_sysfs_upd_tuned_intervals(void *data)
{
- /*
- * after_wmarks_check() is called back while the context is deactivated
- * by watermarks.
- */
- return damon_sysfs_cmd_request_callback(c, false, false);
-}
-
-static int damon_sysfs_after_sampling(struct damon_ctx *c)
-{
- /*
- * after_sampling() is called back only while the context is not
- * deactivated by watermarks.
- */
- return damon_sysfs_cmd_request_callback(c, true, false);
-}
+ struct damon_sysfs_kdamond *kdamond = data;
+ struct damon_ctx *ctx = kdamond->damon_ctx;
-static int damon_sysfs_after_aggregation(struct damon_ctx *c)
-{
- /*
- * after_aggregation() is called back only while the context is not
- * deactivated by watermarks.
- */
- return damon_sysfs_cmd_request_callback(c, true, true);
+ kdamond->contexts->contexts_arr[0]->attrs->intervals->sample_us =
+ ctx->attrs.sample_interval;
+ kdamond->contexts->contexts_arr[0]->attrs->intervals->aggr_us =
+ ctx->attrs.aggr_interval;
+ return 0;
}
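
Together with the intervals_goal files added earlier, this gives userspace a tuning loop: write a goal, let kdamond auto-tune the sampling interval, then issue update_tuned_intervals and read the tuned values back. A rough sketch; the sysfs paths below follow the usual DAMON admin layout and are an assumption, not verified against this tree:

#include <stdio.h>

/* Assumed DAMON sysfs layout; adjust kdamond/context indices as needed. */
#define ATTRS "/sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/monitoring_attrs"
#define KD    "/sys/kernel/mm/damon/admin/kdamonds/0"

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	char buf[64];
	FILE *f;

	/* Aim for a 0.4% average access rate, sampled over 3 aggregations. */
	write_str(ATTRS "/intervals/intervals_goal/access_bp", "40");
	write_str(ATTRS "/intervals/intervals_goal/aggrs", "3");
	write_str(ATTRS "/intervals/intervals_goal/min_sample_us", "5000");
	write_str(ATTRS "/intervals/intervals_goal/max_sample_us", "10000000");
	write_str(KD "/state", "commit");

	/* ... later: pull the auto-tuned intervals back into sysfs. */
	write_str(KD "/state", "update_tuned_intervals");
	f = fopen(ATTRS "/intervals/sample_us", "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("tuned sample_us: %s", buf);
	if (f)
		fclose(f);
	return 0;
}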
static struct damon_ctx *damon_sysfs_build_ctx(
@@ -1386,9 +1525,6 @@ static struct damon_ctx *damon_sysfs_build_ctx(
return ERR_PTR(err);
}
- ctx->callback.after_wmarks_check = damon_sysfs_after_wmarks_check;
- ctx->callback.after_sampling = damon_sysfs_after_sampling;
- ctx->callback.after_aggregation = damon_sysfs_after_aggregation;
ctx->callback.before_terminate = damon_sysfs_before_terminate;
return ctx;
}
@@ -1400,8 +1536,6 @@ static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
if (damon_sysfs_kdamond_running(kdamond))
return -EBUSY;
- if (damon_sysfs_cmd_request.kdamond == kdamond)
- return -EBUSY;
/* TODO: support multiple contexts per kdamond */
if (kdamond->contexts->nr != 1)
return -EINVAL;
@@ -1491,24 +1625,21 @@ static int damon_sysfs_update_schemes_tried_regions(
* @cmd: The command to handle.
* @kdamond: The kobject wrapper for the associated kdamond.
*
- * This function handles a DAMON sysfs command for a kdamond. For commands
- * that need to access running DAMON context-internal data, it requests
- * handling of the command to the DAMON callback
- * (@damon_sysfs_cmd_request_callback()) and wait until it is properly handled,
- * or the context is completed.
+ * This function handles a DAMON sysfs command for a kdamond.
*
* Return: 0 on success, negative error code otherwise.
*/
static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd,
struct damon_sysfs_kdamond *kdamond)
{
- bool need_wait = true;
-
switch (cmd) {
case DAMON_SYSFS_CMD_ON:
return damon_sysfs_turn_damon_on(kdamond);
case DAMON_SYSFS_CMD_OFF:
return damon_sysfs_turn_damon_off(kdamond);
+ case DAMON_SYSFS_CMD_COMMIT:
+ return damon_sysfs_damon_call(
+ damon_sysfs_commit_input, kdamond);
case DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS:
return damon_sysfs_damon_call(
damon_sysfs_commit_schemes_quota_goals,
@@ -1527,39 +1658,12 @@ static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd,
return damon_sysfs_damon_call(
damon_sysfs_upd_schemes_effective_quotas,
kdamond);
+ case DAMON_SYSFS_CMD_UPDATE_TUNED_INTERVALS:
+ return damon_sysfs_damon_call(
+ damon_sysfs_upd_tuned_intervals, kdamond);
default:
- break;
- }
-
- /* Pass the command to DAMON callback for safe DAMON context access */
- if (damon_sysfs_cmd_request.kdamond)
- return -EBUSY;
- if (!damon_sysfs_kdamond_running(kdamond))
return -EINVAL;
- damon_sysfs_cmd_request.cmd = cmd;
- damon_sysfs_cmd_request.kdamond = kdamond;
-
- /*
- * wait until damon_sysfs_cmd_request_callback() handles the request
- * from kdamond context
- */
- mutex_unlock(&damon_sysfs_lock);
- while (need_wait) {
- schedule_timeout_idle(msecs_to_jiffies(100));
- if (!mutex_trylock(&damon_sysfs_lock))
- continue;
- if (!damon_sysfs_cmd_request.kdamond) {
- /* damon_sysfs_cmd_request_callback() handled */
- need_wait = false;
- } else if (!damon_sysfs_kdamond_running(kdamond)) {
- /* kdamond has already finished */
- need_wait = false;
- damon_sysfs_cmd_request.kdamond = NULL;
- }
- mutex_unlock(&damon_sysfs_lock);
}
- mutex_lock(&damon_sysfs_lock);
- return 0;
}
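
The removed lines polled a shared request slot every 100ms until a kdamond callback picked the command up; damon_sysfs_damon_call() instead hands the function directly to the kdamond thread and blocks until it has run. A condensed pthread model of that handoff, capturing only the shape of the mechanism (damon_call itself is kernel-internal):

#include <pthread.h>
#include <stdio.h>

/* One pending call slot, standing in for struct damon_call_control. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int (*req_fn)(void *);
static void *req_data;
static int req_ret, req_done;

/* kdamond side: execute any queued call between monitoring steps. */
static void *kdamond(void *arg)
{
	pthread_mutex_lock(&lock);
	for (;;) {
		while (!req_fn)
			pthread_cond_wait(&cond, &lock);
		req_ret = req_fn(req_data);
		req_fn = NULL;
		req_done = 1;
		pthread_cond_broadcast(&cond);
	}
	return NULL;
}

/* Caller side: queue fn and sleep until kdamond has executed it. */
static int damon_call(int (*fn)(void *), void *data)
{
	pthread_mutex_lock(&lock);
	req_fn = fn;
	req_data = data;
	req_done = 0;
	pthread_cond_broadcast(&cond);
	while (!req_done)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	return req_ret;
}

static int commit_input(void *data)
{
	printf("commit handled in kdamond context\n");
	return 0;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, kdamond, NULL);
	return damon_call(commit_input, NULL);
}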
static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
@@ -1671,8 +1775,7 @@ static bool damon_sysfs_kdamonds_busy(struct damon_sysfs_kdamond **kdamonds,
int i;
for (i = 0; i < nr_kdamonds; i++) {
- if (damon_sysfs_kdamond_running(kdamonds[i]) ||
- damon_sysfs_cmd_request.kdamond == kdamonds[i])
+ if (damon_sysfs_kdamond_running(kdamonds[i]))
return true;
}
diff --git a/mm/damon/tests/core-kunit.h b/mm/damon/tests/core-kunit.h
index 532c6a6f21f9..be0fea9ee5fc 100644
--- a/mm/damon/tests/core-kunit.h
+++ b/mm/damon/tests/core-kunit.h
@@ -348,19 +348,19 @@ static void damon_test_update_monitoring_result(struct kunit *test)
new_attrs = (struct damon_attrs){
.sample_interval = 100, .aggr_interval = 10000,};
- damon_update_monitoring_result(r, &old_attrs, &new_attrs);
+ damon_update_monitoring_result(r, &old_attrs, &new_attrs, false);
KUNIT_EXPECT_EQ(test, r->nr_accesses, 15);
KUNIT_EXPECT_EQ(test, r->age, 2);
new_attrs = (struct damon_attrs){
.sample_interval = 1, .aggr_interval = 1000};
- damon_update_monitoring_result(r, &old_attrs, &new_attrs);
+ damon_update_monitoring_result(r, &old_attrs, &new_attrs, false);
KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
KUNIT_EXPECT_EQ(test, r->age, 2);
new_attrs = (struct damon_attrs){
.sample_interval = 1, .aggr_interval = 100};
- damon_update_monitoring_result(r, &old_attrs, &new_attrs);
+ damon_update_monitoring_result(r, &old_attrs, &new_attrs, false);
KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
KUNIT_EXPECT_EQ(test, r->age, 20);
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index a6174f725bd7..e6d99106a7f9 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -710,7 +710,6 @@ static int __init damon_va_initcall(void)
.update = damon_va_update,
.prepare_access_checks = damon_va_prepare_access_checks,
.check_accesses = damon_va_check_accesses,
- .reset_aggregated = NULL,
.target_valid = damon_va_target_valid,
.cleanup = NULL,
.apply_scheme = damon_va_apply_scheme,
diff --git a/mm/debug.c b/mm/debug.c
index 8d2acf432385..db83e381a8ae 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -79,12 +79,17 @@ static void __dump_folio(struct folio *folio, struct page *page,
folio_ref_count(folio), mapcount, mapping,
folio->index + idx, pfn);
if (folio_test_large(folio)) {
+ int pincount = 0;
+
+ if (folio_has_pincount(folio))
+ pincount = atomic_read(&folio->_pincount);
+
pr_warn("head: order:%u mapcount:%d entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
folio_order(folio),
folio_mapcount(folio),
folio_entire_mapcount(folio),
folio_nr_pages_mapped(folio),
- atomic_read(&folio->_pincount));
+ pincount);
}
#ifdef CONFIG_MEMCG
@@ -146,6 +151,9 @@ again:
if (idx < MAX_FOLIO_NR_PAGES) {
memcpy(&folio, foliop, 2 * sizeof(struct page));
nr_pages = folio_nr_pages(&folio);
+ if (nr_pages > 1)
+ memcpy(&folio.__page_2, &foliop->__page_2,
+ sizeof(struct page));
foliop = &folio;
}
@@ -165,7 +173,7 @@ dump:
void dump_page(const struct page *page, const char *reason)
{
if (PagePoisoned(page))
- pr_warn("page:%p is uninitialized and poisoned", page);
+ pr_warn("page:%p is uninitialized and poisoned\n", page);
else
__dump_page(page);
if (reason)
@@ -181,11 +189,17 @@ void dump_vma(const struct vm_area_struct *vma)
pr_emerg("vma %px start %px end %px mm %px\n"
"prot %lx anon_vma %px vm_ops %px\n"
"pgoff %lx file %px private_data %px\n"
+#ifdef CONFIG_PER_VMA_LOCK
+ "refcnt %x\n"
+#endif
"flags: %#lx(%pGv)\n",
vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
(unsigned long)pgprot_val(vma->vm_page_prot),
vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
vma->vm_file, vma->vm_private_data,
+#ifdef CONFIG_PER_VMA_LOCK
+ refcount_read(&vma->vm_refcnt),
+#endif
vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);
@@ -261,16 +275,19 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason)
pr_warn("vmg %px state: mm %px pgoff %lx\n"
"vmi %px [%lx,%lx)\n"
- "prev %px next %px vma %px\n"
+ "prev %px middle %px next %px target %px\n"
"start %lx end %lx flags %lx\n"
"file %px anon_vma %px policy %px\n"
"uffd_ctx %px\n"
"anon_name %px\n"
- "merge_flags %x state %x\n",
+ "state %x\n"
+ "just_expand %d\n"
+ "__adjust_middle_start %d __adjust_next_start %d\n"
+ "__remove_middle %d __remove_next %d\n",
vmg, vmg->mm, vmg->pgoff,
vmg->vmi, vmg->vmi ? vma_iter_addr(vmg->vmi) : 0,
vmg->vmi ? vma_iter_end(vmg->vmi) : 0,
- vmg->prev, vmg->next, vmg->vma,
+ vmg->prev, vmg->middle, vmg->next, vmg->target,
vmg->start, vmg->end, vmg->flags,
vmg->file, vmg->anon_vma, vmg->policy,
#ifdef CONFIG_USERFAULTFD
@@ -279,7 +296,10 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason)
(void *)0,
#endif
vmg->anon_name,
- (int)vmg->merge_flags, (int)vmg->state);
+ (int)vmg->state,
+ vmg->just_expand,
+ vmg->__adjust_middle_start, vmg->__adjust_next_start,
+ vmg->__remove_middle, vmg->__remove_next);
if (vmg->mm) {
pr_warn("vmg %px mm:\n", vmg);
@@ -288,13 +308,6 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason)
pr_warn("vmg %px mm: (NULL)\n", vmg);
}
- if (vmg->vma) {
- pr_warn("vmg %px vma:\n", vmg);
- dump_vma(vmg->vma);
- } else {
- pr_warn("vmg %px vma: (NULL)\n", vmg);
- }
-
if (vmg->prev) {
pr_warn("vmg %px prev:\n", vmg);
dump_vma(vmg->prev);
@@ -302,6 +315,13 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason)
pr_warn("vmg %px prev: (NULL)\n", vmg);
}
+ if (vmg->middle) {
+ pr_warn("vmg %px middle:\n", vmg);
+ dump_vma(vmg->middle);
+ } else {
+ pr_warn("vmg %px middle: (NULL)\n", vmg);
+ }
+
if (vmg->next) {
pr_warn("vmg %px next:\n", vmg);
dump_vma(vmg->next);
diff --git a/mm/filemap.c b/mm/filemap.c
index cd586203e299..b5e784f34d98 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -227,15 +227,12 @@ void __filemap_remove_folio(struct folio *folio, void *shadow)
void filemap_free_folio(struct address_space *mapping, struct folio *folio)
{
void (*free_folio)(struct folio *);
- int refs = 1;
free_folio = mapping->a_ops->free_folio;
if (free_folio)
free_folio(folio);
- if (folio_test_large(folio))
- refs = folio_nr_pages(folio);
- folio_put_refs(folio, refs);
+ folio_put_refs(folio, folio_nr_pages(folio));
}
/**
@@ -860,11 +857,10 @@ EXPORT_SYMBOL_GPL(replace_page_cache_folio);
noinline int __filemap_add_folio(struct address_space *mapping,
struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
{
- XA_STATE(xas, &mapping->i_pages, index);
- void *alloced_shadow = NULL;
- int alloced_order = 0;
+ XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
bool huge;
long nr;
+ unsigned int forder = folio_order(folio);
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
@@ -873,7 +869,6 @@ noinline int __filemap_add_folio(struct address_space *mapping,
mapping_set_update(&xas, mapping);
VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
- xas_set_order(&xas, index, folio_order(folio));
huge = folio_test_hugetlb(folio);
nr = folio_nr_pages(folio);
@@ -883,7 +878,7 @@ noinline int __filemap_add_folio(struct address_space *mapping,
folio->index = xas.xa_index;
for (;;) {
- int order = -1, split_order = 0;
+ int order = -1;
void *entry, *old = NULL;
xas_lock_irq(&xas);
@@ -901,21 +896,25 @@ noinline int __filemap_add_folio(struct address_space *mapping,
order = xas_get_order(&xas);
}
- /* entry may have changed before we re-acquire the lock */
- if (alloced_order && (old != alloced_shadow || order != alloced_order)) {
- xas_destroy(&xas);
- alloced_order = 0;
- }
-
if (old) {
- if (order > 0 && order > folio_order(folio)) {
+ if (order > 0 && order > forder) {
+ unsigned int split_order = max(forder,
+ xas_try_split_min_order(order));
+
/* How to handle large swap entries? */
BUG_ON(shmem_mapping(mapping));
- if (!alloced_order) {
- split_order = order;
- goto unlock;
+
+ while (order > forder) {
+ xas_set_order(&xas, index, split_order);
+ xas_try_split(&xas, old, order);
+ if (xas_error(&xas))
+ goto unlock;
+ order = split_order;
+ split_order =
+ max(xas_try_split_min_order(
+ split_order),
+ forder);
}
- xas_split(&xas, old, order);
xas_reset(&xas);
}
if (shadowp)
@@ -939,17 +938,6 @@ noinline int __filemap_add_folio(struct address_space *mapping,
unlock:
xas_unlock_irq(&xas);
- /* split needed, alloc here and retry. */
- if (split_order) {
- xas_split_alloc(&xas, old, split_order, gfp);
- if (xas_error(&xas))
- goto error;
- alloced_shadow = old;
- alloced_order = split_order;
- xas_reset(&xas);
- continue;
- }
-
if (!xas_nomem(&xas, gfp))
break;
}
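
The rewritten loop lowers the shadow entry's order in stages because xas_try_split() can only peel off one xarray level per call. A standalone model of the resulting order sequence, assuming xas_try_split_min_order() rounds down to the next XA_CHUNK_SHIFT (6) boundary, as in the xarray patch this builds on:

#include <stdio.h>

#define XA_CHUNK_SHIFT 6

/* Assumed behaviour of xas_try_split_min_order(): the lowest order an
 * entry of the given order can reach with a single xas_try_split(). */
static unsigned int try_split_min_order(unsigned int order)
{
	if (order % XA_CHUNK_SHIFT == 0)
		return order == 0 ? 0 : order - 1;
	return order - (order % XA_CHUNK_SHIFT);
}

int main(void)
{
	unsigned int order = 9;		/* existing order-9 shadow entry */
	unsigned int forder = 0;	/* order of the folio being added */

	/* Mirrors the while (order > forder) loop in __filemap_add_folio():
	 * for 9 -> 0 this prints the steps 9 -> 6 -> 5 -> 0. */
	while (order > forder) {
		unsigned int split = try_split_min_order(order);

		if (split < forder)
			split = forder;
		printf("xas_try_split: order %u -> %u\n", order, split);
		order = split;
	}
	return 0;
}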
diff --git a/mm/gup.c b/mm/gup.c
index 855ab860f88b..92351e2fa876 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -96,8 +96,7 @@ retry:
* belongs to this folio.
*/
if (unlikely(page_folio(page) != folio)) {
- if (!put_devmap_managed_folio_refs(folio, refs))
- folio_put_refs(folio, refs);
+ folio_put_refs(folio, refs);
goto retry;
}
@@ -110,14 +109,13 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
if (is_zero_folio(folio))
return;
node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
- if (folio_test_large(folio))
+ if (folio_has_pincount(folio))
atomic_sub(refs, &folio->_pincount);
else
refs *= GUP_PIN_COUNTING_BIAS;
}
- if (!put_devmap_managed_folio_refs(folio, refs))
- folio_put_refs(folio, refs);
+ folio_put_refs(folio, refs);
}
/**
@@ -166,7 +164,7 @@ int __must_check try_grab_folio(struct folio *folio, int refs,
* Increment the normal page refcount field at least once,
* so that the page really is pinned.
*/
- if (folio_test_large(folio)) {
+ if (folio_has_pincount(folio)) {
folio_ref_add(folio, refs);
atomic_add(refs, &folio->_pincount);
} else {
@@ -225,7 +223,7 @@ void folio_add_pin(struct folio *folio)
* page refcount field at least once, so that the page really is
* pinned.
*/
- if (folio_test_large(folio)) {
+ if (folio_has_pincount(folio)) {
WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1);
folio_ref_inc(folio);
atomic_inc(&folio->_pincount);
@@ -565,8 +563,7 @@ static struct folio *try_grab_folio_fast(struct page *page, int refs,
*/
if (unlikely((flags & FOLL_LONGTERM) &&
!folio_is_longterm_pinnable(folio))) {
- if (!put_devmap_managed_folio_refs(folio, refs))
- folio_put_refs(folio, refs);
+ folio_put_refs(folio, refs);
return NULL;
}
@@ -578,7 +575,7 @@ static struct folio *try_grab_folio_fast(struct page *page, int refs,
* is pinned. That's why the refcount from the earlier
* try_get_folio() is left intact.
*/
- if (folio_test_large(folio))
+ if (folio_has_pincount(folio))
atomic_add(refs, &folio->_pincount);
else
folio_ref_add(folio,
@@ -1283,6 +1280,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
return -EOPNOTSUPP;
+ if ((gup_flags & FOLL_SPLIT_PMD) && is_vm_hugetlb_page(vma))
+ return -EOPNOTSUPP;
+
if (vma_is_secretmem(vma))
return -EFAULT;
@@ -2757,7 +2757,7 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
*
* *) ptes can be read atomically by the architecture.
*
- * *) access_ok is sufficient to validate userspace address ranges.
+ * *) valid user addresses are below TASK_SIZE_MAX.
*
* The last two assumptions can be relaxed by the addition of helper functions.
*
@@ -3010,11 +3010,6 @@ static int gup_fast_devmap_leaf(unsigned long pfn, unsigned long addr,
break;
}
- if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) {
- gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
- break;
- }
-
folio = try_grab_folio_fast(page, 1, flags);
if (!folio) {
gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
@@ -3411,8 +3406,6 @@ static int gup_fast_fallback(unsigned long start, unsigned long nr_pages,
return -EOVERFLOW;
if (end > TASK_SIZE_MAX)
return -EFAULT;
- if (unlikely(!access_ok((void __user *)start, len)))
- return -EFAULT;
nr_pinned = gup_fast(start, end, gup_flags, pages);
if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
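
Throughout this file, folio_has_pincount() now decides whether pins are tracked in the dedicated _pincount field or folded into the refcount via a bias. A userspace model of the two schemes; GUP_PIN_COUNTING_BIAS is 1024 in the kernel, and the has_pincount flag here is a simplification of the real size-based test:

#include <stdbool.h>
#include <stdio.h>

#define GUP_PIN_COUNTING_BIAS 1024

struct folio_model {
	int refcount;
	int pincount;	/* only meaningful when has_pincount */
	bool has_pincount;
};

/* Mirrors try_grab_folio(): folios with a pincount get refs plus
 * _pincount; others encode pins as refcount multiples of the bias. */
static void grab(struct folio_model *f, int refs)
{
	if (f->has_pincount) {
		f->refcount += refs;
		f->pincount += refs;
	} else {
		f->refcount += refs * GUP_PIN_COUNTING_BIAS;
	}
}

/* Mirrors gup_put_folio() after the devmap special case was removed:
 * undo the pin accounting, then drop the plain references. */
static void put(struct folio_model *f, int refs)
{
	if (f->has_pincount) {
		f->pincount -= refs;
		f->refcount -= refs;
	} else {
		f->refcount -= refs * GUP_PIN_COUNTING_BIAS;
	}
}

int main(void)
{
	struct folio_model large = { .refcount = 1, .has_pincount = true };
	struct folio_model small = { .refcount = 1, .has_pincount = false };

	grab(&large, 2);
	grab(&small, 2);
	printf("large: ref %d pin %d\n", large.refcount, large.pincount);
	printf("small: ref %d (pinned iff ref >= bias)\n", small.refcount);
	put(&large, 2);
	put(&small, 2);
	return 0;
}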
diff --git a/mm/hmm.c b/mm/hmm.c
index 7e0229ae4a5a..082f7b7c0b9e 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -248,7 +248,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
* just report the PFN.
*/
if (is_device_private_entry(entry) &&
- pfn_swap_entry_to_page(entry)->pgmap->owner ==
+ page_pgmap(pfn_swap_entry_to_page(entry))->owner ==
range->dev_private_owner) {
cpu_flags = HMM_PFN_VALID;
if (is_writable_device_private_entry(entry))
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 373781b21e5c..2a47682d1ab7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1309,8 +1309,6 @@ static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm,
struct folio *zero_folio)
{
pmd_t entry;
- if (!pmd_none(*pmd))
- return;
entry = mk_pmd(&zero_folio->page, vma->vm_page_prot);
entry = pmd_mkhuge(entry);
pgtable_trans_huge_deposit(mm, pmd, pgtable);
@@ -1375,20 +1373,20 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
return __do_huge_pmd_anonymous_page(vmf);
}
-static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
+static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
pgtable_t pgtable)
{
struct mm_struct *mm = vma->vm_mm;
pmd_t entry;
- spinlock_t *ptl;
- ptl = pmd_lock(mm, pmd);
+ lockdep_assert_held(pmd_lockptr(mm, pmd));
+
if (!pmd_none(*pmd)) {
if (write) {
if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
- goto out_unlock;
+ return -EEXIST;
}
entry = pmd_mkyoung(*pmd);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
@@ -1396,7 +1394,7 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
update_mmu_cache_pmd(vma, addr, pmd);
}
- goto out_unlock;
+ return -EEXIST;
}
entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
@@ -1412,16 +1410,11 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
if (pgtable) {
pgtable_trans_huge_deposit(mm, pmd, pgtable);
mm_inc_nr_ptes(mm);
- pgtable = NULL;
}
set_pmd_at(mm, addr, pmd, entry);
update_mmu_cache_pmd(vma, addr, pmd);
-
-out_unlock:
- spin_unlock(ptl);
- if (pgtable)
- pte_free(mm, pgtable);
+ return 0;
}
/**
@@ -1440,6 +1433,8 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
struct vm_area_struct *vma = vmf->vma;
pgprot_t pgprot = vma->vm_page_prot;
pgtable_t pgtable = NULL;
+ spinlock_t *ptl;
+ int error;
/*
* If we had pmd_special, we could avoid all these restrictions,
@@ -1462,12 +1457,56 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
}
track_pfn_insert(vma, &pgprot, pfn);
+ ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+ error = insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write,
+ pgtable);
+ spin_unlock(ptl);
+ if (error && pgtable)
+ pte_free(vma->vm_mm, pgtable);
- insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
+vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
+ bool write)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ unsigned long addr = vmf->address & PMD_MASK;
+ struct mm_struct *mm = vma->vm_mm;
+ spinlock_t *ptl;
+ pgtable_t pgtable = NULL;
+ int error;
+
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ return VM_FAULT_SIGBUS;
+
+ if (WARN_ON_ONCE(folio_order(folio) != PMD_ORDER))
+ return VM_FAULT_SIGBUS;
+
+ if (arch_needs_pgtable_deposit()) {
+ pgtable = pte_alloc_one(vma->vm_mm);
+ if (!pgtable)
+ return VM_FAULT_OOM;
+ }
+
+ ptl = pmd_lock(mm, vmf->pmd);
+ if (pmd_none(*vmf->pmd)) {
+ folio_get(folio);
+ folio_add_file_rmap_pmd(folio, &folio->page, vma);
+ add_mm_counter(mm, mm_counter_file(folio), HPAGE_PMD_NR);
+ }
+ error = insert_pfn_pmd(vma, addr, vmf->pmd,
+ pfn_to_pfn_t(folio_pfn(folio)), vma->vm_page_prot,
+ write, pgtable);
+ spin_unlock(ptl);
+ if (error && pgtable)
+ pte_free(mm, pgtable);
+
+ return VM_FAULT_NOPAGE;
+}
+EXPORT_SYMBOL_GPL(vmf_insert_folio_pmd);
+
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
@@ -1482,19 +1521,17 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
struct mm_struct *mm = vma->vm_mm;
pgprot_t prot = vma->vm_page_prot;
pud_t entry;
- spinlock_t *ptl;
- ptl = pud_lock(mm, pud);
if (!pud_none(*pud)) {
if (write) {
if (WARN_ON_ONCE(pud_pfn(*pud) != pfn_t_to_pfn(pfn)))
- goto out_unlock;
+ return;
entry = pud_mkyoung(*pud);
entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
if (pudp_set_access_flags(vma, addr, pud, entry, 1))
update_mmu_cache_pud(vma, addr, pud);
}
- goto out_unlock;
+ return;
}
entry = pud_mkhuge(pfn_t_pud(pfn, prot));
@@ -1508,9 +1545,6 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
}
set_pud_at(mm, addr, pud, entry);
update_mmu_cache_pud(vma, addr, pud);
-
-out_unlock:
- spin_unlock(ptl);
}
/**
@@ -1528,6 +1562,7 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
unsigned long addr = vmf->address & PUD_MASK;
struct vm_area_struct *vma = vmf->vma;
pgprot_t pgprot = vma->vm_page_prot;
+ spinlock_t *ptl;
/*
* If we had pud_special, we could avoid all these restrictions,
@@ -1545,10 +1580,57 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
track_pfn_insert(vma, &pgprot, pfn);
+ ptl = pud_lock(vma->vm_mm, vmf->pud);
insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
+ spin_unlock(ptl);
+
return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
+
+/**
+ * vmf_insert_folio_pud - insert a pud size folio mapped by a pud entry
+ * @vmf: Structure describing the fault
+ * @folio: folio to insert
+ * @write: whether it's a write fault
+ *
+ * Return: vm_fault_t value.
+ */
+vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
+ bool write)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ unsigned long addr = vmf->address & PUD_MASK;
+ pud_t *pud = vmf->pud;
+ struct mm_struct *mm = vma->vm_mm;
+ spinlock_t *ptl;
+
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ return VM_FAULT_SIGBUS;
+
+ if (WARN_ON_ONCE(folio_order(folio) != PUD_ORDER))
+ return VM_FAULT_SIGBUS;
+
+ ptl = pud_lock(mm, pud);
+
+ /*
+ * If there is already an entry present we assume the folio is
+ * already mapped, hence no need to take another reference. We
+ * still call insert_pfn_pud() though in case the mapping needs
+ * upgrading to writeable.
+ */
+ if (pud_none(*vmf->pud)) {
+ folio_get(folio);
+ folio_add_file_rmap_pud(folio, &folio->page, vma);
+ add_mm_counter(mm, mm_counter_file(folio), HPAGE_PUD_NR);
+ }
+ insert_pfn_pud(vma, addr, vmf->pud, pfn_to_pfn_t(folio_pfn(folio)),
+ write);
+ spin_unlock(ptl);
+
+ return VM_FAULT_NOPAGE;
+}
+EXPORT_SYMBOL_GPL(vmf_insert_folio_pud);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -1698,7 +1780,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
src_folio = page_folio(src_page);
folio_get(src_folio);
- if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) {
+ if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, dst_vma, src_vma))) {
/* Page maybe pinned: split and retry the fault on PTEs. */
folio_put(src_folio);
pte_free(dst_mm, pgtable);
@@ -2071,7 +2153,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
* If other processes are mapping this folio, we couldn't discard
* the folio unless they all do MADV_FREE so let's skip the folio.
*/
- if (folio_likely_mapped_shared(folio))
+ if (folio_maybe_mapped_shared(folio))
goto out;
if (!folio_trylock(folio))
@@ -2141,12 +2223,13 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
tlb->fullmm);
arch_check_zapped_pmd(vma, orig_pmd);
tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
- if (vma_is_special_huge(vma)) {
+ if (!vma_is_dax(vma) && vma_is_special_huge(vma)) {
if (arch_needs_pgtable_deposit())
zap_deposited_table(tlb->mm, pmd);
spin_unlock(ptl);
} else if (is_huge_zero_pmd(orig_pmd)) {
- zap_deposited_table(tlb->mm, pmd);
+ if (!vma_is_dax(vma) || arch_needs_pgtable_deposit())
+ zap_deposited_table(tlb->mm, pmd);
spin_unlock(ptl);
} else {
struct folio *folio = NULL;
@@ -2646,12 +2729,24 @@ int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
orig_pud = pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
arch_check_zapped_pud(vma, orig_pud);
tlb_remove_pud_tlb_entry(tlb, pud, addr);
- if (vma_is_special_huge(vma)) {
+ if (!vma_is_dax(vma) && vma_is_special_huge(vma)) {
spin_unlock(ptl);
/* No zero page support yet */
} else {
- /* No support for anonymous PUD pages yet */
- BUG();
+ struct page *page = NULL;
+ struct folio *folio;
+
+ /* No support for anonymous PUD pages or migration yet */
+ VM_WARN_ON_ONCE(vma_is_anonymous(vma) ||
+ !pud_present(orig_pud));
+
+ page = pud_page(orig_pud);
+ folio = page_folio(page);
+ folio_remove_rmap_pud(folio, page, vma);
+ add_mm_counter(tlb->mm, mm_counter_file(folio), -HPAGE_PUD_NR);
+
+ spin_unlock(ptl);
+ tlb_remove_page_size(tlb, page, HPAGE_PUD_SIZE);
}
return 1;
}
@@ -2659,6 +2754,10 @@ int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
unsigned long haddr)
{
+ struct folio *folio;
+ struct page *page;
+ pud_t old_pud;
+
VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
@@ -2666,7 +2765,22 @@ static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
count_vm_event(THP_SPLIT_PUD);
- pudp_huge_clear_flush(vma, haddr, pud);
+ old_pud = pudp_huge_clear_flush(vma, haddr, pud);
+
+ if (!vma_is_dax(vma))
+ return;
+
+ page = pud_page(old_pud);
+ folio = page_folio(page);
+
+ if (!folio_test_dirty(folio) && pud_dirty(old_pud))
+ folio_mark_dirty(folio);
+ if (!folio_test_referenced(folio) && pud_young(old_pud))
+ folio_set_referenced(folio);
+ folio_remove_rmap_pud(folio, page, vma);
+ folio_put(folio);
+ add_mm_counter(vma->vm_mm, mm_counter_file(folio),
+ -HPAGE_PUD_NR);
}
void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
@@ -2766,13 +2880,15 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
*/
if (arch_needs_pgtable_deposit())
zap_deposited_table(mm, pmd);
- if (vma_is_special_huge(vma))
+ if (!vma_is_dax(vma) && vma_is_special_huge(vma))
return;
if (unlikely(is_pmd_migration_entry(old_pmd))) {
swp_entry_t entry;
entry = pmd_to_swp_entry(old_pmd);
folio = pfn_swap_entry_folio(entry);
+ } else if (is_huge_zero_pmd(old_pmd)) {
+ return;
} else {
page = pmd_page(old_pmd);
folio = page_folio(page);
@@ -3017,9 +3133,9 @@ static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned
}
void vma_adjust_trans_huge(struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end,
- long adjust_next)
+ unsigned long start,
+ unsigned long end,
+ struct vm_area_struct *next)
{
/* Check if we need to split start first. */
split_huge_pmd_if_needed(vma, start);
@@ -3027,16 +3143,9 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
/* Check if we need to split end next. */
split_huge_pmd_if_needed(vma, end);
- /*
- * If we're also updating the next vma vm_start,
- * check if we need to split it.
- */
- if (adjust_next > 0) {
- struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
- unsigned long nstart = next->vm_start;
- nstart += adjust_next;
- split_huge_pmd_if_needed(next, nstart);
- }
+ /* If we're incrementing next->vm_start, we might need to split it. */
+ if (next)
+ split_huge_pmd_if_needed(next, end);
}
static void unmap_folio(struct folio *folio)
@@ -3070,8 +3179,12 @@ static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma,
int ref_count, map_count;
pmd_t orig_pmd = *pmdp;
- if (folio_test_dirty(folio) || pmd_dirty(orig_pmd))
+ if (pmd_dirty(orig_pmd))
+ folio_set_dirty(folio);
+ if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) {
+ folio_set_swapbacked(folio);
return false;
+ }
orig_pmd = pmdp_huge_clear_flush(vma, addr, pmdp);
@@ -3098,8 +3211,15 @@ static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma,
*
* The only folio refs must be one from isolation plus the rmap(s).
*/
- if (folio_test_dirty(folio) || pmd_dirty(orig_pmd) ||
- ref_count != map_count + 1) {
+ if (pmd_dirty(orig_pmd))
+ folio_set_dirty(folio);
+ if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) {
+ folio_set_swapbacked(folio);
+ set_pmd_at(mm, addr, pmdp, orig_pmd);
+ return false;
+ }
+
+ if (ref_count != map_count + 1) {
set_pmd_at(mm, addr, pmdp, orig_pmd);
return false;
}
@@ -3119,12 +3239,11 @@ bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
{
VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio);
VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+ VM_WARN_ON_FOLIO(folio_test_swapbacked(folio), folio);
VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE));
- if (folio_test_anon(folio) && !folio_test_swapbacked(folio))
- return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio);
-
- return false;
+ return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio);
}
static void remap_page(struct folio *folio, unsigned long nr, int flags)
@@ -3143,225 +3262,378 @@ static void remap_page(struct folio *folio, unsigned long nr, int flags)
}
}
-static void lru_add_page_tail(struct folio *folio, struct page *tail,
+static void lru_add_split_folio(struct folio *folio, struct folio *new_folio,
struct lruvec *lruvec, struct list_head *list)
{
- VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
- VM_BUG_ON_FOLIO(PageLRU(tail), folio);
+ VM_BUG_ON_FOLIO(folio_test_lru(new_folio), folio);
lockdep_assert_held(&lruvec->lru_lock);
if (list) {
/* page reclaim is reclaiming a huge page */
VM_WARN_ON(folio_test_lru(folio));
- get_page(tail);
- list_add_tail(&tail->lru, list);
+ folio_get(new_folio);
+ list_add_tail(&new_folio->lru, list);
} else {
/* head is still on lru (and we have it frozen) */
VM_WARN_ON(!folio_test_lru(folio));
if (folio_test_unevictable(folio))
- tail->mlock_count = 0;
+ new_folio->mlock_count = 0;
else
- list_add_tail(&tail->lru, &folio->lru);
- SetPageLRU(tail);
+ list_add_tail(&new_folio->lru, &folio->lru);
+ folio_set_lru(new_folio);
}
}
-static void __split_huge_page_tail(struct folio *folio, int tail,
- struct lruvec *lruvec, struct list_head *list,
- unsigned int new_order)
+/* Racy check whether the huge page can be split */
+bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
{
- struct page *head = &folio->page;
- struct page *page_tail = head + tail;
- /*
- * Careful: new_folio is not a "real" folio before we cleared PageTail.
- * Don't pass it around before clear_compound_head().
- */
- struct folio *new_folio = (struct folio *)page_tail;
+ int extra_pins;
+
+ /* Additional pins from page cache */
+ if (folio_test_anon(folio))
+ extra_pins = folio_test_swapcache(folio) ?
+ folio_nr_pages(folio) : 0;
+ else
+ extra_pins = folio_nr_pages(folio);
+ if (pextra_pins)
+ *pextra_pins = extra_pins;
+ return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins -
+ caller_pins;
+}
- VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
+/*
+ * It splits @folio into @new_order folios and copies the @folio metadata to
+ * all the resulting folios.
+ */
+static void __split_folio_to_order(struct folio *folio, int old_order,
+ int new_order)
+{
+ long new_nr_pages = 1 << new_order;
+ long nr_pages = 1 << old_order;
+ long i;
/*
- * Clone page flags before unfreezing refcount.
- *
- * After successful get_page_unless_zero() might follow flags change,
- * for example lock_page() which set PG_waiters.
- *
- * Note that for mapped sub-pages of an anonymous THP,
- * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
- * the migration entry instead from where remap_page() will restore it.
- * We can still have PG_anon_exclusive set on effectively unmapped and
- * unreferenced sub-pages of an anonymous THP: we can simply drop
- * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
+ * Skip the first new_nr_pages, since the new folio formed from them
+ * already has all the flags of the original folio.
*/
- page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
- page_tail->flags |= (head->flags &
- ((1L << PG_referenced) |
- (1L << PG_swapbacked) |
- (1L << PG_swapcache) |
- (1L << PG_mlocked) |
- (1L << PG_uptodate) |
- (1L << PG_active) |
- (1L << PG_workingset) |
- (1L << PG_locked) |
- (1L << PG_unevictable) |
+ for (i = new_nr_pages; i < nr_pages; i += new_nr_pages) {
+ struct page *new_head = &folio->page + i;
+
+ /*
+ * Careful: new_folio is not a "real" folio before we cleared PageTail.
+ * Don't pass it around before clear_compound_head().
+ */
+ struct folio *new_folio = (struct folio *)new_head;
+
+ VM_BUG_ON_PAGE(atomic_read(&new_folio->_mapcount) != -1, new_head);
+
+ /*
+ * Clone page flags before unfreezing refcount.
+ *
+ * After successful get_page_unless_zero() might follow flags change,
+ * for example lock_page() which set PG_waiters.
+ *
+ * Note that for mapped sub-pages of an anonymous THP,
+ * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
+ * the migration entry instead from where remap_page() will restore it.
+ * We can still have PG_anon_exclusive set on effectively unmapped and
+ * unreferenced sub-pages of an anonymous THP: we can simply drop
+ * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
+ */
+ new_folio->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+ new_folio->flags |= (folio->flags &
+ ((1L << PG_referenced) |
+ (1L << PG_swapbacked) |
+ (1L << PG_swapcache) |
+ (1L << PG_mlocked) |
+ (1L << PG_uptodate) |
+ (1L << PG_active) |
+ (1L << PG_workingset) |
+ (1L << PG_locked) |
+ (1L << PG_unevictable) |
#ifdef CONFIG_ARCH_USES_PG_ARCH_2
- (1L << PG_arch_2) |
+ (1L << PG_arch_2) |
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_3
- (1L << PG_arch_3) |
+ (1L << PG_arch_3) |
#endif
- (1L << PG_dirty) |
- LRU_GEN_MASK | LRU_REFS_MASK));
+ (1L << PG_dirty) |
+ LRU_GEN_MASK | LRU_REFS_MASK));
- /* ->mapping in first and second tail page is replaced by other uses */
- VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
- page_tail);
- new_folio->mapping = folio->mapping;
- new_folio->index = folio->index + tail;
+ new_folio->mapping = folio->mapping;
+ new_folio->index = folio->index + i;
- /*
- * page->private should not be set in tail pages. Fix up and warn once
- * if private is unexpectedly set.
- */
- if (unlikely(page_tail->private)) {
- VM_WARN_ON_ONCE_PAGE(true, page_tail);
- page_tail->private = 0;
- }
- if (folio_test_swapcache(folio))
- new_folio->swap.val = folio->swap.val + tail;
+ /*
+ * page->private should not be set in tail pages. Fix up and warn once
+ * if private is unexpectedly set.
+ */
+ if (unlikely(new_folio->private)) {
+ VM_WARN_ON_ONCE_PAGE(true, new_head);
+ new_folio->private = NULL;
+ }
- /* Page flags must be visible before we make the page non-compound. */
- smp_wmb();
+ if (folio_test_swapcache(folio))
+ new_folio->swap.val = folio->swap.val + i;
- /*
- * Clear PageTail before unfreezing page refcount.
- *
- * After successful get_page_unless_zero() might follow put_page()
- * which needs correct compound_head().
- */
- clear_compound_head(page_tail);
- if (new_order) {
- prep_compound_page(page_tail, new_order);
- folio_set_large_rmappable(new_folio);
- }
+ /* Page flags must be visible before we make the page non-compound. */
+ smp_wmb();
- /* Finally unfreeze refcount. Additional reference from page cache. */
- page_ref_unfreeze(page_tail,
- 1 + ((!folio_test_anon(folio) || folio_test_swapcache(folio)) ?
- folio_nr_pages(new_folio) : 0));
+ /*
+ * Clear PageTail before unfreezing page refcount.
+ *
+ * After successful get_page_unless_zero() might follow put_page()
+ * which needs correct compound_head().
+ */
+ clear_compound_head(new_head);
+ if (new_order) {
+ prep_compound_page(new_head, new_order);
+ folio_set_large_rmappable(new_folio);
+ }
- if (folio_test_young(folio))
- folio_set_young(new_folio);
- if (folio_test_idle(folio))
- folio_set_idle(new_folio);
+ if (folio_test_young(folio))
+ folio_set_young(new_folio);
+ if (folio_test_idle(folio))
+ folio_set_idle(new_folio);
+#ifdef CONFIG_MEMCG
+ new_folio->memcg_data = folio->memcg_data;
+#endif
- folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
+ folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
+ }
- /*
- * always add to the tail because some iterators expect new
- * pages to show after the currently processed elements - e.g.
- * migrate_pages
- */
- lru_add_page_tail(folio, page_tail, lruvec, list);
+ if (new_order)
+ folio_set_order(folio, new_order);
+ else
+ ClearPageCompound(&folio->page);
}
-static void __split_huge_page(struct page *page, struct list_head *list,
- pgoff_t end, unsigned int new_order)
+/*
+ * It splits an unmapped @folio into smaller folios of lower order in two ways.
+ * @folio: the to-be-split folio
+ * @new_order: the smallest order of the after-split folios (since buddy
+ * allocator like split generates folios with orders from @folio's
+ * order - 1 to @new_order).
+ * @split_at: in buddy allocator like split, the folio containing @split_at
+ * will be split until its order becomes @new_order.
+ * @lock_at: the folio containing @lock_at is left locked for caller.
+ * @list: the after-split folios will be added to @list if it is not NULL,
+ * otherwise to LRU lists.
+ * @end: the end of the file @folio maps to. -1 if @folio is anonymous memory.
+ * @xas: xa_state pointing to folio->mapping->i_pages and locked by caller
+ * @mapping: @folio->mapping
+ * @uniform_split: true for uniform split; false for buddy allocator like split
+ *
+ * 1. uniform split: the given @folio is split into multiple @new_order small
+ * folios, where all small folios have the same order. This is done when
+ * uniform_split is true.
+ * 2. buddy allocator like (non-uniform) split: the given @folio is split in
+ * half, and the half containing @split_at is split in half again, until the
+ * folio containing @split_at reaches @new_order. This is done when
+ * uniform_split is false.
+ *
+ * The high-level flow for these two methods is:
+ * 1. uniform split: a single __split_folio_to_order() is called to split the
+ * @folio into @new_order, then we traverse all the resulting folios one by
+ * one in PFN ascending order and perform stats, unfreeze, adding to list,
+ * and file mapping index operations.
+ * 2. non-uniform split: in general, folio_order - @new_order calls to
+ * __split_folio_to_order() are made in a for loop to split the @folio
+ * to one lower order at a time. The resulting small folios are processed
+ * like what is done during the traversal in 1, except the one containing
+ * @split_at, which is split in the next iteration of the loop.
+ *
+ * After splitting, the caller's folio reference will be transferred to the
+ * folio containing @split_at. The other folios may be freed if not mapped.
+ *
+ * In terms of locking, after splitting,
+ * 1. uniform split leaves @lock_at (or the folio containing it) locked;
+ * 2. buddy allocator like (non-uniform) split leaves @folio locked.
+ *
+ * For !uniform_split, when -ENOMEM is returned, the original folio might be
+ * split. The caller needs to check the input folio.
+ */
+static int __split_unmapped_folio(struct folio *folio, int new_order,
+ struct page *split_at, struct page *lock_at,
+ struct list_head *list, pgoff_t end,
+ struct xa_state *xas, struct address_space *mapping,
+ bool uniform_split)
{
- struct folio *folio = page_folio(page);
- struct page *head = &folio->page;
struct lruvec *lruvec;
struct address_space *swap_cache = NULL;
- unsigned long offset = 0;
- int i, nr_dropped = 0;
- unsigned int new_nr = 1 << new_order;
+ struct folio *origin_folio = folio;
+ struct folio *next_folio = folio_next(folio);
+ struct folio *new_folio;
+ struct folio *next;
int order = folio_order(folio);
- unsigned int nr = 1 << order;
+ int split_order;
+ int start_order = uniform_split ? new_order : order - 1;
+ int nr_dropped = 0;
+ int ret = 0;
+ bool stop_split = false;
+
+ if (folio_test_swapcache(folio)) {
+ VM_BUG_ON(mapping);
- /* complete memcg works before add pages to LRU */
- split_page_memcg(head, order, new_order);
+ /* a swapcache folio can only be uniformly split to order-0 */
+ if (!uniform_split || new_order != 0)
+ return -EINVAL;
- if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
- offset = swap_cache_index(folio->swap);
swap_cache = swap_address_space(folio->swap);
xa_lock(&swap_cache->i_pages);
}
+ if (folio_test_anon(folio))
+ mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
+
/* lock lru list/PageCompound, ref frozen by page_ref_freeze */
lruvec = folio_lruvec_lock(folio);
folio_clear_has_hwpoisoned(folio);
- for (i = nr - new_nr; i >= new_nr; i -= new_nr) {
- struct folio *tail;
- __split_huge_page_tail(folio, i, lruvec, list, new_order);
- tail = page_folio(head + i);
- /* Some pages can be beyond EOF: drop them from page cache */
- if (tail->index >= end) {
- if (shmem_mapping(folio->mapping))
- nr_dropped++;
- else if (folio_test_clear_dirty(tail))
- folio_account_cleaned(tail,
- inode_to_wb(folio->mapping->host));
- __filemap_remove_folio(tail, NULL);
- folio_put_refs(tail, folio_nr_pages(tail));
- } else if (!folio_test_anon(folio)) {
- __xa_store(&folio->mapping->i_pages, tail->index,
- tail, 0);
- } else if (swap_cache) {
- __xa_store(&swap_cache->i_pages, offset + i,
- tail, 0);
+ /*
+ * split to new_order one order at a time. For uniform split,
+ * folio is split to new_order directly.
+ */
+ for (split_order = start_order;
+ split_order >= new_order && !stop_split;
+ split_order--) {
+ int old_order = folio_order(folio);
+ struct folio *release;
+ struct folio *end_folio = folio_next(folio);
+
+ /* order-1 anonymous folio is not supported */
+ if (folio_test_anon(folio) && split_order == 1)
+ continue;
+ if (uniform_split && split_order != new_order)
+ continue;
+
+ if (mapping) {
+ /*
+ * uniform split has xas_split_alloc() called before
+ * irq is disabled to allocate enough memory, whereas
+ * non-uniform split can handle ENOMEM.
+ */
+ if (uniform_split)
+ xas_split(xas, folio, old_order);
+ else {
+ xas_set_order(xas, folio->index, split_order);
+ xas_try_split(xas, folio, old_order);
+ if (xas_error(xas)) {
+ ret = xas_error(xas);
+ stop_split = true;
+ goto after_split;
+ }
+ }
}
- }
- if (!new_order)
- ClearPageCompound(head);
- else {
- struct folio *new_folio = (struct folio *)head;
+ folio_split_memcg_refs(folio, old_order, split_order);
+ split_page_owner(&folio->page, old_order, split_order);
+ pgalloc_tag_split(folio, old_order, split_order);
- folio_set_order(new_folio, new_order);
- }
- unlock_page_lruvec(lruvec);
- /* Caller disabled irqs, so they are still disabled here */
+ __split_folio_to_order(folio, old_order, split_order);
- split_page_owner(head, order, new_order);
- pgalloc_tag_split(folio, order, new_order);
+after_split:
+ /*
+ * Iterate through after-split folios and perform related
+ * operations. But in buddy allocator like split, the folio
+ * containing the specified page is skipped until its order
+ * is new_order, since the folio will be worked on in the next
+ * iteration.
+ */
+ for (release = folio; release != end_folio; release = next) {
+ next = folio_next(release);
+ /*
+ * for buddy allocator like split, the folio containing
+ * @split_at will be split next and should not be released
+ * until its order is new_order or stop_split is set to
+ * true by the above xas_try_split() failure.
+ */
+ if (release == page_folio(split_at)) {
+ folio = release;
+ if (split_order != new_order && !stop_split)
+ continue;
+ }
+ if (folio_test_anon(release)) {
+ mod_mthp_stat(folio_order(release),
+ MTHP_STAT_NR_ANON, 1);
+ }
- /* See comment in __split_huge_page_tail() */
- if (folio_test_anon(folio)) {
- /* Additional pin to swap cache */
- if (folio_test_swapcache(folio)) {
- folio_ref_add(folio, 1 + new_nr);
- xa_unlock(&swap_cache->i_pages);
- } else {
- folio_ref_inc(folio);
+ /*
+ * origin_folio should be kept frozen until page cache
+ * entries are updated with all the other after-split
+ * folios to prevent others seeing stale page cache
+ * entries.
+ */
+ if (release == origin_folio)
+ continue;
+
+ folio_ref_unfreeze(release, 1 +
+ ((mapping || swap_cache) ?
+ folio_nr_pages(release) : 0));
+
+ lru_add_split_folio(origin_folio, release, lruvec,
+ list);
+
+ /* Some pages can be beyond EOF: drop them from cache */
+ if (release->index >= end) {
+ if (shmem_mapping(mapping))
+ nr_dropped += folio_nr_pages(release);
+ else if (folio_test_clear_dirty(release))
+ folio_account_cleaned(release,
+ inode_to_wb(mapping->host));
+ __filemap_remove_folio(release, NULL);
+ folio_put_refs(release, folio_nr_pages(release));
+ } else if (mapping) {
+ __xa_store(&mapping->i_pages,
+ release->index, release, 0);
+ } else if (swap_cache) {
+ __xa_store(&swap_cache->i_pages,
+ swap_cache_index(release->swap),
+ release, 0);
+ }
}
- } else {
- /* Additional pin to page cache */
- folio_ref_add(folio, 1 + new_nr);
- xa_unlock(&folio->mapping->i_pages);
}
+
+ /*
+ * Unfreeze origin_folio only after all page cache entries, which used
+ * to point to it, have been updated with new folios. Otherwise,
+ * a parallel folio_try_get() can grab origin_folio and its caller can
+ * see stale page cache entries.
+ */
+ folio_ref_unfreeze(origin_folio, 1 +
+ ((mapping || swap_cache) ? folio_nr_pages(origin_folio) : 0));
+
+ unlock_page_lruvec(lruvec);
+
+ if (swap_cache)
+ xa_unlock(&swap_cache->i_pages);
+ if (mapping)
+ xa_unlock(&mapping->i_pages);
+
+ /* Caller disabled irqs, so they are still disabled here */
local_irq_enable();
if (nr_dropped)
- shmem_uncharge(folio->mapping->host, nr_dropped);
- remap_page(folio, nr, PageAnon(head) ? RMP_USE_SHARED_ZEROPAGE : 0);
+ shmem_uncharge(mapping->host, nr_dropped);
+
+ remap_page(origin_folio, 1 << order,
+ folio_test_anon(origin_folio) ?
+ RMP_USE_SHARED_ZEROPAGE : 0);
/*
- * set page to its compound_head when split to non order-0 pages, so
- * we can skip unlocking it below, since PG_locked is transferred to
- * the compound_head of the page and the caller will unlock it.
+ * At this point, folio should contain the specified page.
+ * For uniform split, it is left for the caller to unlock.
+ * For buddy allocator like split, the first after-split folio is left
+ * for the caller to unlock.
*/
- if (new_order)
- page = compound_head(page);
-
- for (i = 0; i < nr; i += new_nr) {
- struct page *subpage = head + i;
- struct folio *new_folio = page_folio(subpage);
- if (subpage == page)
+ for (new_folio = origin_folio; new_folio != next_folio; new_folio = next) {
+ next = folio_next(new_folio);
+ if (new_folio == page_folio(lock_at))
continue;
- folio_unlock(new_folio);
+ folio_unlock(new_folio);
/*
* Subpages may be freed if there wasn't any mapping
* like if add_to_swap() is running on a lru page that
@@ -3369,81 +3641,90 @@ static void __split_huge_page(struct page *page, struct list_head *list,
* requires taking the lru_lock so we do the put_page
* of the tail pages after the split is complete.
*/
- free_page_and_swap_cache(subpage);
+ free_page_and_swap_cache(&new_folio->page);
}
+ return ret;
}
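
For intuition: a buddy allocator like split repeatedly halves only the folio containing split_at, so an order-9 folio split to order-3 leaves one folio each of orders 8 down to 4, plus two of order 3. A small sketch enumerating the resulting orders (a model of the split shape only, ignoring the anonymous order-1 restriction):

#include <stdio.h>

/* Enumerate the after-split folio orders of a non-uniform
 * (buddy allocator like) split from old_order down to new_order. */
static void non_uniform_orders(int old_order, int new_order)
{
	int order;

	/* Each step splits the folio containing split_at in half,
	 * leaving behind one sibling of every intermediate order. */
	for (order = old_order - 1; order > new_order; order--)
		printf("one folio of order %d\n", order);
	printf("two folios of order %d (one contains split_at)\n",
	       new_order);
}

int main(void)
{
	non_uniform_orders(9, 3);	/* e.g. a PMD-order THP on x86-64 */
	return 0;
}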
-/* Racy check whether the huge page can be split */
-bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
+bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
+ bool warns)
{
- int extra_pins;
+ if (folio_test_anon(folio)) {
+ /* order-1 is not supported for anonymous THP. */
+ VM_WARN_ONCE(warns && new_order == 1,
+ "Cannot split to order-1 folio");
+ return new_order != 1;
+ } else if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
+ !mapping_large_folio_support(folio->mapping)) {
+ /*
+ * No split if the file system does not support large folio.
+ * Note that we might still have THPs in such mappings due to
+ * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping
+ * does not actually support large folios properly.
+ */
+ VM_WARN_ONCE(warns,
+ "Cannot split file folio to non-0 order");
+ return false;
+ }
- /* Additional pins from page cache */
- if (folio_test_anon(folio))
- extra_pins = folio_test_swapcache(folio) ?
- folio_nr_pages(folio) : 0;
- else
- extra_pins = folio_nr_pages(folio);
- if (pextra_pins)
- *pextra_pins = extra_pins;
- return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins -
- caller_pins;
+ /* Only swapping a whole PMD-mapped folio is supported */
+ if (folio_test_swapcache(folio)) {
+ VM_WARN_ONCE(warns,
+ "Cannot split swapcache folio to non-0 order");
+ return false;
+ }
+
+ return true;
+}
+
+/* See comments in non_uniform_split_supported() */
+bool uniform_split_supported(struct folio *folio, unsigned int new_order,
+ bool warns)
+{
+ if (folio_test_anon(folio)) {
+ VM_WARN_ONCE(warns && new_order == 1,
+ "Cannot split to order-1 folio");
+ return new_order != 1;
+ } else if (new_order) {
+ if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
+ !mapping_large_folio_support(folio->mapping)) {
+ VM_WARN_ONCE(warns,
+ "Cannot split file folio to non-0 order");
+ return false;
+ }
+ }
+
+ if (new_order && folio_test_swapcache(folio)) {
+ VM_WARN_ONCE(warns,
+ "Cannot split swapcache folio to non-0 order");
+ return false;
+ }
+
+ return true;
}
/*
- * This function splits a large folio into smaller folios of order @new_order.
- * @page can point to any page of the large folio to split. The split operation
- * does not change the position of @page.
- *
- * Prerequisites:
- *
- * 1) The caller must hold a reference on the @page's owning folio, also known
- * as the large folio.
+ * __folio_split: split a folio at @split_at to a @new_order folio
+ * @folio: folio to split
+ * @new_order: the order of the new folio
+ * @split_at: a page within the new folio
+ * @lock_at: a page within @folio to be left locked for the caller
+ * @list: after-split folios will be put on it if non-NULL
+ * @uniform_split: true for uniform split; false for non-uniform split
*
- * 2) The large folio must be locked.
+ * It calls __split_unmapped_folio() to perform uniform and non-uniform split.
+ * It is in charge of checking whether the split is supported or not and
+ * preparing @folio for __split_unmapped_folio().
*
- * 3) The folio must not be pinned. Any unexpected folio references, including
- * GUP pins, will result in the folio not getting split; instead, the caller
- * will receive an -EAGAIN.
- *
- * 4) @new_order > 1, usually. Splitting to order-1 anonymous folios is not
- * supported for non-file-backed folios, because folio->_deferred_list, which
- * is used by partially mapped folios, is stored in subpage 2, but an order-1
- * folio only has subpages 0 and 1. File-backed order-1 folios are supported,
- * since they do not use _deferred_list.
- *
- * After splitting, the caller's folio reference will be transferred to @page,
- * resulting in a raised refcount of @page after this call. The other pages may
- * be freed if they are not mapped.
- *
- * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
- *
- * Pages in @new_order will inherit the mapping, flags, and so on from the
- * huge page.
- *
- * Returns 0 if the huge page was split successfully.
- *
- * Returns -EAGAIN if the folio has unexpected reference (e.g., GUP) or if
- * the folio was concurrently removed from the page cache.
- *
- * Returns -EBUSY when trying to split the huge zeropage, if the folio is
- * under writeback, if fs-specific folio metadata cannot currently be
- * released, or if some unexpected race happened (e.g., anon VMA disappeared,
- * truncation).
- *
- * Callers should ensure that the order respects the address space mapping
- * min-order if one is set for non-anonymous folios.
- *
- * Returns -EINVAL when trying to split to an order that is incompatible
- * with the folio. Splitting to order 0 is compatible with all folios.
+ * Return: 0 on success, <0 on failure. If -ENOMEM is returned, @folio might
+ * be split, but not to @new_order; the caller needs to check.
*/
-int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
- unsigned int new_order)
+static int __folio_split(struct folio *folio, unsigned int new_order,
+ struct page *split_at, struct page *lock_at,
+ struct list_head *list, bool uniform_split)
{
- struct folio *folio = page_folio(page);
struct deferred_split *ds_queue = get_deferred_split_queue(folio);
- /* reset xarray order to new order after split */
- XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
+ XA_STATE(xas, &folio->mapping->i_pages, folio->index);
bool is_anon = folio_test_anon(folio);
struct address_space *mapping = NULL;
struct anon_vma *anon_vma = NULL;
@@ -3455,38 +3736,17 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+ if (folio != page_folio(split_at) || folio != page_folio(lock_at))
+ return -EINVAL;
+
if (new_order >= folio_order(folio))
return -EINVAL;
- if (is_anon) {
- /* order-1 is not supported for anonymous THP. */
- if (new_order == 1) {
- VM_WARN_ONCE(1, "Cannot split to order-1 folio");
- return -EINVAL;
- }
- } else if (new_order) {
- /* Split shmem folio to non-zero order not supported */
- if (shmem_mapping(folio->mapping)) {
- VM_WARN_ONCE(1,
- "Cannot split shmem folio to non-0 order");
- return -EINVAL;
- }
- /*
- * No split if the file system does not support large folio.
- * Note that we might still have THPs in such mappings due to
- * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping
- * does not actually support large folios properly.
- */
- if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
- !mapping_large_folio_support(folio->mapping)) {
- VM_WARN_ONCE(1,
- "Cannot split file folio to non-0 order");
- return -EINVAL;
- }
- }
+ if (uniform_split && !uniform_split_supported(folio, new_order, true))
+ return -EINVAL;
- /* Only swapping a whole PMD-mapped folio is supported */
- if (folio_test_swapcache(folio) && new_order)
+ if (!uniform_split &&
+ !non_uniform_split_supported(folio, new_order, true))
return -EINVAL;
is_hzp = is_huge_zero_folio(folio);
@@ -3522,6 +3782,11 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
mapping = folio->mapping;
/* Truncated ? */
+ /*
+ * TODO: add support for large shmem folio in swap cache.
+ * When shmem is in swap cache, mapping is NULL and
+ * folio_test_swapcache() is true.
+ */
if (!mapping) {
ret = -EBUSY;
goto out;
@@ -3543,21 +3808,24 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
goto out;
}
- xas_split_alloc(&xas, folio, folio_order(folio), gfp);
- if (xas_error(&xas)) {
- ret = xas_error(&xas);
- goto out;
+ if (uniform_split) {
+ xas_set_order(&xas, folio->index, new_order);
+ xas_split_alloc(&xas, folio, folio_order(folio), gfp);
+ if (xas_error(&xas)) {
+ ret = xas_error(&xas);
+ goto out;
+ }
}
anon_vma = NULL;
i_mmap_lock_read(mapping);
/*
- *__split_huge_page() may need to trim off pages beyond EOF:
- * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
- * which cannot be nested inside the page tree lock. So note
- * end now: i_size itself may be changed at any moment, but
- * folio lock is good enough to serialize the trimming.
+ * __split_unmapped_folio() may need to trim off pages beyond
+ * EOF: but on 32-bit, i_size_read() takes an irq-unsafe
+ * seqlock, which cannot be nested inside the page tree lock.
+ * So note end now: i_size itself may be changed at any moment,
+ * but folio lock is good enough to serialize the trimming.
*/
end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
if (shmem_mapping(mapping))
@@ -3611,7 +3879,6 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
if (mapping) {
int nr = folio_nr_pages(folio);
- xas_split(&xas, folio, folio_order(folio));
if (folio_test_pmd_mappable(folio) &&
new_order < HPAGE_PMD_ORDER) {
if (folio_test_swapbacked(folio)) {
@@ -3625,12 +3892,9 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
}
}
- if (is_anon) {
- mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
- mod_mthp_stat(new_order, MTHP_STAT_NR_ANON, 1 << (order - new_order));
- }
- __split_huge_page(page, list, end, new_order);
- ret = 0;
+ ret = __split_unmapped_folio(folio, new_order,
+ split_at, lock_at, list, end, &xas, mapping,
+ uniform_split);
} else {
spin_unlock(&ds_queue->split_queue_lock);
fail:
@@ -3656,6 +3920,90 @@ out:
return ret;
}
+/*
+ * This function splits a large folio into smaller folios of order @new_order.
+ * @page can point to any page of the large folio to split. The split operation
+ * does not change the position of @page.
+ *
+ * Prerequisites:
+ *
+ * 1) The caller must hold a reference on the @page's owning folio, also known
+ * as the large folio.
+ *
+ * 2) The large folio must be locked.
+ *
+ * 3) The folio must not be pinned. Any unexpected folio references, including
+ * GUP pins, will result in the folio not getting split; instead, the caller
+ * will receive an -EAGAIN.
+ *
+ * 4) @new_order > 1, usually. Splitting to order-1 is not supported for
+ * anonymous (non-file-backed) folios, because folio->_deferred_list, which
+ * is used by partially mapped folios, is stored in subpage 2, but an order-1
+ * folio only has subpages 0 and 1. File-backed order-1 folios are supported,
+ * since they do not use _deferred_list.
+ *
+ * After splitting, the caller's folio reference will be transferred to @page,
+ * resulting in a raised refcount of @page after this call. The other pages may
+ * be freed if they are not mapped.
+ *
+ * If @list is null, tail pages will be added to the LRU list; otherwise, to @list.
+ *
+ * Pages in @new_order will inherit the mapping, flags, and so on from the
+ * huge page.
+ *
+ * Returns 0 if the huge page was split successfully.
+ *
+ * Returns -EAGAIN if the folio has an unexpected reference (e.g., a GUP pin)
+ * or if the folio was concurrently removed from the page cache.
+ *
+ * Returns -EBUSY when trying to split the huge zeropage, if the folio is
+ * under writeback, if fs-specific folio metadata cannot currently be
+ * released, or if some unexpected race happened (e.g., anon VMA disappeared,
+ * truncation).
+ *
+ * Callers should ensure that the order respects the address space mapping
+ * min-order if one is set for non-anonymous folios.
+ *
+ * Returns -EINVAL when trying to split to an order that is incompatible
+ * with the folio. Splitting to order 0 is compatible with all folios.
+ */
+int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+ unsigned int new_order)
+{
+ struct folio *folio = page_folio(page);
+
+ return __folio_split(folio, new_order, &folio->page, page, list, true);
+}
+
+/*
+ * folio_split: split a folio at @split_at to a @new_order folio
+ * @folio: folio to split
+ * @new_order: the order of the new folio
+ * @split_at: a page within the new folio
+ *
+ * Return: 0 on success; <0 on failure (if -ENOMEM is returned, @folio might
+ * have been split, but not to @new_order, so the caller needs to check)
+ *
+ * It has the same prerequisites and return values as
+ * split_huge_page_to_list_to_order().
+ *
+ * Split a folio at @split_at into a @new_order folio, leaving the
+ * remaining subpages of the original folio as large as possible. For example,
+ * when splitting an order-9 folio at its third order-3 subpage into an
+ * order-3 folio, there are 2^(9-3)=64 order-3 subpages in the order-9 folio.
+ * After the split, there will be a group of folios with different orders and
+ * the new folio containing @split_at is marked in bracket:
+ * [order-4, {order-3}, order-3, order-5, order-6, order-7, order-8].
+ *
+ * After split, folio is left locked for caller.
+ */
+int folio_split(struct folio *folio, unsigned int new_order,
+ struct page *split_at, struct list_head *list)
+{
+ return __folio_split(folio, new_order, split_at, &folio->page, list,
+ false);
+}
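
Editor's sketch (illustration only, not part of the patch): a hypothetical
caller doing the non-uniform split described above, on an order-9 folio it
has already locked and holds a reference on:

	/* Split a locked order-9 folio at its third order-3 chunk. */
	struct page *split_at = folio_page(folio, 16);	/* pages 16-23 */
	int ret = folio_split(folio, 3, split_at, NULL);

	if (!ret)
		/*
		 * Resulting orders: [4, {3}, 3, 5, 6, 7, 8]; the folio
		 * containing the original head page stays locked.
		 */
		folio_unlock(folio);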
+
int min_order_for_split(struct folio *folio)
{
if (folio_test_anon(folio))
@@ -3740,7 +4088,7 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
/*
* Exclude swapcache: originally to avoid a corrupt deferred split
- * queue. Nowadays that is fully prevented by mem_cgroup_swapout();
+ * queue. Nowadays that is fully prevented by memcg1_swapout();
* but if page reclaim is already handling the same folio, it is
* unnecessary to handle it again in the shrinker, so excluding
* swapcache here may still be a useful optimization.
@@ -3975,7 +4323,8 @@ static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
}
static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
- unsigned long vaddr_end, unsigned int new_order)
+ unsigned long vaddr_end, unsigned int new_order,
+ long in_folio_offset)
{
int ret = 0;
struct task_struct *task;
@@ -4059,8 +4408,16 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
if (!folio_test_anon(folio) && folio->mapping != mapping)
goto unlock;
- if (!split_folio_to_order(folio, target_order))
- split++;
+ if (in_folio_offset < 0 ||
+ in_folio_offset >= folio_nr_pages(folio)) {
+ if (!split_folio_to_order(folio, target_order))
+ split++;
+ } else {
+ struct page *split_at = folio_page(folio,
+ in_folio_offset);
+ if (!folio_split(folio, target_order, split_at, NULL))
+ split++;
+ }
unlock:
@@ -4083,7 +4440,8 @@ out:
}
static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
- pgoff_t off_end, unsigned int new_order)
+ pgoff_t off_end, unsigned int new_order,
+ long in_folio_offset)
{
struct filename *file;
struct file *candidate;
@@ -4132,8 +4490,15 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
if (folio->mapping != mapping)
goto unlock;
- if (!split_folio_to_order(folio, target_order))
- split++;
+ if (in_folio_offset < 0 || in_folio_offset >= nr_pages) {
+ if (!split_folio_to_order(folio, target_order))
+ split++;
+ } else {
+ struct page *split_at = folio_page(folio,
+ in_folio_offset);
+ if (!folio_split(folio, target_order, split_at, NULL))
+ split++;
+ }
unlock:
folio_unlock(folio);
@@ -4166,6 +4531,7 @@ static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
int pid;
unsigned long vaddr_start, vaddr_end;
unsigned int new_order = 0;
+ long in_folio_offset = -1;
ret = mutex_lock_interruptible(&split_debug_mutex);
if (ret)
@@ -4194,30 +4560,33 @@ static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
goto out;
}
- ret = sscanf(tok_buf, "0x%lx,0x%lx,%d", &off_start,
- &off_end, &new_order);
- if (ret != 2 && ret != 3) {
+ ret = sscanf(tok_buf, "0x%lx,0x%lx,%d,%ld", &off_start, &off_end,
+ &new_order, &in_folio_offset);
+ if (ret != 2 && ret != 3 && ret != 4) {
ret = -EINVAL;
goto out;
}
- ret = split_huge_pages_in_file(file_path, off_start, off_end, new_order);
+ ret = split_huge_pages_in_file(file_path, off_start, off_end,
+ new_order, in_folio_offset);
if (!ret)
ret = input_len;
goto out;
}
- ret = sscanf(input_buf, "%d,0x%lx,0x%lx,%d", &pid, &vaddr_start, &vaddr_end, &new_order);
+ ret = sscanf(input_buf, "%d,0x%lx,0x%lx,%d,%ld", &pid, &vaddr_start,
+ &vaddr_end, &new_order, &in_folio_offset);
if (ret == 1 && pid == 1) {
split_huge_pages_all();
ret = strlen(input_buf);
goto out;
- } else if (ret != 3 && ret != 4) {
+ } else if (ret != 3 && ret != 4 && ret != 5) {
ret = -EINVAL;
goto out;
}
- ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end, new_order);
+ ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end, new_order,
+ in_folio_offset);
if (!ret)
ret = strlen(input_buf);
out:
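
Editor's sketch (illustration only, not part of the patch): exercising the
extended debugfs interface from userspace; the fifth, optional field is the
new in-folio offset, and the pid, addresses, and offset here are made up:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* "<pid>,<vaddr_start>,<vaddr_end>,<new_order>,<in_folio_offset>" */
		const char *cmd = "1234,0x700000000000,0x700000200000,3,16";
		int fd = open("/sys/kernel/debug/split_huge_pages", O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, cmd, strlen(cmd)) < 0)
			perror("write");
		close(fd);
		return 0;
	}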
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 318624c96584..39f92aad7bd1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -14,9 +14,11 @@
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
+#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
+#include <linux/minmax.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
@@ -40,6 +42,7 @@
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
+#include <asm/setup.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
@@ -48,19 +51,33 @@
#include <linux/page_owner.h>
#include "internal.h"
#include "hugetlb_vmemmap.h"
+#include "hugetlb_cma.h"
#include <linux/page-isolation.h>
int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];
-#ifdef CONFIG_CMA
-static struct cma *hugetlb_cma[MAX_NUMNODES];
-static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
-#endif
-static unsigned long hugetlb_cma_size __initdata;
-
__initdata struct list_head huge_boot_pages[MAX_NUMNODES];
+static unsigned long hstate_boot_nrinvalid[HUGE_MAX_HSTATE] __initdata;
+
+/*
+ * Due to ordering constraints across the init code for various
+ * architectures, hugetlb hstate cmdline parameters can't simply
+ * be early_param. early_param might call the setup function
+ * before valid hugetlb page sizes are determined, leading to
+ * incorrect rejection of valid hugepagesz= options.
+ *
+ * So, record the parameters early and consume them whenever the
+ * init code is ready for them, by calling hugetlb_parse_params().
+ */
+
+/* one (hugepagesz=,hugepages=) pair per hstate, one default_hugepagesz */
+#define HUGE_MAX_CMDLINE_ARGS (2 * HUGE_MAX_HSTATE + 1)
+struct hugetlb_cmdline {
+ char *val;
+ int (*setup)(char *val);
+};
/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
@@ -68,6 +85,21 @@ static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = true;
static bool __initdata parsed_default_hugepagesz;
static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
+static unsigned long hugepage_allocation_threads __initdata;
+
+static char hstate_cmdline_buf[COMMAND_LINE_SIZE] __initdata;
+static int hstate_cmdline_index __initdata;
+static struct hugetlb_cmdline hugetlb_params[HUGE_MAX_CMDLINE_ARGS] __initdata;
+static int hugetlb_param_index __initdata;
+static __init int hugetlb_add_param(char *s, int (*setup)(char *val));
+static __init void hugetlb_parse_params(void);
+
+#define hugetlb_early_param(str, func) \
+static __init int func##args(char *s) \
+{ \
+ return hugetlb_add_param(s, func); \
+} \
+early_param(str, func##args)
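
Editor's note (expansion shown for illustration):
hugetlb_early_param("hugepages", hugepages_setup) expands to roughly

	static __init int hugepages_setupargs(char *s)
	{
		return hugetlb_add_param(s, hugepages_setup);
	}
	early_param("hugepages", hugepages_setupargs);

so the early_param hook only records the string; hugepages_setup() itself
runs later, from hugetlb_parse_params().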
/*
* Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
@@ -93,12 +125,11 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
static void hugetlb_free_folio(struct folio *folio)
{
-#ifdef CONFIG_CMA
- int nid = folio_nid(folio);
-
- if (cma_free_folio(hugetlb_cma[nid], folio))
+ if (folio_test_hugetlb_cma(folio)) {
+ hugetlb_cma_free_folio(folio);
return;
-#endif
+ }
+
folio_put(folio);
}
@@ -1455,27 +1486,11 @@ static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
if (nid == NUMA_NO_NODE)
nid = numa_mem_id();
retry:
- folio = NULL;
-#ifdef CONFIG_CMA
- {
- int node;
-
- if (hugetlb_cma[nid])
- folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask);
-
- if (!folio && !(gfp_mask & __GFP_THISNODE)) {
- for_each_node_mask(node, *nodemask) {
- if (node == nid || !hugetlb_cma[node])
- continue;
-
- folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask);
- if (folio)
- break;
- }
- }
- }
-#endif
+ folio = hugetlb_cma_alloc_folio(h, gfp_mask, nid, nodemask);
if (!folio) {
+ if (hugetlb_cma_exclusive_alloc())
+ return NULL;
+
folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask);
if (!folio)
return NULL;
@@ -1634,7 +1649,6 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
folio_ref_unfreeze(folio, 1);
- INIT_LIST_HEAD(&folio->_deferred_list);
hugetlb_free_folio(folio);
}
@@ -2245,12 +2259,21 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
goto out_unlock;
spin_unlock_irq(&hugetlb_lock);
- folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
+ folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
if (!folio)
return NULL;
+ hugetlb_vmemmap_optimize_folio(h, folio);
+
spin_lock_irq(&hugetlb_lock);
/*
+ * nr_huge_pages needs to be adjusted within the same lock cycle
+ * as surplus_pages, otherwise it might confuse
+ * persistent_huge_pages() momentarily.
+ */
+ __prep_account_new_huge_page(h, nid);
+
+ /*
* We could have raced with the pool size change.
* Double check that and simply deallocate the new page
* if we would end up overcommiting the surpluses. Abuse
@@ -3148,6 +3171,56 @@ out_end_reservation:
return ERR_PTR(-ENOSPC);
}
+static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact)
+{
+ struct huge_bootmem_page *m;
+ int listnode = nid;
+
+ if (hugetlb_early_cma(h))
+ m = hugetlb_cma_alloc_bootmem(h, &listnode, node_exact);
+ else {
+ if (node_exact)
+ m = memblock_alloc_exact_nid_raw(huge_page_size(h),
+ huge_page_size(h), 0,
+ MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+ else {
+ m = memblock_alloc_try_nid_raw(huge_page_size(h),
+ huge_page_size(h), 0,
+ MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+ /*
+ * For pre-HVO to work correctly, pages need to be on
+ * the list for the node they were actually allocated
+ * from. That node may be different in the case of
+ * fallback by memblock_alloc_try_nid_raw. So,
+ * extract the actual node first.
+ */
+ if (m)
+ listnode = early_pfn_to_nid(PHYS_PFN(virt_to_phys(m)));
+ }
+
+ if (m) {
+ m->flags = 0;
+ m->cma = NULL;
+ }
+ }
+
+ if (m) {
+ /*
+ * Use the beginning of the huge page to store the
+ * huge_bootmem_page struct (until gather_bootmem
+ * puts them into the mem_map).
+ *
+ * Put them into a private list first because mem_map
+ * is not up yet.
+ */
+ INIT_LIST_HEAD(&m->list);
+ list_add(&m->list, &huge_boot_pages[listnode]);
+ m->hstate = h;
+ }
+
+ return m;
+}
+
int alloc_bootmem_huge_page(struct hstate *h, int nid)
__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
int __alloc_bootmem_huge_page(struct hstate *h, int nid)
@@ -3157,22 +3230,15 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)
/* do node specific alloc */
if (nid != NUMA_NO_NODE) {
- m = memblock_alloc_exact_nid_raw(huge_page_size(h), huge_page_size(h),
- 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+ m = alloc_bootmem(h, nid, true);
if (!m)
return 0;
goto found;
}
+
/* allocate from next node when distributing huge pages */
- for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, &node_states[N_MEMORY]) {
- m = memblock_alloc_try_nid_raw(
- huge_page_size(h), huge_page_size(h),
- 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
- /*
- * Use the beginning of the huge page to store the
- * huge_bootmem_page struct (until gather_bootmem
- * puts them into the mem_map).
- */
+ for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, &node_states[N_ONLINE]) {
+ m = alloc_bootmem(h, node, false);
if (!m)
return 0;
goto found;
@@ -3189,10 +3255,7 @@ found:
*/
memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE),
huge_page_size(h) - PAGE_SIZE);
- /* Put them into a private list first because mem_map is not up yet */
- INIT_LIST_HEAD(&m->list);
- list_add(&m->list, &huge_boot_pages[node]);
- m->hstate = h;
+
return 1;
}
@@ -3210,7 +3273,6 @@ static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio,
for (pfn = head_pfn + start_page_number; pfn < end_pfn; pfn++) {
struct page *page = pfn_to_page(pfn);
- __ClearPageReserved(folio_page(folio, pfn - head_pfn));
__init_single_page(page, pfn, zone, nid);
prep_compound_tail((struct page *)folio, pfn - head_pfn);
ret = page_ref_freeze(page, 1);
@@ -3234,6 +3296,42 @@ static void __init hugetlb_folio_init_vmemmap(struct folio *folio,
prep_compound_head((struct page *)folio, huge_page_order(h));
}
+static bool __init hugetlb_bootmem_page_prehvo(struct huge_bootmem_page *m)
+{
+ return m->flags & HUGE_BOOTMEM_HVO;
+}
+
+static bool __init hugetlb_bootmem_page_earlycma(struct huge_bootmem_page *m)
+{
+ return m->flags & HUGE_BOOTMEM_CMA;
+}
+
+/*
+ * memblock-allocated pageblocks might not have the migrate type set
+ * if marked with the 'noinit' flag. Set it to the default (MIGRATE_MOVABLE)
+ * here, or MIGRATE_CMA if this was a page allocated through an early CMA
+ * reservation.
+ *
+ * In case of vmemmap optimized folios, the tail vmemmap pages are mapped
+ * read-only, but that's ok - for sparse vmemmap this does not write to
+ * the page structure.
+ */
+static void __init hugetlb_bootmem_init_migratetype(struct folio *folio,
+ struct hstate *h)
+{
+ unsigned long nr_pages = pages_per_huge_page(h), i;
+
+ WARN_ON_ONCE(!pageblock_aligned(folio_pfn(folio)));
+
+ for (i = 0; i < nr_pages; i += pageblock_nr_pages) {
+ if (folio_test_hugetlb_cma(folio))
+ init_cma_pageblock(folio_page(folio, i));
+ else
+ set_pageblock_migratetype(folio_page(folio, i),
+ MIGRATE_MOVABLE);
+ }
+}
+
static void __init prep_and_add_bootmem_folios(struct hstate *h,
struct list_head *folio_list)
{
@@ -3241,7 +3339,7 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h,
struct folio *folio, *tmp_f;
/* Send list for bulk vmemmap optimization processing */
- hugetlb_vmemmap_optimize_folios(h, folio_list);
+ hugetlb_vmemmap_optimize_bootmem_folios(h, folio_list);
list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
if (!folio_test_hugetlb_vmemmap_optimized(folio)) {
@@ -3255,6 +3353,7 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h,
HUGETLB_VMEMMAP_RESERVE_PAGES,
pages_per_huge_page(h));
}
+ hugetlb_bootmem_init_migratetype(folio, h);
/* Subdivide locks to achieve better parallel performance */
spin_lock_irqsave(&hugetlb_lock, flags);
__prep_account_new_huge_page(h, folio_nid(folio));
@@ -3263,6 +3362,57 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h,
}
}
+bool __init hugetlb_bootmem_page_zones_valid(int nid,
+ struct huge_bootmem_page *m)
+{
+ unsigned long start_pfn;
+ bool valid;
+
+ if (m->flags & HUGE_BOOTMEM_ZONES_VALID) {
+ /*
+ * Already validated, skip check.
+ */
+ return true;
+ }
+
+ if (hugetlb_bootmem_page_earlycma(m)) {
+ valid = cma_validate_zones(m->cma);
+ goto out;
+ }
+
+ start_pfn = virt_to_phys(m) >> PAGE_SHIFT;
+
+ valid = !pfn_range_intersects_zones(nid, start_pfn,
+ pages_per_huge_page(m->hstate));
+out:
+ if (!valid)
+ hstate_boot_nrinvalid[hstate_index(m->hstate)]++;
+
+ return valid;
+}
+
+/*
+ * Free a bootmem page that was found to be invalid (intersecting with
+ * multiple zones).
+ *
+ * Since it intersects with multiple zones, we can't just do a free
+ * operation on all pages at once, but instead have to walk all
+ * pages, freeing them one by one.
+ */
+static void __init hugetlb_bootmem_free_invalid_page(int nid, struct page *page,
+ struct hstate *h)
+{
+ unsigned long npages = pages_per_huge_page(h);
+ unsigned long pfn;
+
+ while (npages--) {
+ pfn = page_to_pfn(page);
+ __init_page_from_nid(pfn, nid);
+ free_reserved_page(page);
+ page++;
+ }
+}
+
/*
* Put bootmem huge pages into the standard lists after mem_map is up.
* Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages.
@@ -3270,14 +3420,25 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h,
static void __init gather_bootmem_prealloc_node(unsigned long nid)
{
LIST_HEAD(folio_list);
- struct huge_bootmem_page *m;
+ struct huge_bootmem_page *m, *tm;
struct hstate *h = NULL, *prev_h = NULL;
- list_for_each_entry(m, &huge_boot_pages[nid], list) {
+ list_for_each_entry_safe(m, tm, &huge_boot_pages[nid], list) {
struct page *page = virt_to_page(m);
struct folio *folio = (void *)page;
h = m->hstate;
+ if (!hugetlb_bootmem_page_zones_valid(nid, m)) {
+ /*
+ * Can't use this page. Initialize the
+ * page structures if that hasn't already
+ * been done, and give them to the page
+ * allocator.
+ */
+ hugetlb_bootmem_free_invalid_page(nid, page, h);
+ continue;
+ }
+
/*
* It is possible to have multiple huge page sizes (hstates)
* in this list. If so, process each size separately.
@@ -3292,14 +3453,30 @@ static void __init gather_bootmem_prealloc_node(unsigned long nid)
hugetlb_folio_init_vmemmap(folio, h,
HUGETLB_VMEMMAP_RESERVE_PAGES);
init_new_hugetlb_folio(h, folio);
+
+ if (hugetlb_bootmem_page_prehvo(m))
+ /*
+ * If pre-HVO was done, just set the
+ * flag, the HVO code will then skip
+ * this folio.
+ */
+ folio_set_hugetlb_vmemmap_optimized(folio);
+
+ if (hugetlb_bootmem_page_earlycma(m))
+ folio_set_hugetlb_cma(folio);
+
list_add(&folio->lru, &folio_list);
/*
* We need to restore the 'stolen' pages to totalram_pages
* in order to fix confusing memory reports from free(1) and
* other side-effects, like CommitLimit going negative.
+ *
+ * For CMA pages, this is done in init_cma_pageblock
+ * (via hugetlb_bootmem_init_migratetype), so skip it here.
*/
- adjust_managed_page_count(page, pages_per_huge_page(h));
+ if (!folio_test_hugetlb_cma(folio))
+ adjust_managed_page_count(page, pages_per_huge_page(h));
cond_resched();
}
@@ -3439,32 +3616,44 @@ static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
.numa_aware = true
};
+ unsigned long jiffies_start;
+ unsigned long jiffies_end;
+
job.thread_fn = hugetlb_pages_alloc_boot_node;
job.start = 0;
job.size = h->max_huge_pages;
/*
- * job.max_threads is twice the num_node_state(N_MEMORY),
+ * job.max_threads is 25% of the available cpu threads by default.
*
- * Tests below indicate that a multiplier of 2 significantly improves
- * performance, and although larger values also provide improvements,
- * the gains are marginal.
+ * On large servers with terabytes of memory, huge page allocation
+ * can consume a considerable amount of time.
*
- * Therefore, choosing 2 as the multiplier strikes a good balance between
- * enhancing parallel processing capabilities and maintaining efficient
- * resource management.
+ * Tests below show how long it takes to allocate 1 TiB of memory with
+ * 2MiB huge pages. Using more threads can significantly improve allocation time.
*
- * +------------+-------+-------+-------+-------+-------+
- * | multiplier | 1 | 2 | 3 | 4 | 5 |
- * +------------+-------+-------+-------+-------+-------+
- * | 256G 2node | 358ms | 215ms | 157ms | 134ms | 126ms |
- * | 2T 4node | 979ms | 679ms | 543ms | 489ms | 481ms |
- * | 50G 2node | 71ms | 44ms | 37ms | 30ms | 31ms |
- * +------------+-------+-------+-------+-------+-------+
+ * +-----------------------+-------+-------+-------+-------+-------+
+ * | threads | 8 | 16 | 32 | 64 | 128 |
+ * +-----------------------+-------+-------+-------+-------+-------+
+ * | skylake 144 cpus | 44s | 22s | 16s | 19s | 20s |
+ * | cascade lake 192 cpus | 39s | 20s | 11s | 10s | 9s |
+ * +-----------------------+-------+-------+-------+-------+-------+
*/
- job.max_threads = num_node_state(N_MEMORY) * 2;
- job.min_chunk = h->max_huge_pages / num_node_state(N_MEMORY) / 2;
+ if (hugepage_allocation_threads == 0) {
+ hugepage_allocation_threads = num_online_cpus() / 4;
+ hugepage_allocation_threads = max(hugepage_allocation_threads, 1);
+ }
+
+ job.max_threads = hugepage_allocation_threads;
+ job.min_chunk = h->max_huge_pages / hugepage_allocation_threads;
+
+ jiffies_start = jiffies;
padata_do_multithreaded(&job);
+ jiffies_end = jiffies;
+
+ pr_info("HugeTLB: allocation took %dms with hugepage_allocation_threads=%ld\n",
+ jiffies_to_msecs(jiffies_end - jiffies_start),
+ hugepage_allocation_threads);
return h->nr_huge_pages;
}
@@ -3483,23 +3672,17 @@ static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
unsigned long allocated;
- static bool initialized __initdata;
- /* skip gigantic hugepages allocation if hugetlb_cma enabled */
- if (hstate_is_gigantic(h) && hugetlb_cma_size) {
+ /*
+ * Skip gigantic hugepages allocation if early CMA
+ * reservations are not available.
+ */
+ if (hstate_is_gigantic(h) && hugetlb_cma_total_size() &&
+ !hugetlb_early_cma(h)) {
pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
return;
}
- /* hugetlb_hstate_alloc_pages will be called many times, initialize huge_boot_pages once */
- if (!initialized) {
- int i = 0;
-
- for (i = 0; i < MAX_NUMNODES; i++)
- INIT_LIST_HEAD(&huge_boot_pages[i]);
- initialized = true;
- }
-
/* do node specific alloc */
if (hugetlb_hstate_alloc_pages_specific_nodes(h))
return;
@@ -3532,7 +3715,7 @@ static void __init hugetlb_init_hstates(void)
*/
if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
continue;
- if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER)
+ if (hugetlb_cma_total_size() && h->order <= HUGETLB_PAGE_ORDER)
continue;
for_each_hstate(h2) {
if (h2 == h)
@@ -3547,13 +3730,20 @@ static void __init hugetlb_init_hstates(void)
static void __init report_hugepages(void)
{
struct hstate *h;
+ unsigned long nrinvalid;
for_each_hstate(h) {
char buf[32];
+ nrinvalid = hstate_boot_nrinvalid[hstate_index(h)];
+ h->max_huge_pages -= nrinvalid;
+
string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n",
buf, h->free_huge_pages);
+ if (nrinvalid)
+ pr_info("HugeTLB: %s page size: %lu invalid page%s discarded\n",
+ buf, nrinvalid, nrinvalid > 1 ? "s" : "");
pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n",
hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
}
@@ -4427,14 +4617,6 @@ static void hugetlb_register_all_nodes(void) { }
#endif
-#ifdef CONFIG_CMA
-static void __init hugetlb_cma_check(void);
-#else
-static inline __init void hugetlb_cma_check(void)
-{
-}
-#endif
-
static void __init hugetlb_sysfs_init(void)
{
struct hstate *h;
@@ -4559,8 +4741,6 @@ void __init hugetlb_add_hstate(unsigned int order)
for (i = 0; i < MAX_NUMNODES; ++i)
INIT_LIST_HEAD(&h->hugepage_freelists[i]);
INIT_LIST_HEAD(&h->hugepage_activelist);
- h->next_nid_to_alloc = first_memory_node;
- h->next_nid_to_free = first_memory_node;
snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
huge_page_size(h)/SZ_1K);
@@ -4585,6 +4765,44 @@ static void __init hugepages_clear_pages_in_node(void)
}
}
+static __init int hugetlb_add_param(char *s, int (*setup)(char *))
+{
+ size_t len;
+ char *p;
+
+ if (hugetlb_param_index >= HUGE_MAX_CMDLINE_ARGS)
+ return -EINVAL;
+
+ len = strlen(s) + 1;
+ if (len + hstate_cmdline_index > sizeof(hstate_cmdline_buf))
+ return -EINVAL;
+
+ p = &hstate_cmdline_buf[hstate_cmdline_index];
+ memcpy(p, s, len);
+ hstate_cmdline_index += len;
+
+ hugetlb_params[hugetlb_param_index].val = p;
+ hugetlb_params[hugetlb_param_index].setup = setup;
+
+ hugetlb_param_index++;
+
+ return 0;
+}
+
+static __init void hugetlb_parse_params(void)
+{
+ int i;
+ struct hugetlb_cmdline *hcp;
+
+ for (i = 0; i < hugetlb_param_index; i++) {
+ hcp = &hugetlb_params[i];
+
+ hcp->setup(hcp->val);
+ }
+
+ hugetlb_cma_validate_params();
+}
+
/*
* hugepages command line processing
* hugepages normally follows a valid hugepagesz or default_hugepagesz
@@ -4604,7 +4822,7 @@ static int __init hugepages_setup(char *s)
if (!parsed_valid_hugepagesz) {
pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
parsed_valid_hugepagesz = true;
- return 1;
+ return -EINVAL;
}
/*
@@ -4658,24 +4876,16 @@ static int __init hugepages_setup(char *s)
}
}
- /*
- * Global state is always initialized later in hugetlb_init.
- * But we need to allocate gigantic hstates here early to still
- * use the bootmem allocator.
- */
- if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
- hugetlb_hstate_alloc_pages(parsed_hstate);
-
last_mhp = mhp;
- return 1;
+ return 0;
invalid:
pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
hugepages_clear_pages_in_node();
- return 1;
+ return -EINVAL;
}
-__setup("hugepages=", hugepages_setup);
+hugetlb_early_param("hugepages", hugepages_setup);
/*
* hugepagesz command line processing
@@ -4694,7 +4904,7 @@ static int __init hugepagesz_setup(char *s)
if (!arch_hugetlb_valid_size(size)) {
pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
- return 1;
+ return -EINVAL;
}
h = size_to_hstate(size);
@@ -4709,7 +4919,7 @@ static int __init hugepagesz_setup(char *s)
if (!parsed_default_hugepagesz || h != &default_hstate ||
default_hstate.max_huge_pages) {
pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
- return 1;
+ return -EINVAL;
}
/*
@@ -4719,14 +4929,14 @@ static int __init hugepagesz_setup(char *s)
*/
parsed_hstate = h;
parsed_valid_hugepagesz = true;
- return 1;
+ return 0;
}
hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
parsed_valid_hugepagesz = true;
- return 1;
+ return 0;
}
-__setup("hugepagesz=", hugepagesz_setup);
+hugetlb_early_param("hugepagesz", hugepagesz_setup);
/*
* default_hugepagesz command line input
@@ -4740,14 +4950,14 @@ static int __init default_hugepagesz_setup(char *s)
parsed_valid_hugepagesz = false;
if (parsed_default_hugepagesz) {
pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
- return 1;
+ return -EINVAL;
}
size = (unsigned long)memparse(s, NULL);
if (!arch_hugetlb_valid_size(size)) {
pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
- return 1;
+ return -EINVAL;
}
hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
@@ -4764,17 +4974,74 @@ static int __init default_hugepagesz_setup(char *s)
*/
if (default_hstate_max_huge_pages) {
default_hstate.max_huge_pages = default_hstate_max_huge_pages;
- for_each_online_node(i)
- default_hstate.max_huge_pages_node[i] =
- default_hugepages_in_node[i];
- if (hstate_is_gigantic(&default_hstate))
- hugetlb_hstate_alloc_pages(&default_hstate);
+ /*
+ * Since this is an early parameter, we can't check
+ * NUMA node state yet, so loop through MAX_NUMNODES.
+ */
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ if (default_hugepages_in_node[i] != 0)
+ default_hstate.max_huge_pages_node[i] =
+ default_hugepages_in_node[i];
+ }
default_hstate_max_huge_pages = 0;
}
+ return 0;
+}
+hugetlb_early_param("default_hugepagesz", default_hugepagesz_setup);
+
+static bool __hugetlb_bootmem_allocated __initdata;
+
+bool __init hugetlb_bootmem_allocated(void)
+{
+ return __hugetlb_bootmem_allocated;
+}
+
+void __init hugetlb_bootmem_alloc(void)
+{
+ struct hstate *h;
+ int i;
+
+ if (__hugetlb_bootmem_allocated)
+ return;
+
+ for (i = 0; i < MAX_NUMNODES; i++)
+ INIT_LIST_HEAD(&huge_boot_pages[i]);
+
+ hugetlb_parse_params();
+
+ for_each_hstate(h) {
+ h->next_nid_to_alloc = first_online_node;
+ h->next_nid_to_free = first_online_node;
+
+ if (hstate_is_gigantic(h))
+ hugetlb_hstate_alloc_pages(h);
+ }
+
+ __hugetlb_bootmem_allocated = true;
+}
+
+/*
+ * hugepage_alloc_threads command line parsing.
+ *
+ * When set, use this specific number of threads for the boot
+ * allocation of hugepages.
+ */
+static int __init hugepage_alloc_threads_setup(char *s)
+{
+ unsigned long allocation_threads;
+
+ if (kstrtoul(s, 0, &allocation_threads) != 0)
+ return 1;
+
+ if (allocation_threads == 0)
+ return 1;
+
+ hugepage_allocation_threads = allocation_threads;
+
return 1;
}
-__setup("default_hugepagesz=", default_hugepagesz_setup);
+__setup("hugepage_alloc_threads=", hugepage_alloc_threads_setup);
static unsigned int allowed_mems_nr(struct hstate *h)
{
@@ -4912,7 +5179,7 @@ static const struct ctl_table hugetlb_table[] = {
},
};
-static void hugetlb_sysctl_init(void)
+static void __init hugetlb_sysctl_init(void)
{
register_sysctl_init("vm", hugetlb_table);
}
@@ -7625,163 +7892,3 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
ALIGN_DOWN(vma->vm_end, PUD_SIZE));
}
-
-#ifdef CONFIG_CMA
-static bool cma_reserve_called __initdata;
-
-static int __init cmdline_parse_hugetlb_cma(char *p)
-{
- int nid, count = 0;
- unsigned long tmp;
- char *s = p;
-
- while (*s) {
- if (sscanf(s, "%lu%n", &tmp, &count) != 1)
- break;
-
- if (s[count] == ':') {
- if (tmp >= MAX_NUMNODES)
- break;
- nid = array_index_nospec(tmp, MAX_NUMNODES);
-
- s += count + 1;
- tmp = memparse(s, &s);
- hugetlb_cma_size_in_node[nid] = tmp;
- hugetlb_cma_size += tmp;
-
- /*
- * Skip the separator if have one, otherwise
- * break the parsing.
- */
- if (*s == ',')
- s++;
- else
- break;
- } else {
- hugetlb_cma_size = memparse(p, &p);
- break;
- }
- }
-
- return 0;
-}
-
-early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
-
-void __init hugetlb_cma_reserve(int order)
-{
- unsigned long size, reserved, per_node;
- bool node_specific_cma_alloc = false;
- int nid;
-
- /*
- * HugeTLB CMA reservation is required for gigantic
- * huge pages which could not be allocated via the
- * page allocator. Just warn if there is any change
- * breaking this assumption.
- */
- VM_WARN_ON(order <= MAX_PAGE_ORDER);
- cma_reserve_called = true;
-
- if (!hugetlb_cma_size)
- return;
-
- for (nid = 0; nid < MAX_NUMNODES; nid++) {
- if (hugetlb_cma_size_in_node[nid] == 0)
- continue;
-
- if (!node_online(nid)) {
- pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
- hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
- hugetlb_cma_size_in_node[nid] = 0;
- continue;
- }
-
- if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
- pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
- nid, (PAGE_SIZE << order) / SZ_1M);
- hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
- hugetlb_cma_size_in_node[nid] = 0;
- } else {
- node_specific_cma_alloc = true;
- }
- }
-
- /* Validate the CMA size again in case some invalid nodes specified. */
- if (!hugetlb_cma_size)
- return;
-
- if (hugetlb_cma_size < (PAGE_SIZE << order)) {
- pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
- (PAGE_SIZE << order) / SZ_1M);
- hugetlb_cma_size = 0;
- return;
- }
-
- if (!node_specific_cma_alloc) {
- /*
- * If 3 GB area is requested on a machine with 4 numa nodes,
- * let's allocate 1 GB on first three nodes and ignore the last one.
- */
- per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
- pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
- hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
- }
-
- reserved = 0;
- for_each_online_node(nid) {
- int res;
- char name[CMA_MAX_NAME];
-
- if (node_specific_cma_alloc) {
- if (hugetlb_cma_size_in_node[nid] == 0)
- continue;
-
- size = hugetlb_cma_size_in_node[nid];
- } else {
- size = min(per_node, hugetlb_cma_size - reserved);
- }
-
- size = round_up(size, PAGE_SIZE << order);
-
- snprintf(name, sizeof(name), "hugetlb%d", nid);
- /*
- * Note that 'order per bit' is based on smallest size that
- * may be returned to CMA allocator in the case of
- * huge page demotion.
- */
- res = cma_declare_contiguous_nid(0, size, 0,
- PAGE_SIZE << order,
- HUGETLB_PAGE_ORDER, false, name,
- &hugetlb_cma[nid], nid);
- if (res) {
- pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
- res, nid);
- continue;
- }
-
- reserved += size;
- pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
- size / SZ_1M, nid);
-
- if (reserved >= hugetlb_cma_size)
- break;
- }
-
- if (!reserved)
- /*
- * hugetlb_cma_size is used to determine if allocations from
- * cma are possible. Set to zero if no cma regions are set up.
- */
- hugetlb_cma_size = 0;
-}
-
-static void __init hugetlb_cma_check(void)
-{
- if (!hugetlb_cma_size || cma_reserve_called)
- return;
-
- pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
-}
-
-#endif /* CONFIG_CMA */
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index bb9578bd99f9..58e895f3899a 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -101,10 +101,9 @@ static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
int idx;
for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
- struct page_counter *fault_parent = NULL;
- struct page_counter *rsvd_parent = NULL;
+ struct page_counter *fault, *fault_parent = NULL;
+ struct page_counter *rsvd, *rsvd_parent = NULL;
unsigned long limit;
- int ret;
if (parent_h_cgroup) {
fault_parent = hugetlb_cgroup_counter_from_cgroup(
@@ -112,24 +111,22 @@ static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
rsvd_parent = hugetlb_cgroup_counter_from_cgroup_rsvd(
parent_h_cgroup, idx);
}
- page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup,
- idx),
- fault_parent, false);
- page_counter_init(
- hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
- rsvd_parent, false);
+ fault = hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx);
+ rsvd = hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx);
+
+ page_counter_init(fault, fault_parent, false);
+ page_counter_init(rsvd, rsvd_parent, false);
+
+ if (!cgroup_subsys_on_dfl(hugetlb_cgrp_subsys)) {
+ fault->track_failcnt = true;
+ rsvd->track_failcnt = true;
+ }
limit = round_down(PAGE_COUNTER_MAX,
pages_per_huge_page(&hstates[idx]));
- ret = page_counter_set_max(
- hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx),
- limit);
- VM_BUG_ON(ret);
- ret = page_counter_set_max(
- hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
- limit);
- VM_BUG_ON(ret);
+ VM_BUG_ON(page_counter_set_max(fault, limit));
+ VM_BUG_ON(page_counter_set_max(rsvd, limit));
}
}
diff --git a/mm/hugetlb_cma.c b/mm/hugetlb_cma.c
new file mode 100644
index 000000000000..e0f2d5c3a84c
--- /dev/null
+++ b/mm/hugetlb_cma.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/mm.h>
+#include <linux/cma.h>
+#include <linux/compiler.h>
+#include <linux/mm_inline.h>
+
+#include <asm/page.h>
+#include <asm/setup.h>
+
+#include <linux/hugetlb.h>
+#include "internal.h"
+#include "hugetlb_cma.h"
+
+
+static struct cma *hugetlb_cma[MAX_NUMNODES];
+static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
+static bool hugetlb_cma_only;
+static unsigned long hugetlb_cma_size __initdata;
+
+void hugetlb_cma_free_folio(struct folio *folio)
+{
+ int nid = folio_nid(folio);
+
+ WARN_ON_ONCE(!cma_free_folio(hugetlb_cma[nid], folio));
+}
+
+
+struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask,
+ int nid, nodemask_t *nodemask)
+{
+ int node;
+ int order = huge_page_order(h);
+ struct folio *folio = NULL;
+
+ if (hugetlb_cma[nid])
+ folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask);
+
+ if (!folio && !(gfp_mask & __GFP_THISNODE)) {
+ for_each_node_mask(node, *nodemask) {
+ if (node == nid || !hugetlb_cma[node])
+ continue;
+
+ folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask);
+ if (folio)
+ break;
+ }
+ }
+
+ if (folio)
+ folio_set_hugetlb_cma(folio);
+
+ return folio;
+}
+
+struct huge_bootmem_page * __init
+hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid, bool node_exact)
+{
+ struct cma *cma;
+ struct huge_bootmem_page *m;
+ int node = *nid;
+
+ cma = hugetlb_cma[*nid];
+ m = cma_reserve_early(cma, huge_page_size(h));
+ if (!m) {
+ if (node_exact)
+ return NULL;
+
+ for_each_online_node(node) {
+ cma = hugetlb_cma[node];
+ if (!cma || node == *nid)
+ continue;
+ m = cma_reserve_early(cma, huge_page_size(h));
+ if (m) {
+ *nid = node;
+ break;
+ }
+ }
+ }
+
+ if (m) {
+ m->flags = HUGE_BOOTMEM_CMA;
+ m->cma = cma;
+ }
+
+ return m;
+}
+
+
+static bool cma_reserve_called __initdata;
+
+static int __init cmdline_parse_hugetlb_cma(char *p)
+{
+ int nid, count = 0;
+ unsigned long tmp;
+ char *s = p;
+
+ while (*s) {
+ if (sscanf(s, "%lu%n", &tmp, &count) != 1)
+ break;
+
+ if (s[count] == ':') {
+ if (tmp >= MAX_NUMNODES)
+ break;
+ nid = array_index_nospec(tmp, MAX_NUMNODES);
+
+ s += count + 1;
+ tmp = memparse(s, &s);
+ hugetlb_cma_size_in_node[nid] = tmp;
+ hugetlb_cma_size += tmp;
+
+ /*
+ * Skip the separator if there is one; otherwise
+ * stop parsing.
+ */
+ if (*s == ',')
+ s++;
+ else
+ break;
+ } else {
+ hugetlb_cma_size = memparse(p, &p);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
+
+static int __init cmdline_parse_hugetlb_cma_only(char *p)
+{
+ return kstrtobool(p, &hugetlb_cma_only);
+}
+
+early_param("hugetlb_cma_only", cmdline_parse_hugetlb_cma_only);
+
+void __init hugetlb_cma_reserve(int order)
+{
+ unsigned long size, reserved, per_node;
+ bool node_specific_cma_alloc = false;
+ int nid;
+
+ /*
+ * HugeTLB CMA reservation is required for gigantic
+ * huge pages which could not be allocated via the
+ * page allocator. Just warn if there is any change
+ * breaking this assumption.
+ */
+ VM_WARN_ON(order <= MAX_PAGE_ORDER);
+ cma_reserve_called = true;
+
+ if (!hugetlb_cma_size)
+ return;
+
+ for (nid = 0; nid < MAX_NUMNODES; nid++) {
+ if (hugetlb_cma_size_in_node[nid] == 0)
+ continue;
+
+ if (!node_online(nid)) {
+ pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
+ hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
+ hugetlb_cma_size_in_node[nid] = 0;
+ continue;
+ }
+
+ if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
+ pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
+ nid, (PAGE_SIZE << order) / SZ_1M);
+ hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
+ hugetlb_cma_size_in_node[nid] = 0;
+ } else {
+ node_specific_cma_alloc = true;
+ }
+ }
+
+ /* Validate the CMA size again in case some invalid nodes specified. */
+ if (!hugetlb_cma_size)
+ return;
+
+ if (hugetlb_cma_size < (PAGE_SIZE << order)) {
+ pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
+ (PAGE_SIZE << order) / SZ_1M);
+ hugetlb_cma_size = 0;
+ return;
+ }
+
+ if (!node_specific_cma_alloc) {
+ /*
+ * If 3 GB area is requested on a machine with 4 numa nodes,
+ * let's allocate 1 GB on first three nodes and ignore the last one.
+ */
+ per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
+ pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
+ hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
+ }
+
+ reserved = 0;
+ for_each_online_node(nid) {
+ int res;
+ char name[CMA_MAX_NAME];
+
+ if (node_specific_cma_alloc) {
+ if (hugetlb_cma_size_in_node[nid] == 0)
+ continue;
+
+ size = hugetlb_cma_size_in_node[nid];
+ } else {
+ size = min(per_node, hugetlb_cma_size - reserved);
+ }
+
+ size = round_up(size, PAGE_SIZE << order);
+
+ snprintf(name, sizeof(name), "hugetlb%d", nid);
+ /*
+ * Note that 'order per bit' is based on smallest size that
+ * may be returned to CMA allocator in the case of
+ * huge page demotion.
+ */
+ res = cma_declare_contiguous_multi(size, PAGE_SIZE << order,
+ HUGETLB_PAGE_ORDER, name,
+ &hugetlb_cma[nid], nid);
+ if (res) {
+ pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
+ res, nid);
+ continue;
+ }
+
+ reserved += size;
+ pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
+ size / SZ_1M, nid);
+
+ if (reserved >= hugetlb_cma_size)
+ break;
+ }
+
+ if (!reserved)
+ /*
+ * hugetlb_cma_size is used to determine if allocations from
+ * cma are possible. Set to zero if no cma regions are set up.
+ */
+ hugetlb_cma_size = 0;
+}
+
+void __init hugetlb_cma_check(void)
+{
+ if (!hugetlb_cma_size || cma_reserve_called)
+ return;
+
+ pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
+}
+
+bool hugetlb_cma_exclusive_alloc(void)
+{
+ return hugetlb_cma_only;
+}
+
+unsigned long __init hugetlb_cma_total_size(void)
+{
+ return hugetlb_cma_size;
+}
+
+void __init hugetlb_cma_validate_params(void)
+{
+ if (!hugetlb_cma_size)
+ hugetlb_cma_only = false;
+}
+
+bool __init hugetlb_early_cma(struct hstate *h)
+{
+ if (arch_has_huge_bootmem_alloc())
+ return false;
+
+ return hstate_is_gigantic(h) && hugetlb_cma_only;
+}
diff --git a/mm/hugetlb_cma.h b/mm/hugetlb_cma.h
new file mode 100644
index 000000000000..f7d7fb9880a2
--- /dev/null
+++ b/mm/hugetlb_cma.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_HUGETLB_CMA_H
+#define _LINUX_HUGETLB_CMA_H
+
+#ifdef CONFIG_CMA
+void hugetlb_cma_free_folio(struct folio *folio);
+struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask,
+ int nid, nodemask_t *nodemask);
+struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid,
+ bool node_exact);
+void hugetlb_cma_check(void);
+bool hugetlb_cma_exclusive_alloc(void);
+unsigned long hugetlb_cma_total_size(void);
+void hugetlb_cma_validate_params(void);
+bool hugetlb_early_cma(struct hstate *h);
+#else
+static inline void hugetlb_cma_free_folio(struct folio *folio)
+{
+}
+
+static inline struct folio *hugetlb_cma_alloc_folio(struct hstate *h,
+ gfp_t gfp_mask, int nid, nodemask_t *nodemask)
+{
+ return NULL;
+}
+
+static inline
+struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid,
+ bool node_exact)
+{
+ return NULL;
+}
+
+static inline void hugetlb_cma_check(void)
+{
+}
+
+static inline bool hugetlb_cma_exclusive_alloc(void)
+{
+ return false;
+}
+
+static inline unsigned long hugetlb_cma_total_size(void)
+{
+ return 0;
+}
+
+static inline void hugetlb_cma_validate_params(void)
+{
+}
+
+static inline bool hugetlb_early_cma(struct hstate *h)
+{
+ return false;
+}
+#endif
+#endif
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 7735972add01..9a99dfa3c495 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -444,7 +444,11 @@ DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
-core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);
+static int __init hugetlb_vmemmap_optimize_param(char *buf)
+{
+ return kstrtobool(buf, &vmemmap_optimize_enabled);
+}
+early_param("hugetlb_free_vmemmap", hugetlb_vmemmap_optimize_param);
static int __hugetlb_vmemmap_restore_folio(const struct hstate *h,
struct folio *folio, unsigned long flags)
@@ -645,14 +649,39 @@ static int hugetlb_vmemmap_split_folio(const struct hstate *h, struct folio *fol
return vmemmap_remap_split(vmemmap_start, vmemmap_end, vmemmap_reuse);
}
-void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
+static void __hugetlb_vmemmap_optimize_folios(struct hstate *h,
+ struct list_head *folio_list,
+ bool boot)
{
struct folio *folio;
+ int nr_to_optimize;
LIST_HEAD(vmemmap_pages);
unsigned long flags = VMEMMAP_REMAP_NO_TLB_FLUSH | VMEMMAP_SYNCHRONIZE_RCU;
+ nr_to_optimize = 0;
list_for_each_entry(folio, folio_list, lru) {
- int ret = hugetlb_vmemmap_split_folio(h, folio);
+ int ret;
+ unsigned long spfn, epfn;
+
+ if (boot && folio_test_hugetlb_vmemmap_optimized(folio)) {
+ /*
+ * Already optimized by pre-HVO, just map the
+ * mirrored tail page structs RO.
+ */
+ spfn = (unsigned long)&folio->page;
+ epfn = spfn + pages_per_huge_page(h);
+ vmemmap_wrprotect_hvo(spfn, epfn, folio_nid(folio),
+ HUGETLB_VMEMMAP_RESERVE_SIZE);
+ register_page_bootmem_memmap(pfn_to_section_nr(spfn),
+ &folio->page,
+ HUGETLB_VMEMMAP_RESERVE_SIZE);
+ static_branch_inc(&hugetlb_optimize_vmemmap_key);
+ continue;
+ }
+
+ nr_to_optimize++;
+
+ ret = hugetlb_vmemmap_split_folio(h, folio);
/*
* Splitting the PMD requires allocating a page, thus let's fail
@@ -664,6 +693,16 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
break;
}
+ if (!nr_to_optimize)
+ /*
+ * All pre-HVO folios, nothing left to do. It's ok if
+ * there is a mix of pre-HVO and not yet HVO-ed folios
+ * here, as __hugetlb_vmemmap_optimize_folio() will
+ * skip any folios that already have the optimized flag
+ * set, see vmemmap_should_optimize_folio().
+ */
+ goto out;
+
flush_tlb_all();
list_for_each_entry(folio, folio_list, lru) {
@@ -689,10 +728,164 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
}
}
+out:
flush_tlb_all();
free_vmemmap_page_list(&vmemmap_pages);
}
+void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
+{
+ __hugetlb_vmemmap_optimize_folios(h, folio_list, false);
+}
+
+void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h, struct list_head *folio_list)
+{
+ __hugetlb_vmemmap_optimize_folios(h, folio_list, true);
+}
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
+
+/* Return true if a bootmem-allocated HugeTLB page should be pre-HVO-ed */
+static bool vmemmap_should_optimize_bootmem_page(struct huge_bootmem_page *m)
+{
+ unsigned long section_size, psize, pmd_vmemmap_size;
+ phys_addr_t paddr;
+
+ if (!READ_ONCE(vmemmap_optimize_enabled))
+ return false;
+
+ if (!hugetlb_vmemmap_optimizable(m->hstate))
+ return false;
+
+ psize = huge_page_size(m->hstate);
+ paddr = virt_to_phys(m);
+
+ /*
+ * Pre-HVO only works if the bootmem huge page
+ * is aligned to the section size.
+ */
+ section_size = (1UL << PA_SECTION_SHIFT);
+ if (!IS_ALIGNED(paddr, section_size) ||
+ !IS_ALIGNED(psize, section_size))
+ return false;
+
+ /*
+ * The pre-HVO code does not deal with splitting PMDS,
+ * so the bootmem page must be aligned to the number
+ * of base pages that can be mapped with one vmemmap PMD.
+ */
+ pmd_vmemmap_size = (PMD_SIZE / (sizeof(struct page))) << PAGE_SHIFT;
+ if (!IS_ALIGNED(paddr, pmd_vmemmap_size) ||
+ !IS_ALIGNED(psize, pmd_vmemmap_size))
+ return false;
+
+ return true;
+}
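
Editor's note (worked example, assuming x86-64 defaults): with PMD_SIZE of
2 MiB and sizeof(struct page) == 64, one vmemmap PMD covers 2 MiB / 64 =
32768 page structs, i.e. 32768 << PAGE_SHIFT = 128 MiB of memory, and the
section size (1 << PA_SECTION_SHIFT) is likewise 128 MiB; a 1 GiB gigantic
page therefore passes both alignment checks above.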
+
+/*
+ * Initialize memmap section for a gigantic page, HVO-style.
+ */
+void __init hugetlb_vmemmap_init_early(int nid)
+{
+ unsigned long psize, paddr, section_size;
+ unsigned long ns, i, pnum, pfn, nr_pages;
+ unsigned long start, end;
+ struct huge_bootmem_page *m = NULL;
+ void *map;
+
+ /*
+ * Nothing to do if bootmem pages were not allocated
+ * early in boot, or if HVO wasn't enabled in the
+ * first place.
+ */
+ if (!hugetlb_bootmem_allocated())
+ return;
+
+ if (!READ_ONCE(vmemmap_optimize_enabled))
+ return;
+
+ section_size = (1UL << PA_SECTION_SHIFT);
+
+ list_for_each_entry(m, &huge_boot_pages[nid], list) {
+ if (!vmemmap_should_optimize_bootmem_page(m))
+ continue;
+
+ nr_pages = pages_per_huge_page(m->hstate);
+ psize = nr_pages << PAGE_SHIFT;
+ paddr = virt_to_phys(m);
+ pfn = PHYS_PFN(paddr);
+ map = pfn_to_page(pfn);
+ start = (unsigned long)map;
+ end = start + nr_pages * sizeof(struct page);
+
+ if (vmemmap_populate_hvo(start, end, nid,
+ HUGETLB_VMEMMAP_RESERVE_SIZE) < 0)
+ continue;
+
+ memmap_boot_pages_add(HUGETLB_VMEMMAP_RESERVE_SIZE / PAGE_SIZE);
+
+ pnum = pfn_to_section_nr(pfn);
+ ns = psize / section_size;
+
+ for (i = 0; i < ns; i++) {
+ sparse_init_early_section(nid, map, pnum,
+ SECTION_IS_VMEMMAP_PREINIT);
+ map += section_map_size();
+ pnum++;
+ }
+
+ m->flags |= HUGE_BOOTMEM_HVO;
+ }
+}
+
+void __init hugetlb_vmemmap_init_late(int nid)
+{
+ struct huge_bootmem_page *m, *tm;
+ unsigned long phys, nr_pages, start, end;
+ unsigned long pfn, nr_mmap;
+ struct hstate *h;
+ void *map;
+
+ if (!hugetlb_bootmem_allocated())
+ return;
+
+ if (!READ_ONCE(vmemmap_optimize_enabled))
+ return;
+
+ list_for_each_entry_safe(m, tm, &huge_boot_pages[nid], list) {
+ if (!(m->flags & HUGE_BOOTMEM_HVO))
+ continue;
+
+ phys = virt_to_phys(m);
+ h = m->hstate;
+ pfn = PHYS_PFN(phys);
+ nr_pages = pages_per_huge_page(h);
+
+ if (!hugetlb_bootmem_page_zones_valid(nid, m)) {
+ /*
+ * Oops, the hugetlb page spans multiple zones.
+ * Remove it from the list, and undo HVO.
+ */
+ list_del(&m->list);
+
+ map = pfn_to_page(pfn);
+
+ start = (unsigned long)map;
+ end = start + nr_pages * sizeof(struct page);
+
+ vmemmap_undo_hvo(start, end, nid,
+ HUGETLB_VMEMMAP_RESERVE_SIZE);
+ nr_mmap = end - start - HUGETLB_VMEMMAP_RESERVE_SIZE;
+ memmap_boot_pages_add(DIV_ROUND_UP(nr_mmap, PAGE_SIZE));
+
+ memblock_phys_free(phys, huge_page_size(h));
+ continue;
+ } else
+ m->flags |= HUGE_BOOTMEM_ZONES_VALID;
+ }
+}
+#endif
+
static const struct ctl_table hugetlb_vmemmap_sysctls[] = {
{
.procname = "hugetlb_optimize_vmemmap",
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
index 2fcae92d3359..18b490825215 100644
--- a/mm/hugetlb_vmemmap.h
+++ b/mm/hugetlb_vmemmap.h
@@ -9,6 +9,8 @@
#ifndef _LINUX_HUGETLB_VMEMMAP_H
#define _LINUX_HUGETLB_VMEMMAP_H
#include <linux/hugetlb.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
/*
* Reserve one vmemmap page, all vmemmap addresses are mapped to it. See
@@ -24,6 +26,12 @@ long hugetlb_vmemmap_restore_folios(const struct hstate *h,
struct list_head *non_hvo_folios);
void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio);
void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list);
+void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h, struct list_head *folio_list);
+#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
+void hugetlb_vmemmap_init_early(int nid);
+void hugetlb_vmemmap_init_late(int nid);
+#endif
+
static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
{
@@ -48,7 +56,7 @@ static inline int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct f
return 0;
}
-static long hugetlb_vmemmap_restore_folios(const struct hstate *h,
+static inline long hugetlb_vmemmap_restore_folios(const struct hstate *h,
struct list_head *folio_list,
struct list_head *non_hvo_folios)
{
@@ -64,6 +72,19 @@ static inline void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list
{
}
+static inline void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h,
+ struct list_head *folio_list)
+{
+}
+
+static inline void hugetlb_vmemmap_init_early(int nid)
+{
+}
+
+static inline void hugetlb_vmemmap_init_late(int nid)
+{
+}
+
static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
return 0;
diff --git a/mm/init-mm.c b/mm/init-mm.c
index 6af3ad675930..4600e7605cab 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -40,6 +40,7 @@ struct mm_struct init_mm = {
.arg_lock = __SPIN_LOCK_UNLOCKED(init_mm.arg_lock),
.mmlist = LIST_HEAD_INIT(init_mm.mmlist),
#ifdef CONFIG_PER_VMA_LOCK
+ .vma_writer_wait = __RCUWAIT_INITIALIZER(init_mm.vma_writer_wait),
.mm_lock_seq = SEQCNT_ZERO(init_mm.mm_lock_seq),
#endif
.user_ns = &init_user_ns,
diff --git a/mm/internal.h b/mm/internal.h
index 8d1bada7323a..50c2f590b2d0 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -25,6 +25,47 @@
struct folio_batch;
/*
+ * Maintains state across a page table move. The operation assumes both source
+ * and destination VMAs already exist and are specified by the user.
+ *
+ * Partial moves are permitted, but the old and new ranges must both reside
+ * within a VMA.
+ *
+ * mmap lock must be held in write and VMA write locks must be held on any VMA
+ * that is visible.
+ *
+ * Use the PAGETABLE_MOVE() macro to initialise this struct.
+ *
+ * The old_addr and new_addr fields are updated as the page table move is
+ * executed.
+ *
+ * NOTE: The page table move is effected by reading from [old_addr, old_end),
+ * and old_addr may be updated for better page table alignment, so len_in
+ * represents the length of the range being copied as specified by the user.
+ */
+struct pagetable_move_control {
+ struct vm_area_struct *old; /* Source VMA. */
+ struct vm_area_struct *new; /* Destination VMA. */
+ unsigned long old_addr; /* Address from which the move begins. */
+ unsigned long old_end; /* Exclusive address at which old range ends. */
+ unsigned long new_addr; /* Address to move page tables to. */
+ unsigned long len_in; /* Bytes to remap specified by user. */
+
+ bool need_rmap_locks; /* Do rmap locks need to be taken? */
+ bool for_stack; /* Is this an early temp stack being moved? */
+};
+
+#define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_) \
+ struct pagetable_move_control name = { \
+ .old = old_, \
+ .new = new_, \
+ .old_addr = old_addr_, \
+ .old_end = (old_addr_) + (len_), \
+ .new_addr = new_addr_, \
+ .len_in = len_, \
+ }
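A hedged sketch of how the macro and struct compose with the reworked move_page_tables() (declared later in this header); the wrapper below is hypothetical:

/* Hypothetical caller: move page tables between two existing VMAs.
 * The caller must hold the mmap lock for write and the VMA write locks. */
static unsigned long do_table_move(struct vm_area_struct *old_vma,
				   struct vm_area_struct *new_vma,
				   unsigned long old_addr,
				   unsigned long new_addr, unsigned long len)
{
	PAGETABLE_MOVE(pmc, old_vma, new_vma, old_addr, new_addr, len);

	return move_page_tables(&pmc);	/* how much of the range was moved */
}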
+
+/*
* The set of flags that only affect watermark checking and reclaim
* behaviour. This is used by the MM to obey the caller constraints
* about IO, FS and watermark checking while ignoring placement
@@ -84,6 +125,8 @@ void page_writeback_init(void);
*/
static inline int folio_nr_pages_mapped(const struct folio *folio)
{
+ if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT))
+ return -1;
return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
}
@@ -493,6 +536,7 @@ extern char * const zone_names[MAX_NR_ZONES];
DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
extern int min_free_kbytes;
+extern int defrag_mode;
void setup_per_zone_wmarks(void);
void calculate_min_free_kbytes(void);
@@ -658,6 +702,8 @@ static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
}
void set_zone_contiguous(struct zone *zone);
+bool pfn_range_intersects_zones(int nid, unsigned long start_pfn,
+ unsigned long nr_pages);
static inline void clear_zone_contiguous(struct zone *zone)
{
@@ -682,8 +728,8 @@ static inline void folio_set_order(struct folio *folio, unsigned int order)
return;
folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
-#ifdef CONFIG_64BIT
- folio->_folio_nr_pages = 1U << order;
+#ifdef NR_PAGES_IN_LARGE_FOLIO
+ folio->_nr_pages = 1U << order;
#endif
}
@@ -719,9 +765,17 @@ static inline void prep_compound_head(struct page *page, unsigned int order)
folio_set_order(folio, order);
atomic_set(&folio->_large_mapcount, -1);
- atomic_set(&folio->_entire_mapcount, -1);
- atomic_set(&folio->_nr_pages_mapped, 0);
- atomic_set(&folio->_pincount, 0);
+ if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
+ atomic_set(&folio->_nr_pages_mapped, 0);
+ if (IS_ENABLED(CONFIG_MM_ID)) {
+ folio->_mm_ids = 0;
+ folio->_mm_id_mapcount[0] = -1;
+ folio->_mm_id_mapcount[1] = -1;
+ }
+ if (IS_ENABLED(CONFIG_64BIT) || order > 1) {
+ atomic_set(&folio->_pincount, 0);
+ atomic_set(&folio->_entire_mapcount, -1);
+ }
if (order > 1)
INIT_LIST_HEAD(&folio->_deferred_list);
}
@@ -735,8 +789,6 @@ static inline void prep_compound_tail(struct page *head, int tail_idx)
set_page_private(p, 0);
}
-extern void prep_compound_page(struct page *page, unsigned int order);
-
void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags);
extern bool free_pages_prepare(struct page *page, unsigned int order);
@@ -846,8 +898,24 @@ void init_cma_reserved_pageblock(struct page *page);
#endif /* CONFIG_COMPACTION || CONFIG_CMA */
+struct cma;
+
+#ifdef CONFIG_CMA
+void *cma_reserve_early(struct cma *cma, unsigned long size);
+void init_cma_pageblock(struct page *page);
+#else
+static inline void *cma_reserve_early(struct cma *cma, unsigned long size)
+{
+ return NULL;
+}
+static inline void init_cma_pageblock(struct page *page)
+{
+}
+#endif
+
+
int find_suitable_fallback(struct free_area *area, unsigned int order,
- int migratetype, bool only_stealable, bool *can_steal);
+ int migratetype, bool claim_only, bool *claim_block);
static inline bool free_area_empty(struct free_area *area, int migratetype)
{
@@ -1419,7 +1487,8 @@ static inline bool gup_must_unshare(struct vm_area_struct *vma,
}
extern bool mirrored_kernelcore;
-extern bool memblock_has_mirror(void);
+bool memblock_has_mirror(void);
+void memblock_free_all(void);
static __always_inline void vma_set_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end,
@@ -1460,6 +1529,7 @@ static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte
void __meminit __init_single_page(struct page *page, unsigned long pfn,
unsigned long zone, int nid);
+void __meminit __init_page_from_nid(unsigned long pfn, int nid);
/* shrinker related functions */
unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
@@ -1521,10 +1591,7 @@ extern struct list_lru shadow_nodes;
} while (0)
/* mremap.c */
-unsigned long move_page_tables(struct vm_area_struct *vma,
- unsigned long old_addr, struct vm_area_struct *new_vma,
- unsigned long new_addr, unsigned long len,
- bool need_rmap_locks, bool for_stack);
+unsigned long move_page_tables(struct pagetable_move_control *pmc);
#ifdef CONFIG_UNACCEPTED_MEMORY
void accept_page(struct page *page);
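Several hunks above (folio_nr_pages_mapped(), prep_compound_head()) gate fields with IS_ENABLED() rather than #ifdef, so both branches stay visible to the compiler and the dead one is discarded after type checking. A distilled sketch of the pattern, with an illustrative struct:

/* Pattern sketch: compile-time gate without preprocessor blocks. */
struct mapped_counter {
	atomic_t val;
};

static inline int tracked_count(const struct mapped_counter *c)
{
	if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT))
		return -1;	/* counter not maintained in this config */
	return atomic_read(&c->val);
}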
diff --git a/mm/ioremap.c b/mm/ioremap.c
index 3e049dfb28bd..c36dd9f62fd5 100644
--- a/mm/ioremap.c
+++ b/mm/ioremap.c
@@ -50,9 +50,9 @@ void __iomem *generic_ioremap_prot(phys_addr_t phys_addr, size_t size,
#ifndef ioremap_prot
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
- unsigned long prot)
+ pgprot_t prot)
{
- return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
+ return generic_ioremap_prot(phys_addr, size, prot);
}
EXPORT_SYMBOL(ioremap_prot);
#endif
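With the change above, the prot argument is a typed pgprot_t end to end instead of a raw unsigned long. A hedged caller sketch (the helper below is made up):

/* Hypothetical caller: map device registers uncached. */
void __iomem *map_device_regs(phys_addr_t base, size_t len)
{
	return ioremap_prot(base, len, pgprot_noncached(PAGE_KERNEL));
}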
diff --git a/mm/kasan/kasan_test_c.c b/mm/kasan/kasan_test_c.c
index 59d673400085..3ea317837c2d 100644
--- a/mm/kasan/kasan_test_c.c
+++ b/mm/kasan/kasan_test_c.c
@@ -1073,14 +1073,11 @@ static void kmem_cache_rcu_uaf(struct kunit *test)
kmem_cache_destroy(cache);
}
-static void empty_cache_ctor(void *object) { }
-
static void kmem_cache_double_destroy(struct kunit *test)
{
struct kmem_cache *cache;
- /* Provide a constructor to prevent cache merging. */
- cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
+ cache = kmem_cache_create("test_cache", 200, 0, SLAB_NO_MERGE, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
kmem_cache_destroy(cache);
KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 5f0be134141e..cc945c6ab3bd 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -607,7 +607,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio);
/* See hpage_collapse_scan_pmd(). */
- if (folio_likely_mapped_shared(folio)) {
+ if (folio_maybe_mapped_shared(folio)) {
++shared;
if (cc->is_khugepaged &&
shared > khugepaged_max_ptes_shared) {
@@ -1359,11 +1359,9 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
/*
* We treat a single page as shared if any part of the THP
- * is shared. "False negatives" from
- * folio_likely_mapped_shared() are not expected to matter
- * much in practice.
+ * is shared.
*/
- if (folio_likely_mapped_shared(folio)) {
+ if (folio_maybe_mapped_shared(folio)) {
++shared;
if (cc->is_khugepaged &&
shared > khugepaged_max_ptes_shared) {
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index c6ed68604136..c12cef3eeb32 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -352,6 +352,15 @@ static bool unreferenced_object(struct kmemleak_object *object)
jiffies_last_scan);
}
+static const char *__object_type_str(struct kmemleak_object *object)
+{
+ if (object->flags & OBJECT_PHYS)
+ return " (phys)";
+ if (object->flags & OBJECT_PERCPU)
+ return " (percpu)";
+ return "";
+}
+
/*
* Printing of the unreferenced objects information to the seq file. The
* print_unreferenced function must be called with the object->lock held.
@@ -364,8 +373,9 @@ static void print_unreferenced(struct seq_file *seq,
unsigned int nr_entries;
nr_entries = stack_depot_fetch(object->trace_handle, &entries);
- warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
- object->pointer, object->size);
+ warn_or_seq_printf(seq, "unreferenced object%s 0x%08lx (size %zu):\n",
+ __object_type_str(object),
+ object->pointer, object->size);
warn_or_seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu\n",
object->comm, object->pid, object->jiffies);
hex_dump_object(seq, object);
@@ -384,10 +394,10 @@ static void print_unreferenced(struct seq_file *seq,
*/
static void dump_object_info(struct kmemleak_object *object)
{
- pr_notice("Object 0x%08lx (size %zu):\n",
- object->pointer, object->size);
+ pr_notice("Object%s 0x%08lx (size %zu):\n",
+ __object_type_str(object), object->pointer, object->size);
pr_notice(" comm \"%s\", pid %d, jiffies %lu\n",
- object->comm, object->pid, object->jiffies);
+ object->comm, object->pid, object->jiffies);
pr_notice(" min_count = %d\n", object->min_count);
pr_notice(" count = %d\n", object->count);
pr_notice(" flags = 0x%x\n", object->flags);
@@ -1998,25 +2008,41 @@ static int kmemleak_open(struct inode *inode, struct file *file)
return seq_open(file, &kmemleak_seq_ops);
}
-static int dump_str_object_info(const char *str)
+static bool __dump_str_object_info(unsigned long addr, unsigned int objflags)
{
unsigned long flags;
struct kmemleak_object *object;
+
+ object = __find_and_get_object(addr, 1, objflags);
+ if (!object)
+ return false;
+
+ raw_spin_lock_irqsave(&object->lock, flags);
+ dump_object_info(object);
+ raw_spin_unlock_irqrestore(&object->lock, flags);
+
+ put_object(object);
+
+ return true;
+}
+
+static int dump_str_object_info(const char *str)
+{
unsigned long addr;
+ bool found = false;
if (kstrtoul(str, 0, &addr))
return -EINVAL;
- object = find_and_get_object(addr, 0);
- if (!object) {
+
+ found |= __dump_str_object_info(addr, 0);
+ found |= __dump_str_object_info(addr, OBJECT_PHYS);
+ found |= __dump_str_object_info(addr, OBJECT_PERCPU);
+
+ if (!found) {
pr_info("Unknown object at 0x%08lx\n", addr);
return -EINVAL;
}
- raw_spin_lock_irqsave(&object->lock, flags);
- dump_object_info(object);
- raw_spin_unlock_irqrestore(&object->lock, flags);
-
- put_object(object);
return 0;
}
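With the rework above, a single dump request now searches the virtual, physical, and per-CPU object trees. A userspace sketch against the kmemleak debugfs file (the address is made up; requires CONFIG_DEBUG_KMEMLEAK):

/* Userspace sketch: ask kmemleak to dump the object at an address. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/kmemleak", "w");

	if (!f)
		return 1;
	/* Now matches plain, (phys) and (percpu) objects alike. */
	fprintf(f, "dump=0xffff8881000a2000\n");
	return fclose(f) ? 1 : 0;
}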
diff --git a/mm/ksm.c b/mm/ksm.c
index 8be2b144fefd..8583fb91ef13 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1270,8 +1270,15 @@ static int write_protect_page(struct vm_area_struct *vma, struct folio *folio,
if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
goto out_unlock;
- anon_exclusive = PageAnonExclusive(&folio->page);
entry = ptep_get(pvmw.pte);
+ /*
+ * Handle PFN swap PTEs, such as device-exclusive ones, that actually
+ * map pages: give up just like the next folio_walk would.
+ */
+ if (unlikely(!pte_present(entry)))
+ goto out_unlock;
+
+ anon_exclusive = PageAnonExclusive(&folio->page);
if (pte_write(entry) || pte_dirty(entry) ||
anon_exclusive || mm_tlb_flush_pending(mm)) {
swapped = folio_test_swapcache(folio);
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 7d69434c70e0..490473af3122 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -510,7 +510,7 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
gfp_t gfp)
{
unsigned long flags;
- struct list_lru_memcg *mlru;
+ struct list_lru_memcg *mlru = NULL;
struct mem_cgroup *pos, *parent;
XA_STATE(xas, &lru->xa, 0);
@@ -535,9 +535,11 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
parent = parent_mem_cgroup(pos);
}
- mlru = memcg_init_list_lru_one(lru, gfp);
- if (!mlru)
- return -ENOMEM;
+ if (!mlru) {
+ mlru = memcg_init_list_lru_one(lru, gfp);
+ if (!mlru)
+ return -ENOMEM;
+ }
xas_set(&xas, pos->kmemcg_id);
do {
xas_lock_irqsave(&xas, flags);
@@ -548,10 +550,11 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
}
xas_unlock_irqrestore(&xas, flags);
} while (xas_nomem(&xas, gfp));
- if (mlru)
- kfree(mlru);
} while (pos != memcg && !css_is_dying(&pos->css));
+ if (unlikely(mlru))
+ kfree(mlru);
+
return xas_error(&xas);
}
#else
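The fix above hoists the allocation out of the retry loop: mlru is allocated at most once per pass that needs it, reused while walking up the parents, and freed only if the final iteration did not consume it. A distilled sketch of the pattern with hypothetical helpers:

/* Pattern sketch: one allocation reused across loop iterations. */
struct item *it = NULL;
int key = first_key;

do {
	if (!it) {
		it = alloc_item(gfp);		/* hypothetical */
		if (!it)
			return -ENOMEM;
	}
	if (install_item(&tree, key, it))	/* hypothetical */
		it = NULL;	/* consumed; next pass allocates anew */
} while (next_key(&key));			/* hypothetical */

kfree(it);	/* leftover only if the last pass did not use it */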
diff --git a/mm/madvise.c b/mm/madvise.c
index 08b207f8e61e..b17f684322ad 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -387,7 +387,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
folio = pmd_folio(orig_pmd);
/* Do not interfere with other mappings of this folio */
- if (folio_likely_mapped_shared(folio))
+ if (folio_maybe_mapped_shared(folio))
goto huge_unlock;
if (pageout_anon_only_filter && !folio_test_anon(folio))
@@ -486,7 +486,7 @@ restart:
if (nr < folio_nr_pages(folio)) {
int err;
- if (folio_likely_mapped_shared(folio))
+ if (folio_maybe_mapped_shared(folio))
continue;
if (pageout_anon_only_filter && !folio_test_anon(folio))
continue;
@@ -721,7 +721,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
if (nr < folio_nr_pages(folio)) {
int err;
- if (folio_likely_mapped_shared(folio))
+ if (folio_maybe_mapped_shared(folio))
continue;
if (!folio_trylock(folio))
continue;
@@ -1051,13 +1051,7 @@ static bool is_valid_guard_vma(struct vm_area_struct *vma, bool allow_locked)
if (!allow_locked)
disallowed |= VM_LOCKED;
- if (!vma_is_anonymous(vma))
- return false;
-
- if ((vma->vm_flags & (VM_MAYWRITE | disallowed)) != VM_MAYWRITE)
- return false;
-
- return true;
+ return !(vma->vm_flags & disallowed);
}
static bool is_guard_pte_marker(pte_t ptent)
@@ -1398,7 +1392,32 @@ static int madvise_inject_error(int behavior,
return 0;
}
-#endif
+
+static bool is_memory_failure(int behavior)
+{
+ switch (behavior) {
+ case MADV_HWPOISON:
+ case MADV_SOFT_OFFLINE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+#else
+
+static int madvise_inject_error(int behavior,
+ unsigned long start, unsigned long end)
+{
+ return 0;
+}
+
+static bool is_memory_failure(int behavior)
+{
+ return false;
+}
+
+#endif /* CONFIG_MEMORY_FAILURE */
static bool
madvise_behavior_valid(int behavior)
@@ -1574,6 +1593,111 @@ int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
madvise_vma_anon_name);
}
#endif /* CONFIG_ANON_VMA_NAME */
+
+static int madvise_lock(struct mm_struct *mm, int behavior)
+{
+ if (is_memory_failure(behavior))
+ return 0;
+
+ if (madvise_need_mmap_write(behavior)) {
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
+ } else {
+ mmap_read_lock(mm);
+ }
+ return 0;
+}
+
+static void madvise_unlock(struct mm_struct *mm, int behavior)
+{
+ if (is_memory_failure(behavior))
+ return;
+
+ if (madvise_need_mmap_write(behavior))
+ mmap_write_unlock(mm);
+ else
+ mmap_read_unlock(mm);
+}
+
+static bool is_valid_madvise(unsigned long start, size_t len_in, int behavior)
+{
+ size_t len;
+
+ if (!madvise_behavior_valid(behavior))
+ return false;
+
+ if (!PAGE_ALIGNED(start))
+ return false;
+ len = PAGE_ALIGN(len_in);
+
+ /* Check to see whether len was rounded up from small -ve to zero */
+ if (len_in && !len)
+ return false;
+
+ if (start + len < start)
+ return false;
+
+ return true;
+}
+
+/*
+ * madvise_should_skip() - Return whether the request is invalid or a no-op.
+ * @start: Start address of madvise-requested address range.
+ * @len_in: Length of madvise-requested address range.
+ * @behavior: Requested madvise behavior.
+ * @err: Pointer to store an error code from the check.
+ *
+ * If the specified behavior is invalid, or if nothing would occur for the
+ * given range, we skip the operation. This function returns true in those
+ * cases and stores the result in @err: -EINVAL for an invalid request, 0
+ * for a no-op. Otherwise it returns false.
+ */
+static bool madvise_should_skip(unsigned long start, size_t len_in,
+ int behavior, int *err)
+{
+ if (!is_valid_madvise(start, len_in, behavior)) {
+ *err = -EINVAL;
+ return true;
+ }
+ if (start + PAGE_ALIGN(len_in) == start) {
+ *err = 0;
+ return true;
+ }
+ return false;
+}
+
+static bool is_madvise_populate(int behavior)
+{
+ switch (behavior) {
+ case MADV_POPULATE_READ:
+ case MADV_POPULATE_WRITE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int madvise_do_behavior(struct mm_struct *mm,
+ unsigned long start, size_t len_in, int behavior)
+{
+ struct blk_plug plug;
+ unsigned long end;
+ int error;
+
+ if (is_memory_failure(behavior))
+ return madvise_inject_error(behavior, start, start + len_in);
+ start = untagged_addr_remote(mm, start);
+ end = start + PAGE_ALIGN(len_in);
+
+ blk_start_plug(&plug);
+ if (is_madvise_populate(behavior))
+ error = madvise_populate(mm, start, end, behavior);
+ else
+ error = madvise_walk_vmas(mm, start, end, behavior,
+ madvise_vma_behavior);
+ blk_finish_plug(&plug);
+ return error;
+}
+
/*
* The madvise(2) system call.
*
@@ -1648,63 +1772,15 @@ int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
*/
int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior)
{
- unsigned long end;
int error;
- int write;
- size_t len;
- struct blk_plug plug;
-
- if (!madvise_behavior_valid(behavior))
- return -EINVAL;
-
- if (!PAGE_ALIGNED(start))
- return -EINVAL;
- len = PAGE_ALIGN(len_in);
- /* Check to see whether len was rounded up from small -ve to zero */
- if (len_in && !len)
- return -EINVAL;
-
- end = start + len;
- if (end < start)
- return -EINVAL;
-
- if (end == start)
- return 0;
-
-#ifdef CONFIG_MEMORY_FAILURE
- if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
- return madvise_inject_error(behavior, start, start + len_in);
-#endif
-
- write = madvise_need_mmap_write(behavior);
- if (write) {
- if (mmap_write_lock_killable(mm))
- return -EINTR;
- } else {
- mmap_read_lock(mm);
- }
-
- start = untagged_addr_remote(mm, start);
- end = start + len;
-
- blk_start_plug(&plug);
- switch (behavior) {
- case MADV_POPULATE_READ:
- case MADV_POPULATE_WRITE:
- error = madvise_populate(mm, start, end, behavior);
- break;
- default:
- error = madvise_walk_vmas(mm, start, end, behavior,
- madvise_vma_behavior);
- break;
- }
- blk_finish_plug(&plug);
-
- if (write)
- mmap_write_unlock(mm);
- else
- mmap_read_unlock(mm);
+ if (madvise_should_skip(start, len_in, behavior, &error))
+ return error;
+ error = madvise_lock(mm, behavior);
+ if (error)
+ return error;
+ error = madvise_do_behavior(mm, start, len_in, behavior);
+ madvise_unlock(mm, behavior);
return error;
}
@@ -1723,16 +1799,26 @@ static ssize_t vector_madvise(struct mm_struct *mm, struct iov_iter *iter,
total_len = iov_iter_count(iter);
+ ret = madvise_lock(mm, behavior);
+ if (ret)
+ return ret;
+
while (iov_iter_count(iter)) {
- ret = do_madvise(mm, (unsigned long)iter_iov_addr(iter),
- iter_iov_len(iter), behavior);
+ unsigned long start = (unsigned long)iter_iov_addr(iter);
+ size_t len_in = iter_iov_len(iter);
+ int error;
+
+ if (madvise_should_skip(start, len_in, behavior, &error))
+ ret = error;
+ else
+ ret = madvise_do_behavior(mm, start, len_in, behavior);
/*
* An madvise operation is attempting to restart the syscall,
* but we cannot proceed as it would not be correct to repeat
* the operation in aggregate, and would be surprising to the
* user.
*
- * As we have already dropped locks, it is safe to just loop and
+ * We drop and reacquire locks so it is safe to just loop and
* try again. We check for fatal signals in case we need exit
* early anyway.
*/
@@ -1741,12 +1827,17 @@ static ssize_t vector_madvise(struct mm_struct *mm, struct iov_iter *iter,
ret = -EINTR;
break;
}
+
+ /* Drop and reacquire lock to unwind race. */
+ madvise_unlock(mm, behavior);
+ madvise_lock(mm, behavior);
continue;
}
if (ret < 0)
break;
iov_iter_advance(iter, iter_iov_len(iter));
}
+ madvise_unlock(mm, behavior);
ret = (total_len - iov_iter_count(iter)) ? : ret;
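The validity rules factored out above are visible from userspace; a hedged sketch exercising two of them (a zero-length request succeeds as a no-op, an unaligned start fails with EINVAL):

/* Userspace sketch of madvise_should_skip()'s two outcomes. */
#include <assert.h>
#include <errno.h>
#include <sys/mman.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	assert(p != MAP_FAILED);
	assert(madvise(p, 0, MADV_DONTNEED) == 0);	  /* no-op: success */
	assert(madvise(p + 1, 4096, MADV_DONTNEED) == -1  /* unaligned start */
	       && errno == EINVAL);
	return 0;
}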
diff --git a/mm/memblock.c b/mm/memblock.c
index 8cd95f60015d..0a53db4d9f7b 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -2165,8 +2165,10 @@ static unsigned long __init __free_memory_core(phys_addr_t start,
phys_addr_t end)
{
unsigned long start_pfn = PFN_UP(start);
- unsigned long end_pfn = min_t(unsigned long,
- PFN_DOWN(end), max_low_pfn);
+ unsigned long end_pfn = PFN_DOWN(end);
+
+ if (!IS_ENABLED(CONFIG_HIGHMEM) && end_pfn > max_low_pfn)
+ end_pfn = max_low_pfn;
if (start_pfn >= end_pfn)
return 0;
diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c
index 2e9fa431bbf5..8660908850dc 100644
--- a/mm/memcontrol-v1.c
+++ b/mm/memcontrol-v1.c
@@ -490,6 +490,19 @@ static void mem_cgroup_threshold(struct mem_cgroup *memcg)
}
/* Cgroup1: threshold notifications & softlimit tree updates */
+
+/*
+ * Per memcg event counter is incremented at every pagein/pageout. With THP,
+ * it will be incremented by the number of pages. This counter is used
+ * to trigger some periodic events. This is straightforward and better
+ * than using jiffies etc. to handle periodic memcg events.
+ */
+enum mem_cgroup_events_target {
+ MEM_CGROUP_TARGET_THRESH,
+ MEM_CGROUP_TARGET_SOFTLIMIT,
+ MEM_CGROUP_NTARGETS,
+};
+
struct memcg1_events_percpu {
unsigned long nr_page_events;
unsigned long targets[MEM_CGROUP_NTARGETS];
@@ -568,8 +581,59 @@ void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
local_irq_restore(flags);
}
-void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg)
+/**
+ * memcg1_swapout - transfer a memsw charge to swap
+ * @folio: folio whose memsw charge to transfer
+ * @entry: swap entry to move the charge to
+ *
+ * Transfer the memsw charge of @folio to @entry.
+ */
+void memcg1_swapout(struct folio *folio, swp_entry_t entry)
{
+ struct mem_cgroup *memcg, *swap_memcg;
+ unsigned int nr_entries;
+
+ VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
+ VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
+
+ if (mem_cgroup_disabled())
+ return;
+
+ if (!do_memsw_account())
+ return;
+
+ memcg = folio_memcg(folio);
+
+ VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
+ if (!memcg)
+ return;
+
+ /*
+ * In case the memcg owning these pages has been offlined and doesn't
+ * have an ID allocated to it anymore, charge the closest online
+ * ancestor for the swap instead and transfer the memory+swap charge.
+ */
+ swap_memcg = mem_cgroup_id_get_online(memcg);
+ nr_entries = folio_nr_pages(folio);
+ /* Get references for the tail pages, too */
+ if (nr_entries > 1)
+ mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
+ mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
+
+ swap_cgroup_record(folio, mem_cgroup_id(memcg), entry);
+
+ folio_unqueue_deferred_split(folio);
+ folio->memcg_data = 0;
+
+ if (!mem_cgroup_is_root(memcg))
+ page_counter_uncharge(&memcg->memory, nr_entries);
+
+ if (memcg != swap_memcg) {
+ if (!mem_cgroup_is_root(swap_memcg))
+ page_counter_charge(&swap_memcg->memsw, nr_entries);
+ page_counter_uncharge(&memcg->memsw, nr_entries);
+ }
+
/*
* Interrupts should be disabled here because the caller holds the
* i_pages lock which is taken with interrupts-off. It is
@@ -581,6 +645,42 @@ void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg)
memcg1_charge_statistics(memcg, -folio_nr_pages(folio));
preempt_enable_nested();
memcg1_check_events(memcg, folio_nid(folio));
+
+ css_put(&memcg->css);
+}
+
+/*
+ * memcg1_swapin - uncharge swap slot
+ * @entry: the first swap entry for which the pages are charged
+ * @nr_pages: number of pages which will be uncharged
+ *
+ * Call this function after successfully adding the charged page to swapcache.
+ *
+ * Note: This function assumes the page for which the swap slot is being
+ * uncharged is an order-0 page.
+ */
+void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages)
+{
+ /*
+ * Cgroup1's unified memory+swap counter has been charged with the
+ * new swapcache page, finish the transfer by uncharging the swap
+ * slot. The swap slot would also get uncharged when it dies, but
+ * it can stick around indefinitely and we'd count the page twice
+ * the entire time.
+ *
+ * Cgroup2 has separate resource counters for memory and swap,
+ * so this is a non-issue here. Memory and swap charge lifetimes
+ * correspond 1:1 to page and swap slot lifetimes: we charge the
+ * page to memory here, and uncharge swap when the slot is freed.
+ */
+ if (do_memsw_account()) {
+ /*
+ * The swap entry might not get freed for a long time,
+ * let's not wait for it. The page already received a
+ * memory+swap charge, drop the swap entry duplicate.
+ */
+ mem_cgroup_uncharge_swap(entry, nr_pages);
+ }
}
void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
diff --git a/mm/memcontrol-v1.h b/mm/memcontrol-v1.h
index 144d71b65907..6358464bb416 100644
--- a/mm/memcontrol-v1.h
+++ b/mm/memcontrol-v1.h
@@ -7,21 +7,6 @@
/* Cgroup v1 and v2 common declarations */
-int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
- unsigned int nr_pages);
-
-static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
- unsigned int nr_pages)
-{
- if (mem_cgroup_is_root(memcg))
- return 0;
-
- return try_charge_memcg(memcg, gfp_mask, nr_pages);
-}
-
-void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n);
-void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n);
-
/*
* Iteration constructs for visiting all cgroups (under a tree). If
* loops are exited prematurely (break), mem_cgroup_iter_break() must
@@ -37,38 +22,29 @@ void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n);
iter != NULL; \
iter = mem_cgroup_iter(NULL, iter, NULL))
-/* Whether legacy memory+swap accounting is active */
-static inline bool do_memsw_account(void)
-{
- return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
-}
-
-/*
- * Per memcg event counter is incremented at every pagein/pageout. With THP,
- * it will be incremented by the number of pages. This counter is used
- * to trigger some periodic events. This is straightforward and better
- * than using jiffies etc. to handle periodic memcg event.
- */
-enum mem_cgroup_events_target {
- MEM_CGROUP_TARGET_THRESH,
- MEM_CGROUP_TARGET_SOFTLIMIT,
- MEM_CGROUP_NTARGETS,
-};
-
unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap);
void drain_all_stock(struct mem_cgroup *root_memcg);
unsigned long memcg_events(struct mem_cgroup *memcg, int event);
-unsigned long memcg_events_local(struct mem_cgroup *memcg, int event);
-unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx);
unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item);
-unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item);
int memory_stat_show(struct seq_file *m, void *v);
+void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n);
+struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg);
+
/* Cgroup v1-specific declarations */
#ifdef CONFIG_MEMCG_V1
+/* Whether legacy memory+swap accounting is active */
+static inline bool do_memsw_account(void)
+{
+ return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
+}
+
+unsigned long memcg_events_local(struct mem_cgroup *memcg, int event);
+unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx);
+unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item);
bool memcg1_alloc_events(struct mem_cgroup *memcg);
void memcg1_free_events(struct mem_cgroup *memcg);
@@ -96,7 +72,6 @@ void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked);
void memcg1_oom_recover(struct mem_cgroup *memcg);
void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
-void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg);
void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
unsigned long nr_memory, int nid);
@@ -119,6 +94,7 @@ extern struct cftype mem_cgroup_legacy_files[];
#else /* CONFIG_MEMCG_V1 */
+static inline bool do_memsw_account(void) { return false; }
static inline bool memcg1_alloc_events(struct mem_cgroup *memcg) { return true; }
static inline void memcg1_free_events(struct mem_cgroup *memcg) {}
@@ -134,8 +110,6 @@ static inline void memcg1_oom_recover(struct mem_cgroup *memcg) {}
static inline void memcg1_commit_charge(struct folio *folio,
struct mem_cgroup *memcg) {}
-static inline void memcg1_swapout(struct folio *folio, struct mem_cgroup *memcg) {}
-
static inline void memcg1_uncharge_batch(struct mem_cgroup *memcg,
unsigned long pgpgout,
unsigned long nr_memory, int nid) {}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 83c2df73e4b6..421740f1bcdc 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -315,6 +315,7 @@ static const unsigned int memcg_node_stat_items[] = {
PGDEMOTE_KSWAPD,
PGDEMOTE_DIRECT,
PGDEMOTE_KHUGEPAGED,
+ PGDEMOTE_PROACTIVE,
#ifdef CONFIG_HUGETLB_PAGE
NR_HUGETLB,
#endif
@@ -431,9 +432,11 @@ static const unsigned int memcg_vm_event_stat[] = {
PGSCAN_KSWAPD,
PGSCAN_DIRECT,
PGSCAN_KHUGEPAGED,
+ PGSCAN_PROACTIVE,
PGSTEAL_KSWAPD,
PGSTEAL_DIRECT,
PGSTEAL_KHUGEPAGED,
+ PGSTEAL_PROACTIVE,
PGFAULT,
PGMAJFAULT,
PGREFILL,
@@ -706,6 +709,7 @@ void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
trace_mod_memcg_state(memcg, idx, val);
}
+#ifdef CONFIG_MEMCG_V1
/* idx can be of type enum memcg_stat_item or node_stat_item. */
unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
{
@@ -722,6 +726,7 @@ unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
#endif
return x;
}
+#endif
static void __mod_memcg_lruvec_state(struct lruvec *lruvec,
enum node_stat_item idx,
@@ -869,6 +874,7 @@ unsigned long memcg_events(struct mem_cgroup *memcg, int event)
return READ_ONCE(memcg->vmstats->events[i]);
}
+#ifdef CONFIG_MEMCG_V1
unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
int i = memcg_events_index(event);
@@ -878,6 +884,7 @@ unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
return READ_ONCE(memcg->vmstats->events_local[i]);
}
+#endif
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
@@ -1390,6 +1397,7 @@ static const struct memory_stat memory_stats[] = {
{ "pgdemote_kswapd", PGDEMOTE_KSWAPD },
{ "pgdemote_direct", PGDEMOTE_DIRECT },
{ "pgdemote_khugepaged", PGDEMOTE_KHUGEPAGED },
+ { "pgdemote_proactive", PGDEMOTE_PROACTIVE },
#ifdef CONFIG_NUMA_BALANCING
{ "pgpromote_success", PGPROMOTE_SUCCESS },
#endif
@@ -1432,6 +1440,7 @@ static int memcg_page_state_output_unit(int item)
case PGDEMOTE_KSWAPD:
case PGDEMOTE_DIRECT:
case PGDEMOTE_KHUGEPAGED:
+ case PGDEMOTE_PROACTIVE:
#ifdef CONFIG_NUMA_BALANCING
case PGPROMOTE_SUCCESS:
#endif
@@ -1447,11 +1456,13 @@ unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
memcg_page_state_output_unit(item);
}
+#ifdef CONFIG_MEMCG_V1
unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item)
{
return memcg_page_state_local(memcg, item) *
memcg_page_state_output_unit(item);
}
+#endif
#ifdef CONFIG_HUGETLB_PAGE
static bool memcg_accounts_hugetlb(void)
@@ -1503,10 +1514,12 @@ static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
seq_buf_printf(s, "pgscan %lu\n",
memcg_events(memcg, PGSCAN_KSWAPD) +
memcg_events(memcg, PGSCAN_DIRECT) +
+ memcg_events(memcg, PGSCAN_PROACTIVE) +
memcg_events(memcg, PGSCAN_KHUGEPAGED));
seq_buf_printf(s, "pgsteal %lu\n",
memcg_events(memcg, PGSTEAL_KSWAPD) +
memcg_events(memcg, PGSTEAL_DIRECT) +
+ memcg_events(memcg, PGSTEAL_PROACTIVE) +
memcg_events(memcg, PGSTEAL_KHUGEPAGED));
for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
@@ -1566,16 +1579,23 @@ void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
/* Use static buffer, for the caller is holding oom_lock. */
static char buf[SEQ_BUF_SIZE];
struct seq_buf s;
+ unsigned long memory_failcnt;
lockdep_assert_held(&oom_lock);
+ if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ memory_failcnt = atomic_long_read(&memcg->memory_events[MEMCG_MAX]);
+ else
+ memory_failcnt = memcg->memory.failcnt;
+
pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
K((u64)page_counter_read(&memcg->memory)),
- K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
+ K((u64)READ_ONCE(memcg->memory.max)), memory_failcnt);
if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
K((u64)page_counter_read(&memcg->swap)),
- K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
+ K((u64)READ_ONCE(memcg->swap.max)),
+ atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
#ifdef CONFIG_MEMCG_V1
else {
pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
@@ -2224,8 +2244,8 @@ out:
css_put(&memcg->css);
}
-int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
- unsigned int nr_pages)
+static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
+ unsigned int nr_pages)
{
unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
int nr_retries = MAX_RECLAIM_RETRIES;
@@ -2418,6 +2438,15 @@ done_restock:
return 0;
}
+static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
+ unsigned int nr_pages)
+{
+ if (mem_cgroup_is_root(memcg))
+ return 0;
+
+ return try_charge_memcg(memcg, gfp_mask, nr_pages);
+}
+
static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
{
VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio);
@@ -2642,7 +2671,8 @@ static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
memcg1_account_kmem(memcg, -nr_pages);
- refill_stock(memcg, nr_pages);
+ if (!mem_cgroup_is_root(memcg))
+ refill_stock(memcg, nr_pages);
css_put(&memcg->css);
}
@@ -2675,6 +2705,23 @@ out:
return ret;
}
+static struct obj_cgroup *page_objcg(const struct page *page)
+{
+ unsigned long memcg_data = page->memcg_data;
+
+ if (mem_cgroup_disabled() || !memcg_data)
+ return NULL;
+
+ VM_BUG_ON_PAGE((memcg_data & OBJEXTS_FLAGS_MASK) != MEMCG_DATA_KMEM,
+ page);
+ return (struct obj_cgroup *)(memcg_data - MEMCG_DATA_KMEM);
+}
+
+static void page_set_objcg(struct page *page, const struct obj_cgroup *objcg)
+{
+ page->memcg_data = (unsigned long)objcg | MEMCG_DATA_KMEM;
+}
+
/**
* __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
* @page: page to charge
@@ -2693,8 +2740,7 @@ int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
if (!ret) {
obj_cgroup_get(objcg);
- page->memcg_data = (unsigned long)objcg |
- MEMCG_DATA_KMEM;
+ page_set_objcg(page, objcg);
return 0;
}
}
@@ -2708,19 +2754,31 @@ int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
*/
void __memcg_kmem_uncharge_page(struct page *page, int order)
{
- struct folio *folio = page_folio(page);
- struct obj_cgroup *objcg;
+ struct obj_cgroup *objcg = page_objcg(page);
unsigned int nr_pages = 1 << order;
- if (!folio_memcg_kmem(folio))
+ if (!objcg)
return;
- objcg = __folio_objcg(folio);
obj_cgroup_uncharge_pages(objcg, nr_pages);
- folio->memcg_data = 0;
+ page->memcg_data = 0;
obj_cgroup_put(objcg);
}
+/* Replace the cached objcg in the stock with @objcg and return the old one. */
+static struct obj_cgroup *replace_stock_objcg(struct memcg_stock_pcp *stock,
+ struct obj_cgroup *objcg)
+{
+ struct obj_cgroup *old = NULL;
+
+ old = drain_obj_stock(stock);
+ obj_cgroup_get(objcg);
+ stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
+ ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
+ WRITE_ONCE(stock->cached_objcg, objcg);
+ return old;
+}
+
static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
enum node_stat_item idx, int nr)
{
@@ -2738,11 +2796,7 @@ static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
* changes.
*/
if (READ_ONCE(stock->cached_objcg) != objcg) {
- old = drain_obj_stock(stock);
- obj_cgroup_get(objcg);
- stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
- ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
- WRITE_ONCE(stock->cached_objcg, objcg);
+ old = replace_stock_objcg(stock, objcg);
stock->cached_pgdat = pgdat;
} else if (stock->cached_pgdat != pgdat) {
/* Flush the existing cached vmstat data */
@@ -2896,11 +2950,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
stock = this_cpu_ptr(&memcg_stock);
if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
- old = drain_obj_stock(stock);
- obj_cgroup_get(objcg);
- WRITE_ONCE(stock->cached_objcg, objcg);
- stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
- ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
+ old = replace_stock_objcg(stock, objcg);
allow_uncharge = true; /* Allow uncharge when objcg changes */
}
stock->nr_bytes += nr_bytes;
@@ -3058,25 +3108,33 @@ void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
}
/*
- * Because folio_memcg(head) is not set on tails, set it now.
+ * The objcg is only set on the first page, so transfer it to all the
+ * other pages.
*/
-void split_page_memcg(struct page *head, int old_order, int new_order)
+void split_page_memcg(struct page *page, unsigned order)
{
- struct folio *folio = page_folio(head);
- int i;
- unsigned int old_nr = 1 << old_order;
- unsigned int new_nr = 1 << new_order;
+ struct obj_cgroup *objcg = page_objcg(page);
+ unsigned int i, nr = 1 << order;
- if (mem_cgroup_disabled() || !folio_memcg_charged(folio))
+ if (!objcg)
return;
- for (i = new_nr; i < old_nr; i += new_nr)
- folio_page(folio, i)->memcg_data = folio->memcg_data;
+ for (i = 1; i < nr; i++)
+ page_set_objcg(&page[i], objcg);
- if (folio_memcg_kmem(folio))
- obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
- else
- css_get_many(&folio_memcg(folio)->css, old_nr / new_nr - 1);
+ obj_cgroup_get_many(objcg, nr - 1);
+}
+
+void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
+ unsigned new_order)
+{
+ unsigned new_refs;
+
+ if (mem_cgroup_disabled() || !folio_memcg_charged(folio))
+ return;
+
+ new_refs = (1 << (old_order - new_order)) - 1;
+ css_get_many(&__folio_memcg(folio)->css, new_refs);
}
unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
@@ -3404,7 +3462,7 @@ void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
refcount_add(n, &memcg->id.ref);
}
-void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
+static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
{
if (refcount_sub_and_test(n, &memcg->id.ref)) {
mem_cgroup_id_remove(memcg);
@@ -3419,6 +3477,24 @@ static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
mem_cgroup_id_put_many(memcg, 1);
}
+struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
+{
+ while (!refcount_inc_not_zero(&memcg->id.ref)) {
+ /*
+ * The root cgroup cannot be destroyed, so its refcount must
+ * always be >= 1.
+ */
+ if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
+ VM_BUG_ON(1);
+ break;
+ }
+ memcg = parent_mem_cgroup(memcg);
+ if (!memcg)
+ memcg = root_mem_cgroup;
+ }
+ return memcg;
+}
+
/**
* mem_cgroup_from_id - look up a memcg from a memcg id
* @id: the memcg id to look up
@@ -3454,6 +3530,16 @@ struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
}
#endif
+static void free_mem_cgroup_per_node_info(struct mem_cgroup_per_node *pn)
+{
+ if (!pn)
+ return;
+
+ free_percpu(pn->lruvec_stats_percpu);
+ kfree(pn->lruvec_stats);
+ kfree(pn);
+}
+
static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
{
struct mem_cgroup_per_node *pn;
@@ -3478,23 +3564,10 @@ static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
memcg->nodeinfo[node] = pn;
return true;
fail:
- kfree(pn->lruvec_stats);
- kfree(pn);
+ free_mem_cgroup_per_node_info(pn);
return false;
}
-static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
-{
- struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
-
- if (!pn)
- return;
-
- free_percpu(pn->lruvec_stats_percpu);
- kfree(pn->lruvec_stats);
- kfree(pn);
-}
-
static void __mem_cgroup_free(struct mem_cgroup *memcg)
{
int node;
@@ -3502,7 +3575,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
obj_cgroup_put(memcg->orig_objcg);
for_each_node(node)
- free_mem_cgroup_per_node_info(memcg, node);
+ free_mem_cgroup_per_node_info(memcg->nodeinfo[node]);
memcg1_free_events(memcg);
kfree(memcg->vmstats);
free_percpu(memcg->vmstats_percpu);
@@ -3595,6 +3668,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
struct mem_cgroup *memcg, *old_memcg;
+ bool memcg_on_dfl = cgroup_subsys_on_dfl(memory_cgrp_subsys);
old_memcg = set_active_memcg(parent);
memcg = mem_cgroup_alloc(parent);
@@ -3612,9 +3686,10 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
if (parent) {
WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
- page_counter_init(&memcg->memory, &parent->memory, true);
+ page_counter_init(&memcg->memory, &parent->memory, memcg_on_dfl);
page_counter_init(&memcg->swap, &parent->swap, false);
#ifdef CONFIG_MEMCG_V1
+ memcg->memory.track_failcnt = !memcg_on_dfl;
WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
page_counter_init(&memcg->kmem, &parent->kmem, false);
page_counter_init(&memcg->tcpmem, &parent->tcpmem, false);
@@ -3632,7 +3707,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
return &memcg->css;
}
- if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
+ if (memcg_on_dfl && !cgroup_memory_nosocket)
static_branch_inc(&memcg_sockets_enabled_key);
if (!cgroup_memory_nobpf)
@@ -4034,7 +4109,7 @@ static ssize_t peak_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
WRITE_ONCE(peer_ctx->value, usage);
/* initial write, register watcher */
- if (ofp->value == -1)
+ if (ofp->value == OFP_PEAK_UNSET)
list_add(&ofp->list, watchers);
WRITE_ONCE(ofp->value, usage);
@@ -4607,40 +4682,6 @@ int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
return ret;
}
-/*
- * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
- * @entry: the first swap entry for which the pages are charged
- * @nr_pages: number of pages which will be uncharged
- *
- * Call this function after successfully adding the charged page to swapcache.
- *
- * Note: This function assumes the page for which swap slot is being uncharged
- * is order 0 page.
- */
-void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
-{
- /*
- * Cgroup1's unified memory+swap counter has been charged with the
- * new swapcache page, finish the transfer by uncharging the swap
- * slot. The swap slot would also get uncharged when it dies, but
- * it can stick around indefinitely and we'd count the page twice
- * the entire time.
- *
- * Cgroup2 has separate resource counters for memory and swap,
- * so this is a non-issue here. Memory and swap charge lifetimes
- * correspond 1:1 to page and swap slot lifetimes: we charge the
- * page to memory here, and uncharge swap when the slot is freed.
- */
- if (do_memsw_account()) {
- /*
- * The swap entry might not get freed for a long time,
- * let's not wait for it. The page already received a
- * memory+swap charge, drop the swap entry duplicate.
- */
- mem_cgroup_uncharge_swap(entry, nr_pages);
- }
-}
-
struct uncharge_gather {
struct mem_cgroup *memcg;
unsigned long nr_memory;
@@ -4891,7 +4932,7 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
return memcg1_charge_skmem(memcg, nr_pages, gfp_mask);
- if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
+ if (try_charge_memcg(memcg, gfp_mask, nr_pages) == 0) {
mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
return true;
}
@@ -4966,81 +5007,6 @@ static int __init mem_cgroup_init(void)
subsys_initcall(mem_cgroup_init);
#ifdef CONFIG_SWAP
-static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
-{
- while (!refcount_inc_not_zero(&memcg->id.ref)) {
- /*
- * The root cgroup cannot be destroyed, so it's refcount must
- * always be >= 1.
- */
- if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
- VM_BUG_ON(1);
- break;
- }
- memcg = parent_mem_cgroup(memcg);
- if (!memcg)
- memcg = root_mem_cgroup;
- }
- return memcg;
-}
-
-/**
- * mem_cgroup_swapout - transfer a memsw charge to swap
- * @folio: folio whose memsw charge to transfer
- * @entry: swap entry to move the charge to
- *
- * Transfer the memsw charge of @folio to @entry.
- */
-void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
-{
- struct mem_cgroup *memcg, *swap_memcg;
- unsigned int nr_entries;
-
- VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
- VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
-
- if (mem_cgroup_disabled())
- return;
-
- if (!do_memsw_account())
- return;
-
- memcg = folio_memcg(folio);
-
- VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
- if (!memcg)
- return;
-
- /*
- * In case the memcg owning these pages has been offlined and doesn't
- * have an ID allocated to it anymore, charge the closest online
- * ancestor for the swap instead and transfer the memory+swap charge.
- */
- swap_memcg = mem_cgroup_id_get_online(memcg);
- nr_entries = folio_nr_pages(folio);
- /* Get references for the tail pages, too */
- if (nr_entries > 1)
- mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
- mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
-
- swap_cgroup_record(folio, mem_cgroup_id(swap_memcg), entry);
-
- folio_unqueue_deferred_split(folio);
- folio->memcg_data = 0;
-
- if (!mem_cgroup_is_root(memcg))
- page_counter_uncharge(&memcg->memory, nr_entries);
-
- if (memcg != swap_memcg) {
- if (!mem_cgroup_is_root(swap_memcg))
- page_counter_charge(&swap_memcg->memsw, nr_entries);
- page_counter_uncharge(&memcg->memsw, nr_entries);
- }
-
- memcg1_swapout(folio, memcg);
- css_put(&memcg->css);
-}
-
/**
* __mem_cgroup_try_charge_swap - try charging swap space for a folio
* @folio: folio being added to swap
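The reference math in folio_split_memcg_refs() above is worth spelling out: splitting an order-old_order folio into pieces of order new_order yields 1 << (old_order - new_order) pieces, one of which inherits the original css reference. A hedged check of that arithmetic:

/* Illustrative: extra css refs needed when splitting a charged folio. */
static unsigned int extra_split_refs(unsigned int old_order,
				     unsigned int new_order)
{
	/* One piece keeps the original reference; every other piece
	 * needs a reference of its own. */
	return (1u << (old_order - new_order)) - 1;
}
/* e.g. a 2 MiB folio (order 9) split to base pages: 511 extra refs. */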
diff --git a/mm/memfd.c b/mm/memfd.c
index 37f7be57c2f5..c64df1343059 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -259,7 +259,7 @@ static int memfd_add_seals(struct file *file, unsigned int seals)
}
/*
- * SEAL_EXEC implys SEAL_WRITE, making W^X from the start.
+ * SEAL_EXEC implies SEAL_WRITE, making W^X from the start.
*/
if (seals & F_SEAL_EXEC && inode->i_mode & 0111)
seals |= F_SEAL_SHRINK|F_SEAL_GROW|F_SEAL_WRITE|F_SEAL_FUTURE_WRITE;
@@ -337,7 +337,7 @@ static int check_write_seal(unsigned long *vm_flags_ptr)
unsigned long vm_flags = *vm_flags_ptr;
unsigned long mask = vm_flags & (VM_SHARED | VM_WRITE);
- /* If a private matting then writability is irrelevant. */
+ /* If a private mapping then writability is irrelevant. */
if (!(mask & VM_SHARED))
return 0;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 327e02fdc029..b91a33fb6c69 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -419,18 +419,18 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
pud = pud_offset(p4d, address);
if (!pud_present(*pud))
return 0;
- if (pud_devmap(*pud))
+ if (pud_trans_huge(*pud))
return PUD_SHIFT;
pmd = pmd_offset(pud, address);
if (!pmd_present(*pmd))
return 0;
- if (pmd_devmap(*pmd))
+ if (pmd_trans_huge(*pmd))
return PMD_SHIFT;
pte = pte_offset_map(pmd, address);
if (!pte)
return 0;
ptent = ptep_get(pte);
- if (pte_present(ptent) && pte_devmap(ptent))
+ if (pte_present(ptent))
ret = PAGE_SHIFT;
pte_unmap(pte);
return ret;
@@ -881,12 +881,17 @@ static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
mmap_read_lock(p->mm);
ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwpoison_walk_ops,
(void *)&priv);
+ /*
+ * ret = 1 when CMCI wins, regardless of whether try_to_unmap()
+ * succeeds or fails; in that case, kill the process with SIGBUS.
+ * ret = 0 when the poisoned page is a clean page that has been
+ * dropped; no SIGBUS is needed.
+ */
if (ret == 1 && priv.tk.addr)
kill_proc(&priv.tk, pfn, flags);
- else
- ret = 0;
mmap_read_unlock(p->mm);
- return ret > 0 ? -EHWPOISON : -EFAULT;
+
+ return ret > 0 ? -EHWPOISON : 0;
}
/*
@@ -2210,9 +2215,13 @@ static void kill_procs_now(struct page *p, unsigned long pfn, int flags,
* Must run in process context (e.g. a work queue) with interrupts
* enabled and no spinlocks held.
*
- * Return: 0 for successfully handled the memory error,
- * -EOPNOTSUPP for hwpoison_filter() filtered the error event,
- * < 0(except -EOPNOTSUPP) on failure.
+ * Return:
+ * 0 - success,
+ * -ENXIO - memory not managed by the kernel,
+ * -EOPNOTSUPP - hwpoison_filter() filtered the error event,
+ * -EHWPOISON - the page was already poisoned; the process may
+ * be killed,
+ * other negative values - failure.
*/
int memory_failure(unsigned long pfn, int flags)
{
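A hedged caller sketch against the clarified return contract above; the function below is hypothetical:

/* Hypothetical caller acting on memory_failure() results. */
static void handle_poisoned_pfn(unsigned long pfn)
{
	int ret = memory_failure(pfn, 0);

	switch (ret) {
	case 0:			/* handled successfully */
	case -EHWPOISON:	/* already poisoned; process may be killed */
	case -EOPNOTSUPP:	/* filtered by hwpoison_filter() */
		break;
	case -ENXIO:		/* pfn not managed by the kernel */
	default:		/* other failure */
		pr_err("pfn %#lx: unrecovered error (%d)\n", pfn, ret);
	}
}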
diff --git a/mm/memory.c b/mm/memory.c
index 369905596243..2d8c265fc7d6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -94,14 +94,6 @@
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif
-#ifndef CONFIG_NUMA
-unsigned long max_mapnr;
-EXPORT_SYMBOL(max_mapnr);
-
-struct page *mem_map;
-EXPORT_SYMBOL(mem_map);
-#endif
-
static vm_fault_t do_fault(struct vm_fault *vmf);
static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
static bool vmf_pte_changed(struct vm_fault *vmf);
@@ -121,14 +113,6 @@ static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
}
/*
- * A number of key systems in x86 including ioremap() rely on the assumption
- * that high_memory defines the upper bound on direct map memory, then end
- * of ZONE_NORMAL.
- */
-void *high_memory;
-EXPORT_SYMBOL(high_memory);
-
-/*
* Randomize the address space (stacks, mmaps, brk, etc.).
*
* ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
@@ -715,42 +699,53 @@ struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
}
#endif
+/**
+ * restore_exclusive_pte - Restore a device-exclusive entry
+ * @vma: VMA covering @address
+ * @folio: the mapped folio
+ * @page: the mapped folio page
+ * @address: the virtual address
+ * @ptep: pte pointer into the locked page table mapping the folio page
+ * @orig_pte: pte value at @ptep
+ *
+ * Restore a device-exclusive non-swap entry to an ordinary present pte.
+ *
+ * The folio and the page table must be locked, and MMU notifiers must have
+ * been called to invalidate any (exclusive) device mappings.
+ *
+ * Locking the folio makes sure that anybody who just converted the pte to
+ * a device-exclusive entry can map it into the device to make forward
+ * progress without others converting it back until the folio is unlocked.
+ *
+ * If the folio lock ever becomes an issue, we can stop relying on the folio
+ * lock; it might make some scenarios with heavy thrashing less likely to
+ * make forward progress, but these scenarios might not be valid use cases.
+ *
+ * Note that the folio lock does not protect against all cases of concurrent
+ * page table modifications (e.g., MADV_DONTNEED, mprotect), so device drivers
+ * must use MMU notifiers to sync against any concurrent changes.
+ */
static void restore_exclusive_pte(struct vm_area_struct *vma,
- struct page *page, unsigned long address,
- pte_t *ptep)
+ struct folio *folio, struct page *page, unsigned long address,
+ pte_t *ptep, pte_t orig_pte)
{
- struct folio *folio = page_folio(page);
- pte_t orig_pte;
pte_t pte;
- swp_entry_t entry;
- orig_pte = ptep_get(ptep);
+ VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
+
pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
if (pte_swp_soft_dirty(orig_pte))
pte = pte_mksoft_dirty(pte);
- entry = pte_to_swp_entry(orig_pte);
if (pte_swp_uffd_wp(orig_pte))
pte = pte_mkuffd_wp(pte);
- else if (is_writable_device_exclusive_entry(entry))
- pte = maybe_mkwrite(pte_mkdirty(pte), vma);
-
- VM_BUG_ON_FOLIO(pte_write(pte) && (!folio_test_anon(folio) &&
- PageAnonExclusive(page)), folio);
-
- /*
- * No need to take a page reference as one was already
- * created when the swap entry was made.
- */
- if (folio_test_anon(folio))
- folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE);
- else
- /*
- * Currently device exclusive access only supports anonymous
- * memory so the entry shouldn't point to a filebacked page.
- */
- WARN_ON_ONCE(1);
+ if ((vma->vm_flags & VM_WRITE) &&
+ can_change_pte_writable(vma, address, pte)) {
+ if (folio_test_dirty(folio))
+ pte = pte_mkdirty(pte);
+ pte = pte_mkwrite(pte, vma);
+ }
set_pte_at(vma->vm_mm, address, ptep, pte);
/*
@@ -764,16 +759,15 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
* Tries to restore an exclusive pte if the page lock can be acquired without
* sleeping.
*/
-static int
-try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
- unsigned long addr)
+static int try_restore_exclusive_pte(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, pte_t orig_pte)
{
- swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte));
- struct page *page = pfn_swap_entry_to_page(entry);
+ struct page *page = pfn_swap_entry_to_page(pte_to_swp_entry(orig_pte));
+ struct folio *folio = page_folio(page);
- if (trylock_page(page)) {
- restore_exclusive_pte(vma, page, addr, src_pte);
- unlock_page(page);
+ if (folio_trylock(folio)) {
+ restore_exclusive_pte(vma, folio, page, addr, ptep, orig_pte);
+ folio_unlock(folio);
return 0;
}
@@ -853,7 +847,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
folio_get(folio);
rss[mm_counter(folio)]++;
/* Cannot fail as these pages cannot get pinned. */
- folio_try_dup_anon_rmap_pte(folio, page, src_vma);
+ folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma);
/*
* We do not preserve soft-dirty information, because so
@@ -879,7 +873,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
* (ie. COW) mappings.
*/
VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
- if (try_restore_exclusive_pte(src_pte, src_vma, addr))
+ if (try_restore_exclusive_pte(src_vma, addr, src_pte, orig_pte))
return -EBUSY;
return -ENOENT;
} else if (is_pte_marker_entry(entry)) {
@@ -1007,14 +1001,14 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
folio_ref_add(folio, nr);
if (folio_test_anon(folio)) {
if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
- nr, src_vma))) {
+ nr, dst_vma, src_vma))) {
folio_ref_sub(folio, nr);
return -EAGAIN;
}
rss[MM_ANONPAGES] += nr;
VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
} else {
- folio_dup_file_rmap_ptes(folio, page, nr);
+ folio_dup_file_rmap_ptes(folio, page, nr, dst_vma);
rss[mm_counter_file(folio)] += nr;
}
if (any_writable)
@@ -1032,7 +1026,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
* guarantee the pinned page won't be randomly replaced in the
* future.
*/
- if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) {
+ if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma))) {
/* Page may be pinned, we have to copy. */
folio_put(folio);
err = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
@@ -1042,7 +1036,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
rss[MM_ANONPAGES]++;
VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
} else {
- folio_dup_file_rmap_pte(folio, page);
+ folio_dup_file_rmap_pte(folio, page, dst_vma);
rss[mm_counter_file(folio)]++;
}
@@ -1619,8 +1613,7 @@ static inline int zap_nonpresent_ptes(struct mmu_gather *tlb,
*/
WARN_ON_ONCE(!vma_is_anonymous(vma));
rss[mm_counter(folio)]--;
- if (is_device_private_entry(entry))
- folio_remove_rmap_pte(folio, page, vma);
+ folio_remove_rmap_pte(folio, page, vma);
folio_put(folio);
} else if (!non_swap_entry(entry)) {
/* Genuine swap entries, hence a private anon pages */
@@ -2132,19 +2125,39 @@ static int validate_page_before_insert(struct vm_area_struct *vma,
}
static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
- unsigned long addr, struct page *page, pgprot_t prot)
+ unsigned long addr, struct page *page,
+ pgprot_t prot, bool mkwrite)
{
struct folio *folio = page_folio(page);
- pte_t pteval;
+ pte_t pteval = ptep_get(pte);
+
+ if (!pte_none(pteval)) {
+ if (!mkwrite)
+ return -EBUSY;
+
+ /* see insert_pfn(). */
+ if (pte_pfn(pteval) != page_to_pfn(page)) {
+ WARN_ON_ONCE(!is_zero_pfn(pte_pfn(pteval)));
+ return -EFAULT;
+ }
+ pteval = maybe_mkwrite(pteval, vma);
+ pteval = pte_mkyoung(pteval);
+ if (ptep_set_access_flags(vma, addr, pte, pteval, 1))
+ update_mmu_cache(vma, addr, pte);
+ return 0;
+ }
- if (!pte_none(ptep_get(pte)))
- return -EBUSY;
/* Ok, finally just insert the thing.. */
pteval = mk_pte(page, prot);
if (unlikely(is_zero_folio(folio))) {
pteval = pte_mkspecial(pteval);
} else {
folio_get(folio);
+ pteval = mk_pte(page, prot);
+ if (mkwrite) {
+ pteval = pte_mkyoung(pteval);
+ pteval = maybe_mkwrite(pte_mkdirty(pteval), vma);
+ }
inc_mm_counter(vma->vm_mm, mm_counter_file(folio));
folio_add_file_rmap_pte(folio, page, vma);
}
@@ -2153,7 +2166,7 @@ static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
}
static int insert_page(struct vm_area_struct *vma, unsigned long addr,
- struct page *page, pgprot_t prot)
+ struct page *page, pgprot_t prot, bool mkwrite)
{
int retval;
pte_t *pte;
@@ -2166,7 +2179,8 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
pte = get_locked_pte(vma->vm_mm, addr, &ptl);
if (!pte)
goto out;
- retval = insert_page_into_pte_locked(vma, pte, addr, page, prot);
+ retval = insert_page_into_pte_locked(vma, pte, addr, page, prot,
+ mkwrite);
pte_unmap_unlock(pte, ptl);
out:
return retval;
@@ -2180,7 +2194,7 @@ static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
err = validate_page_before_insert(vma, page);
if (err)
return err;
- return insert_page_into_pte_locked(vma, pte, addr, page, prot);
+ return insert_page_into_pte_locked(vma, pte, addr, page, prot, false);
}
/* insert_pages() amortizes the cost of spinlock operations
@@ -2316,7 +2330,7 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
BUG_ON(vma->vm_flags & VM_PFNMAP);
vm_flags_set(vma, VM_MIXEDMAP);
}
- return insert_page(vma, addr, page, vma->vm_page_prot);
+ return insert_page(vma, addr, page, vma->vm_page_prot, false);
}
EXPORT_SYMBOL(vm_insert_page);
@@ -2596,7 +2610,7 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
* result in pfn_t_has_page() == false.
*/
page = pfn_to_page(pfn_t_to_pfn(pfn));
- err = insert_page(vma, addr, page, pgprot);
+ err = insert_page(vma, addr, page, pgprot, mkwrite);
} else {
return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
}
@@ -2609,6 +2623,26 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
return VM_FAULT_NOPAGE;
}
+vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page,
+ bool write)
+{
+ pgprot_t pgprot = vmf->vma->vm_page_prot;
+ unsigned long addr = vmf->address;
+ int err;
+
+ if (addr < vmf->vma->vm_start || addr >= vmf->vma->vm_end)
+ return VM_FAULT_SIGBUS;
+
+ err = insert_page(vmf->vma, addr, page, pgprot, write);
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+ if (err < 0 && err != -EBUSY)
+ return VM_FAULT_SIGBUS;
+
+ return VM_FAULT_NOPAGE;
+}
+EXPORT_SYMBOL_GPL(vmf_insert_page_mkwrite);
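
vmf_insert_page_mkwrite() follows the usual vmf_* convention of folding an errno into a VM_FAULT code, with -EBUSY folded into success (on this path an already-populated PTE is the benign outcome of a racing fault, which insert_page_into_pte_locked() upgrades in place when mkwrite is set). A userspace model of just that mapping; errno_to_fault() and the enum values are illustrative, not kernel names:

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's VM_FAULT_* codes. */
enum { VM_FAULT_NOPAGE, VM_FAULT_OOM, VM_FAULT_SIGBUS };

static int errno_to_fault(int err)
{
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;	/* success, or the benign -EBUSY race */
}

int main(void)
{
	printf("%d %d %d %d\n",
	       errno_to_fault(0),		/* NOPAGE */
	       errno_to_fault(-EBUSY),		/* NOPAGE */
	       errno_to_fault(-ENOMEM),		/* OOM */
	       errno_to_fault(-EFAULT));	/* SIGBUS */
	return 0;
}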
+
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn)
{
@@ -3673,19 +3707,86 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio)
return ret;
}
-static bool wp_can_reuse_anon_folio(struct folio *folio,
- struct vm_area_struct *vma)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static bool __wp_can_reuse_large_anon_folio(struct folio *folio,
+ struct vm_area_struct *vma)
{
+ bool exclusive = false;
+
+ /* Let's just free up a large folio if only a single page is mapped. */
+ if (folio_large_mapcount(folio) <= 1)
+ return false;
+
/*
- * We could currently only reuse a subpage of a large folio if no
- * other subpages of the large folios are still mapped. However,
- * let's just consistently not reuse subpages even if we could
- * reuse in that scenario, and give back a large folio a bit
- * sooner.
+ * The assumption for anonymous folios is that each page can only get
+	 * mapped once into each MM. The only exception is KSM folios, which
+ * are always small.
+ *
+ * Each taken mapcount must be paired with exactly one taken reference,
+ * whereby the refcount must be incremented before the mapcount when
+ * mapping a page, and the refcount must be decremented after the
+ * mapcount when unmapping a page.
+ *
+ * If all folio references are from mappings, and all mappings are in
+ * the page tables of this MM, then this folio is exclusive to this MM.
*/
- if (folio_test_large(folio))
+ if (folio_test_large_maybe_mapped_shared(folio))
+ return false;
+
+ VM_WARN_ON_ONCE(folio_test_ksm(folio));
+ VM_WARN_ON_ONCE(folio_mapcount(folio) > folio_nr_pages(folio));
+ VM_WARN_ON_ONCE(folio_entire_mapcount(folio));
+
+ if (unlikely(folio_test_swapcache(folio))) {
+ /*
+ * Note: freeing up the swapcache will fail if some PTEs are
+ * still swap entries.
+ */
+ if (!folio_trylock(folio))
+ return false;
+ folio_free_swap(folio);
+ folio_unlock(folio);
+ }
+
+ if (folio_large_mapcount(folio) != folio_ref_count(folio))
return false;
+ /* Stabilize the mapcount vs. refcount and recheck. */
+ folio_lock_large_mapcount(folio);
+	VM_WARN_ON_ONCE(folio_large_mapcount(folio) > folio_ref_count(folio));
+
+ if (folio_test_large_maybe_mapped_shared(folio))
+ goto unlock;
+ if (folio_large_mapcount(folio) != folio_ref_count(folio))
+ goto unlock;
+
+ VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != vma->vm_mm->mm_id &&
+ folio_mm_id(folio, 1) != vma->vm_mm->mm_id);
+
+ /*
+ * Do we need the folio lock? Likely not. If there would have been
+ * references from page migration/swapout, we would have detected
+ * an additional folio reference and never ended up here.
+ */
+ exclusive = true;
+unlock:
+ folio_unlock_large_mapcount(folio);
+ return exclusive;
+}
+#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
+static bool __wp_can_reuse_large_anon_folio(struct folio *folio,
+ struct vm_area_struct *vma)
+{
+ BUILD_BUG();
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
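
The whole reuse test rests on the pairing invariant spelled out in the comment above: every mapping takes exactly one reference, so if the large mapcount equals the refcount (and the MM-id tracking shows a single MM), every reference is a mapping in this MM and the folio is exclusive. A minimal userspace model of that accounting; map_page()/unmap_page()/pin_page() are illustrative stand-ins, not kernel APIs:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct folio_model {
	int refcount;	/* all references, including one per mapping */
	int mapcount;	/* page-table mappings only */
};

static void map_page(struct folio_model *f)   { f->refcount++; f->mapcount++; }
static void unmap_page(struct folio_model *f) { f->mapcount--; f->refcount--; }
static void pin_page(struct folio_model *f)   { f->refcount++; } /* e.g. GUP */

/* Exclusive iff every reference is a mapping (in one MM, per the MM ids). */
static bool can_reuse(const struct folio_model *f)
{
	assert(f->mapcount <= f->refcount);	/* the invariant WARNed on above */
	return f->mapcount > 1 && f->mapcount == f->refcount;
}

int main(void)
{
	struct folio_model f = { 0, 0 };

	map_page(&f);
	map_page(&f);	/* two PTEs of the same large folio, same MM */
	printf("mapped twice: reuse=%d\n", can_reuse(&f));	/* 1 */

	pin_page(&f);	/* extra reference: maybe pinned, must copy */
	printf("pinned:       reuse=%d\n", can_reuse(&f));	/* 0 */

	unmap_page(&f);	/* down to a single mapping: give the folio back */
	printf("single map:   reuse=%d\n", can_reuse(&f));	/* 0 */
	return 0;
}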
+
+static bool wp_can_reuse_anon_folio(struct folio *folio,
+ struct vm_area_struct *vma)
+{
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && folio_test_large(folio))
+ return __wp_can_reuse_large_anon_folio(folio, vma);
+
/*
* We have to verify under folio lock: these early checks are
* just an optimization to avoid locking the folio and freeing
@@ -3794,13 +3895,15 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
/*
* VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
- * VM_PFNMAP VMA.
+ * VM_PFNMAP VMA. FS DAX also wants ops->pfn_mkwrite called.
*
* We should not cow pages in a shared writeable mapping.
* Just mark the pages writable and/or call ops->pfn_mkwrite.
*/
- if (!vmf->page)
+ if (!vmf->page || is_fsdax_page(vmf->page)) {
+ vmf->page = NULL;
return wp_pfn_shared(vmf);
+ }
return wp_page_shared(vmf, folio);
}
@@ -3990,7 +4093,7 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
folio_put(folio);
return ret;
}
- mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
+ mmu_notifier_range_init_owner(&range, MMU_NOTIFY_CLEAR, 0,
vma->vm_mm, vmf->address & PAGE_MASK,
(vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
mmu_notifier_invalidate_range_start(&range);
@@ -3998,7 +4101,8 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
&vmf->ptl);
if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
- restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);
+ restore_exclusive_pte(vma, folio, vmf->page, vmf->address,
+ vmf->pte, vmf->orig_pte);
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -4347,9 +4451,12 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
* freed.
*/
if (trylock_page(vmf->page)) {
+ struct dev_pagemap *pgmap;
+
get_page(vmf->page);
pte_unmap_unlock(vmf->pte, vmf->ptl);
- ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
+ pgmap = page_pgmap(vmf->page);
+ ret = pgmap->ops->migrate_to_ram(vmf);
unlock_page(vmf->page);
put_page(vmf->page);
} else {
@@ -4408,7 +4515,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
}
need_clear_cache = true;
- mem_cgroup_swapin_uncharge_swap(entry, nr_pages);
+ memcg1_swapin(entry, nr_pages);
shadow = get_shadow_from_swap_cache(entry);
if (shadow)
@@ -5577,7 +5684,7 @@ int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
* Flag if the folio is shared between multiple address spaces. This
* is later used when determining whether to group tasks together
*/
- if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
+ if (folio_maybe_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
*flags |= TNF_SHARED;
/*
* For memory tiering mode, cpupid of slow memory page is used
@@ -6348,6 +6455,88 @@ fail:
#endif
#ifdef CONFIG_PER_VMA_LOCK
+static inline bool __vma_enter_locked(struct vm_area_struct *vma, bool detaching)
+{
+ unsigned int tgt_refcnt = VMA_LOCK_OFFSET;
+
+ /* Additional refcnt if the vma is attached. */
+ if (!detaching)
+ tgt_refcnt++;
+
+ /*
+ * If vma is detached then only vma_mark_attached() can raise the
+ * vm_refcnt. mmap_write_lock prevents racing with vma_mark_attached().
+ */
+ if (!refcount_add_not_zero(VMA_LOCK_OFFSET, &vma->vm_refcnt))
+ return false;
+
+ rwsem_acquire(&vma->vmlock_dep_map, 0, 0, _RET_IP_);
+ rcuwait_wait_event(&vma->vm_mm->vma_writer_wait,
+ refcount_read(&vma->vm_refcnt) == tgt_refcnt,
+ TASK_UNINTERRUPTIBLE);
+ lock_acquired(&vma->vmlock_dep_map, _RET_IP_);
+
+ return true;
+}
+
+static inline void __vma_exit_locked(struct vm_area_struct *vma, bool *detached)
+{
+ *detached = refcount_sub_and_test(VMA_LOCK_OFFSET, &vma->vm_refcnt);
+ rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
+}
+
+void __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq)
+{
+ bool locked;
+
+ /*
+ * __vma_enter_locked() returns false immediately if the vma is not
+	 * attached, otherwise it waits until the refcnt indicates that the vma
+	 * is attached with no readers.
+ */
+ locked = __vma_enter_locked(vma, false);
+
+ /*
+ * We should use WRITE_ONCE() here because we can have concurrent reads
+ * from the early lockless pessimistic check in vma_start_read().
+ * We don't really care about the correctness of that early check, but
+ * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy.
+ */
+ WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
+
+ if (locked) {
+ bool detached;
+
+ __vma_exit_locked(vma, &detached);
+ WARN_ON_ONCE(detached); /* vma should remain attached */
+ }
+}
+EXPORT_SYMBOL_GPL(__vma_start_write);
+
+void vma_mark_detached(struct vm_area_struct *vma)
+{
+ vma_assert_write_locked(vma);
+ vma_assert_attached(vma);
+
+ /*
+ * We are the only writer, so no need to use vma_refcount_put().
+ * The condition below is unlikely because the vma has been already
+ * write-locked and readers can increment vm_refcnt only temporarily
+ * before they check vm_lock_seq, realize the vma is locked and drop
+ * back the vm_refcnt. That is a narrow window for observing a raised
+ * vm_refcnt.
+ */
+ if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) {
+ /* Wait until vma is detached with no readers. */
+ if (__vma_enter_locked(vma, true)) {
+ bool detached;
+
+ __vma_exit_locked(vma, &detached);
+ WARN_ON_ONCE(!detached);
+ }
+ }
+}
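
Both helpers layer a writer protocol onto a plain refcount: a writer adds a large bias (VMA_LOCK_OFFSET) so readers can detect "write-locked" from the counter value alone, then waits for the count to drain down to the bias (plus one when the VMA stays attached). A compressed userspace sketch of the bias idea in C11 atomics; there is no rcuwait here and the writer-side check is simplified for a single-threaded demo:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define VMA_LOCK_OFFSET	0x40000000u	/* illustrative bias value */

static atomic_uint vm_refcnt;	/* 0 = detached, 1 = attached, >1 = +readers */

/* Reader: take a reference only if the writer bias is absent. */
static bool reader_tryget(void)
{
	unsigned int v = atomic_load(&vm_refcnt);

	do {
		if (v == 0 || v >= VMA_LOCK_OFFSET)
			return false;	/* detached or write-locked */
	} while (!atomic_compare_exchange_weak(&vm_refcnt, &v, v + 1));
	return true;
}

static void reader_put(void)
{
	atomic_fetch_sub(&vm_refcnt, 1);
}

/* Writer: add the bias; the kernel then waits for refcnt == bias (+ 1). */
static bool writer_enter(void)
{
	/* Racy check, fine for this single-threaded demo. */
	if (atomic_load(&vm_refcnt) == 0)
		return false;	/* detached, nothing to wait for */
	atomic_fetch_add(&vm_refcnt, VMA_LOCK_OFFSET);
	return true;
}

static void writer_exit(void)
{
	atomic_fetch_sub(&vm_refcnt, VMA_LOCK_OFFSET);
}

int main(void)
{
	atomic_store(&vm_refcnt, 1);				/* attached */
	printf("reader alone:      %d\n", reader_tryget());	/* 1 */
	reader_put();

	writer_enter();
	printf("reader vs. writer: %d\n", reader_tryget());	/* 0 */
	writer_exit();
	return 0;
}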
+
/*
* Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be
* stable and not isolated. If the VMA is not found or is being modified the
@@ -6365,15 +6554,17 @@ retry:
if (!vma)
goto inval;
- if (!vma_start_read(vma))
- goto inval;
+ vma = vma_start_read(mm, vma);
+ if (IS_ERR_OR_NULL(vma)) {
+ /* Check if the VMA got isolated after we found it */
+ if (PTR_ERR(vma) == -EAGAIN) {
+ count_vm_vma_lock_event(VMA_LOCK_MISS);
+ /* The area was replaced with another one */
+ goto retry;
+ }
- /* Check if the VMA got isolated after we found it */
- if (vma->detached) {
- vma_end_read(vma);
- count_vm_vma_lock_event(VMA_LOCK_MISS);
- /* The area was replaced with another one */
- goto retry;
+ /* Failed to lock the VMA */
+ goto inval;
}
/*
* At this point, we have a stable reference to a VMA: The VMA is
@@ -6382,8 +6573,9 @@ retry:
* fields are accessible for RCU readers.
*/
- /* Check since vm_start/vm_end might change before we lock the VMA */
- if (unlikely(address < vma->vm_start || address >= vma->vm_end))
+ /* Check if the vma we locked is the right one. */
+ if (unlikely(vma->vm_mm != mm ||
+ address < vma->vm_start || address >= vma->vm_end))
goto inval_end_read;
rcu_read_unlock();
@@ -6478,6 +6670,7 @@ static inline void pfnmap_args_setup(struct follow_pfnmap_args *args,
args->lock = lock;
args->ptep = ptep;
args->pfn = pfn_base + ((args->address & ~addr_mask) >> PAGE_SHIFT);
+ args->addr_mask = addr_mask;
args->pgprot = pgprot;
args->writable = writable;
args->special = special;
@@ -6637,7 +6830,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write)
{
resource_size_t phys_addr;
- unsigned long prot = 0;
+ pgprot_t prot = __pgprot(0);
void __iomem *maddr;
int offset = offset_in_page(addr);
int ret = -EINVAL;
@@ -6647,7 +6840,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
retry:
if (follow_pfnmap_start(&args))
return -EINVAL;
- prot = pgprot_val(args.pgprot);
+ prot = args.pgprot;
phys_addr = (resource_size_t)args.pfn << PAGE_SHIFT;
writable = args.writable;
follow_pfnmap_end(&args);
@@ -6662,7 +6855,7 @@ retry:
if (follow_pfnmap_start(&args))
goto out_unmap;
- if ((prot != pgprot_val(args.pgprot)) ||
+ if ((pgprot_val(prot) != pgprot_val(args.pgprot)) ||
(phys_addr != (args.pfn << PAGE_SHIFT)) ||
(writable != args.writable)) {
follow_pfnmap_end(&args);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 16cf9e17077e..8305483de38b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1813,23 +1813,16 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
page = pfn_to_page(pfn);
folio = page_folio(page);
- /*
- * No reference or lock is held on the folio, so it might
- * be modified concurrently (e.g. split). As such,
- * folio_nr_pages() may read garbage. This is fine as the outer
- * loop will revisit the split folio later.
- */
- if (folio_test_large(folio))
- pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1;
-
if (!folio_try_get(folio))
continue;
if (unlikely(page_folio(page) != folio))
goto put_folio;
- if (folio_test_hwpoison(folio) ||
- (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) {
+ if (folio_test_large(folio))
+ pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1;
+
+ if (folio_contain_hwpoisoned_page(folio)) {
if (WARN_ON(folio_test_lru(folio)))
folio_isolate_lru(folio);
if (folio_mapped(folio)) {
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index a9eea051b0d6..b28a1e6ae096 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -673,11 +673,11 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
* Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio.
* Choosing not to migrate a shared folio is not counted as a failure.
*
- * See folio_likely_mapped_shared() on possible imprecision when we
+ * See folio_maybe_mapped_shared() on possible imprecision when we
* cannot easily detect if a folio is shared.
*/
if ((flags & MPOL_MF_MOVE_ALL) ||
- (!folio_likely_mapped_shared(folio) && !hugetlb_pmd_shared(pte)))
+ (!folio_maybe_mapped_shared(folio) && !hugetlb_pmd_shared(pte)))
if (!folio_isolate_hugetlb(folio, qp->pagelist))
qp->nr_failed++;
unlock:
@@ -1064,10 +1064,10 @@ static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
* Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio.
* Choosing not to migrate a shared folio is not counted as a failure.
*
- * See folio_likely_mapped_shared() on possible imprecision when we
+ * See folio_maybe_mapped_shared() on possible imprecision when we
* cannot easily detect if a folio is shared.
*/
- if ((flags & MPOL_MF_MOVE_ALL) || !folio_likely_mapped_shared(folio)) {
+ if ((flags & MPOL_MF_MOVE_ALL) || !folio_maybe_mapped_shared(folio)) {
if (folio_isolate_lru(folio)) {
list_add_tail(&folio->lru, foliolist);
node_stat_mod_folio(folio,
diff --git a/mm/memremap.c b/mm/memremap.c
index 40d4547ce514..2aebc1b192da 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -458,8 +458,9 @@ EXPORT_SYMBOL_GPL(get_dev_pagemap);
void free_zone_device_folio(struct folio *folio)
{
- if (WARN_ON_ONCE(!folio->page.pgmap->ops ||
- !folio->page.pgmap->ops->page_free))
+ struct dev_pagemap *pgmap = folio->pgmap;
+
+ if (WARN_ON_ONCE(!pgmap))
return;
mem_cgroup_uncharge(folio);
@@ -484,19 +485,42 @@ void free_zone_device_folio(struct folio *folio)
* For other types of ZONE_DEVICE pages, migration is either
* handled differently or not done at all, so there is no need
* to clear folio->mapping.
+ *
+ * FS DAX pages clear the mapping when the folio->share count hits
+	 * zero, indicating the page has been removed from the file
+ * system mapping.
*/
- folio->mapping = NULL;
- folio->page.pgmap->ops->page_free(folio_page(folio, 0));
+ if (pgmap->type != MEMORY_DEVICE_FS_DAX &&
+ pgmap->type != MEMORY_DEVICE_GENERIC)
+ folio->mapping = NULL;
+
+ switch (pgmap->type) {
+ case MEMORY_DEVICE_PRIVATE:
+ case MEMORY_DEVICE_COHERENT:
+ if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->page_free))
+ break;
+ pgmap->ops->page_free(folio_page(folio, 0));
+ put_dev_pagemap(pgmap);
+ break;
- if (folio->page.pgmap->type != MEMORY_DEVICE_PRIVATE &&
- folio->page.pgmap->type != MEMORY_DEVICE_COHERENT)
+ case MEMORY_DEVICE_GENERIC:
/*
* Reset the refcount to 1 to prepare for handing out the page
* again.
*/
folio_set_count(folio, 1);
- else
- put_dev_pagemap(folio->page.pgmap);
+ break;
+
+ case MEMORY_DEVICE_FS_DAX:
+ wake_up_var(&folio->page);
+ break;
+
+ case MEMORY_DEVICE_PCI_P2PDMA:
+ if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->page_free))
+ break;
+ pgmap->ops->page_free(folio_page(folio, 0));
+ break;
+ }
}
void zone_device_page_init(struct page *page)
@@ -505,26 +529,8 @@ void zone_device_page_init(struct page *page)
* Drivers shouldn't be allocating pages after calling
* memunmap_pages().
*/
- WARN_ON_ONCE(!percpu_ref_tryget_live(&page->pgmap->ref));
+ WARN_ON_ONCE(!percpu_ref_tryget_live(&page_pgmap(page)->ref));
set_page_count(page, 1);
lock_page(page);
}
EXPORT_SYMBOL_GPL(zone_device_page_init);
-
-#ifdef CONFIG_FS_DAX
-bool __put_devmap_managed_folio_refs(struct folio *folio, int refs)
-{
- if (folio->page.pgmap->type != MEMORY_DEVICE_FS_DAX)
- return false;
-
- /*
- * fsdax page refcounts are 1-based, rather than 0-based: if
- * refcount is 1, then the page is free and the refcount is
- * stable because nobody holds a reference on the page.
- */
- if (folio_ref_sub_return(folio, refs) == 1)
- wake_up_var(&folio->_refcount);
- return true;
-}
-EXPORT_SYMBOL(__put_devmap_managed_folio_refs);
-#endif /* CONFIG_FS_DAX */
diff --git a/mm/migrate.c b/mm/migrate.c
index 97f0edf0c032..f3ee6d8d5e2e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -202,7 +202,7 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
return false;
VM_BUG_ON_PAGE(!PageAnon(page), page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
- VM_BUG_ON_PAGE(pte_present(*pvmw->pte), page);
+ VM_BUG_ON_PAGE(pte_present(ptep_get(pvmw->pte)), page);
if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
mm_forbids_zeropage(pvmw->vma->vm_mm))
@@ -328,7 +328,7 @@ static bool remove_migration_pte(struct folio *folio,
folio_add_file_rmap_pte(folio, new, vma);
set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
}
- if (vma->vm_flags & VM_LOCKED)
+ if (READ_ONCE(vma->vm_flags) & VM_LOCKED)
mlock_drain_local();
trace_remove_migration_pte(pvmw.address, pte_val(pte),
@@ -2226,7 +2226,7 @@ static int __add_folio_for_migration(struct folio *folio, int node,
if (folio_nid(folio) == node)
return 0;
- if (folio_likely_mapped_shared(folio) && !migrate_all)
+ if (folio_maybe_mapped_shared(folio) && !migrate_all)
return -EACCES;
if (folio_test_hugetlb(folio)) {
@@ -2651,11 +2651,10 @@ int migrate_misplaced_folio_prepare(struct folio *folio,
* processes with execute permissions as they are probably
* shared libraries.
*
- * See folio_likely_mapped_shared() on possible imprecision
+ * See folio_maybe_mapped_shared() on possible imprecision
* when we cannot easily detect if a folio is shared.
*/
- if ((vma->vm_flags & VM_EXEC) &&
- folio_likely_mapped_shared(folio))
+ if ((vma->vm_flags & VM_EXEC) && folio_maybe_mapped_shared(folio))
return -EACCES;
/*
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index a351497ced4a..3158afe7eb23 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -113,6 +113,7 @@ again:
arch_enter_lazy_mmu_mode();
for (; addr < end; addr += PAGE_SIZE, ptep++) {
+ struct dev_pagemap *pgmap;
unsigned long mpfn = 0, pfn;
struct folio *folio;
struct page *page;
@@ -140,9 +141,10 @@ again:
goto next;
page = pfn_swap_entry_to_page(entry);
+ pgmap = page_pgmap(page);
if (!(migrate->flags &
MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
- page->pgmap->owner != migrate->pgmap_owner)
+ pgmap->owner != migrate->pgmap_owner)
goto next;
mpfn = migrate_pfn(page_to_pfn(page)) |
@@ -159,12 +161,16 @@ again:
}
page = vm_normal_page(migrate->vma, addr, pte);
if (page && !is_zone_device_page(page) &&
- !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
- goto next;
- else if (page && is_device_coherent_page(page) &&
- (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_COHERENT) ||
- page->pgmap->owner != migrate->pgmap_owner))
+ !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
goto next;
+ } else if (page && is_device_coherent_page(page)) {
+ pgmap = page_pgmap(page);
+
+ if (!(migrate->flags &
+ MIGRATE_VMA_SELECT_DEVICE_COHERENT) ||
+ pgmap->owner != migrate->pgmap_owner)
+ goto next;
+ }
mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
}
diff --git a/mm/mincore.c b/mm/mincore.c
index d6bd19e520fc..832f29f46767 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -239,7 +239,7 @@ SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
start = untagged_addr(start);
/* Check the start address: needs to be page-aligned.. */
- if (start & ~PAGE_MASK)
+ if (unlikely(start & ~PAGE_MASK))
return -EINVAL;
/* ..and we need to be passed a valid user-space range */
diff --git a/mm/mlock.c b/mm/mlock.c
index cde076fa7d5e..3cb72b579ffd 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -368,6 +368,8 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
if (is_huge_zero_pmd(*pmd))
goto out;
folio = pmd_folio(*pmd);
+ if (folio_is_zone_device(folio))
+ goto out;
if (vma->vm_flags & VM_LOCKED)
mlock_folio(folio);
else
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 2630cc30147e..84f14fa12d0d 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -30,12 +30,28 @@
#include <linux/crash_dump.h>
#include <linux/execmem.h>
#include <linux/vmstat.h>
+#include <linux/hugetlb.h>
#include "internal.h"
#include "slab.h"
#include "shuffle.h"
#include <asm/setup.h>
+#ifndef CONFIG_NUMA
+unsigned long max_mapnr;
+EXPORT_SYMBOL(max_mapnr);
+
+struct page *mem_map;
+EXPORT_SYMBOL(mem_map);
+#endif
+
+/*
+ * high_memory defines the upper bound on direct map memory, the end
+ * of ZONE_NORMAL.
+ */
+void *high_memory;
+EXPORT_SYMBOL(high_memory);
+
#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;
@@ -438,7 +454,7 @@ static void __init find_zone_movable_pfns_for_nodes(void)
* was requested by the user
*/
required_movablecore =
- roundup(required_movablecore, MAX_ORDER_NR_PAGES);
+ round_up(required_movablecore, MAX_ORDER_NR_PAGES);
required_movablecore = min(totalpages, required_movablecore);
corepages = totalpages - required_movablecore;
@@ -545,11 +561,11 @@ restart:
out2:
/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
- for (nid = 0; nid < MAX_NUMNODES; nid++) {
+ for_each_node_state(nid, N_MEMORY) {
unsigned long start_pfn, end_pfn;
zone_movable_pfn[nid] =
- roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
+ round_up(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
if (zone_movable_pfn[nid] >= end_pfn)
@@ -649,6 +665,28 @@ static inline void fixup_hashdist(void)
static inline void fixup_hashdist(void) {}
#endif /* CONFIG_NUMA */
+/*
+ * Initialize a reserved page unconditionally, finding its zone first.
+ */
+void __meminit __init_page_from_nid(unsigned long pfn, int nid)
+{
+ pg_data_t *pgdat;
+ int zid;
+
+ pgdat = NODE_DATA(nid);
+
+ for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+ struct zone *zone = &pgdat->node_zones[zid];
+
+ if (zone_spans_pfn(zone, pfn))
+ break;
+ }
+ __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
+
+ if (pageblock_aligned(pfn))
+ set_pageblock_migratetype(pfn_to_page(pfn), MIGRATE_MOVABLE);
+}
+
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
{
@@ -705,26 +743,12 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
return false;
}
-static void __meminit init_reserved_page(unsigned long pfn, int nid)
+static void __meminit init_deferred_page(unsigned long pfn, int nid)
{
- pg_data_t *pgdat;
- int zid;
-
if (early_page_initialised(pfn, nid))
return;
- pgdat = NODE_DATA(nid);
-
- for (zid = 0; zid < MAX_NR_ZONES; zid++) {
- struct zone *zone = &pgdat->node_zones[zid];
-
- if (zone_spans_pfn(zone, pfn))
- break;
- }
- __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
-
- if (pageblock_aligned(pfn))
- set_pageblock_migratetype(pfn_to_page(pfn), MIGRATE_MOVABLE);
+ __init_page_from_nid(pfn, nid);
}
#else
static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
@@ -739,7 +763,7 @@ static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
return false;
}
-static inline void init_reserved_page(unsigned long pfn, int nid)
+static inline void init_deferred_page(unsigned long pfn, int nid)
{
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
@@ -760,7 +784,7 @@ void __meminit reserve_bootmem_region(phys_addr_t start,
if (pfn_valid(start_pfn)) {
struct page *page = pfn_to_page(start_pfn);
- init_reserved_page(start_pfn, nid);
+ init_deferred_page(start_pfn, nid);
/*
* no need for atomic set_bit because the struct
@@ -960,19 +984,19 @@ static void __init memmap_init(void)
}
}
-#ifdef CONFIG_SPARSEMEM
/*
* Initialize the memory map for hole in the range [memory_end,
- * section_end].
+ * section_end] for SPARSEMEM and in the range [memory_end, memmap_end]
+ * for FLATMEM.
* Append the pages in this hole to the highest zone in the last
* node.
- * The call to init_unavailable_range() is outside the ifdef to
- * silence the compiler warining about zone_id set but not used;
- * for FLATMEM it is a nop anyway
*/
+#ifdef CONFIG_SPARSEMEM
end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
- if (hole_pfn < end_pfn)
+#else
+ end_pfn = round_up(end_pfn, MAX_ORDER_NR_PAGES);
#endif
+ if (hole_pfn < end_pfn)
init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
}
@@ -998,7 +1022,7 @@ static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
* and zone_device_data. It is a bug if a ZONE_DEVICE page is
* ever freed or placed on a driver-private list.
*/
- page->pgmap = pgmap;
+ page_folio(page)->pgmap = pgmap;
page->zone_device_data = NULL;
/*
@@ -1017,12 +1041,25 @@ static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
}
/*
- * ZONE_DEVICE pages are released directly to the driver page allocator
- * which will set the page count to 1 when allocating the page.
+	 * ZONE_DEVICE pages other than MEMORY_DEVICE_GENERIC are released
+ * directly to the driver page allocator which will set the page count
+ * to 1 when allocating the page.
+ *
+	 * MEMORY_DEVICE_GENERIC and MEMORY_DEVICE_FS_DAX pages automatically
+	 * have their refcount reset to one whenever they are freed (i.e. after
+ * their refcount drops to 0).
*/
- if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
- pgmap->type == MEMORY_DEVICE_COHERENT)
+ switch (pgmap->type) {
+ case MEMORY_DEVICE_FS_DAX:
+ case MEMORY_DEVICE_PRIVATE:
+ case MEMORY_DEVICE_COHERENT:
+ case MEMORY_DEVICE_PCI_P2PDMA:
set_page_count(page, 0);
+ break;
+
+ case MEMORY_DEVICE_GENERIC:
+ break;
+ }
}
/*
@@ -1431,7 +1468,7 @@ void __meminit init_currently_empty_zone(struct zone *zone,
#ifndef CONFIG_SPARSEMEM
/*
- * Calculate the size of the zone->blockflags rounded to an unsigned long
+ * Calculate the size of the zone->pageblock_flags rounded to an unsigned long
* Start by making sure zonesize is a multiple of pageblock_order by rounding
* up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
* round what is now in bits to nearest long in bits, then return it in
@@ -1442,10 +1479,10 @@ static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned l
unsigned long usemapsize;
zonesize += zone_start_pfn & (pageblock_nr_pages-1);
- usemapsize = roundup(zonesize, pageblock_nr_pages);
+ usemapsize = round_up(zonesize, pageblock_nr_pages);
usemapsize = usemapsize >> pageblock_order;
usemapsize *= NR_PAGEBLOCK_BITS;
- usemapsize = roundup(usemapsize, BITS_PER_LONG);
+ usemapsize = round_up(usemapsize, BITS_PER_LONG);
return usemapsize / BITS_PER_BYTE;
}
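
The roundup()-to-round_up() switches in this file are safe because the alignments involved (MAX_ORDER_NR_PAGES, pageblock_nr_pages, BITS_PER_LONG) are powers of two, letting a mask replace the division. A userspace sketch of both forms and of the usemap arithmetic above, with illustrative values:

#include <stdio.h>

/* Generic rounding: works for any multiple, costs a division. */
#define roundup(x, y)	((((x) + (y) - 1) / (y)) * (y))
/* Power-of-two rounding: a mask is enough. */
#define round_up(x, y)	(((x) + (y) - 1) & ~((y) - 1))

int main(void)
{
	unsigned long zonesize = 100000;	/* pages, illustrative */
	unsigned long pageblock_nr_pages = 512;	/* power of two */
	unsigned long nr_pageblock_bits = 4;
	unsigned long bits_per_long = 64;
	unsigned long usemapsize;

	usemapsize = round_up(zonesize, pageblock_nr_pages);
	usemapsize /= pageblock_nr_pages;  /* kernel shifts by pageblock_order */
	usemapsize *= nr_pageblock_bits;
	usemapsize = round_up(usemapsize, bits_per_long);

	printf("usemap: %lu bytes\n", usemapsize / 8);
	printf("roundup == round_up for a power of two: %d\n",
	       roundup(zonesize, pageblock_nr_pages) ==
	       round_up(zonesize, pageblock_nr_pages));
	return 0;
}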
@@ -1617,7 +1654,7 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat)
start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
offset = pgdat->node_start_pfn - start;
/*
- * The zone's endpoints aren't required to be MAX_PAGE_ORDER
+ * The zone's endpoints aren't required to be MAX_PAGE_ORDER
* aligned but the node_mem_map endpoints must be in order
* for the buddy allocator to function correctly.
*/
@@ -1633,14 +1670,15 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat)
pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
__func__, pgdat->node_id, (unsigned long)pgdat,
(unsigned long)pgdat->node_mem_map);
-#ifndef CONFIG_NUMA
+
/* the global mem_map is just set as node 0's */
- if (pgdat == NODE_DATA(0)) {
- mem_map = NODE_DATA(0)->node_mem_map;
- if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
- mem_map -= offset;
- }
-#endif
+ WARN_ON(pgdat != NODE_DATA(0));
+
+ mem_map = pgdat->node_mem_map;
+ if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
+ mem_map -= offset;
+
+ max_mapnr = end - start;
}
#else
static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
@@ -1747,6 +1785,27 @@ static bool arch_has_descending_max_zone_pfns(void)
return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40);
}
+static void set_high_memory(void)
+{
+ phys_addr_t highmem = memblock_end_of_DRAM();
+
+ /*
+ * Some architectures (e.g. ARM) set high_memory very early and
+ * use it in arch setup code.
+	 * If an architecture already set high_memory, don't overwrite it.
+ */
+ if (high_memory)
+ return;
+
+#ifdef CONFIG_HIGHMEM
+ if (arch_has_descending_max_zone_pfns() ||
+ highmem > PFN_PHYS(arch_zone_lowest_possible_pfn[ZONE_HIGHMEM]))
+ highmem = PFN_PHYS(arch_zone_lowest_possible_pfn[ZONE_HIGHMEM]);
+#endif
+
+ high_memory = phys_to_virt(highmem - 1) + 1;
+}
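
The closing expression is a deliberate idiom: translating the last valid byte and adding one back avoids handing phys_to_virt() an address one past the end of DRAM, which could wrap if memory ends at the top of the physical address space. A toy model of the arithmetic (identity-style direct map, illustrative constants):

#include <stdint.h>
#include <stdio.h>

/* Toy direct map: virtual = PAGE_OFFSET + physical (illustrative). */
#define PAGE_OFFSET	0xffff800000000000ull

static uint64_t phys_to_virt(uint64_t phys)
{
	return PAGE_OFFSET + phys;
}

int main(void)
{
	/* DRAM ends exactly at 4 GiB; the end address itself is not valid. */
	uint64_t end_of_dram = 0x100000000ull;

	/* Translate the last valid byte, then step one past it. */
	uint64_t high_memory = phys_to_virt(end_of_dram - 1) + 1;

	printf("high_memory = %#llx\n", (unsigned long long)high_memory);
	return 0;
}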
+
/**
* free_area_init - Initialise all pg_data_t and zone data
* @max_zone_pfn: an array of max PFNs for each zone
@@ -1861,11 +1920,16 @@ void __init free_area_init(unsigned long *max_zone_pfn)
}
}
+ for_each_node_state(nid, N_MEMORY)
+ sparse_vmemmap_init_nid_late(nid);
+
calc_nr_kernel_pages();
memmap_init();
/* disable hash distribution for systems with a single node */
fixup_hashdist();
+
+ set_high_memory();
}
/**
@@ -2251,6 +2315,15 @@ void __init init_cma_reserved_pageblock(struct page *page)
adjust_managed_page_count(page, pageblock_nr_pages);
page_zone(page)->cma_pages += pageblock_nr_pages;
}
+/*
+ * Similar to above, but only set the migrate type and stats.
+ */
+void __init init_cma_pageblock(struct page *page)
+{
+ set_pageblock_migratetype(page, MIGRATE_CMA);
+ adjust_managed_page_count(page, pageblock_nr_pages);
+ page_zone(page)->cma_pages += pageblock_nr_pages;
+}
#endif
void set_zone_contiguous(struct zone *zone)
@@ -2275,6 +2348,31 @@ void set_zone_contiguous(struct zone *zone)
zone->contiguous = true;
}
+/*
+ * Check if a PFN range intersects multiple zones on one or more
+ * NUMA nodes. Specify the @nid argument if it is known that this
+ * PFN range is on one node, NUMA_NO_NODE otherwise.
+ */
+bool pfn_range_intersects_zones(int nid, unsigned long start_pfn,
+ unsigned long nr_pages)
+{
+ struct zone *zone, *izone = NULL;
+
+ for_each_zone(zone) {
+ if (nid != NUMA_NO_NODE && zone_to_nid(zone) != nid)
+ continue;
+
+ if (zone_intersects(zone, start_pfn, nr_pages)) {
+ if (izone != NULL)
+ return true;
+ izone = zone;
+ }
+ }
+
+ return false;
+}
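
zone_intersects() is the usual half-open range-overlap test, and the helper returns true as soon as a second distinct zone is hit. A minimal userspace rendering of the same loop over a flat zone array (zone names and bounds illustrative):

#include <stdbool.h>
#include <stdio.h>

struct zone { unsigned long start_pfn, end_pfn; const char *name; };

static bool zone_intersects(const struct zone *z,
			    unsigned long start_pfn, unsigned long nr_pages)
{
	/* Half-open [start, end) overlap test. */
	return start_pfn < z->end_pfn && start_pfn + nr_pages > z->start_pfn;
}

static bool pfn_range_intersects_zones(const struct zone *zones, int nr,
				       unsigned long start_pfn,
				       unsigned long nr_pages)
{
	const struct zone *izone = NULL;

	for (int i = 0; i < nr; i++) {
		if (!zone_intersects(&zones[i], start_pfn, nr_pages))
			continue;
		if (izone)
			return true;	/* second zone hit: range spans zones */
		izone = &zones[i];
	}
	return false;
}

int main(void)
{
	struct zone zones[] = {
		{ 0,       0x40000, "DMA32"  },
		{ 0x40000, 0x80000, "Normal" },
	};

	printf("inside one zone: %d\n",
	       pfn_range_intersects_zones(zones, 2, 0x1000, 0x100));	/* 0 */
	printf("straddles zones: %d\n",
	       pfn_range_intersects_zones(zones, 2, 0x3ff00, 0x200));	/* 1 */
	return 0;
}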
+
static void __init mem_init_print_info(void);
void __init page_alloc_init_late(void)
{
@@ -2636,11 +2734,22 @@ static void __init mem_init_print_info(void)
);
}
+void __init __weak arch_mm_preinit(void)
+{
+}
+
+void __init __weak mem_init(void)
+{
+}
+
/*
* Set up kernel memory allocators
*/
void __init mm_core_init(void)
{
+ arch_mm_preinit();
+ hugetlb_bootmem_alloc();
+
/* Initializations relying on SMP setup */
BUILD_BUG_ON(MAX_ZONELISTS > 2);
build_all_zonelists(NULL);
@@ -2656,6 +2765,7 @@ void __init mm_core_init(void)
report_meminit();
kmsan_init_shadow();
stack_depot_early_init();
+ memblock_free_all();
mem_init();
kmem_cache_init();
/*
diff --git a/mm/mmap.c b/mm/mmap.c
index d6bbe435bd99..bd210aaf7ebd 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1305,7 +1305,8 @@ void exit_mmap(struct mm_struct *mm)
do {
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += vma_pages(vma);
- remove_vma(vma, /* unreachable = */ true);
+ vma_mark_detached(vma);
+ remove_vma(vma);
count++;
cond_resched();
vma = vma_next(&vmi);
@@ -1747,6 +1748,7 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
VMG_STATE(vmg, mm, &vmi, new_start, old_end, 0, vma->vm_pgoff);
struct vm_area_struct *next;
struct mmu_gather tlb;
+ PAGETABLE_MOVE(pmc, vma, vma, old_start, new_start, length);
BUG_ON(new_start > new_end);
@@ -1761,7 +1763,7 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
/*
* cover the whole range: [new_start, old_end)
*/
- vmg.vma = vma;
+ vmg.middle = vma;
if (vma_expand(&vmg))
return -ENOMEM;
@@ -1769,8 +1771,8 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
* move the page tables downwards, on failure we rely on
* process cleanup to remove whatever mess we made.
*/
- if (length != move_page_tables(vma, old_start,
- vma, new_start, length, false, true))
+ pmc.for_stack = true;
+ if (length != move_page_tables(&pmc))
return -ENOMEM;
tlb_gather_mmu(&tlb, mm);
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 7aa6f18c500b..db7ba4a725d6 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -246,8 +246,16 @@ static void __tlb_remove_table_free(struct mmu_table_batch *batch)
* IRQs delays the completion of the TLB flush we can never observe an already
* freed page.
*
- * Architectures that do not have this (PPC) need to delay the freeing by some
- * other means, this is that means.
+ * Not all systems IPI every CPU for this purpose:
+ *
+ * - Some architectures have HW support for cross-CPU synchronisation of TLB
+ * flushes, so there's no IPI at all.
+ *
+ * - Paravirt guests can do this TLB flushing in the hypervisor, or coordinate
+ * with the hypervisor to defer flushing on preempted vCPUs.
+ *
+ * Such systems need to delay the freeing by some other means, this is that
+ * means.
*
* What we do is batch the freed directory pages (tables) and RCU free them.
* We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 516b1d847e2c..62c1f7945741 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -133,7 +133,7 @@ static long change_pte_range(struct mmu_gather *tlb,
/* Also skip shared copy-on-write pages */
if (is_cow_mapping(vma->vm_flags) &&
(folio_maybe_dma_pinned(folio) ||
- folio_likely_mapped_shared(folio)))
+ folio_maybe_mapped_shared(folio)))
continue;
/*
@@ -225,14 +225,6 @@ static long change_pte_range(struct mmu_gather *tlb,
newpte = swp_entry_to_pte(entry);
if (pte_swp_uffd_wp(oldpte))
newpte = pte_swp_mkuffd_wp(newpte);
- } else if (is_writable_device_exclusive_entry(entry)) {
- entry = make_readable_device_exclusive_entry(
- swp_offset(entry));
- newpte = swp_entry_to_pte(entry);
- if (pte_swp_soft_dirty(oldpte))
- newpte = pte_swp_mksoft_dirty(newpte);
- if (pte_swp_uffd_wp(oldpte))
- newpte = pte_swp_mkuffd_wp(newpte);
} else if (is_pte_marker_entry(entry)) {
/*
* Ignore error swap entries unconditionally,
@@ -607,7 +599,7 @@ mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
unsigned long start, unsigned long end, unsigned long newflags)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long oldflags = vma->vm_flags;
+ unsigned long oldflags = READ_ONCE(vma->vm_flags);
long nrpages = (end - start) >> PAGE_SHIFT;
unsigned int mm_cp_flags = 0;
unsigned long charged = 0;
@@ -627,7 +619,7 @@ mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
* uncommon case, so doesn't need to be very optimized.
*/
if (arch_has_pfn_modify_check() &&
- (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
+ (oldflags & (VM_PFNMAP|VM_MIXEDMAP)) &&
(newflags & VM_ACCESS_FLAGS) == 0) {
pgprot_t new_pgprot = vm_get_page_prot(newflags);
@@ -676,7 +668,7 @@ mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
* held in write mode.
*/
vma_start_write(vma);
- vm_flags_reset(vma, newflags);
+ vm_flags_reset_once(vma, newflags);
if (vma_wants_manual_pte_write_upgrade(vma))
mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
vma_set_page_prot(vma);
diff --git a/mm/mremap.c b/mm/mremap.c
index cff7f552f909..7db9da609c84 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -32,6 +32,45 @@
#include "internal.h"
+/* Classify the kind of remap operation being performed. */
+enum mremap_type {
+ MREMAP_INVALID, /* Initial state. */
+ MREMAP_NO_RESIZE, /* old_len == new_len, if not moved, do nothing. */
+ MREMAP_SHRINK, /* old_len > new_len. */
+ MREMAP_EXPAND, /* old_len < new_len. */
+};
+
+/*
+ * Describes a VMA mremap() operation and is threaded throughout it.
+ *
+ * Any of the fields may be mutated by the operation, however these values will
+ * always accurately reflect the remap (for instance, we may adjust lengths and
+ * delta to account for hugetlb alignment).
+ */
+struct vma_remap_struct {
+ /* User-provided state. */
+ unsigned long addr; /* User-specified address from which we remap. */
+ unsigned long old_len; /* Length of range being remapped. */
+ unsigned long new_len; /* Desired new length of mapping. */
+ unsigned long flags; /* user-specified MREMAP_* flags. */
+ unsigned long new_addr; /* Optionally, desired new address. */
+
+ /* uffd state. */
+ struct vm_userfaultfd_ctx *uf;
+ struct list_head *uf_unmap_early;
+ struct list_head *uf_unmap;
+
+ /* VMA state, determined in do_mremap(). */
+ struct vm_area_struct *vma;
+
+ /* Internal state, determined in do_mremap(). */
+ unsigned long delta; /* Absolute delta of old_len,new_len. */
+ bool mlocked; /* Was the VMA mlock()'d? */
+ enum mremap_type remap_type; /* expand, shrink, etc. */
+ bool mmap_locked; /* Is mm currently write-locked? */
+ unsigned long charged; /* If VM_ACCOUNT, # pages to account. */
+};
+
static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
@@ -69,8 +108,7 @@ static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
return pmd;
}
-static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long addr)
+static pud_t *alloc_new_pud(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
p4d_t *p4d;
@@ -83,13 +121,12 @@ static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
return pud_alloc(mm, p4d, addr);
}
-static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long addr)
+static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
{
pud_t *pud;
pmd_t *pmd;
- pud = alloc_new_pud(mm, vma, addr);
+ pud = alloc_new_pud(mm, addr);
if (!pud)
return NULL;
@@ -133,17 +170,19 @@ static pte_t move_soft_dirty_pte(pte_t pte)
return pte;
}
-static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
- unsigned long old_addr, unsigned long old_end,
- struct vm_area_struct *new_vma, pmd_t *new_pmd,
- unsigned long new_addr, bool need_rmap_locks)
+static int move_ptes(struct pagetable_move_control *pmc,
+ unsigned long extent, pmd_t *old_pmd, pmd_t *new_pmd)
{
+ struct vm_area_struct *vma = pmc->old;
bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
struct mm_struct *mm = vma->vm_mm;
pte_t *old_pte, *new_pte, pte;
pmd_t dummy_pmdval;
spinlock_t *old_ptl, *new_ptl;
bool force_flush = false;
+ unsigned long old_addr = pmc->old_addr;
+ unsigned long new_addr = pmc->new_addr;
+ unsigned long old_end = old_addr + extent;
unsigned long len = old_end - old_addr;
int err = 0;
@@ -165,7 +204,7 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
* serialize access to individual ptes, but only rmap traversal
* order guarantees that we won't miss both the old and new ptes).
*/
- if (need_rmap_locks)
+ if (pmc->need_rmap_locks)
take_rmap_locks(vma);
/*
@@ -239,7 +278,7 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
pte_unmap(new_pte - 1);
pte_unmap_unlock(old_pte - 1, old_ptl);
out:
- if (need_rmap_locks)
+ if (pmc->need_rmap_locks)
drop_rmap_locks(vma);
return err;
}
@@ -254,10 +293,11 @@ static inline bool arch_supports_page_table_move(void)
#endif
#ifdef CONFIG_HAVE_MOVE_PMD
-static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
- unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
+static bool move_normal_pmd(struct pagetable_move_control *pmc,
+ pmd_t *old_pmd, pmd_t *new_pmd)
{
spinlock_t *old_ptl, *new_ptl;
+ struct vm_area_struct *vma = pmc->old;
struct mm_struct *mm = vma->vm_mm;
bool res = false;
pmd_t pmd;
@@ -303,7 +343,7 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
* We don't have to worry about the ordering of src and dst
* ptlocks because exclusive mmap_lock prevents deadlock.
*/
- old_ptl = pmd_lock(vma->vm_mm, old_pmd);
+ old_ptl = pmd_lock(mm, old_pmd);
new_ptl = pmd_lockptr(mm, new_pmd);
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
@@ -320,7 +360,7 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
VM_BUG_ON(!pmd_none(*new_pmd));
pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
- flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
+ flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PMD_SIZE);
out_unlock:
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
@@ -329,19 +369,19 @@ out_unlock:
return res;
}
#else
-static inline bool move_normal_pmd(struct vm_area_struct *vma,
- unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd,
- pmd_t *new_pmd)
+static inline bool move_normal_pmd(struct pagetable_move_control *pmc,
+ pmd_t *old_pmd, pmd_t *new_pmd)
{
return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD)
-static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
- unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
+static bool move_normal_pud(struct pagetable_move_control *pmc,
+ pud_t *old_pud, pud_t *new_pud)
{
spinlock_t *old_ptl, *new_ptl;
+ struct vm_area_struct *vma = pmc->old;
struct mm_struct *mm = vma->vm_mm;
pud_t pud;
@@ -367,7 +407,7 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
* We don't have to worry about the ordering of src and dst
* ptlocks because exclusive mmap_lock prevents deadlock.
*/
- old_ptl = pud_lock(vma->vm_mm, old_pud);
+ old_ptl = pud_lock(mm, old_pud);
new_ptl = pud_lockptr(mm, new_pud);
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
@@ -379,7 +419,7 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
VM_BUG_ON(!pud_none(*new_pud));
pud_populate(mm, new_pud, pud_pgtable(pud));
- flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
+ flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PUD_SIZE);
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
spin_unlock(old_ptl);
@@ -387,19 +427,19 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
return true;
}
#else
-static inline bool move_normal_pud(struct vm_area_struct *vma,
- unsigned long old_addr, unsigned long new_addr, pud_t *old_pud,
- pud_t *new_pud)
+static inline bool move_normal_pud(struct pagetable_move_control *pmc,
+ pud_t *old_pud, pud_t *new_pud)
{
return false;
}
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
-static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
- unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
+static bool move_huge_pud(struct pagetable_move_control *pmc,
+ pud_t *old_pud, pud_t *new_pud)
{
spinlock_t *old_ptl, *new_ptl;
+ struct vm_area_struct *vma = pmc->old;
struct mm_struct *mm = vma->vm_mm;
pud_t pud;
@@ -414,7 +454,7 @@ static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
* We don't have to worry about the ordering of src and dst
* ptlocks because exclusive mmap_lock prevents deadlock.
*/
- old_ptl = pud_lock(vma->vm_mm, old_pud);
+ old_ptl = pud_lock(mm, old_pud);
new_ptl = pud_lockptr(mm, new_pud);
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
@@ -427,8 +467,8 @@ static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
/* Set the new pud */
/* mark soft_ditry when we add pud level soft dirty support */
- set_pud_at(mm, new_addr, new_pud, pud);
- flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE);
+ set_pud_at(mm, pmc->new_addr, new_pud, pud);
+ flush_pud_tlb_range(vma, pmc->old_addr, pmc->old_addr + HPAGE_PUD_SIZE);
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
spin_unlock(old_ptl);
@@ -436,8 +476,9 @@ static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
return true;
}
#else
-static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
- unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
+static bool move_huge_pud(struct pagetable_move_control *pmc,
+ pud_t *old_pud, pud_t *new_pud)
{
WARN_ON_ONCE(1);
return false;
@@ -458,10 +499,12 @@ enum pgt_entry {
* destination pgt_entry.
*/
static __always_inline unsigned long get_extent(enum pgt_entry entry,
- unsigned long old_addr, unsigned long old_end,
- unsigned long new_addr)
+ struct pagetable_move_control *pmc)
{
unsigned long next, extent, mask, size;
+ unsigned long old_addr = pmc->old_addr;
+ unsigned long old_end = pmc->old_end;
+ unsigned long new_addr = pmc->new_addr;
switch (entry) {
case HPAGE_PMD:
@@ -491,37 +534,50 @@ static __always_inline unsigned long get_extent(enum pgt_entry entry,
}
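
get_extent() (its body is largely unchanged above) clamps each step to three limits: the next boundary of the given page-table level after old_addr, the end of the old range, and the next boundary after new_addr, since source and destination may sit at different offsets within their tables. A userspace sketch of that clamping for a single level, with size standing in for PMD_SIZE:

#include <stdio.h>

static unsigned long get_extent(unsigned long old_addr, unsigned long old_end,
				unsigned long new_addr, unsigned long size)
{
	unsigned long mask = ~(size - 1);
	/* Distance to the next boundary after old_addr ... */
	unsigned long next = (old_addr + size) & mask;
	unsigned long extent = next - old_addr;

	/* ... capped by the end of the range being moved ... */
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;

	/* ... and by the next boundary after new_addr. */
	next = (new_addr + size) & mask;
	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}

int main(void)
{
	unsigned long pmd = 2UL << 20;	/* 2 MiB, illustrative PMD_SIZE */

	/* Both addresses 1 MiB into their PMDs: a half-PMD step. */
	printf("%#lx\n", get_extent(0x100000, 0x900000, 0x500000, pmd));
	/* Aligned on both sides: a full-PMD step. */
	printf("%#lx\n", get_extent(0x200000, 0x900000, 0x600000, pmd));
	return 0;
}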
/*
+ * Should move_pgt_entry() acquire the rmap locks? This is either expressed in
+ * the PMC, or overridden in the case of normal, larger page tables.
+ */
+static bool should_take_rmap_locks(struct pagetable_move_control *pmc,
+ enum pgt_entry entry)
+{
+ switch (entry) {
+ case NORMAL_PMD:
+ case NORMAL_PUD:
+ return true;
+ default:
+ return pmc->need_rmap_locks;
+ }
+}
+
+/*
* Attempts to speedup the move by moving entry at the level corresponding to
* pgt_entry. Returns true if the move was successful, else false.
*/
-static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
- unsigned long old_addr, unsigned long new_addr,
- void *old_entry, void *new_entry, bool need_rmap_locks)
+static bool move_pgt_entry(struct pagetable_move_control *pmc,
+ enum pgt_entry entry, void *old_entry, void *new_entry)
{
bool moved = false;
+ bool need_rmap_locks = should_take_rmap_locks(pmc, entry);
/* See comment in move_ptes() */
if (need_rmap_locks)
- take_rmap_locks(vma);
+ take_rmap_locks(pmc->old);
switch (entry) {
case NORMAL_PMD:
- moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
- new_entry);
+ moved = move_normal_pmd(pmc, old_entry, new_entry);
break;
case NORMAL_PUD:
- moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
- new_entry);
+ moved = move_normal_pud(pmc, old_entry, new_entry);
break;
case HPAGE_PMD:
moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
- move_huge_pmd(vma, old_addr, new_addr, old_entry,
+ move_huge_pmd(pmc->old, pmc->old_addr, pmc->new_addr, old_entry,
new_entry);
break;
case HPAGE_PUD:
moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
- move_huge_pud(vma, old_addr, new_addr, old_entry,
- new_entry);
+ move_huge_pud(pmc, old_entry, new_entry);
break;
default:
@@ -530,7 +586,7 @@ static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
}
if (need_rmap_locks)
- drop_rmap_locks(vma);
+ drop_rmap_locks(pmc->old);
return moved;
}
@@ -541,8 +597,9 @@ static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
* the VMA that is created to span the source and destination of the move,
* so we make an exception for it.
*/
-static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_align,
- unsigned long mask, bool for_stack)
+static bool can_align_down(struct pagetable_move_control *pmc,
+ struct vm_area_struct *vma, unsigned long addr_to_align,
+ unsigned long mask)
{
unsigned long addr_masked = addr_to_align & mask;
@@ -551,11 +608,11 @@ static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_ali
* of the corresponding VMA, we can't align down or we will destroy part
* of the current mapping.
*/
- if (!for_stack && vma->vm_start != addr_to_align)
+ if (!pmc->for_stack && vma->vm_start != addr_to_align)
return false;
/* In the stack case we explicitly permit in-VMA alignment. */
- if (for_stack && addr_masked >= vma->vm_start)
+ if (pmc->for_stack && addr_masked >= vma->vm_start)
return true;
/*
@@ -565,163 +622,390 @@ static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_ali
return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL;
}
-/* Opportunistically realign to specified boundary for faster copy. */
-static void try_realign_addr(unsigned long *old_addr, struct vm_area_struct *old_vma,
- unsigned long *new_addr, struct vm_area_struct *new_vma,
- unsigned long mask, bool for_stack)
+/*
+ * Determine if we are in fact able to realign for efficiency to a higher page
+ * table boundary.
+ */
+static bool can_realign_addr(struct pagetable_move_control *pmc,
+ unsigned long pagetable_mask)
{
+ unsigned long align_mask = ~pagetable_mask;
+ unsigned long old_align = pmc->old_addr & align_mask;
+ unsigned long new_align = pmc->new_addr & align_mask;
+ unsigned long pagetable_size = align_mask + 1;
+ unsigned long old_align_next = pagetable_size - old_align;
+
+ /*
+ * We don't want to have to go hunting for VMAs from the end of the old
+	 * VMA to the next page table boundary, and we also want to make sure
+	 * the operation is worthwhile.
+ *
+ * So ensure that we only perform this realignment if the end of the
+ * range being copied reaches or crosses the page table boundary.
+ *
+ * boundary boundary
+ * .<- old_align -> .
+ * . |----------------.-----------|
+ * . | vma . |
+ * . |----------------.-----------|
+ * . <----------------.----------->
+ * . len_in
+ * <------------------------------->
+ * . pagetable_size .
+ * . <---------------->
+ * . old_align_next .
+ */
+ if (pmc->len_in < old_align_next)
+ return false;
+
/* Skip if the addresses are already aligned. */
- if ((*old_addr & ~mask) == 0)
- return;
+ if (old_align == 0)
+ return false;
/* Only realign if the new and old addresses are mutually aligned. */
- if ((*old_addr & ~mask) != (*new_addr & ~mask))
- return;
+ if (old_align != new_align)
+ return false;
/* Ensure realignment doesn't cause overlap with existing mappings. */
- if (!can_align_down(old_vma, *old_addr, mask, for_stack) ||
- !can_align_down(new_vma, *new_addr, mask, for_stack))
+ if (!can_align_down(pmc, pmc->old, pmc->old_addr, pagetable_mask) ||
+ !can_align_down(pmc, pmc->new, pmc->new_addr, pagetable_mask))
+ return false;
+
+ return true;
+}
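
The guards above boil down to mask arithmetic: take each address's offset within its page table, require the offsets to be equal and nonzero, and require the copy to reach the next boundary. A sketch of just that arithmetic, with the VMA-overlap checks (can_align_down()) omitted:

#include <stdbool.h>
#include <stdio.h>

static bool can_realign(unsigned long old_addr, unsigned long new_addr,
			unsigned long len_in, unsigned long table_mask)
{
	unsigned long align_mask = ~table_mask;
	unsigned long old_align = old_addr & align_mask;  /* offset in table */
	unsigned long new_align = new_addr & align_mask;
	unsigned long table_size = align_mask + 1;
	unsigned long old_align_next = table_size - old_align;

	if (len_in < old_align_next)
		return false;	/* copy never reaches the boundary */
	if (old_align == 0)
		return false;	/* already aligned, nothing to gain */
	if (old_align != new_align)
		return false;	/* offsets differ: can't align both down */
	return true;		/* (VMA-overlap checks omitted here) */
}

int main(void)
{
	unsigned long pmd_mask = ~((2UL << 20) - 1); /* illustrative PMD_MASK */

	/* 1 MiB into a PMD on both sides, 3 MiB long: realign. */
	printf("%d\n", can_realign(0x100000, 0x2100000, 0x300000, pmd_mask));
	/* Too short to reach the boundary: don't. */
	printf("%d\n", can_realign(0x100000, 0x2100000, 0x080000, pmd_mask));
	return 0;
}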
+
+/*
+ * Opportunistically realign to specified boundary for faster copy.
+ *
+ * Consider an mremap() of a VMA with page table boundaries as below, and no
+ * preceding VMAs from the lower page table boundary to the start of the VMA,
+ * with the end of the range reaching or crossing the page table boundary.
+ *
+ * boundary boundary
+ * . |----------------.-----------|
+ * . | vma . |
+ * . |----------------.-----------|
+ * . pmc->old_addr . pmc->old_end
+ * . <---------------------------->
+ * . move these page tables
+ *
+ * If we proceed with moving page tables in this scenario, we will have a lot of
+ * work to do traversing old page tables and establishing new ones in the
+ * destination across multiple lower level page tables.
+ *
+ * The idea here is simply to align pmc->old_addr, pmc->new_addr down to the
+ * page table boundary, so we can simply copy a single page table entry for the
+ * aligned portion of the VMA instead:
+ *
+ * boundary boundary
+ * . |----------------.-----------|
+ * . | vma . |
+ * . |----------------.-----------|
+ * pmc->old_addr . pmc->old_end
+ * <------------------------------------------->
+ * . move these page tables
+ */
+static void try_realign_addr(struct pagetable_move_control *pmc,
+ unsigned long pagetable_mask)
+{
+
+ if (!can_realign_addr(pmc, pagetable_mask))
return;
- *old_addr = *old_addr & mask;
- *new_addr = *new_addr & mask;
+ /*
+ * Simply align to page table boundaries. Note that we do NOT update the
+ * pmc->old_end value, and since the move_page_tables() operation spans
+ * from [old_addr, old_end) (offsetting new_addr as it is performed),
+ * this simply changes the start of the copy, not the end.
+ */
+ pmc->old_addr &= pagetable_mask;
+ pmc->new_addr &= pagetable_mask;
+}
+
+/* Is the page table move operation done? */
+static bool pmc_done(struct pagetable_move_control *pmc)
+{
+ return pmc->old_addr >= pmc->old_end;
+}
+
+/* Advance to the next page table, offset by extent bytes. */
+static void pmc_next(struct pagetable_move_control *pmc, unsigned long extent)
+{
+ pmc->old_addr += extent;
+ pmc->new_addr += extent;
}
-unsigned long move_page_tables(struct vm_area_struct *vma,
- unsigned long old_addr, struct vm_area_struct *new_vma,
- unsigned long new_addr, unsigned long len,
- bool need_rmap_locks, bool for_stack)
+/*
+ * Determine how many bytes in the specified input range have had their page
+ * tables moved so far.
+ */
+static unsigned long pmc_progress(struct pagetable_move_control *pmc)
{
- unsigned long extent, old_end;
+ unsigned long orig_old_addr = pmc->old_end - pmc->len_in;
+ unsigned long old_addr = pmc->old_addr;
+
+ /*
+ * Prevent negative return values when {old,new}_addr was realigned but
+ * we broke out of the loop in move_page_tables() for the first PMD
+ * itself.
+ */
+ return old_addr < orig_old_addr ? 0 : old_addr - orig_old_addr;
+}
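
pmc_progress() has to tolerate the realignment performed earlier: old_addr may have been pulled below the caller's original start, so a failure on the very first PMD would otherwise make the subtraction go negative. A worked sketch:

#include <stdio.h>

static unsigned long pmc_progress(unsigned long old_addr,
				  unsigned long old_end,
				  unsigned long len_in)
{
	unsigned long orig_old_addr = old_end - len_in;	/* user's start */

	/* Clamp: start was realigned below orig start, nothing moved yet. */
	return old_addr < orig_old_addr ? 0 : old_addr - orig_old_addr;
}

int main(void)
{
	/* User asked to move [0x2100000, 0x2400000): len_in = 3 MiB. */
	unsigned long old_end = 0x2400000, len_in = 0x300000;

	/* Realigned to 0x2000000, failed before moving anything: 0. */
	printf("%lu\n", pmc_progress(0x2000000, old_end, len_in));
	/* Finished the whole range: the full length. */
	printf("%#lx\n", pmc_progress(old_end, old_end, len_in));
	return 0;
}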
+
+unsigned long move_page_tables(struct pagetable_move_control *pmc)
+{
+ unsigned long extent;
struct mmu_notifier_range range;
pmd_t *old_pmd, *new_pmd;
pud_t *old_pud, *new_pud;
+ struct mm_struct *mm = pmc->old->vm_mm;
- if (!len)
+ if (!pmc->len_in)
return 0;
- old_end = old_addr + len;
-
- if (is_vm_hugetlb_page(vma))
- return move_hugetlb_page_tables(vma, new_vma, old_addr,
- new_addr, len);
+ if (is_vm_hugetlb_page(pmc->old))
+ return move_hugetlb_page_tables(pmc->old, pmc->new, pmc->old_addr,
+ pmc->new_addr, pmc->len_in);
/*
* If possible, realign addresses to PMD boundary for faster copy.
* Only realign if the mremap copying hits a PMD boundary.
*/
- if (len >= PMD_SIZE - (old_addr & ~PMD_MASK))
- try_realign_addr(&old_addr, vma, &new_addr, new_vma, PMD_MASK,
- for_stack);
+ try_realign_addr(pmc, PMD_MASK);
- flush_cache_range(vma, old_addr, old_end);
- mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
- old_addr, old_end);
+ flush_cache_range(pmc->old, pmc->old_addr, pmc->old_end);
+ mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, mm,
+ pmc->old_addr, pmc->old_end);
mmu_notifier_invalidate_range_start(&range);
- for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
+ for (; !pmc_done(pmc); pmc_next(pmc, extent)) {
cond_resched();
/*
* If extent is PUD-sized try to speed up the move by moving at the
* PUD level if possible.
*/
- extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);
+ extent = get_extent(NORMAL_PUD, pmc);
- old_pud = get_old_pud(vma->vm_mm, old_addr);
+ old_pud = get_old_pud(mm, pmc->old_addr);
if (!old_pud)
continue;
- new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
+ new_pud = alloc_new_pud(mm, pmc->new_addr);
if (!new_pud)
break;
if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) {
if (extent == HPAGE_PUD_SIZE) {
- move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr,
- old_pud, new_pud, need_rmap_locks);
+ move_pgt_entry(pmc, HPAGE_PUD, old_pud, new_pud);
/* We ignore and continue on error? */
continue;
}
} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
-
- if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
- old_pud, new_pud, true))
+ if (move_pgt_entry(pmc, NORMAL_PUD, old_pud, new_pud))
continue;
}
- extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr);
- old_pmd = get_old_pmd(vma->vm_mm, old_addr);
+ extent = get_extent(NORMAL_PMD, pmc);
+ old_pmd = get_old_pmd(mm, pmc->old_addr);
if (!old_pmd)
continue;
- new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
+ new_pmd = alloc_new_pmd(mm, pmc->new_addr);
if (!new_pmd)
break;
again:
if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
pmd_devmap(*old_pmd)) {
if (extent == HPAGE_PMD_SIZE &&
- move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
- old_pmd, new_pmd, need_rmap_locks))
+ move_pgt_entry(pmc, HPAGE_PMD, old_pmd, new_pmd))
continue;
- split_huge_pmd(vma, old_pmd, old_addr);
+ split_huge_pmd(pmc->old, old_pmd, pmc->old_addr);
} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
extent == PMD_SIZE) {
/*
* If the extent is PMD-sized, try to speed the move by
* moving at the PMD level if possible.
*/
- if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
- old_pmd, new_pmd, true))
+ if (move_pgt_entry(pmc, NORMAL_PMD, old_pmd, new_pmd))
continue;
}
if (pmd_none(*old_pmd))
continue;
- if (pte_alloc(new_vma->vm_mm, new_pmd))
+ if (pte_alloc(pmc->new->vm_mm, new_pmd))
break;
- if (move_ptes(vma, old_pmd, old_addr, old_addr + extent,
- new_vma, new_pmd, new_addr, need_rmap_locks) < 0)
+ if (move_ptes(pmc, extent, old_pmd, new_pmd) < 0)
goto again;
}
mmu_notifier_invalidate_range_end(&range);
+ return pmc_progress(pmc);
+}
+
+/* Set vrm->delta to the difference in VMA size specified by user. */
+static void vrm_set_delta(struct vma_remap_struct *vrm)
+{
+ vrm->delta = abs_diff(vrm->old_len, vrm->new_len);
+}
+
+/* Determine what kind of remap this is - shrink, expand or no resize at all. */
+static enum mremap_type vrm_remap_type(struct vma_remap_struct *vrm)
+{
+ if (vrm->delta == 0)
+ return MREMAP_NO_RESIZE;
+
+ if (vrm->old_len > vrm->new_len)
+ return MREMAP_SHRINK;
+
+ return MREMAP_EXPAND;
+}
+
+/*
+ * When moving a VMA to vrm->new_addr, does this result in the new and old VMAs
+ * overlapping?
+ */
+static bool vrm_overlaps(struct vma_remap_struct *vrm)
+{
+ unsigned long start_old = vrm->addr;
+ unsigned long start_new = vrm->new_addr;
+ unsigned long end_old = vrm->addr + vrm->old_len;
+ unsigned long end_new = vrm->new_addr + vrm->new_len;
+
/*
- * Prevent negative return values when {old,new}_addr was realigned
- * but we broke out of the above loop for the first PMD itself.
+ * start_old end_old
+ * |-----------|
+ * | |
+ * |-----------|
+ * |-------------|
+ * | |
+ * |-------------|
+ * start_new end_new
*/
- if (old_addr < old_end - len)
- return 0;
+ if (end_old > start_new && end_new > start_old)
+ return true;
- return len + old_addr - old_end; /* how much done */
+ return false;
}
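The check is the standard half-open interval overlap predicate; a self-contained sketch with hypothetical addresses:

        #include <stdbool.h>
        #include <stdio.h>

        /* Half-open interval overlap, mirroring vrm_overlaps(). */
        static bool ranges_overlap(unsigned long start_old, unsigned long len_old,
                                   unsigned long start_new, unsigned long len_new)
        {
                unsigned long end_old = start_old + len_old;
                unsigned long end_new = start_new + len_new;

                return end_old > start_new && end_new > start_old;
        }

        int main(void)
        {
                /* Touching ranges do not overlap: prints 0. */
                printf("%d\n", ranges_overlap(0x1000, 0x2000, 0x3000, 0x2000));
                /* Half-overlapping ranges do: prints 1. */
                printf("%d\n", ranges_overlap(0x1000, 0x2000, 0x2000, 0x2000));
                return 0;
        }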
-static unsigned long move_vma(struct vm_area_struct *vma,
- unsigned long old_addr, unsigned long old_len,
- unsigned long new_len, unsigned long new_addr,
- bool *locked, unsigned long flags,
- struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
+/* Do the mremap() flags require that the new_addr parameter be specified? */
+static bool vrm_implies_new_addr(struct vma_remap_struct *vrm)
{
- long to_account = new_len - old_len;
- struct mm_struct *mm = vma->vm_mm;
- struct vm_area_struct *new_vma;
- unsigned long vm_flags = vma->vm_flags;
- unsigned long new_pgoff;
- unsigned long moved_len;
- unsigned long account_start = 0;
- unsigned long account_end = 0;
- unsigned long hiwater_vm;
- int err = 0;
- bool need_rmap_locks;
- struct vma_iterator vmi;
+ return vrm->flags & (MREMAP_FIXED | MREMAP_DONTUNMAP);
+}
+
+/*
+ * Find an unmapped area for the requested vrm->new_addr.
+ *
+ * If MREMAP_FIXED then this is equivalent to a MAP_FIXED mmap() call. If only
+ * MREMAP_DONTUNMAP is set, then this is equivalent to providing a hint to
+ * mmap(), otherwise this is equivalent to mmap() specifying a NULL address.
+ *
+ * Returns 0 on success (with vrm->new_addr updated), or an error code upon
+ * failure.
+ */
+static unsigned long vrm_set_new_addr(struct vma_remap_struct *vrm)
+{
+ struct vm_area_struct *vma = vrm->vma;
+ unsigned long map_flags = 0;
+ /* Page Offset _into_ the VMA. */
+ pgoff_t internal_pgoff = (vrm->addr - vma->vm_start) >> PAGE_SHIFT;
+ pgoff_t pgoff = vma->vm_pgoff + internal_pgoff;
+ unsigned long new_addr = vrm_implies_new_addr(vrm) ? vrm->new_addr : 0;
+ unsigned long res;
+
+ if (vrm->flags & MREMAP_FIXED)
+ map_flags |= MAP_FIXED;
+ if (vma->vm_flags & VM_MAYSHARE)
+ map_flags |= MAP_SHARED;
+
+ res = get_unmapped_area(vma->vm_file, new_addr, vrm->new_len, pgoff,
+ map_flags);
+ if (IS_ERR_VALUE(res))
+ return res;
+
+ vrm->new_addr = res;
+ return 0;
+}
+
+/*
+ * Keep track of pages which have been added to the memory mapping. If the VMA
+ * is accounted, also check to see if there is sufficient memory.
+ *
+ * Returns true on success, false if insufficient memory to charge.
+ */
+static bool vrm_charge(struct vma_remap_struct *vrm)
+{
+ unsigned long charged;
+
+ if (!(vrm->vma->vm_flags & VM_ACCOUNT))
+ return true;
+
+ /*
+ * If we don't unmap the old mapping, then we account the entirety of
+ * the length of the new one. Otherwise it's just the delta in size.
+ */
+ if (vrm->flags & MREMAP_DONTUNMAP)
+ charged = vrm->new_len >> PAGE_SHIFT;
+ else
+ charged = vrm->delta >> PAGE_SHIFT;
+
+ /* This accounts 'charged' pages of memory. */
+ if (security_vm_enough_memory_mm(current->mm, charged))
+ return false;
+
+ vrm->charged = charged;
+ return true;
+}
+
+/*
+ * An error has occurred, so we will not be using vrm->charged memory. Unaccount
+ * this memory if the VMA is accounted.
+ */
+static void vrm_uncharge(struct vma_remap_struct *vrm)
+{
+ if (!(vrm->vma->vm_flags & VM_ACCOUNT))
+ return;
+
+ vm_unacct_memory(vrm->charged);
+ vrm->charged = 0;
+}
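A worked example of the two charging modes, assuming 4 KiB pages (PAGE_SHIFT 12 is an assumption here; the real value is per-arch):

        #include <stdio.h>

        #define PAGE_SHIFT 12 /* assumed 4 KiB pages for illustration */

        /* Pages charged: the whole new length if the old mapping is kept
         * (MREMAP_DONTUNMAP), otherwise just the size delta. */
        static unsigned long pages_to_charge(unsigned long new_len,
                                             unsigned long delta, int dontunmap)
        {
                return (dontunmap ? new_len : delta) >> PAGE_SHIFT;
        }

        int main(void)
        {
                /* Growing 8 KiB -> 24 KiB, so delta is 16 KiB. */
                printf("%lu\n", pages_to_charge(24 << 10, 16 << 10, 0)); /* 4 */
                printf("%lu\n", pages_to_charge(24 << 10, 16 << 10, 1)); /* 6 */
                return 0;
        }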
+
+/*
+ * Update mm exec_vm, stack_vm, data_vm, and locked_vm fields as needed to
+ * account for 'bytes' memory used, and if locked, indicate this in the VRM so
+ * we can handle this correctly later.
+ */
+static void vrm_stat_account(struct vma_remap_struct *vrm,
+ unsigned long bytes)
+{
+ unsigned long pages = bytes >> PAGE_SHIFT;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma = vrm->vma;
+
+ vm_stat_account(mm, vma->vm_flags, pages);
+ if (vma->vm_flags & VM_LOCKED) {
+ mm->locked_vm += pages;
+ vrm->mlocked = true;
+ }
+}
+
+/*
+ * Perform sanity checks on a VMA before attempting to modify it ahead of
+ * moving it.
+ */
+static unsigned long prep_move_vma(struct vma_remap_struct *vrm)
+{
+ unsigned long err = 0;
+ struct vm_area_struct *vma = vrm->vma;
+ unsigned long old_addr = vrm->addr;
+ unsigned long old_len = vrm->old_len;
+ unsigned long dummy = vma->vm_flags;
/*
* We'd prefer to avoid failure later on in do_munmap:
* which may split one vma into three before unmapping.
*/
- if (mm->map_count >= sysctl_max_map_count - 3)
+ if (current->mm->map_count >= sysctl_max_map_count - 3)
return -ENOMEM;
- if (unlikely(flags & MREMAP_DONTUNMAP))
- to_account = new_len;
-
if (vma->vm_ops && vma->vm_ops->may_split) {
if (vma->vm_start != old_addr)
err = vma->vm_ops->may_split(vma, old_addr);
@@ -739,61 +1023,239 @@ static unsigned long move_vma(struct vm_area_struct *vma,
* so KSM can come around to merge on vma and new_vma afterwards.
*/
err = ksm_madvise(vma, old_addr, old_addr + old_len,
- MADV_UNMERGEABLE, &vm_flags);
+ MADV_UNMERGEABLE, &dummy);
if (err)
return err;
- if (vm_flags & VM_ACCOUNT) {
- if (security_vm_enough_memory_mm(mm, to_account >> PAGE_SHIFT))
- return -ENOMEM;
+ return 0;
+}
+
+/*
+ * Unmap the source VMA for a VMA move, turning the operation from a copy into
+ * a move, taking care not to underflow the memory account while doing so if
+ * this is an accountable move.
+ *
+ * This is best-effort: if we fail to unmap, we simply try to correct the
+ * accounting and exit.
+ */
+static void unmap_source_vma(struct vma_remap_struct *vrm)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = vrm->addr;
+ unsigned long len = vrm->old_len;
+ struct vm_area_struct *vma = vrm->vma;
+ VMA_ITERATOR(vmi, mm, addr);
+ int err;
+ unsigned long vm_start;
+ unsigned long vm_end;
+ /*
+ * It might seem odd that we check for MREMAP_DONTUNMAP here, given this
+ * function implies that we unmap the original VMA, which seems
+ * contradictory.
+ *
+ * However, this occurs when this operation was attempted and an error
+ * arose, in which case we _do_ wish to unmap the _new_ VMA, which means
+ * we actually _do_ want it to be unaccounted.
+ */
+ bool accountable_move = (vma->vm_flags & VM_ACCOUNT) &&
+ !(vrm->flags & MREMAP_DONTUNMAP);
+
+ /*
+ * So we perform a trick here to prevent incorrect accounting. Any merge
+ * or new VMA allocation performed in copy_vma() does not adjust
+ * accounting; callers are expected to handle this themselves.
+ *
+ * And indeed we already have, having accounted appropriately for both
+ * cases in vrm_charge().
+ *
+ * However, when we unmap the existing VMA (to effect the move), this
+ * code will, if the VMA has VM_ACCOUNT set, attempt to unaccount
+ * removed pages.
+ *
+ * To avoid this we temporarily clear this flag, reinstating on any
+ * portions of the original VMA that remain.
+ */
+ if (accountable_move) {
+ vm_flags_clear(vma, VM_ACCOUNT);
+ /* We are about to split vma, so store the start/end. */
+ vm_start = vma->vm_start;
+ vm_end = vma->vm_end;
+ }
+
+ err = do_vmi_munmap(&vmi, mm, addr, len, vrm->uf_unmap, /* unlock= */false);
+ vrm->vma = NULL; /* Invalidated. */
+ if (err) {
+ /* OOM: unable to split vma, just get accounts right */
+ vm_acct_memory(len >> PAGE_SHIFT);
+ return;
+ }
+
+ /*
+ * If we mremap() from a VMA like this:
+ *
+ * addr end
+ * | |
+ * v v
+ * |-------------|
+ * | |
+ * |-------------|
+ *
+ * Having cleared VM_ACCOUNT from the whole VMA, after we unmap above
+ * we'll end up with:
+ *
+ * addr end
+ * | |
+ * v v
+ * |---| |---|
+ * | A | | B |
+ * |---| |---|
+ *
+ * The VMI is still pointing at addr, so vma_prev() will give us A, and
+ * a subsequent or lone vma_next() will give us B.
+ *
+ * do_vmi_munmap() will have restored the VMI back to addr.
+ */
+ if (accountable_move) {
+ unsigned long end = addr + len;
+
+ if (vm_start < addr) {
+ struct vm_area_struct *prev = vma_prev(&vmi);
+
+ vm_flags_set(prev, VM_ACCOUNT); /* Acquires VMA lock. */
+ }
+
+ if (vm_end > end) {
+ struct vm_area_struct *next = vma_next(&vmi);
+
+ vm_flags_set(next, VM_ACCOUNT); /* Acquires VMA lock. */
+ }
}
+}
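A sketch of which remnants qualify for VM_ACCOUNT reinstatement, with hypothetical addresses:

        #include <stdio.h>

        int main(void)
        {
                unsigned long vm_start = 0x1000, vm_end = 0x8000;
                unsigned long addr = 0x3000, len = 0x2000;
                unsigned long end = addr + len;

                /* After unmapping [addr, end), piece A (before the hole)
                 * remains iff vm_start < addr, piece B (after it) iff
                 * vm_end > end; each survivor gets VM_ACCOUNT set again. */
                printf("restore A: %d\n", vm_start < addr); /* 1 */
                printf("restore B: %d\n", vm_end > end);    /* 1 */
                return 0;
        }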
+
+/*
+ * Copy vrm->vma over to vrm->new_addr, possibly adjusting its size as part of
+ * the process. Additionally handle errors arising while moving page tables,
+ * in which case we reset the vrm state so the new VMA gets unmapped.
+ *
+ * Outputs the newly installed VMA to new_vma_ptr. Returns 0 on success or an
+ * error code.
+ */
+static int copy_vma_and_data(struct vma_remap_struct *vrm,
+ struct vm_area_struct **new_vma_ptr)
+{
+ unsigned long internal_offset = vrm->addr - vrm->vma->vm_start;
+ unsigned long internal_pgoff = internal_offset >> PAGE_SHIFT;
+ unsigned long new_pgoff = vrm->vma->vm_pgoff + internal_pgoff;
+ unsigned long moved_len;
+ struct vm_area_struct *vma = vrm->vma;
+ struct vm_area_struct *new_vma;
+ int err = 0;
+ PAGETABLE_MOVE(pmc, NULL, NULL, vrm->addr, vrm->new_addr, vrm->old_len);
- vma_start_write(vma);
- new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
- new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
- &need_rmap_locks);
+ new_vma = copy_vma(&vma, vrm->new_addr, vrm->new_len, new_pgoff,
+ &pmc.need_rmap_locks);
if (!new_vma) {
- if (vm_flags & VM_ACCOUNT)
- vm_unacct_memory(to_account >> PAGE_SHIFT);
+ vrm_uncharge(vrm);
+ *new_vma_ptr = NULL;
return -ENOMEM;
}
+ vrm->vma = vma;
+ pmc.old = vma;
+ pmc.new = new_vma;
- moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
- need_rmap_locks, false);
- if (moved_len < old_len) {
+ moved_len = move_page_tables(&pmc);
+ if (moved_len < vrm->old_len)
err = -ENOMEM;
- } else if (vma->vm_ops && vma->vm_ops->mremap) {
+ else if (vma->vm_ops && vma->vm_ops->mremap)
err = vma->vm_ops->mremap(new_vma);
- }
if (unlikely(err)) {
+ PAGETABLE_MOVE(pmc_revert, new_vma, vma, vrm->new_addr,
+ vrm->addr, moved_len);
+
/*
* On error, move entries back from new area to old,
* which will succeed since page tables still there,
* and then proceed to unmap new area instead of old.
*/
- move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
- true, false);
- vma = new_vma;
- old_len = new_len;
- old_addr = new_addr;
- new_addr = err;
+ pmc_revert.need_rmap_locks = true;
+ move_page_tables(&pmc_revert);
+
+ vrm->vma = new_vma;
+ vrm->old_len = vrm->new_len;
+ vrm->addr = vrm->new_addr;
} else {
- mremap_userfaultfd_prep(new_vma, uf);
+ mremap_userfaultfd_prep(new_vma, vrm->uf);
}
- if (is_vm_hugetlb_page(vma)) {
+ if (is_vm_hugetlb_page(vma))
clear_vma_resv_huge_pages(vma);
- }
- /* Conceal VM_ACCOUNT so old reservation is not undone */
- if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
- vm_flags_clear(vma, VM_ACCOUNT);
- if (vma->vm_start < old_addr)
- account_start = vma->vm_start;
- if (vma->vm_end > old_addr + old_len)
- account_end = vma->vm_end;
- }
+ /* Tell pfn tracking that the pfnmap has moved from this vma. */
+ if (unlikely(vma->vm_flags & VM_PFNMAP))
+ untrack_pfn_clear(vma);
+
+ *new_vma_ptr = new_vma;
+ return err;
+}
+
+/*
+ * Perform final tasks for the MREMAP_DONTUNMAP operation, clearing mlock() and
+ * account flags on the remaining VMA by convention (it cannot be mlock()'d any
+ * longer, as pages in range are no longer mapped), and removing anon_vma_chain
+ * links from it (if the entire VMA was copied over).
+ */
+static void dontunmap_complete(struct vma_remap_struct *vrm,
+ struct vm_area_struct *new_vma)
+{
+ unsigned long start = vrm->addr;
+ unsigned long end = vrm->addr + vrm->old_len;
+ unsigned long old_start = vrm->vma->vm_start;
+ unsigned long old_end = vrm->vma->vm_end;
+
+ /*
+ * We always clear VM_LOCKED[ONFAULT] | VM_ACCOUNT on the old
+ * vma.
+ */
+ vm_flags_clear(vrm->vma, VM_LOCKED_MASK | VM_ACCOUNT);
+
+ /*
+ * anon_vma links of the old vma are no longer needed after its page
+ * table has been moved.
+ */
+ if (new_vma != vrm->vma && start == old_start && end == old_end)
+ unlink_anon_vmas(vrm->vma);
+
+ /* Because we won't unmap we don't need to touch locked_vm. */
+}
+
+static unsigned long move_vma(struct vma_remap_struct *vrm)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *new_vma;
+ unsigned long hiwater_vm;
+ int err;
+
+ err = prep_move_vma(vrm);
+ if (err)
+ return err;
+
+ /* If accounted, charge the number of bytes the operation will use. */
+ if (!vrm_charge(vrm))
+ return -ENOMEM;
+
+ /* We don't want racing faults. */
+ vma_start_write(vrm->vma);
+
+ /* Perform copy step. */
+ err = copy_vma_and_data(vrm, &new_vma);
+ /*
+ * If we established the copied-to VMA, we attempt to recover from the
+ * error by setting the destination VMA to the source VMA and unmapping
+ * it below.
+ */
+ if (err && !new_vma)
+ return err;
/*
* If we failed to move page tables we still do total_vm increment
@@ -805,73 +1267,31 @@ static unsigned long move_vma(struct vm_area_struct *vma,
* If this were a serious issue, we'd add a flag to do_munmap().
*/
hiwater_vm = mm->hiwater_vm;
- vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);
-
- /* Tell pfnmap has moved from this vma */
- if (unlikely(vma->vm_flags & VM_PFNMAP))
- untrack_pfn_clear(vma);
-
- if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
- /* We always clear VM_LOCKED[ONFAULT] on the old vma */
- vm_flags_clear(vma, VM_LOCKED_MASK);
-
- /*
- * anon_vma links of the old vma is no longer needed after its page
- * table has been moved.
- */
- if (new_vma != vma && vma->vm_start == old_addr &&
- vma->vm_end == (old_addr + old_len))
- unlink_anon_vmas(vma);
- /* Because we won't unmap we don't need to touch locked_vm */
- return new_addr;
- }
-
- vma_iter_init(&vmi, mm, old_addr);
- if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) {
- /* OOM: unable to split vma, just get accounts right */
- if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
- vm_acct_memory(old_len >> PAGE_SHIFT);
- account_start = account_end = 0;
- }
-
- if (vm_flags & VM_LOCKED) {
- mm->locked_vm += new_len >> PAGE_SHIFT;
- *locked = true;
- }
+ vrm_stat_account(vrm, vrm->new_len);
+ if (unlikely(!err && (vrm->flags & MREMAP_DONTUNMAP)))
+ dontunmap_complete(vrm, new_vma);
+ else
+ unmap_source_vma(vrm);
mm->hiwater_vm = hiwater_vm;
- /* Restore VM_ACCOUNT if one or two pieces of vma left */
- if (account_start) {
- vma = vma_prev(&vmi);
- vm_flags_set(vma, VM_ACCOUNT);
- }
-
- if (account_end) {
- vma = vma_next(&vmi);
- vm_flags_set(vma, VM_ACCOUNT);
- }
-
- return new_addr;
+ return err ? (unsigned long)err : vrm->new_addr;
}
/*
 * resize_is_valid() - Ensure the vma can be resized to the new length at the given
* address.
*
- * @vma: The vma to resize
- * @addr: The old address
- * @old_len: The current size
- * @new_len: The desired size
- * @flags: The vma flags
- *
* Return 0 on success, error otherwise.
*/
-static int resize_is_valid(struct vm_area_struct *vma, unsigned long addr,
- unsigned long old_len, unsigned long new_len, unsigned long flags)
+static int resize_is_valid(struct vma_remap_struct *vrm)
{
struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma = vrm->vma;
+ unsigned long addr = vrm->addr;
+ unsigned long old_len = vrm->old_len;
+ unsigned long new_len = vrm->new_len;
unsigned long pgoff;
/*
@@ -883,11 +1303,12 @@ static int resize_is_valid(struct vm_area_struct *vma, unsigned long addr,
* behavior. As a result, fail such attempts.
*/
if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
- pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n", current->comm, current->pid);
+ pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n",
+ current->comm, current->pid);
return -EINVAL;
}
- if ((flags & MREMAP_DONTUNMAP) &&
+ if ((vrm->flags & MREMAP_DONTUNMAP) &&
(vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
return -EINVAL;
@@ -907,118 +1328,120 @@ static int resize_is_valid(struct vm_area_struct *vma, unsigned long addr,
if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
return -EFAULT;
- if (!mlock_future_ok(mm, vma->vm_flags, new_len - old_len))
+ if (!mlock_future_ok(mm, vma->vm_flags, vrm->delta))
return -EAGAIN;
- if (!may_expand_vm(mm, vma->vm_flags,
- (new_len - old_len) >> PAGE_SHIFT))
+ if (!may_expand_vm(mm, vma->vm_flags, vrm->delta >> PAGE_SHIFT))
return -ENOMEM;
return 0;
}
/*
- * mremap_to() - remap a vma to a new location
- * @addr: The old address
- * @old_len: The old size
- * @new_addr: The target address
- * @new_len: The new size
- * @locked: If the returned vma is locked (VM_LOCKED)
- * @flags: the mremap flags
- * @uf: The mremap userfaultfd context
- * @uf_unmap_early: The userfaultfd unmap early context
- * @uf_unmap: The userfaultfd unmap context
+ * The user has requested that the VMA be shrunk (i.e., old_len > new_len), so
+ * execute this, optionally dropping the mmap lock when we do so.
*
- * Returns: The new address of the vma or an error.
+ * In both cases this invalidates the VMA; however, if we don't drop the lock,
+ * we reload the correct VMA into vrm->vma afterwards.
*/
-static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
- unsigned long new_addr, unsigned long new_len, bool *locked,
- unsigned long flags, struct vm_userfaultfd_ctx *uf,
- struct list_head *uf_unmap_early,
- struct list_head *uf_unmap)
+static unsigned long shrink_vma(struct vma_remap_struct *vrm,
+ bool drop_lock)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long ret;
- unsigned long map_flags = 0;
+ unsigned long unmap_start = vrm->addr + vrm->new_len;
+ unsigned long unmap_bytes = vrm->delta;
+ unsigned long res;
+ VMA_ITERATOR(vmi, mm, unmap_start);
- if (offset_in_page(new_addr))
- return -EINVAL;
+ VM_BUG_ON(vrm->remap_type != MREMAP_SHRINK);
- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
- return -EINVAL;
-
- /* Ensure the old/new locations do not overlap */
- if (addr + old_len > new_addr && new_addr + new_len > addr)
- return -EINVAL;
+ res = do_vmi_munmap(&vmi, mm, unmap_start, unmap_bytes,
+ vrm->uf_unmap, drop_lock);
+ vrm->vma = NULL; /* Invalidated. */
+ if (res)
+ return res;
/*
- * move_vma() need us to stay 4 maps below the threshold, otherwise
- * it will bail out at the very beginning.
- * That is a problem if we have already unmaped the regions here
- * (new_addr, and old_addr), because userspace will not know the
- * state of the vma's after it gets -ENOMEM.
- * So, to avoid such scenario we can pre-compute if the whole
- * operation has high chances to success map-wise.
- * Worst-scenario case is when both vma's (new_addr and old_addr) get
- * split in 3 before unmapping it.
- * That means 2 more maps (1 for each) to the ones we already hold.
- * Check whether current map count plus 2 still leads us to 4 maps below
- * the threshold, otherwise return -ENOMEM here to be more safe.
+ * If we've not dropped the lock, then we should reload the VMA to
+ * replace the invalidated VMA with the one that may have now been
+ * split.
*/
- if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
- return -ENOMEM;
+ if (drop_lock) {
+ vrm->mmap_locked = false;
+ } else {
+ vrm->vma = vma_lookup(mm, vrm->addr);
+ if (!vrm->vma)
+ return -EFAULT;
+ }
+
+ return 0;
+}
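The arithmetic of the tail unmap, checked with hypothetical values:

        #include <stdio.h>

        int main(void)
        {
                unsigned long addr = 0x10000, old_len = 0x6000, new_len = 0x2000;
                unsigned long delta = old_len - new_len;

                /* shrink_vma() unmaps only the tail:
                 * [addr + new_len, addr + old_len). */
                printf("unmap [%#lx, %#lx), %lu bytes\n",
                       addr + new_len, addr + new_len + delta, delta);
                return 0;
        }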
+
+/*
+ * mremap_to() - remap a vma to a new location.
+ * Returns: The new address of the vma or an error.
+ */
+static unsigned long mremap_to(struct vma_remap_struct *vrm)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long err;
+
+ /* Is the new length or address silly? */
+ if (vrm->new_len > TASK_SIZE ||
+ vrm->new_addr > TASK_SIZE - vrm->new_len)
+ return -EINVAL;
- if (flags & MREMAP_FIXED) {
+ if (vrm_overlaps(vrm))
+ return -EINVAL;
+
+ if (vrm->flags & MREMAP_FIXED) {
/*
* In mremap_to().
* VMA is moved to dst address, and munmap dst first.
* do_munmap will check if dst is sealed.
*/
- ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
- if (ret)
- return ret;
- }
+ err = do_munmap(mm, vrm->new_addr, vrm->new_len,
+ vrm->uf_unmap_early);
+ vrm->vma = NULL; /* Invalidated. */
+ if (err)
+ return err;
- if (old_len > new_len) {
- ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
- if (ret)
- return ret;
- old_len = new_len;
+ /*
+ * If we remap a portion of a VMA elsewhere in the same VMA,
+ * this can invalidate the old VMA. Reset.
+ */
+ vrm->vma = vma_lookup(mm, vrm->addr);
+ if (!vrm->vma)
+ return -EFAULT;
}
- vma = vma_lookup(mm, addr);
- if (!vma)
- return -EFAULT;
-
- ret = resize_is_valid(vma, addr, old_len, new_len, flags);
- if (ret)
- return ret;
+ if (vrm->remap_type == MREMAP_SHRINK) {
+ err = shrink_vma(vrm, /* drop_lock= */false);
+ if (err)
+ return err;
- /* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
- if (flags & MREMAP_DONTUNMAP &&
- !may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
- return -ENOMEM;
+ /* Set up for the move now shrink has been executed. */
+ vrm->old_len = vrm->new_len;
}
- if (flags & MREMAP_FIXED)
- map_flags |= MAP_FIXED;
+ err = resize_is_valid(vrm);
+ if (err)
+ return err;
- if (vma->vm_flags & VM_MAYSHARE)
- map_flags |= MAP_SHARED;
+ /* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
+ if (vrm->flags & MREMAP_DONTUNMAP) {
+ vm_flags_t vm_flags = vrm->vma->vm_flags;
+ unsigned long pages = vrm->old_len >> PAGE_SHIFT;
- ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
- ((addr - vma->vm_start) >> PAGE_SHIFT),
- map_flags);
- if (IS_ERR_VALUE(ret))
- return ret;
+ if (!may_expand_vm(mm, vm_flags, pages))
+ return -ENOMEM;
+ }
- /* We got a new mapping */
- if (!(flags & MREMAP_FIXED))
- new_addr = ret;
+ err = vrm_set_new_addr(vrm);
+ if (err)
+ return err;
- return move_vma(vma, addr, old_len, new_len, new_addr, locked, flags,
- uf, uf_unmap);
+ return move_vma(vrm);
}
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
@@ -1035,215 +1458,329 @@ static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
return 1;
}
+/* Determine whether we are actually able to execute an in-place expansion. */
+static bool vrm_can_expand_in_place(struct vma_remap_struct *vrm)
+{
+ /* Number of bytes from vrm->addr to end of VMA. */
+ unsigned long suffix_bytes = vrm->vma->vm_end - vrm->addr;
+
+ /* If end of range aligns to end of VMA, we can just expand in-place. */
+ if (suffix_bytes != vrm->old_len)
+ return false;
+
+ /* Check whether this is feasible. */
+ if (!vma_expandable(vrm->vma, vrm->delta))
+ return false;
+
+ return true;
+}
+
/*
- * Expand (or shrink) an existing mapping, potentially moving it at the
- * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
- *
- * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
- * This option implies MREMAP_MAYMOVE.
+ * Are the parameters passed to mremap() valid? If so return 0, otherwise
+ * return an error.
*/
-SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
- unsigned long, new_len, unsigned long, flags,
- unsigned long, new_addr)
+static unsigned long check_mremap_params(struct vma_remap_struct *vrm)
{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long ret = -EINVAL;
- bool locked = false;
- struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
- LIST_HEAD(uf_unmap_early);
- LIST_HEAD(uf_unmap);
+ unsigned long addr = vrm->addr;
+ unsigned long flags = vrm->flags;
+
+ /* Ensure no unexpected flag values. */
+ if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
+ return -EINVAL;
+
+ /* Start address must be page-aligned. */
+ if (offset_in_page(addr))
+ return -EINVAL;
/*
- * There is a deliberate asymmetry here: we strip the pointer tag
- * from the old address but leave the new address alone. This is
- * for consistency with mmap(), where we prevent the creation of
- * aliasing mappings in userspace by leaving the tag bits of the
- * mapping address intact. A non-zero tag will cause the subsequent
- * range checks to reject the address as invalid.
- *
- * See Documentation/arch/arm64/tagged-address-abi.rst for more
- * information.
+ * We allow a zero old-len as a special case
+ * for DOS-emu "duplicate shm area" thing. But
+ * a zero new-len is nonsensical.
*/
- addr = untagged_addr(addr);
+ if (!PAGE_ALIGN(vrm->new_len))
+ return -EINVAL;
- if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
- return ret;
+ /* Remainder of checks are for cases with specific new_addr. */
+ if (!vrm_implies_new_addr(vrm))
+ return 0;
- if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
- return ret;
+ /* The new address must be page-aligned. */
+ if (offset_in_page(vrm->new_addr))
+ return -EINVAL;
+
+ /* A fixed address implies a move. */
+ if (!(flags & MREMAP_MAYMOVE))
+ return -EINVAL;
+
+ /* MREMAP_DONTUNMAP does not allow resizing in the process. */
+ if (flags & MREMAP_DONTUNMAP && vrm->old_len != vrm->new_len)
+ return -EINVAL;
/*
- * MREMAP_DONTUNMAP is always a move and it does not allow resizing
- * in the process.
+ * move_vma() needs us to stay 4 maps below the threshold, otherwise
+ * it will bail out at the very beginning.
+ * That is a problem if we have already unmapped the regions here
+ * (new_addr, and old_addr), because userspace will not know the
+ * state of the VMAs after it gets -ENOMEM.
+ * So, to avoid such a scenario, we can pre-compute whether the whole
+ * operation has a high chance of succeeding map-wise.
+ * The worst-case scenario is when both VMAs (at new_addr and old_addr)
+ * get split in 3 before being unmapped.
+ * That means 2 more maps (1 for each) on top of the ones we already hold.
+ * Check whether the current map count plus 2 still leaves us 4 maps below
+ * the threshold, otherwise return -ENOMEM here to be safe.
*/
- if (flags & MREMAP_DONTUNMAP &&
- (!(flags & MREMAP_MAYMOVE) || old_len != new_len))
- return ret;
+ if ((current->mm->map_count + 2) >= sysctl_max_map_count - 3)
+ return -ENOMEM;
+ return 0;
+}
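Both alignment checks reduce to mask arithmetic; a userspace sketch assuming 4 KiB pages (the kernel macros are per-arch):

        #include <stdio.h>

        #define PAGE_SIZE     4096UL /* assumed for illustration */
        #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

        static unsigned long offset_in_page(unsigned long addr)
        {
                return addr & (PAGE_SIZE - 1);
        }

        int main(void)
        {
                printf("%#lx\n", offset_in_page(0x1080)); /* 0x80: addr rejected */
                printf("%lu\n", PAGE_ALIGN(1));  /* 4096: new_len accepted */
                printf("%lu\n", PAGE_ALIGN(0));  /* 0: zero new_len rejected */
                return 0;
        }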
- if (offset_in_page(addr))
- return ret;
+/*
+ * We know we can expand the VMA in-place by delta pages, so do so.
+ *
+ * If we discover the VMA is locked, update mm_struct statistics accordingly and
+ * indicate so to the caller.
+ */
+static unsigned long expand_vma_in_place(struct vma_remap_struct *vrm)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma = vrm->vma;
+ VMA_ITERATOR(vmi, mm, vma->vm_end);
- old_len = PAGE_ALIGN(old_len);
- new_len = PAGE_ALIGN(new_len);
+ if (!vrm_charge(vrm))
+ return -ENOMEM;
/*
- * We allow a zero old-len as a special case
- * for DOS-emu "duplicate shm area" thing. But
- * a zero new-len is nonsensical.
+ * vma_merge_extend() is called on the extension we are adding to
+ * the already existing vma; it will merge this extension with the
+ * existing vma (the expand operation itself) and possibly also
+ * with the next vma, if that becomes adjacent to the expanded vma
+ * and is otherwise compatible.
*/
- if (!new_len)
- return ret;
-
- if (mmap_write_lock_killable(current->mm))
- return -EINTR;
- vma = vma_lookup(mm, addr);
+ vma = vma_merge_extend(&vmi, vma, vrm->delta);
if (!vma) {
- ret = -EFAULT;
- goto out;
+ vrm_uncharge(vrm);
+ return -ENOMEM;
}
+ vrm->vma = vma;
- /* Don't allow remapping vmas when they have already been sealed */
- if (!can_modify_vma(vma)) {
- ret = -EPERM;
- goto out;
- }
+ vrm_stat_account(vrm, vrm->delta);
+
+ return 0;
+}
+
+static bool align_hugetlb(struct vma_remap_struct *vrm)
+{
+ struct hstate *h __maybe_unused = hstate_vma(vrm->vma);
+
+ vrm->old_len = ALIGN(vrm->old_len, huge_page_size(h));
+ vrm->new_len = ALIGN(vrm->new_len, huge_page_size(h));
+
+ /* addrs must be huge page aligned */
+ if (vrm->addr & ~huge_page_mask(h))
+ return false;
+ if (vrm->new_addr & ~huge_page_mask(h))
+ return false;
+
+ /*
+ * Don't allow remap expansion, because the underlying hugetlb
+ * reservation is not yet capable of handling split reservations.
+ */
+ if (vrm->new_len > vrm->old_len)
+ return false;
+
+ vrm_set_delta(vrm);
+
+ return true;
+}
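The same rounding at huge-page granularity, in a sketch assuming 2 MiB huge pages:

        #include <stdio.h>

        #define HPAGE_SIZE  (2UL << 20) /* assuming 2 MiB huge pages */
        #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

        int main(void)
        {
                unsigned long old_len = 3UL << 20; /* 3 MiB rounds up to 4 MiB */
                unsigned long addr = 0x40201000UL; /* 4 KiB into a huge page */

                printf("%lu MiB\n", ALIGN(old_len, HPAGE_SIZE) >> 20); /* 4 */
                printf("misaligned: %d\n", !!(addr & (HPAGE_SIZE - 1))); /* 1 */
                return 0;
        }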
- if (is_vm_hugetlb_page(vma)) {
- struct hstate *h __maybe_unused = hstate_vma(vma);
+/*
+ * We are mremap()'ing without specifying a fixed address to move to, but are
+ * requesting that the VMA's size be increased.
+ *
+ * Try to do so in-place, if this fails, then move the VMA to a new location to
+ * action the change.
+ */
+static unsigned long expand_vma(struct vma_remap_struct *vrm)
+{
+ unsigned long err;
+ unsigned long addr = vrm->addr;
- old_len = ALIGN(old_len, huge_page_size(h));
- new_len = ALIGN(new_len, huge_page_size(h));
+ err = resize_is_valid(vrm);
+ if (err)
+ return err;
- /* addrs must be huge page aligned */
- if (addr & ~huge_page_mask(h))
- goto out;
- if (new_addr & ~huge_page_mask(h))
- goto out;
+ /*
+ * If [addr, addr + old_len) spans precisely to the end of the VMA, try to
+ * expand it in-place.
+ */
+ if (vrm_can_expand_in_place(vrm)) {
+ err = expand_vma_in_place(vrm);
+ if (err)
+ return err;
/*
- * Don't allow remap expansion, because the underlying hugetlb
- * reservation is not yet capable to handle split reservation.
+ * We want to populate the newly expanded portion of the VMA to
+ * satisfy the expectation that mlock()'ing a VMA maintains all
+ * of its pages in memory.
*/
- if (new_len > old_len)
- goto out;
- }
+ if (vrm->mlocked)
+ vrm->new_addr = addr;
- if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
- ret = mremap_to(addr, old_len, new_addr, new_len,
- &locked, flags, &uf, &uf_unmap_early,
- &uf_unmap);
- goto out;
+ /* OK we're done! */
+ return addr;
}
/*
- * Always allow a shrinking remap: that just unmaps
- * the unnecessary pages..
- * do_vmi_munmap does all the needed commit accounting, and
- * unlocks the mmap_lock if so directed.
+ * We weren't able to just expand or shrink the area,
+ * we need to create a new one and move it.
*/
- if (old_len >= new_len) {
- VMA_ITERATOR(vmi, mm, addr + new_len);
- if (old_len == new_len) {
- ret = addr;
- goto out;
- }
+ /* We're not allowed to move the VMA, so error out. */
+ if (!(vrm->flags & MREMAP_MAYMOVE))
+ return -ENOMEM;
+
+ /* Find a new location to move the VMA to. */
+ err = vrm_set_new_addr(vrm);
+ if (err)
+ return err;
+
+ return move_vma(vrm);
+}
+
+/*
+ * Attempt to resize the VMA in-place, if we cannot, then move the VMA to the
+ * first available address to perform the operation.
+ */
+static unsigned long mremap_at(struct vma_remap_struct *vrm)
+{
+ unsigned long res;
- ret = do_vmi_munmap(&vmi, mm, addr + new_len, old_len - new_len,
- &uf_unmap, true);
- if (ret)
- goto out;
+ switch (vrm->remap_type) {
+ case MREMAP_INVALID:
+ break;
+ case MREMAP_NO_RESIZE:
+ /* NO-OP CASE - resizing to the same size. */
+ return vrm->addr;
+ case MREMAP_SHRINK:
+ /*
+ * SHRINK CASE. Can always be done in-place.
+ *
+ * Simply unmap the shrunken portion of the VMA. This does all
+ * the needed commit accounting, and we indicate that the mmap
+ * lock should be dropped.
+ */
+ res = shrink_vma(vrm, /* drop_lock= */true);
+ if (res)
+ return res;
- ret = addr;
- goto out_unlocked;
+ return vrm->addr;
+ case MREMAP_EXPAND:
+ return expand_vma(vrm);
}
- /*
- * Ok, we need to grow..
- */
- ret = resize_is_valid(vma, addr, old_len, new_len, flags);
+ BUG();
+}
+
+static unsigned long do_mremap(struct vma_remap_struct *vrm)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long ret;
+
+ ret = check_mremap_params(vrm);
if (ret)
- goto out;
+ return ret;
- /* old_len exactly to the end of the area..
- */
- if (old_len == vma->vm_end - addr) {
- unsigned long delta = new_len - old_len;
-
- /* can we just expand the current mapping? */
- if (vma_expandable(vma, delta)) {
- long pages = delta >> PAGE_SHIFT;
- VMA_ITERATOR(vmi, mm, vma->vm_end);
- long charged = 0;
-
- if (vma->vm_flags & VM_ACCOUNT) {
- if (security_vm_enough_memory_mm(mm, pages)) {
- ret = -ENOMEM;
- goto out;
- }
- charged = pages;
- }
+ vrm->old_len = PAGE_ALIGN(vrm->old_len);
+ vrm->new_len = PAGE_ALIGN(vrm->new_len);
+ vrm_set_delta(vrm);
- /*
- * Function vma_merge_extend() is called on the
- * extension we are adding to the already existing vma,
- * vma_merge_extend() will merge this extension with the
- * already existing vma (expand operation itself) and
- * possibly also with the next vma if it becomes
- * adjacent to the expanded vma and otherwise
- * compatible.
- */
- vma = vma_merge_extend(&vmi, vma, delta);
- if (!vma) {
- vm_unacct_memory(charged);
- ret = -ENOMEM;
- goto out;
- }
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
+ vrm->mmap_locked = true;
- vm_stat_account(mm, vma->vm_flags, pages);
- if (vma->vm_flags & VM_LOCKED) {
- mm->locked_vm += pages;
- locked = true;
- new_addr = addr;
- }
- ret = addr;
- goto out;
- }
+ vma = vrm->vma = vma_lookup(mm, vrm->addr);
+ if (!vma) {
+ ret = -EFAULT;
+ goto out;
}
- /*
- * We weren't able to just expand or shrink the area,
- * we need to create a new one and move it..
- */
- ret = -ENOMEM;
- if (flags & MREMAP_MAYMOVE) {
- unsigned long map_flags = 0;
- if (vma->vm_flags & VM_MAYSHARE)
- map_flags |= MAP_SHARED;
-
- new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
- vma->vm_pgoff +
- ((addr - vma->vm_start) >> PAGE_SHIFT),
- map_flags);
- if (IS_ERR_VALUE(new_addr)) {
- ret = new_addr;
- goto out;
- }
+ /* If mseal()'d, mremap() is prohibited. */
+ if (!can_modify_vma(vma)) {
+ ret = -EPERM;
+ goto out;
+ }
- ret = move_vma(vma, addr, old_len, new_len, new_addr,
- &locked, flags, &uf, &uf_unmap);
+ /* Align to hugetlb page size, if required. */
+ if (is_vm_hugetlb_page(vma) && !align_hugetlb(vrm)) {
+ ret = -EINVAL;
+ goto out;
}
+
+ vrm->remap_type = vrm_remap_type(vrm);
+
+ /* Actually execute mremap. */
+ ret = vrm_implies_new_addr(vrm) ? mremap_to(vrm) : mremap_at(vrm);
+
out:
- if (offset_in_page(ret))
- locked = false;
- mmap_write_unlock(current->mm);
- if (locked && new_len > old_len)
- mm_populate(new_addr + old_len, new_len - old_len);
-out_unlocked:
- userfaultfd_unmap_complete(mm, &uf_unmap_early);
- mremap_userfaultfd_complete(&uf, addr, ret, old_len);
- userfaultfd_unmap_complete(mm, &uf_unmap);
+ if (vrm->mmap_locked) {
+ mmap_write_unlock(mm);
+ vrm->mmap_locked = false;
+
+ if (!offset_in_page(ret) && vrm->mlocked && vrm->new_len > vrm->old_len)
+ mm_populate(vrm->new_addr + vrm->old_len, vrm->delta);
+ }
+
+ userfaultfd_unmap_complete(mm, vrm->uf_unmap_early);
+ mremap_userfaultfd_complete(vrm->uf, vrm->addr, ret, vrm->old_len);
+ userfaultfd_unmap_complete(mm, vrm->uf_unmap);
+
return ret;
}
+
+/*
+ * Expand (or shrink) an existing mapping, potentially moving it at the
+ * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
+ *
+ * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
+ * This option implies MREMAP_MAYMOVE.
+ */
+SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+ unsigned long, new_len, unsigned long, flags,
+ unsigned long, new_addr)
+{
+ struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
+ LIST_HEAD(uf_unmap_early);
+ LIST_HEAD(uf_unmap);
+ /*
+ * There is a deliberate asymmetry here: we strip the pointer tag
+ * from the old address but leave the new address alone. This is
+ * for consistency with mmap(), where we prevent the creation of
+ * aliasing mappings in userspace by leaving the tag bits of the
+ * mapping address intact. A non-zero tag will cause the subsequent
+ * range checks to reject the address as invalid.
+ *
+ * See Documentation/arch/arm64/tagged-address-abi.rst for more
+ * information.
+ */
+ struct vma_remap_struct vrm = {
+ .addr = untagged_addr(addr),
+ .old_len = old_len,
+ .new_len = new_len,
+ .flags = flags,
+ .new_addr = new_addr,
+
+ .uf = &uf,
+ .uf_unmap_early = &uf_unmap_early,
+ .uf_unmap = &uf_unmap,
+
+ .remap_type = MREMAP_INVALID, /* We set later. */
+ };
+
+ return do_mremap(&vrm);
+}
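From userspace, the path above is driven by mremap(2); a minimal usage sketch (error handling elided for brevity):

        #define _GNU_SOURCE
        #include <stdio.h>
        #include <string.h>
        #include <sys/mman.h>

        int main(void)
        {
                size_t old_len = 2 * 4096, new_len = 8 * 4096;
                char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

                strcpy(p, "survives the move");
                /* MREMAP_MAYMOVE lets the kernel relocate the mapping if it
                 * cannot be expanded in place (the expand_vma() path). */
                p = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
                printf("%s at %p\n", p, (void *)p);
                munmap(p, new_len);
                return 0;
        }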
diff --git a/mm/nommu.c b/mm/nommu.c
index 753384666bae..617e7ba8022f 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -42,17 +42,11 @@
#include <asm/mmu_context.h>
#include "internal.h"
-void *high_memory;
-EXPORT_SYMBOL(high_memory);
-struct page *mem_map;
-unsigned long max_mapnr;
-EXPORT_SYMBOL(max_mapnr);
unsigned long highest_memmap_pfn;
int heap_stack_gap = 0;
atomic_long_t mmap_pages_allocated;
-EXPORT_SYMBOL(mem_map);
/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
@@ -1204,7 +1198,7 @@ share:
setup_vma_to_mm(vma, current->mm);
current->mm->map_count++;
/* add the VMA to the tree */
- vma_iter_store(&vmi, vma);
+ vma_iter_store_new(&vmi, vma);
/* we flush the region from the icache only when the first executable
* mapping of it is made */
@@ -1369,7 +1363,7 @@ static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
setup_vma_to_mm(vma, mm);
setup_vma_to_mm(new, mm);
- vma_iter_store(vmi, new);
+ vma_iter_store_new(vmi, new);
mm->map_count++;
return 0;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 1cf121ad7085..25923cfec9c6 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -563,7 +563,7 @@ static bool __oom_reap_task_mm(struct mm_struct *mm)
}
/*
- * Reaps the address space of the give task.
+ * Reaps the address space of the given task.
*
* Returns true on success and false if none or part of the address space
* has been reclaimed and the caller should retry later.
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index eb55ece39c56..18456ddd463b 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -120,29 +120,6 @@ EXPORT_SYMBOL(laptop_mode);
struct wb_domain global_wb_domain;
-/* consolidated parameters for balance_dirty_pages() and its subroutines */
-struct dirty_throttle_control {
-#ifdef CONFIG_CGROUP_WRITEBACK
- struct wb_domain *dom;
- struct dirty_throttle_control *gdtc; /* only set in memcg dtc's */
-#endif
- struct bdi_writeback *wb;
- struct fprop_local_percpu *wb_completions;
-
- unsigned long avail; /* dirtyable */
- unsigned long dirty; /* file_dirty + write + nfs */
- unsigned long thresh; /* dirty threshold */
- unsigned long bg_thresh; /* dirty background threshold */
-
- unsigned long wb_dirty; /* per-wb counterparts */
- unsigned long wb_thresh;
- unsigned long wb_bg_thresh;
-
- unsigned long pos_ratio;
- bool freerun;
- bool dirty_exceeded;
-};
-
/*
* Length of period for aging writeout fractions of bdis. This is an
* arbitrarily chosen number. The longer the period, the slower fractions will
@@ -1095,7 +1072,7 @@ static void wb_position_ratio(struct dirty_throttle_control *dtc)
struct bdi_writeback *wb = dtc->wb;
unsigned long write_bw = READ_ONCE(wb->avg_write_bandwidth);
unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
- unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
+ unsigned long limit = dtc->limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
unsigned long wb_thresh = dtc->wb_thresh;
unsigned long x_intercept;
unsigned long setpoint; /* dirty pages' target balance point */
@@ -1962,11 +1939,7 @@ free_running:
*/
if (pause < min_pause) {
trace_balance_dirty_pages(wb,
- sdtc->thresh,
- sdtc->bg_thresh,
- sdtc->dirty,
- sdtc->wb_thresh,
- sdtc->wb_dirty,
+ sdtc,
dirty_ratelimit,
task_ratelimit,
pages_dirtied,
@@ -1991,11 +1964,7 @@ free_running:
pause:
trace_balance_dirty_pages(wb,
- sdtc->thresh,
- sdtc->bg_thresh,
- sdtc->dirty,
- sdtc->wb_thresh,
- sdtc->wb_dirty,
+ sdtc,
dirty_ratelimit,
task_ratelimit,
pages_dirtied,
@@ -3109,6 +3078,7 @@ void __folio_start_writeback(struct folio *folio, bool keep_write)
int access_ret;
VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
if (mapping && mapping_use_writeback_tags(mapping)) {
XA_STATE(xas, &mapping->i_pages, folio_index(folio));
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e3ea5bf5c459..fd6b865cb1ab 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -276,6 +276,7 @@ int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
static int watermark_boost_factor __read_mostly = 15000;
static int watermark_scale_factor = 10;
+int defrag_mode;
/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
@@ -511,9 +512,9 @@ out:
static inline unsigned int order_to_pindex(int migratetype, int order)
{
- bool __maybe_unused movable;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ bool movable;
if (order > PAGE_ALLOC_COSTLY_ORDER) {
VM_BUG_ON(order != HPAGE_PMD_ORDER);
@@ -617,6 +618,10 @@ compaction_capture(struct capture_control *capc, struct page *page,
capc->cc->migratetype != MIGRATE_MOVABLE)
return false;
+ if (migratetype != capc->cc->migratetype)
+ trace_mm_page_alloc_extfrag(page, capc->cc->order, order,
+ capc->cc->migratetype, migratetype);
+
capc->page = page;
return true;
}
@@ -658,16 +663,20 @@ static inline void __add_to_free_list(struct page *page, struct zone *zone,
bool tail)
{
struct free_area *area = &zone->free_area[order];
+ int nr_pages = 1 << order;
VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
"page type is %lu, passed migratetype is %d (nr=%d)\n",
- get_pageblock_migratetype(page), migratetype, 1 << order);
+ get_pageblock_migratetype(page), migratetype, nr_pages);
if (tail)
list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
else
list_add(&page->buddy_list, &area->free_list[migratetype]);
area->nr_free++;
+
+ if (order >= pageblock_order && !is_migrate_isolate(migratetype))
+ __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages);
}
/*
@@ -679,24 +688,34 @@ static inline void move_to_free_list(struct page *page, struct zone *zone,
unsigned int order, int old_mt, int new_mt)
{
struct free_area *area = &zone->free_area[order];
+ int nr_pages = 1 << order;
/* Free page moving can fail, so it happens before the type update */
VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt,
"page type is %lu, passed migratetype is %d (nr=%d)\n",
- get_pageblock_migratetype(page), old_mt, 1 << order);
+ get_pageblock_migratetype(page), old_mt, nr_pages);
list_move_tail(&page->buddy_list, &area->free_list[new_mt]);
- account_freepages(zone, -(1 << order), old_mt);
- account_freepages(zone, 1 << order, new_mt);
+ account_freepages(zone, -nr_pages, old_mt);
+ account_freepages(zone, nr_pages, new_mt);
+
+ if (order >= pageblock_order &&
+ is_migrate_isolate(old_mt) != is_migrate_isolate(new_mt)) {
+ if (!is_migrate_isolate(old_mt))
+ nr_pages = -nr_pages;
+ __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages);
+ }
}
static inline void __del_page_from_free_list(struct page *page, struct zone *zone,
unsigned int order, int migratetype)
{
+ int nr_pages = 1 << order;
+
VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
"page type is %lu, passed migratetype is %d (nr=%d)\n",
- get_pageblock_migratetype(page), migratetype, 1 << order);
+ get_pageblock_migratetype(page), migratetype, nr_pages);
/* clear reported state and update reported page count */
if (page_reported(page))
@@ -706,6 +725,9 @@ static inline void __del_page_from_free_list(struct page *page, struct zone *zon
__ClearPageBuddy(page);
set_page_private(page, 0);
zone->free_area[order].nr_free--;
+
+ if (order >= pageblock_order && !is_migrate_isolate(migratetype))
+ __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, -nr_pages);
}
static inline void del_page_from_free_list(struct page *page, struct zone *zone,
@@ -950,21 +972,34 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page)
switch (page - head_page) {
case 1:
/* the first tail page: these may be in place of ->mapping */
- if (unlikely(folio_entire_mapcount(folio))) {
- bad_page(page, "nonzero entire_mapcount");
- goto out;
- }
if (unlikely(folio_large_mapcount(folio))) {
bad_page(page, "nonzero large_mapcount");
goto out;
}
- if (unlikely(atomic_read(&folio->_nr_pages_mapped))) {
+ if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT) &&
+ unlikely(atomic_read(&folio->_nr_pages_mapped))) {
bad_page(page, "nonzero nr_pages_mapped");
goto out;
}
- if (unlikely(atomic_read(&folio->_pincount))) {
- bad_page(page, "nonzero pincount");
- goto out;
+ if (IS_ENABLED(CONFIG_MM_ID)) {
+ if (unlikely(folio->_mm_id_mapcount[0] != -1)) {
+ bad_page(page, "nonzero mm mapcount 0");
+ goto out;
+ }
+ if (unlikely(folio->_mm_id_mapcount[1] != -1)) {
+ bad_page(page, "nonzero mm mapcount 1");
+ goto out;
+ }
+ }
+ if (IS_ENABLED(CONFIG_64BIT)) {
+ if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) {
+ bad_page(page, "nonzero entire_mapcount");
+ goto out;
+ }
+ if (unlikely(atomic_read(&folio->_pincount))) {
+ bad_page(page, "nonzero pincount");
+ goto out;
+ }
}
break;
case 2:
@@ -973,7 +1008,22 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page)
bad_page(page, "on deferred list");
goto out;
}
+ if (!IS_ENABLED(CONFIG_64BIT)) {
+ if (unlikely(atomic_read(&folio->_entire_mapcount) + 1)) {
+ bad_page(page, "nonzero entire_mapcount");
+ goto out;
+ }
+ if (unlikely(atomic_read(&folio->_pincount))) {
+ bad_page(page, "nonzero pincount");
+ goto out;
+ }
+ }
break;
+ case 3:
+ /* the third tail page: hugetlb specifics overlap ->mapping */
+ if (IS_ENABLED(CONFIG_HUGETLB_PAGE))
+ break;
+ fallthrough;
default:
if (page->mapping != TAIL_MAPPING) {
bad_page(page, "corrupted mapping in tail page");
@@ -1044,6 +1094,84 @@ static void kernel_init_pages(struct page *page, int numpages)
kasan_enable_current();
}
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+
+/* Should be called only if mem_alloc_profiling_enabled() */
+void __clear_page_tag_ref(struct page *page)
+{
+ union pgtag_ref_handle handle;
+ union codetag_ref ref;
+
+ if (get_page_tag_ref(page, &ref, &handle)) {
+ set_codetag_empty(&ref);
+ update_page_tag_ref(handle, &ref);
+ put_page_tag_ref(handle);
+ }
+}
+
+/* Should be called only if mem_alloc_profiling_enabled() */
+static noinline
+void __pgalloc_tag_add(struct page *page, struct task_struct *task,
+ unsigned int nr)
+{
+ union pgtag_ref_handle handle;
+ union codetag_ref ref;
+
+ if (get_page_tag_ref(page, &ref, &handle)) {
+ alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr);
+ update_page_tag_ref(handle, &ref);
+ put_page_tag_ref(handle);
+ }
+}
+
+static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
+ unsigned int nr)
+{
+ if (mem_alloc_profiling_enabled())
+ __pgalloc_tag_add(page, task, nr);
+}
+
+/* Should be called only if mem_alloc_profiling_enabled() */
+static noinline
+void __pgalloc_tag_sub(struct page *page, unsigned int nr)
+{
+ union pgtag_ref_handle handle;
+ union codetag_ref ref;
+
+ if (get_page_tag_ref(page, &ref, &handle)) {
+ alloc_tag_sub(&ref, PAGE_SIZE * nr);
+ update_page_tag_ref(handle, &ref);
+ put_page_tag_ref(handle);
+ }
+}
+
+static inline void pgalloc_tag_sub(struct page *page, unsigned int nr)
+{
+ if (mem_alloc_profiling_enabled())
+ __pgalloc_tag_sub(page, nr);
+}
+
+static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr)
+{
+ struct alloc_tag *tag;
+
+ if (!mem_alloc_profiling_enabled())
+ return;
+
+ tag = __pgalloc_tag_get(page);
+ if (tag)
+ this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr);
+}
+
+#else /* CONFIG_MEM_ALLOC_PROFILING */
+
+static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
+ unsigned int nr) {}
+static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}
+static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) {}
+
+#endif /* CONFIG_MEM_ALLOC_PROFILING */
+
__always_inline bool free_pages_prepare(struct page *page,
unsigned int order)
{
@@ -1099,8 +1227,12 @@ __always_inline bool free_pages_prepare(struct page *page,
if (unlikely(order)) {
int i;
- if (compound)
+ if (compound) {
page[1].flags &= ~PAGE_FLAGS_SECOND;
+#ifdef NR_PAGES_IN_LARGE_FOLIO
+ folio->_nr_pages = 0;
+#endif
+ }
for (i = 1; i < (1 << order); i++) {
if (compound)
bad += free_tail_page_prepare(page, page + i);
@@ -1461,7 +1593,7 @@ static __always_inline void page_del_and_expand(struct zone *zone,
static void check_new_page_bad(struct page *page)
{
- if (unlikely(page->flags & __PG_HWPOISON)) {
+ if (unlikely(PageHWPoison(page))) {
/* Don't complain about hwpoisoned pages */
if (PageBuddy(page))
__ClearPageBuddy(page);
@@ -1859,47 +1991,6 @@ static void change_pageblock_range(struct page *pageblock_page,
}
}
-/*
- * When we are falling back to another migratetype during allocation, try to
- * steal extra free pages from the same pageblocks to satisfy further
- * allocations, instead of polluting multiple pageblocks.
- *
- * If we are stealing a relatively large buddy page, it is likely there will
- * be more free pages in the pageblock, so try to steal them all. For
- * reclaimable and unmovable allocations, we steal regardless of page size,
- * as fragmentation caused by those allocations polluting movable pageblocks
- * is worse than movable allocations stealing from unmovable and reclaimable
- * pageblocks.
- */
-static bool can_steal_fallback(unsigned int order, int start_mt)
-{
- /*
- * Leaving this order check is intended, although there is
- * relaxed order check in next check. The reason is that
- * we can actually steal whole pageblock if this condition met,
- * but, below check doesn't guarantee it and that is just heuristic
- * so could be changed anytime.
- */
- if (order >= pageblock_order)
- return true;
-
- /*
- * Movable pages won't cause permanent fragmentation, so when you alloc
- * small pages, you just need to temporarily steal unmovable or
- * reclaimable pages that are closest to the request size. After a
- * while, memory compaction may occur to form large contiguous pages,
- * and the next movable allocation may not need to steal. Unmovable and
- * reclaimable allocations need to actually steal pages.
- */
- if (order >= pageblock_order / 2 ||
- start_mt == MIGRATE_RECLAIMABLE ||
- start_mt == MIGRATE_UNMOVABLE ||
- page_group_by_mobility_disabled)
- return true;
-
- return false;
-}
-
static inline bool boost_watermark(struct zone *zone)
{
unsigned long max_boost;
@@ -1938,30 +2029,99 @@ static inline bool boost_watermark(struct zone *zone)
}
/*
- * This function implements actual steal behaviour. If order is large enough, we
- * can claim the whole pageblock for the requested migratetype. If not, we check
- * the pageblock for constituent pages; if at least half of the pages are free
- * or compatible, we can still claim the whole block, so pages freed in the
- * future will be put on the correct free list. Otherwise, we isolate exactly
- * the order we need from the fallback block and leave its migratetype alone.
+ * When we are falling back to another migratetype during allocation, should we
+ * try to claim an entire block to satisfy further allocations, instead of
+ * polluting multiple pageblocks?
*/
-static struct page *
-steal_suitable_fallback(struct zone *zone, struct page *page,
- int current_order, int order, int start_type,
- unsigned int alloc_flags, bool whole_block)
+static bool should_try_claim_block(unsigned int order, int start_mt)
{
- int free_pages, movable_pages, alike_pages;
- unsigned long start_pfn;
- int block_type;
+ /*
+ * Leaving this order check in place is intentional, although there is
+ * a more relaxed order check below. The reason is that we can
+ * actually claim the whole pageblock if this condition is met, but
+ * the check below doesn't guarantee that, and is just a heuristic
+ * which could be changed at any time.
+ */
+ if (order >= pageblock_order)
+ return true;
+
+ /*
+ * Above a certain threshold, always try to claim, as it's likely there
+ * will be more free pages in the pageblock.
+ */
+ if (order >= pageblock_order / 2)
+ return true;
- block_type = get_pageblock_migratetype(page);
+ /*
+ * Unmovable/reclaimable allocations would cause permanent
+ * fragmentation if they fell back to allocating from a movable block
+ * (polluting it), so we try to claim the whole block regardless of the
+ * allocation size. Later movable allocations can always steal from this
+ * block, which is less problematic.
+ */
+ if (start_mt == MIGRATE_RECLAIMABLE || start_mt == MIGRATE_UNMOVABLE)
+ return true;
+
+ if (page_group_by_mobility_disabled)
+ return true;
/*
- * This can happen due to races and we want to prevent broken
- * highatomic accounting.
+ * Movable pages won't cause permanent fragmentation, so for small
+ * allocations we just need to temporarily steal unmovable or
+ * reclaimable pages that are closest to the request size. After a
+ * while, memory compaction may occur to form large contiguous pages,
+ * and the next movable allocation may not need to steal.
*/
- if (is_migrate_highatomic(block_type))
- goto single_page;
+ return false;
+}
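The decision ladder tabulated in a sketch, assuming pageblock_order 9 (2 MiB blocks of 4 KiB pages) and ignoring the page_group_by_mobility_disabled case:

        #include <stdbool.h>
        #include <stdio.h>

        #define pageblock_order 9 /* assumed: 2 MiB blocks, 4 KiB pages */

        enum { MOVABLE, RECLAIMABLE, UNMOVABLE };

        /* Mirrors the decision ladder in should_try_claim_block(). */
        static bool try_claim(unsigned int order, int start_mt)
        {
                if (order >= pageblock_order)
                        return true;
                if (order >= pageblock_order / 2)
                        return true;
                if (start_mt == RECLAIMABLE || start_mt == UNMOVABLE)
                        return true;
                return false;
        }

        int main(void)
        {
                printf("%d\n", try_claim(0, MOVABLE));   /* 0: steal one page */
                printf("%d\n", try_claim(0, UNMOVABLE)); /* 1: claim the block */
                printf("%d\n", try_claim(5, MOVABLE));   /* 1: order >= 4 */
                return 0;
        }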
+
+/*
+ * Check whether there is a suitable fallback freepage of the requested order.
+ * Sets *claim_block to instruct the caller whether it should convert a whole
+ * pageblock to the returned migratetype.
+ * If only_claim is true, this function returns fallback_mt only if
+ * we would do this whole-block claiming. This helps reduce
+ * fragmentation due to mixed migratetype pages in one pageblock.
+ */
+int find_suitable_fallback(struct free_area *area, unsigned int order,
+ int migratetype, bool only_claim, bool *claim_block)
+{
+ int i;
+ int fallback_mt;
+
+ if (area->nr_free == 0)
+ return -1;
+
+ *claim_block = false;
+ for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) {
+ fallback_mt = fallbacks[migratetype][i];
+ if (free_area_empty(area, fallback_mt))
+ continue;
+
+ if (should_try_claim_block(order, migratetype))
+ *claim_block = true;
+
+ if (*claim_block || !only_claim)
+ return fallback_mt;
+ }
+
+ return -1;
+}
+
+/*
+ * This function implements actual block claiming behaviour. If order is large
+ * enough, we can claim the whole pageblock for the requested migratetype. If
+ * not, we check the pageblock for constituent pages; if at least half of the
+ * pages are free or compatible, we can still claim the whole block, so pages
+ * freed in the future will be put on the correct free list.
+ */
+static struct page *
+try_to_claim_block(struct zone *zone, struct page *page,
+ int current_order, int order, int start_type,
+ int block_type, unsigned int alloc_flags)
+{
+ int free_pages, movable_pages, alike_pages;
+ unsigned long start_pfn;
/* Take ownership for orders >= pageblock_order */
if (current_order >= pageblock_order) {
@@ -1982,14 +2142,10 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
- /* We are not allowed to try stealing from the whole block */
- if (!whole_block)
- goto single_page;
-
/* moving whole block can fail due to zone boundary conditions */
if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages,
&movable_pages))
- goto single_page;
+ return NULL;
/*
* Determine how many pages are compatible with our allocation.
@@ -2022,198 +2178,24 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
return __rmqueue_smallest(zone, order, start_type);
}
-single_page:
- page_del_and_expand(zone, page, order, current_order, block_type);
- return page;
-}
-
-/*
- * Check whether there is a suitable fallback freepage with requested order.
- * If only_stealable is true, this function returns fallback_mt only if
- * we can steal other freepages all together. This would help to reduce
- * fragmentation due to mixed migratetype pages in one pageblock.
- */
-int find_suitable_fallback(struct free_area *area, unsigned int order,
- int migratetype, bool only_stealable, bool *can_steal)
-{
- int i;
- int fallback_mt;
-
- if (area->nr_free == 0)
- return -1;
-
- *can_steal = false;
- for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) {
- fallback_mt = fallbacks[migratetype][i];
- if (free_area_empty(area, fallback_mt))
- continue;
-
- if (can_steal_fallback(order, migratetype))
- *can_steal = true;
-
- if (!only_stealable)
- return fallback_mt;
-
- if (*can_steal)
- return fallback_mt;
- }
-
- return -1;
-}
-
-/*
- * Reserve the pageblock(s) surrounding an allocation request for
- * exclusive use of high-order atomic allocations if there are no
- * empty page blocks that contain a page with a suitable order
- */
-static void reserve_highatomic_pageblock(struct page *page, int order,
- struct zone *zone)
-{
- int mt;
- unsigned long max_managed, flags;
-
- /*
- * The number reserved as: minimum is 1 pageblock, maximum is
- * roughly 1% of a zone. But if 1% of a zone falls below a
- * pageblock size, then don't reserve any pageblocks.
- * Check is race-prone but harmless.
- */
- if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages)
- return;
- max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages);
- if (zone->nr_reserved_highatomic >= max_managed)
- return;
-
- spin_lock_irqsave(&zone->lock, flags);
-
- /* Recheck the nr_reserved_highatomic limit under the lock */
- if (zone->nr_reserved_highatomic >= max_managed)
- goto out_unlock;
-
- /* Yoink! */
- mt = get_pageblock_migratetype(page);
- /* Only reserve normal pageblocks (i.e., they can merge with others) */
- if (!migratetype_is_mergeable(mt))
- goto out_unlock;
-
- if (order < pageblock_order) {
- if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1)
- goto out_unlock;
- zone->nr_reserved_highatomic += pageblock_nr_pages;
- } else {
- change_pageblock_range(page, order, MIGRATE_HIGHATOMIC);
- zone->nr_reserved_highatomic += 1 << order;
- }
-
-out_unlock:
- spin_unlock_irqrestore(&zone->lock, flags);
+ return NULL;
}
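
The threshold test itself lives in the elided context of this hunk; as a rough standalone sketch of the "at least half free or compatible" rule from the comment above (the exact kernel condition may differ in detail):

    #include <stdbool.h>
    #include <stdio.h>

    /* Claim the block when at least half of it is free or "alike". */
    static bool toy_should_claim(int free_pages, int alike_pages,
                                 int block_pages)
    {
        return free_pages + alike_pages >= block_pages / 2;
    }

    int main(void)
    {
        /* a 512-page (2 MiB) block: 200 free + 80 compatible pages */
        printf("%d\n", toy_should_claim(200, 80, 512));  /* 1: claim */
        printf("%d\n", toy_should_claim(100, 50, 512));  /* 0: don't */
        return 0;
    }
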
/*
- * Used when an allocation is about to fail under memory pressure. This
- * potentially hurts the reliability of high-order allocations when under
- * intense memory pressure but failed atomic allocations should be easier
- * to recover from than an OOM.
+ * Try finding a free buddy page on the fallback list.
*
- * If @force is true, try to unreserve pageblocks even though highatomic
- * pageblock is exhausted.
- */
-static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
- bool force)
-{
- struct zonelist *zonelist = ac->zonelist;
- unsigned long flags;
- struct zoneref *z;
- struct zone *zone;
- struct page *page;
- int order;
- int ret;
-
- for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
- ac->nodemask) {
- /*
- * Preserve at least one pageblock unless memory pressure
- * is really high.
- */
- if (!force && zone->nr_reserved_highatomic <=
- pageblock_nr_pages)
- continue;
-
- spin_lock_irqsave(&zone->lock, flags);
- for (order = 0; order < NR_PAGE_ORDERS; order++) {
- struct free_area *area = &(zone->free_area[order]);
- int mt;
-
- page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
- if (!page)
- continue;
-
- mt = get_pageblock_migratetype(page);
- /*
- * In page freeing path, migratetype change is racy so
- * we can counter several free pages in a pageblock
- * in this loop although we changed the pageblock type
- * from highatomic to ac->migratetype. So we should
- * adjust the count once.
- */
- if (is_migrate_highatomic(mt)) {
- unsigned long size;
- /*
- * It should never happen but changes to
- * locking could inadvertently allow a per-cpu
- * drain to add pages to MIGRATE_HIGHATOMIC
- * while unreserving so be safe and watch for
- * underflows.
- */
- size = max(pageblock_nr_pages, 1UL << order);
- size = min(size, zone->nr_reserved_highatomic);
- zone->nr_reserved_highatomic -= size;
- }
-
- /*
- * Convert to ac->migratetype and avoid the normal
- * pageblock stealing heuristics. Minimally, the caller
- * is doing the work and needs the pages. More
- * importantly, if the block was always converted to
- * MIGRATE_UNMOVABLE or another type then the number
- * of pageblocks that cannot be completely freed
- * may increase.
- */
- if (order < pageblock_order)
- ret = move_freepages_block(zone, page, mt,
- ac->migratetype);
- else {
- move_to_free_list(page, zone, order, mt,
- ac->migratetype);
- change_pageblock_range(page, order,
- ac->migratetype);
- ret = 1;
- }
- /*
- * Reserving the block(s) already succeeded,
- * so this should not fail on zone boundaries.
- */
- WARN_ON_ONCE(ret == -1);
- if (ret > 0) {
- spin_unlock_irqrestore(&zone->lock, flags);
- return ret;
- }
- }
- spin_unlock_irqrestore(&zone->lock, flags);
- }
-
- return false;
-}
-
-/*
- * Try finding a free buddy page on the fallback list and put it on the free
- * list of requested migratetype, possibly along with other pages from the same
- * block, depending on fragmentation avoidance heuristics. Returns true if
- * fallback was found so that __rmqueue_smallest() can grab it.
+ * This will attempt to claim a whole pageblock for the requested type
+ * to ensure grouping of such requests in the future.
+ *
+ * If a whole block cannot be claimed, steal an individual page, regressing to
+ * __rmqueue_smallest() logic to at least break up as little contiguity as
+ * possible.
*
* The use of signed ints for order and current_order is a deliberate
* deviation from the rest of this file, to make the for loop
* condition simpler.
+ *
+ * Return the stolen page, or NULL if none can be found.
*/
static __always_inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
@@ -2224,7 +2206,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
int min_order = order;
struct page *page;
int fallback_mt;
- bool can_steal;
+ bool claim_block;
/*
* Do not steal pages from freelists belonging to other pageblocks
@@ -2243,49 +2225,40 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
--current_order) {
area = &(zone->free_area[current_order]);
fallback_mt = find_suitable_fallback(area, current_order,
- start_migratetype, false, &can_steal);
+ start_migratetype, false, &claim_block);
if (fallback_mt == -1)
continue;
- /*
- * We cannot steal all free pages from the pageblock and the
- * requested migratetype is movable. In that case it's better to
- * steal and split the smallest available page instead of the
- * largest available page, because even if the next movable
- * allocation falls back into a different pageblock than this
- * one, it won't cause permanent fragmentation.
- */
- if (!can_steal && start_migratetype == MIGRATE_MOVABLE
- && current_order > order)
- goto find_smallest;
+ if (!claim_block)
+ break;
- goto do_steal;
+ page = get_page_from_free_area(area, fallback_mt);
+ page = try_to_claim_block(zone, page, current_order, order,
+ start_migratetype, fallback_mt,
+ alloc_flags);
+ if (page)
+ goto got_one;
}
- return NULL;
+ if (alloc_flags & ALLOC_NOFRAGMENT)
+ return NULL;
-find_smallest:
+ /* No luck claiming pageblock. Find the smallest fallback page */
for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
area = &(zone->free_area[current_order]);
fallback_mt = find_suitable_fallback(area, current_order,
- start_migratetype, false, &can_steal);
- if (fallback_mt != -1)
- break;
- }
-
- /*
- * This should not happen - we already found a suitable fallback
- * when looking for the largest page.
- */
- VM_BUG_ON(current_order > MAX_PAGE_ORDER);
+ start_migratetype, false, &claim_block);
+ if (fallback_mt == -1)
+ continue;
-do_steal:
- page = get_page_from_free_area(area, fallback_mt);
+ page = get_page_from_free_area(area, fallback_mt);
+ page_del_and_expand(zone, page, order, current_order, fallback_mt);
+ goto got_one;
+ }
- /* take off list, maybe claim block, expand remainder */
- page = steal_suitable_fallback(zone, page, current_order, order,
- start_migratetype, alloc_flags, can_steal);
+ return NULL;
+got_one:
trace_mm_page_alloc_extfrag(page, order, current_order,
start_migratetype, fallback_mt);
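
Condensed, the rewritten function now makes two passes. A standalone sketch of that control flow, with toy arrays standing in for the zone's free areas:

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_ORDERS 4

    /* Pass 1: from the largest order down, whole-block claims only;
     * give up on the first fallback not worth claiming. Pass 2: from
     * the requested order up, steal a single page. (The
     * ALLOC_NOFRAGMENT bail-out between the passes is omitted.) */
    static int toy_rmqueue_fallback(const bool has_free[NR_ORDERS],
                                    const bool claimable[NR_ORDERS],
                                    int order)
    {
        for (int o = NR_ORDERS - 1; o >= order; o--) {
            if (!has_free[o])
                continue;
            if (!claimable[o])
                break;
            return o;            /* claimed a whole block at order o */
        }
        for (int o = order; o < NR_ORDERS; o++)
            if (has_free[o])
                return o;        /* stole a single page at order o */
        return -1;
    }

    int main(void)
    {
        const bool has_free[NR_ORDERS]  = { false, true, true, false };
        const bool claimable[NR_ORDERS] = { false, false, true, false };

        /* prints 2: order 2 is the largest claimable fallback */
        printf("order used: %d\n",
               toy_rmqueue_fallback(has_free, claimable, 1));
        return 0;
    }
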
@@ -2862,7 +2835,7 @@ void split_page(struct page *page, unsigned int order)
set_page_refcounted(page + i);
split_page_owner(page, order, 0);
pgalloc_tag_split(page_folio(page), order, 0);
- split_page_memcg(page, order, 0);
+ split_page_memcg(page, order);
}
EXPORT_SYMBOL_GPL(split_page);
@@ -3156,6 +3129,142 @@ out:
return page;
}
+/*
+ * Reserve the pageblock(s) surrounding an allocation request for
+ * exclusive use of high-order atomic allocations if there are no
+ * empty page blocks that contain a page with a suitable order
+ */
+static void reserve_highatomic_pageblock(struct page *page, int order,
+ struct zone *zone)
+{
+ int mt;
+ unsigned long max_managed, flags;
+
+ /*
+ * The amount reserved: the minimum is 1 pageblock, the maximum is
+ * roughly 1% of the zone. But if 1% of the zone falls below one
+ * pageblock in size, then don't reserve any pageblocks.
+ * The check is race-prone but harmless.
+ */
+ if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages)
+ return;
+ max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages);
+ if (zone->nr_reserved_highatomic >= max_managed)
+ return;
+
+ spin_lock_irqsave(&zone->lock, flags);
+
+ /* Recheck the nr_reserved_highatomic limit under the lock */
+ if (zone->nr_reserved_highatomic >= max_managed)
+ goto out_unlock;
+
+ /* Yoink! */
+ mt = get_pageblock_migratetype(page);
+ /* Only reserve normal pageblocks (i.e., they can merge with others) */
+ if (!migratetype_is_mergeable(mt))
+ goto out_unlock;
+
+ if (order < pageblock_order) {
+ if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1)
+ goto out_unlock;
+ zone->nr_reserved_highatomic += pageblock_nr_pages;
+ } else {
+ change_pageblock_range(page, order, MIGRATE_HIGHATOMIC);
+ zone->nr_reserved_highatomic += 1 << order;
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&zone->lock, flags);
+}
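
The reservation cap above is simple arithmetic; a standalone sketch with concrete numbers (TOY_ALIGN is a plain stand-in for the kernel's ALIGN macro):

    #include <stdio.h>

    /* Plain-arithmetic stand-in for the kernel's ALIGN() (round up). */
    #define TOY_ALIGN(x, a) ((((x) + (a) - 1) / (a)) * (a))

    int main(void)
    {
        unsigned long managed   = 262144;  /* a 1 GiB zone in 4 KiB pages */
        unsigned long pageblock = 512;     /* one 2 MiB pageblock */

        if (managed / 100 < pageblock) {
            puts("zone too small: no highatomic reservation");
            return 0;
        }
        /* cap at ~1% of the zone, rounded up to whole pageblocks: 3072 */
        printf("max_managed = %lu pages\n",
               TOY_ALIGN(managed / 100, pageblock));
        return 0;
    }
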
+
+/*
+ * Used when an allocation is about to fail under memory pressure. This
+ * potentially hurts the reliability of high-order allocations when under
+ * intense memory pressure but failed atomic allocations should be easier
+ * to recover from than an OOM.
+ *
+ * If @force is true, try to unreserve pageblocks even though highatomic
+ * pageblock is exhausted.
+ */
+static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
+ bool force)
+{
+ struct zonelist *zonelist = ac->zonelist;
+ unsigned long flags;
+ struct zoneref *z;
+ struct zone *zone;
+ struct page *page;
+ int order;
+ int ret;
+
+ for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
+ ac->nodemask) {
+ /*
+ * Preserve at least one pageblock unless memory pressure
+ * is really high.
+ */
+ if (!force && zone->nr_reserved_highatomic <=
+ pageblock_nr_pages)
+ continue;
+
+ spin_lock_irqsave(&zone->lock, flags);
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
+ struct free_area *area = &(zone->free_area[order]);
+ unsigned long size;
+
+ page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
+ if (!page)
+ continue;
+
+ size = max(pageblock_nr_pages, 1UL << order);
+ /*
+ * It should never happen, but changes to
+ * locking could inadvertently allow a per-cpu
+ * drain to add pages to MIGRATE_HIGHATOMIC
+ * while unreserving, so be safe and watch for
+ * underflows.
+ */
+ if (WARN_ON_ONCE(size > zone->nr_reserved_highatomic))
+ size = zone->nr_reserved_highatomic;
+ zone->nr_reserved_highatomic -= size;
+
+ /*
+ * Convert to ac->migratetype and avoid the normal
+ * pageblock stealing heuristics. Minimally, the caller
+ * is doing the work and needs the pages. More
+ * importantly, if the block was always converted to
+ * MIGRATE_UNMOVABLE or another type then the number
+ * of pageblocks that cannot be completely freed
+ * may increase.
+ */
+ if (order < pageblock_order)
+ ret = move_freepages_block(zone, page,
+ MIGRATE_HIGHATOMIC,
+ ac->migratetype);
+ else {
+ move_to_free_list(page, zone, order,
+ MIGRATE_HIGHATOMIC,
+ ac->migratetype);
+ change_pageblock_range(page, order,
+ ac->migratetype);
+ ret = 1;
+ }
+ /*
+ * Reserving the block(s) already succeeded,
+ * so this should not fail on zone boundaries.
+ */
+ WARN_ON_ONCE(ret == -1);
+ if (ret > 0) {
+ spin_unlock_irqrestore(&zone->lock, flags);
+ return ret;
+ }
+ }
+ spin_unlock_irqrestore(&zone->lock, flags);
+ }
+
+ return false;
+}
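
The clamping above, in isolation: unreserving through a sub-block page releases a whole pageblock's worth of reservation, and the result is clamped so the counter cannot underflow. A standalone sketch:

    #include <stdio.h>

    static unsigned long toy_unreserve(unsigned long reserved,
                                       unsigned long pageblock_pages,
                                       unsigned int order)
    {
        unsigned long size = 1UL << order;

        if (size < pageblock_pages)
            size = pageblock_pages;  /* sub-block page: whole block goes */
        if (size > reserved)
            size = reserved;         /* clamp; never underflow the counter */
        return reserved - size;
    }

    int main(void)
    {
        /* one 512-page block reserved, unreserved via an order-3 page */
        printf("%lu\n", toy_unreserve(512, 512, 3));   /* 0 */
        return 0;
    }
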
+
static inline long __zone_watermark_unusable_free(struct zone *z,
unsigned int order, unsigned int alloc_flags)
{
@@ -3359,6 +3468,11 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
*/
alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
+ if (defrag_mode) {
+ alloc_flags |= ALLOC_NOFRAGMENT;
+ return alloc_flags;
+ }
+
#ifdef CONFIG_ZONE_DMA32
if (!zone)
return alloc_flags;
@@ -3450,7 +3564,7 @@ retry:
continue;
}
- if (no_fallback && nr_online_nodes > 1 &&
+ if (no_fallback && !defrag_mode && nr_online_nodes > 1 &&
zone != zonelist_zone(ac->preferred_zoneref)) {
int local_nid;
@@ -3561,7 +3675,7 @@ try_this_zone:
* It's possible on a UMA machine to get through all zones that are
* fragmented. If avoiding fragmentation, reset and try again.
*/
- if (no_fallback) {
+ if (no_fallback && !defrag_mode) {
alloc_flags &= ~ALLOC_NOFRAGMENT;
goto retry;
}
@@ -4040,15 +4154,21 @@ static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
struct zone *zone;
pg_data_t *last_pgdat = NULL;
enum zone_type highest_zoneidx = ac->highest_zoneidx;
+ unsigned int reclaim_order;
+
+ if (defrag_mode)
+ reclaim_order = max(order, pageblock_order);
+ else
+ reclaim_order = order;
for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
ac->nodemask) {
if (!managed_zone(zone))
continue;
- if (last_pgdat != zone->zone_pgdat) {
- wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
- last_pgdat = zone->zone_pgdat;
- }
+ if (last_pgdat == zone->zone_pgdat)
+ continue;
+ wakeup_kswapd(zone, gfp_mask, reclaim_order, highest_zoneidx);
+ last_pgdat = zone->zone_pgdat;
}
}
@@ -4098,6 +4218,9 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
+ if (defrag_mode)
+ alloc_flags |= ALLOC_NOFRAGMENT;
+
return alloc_flags;
}
@@ -4480,6 +4603,11 @@ retry:
&compaction_retries))
goto retry;
+ /* Reclaim/compaction failed to prevent the fallback */
+ if (defrag_mode && (alloc_flags & ALLOC_NOFRAGMENT)) {
+ alloc_flags &= ~ALLOC_NOFRAGMENT;
+ goto retry;
+ }
/*
* Deal with possible cpuset update races or zonelist updates to avoid
@@ -4901,12 +5029,11 @@ static void ___free_pages(struct page *page, unsigned int order,
{
/* get PageHead before we drop reference */
int head = PageHead(page);
- struct alloc_tag *tag = pgalloc_tag_get(page);
if (put_page_testzero(page))
__free_frozen_pages(page, order, fpi_flags);
else if (!head) {
- pgalloc_tag_sub_pages(tag, (1 << order) - 1);
+ pgalloc_tag_sub_pages(page, (1 << order) - 1);
while (order-- > 0)
__free_frozen_pages(page + (1 << order), order,
fpi_flags);
@@ -4947,7 +5074,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
split_page_owner(page, order, 0);
pgalloc_tag_split(page_folio(page), order, 0);
- split_page_memcg(page, order, 0);
+ split_page_memcg(page, order);
while (page < --last)
set_page_refcounted(last);
@@ -5910,6 +6037,7 @@ static void calculate_totalreserve_pages(void)
}
}
totalreserve_pages = reserve_pages;
+ trace_mm_calculate_totalreserve_pages(totalreserve_pages);
}
/*
@@ -5939,6 +6067,8 @@ static void setup_per_zone_lowmem_reserve(void)
zone->lowmem_reserve[j] = 0;
else
zone->lowmem_reserve[j] = managed_pages / ratio;
+ trace_mm_setup_per_zone_lowmem_reserve(zone, upper_zone,
+ zone->lowmem_reserve[j]);
}
}
}
@@ -6002,6 +6132,7 @@ static void __setup_per_zone_wmarks(void)
zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;
+ trace_mm_setup_per_zone_wmarks(zone);
spin_unlock_irqrestore(&zone->lock, flags);
}
@@ -6275,6 +6406,15 @@ static const struct ctl_table page_alloc_sysctl_table[] = {
.extra2 = SYSCTL_THREE_THOUSAND,
},
{
+ .procname = "defrag_mode",
+ .data = &defrag_mode,
+ .maxlen = sizeof(defrag_mode),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
.procname = "percpu_pagelist_high_fraction",
.data = &percpu_pagelist_high_fraction,
.maxlen = sizeof(percpu_pagelist_high_fraction),
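
With this entry registered in the vm table, the knob appears as /proc/sys/vm/defrag_mode. A minimal user-space toggle, assuming the patch is applied and the caller has the needed privileges:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sys/vm/defrag_mode", "w");

        if (!f) {
            perror("defrag_mode");  /* kernel without the patch, or no perms */
            return 1;
        }
        fputs("1\n", f);            /* SYSCTL_ZERO..SYSCTL_ONE: 0 off, 1 on */
        return fclose(f) != 0;
    }
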
@@ -7245,6 +7385,9 @@ struct page *try_alloc_pages_noprof(int nid, unsigned int order)
/* Unlike regular alloc_pages() there is no __alloc_pages_slowpath(). */
+ if (page)
+ set_page_refcounted(page);
+
if (memcg_kmem_online() && page &&
unlikely(__memcg_kmem_charge_page(page, alloc_gfp, order) != 0)) {
free_pages_nolock(page, order);
diff --git a/mm/page_counter.c b/mm/page_counter.c
index af23f927611b..661e0f2a5127 100644
--- a/mm/page_counter.c
+++ b/mm/page_counter.c
@@ -121,6 +121,7 @@ bool page_counter_try_charge(struct page_counter *counter,
{
struct page_counter *c;
bool protection = track_protection(counter);
+ bool track_failcnt = counter->track_failcnt;
for (c = counter; c; c = c->parent) {
long new;
@@ -146,7 +147,8 @@ bool page_counter_try_charge(struct page_counter *counter,
* inaccuracy in the failcnt which is only used
* to report stats.
*/
- data_race(c->failcnt++);
+ if (track_failcnt)
+ data_race(c->failcnt++);
*fail = c;
goto failed;
}
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 641d93f6af4c..c351fdfe9e9a 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -508,6 +508,19 @@ void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
#endif
/**
+ * page_ext_lookup() - Lookup a page extension for a PFN.
+ * @pfn: PFN of the page we're interested in.
+ *
+ * Must be called with RCU read lock taken and @pfn must be valid.
+ *
+ * Return: NULL if no page_ext exists for this page.
+ */
+struct page_ext *page_ext_lookup(unsigned long pfn)
+{
+ return lookup_page_ext(pfn_to_page(pfn));
+}
+
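Per the kernel-doc, callers supply their own RCU protection and a valid PFN; an illustrative (kernel-context, not standalone) call site might look like:

    /* Illustrative only -- not a real call site in this patch. */
    struct page_ext *ext;

    if (!pfn_valid(pfn))
            return;

    rcu_read_lock();
    ext = page_ext_lookup(pfn);
    if (ext) {
            /* ... read the extension data for this pfn ... */
    }
    rcu_read_unlock();
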
+/**
* page_ext_get() - Get the extended information for a page.
* @page: The page we're interested in.
*
diff --git a/mm/page_idle.c b/mm/page_idle.c
index 947c7c7a3728..408aaf29a3ea 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -62,9 +62,14 @@ static bool page_idle_clear_pte_refs_one(struct folio *folio,
/*
* For PTE-mapped THP, one sub page is referenced,
* the whole THP is referenced.
+ *
+ * PFN swap PTEs, such as device-exclusive ones, that
+ * actually map pages are "old" from a CPU perspective.
+ * The MMU notifier takes care of any device aspects.
*/
- if (ptep_clear_young_notify(vma, addr, pvmw.pte))
- referenced = true;
+ if (likely(pte_present(ptep_get(pvmw.pte))))
+ referenced |= ptep_test_and_clear_young(vma, addr, pvmw.pte);
+ referenced |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);
} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
referenced = true;
diff --git a/mm/page_io.c b/mm/page_io.c
index 9b983de351f9..4bce19df557b 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -638,11 +638,11 @@ void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
if (swap_read_folio_zeromap(folio)) {
folio_unlock(folio);
goto finish;
- } else if (zswap_load(folio)) {
- folio_unlock(folio);
- goto finish;
}
+ if (zswap_load(folio) != -ENOENT)
+ goto finish;
+
/* We have to read from slower devices. Increase zswap protection. */
zswap_folio_swapin(folio);
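
The comparison against -ENOENT implies zswap_load() now returns an errno-style value rather than a bool: only "not present in zswap" falls through to the device read, while both a hit and a hard error finish the folio. A toy restatement of that dispatch, with the return-value meanings assumed from this hunk alone:

    #include <errno.h>
    #include <stdio.h>

    static const char *toy_swap_read_dispatch(int zswap_ret)
    {
        if (zswap_ret != -ENOENT)   /* hit (0) or hard error: done */
            return zswap_ret ? "finish (error)" : "finish (zswap hit)";
        return "miss: read from swap device, raise zswap protection";
    }

    int main(void)
    {
        printf("%s\n", toy_swap_read_dispatch(0));
        printf("%s\n", toy_swap_read_dispatch(-ENOENT));
        return 0;
    }
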
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index a051a29e95ad..b2fc5266e3d2 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -83,7 +83,14 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
unsigned int skip_pages;
if (PageHuge(page)) {
- if (!hugepage_migration_supported(folio_hstate(folio)))
+ struct hstate *h;
+
+ /*
+ * The huge page may already have been freed, so we
+ * cannot use folio_hstate() directly.
+ */
+ h = size_to_hstate(folio_size(folio));
+ if (h && !hugepage_migration_supported(h))
return page;
} else if (!folio_test_lru(folio) && !__folio_test_movable(folio)) {
return page;
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 90e31d0e3ed7..cc4a6916eec6 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -229,17 +229,19 @@ static void dec_stack_record_count(depot_stack_handle_t handle,
handle);
}
-static inline void __update_page_owner_handle(struct page_ext *page_ext,
+static inline void __update_page_owner_handle(struct page *page,
depot_stack_handle_t handle,
unsigned short order,
gfp_t gfp_mask,
short last_migrate_reason, u64 ts_nsec,
pid_t pid, pid_t tgid, char *comm)
{
- int i;
+ struct page_ext_iter iter;
+ struct page_ext *page_ext;
struct page_owner *page_owner;
- for (i = 0; i < (1 << order); i++) {
+ rcu_read_lock();
+ for_each_page_ext(page, 1 << order, page_ext, iter) {
page_owner = get_page_owner(page_ext);
page_owner->handle = handle;
page_owner->order = order;
@@ -252,20 +254,22 @@ static inline void __update_page_owner_handle(struct page_ext *page_ext,
sizeof(page_owner->comm));
__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
- page_ext = page_ext_next(page_ext);
}
+ rcu_read_unlock();
}
-static inline void __update_page_owner_free_handle(struct page_ext *page_ext,
+static inline void __update_page_owner_free_handle(struct page *page,
depot_stack_handle_t handle,
unsigned short order,
pid_t pid, pid_t tgid,
u64 free_ts_nsec)
{
- int i;
+ struct page_ext_iter iter;
+ struct page_ext *page_ext;
struct page_owner *page_owner;
- for (i = 0; i < (1 << order); i++) {
+ rcu_read_lock();
+ for_each_page_ext(page, 1 << order, page_ext, iter) {
page_owner = get_page_owner(page_ext);
/* Only __reset_page_owner() wants to clear the bit */
if (handle) {
@@ -275,8 +279,8 @@ static inline void __update_page_owner_free_handle(struct page_ext *page_ext,
page_owner->free_ts_nsec = free_ts_nsec;
page_owner->free_pid = current->pid;
page_owner->free_tgid = current->tgid;
- page_ext = page_ext_next(page_ext);
}
+ rcu_read_unlock();
}
void __reset_page_owner(struct page *page, unsigned short order)
@@ -293,6 +297,7 @@ void __reset_page_owner(struct page *page, unsigned short order)
page_owner = get_page_owner(page_ext);
alloc_handle = page_owner->handle;
+ page_ext_put(page_ext);
/*
* Do not specify GFP_NOWAIT to make gfpflags_allow_spinning() == false
@@ -301,9 +306,8 @@ void __reset_page_owner(struct page *page, unsigned short order)
* to signal stack_depot to avoid spin_locks.
*/
handle = save_stack(__GFP_NOWARN);
- __update_page_owner_free_handle(page_ext, handle, order, current->pid,
+ __update_page_owner_free_handle(page, handle, order, current->pid,
current->tgid, free_ts_nsec);
- page_ext_put(page_ext);
if (alloc_handle != early_handle)
/*
@@ -319,19 +323,13 @@ void __reset_page_owner(struct page *page, unsigned short order)
noinline void __set_page_owner(struct page *page, unsigned short order,
gfp_t gfp_mask)
{
- struct page_ext *page_ext;
u64 ts_nsec = local_clock();
depot_stack_handle_t handle;
handle = save_stack(gfp_mask);
-
- page_ext = page_ext_get(page);
- if (unlikely(!page_ext))
- return;
- __update_page_owner_handle(page_ext, handle, order, gfp_mask, -1,
+ __update_page_owner_handle(page, handle, order, gfp_mask, -1,
ts_nsec, current->pid, current->tgid,
current->comm);
- page_ext_put(page_ext);
inc_stack_record_count(handle, gfp_mask, 1 << order);
}
@@ -350,44 +348,42 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
void __split_page_owner(struct page *page, int old_order, int new_order)
{
- int i;
- struct page_ext *page_ext = page_ext_get(page);
+ struct page_ext_iter iter;
+ struct page_ext *page_ext;
struct page_owner *page_owner;
- if (unlikely(!page_ext))
- return;
-
- for (i = 0; i < (1 << old_order); i++) {
+ rcu_read_lock();
+ for_each_page_ext(page, 1 << old_order, page_ext, iter) {
page_owner = get_page_owner(page_ext);
page_owner->order = new_order;
- page_ext = page_ext_next(page_ext);
}
- page_ext_put(page_ext);
+ rcu_read_unlock();
}
void __folio_copy_owner(struct folio *newfolio, struct folio *old)
{
- int i;
- struct page_ext *old_ext;
- struct page_ext *new_ext;
+ struct page_ext *page_ext;
+ struct page_ext_iter iter;
struct page_owner *old_page_owner;
struct page_owner *new_page_owner;
depot_stack_handle_t migrate_handle;
- old_ext = page_ext_get(&old->page);
- if (unlikely(!old_ext))
+ page_ext = page_ext_get(&old->page);
+ if (unlikely(!page_ext))
return;
- new_ext = page_ext_get(&newfolio->page);
- if (unlikely(!new_ext)) {
- page_ext_put(old_ext);
+ old_page_owner = get_page_owner(page_ext);
+ page_ext_put(page_ext);
+
+ page_ext = page_ext_get(&newfolio->page);
+ if (unlikely(!page_ext))
return;
- }
- old_page_owner = get_page_owner(old_ext);
- new_page_owner = get_page_owner(new_ext);
+ new_page_owner = get_page_owner(page_ext);
+ page_ext_put(page_ext);
+
migrate_handle = new_page_owner->handle;
- __update_page_owner_handle(new_ext, old_page_owner->handle,
+ __update_page_owner_handle(&newfolio->page, old_page_owner->handle,
old_page_owner->order, old_page_owner->gfp_mask,
old_page_owner->last_migrate_reason,
old_page_owner->ts_nsec, old_page_owner->pid,
@@ -397,7 +393,7 @@ void __folio_copy_owner(struct folio *newfolio, struct folio *old)
* will be freed after migration. Keep them until then as they may be
* useful.
*/
- __update_page_owner_free_handle(new_ext, 0, old_page_owner->order,
+ __update_page_owner_free_handle(&newfolio->page, 0, old_page_owner->order,
old_page_owner->free_pid,
old_page_owner->free_tgid,
old_page_owner->free_ts_nsec);
@@ -406,14 +402,12 @@ void __folio_copy_owner(struct folio *newfolio, struct folio *old)
* for the new one and the old folio otherwise there will be an imbalance
* when subtracting those pages from the stack.
*/
- for (i = 0; i < (1 << new_page_owner->order); i++) {
+ rcu_read_lock();
+ for_each_page_ext(&old->page, 1 << new_page_owner->order, page_ext, iter) {
+ old_page_owner = get_page_owner(page_ext);
old_page_owner->handle = migrate_handle;
- old_ext = page_ext_next(old_ext);
- old_page_owner = get_page_owner(old_ext);
}
-
- page_ext_put(new_ext);
- page_ext_put(old_ext);
+ rcu_read_unlock();
}
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
@@ -513,7 +507,7 @@ static inline int print_page_owner_memcg(char *kbuf, size_t count, int ret,
rcu_read_lock();
memcg_data = READ_ONCE(page->memcg_data);
- if (!memcg_data)
+ if (!memcg_data || PageTail(page))
goto out_unlock;
if (memcg_data & MEMCG_DATA_OBJEXTS)
@@ -819,7 +813,7 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
goto ext_put_continue;
/* Found early allocated page */
- __update_page_owner_handle(page_ext, early_handle, 0, 0,
+ __update_page_owner_handle(page, early_handle, 0, 0,
-1, local_clock(), current->pid,
current->tgid, current->comm);
count++;
diff --git a/mm/page_table_check.c b/mm/page_table_check.c
index 509c6ef8de40..68109ee93841 100644
--- a/mm/page_table_check.c
+++ b/mm/page_table_check.c
@@ -62,24 +62,20 @@ static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
*/
static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
{
+ struct page_ext_iter iter;
struct page_ext *page_ext;
struct page *page;
- unsigned long i;
bool anon;
if (!pfn_valid(pfn))
return;
page = pfn_to_page(pfn);
- page_ext = page_ext_get(page);
-
- if (!page_ext)
- return;
-
BUG_ON(PageSlab(page));
anon = PageAnon(page);
- for (i = 0; i < pgcnt; i++) {
+ rcu_read_lock();
+ for_each_page_ext(page, pgcnt, page_ext, iter) {
struct page_table_check *ptc = get_page_table_check(page_ext);
if (anon) {
@@ -89,9 +85,8 @@ static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
BUG_ON(atomic_read(&ptc->anon_map_count));
BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
}
- page_ext = page_ext_next(page_ext);
}
- page_ext_put(page_ext);
+ rcu_read_unlock();
}
/*
@@ -102,24 +97,20 @@ static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
bool rw)
{
+ struct page_ext_iter iter;
struct page_ext *page_ext;
struct page *page;
- unsigned long i;
bool anon;
if (!pfn_valid(pfn))
return;
page = pfn_to_page(pfn);
- page_ext = page_ext_get(page);
-
- if (!page_ext)
- return;
-
BUG_ON(PageSlab(page));
anon = PageAnon(page);
- for (i = 0; i < pgcnt; i++) {
+ rcu_read_lock();
+ for_each_page_ext(page, pgcnt, page_ext, iter) {
struct page_table_check *ptc = get_page_table_check(page_ext);
if (anon) {
@@ -129,9 +120,8 @@ static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
BUG_ON(atomic_read(&ptc->anon_map_count));
BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
}
- page_ext = page_ext_next(page_ext);
}
- page_ext_put(page_ext);
+ rcu_read_unlock();
}
/*
@@ -140,24 +130,19 @@ static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
*/
void __page_table_check_zero(struct page *page, unsigned int order)
{
+ struct page_ext_iter iter;
struct page_ext *page_ext;
- unsigned long i;
BUG_ON(PageSlab(page));
- page_ext = page_ext_get(page);
-
- if (!page_ext)
- return;
-
- for (i = 0; i < (1ul << order); i++) {
+ rcu_read_lock();
+ for_each_page_ext(page, 1 << order, page_ext, iter) {
struct page_table_check *ptc = get_page_table_check(page_ext);
BUG_ON(atomic_read(&ptc->anon_map_count));
BUG_ON(atomic_read(&ptc->file_map_count));
- page_ext = page_ext_next(page_ext);
}
- page_ext_put(page_ext);
+ rcu_read_unlock();
}
void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
@@ -196,9 +181,8 @@ EXPORT_SYMBOL(__page_table_check_pud_clear);
/* Whether the swap entry cached writable information */
static inline bool swap_cached_writable(swp_entry_t entry)
{
- return is_writable_device_exclusive_entry(entry) ||
- is_writable_device_private_entry(entry) ||
- is_writable_migration_entry(entry);
+ return is_writable_device_private_entry(entry) ||
+ is_writable_migration_entry(entry);
}
static inline void page_table_check_pte_flags(pte_t pte)
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 81839a9e74f1..e463c3be934a 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -84,6 +84,7 @@ again:
* mapped at the @pvmw->pte
* @pvmw: page_vma_mapped_walk struct, includes a pair pte and pfn range
* for checking
+ * @pte_nr: the number of small pages described by @pvmw->pte.
*
* page_vma_mapped_walk() found a place where pfn range is *potentially*
* mapped. check_pte() has to validate this.
@@ -100,7 +101,7 @@ again:
* Otherwise, return false.
*
*/
-static bool check_pte(struct page_vma_mapped_walk *pvmw)
+static bool check_pte(struct page_vma_mapped_walk *pvmw, unsigned long pte_nr)
{
unsigned long pfn;
pte_t ptent = ptep_get(pvmw->pte);
@@ -111,8 +112,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
return false;
entry = pte_to_swp_entry(ptent);
- if (!is_migration_entry(entry) &&
- !is_device_exclusive_entry(entry))
+ if (!is_migration_entry(entry))
return false;
pfn = swp_offset_pfn(entry);
@@ -133,7 +133,11 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
pfn = pte_pfn(ptent);
}
- return (pfn - pvmw->pfn) < pvmw->nr_pages;
+ if ((pfn + pte_nr - 1) < pvmw->pfn)
+ return false;
+ if (pfn > (pvmw->pfn + pvmw->nr_pages - 1))
+ return false;
+ return true;
}
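
The new return logic is a standard closed-interval overlap test between the pages mapped by the PTE and the folio's PFN range. A standalone sketch with a worked hugetlb-style example:

    #include <stdbool.h>
    #include <stdio.h>

    /* Does [pfn, pfn + pte_nr) intersect [start, start + nr_pages)? */
    static bool toy_pfn_in_range(unsigned long pfn, unsigned long pte_nr,
                                 unsigned long start, unsigned long nr_pages)
    {
        if (pfn + pte_nr - 1 < start)
            return false;          /* mapping ends before the folio */
        if (pfn > start + nr_pages - 1)
            return false;          /* mapping starts after the folio */
        return true;
    }

    int main(void)
    {
        /* a PTE covering pfns [96,112) vs. a folio at pfns [100,116) */
        printf("%d\n", toy_pfn_in_range(96, 16, 100, 16));  /* 1: overlap */
        printf("%d\n", toy_pfn_in_range(64, 16, 100, 16));  /* 0: disjoint */
        return 0;
    }
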
/* Returns true if the two ranges overlap. Careful to not overflow. */
@@ -208,7 +212,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
return false;
pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
- if (!check_pte(pvmw))
+ if (!check_pte(pvmw, pages_per_huge_page(hstate)))
return not_found(pvmw);
return true;
}
@@ -291,7 +295,7 @@ restart:
goto next_pte;
}
this_pte:
- if (check_pte(pvmw))
+ if (check_pte(pvmw, 1))
return true;
next_pte:
do {
diff --git a/mm/percpu.c b/mm/percpu.c
index 7b5835356d1e..b35494c8ede2 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1745,7 +1745,7 @@ void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,
gfp = current_gfp_context(gfp);
/* whitelisted flags that can be passed to the backing allocators */
pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
- is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
+ is_atomic = !gfpflags_allow_blocking(gfp);
do_warn = !(gfp & __GFP_NOWARN);
/*
@@ -2191,7 +2191,12 @@ static void pcpu_balance_workfn(struct work_struct *work)
* to grow other chunks. This then gives pcpu_reclaim_populated() time
* to move fully free chunks to the active list to be freed if
* appropriate.
+ *
+ * Enforce GFP_NOIO allocations because we have pcpu_alloc users
+ * constrained to GFP_NOIO/NOFS contexts, and they could form a lock
+ * dependency through pcpu_alloc_mutex.
*/
+ unsigned int flags = memalloc_noio_save();
mutex_lock(&pcpu_alloc_mutex);
spin_lock_irq(&pcpu_lock);
@@ -2202,6 +2207,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
spin_unlock_irq(&pcpu_lock);
mutex_unlock(&pcpu_alloc_mutex);
+ memalloc_noio_restore(flags);
}
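
The save/restore pair scopes the constraint rather than threading gfp flags through every callee; the pattern in isolation (illustrative kernel-context sketch):

    /* Illustrative only: the scoped GFP_NOIO pattern used above. */
    unsigned int noio_flags = memalloc_noio_save();

    /* ... take pcpu_alloc_mutex and allocate; __GFP_IO is implicitly
     * stripped from any allocation made in this scope ... */

    memalloc_noio_restore(noio_flags);
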
/**
diff --git a/mm/rmap.c b/mm/rmap.c
index c6c4d4ea29a7..67bb273dfb80 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -672,7 +672,7 @@ void try_to_unmap_flush_dirty(void)
(TLB_FLUSH_BATCH_PENDING_MASK / 2)
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
- unsigned long uaddr)
+ unsigned long start, unsigned long end)
{
struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
int batch;
@@ -681,7 +681,7 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
if (!pte_accessible(mm, pteval))
return;
- arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr);
+ arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, start, end);
tlb_ubc->flush_required = true;
/*
@@ -757,7 +757,7 @@ void flush_tlb_batched_pending(struct mm_struct *mm)
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
- unsigned long uaddr)
+ unsigned long start, unsigned long end)
{
}
@@ -889,7 +889,7 @@ static bool folio_referenced_one(struct folio *folio,
if ((!atomic_read(&vma->vm_mm->mm_users) ||
check_stable_address_space(vma->vm_mm)) &&
folio_test_anon(folio) && folio_test_swapbacked(folio) &&
- !folio_likely_mapped_shared(folio)) {
+ !folio_maybe_mapped_shared(folio)) {
pra->referenced = -1;
page_vma_mapped_walk_done(&pvmw);
return false;
@@ -1044,6 +1044,14 @@ static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
pte_t *pte = pvmw->pte;
pte_t entry = ptep_get(pte);
+ /*
+ * PFN swap PTEs, such as device-exclusive ones, that
+ * actually map pages are clean and not writable from a
+ * CPU perspective. The MMU notifier takes care of any
+ * device aspects.
+ */
+ if (!pte_present(entry))
+ continue;
if (!pte_dirty(entry) && !pte_write(entry))
continue;
@@ -1127,6 +1135,80 @@ int folio_mkclean(struct folio *folio)
}
EXPORT_SYMBOL_GPL(folio_mkclean);
+struct wrprotect_file_state {
+ int cleaned;
+ pgoff_t pgoff;
+ unsigned long pfn;
+ unsigned long nr_pages;
+};
+
+static bool mapping_wrprotect_range_one(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long address, void *arg)
+{
+ struct wrprotect_file_state *state = (struct wrprotect_file_state *)arg;
+ struct page_vma_mapped_walk pvmw = {
+ .pfn = state->pfn,
+ .nr_pages = state->nr_pages,
+ .pgoff = state->pgoff,
+ .vma = vma,
+ .address = address,
+ .flags = PVMW_SYNC,
+ };
+
+ state->cleaned += page_vma_mkclean_one(&pvmw);
+
+ return true;
+}
+
+static void __rmap_walk_file(struct folio *folio, struct address_space *mapping,
+ pgoff_t pgoff_start, unsigned long nr_pages,
+ struct rmap_walk_control *rwc, bool locked);
+
+/**
+ * mapping_wrprotect_range() - Write-protect all mappings in a specified range.
+ *
+ * @mapping: The mapping whose reverse mapping should be traversed.
+ * @pgoff: The page offset at which @pfn is mapped within @mapping.
+ * @pfn: The PFN of the page mapped in @mapping at @pgoff.
+ * @nr_pages: The number of physically contiguous base pages spanned.
+ *
+ * Traverses the reverse mapping, finding all VMAs which contain a shared
+ * mapping of the pages in the specified range in @mapping, and write-protects
+ * them (that is, updates the page tables to mark the mappings read-only such
+ * that a write protection fault arises when the mappings are written to).
+ *
+ * The @pfn value need not refer to a folio, but rather can reference a kernel
+ * allocation which is mapped into userland. We therefore do not require that
+ * the page maps to a folio with a valid mapping or index field, rather the
+ * caller specifies these in @mapping and @pgoff.
+ *
+ * Return: the number of write-protected PTEs, or an error.
+ */
+int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff,
+ unsigned long pfn, unsigned long nr_pages)
+{
+ struct wrprotect_file_state state = {
+ .cleaned = 0,
+ .pgoff = pgoff,
+ .pfn = pfn,
+ .nr_pages = nr_pages,
+ };
+ struct rmap_walk_control rwc = {
+ .arg = (void *)&state,
+ .rmap_one = mapping_wrprotect_range_one,
+ .invalid_vma = invalid_mkclean_vma,
+ };
+
+ if (!mapping)
+ return 0;
+
+ __rmap_walk_file(/* folio = */NULL, mapping, pgoff, nr_pages, &rwc,
+ /* locked = */false);
+
+ return state.cleaned;
+}
+EXPORT_SYMBOL_GPL(mapping_wrprotect_range);
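
An illustrative caller following the kernel-doc contract above; this is a hypothetical (kernel-context) sketch, with mapping, pgoff, pfn and nr_pages assumed to come from the driver's own bookkeeping for a kernel buffer it mapped into userspace:

    /* Illustrative only: values come from the driver's bookkeeping. */
    int cleaned = mapping_wrprotect_range(mapping, pgoff, pfn, nr_pages);

    if (cleaned < 0)
            return cleaned;       /* propagate the error */
    /* 'cleaned' PTEs are now read-only; the next write faults. */
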
+
/**
* pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of
* [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff)
@@ -1160,8 +1242,8 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
}
static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
- struct page *page, int nr_pages, enum rmap_level level,
- int *nr_pmdmapped)
+ struct page *page, int nr_pages, struct vm_area_struct *vma,
+ enum rmap_level level, int *nr_pmdmapped)
{
atomic_t *mapped = &folio->_nr_pages_mapped;
const int orig_nr_pages = nr_pages;
@@ -1176,6 +1258,16 @@ static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
break;
}
+ if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
+ nr = folio_add_return_large_mapcount(folio, orig_nr_pages, vma);
+ if (nr == orig_nr_pages)
+ /* Was completely unmapped. */
+ nr = folio_large_nr_pages(folio);
+ else
+ nr = 0;
+ break;
+ }
+
do {
first += atomic_inc_and_test(&page->_mapcount);
} while (page++, --nr_pages > 0);
@@ -1184,15 +1276,34 @@ static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
atomic_add_return_relaxed(first, mapped) < ENTIRELY_MAPPED)
nr = first;
- atomic_add(orig_nr_pages, &folio->_large_mapcount);
+ folio_add_large_mapcount(folio, orig_nr_pages, vma);
break;
case RMAP_LEVEL_PMD:
+ case RMAP_LEVEL_PUD:
first = atomic_inc_and_test(&folio->_entire_mapcount);
+ if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
+ if (level == RMAP_LEVEL_PMD && first)
+ *nr_pmdmapped = folio_large_nr_pages(folio);
+ nr = folio_inc_return_large_mapcount(folio, vma);
+ if (nr == 1)
+ /* Was completely unmapped. */
+ nr = folio_large_nr_pages(folio);
+ else
+ nr = 0;
+ break;
+ }
+
if (first) {
nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped);
if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) {
- *nr_pmdmapped = folio_nr_pages(folio);
- nr = *nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
+ nr_pages = folio_large_nr_pages(folio);
+ /*
+ * We only track PMD mappings of PMD-sized
+ * folios separately.
+ */
+ if (level == RMAP_LEVEL_PMD)
+ *nr_pmdmapped = nr_pages;
+ nr = nr_pages - (nr & FOLIO_PAGES_MAPPED);
/* Raced ahead of a remove and another add? */
if (unlikely(nr < 0))
nr = 0;
@@ -1201,7 +1312,7 @@ static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
nr = 0;
}
}
- atomic_inc(&folio->_large_mapcount);
+ folio_inc_large_mapcount(folio, vma);
break;
}
return nr;
@@ -1322,7 +1433,7 @@ static __always_inline void __folio_add_anon_rmap(struct folio *folio,
VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
- nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped);
+ nr = __folio_add_rmap(folio, page, nr_pages, vma, level, &nr_pmdmapped);
if (likely(!folio_test_ksm(folio)))
__page_check_anon_rmap(folio, page, vma, address);
@@ -1338,15 +1449,32 @@ static __always_inline void __folio_add_anon_rmap(struct folio *folio,
case RMAP_LEVEL_PMD:
SetPageAnonExclusive(page);
break;
+ case RMAP_LEVEL_PUD:
+ /*
+ * Keep the compiler happy, we don't support anonymous
+ * PUD mappings.
+ */
+ WARN_ON_ONCE(1);
+ break;
}
}
+
+ VM_WARN_ON_FOLIO(!folio_test_large(folio) && PageAnonExclusive(page) &&
+ atomic_read(&folio->_mapcount) > 0, folio);
for (i = 0; i < nr_pages; i++) {
struct page *cur_page = page + i;
- /* While PTE-mapping a THP we have a PMD and a PTE mapping. */
- VM_WARN_ON_FOLIO((atomic_read(&cur_page->_mapcount) > 0 ||
- (folio_test_large(folio) &&
- folio_entire_mapcount(folio) > 1)) &&
+ VM_WARN_ON_FOLIO(folio_test_large(folio) &&
+ folio_entire_mapcount(folio) > 1 &&
+ PageAnonExclusive(cur_page), folio);
+ if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT))
+ continue;
+
+ /*
+ * While PTE-mapping a THP we have a PMD and a PTE
+ * mapping.
+ */
+ VM_WARN_ON_FOLIO(atomic_read(&cur_page->_mapcount) > 0 &&
PageAnonExclusive(cur_page), folio);
}
@@ -1426,14 +1554,11 @@ void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page,
void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, rmap_t flags)
{
- const int nr = folio_nr_pages(folio);
const bool exclusive = flags & RMAP_EXCLUSIVE;
- int nr_pmdmapped = 0;
+ int nr = 1, nr_pmdmapped = 0;
VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio);
- VM_BUG_ON_VMA(address < vma->vm_start ||
- address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
/*
* VM_DROPPABLE mappings don't swap; instead they're just dropped when
@@ -1451,29 +1576,35 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
} else if (!folio_test_pmd_mappable(folio)) {
int i;
+ nr = folio_large_nr_pages(folio);
for (i = 0; i < nr; i++) {
struct page *page = folio_page(folio, i);
- /* increment count (starts at -1) */
- atomic_set(&page->_mapcount, 0);
+ if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
+ /* increment count (starts at -1) */
+ atomic_set(&page->_mapcount, 0);
if (exclusive)
SetPageAnonExclusive(page);
}
- /* increment count (starts at -1) */
- atomic_set(&folio->_large_mapcount, nr - 1);
- atomic_set(&folio->_nr_pages_mapped, nr);
+ folio_set_large_mapcount(folio, nr, vma);
+ if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
+ atomic_set(&folio->_nr_pages_mapped, nr);
} else {
+ nr = folio_large_nr_pages(folio);
/* increment count (starts at -1) */
atomic_set(&folio->_entire_mapcount, 0);
- /* increment count (starts at -1) */
- atomic_set(&folio->_large_mapcount, 0);
- atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED);
+ folio_set_large_mapcount(folio, 1, vma);
+ if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
+ atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED);
if (exclusive)
SetPageAnonExclusive(&folio->page);
nr_pmdmapped = nr;
}
+ VM_WARN_ON_ONCE(address < vma->vm_start ||
+ address + (nr << PAGE_SHIFT) > vma->vm_end);
+
__folio_mod_stat(folio, nr, nr_pmdmapped);
mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
}
@@ -1486,7 +1617,7 @@ static __always_inline void __folio_add_file_rmap(struct folio *folio,
VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
- nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped);
+ nr = __folio_add_rmap(folio, page, nr_pages, vma, level, &nr_pmdmapped);
__folio_mod_stat(folio, nr, nr_pmdmapped);
/* See comments in folio_add_anon_rmap_*() */
@@ -1531,6 +1662,27 @@ void folio_add_file_rmap_pmd(struct folio *folio, struct page *page,
#endif
}
+/**
+ * folio_add_file_rmap_pud - add a PUD mapping to a page range of a folio
+ * @folio: The folio to add the mapping to
+ * @page: The first page to add
+ * @vma: The vm area in which the mapping is added
+ *
+ * The page range of the folio is defined by [page, page + HPAGE_PUD_NR)
+ *
+ * The caller needs to hold the page table lock.
+ */
+void folio_add_file_rmap_pud(struct folio *folio, struct page *page,
+ struct vm_area_struct *vma)
+{
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+ __folio_add_file_rmap(folio, page, HPAGE_PUD_NR, vma, RMAP_LEVEL_PUD);
+#else
+ WARN_ON_ONCE(true);
+#endif
+}
+
static __always_inline void __folio_remove_rmap(struct folio *folio,
struct page *page, int nr_pages, struct vm_area_struct *vma,
enum rmap_level level)
@@ -1548,7 +1700,20 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
break;
}
- atomic_sub(nr_pages, &folio->_large_mapcount);
+ if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
+ nr = folio_sub_return_large_mapcount(folio, nr_pages, vma);
+ if (!nr) {
+ /* Now completely unmapped. */
+ nr = folio_nr_pages(folio);
+ } else {
+ partially_mapped = nr < folio_large_nr_pages(folio) &&
+ !folio_entire_mapcount(folio);
+ nr = 0;
+ }
+ break;
+ }
+
+ folio_sub_large_mapcount(folio, nr_pages, vma);
do {
last += atomic_add_negative(-1, &page->_mapcount);
} while (page++, --nr_pages > 0);
@@ -1560,13 +1725,32 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
partially_mapped = nr && atomic_read(mapped);
break;
case RMAP_LEVEL_PMD:
- atomic_dec(&folio->_large_mapcount);
+ case RMAP_LEVEL_PUD:
+ if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
+ last = atomic_add_negative(-1, &folio->_entire_mapcount);
+ if (level == RMAP_LEVEL_PMD && last)
+ nr_pmdmapped = folio_large_nr_pages(folio);
+ nr = folio_dec_return_large_mapcount(folio, vma);
+ if (!nr) {
+ /* Now completely unmapped. */
+ nr = folio_large_nr_pages(folio);
+ } else {
+ partially_mapped = last &&
+ nr < folio_large_nr_pages(folio);
+ nr = 0;
+ }
+ break;
+ }
+
+ folio_dec_large_mapcount(folio, vma);
last = atomic_add_negative(-1, &folio->_entire_mapcount);
if (last) {
nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped);
if (likely(nr < ENTIRELY_MAPPED)) {
- nr_pmdmapped = folio_nr_pages(folio);
- nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
+ nr_pages = folio_large_nr_pages(folio);
+ if (level == RMAP_LEVEL_PMD)
+ nr_pmdmapped = nr_pages;
+ nr = nr_pages - (nr & FOLIO_PAGES_MAPPED);
/* Raced ahead of another remove and an add? */
if (unlikely(nr < 0))
nr = 0;
@@ -1640,6 +1824,46 @@ void folio_remove_rmap_pmd(struct folio *folio, struct page *page,
#endif
}
+/**
+ * folio_remove_rmap_pud - remove a PUD mapping from a page range of a folio
+ * @folio: The folio to remove the mapping from
+ * @page: The first page to remove
+ * @vma: The vm area from which the mapping is removed
+ *
+ * The page range of the folio is defined by [page, page + HPAGE_PUD_NR)
+ *
+ * The caller needs to hold the page table lock.
+ */
+void folio_remove_rmap_pud(struct folio *folio, struct page *page,
+ struct vm_area_struct *vma)
+{
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+ __folio_remove_rmap(folio, page, HPAGE_PUD_NR, vma, RMAP_LEVEL_PUD);
+#else
+ WARN_ON_ONCE(true);
+#endif
+}
+
+/* We support batch unmapping of PTEs for lazyfree large folios */
+static inline bool can_batch_unmap_folio_ptes(unsigned long addr,
+ struct folio *folio, pte_t *ptep)
+{
+ const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
+ int max_nr = folio_nr_pages(folio);
+ pte_t pte = ptep_get(ptep);
+
+ if (!folio_test_anon(folio) || folio_test_swapbacked(folio))
+ return false;
+ if (pte_unused(pte))
+ return false;
+ if (pte_pfn(pte) != folio_pfn(folio))
+ return false;
+
+ return folio_pte_batch(folio, addr, ptep, pte, max_nr, fpb_flags, NULL,
+ NULL, NULL) == max_nr;
+}
+
/*
* @arg: enum ttu_flags will be passed to this argument
*/
@@ -1648,11 +1872,12 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
{
struct mm_struct *mm = vma->vm_mm;
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
+ bool anon_exclusive, ret = true;
pte_t pteval;
struct page *subpage;
- bool anon_exclusive, ret = true;
struct mmu_notifier_range range;
enum ttu_flags flags = (enum ttu_flags)(long)arg;
+ unsigned long nr_pages = 1, end_addr;
unsigned long pfn;
unsigned long hsz = 0;
@@ -1702,9 +1927,16 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
}
if (!pvmw.pte) {
- if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd,
- folio))
- goto walk_done;
+ if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
+ if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, folio))
+ goto walk_done;
+ /*
+ * unmap_huge_pmd_locked has either already marked
+ * the folio as swap-backed or decided to retain it
+ * due to GUP or speculative references.
+ */
+ goto walk_abort;
+ }
if (flags & TTU_SPLIT_HUGE_PMD) {
/*
@@ -1722,7 +1954,18 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
/* Unexpected PMD-mapped THP? */
VM_BUG_ON_FOLIO(!pvmw.pte, folio);
- pfn = pte_pfn(ptep_get(pvmw.pte));
+ /*
+ * Handle PFN swap PTEs, such as device-exclusive ones, that
+ * actually map pages.
+ */
+ pteval = ptep_get(pvmw.pte);
+ if (likely(pte_present(pteval))) {
+ pfn = pte_pfn(pteval);
+ } else {
+ pfn = swp_offset_pfn(pte_to_swp_entry(pteval));
+ VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
+ }
+
subpage = folio_page(folio, pfn - folio_pfn(folio));
address = pvmw.address;
anon_exclusive = folio_test_anon(folio) &&
@@ -1778,24 +2021,33 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
hugetlb_vma_unlock_write(vma);
}
pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
- } else {
- flush_cache_page(vma, address, pfn);
- /* Nuke the page table entry. */
- if (should_defer_flush(mm, flags)) {
- /*
- * We clear the PTE but do not flush so potentially
- * a remote CPU could still be writing to the folio.
- * If the entry was previously clean then the
- * architecture must guarantee that a clear->dirty
- * transition on a cached TLB entry is written through
- * and traps if the PTE is unmapped.
- */
- pteval = ptep_get_and_clear(mm, address, pvmw.pte);
+ if (pte_dirty(pteval))
+ folio_mark_dirty(folio);
+ } else if (likely(pte_present(pteval))) {
+ if (folio_test_large(folio) && !(flags & TTU_HWPOISON) &&
+ can_batch_unmap_folio_ptes(address, folio, pvmw.pte))
+ nr_pages = folio_nr_pages(folio);
+ end_addr = address + nr_pages * PAGE_SIZE;
+ flush_cache_range(vma, address, end_addr);
- set_tlb_ubc_flush_pending(mm, pteval, address);
- } else {
- pteval = ptep_clear_flush(vma, address, pvmw.pte);
- }
+ /* Nuke the page table entry. */
+ pteval = get_and_clear_full_ptes(mm, address, pvmw.pte, nr_pages, 0);
+ /*
+ * We clear the PTE but do not flush so potentially
+ * a remote CPU could still be writing to the folio.
+ * If the entry was previously clean then the
+ * architecture must guarantee that a clear->dirty
+ * transition on a cached TLB entry is written through
+ * and traps if the PTE is unmapped.
+ */
+ if (should_defer_flush(mm, flags))
+ set_tlb_ubc_flush_pending(mm, pteval, address, end_addr);
+ else
+ flush_tlb_range(vma, address, end_addr);
+ if (pte_dirty(pteval))
+ folio_mark_dirty(folio);
+ } else {
+ pte_clear(mm, address, pvmw.pte);
}
/*
@@ -1805,10 +2057,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
*/
pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval);
- /* Set the dirty flag on the folio now the pte is gone. */
- if (pte_dirty(pteval))
- folio_mark_dirty(folio);
-
/* Update high watermark before we lower rss */
update_hiwater_rss(mm);
@@ -1822,8 +2070,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
dec_mm_counter(mm, mm_counter(folio));
set_pte_at(mm, address, pvmw.pte, pteval);
}
-
- } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
+ } else if (likely(pte_present(pteval)) && pte_unused(pteval) &&
+ !userfaultfd_armed(vma)) {
/*
* The guest indicated that the page content is of no
* interest anymore. Simply discard the pte, vmscan
@@ -1868,40 +2116,41 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
*/
smp_rmb();
- /*
- * The only page refs must be one from isolation
- * plus the rmap(s) (dropped by discard:).
- */
- if (ref_count == 1 + map_count &&
- (!folio_test_dirty(folio) ||
- /*
- * Unlike MADV_FREE mappings, VM_DROPPABLE
- * ones can be dropped even if they've
- * been dirtied.
- */
- (vma->vm_flags & VM_DROPPABLE))) {
- dec_mm_counter(mm, MM_ANONPAGES);
- goto discard;
- }
-
- /*
- * If the folio was redirtied, it cannot be
- * discarded. Remap the page to page table.
- */
- set_pte_at(mm, address, pvmw.pte, pteval);
- /*
- * Unlike MADV_FREE mappings, VM_DROPPABLE ones
- * never get swap backed on failure to drop.
- */
- if (!(vma->vm_flags & VM_DROPPABLE))
+ if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) {
+ /*
+ * The folio was redirtied either through the page table or
+ * through a previously obtained GUP reference.
+ */
+ set_ptes(mm, address, pvmw.pte, pteval, nr_pages);
folio_set_swapbacked(folio);
- goto walk_abort;
+ goto walk_abort;
+ } else if (ref_count != 1 + map_count) {
+ /*
+ * Additional reference. Could be a GUP reference or any
+ * speculative reference. GUP users must mark the folio
+ * dirty if there was a modification. This folio cannot be
+ * reclaimed right now either way, so act just like nothing
+ * happened.
+ * We'll come back here later and detect if the folio was
+ * dirtied when the additional reference is gone.
+ */
+ set_ptes(mm, address, pvmw.pte, pteval, nr_pages);
+ goto walk_abort;
+ }
+ add_mm_counter(mm, MM_ANONPAGES, -nr_pages);
+ goto discard;
}
if (swap_duplicate(entry) < 0) {
set_pte_at(mm, address, pvmw.pte, pteval);
goto walk_abort;
}
+
+ /*
+ * arch_unmap_one() is expected to be a NOP on
+ * architectures where we could have PFN swap PTEs,
+ * so we'll not check/care.
+ */
if (arch_unmap_one(mm, vma, address, pteval) < 0) {
swap_free(entry);
set_pte_at(mm, address, pvmw.pte, pteval);
@@ -1926,10 +2175,17 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
swp_pte = swp_entry_to_pte(entry);
if (anon_exclusive)
swp_pte = pte_swp_mkexclusive(swp_pte);
- if (pte_soft_dirty(pteval))
- swp_pte = pte_swp_mksoft_dirty(swp_pte);
- if (pte_uffd_wp(pteval))
- swp_pte = pte_swp_mkuffd_wp(swp_pte);
+ if (likely(pte_present(pteval))) {
+ if (pte_soft_dirty(pteval))
+ swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ if (pte_uffd_wp(pteval))
+ swp_pte = pte_swp_mkuffd_wp(swp_pte);
+ } else {
+ if (pte_swp_soft_dirty(pteval))
+ swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ if (pte_swp_uffd_wp(pteval))
+ swp_pte = pte_swp_mkuffd_wp(swp_pte);
+ }
set_pte_at(mm, address, pvmw.pte, swp_pte);
} else {
/*
@@ -1946,13 +2202,18 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
dec_mm_counter(mm, mm_counter_file(folio));
}
discard:
- if (unlikely(folio_test_hugetlb(folio)))
+ if (unlikely(folio_test_hugetlb(folio))) {
hugetlb_remove_rmap(folio);
- else
- folio_remove_rmap_pte(folio, subpage, vma);
+ } else {
+ folio_remove_rmap_ptes(folio, subpage, nr_pages, vma);
+ folio_ref_sub(folio, nr_pages - 1);
+ }
if (vma->vm_flags & VM_LOCKED)
mlock_drain_local();
folio_put(folio);
+ /* We have already batched the entire folio */
+ if (nr_pages > 1)
+ goto walk_done;
continue;
walk_abort:
ret = false;
@@ -2013,9 +2274,9 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
{
struct mm_struct *mm = vma->vm_mm;
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
+ bool anon_exclusive, writable, ret = true;
pte_t pteval;
struct page *subpage;
- bool anon_exclusive, ret = true;
struct mmu_notifier_range range;
enum ttu_flags flags = (enum ttu_flags)(long)arg;
unsigned long pfn;
@@ -2082,24 +2343,19 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
/* Unexpected PMD-mapped THP? */
VM_BUG_ON_FOLIO(!pvmw.pte, folio);
- pfn = pte_pfn(ptep_get(pvmw.pte));
-
- if (folio_is_zone_device(folio)) {
- /*
- * Our PTE is a non-present device exclusive entry and
- * calculating the subpage as for the common case would
- * result in an invalid pointer.
- *
- * Since only PAGE_SIZE pages can currently be
- * migrated, just set it to page. This will need to be
- * changed when hugepage migrations to device private
- * memory are supported.
- */
- VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio);
- subpage = &folio->page;
+ /*
+ * Handle PFN swap PTEs, such as device-exclusive ones, that
+ * actually map pages.
+ */
+ pteval = ptep_get(pvmw.pte);
+ if (likely(pte_present(pteval))) {
+ pfn = pte_pfn(pteval);
} else {
- subpage = folio_page(folio, pfn - folio_pfn(folio));
+ pfn = swp_offset_pfn(pte_to_swp_entry(pteval));
+ VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
}
+
+ subpage = folio_page(folio, pfn - folio_pfn(folio));
address = pvmw.address;
anon_exclusive = folio_test_anon(folio) &&
PageAnonExclusive(subpage);
@@ -2155,7 +2411,10 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
}
/* Nuke the hugetlb page table entry */
pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
- } else {
+ if (pte_dirty(pteval))
+ folio_mark_dirty(folio);
+ writable = pte_write(pteval);
+ } else if (likely(pte_present(pteval))) {
flush_cache_page(vma, address, pfn);
/* Nuke the page table entry. */
if (should_defer_flush(mm, flags)) {
@@ -2169,58 +2428,27 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
*/
pteval = ptep_get_and_clear(mm, address, pvmw.pte);
- set_tlb_ubc_flush_pending(mm, pteval, address);
+ set_tlb_ubc_flush_pending(mm, pteval, address, address + PAGE_SIZE);
} else {
pteval = ptep_clear_flush(vma, address, pvmw.pte);
}
+ if (pte_dirty(pteval))
+ folio_mark_dirty(folio);
+ writable = pte_write(pteval);
+ } else {
+ pte_clear(mm, address, pvmw.pte);
+ writable = is_writable_device_private_entry(pte_to_swp_entry(pteval));
}
- /* Set the dirty flag on the folio now the pte is gone. */
- if (pte_dirty(pteval))
- folio_mark_dirty(folio);
+ VM_WARN_ON_FOLIO(writable && folio_test_anon(folio) &&
+ !anon_exclusive, folio);
/* Update high watermark before we lower rss */
update_hiwater_rss(mm);
- if (folio_is_device_private(folio)) {
- unsigned long pfn = folio_pfn(folio);
- swp_entry_t entry;
- pte_t swp_pte;
-
- if (anon_exclusive)
- WARN_ON_ONCE(folio_try_share_anon_rmap_pte(folio,
- subpage));
-
- /*
- * Store the pfn of the page in a special migration
- * pte. do_swap_page() will wait until the migration
- * pte is removed and then restart fault handling.
- */
- entry = pte_to_swp_entry(pteval);
- if (is_writable_device_private_entry(entry))
- entry = make_writable_migration_entry(pfn);
- else if (anon_exclusive)
- entry = make_readable_exclusive_migration_entry(pfn);
- else
- entry = make_readable_migration_entry(pfn);
- swp_pte = swp_entry_to_pte(entry);
+ if (PageHWPoison(subpage)) {
+ VM_WARN_ON_FOLIO(folio_is_device_private(folio), folio);
- /*
- * pteval maps a zone device page and is therefore
- * a swap pte.
- */
- if (pte_swp_soft_dirty(pteval))
- swp_pte = pte_swp_mksoft_dirty(swp_pte);
- if (pte_swp_uffd_wp(pteval))
- swp_pte = pte_swp_mkuffd_wp(swp_pte);
- set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
- trace_set_migration_pte(pvmw.address, pte_val(swp_pte),
- folio_order(folio));
- /*
- * No need to invalidate here it will synchronize on
- * against the special swap migration pte.
- */
- } else if (PageHWPoison(subpage)) {
pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
if (folio_test_hugetlb(folio)) {
hugetlb_count_sub(folio_nr_pages(folio), mm);
@@ -2230,8 +2458,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
dec_mm_counter(mm, mm_counter(folio));
set_pte_at(mm, address, pvmw.pte, pteval);
}
-
- } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
+ } else if (likely(pte_present(pteval)) && pte_unused(pteval) &&
+ !userfaultfd_armed(vma)) {
/*
* The guest indicated that the page content is of no
* interest anymore. Simply discard the pte, vmscan
@@ -2247,6 +2475,11 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
swp_entry_t entry;
pte_t swp_pte;
+ /*
+ * arch_unmap_one() is expected to be a NOP on
+ * architectures where we could have PFN swap PTEs,
+ * so we'll not check/care.
+ */
if (arch_unmap_one(mm, vma, address, pteval) < 0) {
if (folio_test_hugetlb(folio))
set_huge_pte_at(mm, address, pvmw.pte,
@@ -2257,8 +2490,6 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
page_vma_mapped_walk_done(&pvmw);
break;
}
- VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) &&
- !anon_exclusive, subpage);
/* See folio_try_share_anon_rmap_pte(): clear PTE first. */
if (folio_test_hugetlb(folio)) {
@@ -2283,7 +2514,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
* pte. do_swap_page() will wait until the migration
* pte is removed and then restart fault handling.
*/
- if (pte_write(pteval))
+ if (writable)
entry = make_writable_migration_entry(
page_to_pfn(subpage));
else if (anon_exclusive)
@@ -2292,15 +2523,23 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
else
entry = make_readable_migration_entry(
page_to_pfn(subpage));
- if (pte_young(pteval))
- entry = make_migration_entry_young(entry);
- if (pte_dirty(pteval))
- entry = make_migration_entry_dirty(entry);
- swp_pte = swp_entry_to_pte(entry);
- if (pte_soft_dirty(pteval))
- swp_pte = pte_swp_mksoft_dirty(swp_pte);
- if (pte_uffd_wp(pteval))
- swp_pte = pte_swp_mkuffd_wp(swp_pte);
+ if (likely(pte_present(pteval))) {
+ if (pte_young(pteval))
+ entry = make_migration_entry_young(entry);
+ if (pte_dirty(pteval))
+ entry = make_migration_entry_dirty(entry);
+ swp_pte = swp_entry_to_pte(entry);
+ if (pte_soft_dirty(pteval))
+ swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ if (pte_uffd_wp(pteval))
+ swp_pte = pte_swp_mkuffd_wp(swp_pte);
+ } else {
+ swp_pte = swp_entry_to_pte(entry);
+ if (pte_swp_soft_dirty(pteval))
+ swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ if (pte_swp_uffd_wp(pteval))
+ swp_pte = pte_swp_mkuffd_wp(swp_pte);
+ }
if (folio_test_hugetlb(folio))
set_huge_pte_at(mm, address, pvmw.pte, swp_pte,
hsz);
@@ -2375,190 +2614,139 @@ void try_to_migrate(struct folio *folio, enum ttu_flags flags)
}
#ifdef CONFIG_DEVICE_PRIVATE
-struct make_exclusive_args {
- struct mm_struct *mm;
- unsigned long address;
- void *owner;
- bool valid;
-};
-
-static bool page_make_device_exclusive_one(struct folio *folio,
- struct vm_area_struct *vma, unsigned long address, void *priv)
+/**
+ * make_device_exclusive() - Mark a page for exclusive use by a device
+ * @mm: mm_struct of associated target process
+ * @addr: the virtual address to mark for exclusive device access
+ * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering
+ * @foliop: folio pointer will be stored here on success.
+ *
+ * This function looks up the page mapped at the given address, grabs a
+ * folio reference, locks the folio and replaces the PTE with special
+ * device-exclusive PFN swap entry, preventing access through the process
+ * page tables. The function will return with the folio locked and referenced.
+ *
+ * On fault, the device-exclusive entries are replaced with the original PTE
+ * under folio lock, after calling MMU notifiers.
+ *
+ * Only anonymous non-hugetlb folios are supported and the VMA must have
+ * write permissions such that we can fault in the anonymous page writable
+ * in order to mark it exclusive. The caller must hold the mmap_lock in read
+ * mode.
+ *
+ * A driver using this to program access from a device must use a mmu notifier
+ * critical section to hold a device specific lock during programming. Once
+ * programming is complete it should drop the folio lock and reference after
+ * which point CPU access to the page will revoke the exclusive access.
+ *
+ * Notes:
+ * #. This function always operates on individual PTEs mapping individual
+ * pages. PMD-sized THPs are first remapped to be mapped by PTEs before
+ * the conversion happens on a single PTE corresponding to @addr.
+ * #. While concurrent access through the process page tables is prevented,
+ * concurrent access through other page references (e.g., earlier GUP
+ * invocation) is not handled and not supported.
+ * #. device-exclusive entries are considered "clean" and "old" by core-mm.
+ * Device drivers must update the folio state when informed by MMU
+ * notifiers.
+ *
+ * Returns: pointer to mapped page on success, otherwise a negative error.
+ */
+struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr,
+ void *owner, struct folio **foliop)
{
- struct mm_struct *mm = vma->vm_mm;
- DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
- struct make_exclusive_args *args = priv;
- pte_t pteval;
- struct page *subpage;
- bool ret = true;
struct mmu_notifier_range range;
+ struct folio *folio, *fw_folio;
+ struct vm_area_struct *vma;
+ struct folio_walk fw;
+ struct page *page;
swp_entry_t entry;
pte_t swp_pte;
- pte_t ptent;
-
- mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
- vma->vm_mm, address, min(vma->vm_end,
- address + folio_size(folio)),
- args->owner);
- mmu_notifier_invalidate_range_start(&range);
-
- while (page_vma_mapped_walk(&pvmw)) {
- /* Unexpected PMD-mapped THP? */
- VM_BUG_ON_FOLIO(!pvmw.pte, folio);
-
- ptent = ptep_get(pvmw.pte);
- if (!pte_present(ptent)) {
- ret = false;
- page_vma_mapped_walk_done(&pvmw);
- break;
- }
-
- subpage = folio_page(folio,
- pte_pfn(ptent) - folio_pfn(folio));
- address = pvmw.address;
-
- /* Nuke the page table entry. */
- flush_cache_page(vma, address, pte_pfn(ptent));
- pteval = ptep_clear_flush(vma, address, pvmw.pte);
-
- /* Set the dirty flag on the folio now the pte is gone. */
- if (pte_dirty(pteval))
- folio_mark_dirty(folio);
-
- /*
- * Check that our target page is still mapped at the expected
- * address.
- */
- if (args->mm == mm && args->address == address &&
- pte_write(pteval))
- args->valid = true;
-
- /*
- * Store the pfn of the page in a special migration
- * pte. do_swap_page() will wait until the migration
- * pte is removed and then restart fault handling.
- */
- if (pte_write(pteval))
- entry = make_writable_device_exclusive_entry(
- page_to_pfn(subpage));
- else
- entry = make_readable_device_exclusive_entry(
- page_to_pfn(subpage));
- swp_pte = swp_entry_to_pte(entry);
- if (pte_soft_dirty(pteval))
- swp_pte = pte_swp_mksoft_dirty(swp_pte);
- if (pte_uffd_wp(pteval))
- swp_pte = pte_swp_mkuffd_wp(swp_pte);
+ int ret;
- set_pte_at(mm, address, pvmw.pte, swp_pte);
+ mmap_assert_locked(mm);
+ addr = PAGE_ALIGN_DOWN(addr);
- /*
- * There is a reference on the page for the swap entry which has
- * been removed, so shouldn't take another.
- */
- folio_remove_rmap_pte(folio, subpage, vma);
+ /*
+ * Fault in the page writable and try to lock it; note that if the
+ * address would already be marked for exclusive use by a device,
+ * the GUP call would undo that first by triggering a fault.
+ *
+ * If any other device would already map this page exclusively, the
+ * fault will trigger a conversion to an ordinary
+ * (non-device-exclusive) PTE and issue a MMU_NOTIFY_EXCLUSIVE.
+ */
+retry:
+ page = get_user_page_vma_remote(mm, addr,
+ FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD,
+ &vma);
+ if (IS_ERR(page))
+ return page;
+ folio = page_folio(page);
+
+ if (!folio_test_anon(folio) || folio_test_hugetlb(folio)) {
+ folio_put(folio);
+ return ERR_PTR(-EOPNOTSUPP);
}
- mmu_notifier_invalidate_range_end(&range);
-
- return ret;
-}
-
-/**
- * folio_make_device_exclusive - Mark the folio exclusively owned by a device.
- * @folio: The folio to replace page table entries for.
- * @mm: The mm_struct where the folio is expected to be mapped.
- * @address: Address where the folio is expected to be mapped.
- * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks
- *
- * Tries to remove all the page table entries which are mapping this
- * folio and replace them with special device exclusive swap entries to
- * grant a device exclusive access to the folio.
- *
- * Context: Caller must hold the folio lock.
- * Return: false if the page is still mapped, or if it could not be unmapped
- * from the expected address. Otherwise returns true (success).
- */
-static bool folio_make_device_exclusive(struct folio *folio,
- struct mm_struct *mm, unsigned long address, void *owner)
-{
- struct make_exclusive_args args = {
- .mm = mm,
- .address = address,
- .owner = owner,
- .valid = false,
- };
- struct rmap_walk_control rwc = {
- .rmap_one = page_make_device_exclusive_one,
- .done = folio_not_mapped,
- .anon_lock = folio_lock_anon_vma_read,
- .arg = &args,
- };
+ ret = folio_lock_killable(folio);
+ if (ret) {
+ folio_put(folio);
+ return ERR_PTR(ret);
+ }
/*
- * Restrict to anonymous folios for now to avoid potential writeback
- * issues.
+ * Inform secondary MMUs that we are going to convert this PTE to
+ * device-exclusive, such that they unmap it now. Note that the
+ * caller must filter this event out to prevent livelocks.
*/
- if (!folio_test_anon(folio))
- return false;
-
- rmap_walk(folio, &rwc);
+ mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
+ mm, addr, addr + PAGE_SIZE, owner);
+ mmu_notifier_invalidate_range_start(&range);
- return args.valid && !folio_mapcount(folio);
-}
+ /*
+ * Let's do a second walk and make sure we still find the same page
+ * mapped writable. Note that any page of an anonymous folio can
+ * only be mapped writable using exactly one PTE ("exclusive"), so
+ * there cannot be other mappings.
+ */
+ fw_folio = folio_walk_start(&fw, vma, addr, 0);
+ if (fw_folio != folio || fw.page != page ||
+ fw.level != FW_LEVEL_PTE || !pte_write(fw.pte)) {
+ if (fw_folio)
+ folio_walk_end(&fw, vma);
+ mmu_notifier_invalidate_range_end(&range);
+ folio_unlock(folio);
+ folio_put(folio);
+ goto retry;
+ }
-/**
- * make_device_exclusive_range() - Mark a range for exclusive use by a device
- * @mm: mm_struct of associated target process
- * @start: start of the region to mark for exclusive device access
- * @end: end address of region
- * @pages: returns the pages which were successfully marked for exclusive access
- * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering
- *
- * Returns: number of pages found in the range by GUP. A page is marked for
- * exclusive access only if the page pointer is non-NULL.
- *
- * This function finds ptes mapping page(s) to the given address range, locks
- * them and replaces mappings with special swap entries preventing userspace CPU
- * access. On fault these entries are replaced with the original mapping after
- * calling MMU notifiers.
- *
- * A driver using this to program access from a device must use a mmu notifier
- * critical section to hold a device specific lock during programming. Once
- * programming is complete it should drop the page lock and reference after
- * which point CPU access to the page will revoke the exclusive access.
- */
-int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
- unsigned long end, struct page **pages,
- void *owner)
-{
- long npages = (end - start) >> PAGE_SHIFT;
- long i;
-
- npages = get_user_pages_remote(mm, start, npages,
- FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD,
- pages, NULL);
- if (npages < 0)
- return npages;
-
- for (i = 0; i < npages; i++, start += PAGE_SIZE) {
- struct folio *folio = page_folio(pages[i]);
- if (PageTail(pages[i]) || !folio_trylock(folio)) {
- folio_put(folio);
- pages[i] = NULL;
- continue;
- }
+ /* Nuke the page table entry so we get the uptodate dirty bit. */
+ flush_cache_page(vma, addr, page_to_pfn(page));
+ fw.pte = ptep_clear_flush(vma, addr, fw.ptep);
- if (!folio_make_device_exclusive(folio, mm, start, owner)) {
- folio_unlock(folio);
- folio_put(folio);
- pages[i] = NULL;
- }
- }
+ /* Set the dirty flag on the folio now the PTE is gone. */
+ if (pte_dirty(fw.pte))
+ folio_mark_dirty(folio);
- return npages;
+ /*
+ * Store the pfn of the page in a special device-exclusive PFN swap PTE.
+ * do_swap_page() will trigger the conversion back while holding the
+ * folio lock.
+ */
+ entry = make_device_exclusive_entry(page_to_pfn(page));
+ swp_pte = swp_entry_to_pte(entry);
+ if (pte_soft_dirty(fw.pte))
+ swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ /* The pte is writable, uffd-wp does not apply. */
+ set_pte_at(mm, addr, fw.ptep, swp_pte);
+
+ folio_walk_end(&fw, vma);
+ mmu_notifier_invalidate_range_end(&range);
+ *foliop = folio;
+ return page;
}
-EXPORT_SYMBOL_GPL(make_device_exclusive_range);
+EXPORT_SYMBOL_GPL(make_device_exclusive);
#endif
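
A minimal usage sketch (editor's addition, not part of the patch) of the reworked API, following the kernel-doc above. The mmap locking and the ERR_PTR/folio contract come from the patch itself; dev_exclusive_lock and dev_program_page() are hypothetical driver pieces, as is the assumption that the driver's MMU_NOTIFY_EXCLUSIVE handler filters on @owner:

static int driver_make_addr_exclusive(struct mm_struct *mm,
				      unsigned long addr, void *owner)
{
	struct folio *folio;
	struct page *page;

	mmap_read_lock(mm);
	page = make_device_exclusive(mm, addr, owner, &folio);
	if (IS_ERR(page)) {
		mmap_read_unlock(mm);
		return PTR_ERR(page);
	}

	/*
	 * Program the device under the same driver lock that the
	 * MMU_NOTIFY_EXCLUSIVE handler takes, so programming cannot
	 * race with a concurrent invalidation.
	 */
	mutex_lock(&dev_exclusive_lock);
	dev_program_page(page);
	mutex_unlock(&dev_exclusive_lock);

	/* From here, any CPU fault on @addr revokes the exclusive access. */
	folio_unlock(folio);
	folio_put(folio);
	mmap_read_unlock(mm);
	return 0;
}
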
void __put_anon_vma(struct anon_vma *anon_vma)
@@ -2653,35 +2841,37 @@ static void rmap_walk_anon(struct folio *folio,
anon_vma_unlock_read(anon_vma);
}
-/*
- * rmap_walk_file - do something to file page using the object-based rmap method
- * @folio: the folio to be handled
- * @rwc: control variable according to each walk type
- * @locked: caller holds relevant rmap lock
+/**
+ * __rmap_walk_file() - Traverse the reverse mapping for a file-backed mapping
+ * of a page mapped within a specified page cache object at a specified offset.
*
- * Find all the mappings of a folio using the mapping pointer and the vma chains
- * contained in the address_space struct it points to.
+ * @folio: Either the folio whose mappings to traverse, or if NULL,
+ * the callbacks specified in @rwc will be configured such
+ * as to be able to look up mappings correctly.
+ * @mapping: The page cache object whose mapping VMAs we intend to
+ * traverse. If @folio is non-NULL, this should be equal to
+ * folio_mapping(folio).
+ * @pgoff_start: The offset within @mapping of the page which we are
+ * looking up. If @folio is non-NULL, this should be equal
+ * to folio_pgoff(folio).
+ * @nr_pages: The number of pages mapped by the mapping. If @folio is
+ * non-NULL, this should be equal to folio_nr_pages(folio).
+ * @rwc: The reverse mapping walk control object describing how
+ * the traversal should proceed.
+ * @locked: Is the @mapping already locked? If not, we acquire the
+ * lock.
*/
-static void rmap_walk_file(struct folio *folio,
- struct rmap_walk_control *rwc, bool locked)
+static void __rmap_walk_file(struct folio *folio, struct address_space *mapping,
+ pgoff_t pgoff_start, unsigned long nr_pages,
+ struct rmap_walk_control *rwc, bool locked)
{
- struct address_space *mapping = folio_mapping(folio);
- pgoff_t pgoff_start, pgoff_end;
+ pgoff_t pgoff_end = pgoff_start + nr_pages - 1;
struct vm_area_struct *vma;
- /*
- * The page lock not only makes sure that page->mapping cannot
- * suddenly be NULLified by truncation, it makes sure that the
- * structure at mapping cannot be freed and reused yet,
- * so we can safely take mapping->i_mmap_rwsem.
- */
- VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+ VM_WARN_ON_FOLIO(folio && mapping != folio_mapping(folio), folio);
+ VM_WARN_ON_FOLIO(folio && pgoff_start != folio_pgoff(folio), folio);
+ VM_WARN_ON_FOLIO(folio && nr_pages != folio_nr_pages(folio), folio);
- if (!mapping)
- return;
-
- pgoff_start = folio_pgoff(folio);
- pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
if (!locked) {
if (i_mmap_trylock_read(mapping))
goto lookup;
@@ -2696,8 +2886,7 @@ static void rmap_walk_file(struct folio *folio,
lookup:
vma_interval_tree_foreach(vma, &mapping->i_mmap,
pgoff_start, pgoff_end) {
- unsigned long address = vma_address(vma, pgoff_start,
- folio_nr_pages(folio));
+ unsigned long address = vma_address(vma, pgoff_start, nr_pages);
VM_BUG_ON_VMA(address == -EFAULT, vma);
cond_resched();
@@ -2710,12 +2899,38 @@ lookup:
if (rwc->done && rwc->done(folio))
goto done;
}
-
done:
if (!locked)
i_mmap_unlock_read(mapping);
}
+/*
+ * rmap_walk_file - do something to file page using the object-based rmap method
+ * @folio: the folio to be handled
+ * @rwc: control variable according to each walk type
+ * @locked: caller holds relevant rmap lock
+ *
+ * Find all the mappings of a folio using the mapping pointer and the vma chains
+ * contained in the address_space struct it points to.
+ */
+static void rmap_walk_file(struct folio *folio,
+ struct rmap_walk_control *rwc, bool locked)
+{
+ /*
+ * The folio lock not only makes sure that folio->mapping cannot
+ * suddenly be NULLified by truncation, it makes sure that the structure
+ * at mapping cannot be freed and reused yet, so we can safely take
+ * mapping->i_mmap_rwsem.
+ */
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+
+ if (!folio->mapping)
+ return;
+
+ __rmap_walk_file(folio, folio->mapping, folio->index,
+ folio_nr_pages(folio), rwc, locked);
+}
+
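
A hedged sketch of what the refactor enables (editor's addition): because @folio may now be NULL, a caller that knows only the mapping, offset and length can drive the same walk, provided its rwc callbacks tolerate the NULL folio. The helper below is illustrative, not from the patch:

static void walk_file_range(struct address_space *mapping, pgoff_t pgoff,
			    unsigned long nr_pages,
			    struct rmap_walk_control *rwc)
{
	/*
	 * rwc->rmap_one() is invoked once per VMA overlapping the range;
	 * with a NULL folio it must perform its own lookup of what is
	 * mapped at the computed address.
	 */
	__rmap_walk_file(NULL, mapping, pgoff, nr_pages, rwc,
			 /* locked = */ false);
}
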
void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
{
if (unlikely(folio_test_ksm(folio)))
diff --git a/mm/shmem.c b/mm/shmem.c
index ab61c8bb20e1..99327c30507c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -86,7 +86,6 @@ static struct vfsmount *shm_mnt __ro_after_init;
#include "internal.h"
-#define BLOCKS_PER_PAGE (PAGE_SIZE/512)
#define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT)
/* Pretend that each entry is of this size in directory's i_size */
@@ -526,9 +525,9 @@ static bool shmem_confirm_swap(struct address_space *mapping,
* enables huge pages for the mount;
* SHMEM_HUGE_WITHIN_SIZE:
* only allocate huge pages if the page will be fully within i_size,
- * also respect fadvise()/madvise() hints;
+ * also respect madvise() hints;
* SHMEM_HUGE_ADVISE:
- * only allocate huge pages if requested with fadvise()/madvise();
+ * only allocate huge pages if requested with madvise();
*/
#define SHMEM_HUGE_NEVER 0
@@ -591,6 +590,28 @@ shmem_mapping_size_orders(struct address_space *mapping, pgoff_t index, loff_t w
return order > 0 ? BIT(order + 1) - 1 : 0;
}
+static unsigned int shmem_get_orders_within_size(struct inode *inode,
+ unsigned long within_size_orders, pgoff_t index,
+ loff_t write_end)
+{
+ pgoff_t aligned_index;
+ unsigned long order;
+ loff_t i_size;
+
+ order = highest_order(within_size_orders);
+ while (within_size_orders) {
+ aligned_index = round_up(index + 1, 1 << order);
+ i_size = max(write_end, i_size_read(inode));
+ i_size = round_up(i_size, PAGE_SIZE);
+ if (i_size >> PAGE_SHIFT >= aligned_index)
+ return within_size_orders;
+
+ order = next_order(&within_size_orders, order);
+ }
+
+ return 0;
+}
+
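
A worked example of the extracted helper (editor's addition, assuming 4 KiB pages): with i_size = 1 MiB (256 pages), write_end = 0 and index = 0, order 9 fails because round_up(1, 512) = 512 > 256, so next_order() clears that bit; order 8 passes because round_up(1, 256) = 256 <= 256, and the remaining mask (orders <= 8 present in within_size_orders) is returned.
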
static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
loff_t write_end, bool shmem_huge_force,
struct vm_area_struct *vma,
@@ -599,9 +620,6 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index
unsigned int maybe_pmd_order = HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ?
0 : BIT(HPAGE_PMD_ORDER);
unsigned long within_size_orders;
- unsigned int order;
- pgoff_t aligned_index;
- loff_t i_size;
if (!S_ISREG(inode->i_mode))
return 0;
@@ -635,16 +653,11 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index
within_size_orders = shmem_mapping_size_orders(inode->i_mapping,
index, write_end);
- order = highest_order(within_size_orders);
- while (within_size_orders) {
- aligned_index = round_up(index + 1, 1 << order);
- i_size = max(write_end, i_size_read(inode));
- i_size = round_up(i_size, PAGE_SIZE);
- if (i_size >> PAGE_SHIFT >= aligned_index)
- return within_size_orders;
+ within_size_orders = shmem_get_orders_within_size(inode, within_size_orders,
+ index, write_end);
+ if (within_size_orders > 0)
+ return within_size_orders;
- order = next_order(&within_size_orders, order);
- }
fallthrough;
case SHMEM_HUGE_ADVISE:
if (vm_flags & VM_HUGEPAGE)
@@ -1380,9 +1393,9 @@ static void shmem_evict_inode(struct inode *inode)
#endif
}
-static int shmem_find_swap_entries(struct address_space *mapping,
- pgoff_t start, struct folio_batch *fbatch,
- pgoff_t *indices, unsigned int type)
+static unsigned int shmem_find_swap_entries(struct address_space *mapping,
+ pgoff_t start, struct folio_batch *fbatch,
+ pgoff_t *indices, unsigned int type)
{
XA_STATE(xas, &mapping->i_pages, start);
struct folio *folio;
@@ -1415,7 +1428,7 @@ static int shmem_find_swap_entries(struct address_space *mapping,
}
rcu_read_unlock();
- return xas.xa_index;
+ return folio_batch_count(fbatch);
}
/*
@@ -1462,8 +1475,8 @@ static int shmem_unuse_inode(struct inode *inode, unsigned int type)
do {
folio_batch_init(&fbatch);
- shmem_find_swap_entries(mapping, start, &fbatch, indices, type);
- if (folio_batch_count(&fbatch) == 0) {
+ if (!shmem_find_swap_entries(mapping, start, &fbatch,
+ indices, type)) {
ret = 0;
break;
}
@@ -1533,7 +1546,6 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
struct inode *inode = mapping->host;
struct shmem_inode_info *info = SHMEM_I(inode);
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
- swp_entry_t swap;
pgoff_t index;
int nr_pages;
bool split = false;
@@ -1615,14 +1627,6 @@ try_split:
folio_mark_uptodate(folio);
}
- swap = folio_alloc_swap(folio);
- if (!swap.val) {
- if (nr_pages > 1)
- goto try_split;
-
- goto redirty;
- }
-
/*
* Add inode to shmem_unuse()'s list of swapped-out inodes,
* if it's not already there. Do it now before the folio is
@@ -1635,20 +1639,20 @@ try_split:
if (list_empty(&info->swaplist))
list_add(&info->swaplist, &shmem_swaplist);
- if (add_to_swap_cache(folio, swap,
- __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
- NULL) == 0) {
+ if (!folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
shmem_recalc_inode(inode, 0, nr_pages);
- swap_shmem_alloc(swap, nr_pages);
- shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));
+ swap_shmem_alloc(folio->swap, nr_pages);
+ shmem_delete_from_page_cache(folio, swp_to_radix_entry(folio->swap));
mutex_unlock(&shmem_swaplist_mutex);
BUG_ON(folio_mapped(folio));
return swap_writepage(&folio->page, wbc);
}
+ list_del_init(&info->swaplist);
mutex_unlock(&shmem_swaplist_mutex);
- put_swap_folio(folio, swap);
+ if (nr_pages > 1)
+ goto try_split;
redirty:
folio_mark_dirty(folio);
if (wbc->for_reclaim)
@@ -1757,10 +1761,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
unsigned long mask = READ_ONCE(huge_shmem_orders_always);
unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
unsigned long vm_flags = vma ? vma->vm_flags : 0;
- pgoff_t aligned_index;
unsigned int global_orders;
- loff_t i_size;
- int order;
if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags)))
return 0;
@@ -1786,17 +1787,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
return READ_ONCE(huge_shmem_orders_inherit);
/* Allow mTHP that will be fully within i_size. */
- order = highest_order(within_size_orders);
- while (within_size_orders) {
- aligned_index = round_up(index + 1, 1 << order);
- i_size = round_up(i_size_read(inode), PAGE_SIZE);
- if (i_size >> PAGE_SHIFT >= aligned_index) {
- mask |= within_size_orders;
- break;
- }
-
- order = next_order(&within_size_orders, order);
- }
+ mask |= shmem_get_orders_within_size(inode, within_size_orders, index, 0);
if (vm_flags & VM_HUGEPAGE)
mask |= READ_ONCE(huge_shmem_orders_madvise);
@@ -2017,7 +2008,7 @@ static struct folio *shmem_swap_alloc_folio(struct inode *inode,
__folio_set_swapbacked(new);
new->swap = entry;
- mem_cgroup_swapin_uncharge_swap(entry, nr_pages);
+ memcg1_swapin(entry, nr_pages);
shadow = get_shadow_from_swap_cache(entry);
if (shadow)
workingset_refault(new, shadow);
@@ -2162,15 +2153,16 @@ static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
{
struct address_space *mapping = inode->i_mapping;
XA_STATE_ORDER(xas, &mapping->i_pages, index, 0);
- void *alloced_shadow = NULL;
- int alloced_order = 0, i;
+ int split_order = 0, entry_order;
+ int i;
/* Convert user data gfp flags to xarray node gfp flags */
gfp &= GFP_RECLAIM_MASK;
for (;;) {
- int order = -1, split_order = 0;
void *old = NULL;
+ int cur_order;
+ pgoff_t swap_index;
xas_lock_irq(&xas);
old = xas_load(&xas);
@@ -2179,60 +2171,56 @@ static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
goto unlock;
}
- order = xas_get_order(&xas);
+ entry_order = xas_get_order(&xas);
- /* Swap entry may have changed before we re-acquire the lock */
- if (alloced_order &&
- (old != alloced_shadow || order != alloced_order)) {
- xas_destroy(&xas);
- alloced_order = 0;
- }
+ if (!entry_order)
+ goto unlock;
/* Try to split large swap entry in pagecache */
- if (order > 0) {
- if (!alloced_order) {
- split_order = order;
+ cur_order = entry_order;
+ swap_index = round_down(index, 1 << entry_order);
+
+ split_order = xas_try_split_min_order(cur_order);
+
+ while (cur_order > 0) {
+ pgoff_t aligned_index =
+ round_down(index, 1 << cur_order);
+ pgoff_t swap_offset = aligned_index - swap_index;
+
+ xas_set_order(&xas, index, split_order);
+ xas_try_split(&xas, old, cur_order);
+ if (xas_error(&xas))
goto unlock;
- }
- xas_split(&xas, old, order);
/*
* Re-set the swap entry after splitting, and the swap
* offset of the original large entry must be continuous.
*/
- for (i = 0; i < 1 << order; i++) {
- pgoff_t aligned_index = round_down(index, 1 << order);
+ for (i = 0; i < 1 << cur_order;
+ i += (1 << split_order)) {
swp_entry_t tmp;
- tmp = swp_entry(swp_type(swap), swp_offset(swap) + i);
+ tmp = swp_entry(swp_type(swap),
+ swp_offset(swap) + swap_offset +
+ i);
__xa_store(&mapping->i_pages, aligned_index + i,
swp_to_radix_entry(tmp), 0);
}
+ cur_order = split_order;
+ split_order = xas_try_split_min_order(split_order);
}
unlock:
xas_unlock_irq(&xas);
- /* split needed, alloc here and retry. */
- if (split_order) {
- xas_split_alloc(&xas, old, split_order, gfp);
- if (xas_error(&xas))
- goto error;
- alloced_shadow = old;
- alloced_order = split_order;
- xas_reset(&xas);
- continue;
- }
-
if (!xas_nomem(&xas, gfp))
break;
}
-error:
if (xas_error(&xas))
return xas_error(&xas);
- return alloced_order;
+ return entry_order;
}
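
To make the re-store arithmetic concrete (editor's addition; assumes XA_CHUNK_SHIFT = 6, so xas_try_split_min_order(4) = 0): a swap entry with entry_order = 4 covers indices 32..47 with swap offsets O..O+15, and a fault at index 35 gives swap_index = 32. A single pass then splits straight to order 0, and the inner loop stores offset O + swap_offset + i at aligned_index + i, so offsets O..O+15 stay at indices 32..47 and index 35 ends up holding O + 3, preserving the offset continuity the comment requires.
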
/*
@@ -3302,8 +3290,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
if (ret)
return ret;
- if (folio_test_hwpoison(folio) ||
- (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) {
+ if (folio_contain_hwpoisoned_page(folio)) {
folio_unlock(folio);
folio_put(folio);
return -EIO;
@@ -5674,19 +5661,19 @@ static int __init setup_thp_shmem(char *str)
THP_ORDERS_ALL_FILE_DEFAULT);
}
- if (start == -EINVAL) {
+ if (start < 0) {
pr_err("invalid size %s in thp_shmem boot parameter\n",
start_size);
goto err;
}
- if (end == -EINVAL) {
+ if (end < 0) {
pr_err("invalid size %s in thp_shmem boot parameter\n",
end_size);
goto err;
}
- if (start < 0 || end < 0 || start > end)
+ if (start > end)
goto err;
nr = end - start + 1;
@@ -5853,7 +5840,7 @@ static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name,
* underlying inode. So users of this interface must do LSM checks at a
* higher layer. The users are the big_key and shm implementations. LSM
* checks are provided at the key or shm level rather than the inode.
- * @name: name for dentry (to be seen in /proc/<pid>/maps
+ * @name: name for dentry (to be seen in /proc/<pid>/maps)
* @size: size to be set for the file
* @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
*/
@@ -5865,7 +5852,7 @@ EXPORT_SYMBOL_GPL(shmem_kernel_file_setup);
/**
* shmem_file_setup - get an unlinked file living in tmpfs
- * @name: name for dentry (to be seen in /proc/<pid>/maps
+ * @name: name for dentry (to be seen in /proc/<pid>/maps)
* @size: size to be set for the file
* @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
*/
@@ -5878,7 +5865,7 @@ EXPORT_SYMBOL_GPL(shmem_file_setup);
/**
* shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
* @mnt: the tmpfs mount where the file will be created
- * @name: name for dentry (to be seen in /proc/<pid>/maps
+ * @name: name for dentry (to be seen in /proc/<pid>/maps)
* @size: size to be set for the file
* @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
*/
diff --git a/mm/show_mem.c b/mm/show_mem.c
index 43afb56abbd3..6af13bcd2ab3 100644
--- a/mm/show_mem.c
+++ b/mm/show_mem.c
@@ -260,6 +260,7 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z
" pagetables:%lukB"
" sec_pagetables:%lukB"
" all_unreclaimable? %s"
+ " Balloon:%lukB"
"\n",
pgdat->node_id,
K(node_page_state(pgdat, NR_ACTIVE_ANON)),
@@ -285,7 +286,8 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z
#endif
K(node_page_state(pgdat, NR_PAGETABLE)),
K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
- str_yes_no(pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES));
+ str_yes_no(pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES),
+ K(node_page_state(pgdat, NR_BALLOON_PAGES)));
}
for_each_populated_zone(zone) {
diff --git a/mm/shrinker_debug.c b/mm/shrinker_debug.c
index 794bd433cce0..20eaee3e97f7 100644
--- a/mm/shrinker_debug.c
+++ b/mm/shrinker_debug.c
@@ -214,10 +214,14 @@ int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
ret = debugfs_change_name(shrinker->debugfs_entry, "%s-%d",
shrinker->name, shrinker->debugfs_id);
+ if (ret) {
+ shrinker->name = old;
+ kfree_const(new);
+ } else {
+ kfree_const(old);
+ }
mutex_unlock(&shrinker_mutex);
- kfree_const(old);
-
return ret;
}
EXPORT_SYMBOL(shrinker_debugfs_rename);
diff --git a/mm/slub.c b/mm/slub.c
index 5eac408e818e..b46f87662e71 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2023,7 +2023,8 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
return 0;
}
-static inline void free_slab_obj_exts(struct slab *slab)
+/* Should be called only if mem_alloc_profiling_enabled() */
+static noinline void free_slab_obj_exts(struct slab *slab)
{
struct slabobj_ext *obj_exts;
@@ -2100,33 +2101,37 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
return slab_obj_exts(slab) + obj_to_index(s, slab, p);
}
-static inline void
-alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
+/* Should be called only if mem_alloc_profiling_enabled() */
+static noinline void
+__alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
{
- if (need_slab_obj_ext()) {
- struct slabobj_ext *obj_exts;
+ struct slabobj_ext *obj_exts;
- obj_exts = prepare_slab_obj_exts_hook(s, flags, object);
- /*
- * Currently obj_exts is used only for allocation profiling.
- * If other users appear then mem_alloc_profiling_enabled()
- * check should be added before alloc_tag_add().
- */
- if (likely(obj_exts))
- alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
- }
+ obj_exts = prepare_slab_obj_exts_hook(s, flags, object);
+ /*
+ * Currently obj_exts is used only for allocation profiling.
+ * If other users appear then mem_alloc_profiling_enabled()
+ * check should be added before alloc_tag_add().
+ */
+ if (likely(obj_exts))
+ alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
}
static inline void
-alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
- int objects)
+alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
+{
+ if (need_slab_obj_ext())
+ __alloc_tagging_slab_alloc_hook(s, object, flags);
+}
+
+/* Should be called only if mem_alloc_profiling_enabled() */
+static noinline void
+__alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
+ int objects)
{
struct slabobj_ext *obj_exts;
int i;
- if (!mem_alloc_profiling_enabled())
- return;
-
/* slab->obj_exts might not be NULL if it was created for MEMCG accounting. */
if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
return;
@@ -2142,6 +2147,14 @@ alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
}
}
+static inline void
+alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
+ int objects)
+{
+ if (mem_alloc_profiling_enabled())
+ __alloc_tagging_slab_free_hook(s, slab, p, objects);
+}
+
#else /* CONFIG_MEM_ALLOC_PROFILING */
static inline void
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 3287ebadd167..fd2ab5118e13 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -30,6 +30,15 @@
#include <asm/dma.h>
#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+#include "hugetlb_vmemmap.h"
+
+/*
+ * Flags for vmemmap_populate_range and friends.
+ */
+/* Get a ref on the head page struct page, for ZONE_DEVICE compound pages */
+#define VMEMMAP_POPULATE_PAGEREF 0x0001
#include "internal.h"
@@ -144,17 +153,18 @@ void __meminit vmemmap_verify(pte_t *pte, int node,
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
struct vmem_altmap *altmap,
- struct page *reuse)
+ unsigned long ptpfn, unsigned long flags)
{
pte_t *pte = pte_offset_kernel(pmd, addr);
if (pte_none(ptep_get(pte))) {
pte_t entry;
void *p;
- if (!reuse) {
+ if (ptpfn == (unsigned long)-1) {
p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
if (!p)
return NULL;
+ ptpfn = PHYS_PFN(__pa(p));
} else {
/*
* When a PTE/PMD entry is freed from the init_mm
@@ -165,10 +175,10 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
* and through vmemmap_populate_compound_pages() when
* slab is available.
*/
- get_page(reuse);
- p = page_to_virt(reuse);
+ if (flags & VMEMMAP_POPULATE_PAGEREF)
+ get_page(pfn_to_page(ptpfn));
}
- entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
+ entry = pfn_pte(ptpfn, PAGE_KERNEL);
set_pte_at(&init_mm, addr, pte, entry);
}
return pte;
@@ -238,7 +248,8 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node,
struct vmem_altmap *altmap,
- struct page *reuse)
+ unsigned long ptpfn,
+ unsigned long flags)
{
pgd_t *pgd;
p4d_t *p4d;
@@ -258,7 +269,7 @@ static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node,
pmd = vmemmap_pmd_populate(pud, addr, node);
if (!pmd)
return NULL;
- pte = vmemmap_pte_populate(pmd, addr, node, altmap, reuse);
+ pte = vmemmap_pte_populate(pmd, addr, node, altmap, ptpfn, flags);
if (!pte)
return NULL;
vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
@@ -269,13 +280,15 @@ static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node,
static int __meminit vmemmap_populate_range(unsigned long start,
unsigned long end, int node,
struct vmem_altmap *altmap,
- struct page *reuse)
+ unsigned long ptpfn,
+ unsigned long flags)
{
unsigned long addr = start;
pte_t *pte;
for (; addr < end; addr += PAGE_SIZE) {
- pte = vmemmap_populate_address(addr, node, altmap, reuse);
+ pte = vmemmap_populate_address(addr, node, altmap,
+ ptpfn, flags);
if (!pte)
return -ENOMEM;
}
@@ -286,7 +299,107 @@ static int __meminit vmemmap_populate_range(unsigned long start,
int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
int node, struct vmem_altmap *altmap)
{
- return vmemmap_populate_range(start, end, node, altmap, NULL);
+ return vmemmap_populate_range(start, end, node, altmap, -1, 0);
+}
+
+/*
+ * Undo populate_hvo, and replace it with a normal base page mapping.
+ * Used in memory init in case a HVO mapping needs to be undone.
+ *
+ * This can happen when it is discovered that a memblock allocated
+ * hugetlb page spans multiple zones, which can only be verified
+ * after zones have been initialized.
+ *
+ * We know that:
+ * 1) The first @headsize / PAGE_SIZE vmemmap pages were individually
+ * allocated through memblock, and mapped.
+ *
+ * 2) The rest of the vmemmap pages are mirrors of the last head page.
+ */
+int __meminit vmemmap_undo_hvo(unsigned long addr, unsigned long end,
+ int node, unsigned long headsize)
+{
+ unsigned long maddr, pfn;
+ pte_t *pte;
+ int headpages;
+
+ /*
+ * Should only be called early in boot, so nothing will
+ * be accessing these page structures.
+ */
+ WARN_ON(!early_boot_irqs_disabled);
+
+ headpages = headsize >> PAGE_SHIFT;
+
+ /*
+ * Clear mirrored mappings for tail page structs.
+ */
+ for (maddr = addr + headsize; maddr < end; maddr += PAGE_SIZE) {
+ pte = virt_to_kpte(maddr);
+ pte_clear(&init_mm, maddr, pte);
+ }
+
+ /*
+ * Clear and free mappings for head page and first tail page
+ * structs.
+ */
+ for (maddr = addr; headpages-- > 0; maddr += PAGE_SIZE) {
+ pte = virt_to_kpte(maddr);
+ pfn = pte_pfn(ptep_get(pte));
+ pte_clear(&init_mm, maddr, pte);
+ memblock_phys_free(PFN_PHYS(pfn), PAGE_SIZE);
+ }
+
+ flush_tlb_kernel_range(addr, end);
+
+ return vmemmap_populate(addr, end, node, NULL);
+}
+
+/*
+ * Write protect the mirrored tail page structs for HVO. This will be
+ * called from the hugetlb code when gathering and initializing the
+ * memblock allocated gigantic pages. The write protect can't be
+ * done earlier, since it can't be guaranteed that the reserved
+ * page structures will not be written to during initialization,
+ * even if CONFIG_DEFERRED_STRUCT_PAGE_INIT is enabled.
+ *
+ * The PTEs are known to exist, and nothing else should be touching
+ * these pages. The caller is responsible for any TLB flushing.
+ */
+void vmemmap_wrprotect_hvo(unsigned long addr, unsigned long end,
+ int node, unsigned long headsize)
+{
+ unsigned long maddr;
+ pte_t *pte;
+
+ for (maddr = addr + headsize; maddr < end; maddr += PAGE_SIZE) {
+ pte = virt_to_kpte(maddr);
+ ptep_set_wrprotect(&init_mm, maddr, pte);
+ }
+}
+
+/*
+ * Populate vmemmap pages HVO-style. The first page contains the head
+ * page and needed tail pages, the other ones are mirrors of the first
+ * page.
+ */
+int __meminit vmemmap_populate_hvo(unsigned long addr, unsigned long end,
+ int node, unsigned long headsize)
+{
+ pte_t *pte;
+ unsigned long maddr;
+
+ for (maddr = addr; maddr < addr + headsize; maddr += PAGE_SIZE) {
+ pte = vmemmap_populate_address(maddr, node, NULL, -1, 0);
+ if (!pte)
+ return -ENOMEM;
+ }
+
+ /*
+ * Reuse the last page struct page mapped above for the rest.
+ */
+ return vmemmap_populate_range(maddr, end, node, NULL,
+ pte_pfn(ptep_get(pte)), 0);
}
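
A concrete illustration (editor's addition; assumes 4 KiB pages, a 64-byte struct page and headsize = PAGE_SIZE, the value the hugetlb code is expected to pass): a 2 MiB hugetlb folio needs 512 * 64 B = 32 KiB of vmemmap, i.e. 8 pages. vmemmap_populate_hvo() allocates and maps only the first of them, then points the remaining 7 PTEs at that same pfn; vmemmap_wrprotect_hvo() later write-protects those 7 mirrors, and vmemmap_undo_hvo() clears the mirrors, frees the head page back to memblock and repopulates the range normally.
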
void __weak __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
@@ -409,7 +522,8 @@ static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
* with just tail struct pages.
*/
return vmemmap_populate_range(start, end, node, NULL,
- pte_page(ptep_get(pte)));
+ pte_pfn(ptep_get(pte)),
+ VMEMMAP_POPULATE_PAGEREF);
}
size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page));
@@ -417,13 +531,13 @@ static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
unsigned long next, last = addr + size;
/* Populate the head page vmemmap page */
- pte = vmemmap_populate_address(addr, node, NULL, NULL);
+ pte = vmemmap_populate_address(addr, node, NULL, -1, 0);
if (!pte)
return -ENOMEM;
/* Populate the tail pages vmemmap page */
next = addr + PAGE_SIZE;
- pte = vmemmap_populate_address(next, node, NULL, NULL);
+ pte = vmemmap_populate_address(next, node, NULL, -1, 0);
if (!pte)
return -ENOMEM;
@@ -433,7 +547,8 @@ static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
*/
next += PAGE_SIZE;
rc = vmemmap_populate_range(next, last, node, NULL,
- pte_page(ptep_get(pte)));
+ pte_pfn(ptep_get(pte)),
+ VMEMMAP_POPULATE_PAGEREF);
if (rc)
return -ENOMEM;
}
@@ -470,3 +585,28 @@ struct page * __meminit __populate_section_memmap(unsigned long pfn,
return pfn_to_page(pfn);
}
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
+/*
+ * This is called just before initializing sections for a NUMA node.
+ * Any special initialization that needs to be done before the
+ * generic initialization can be done from here. Sections that
+ * are initialized in hooks called from here will be skipped by
+ * the generic initialization.
+ */
+void __init sparse_vmemmap_init_nid_early(int nid)
+{
+ hugetlb_vmemmap_init_early(nid);
+}
+
+/*
+ * This is called just before the initialization of page structures
+ * through memmap_init. Zones are now initialized, so any work that
+ * needs to be done that needs zone information can be done from
+ * here.
+ */
+void __init sparse_vmemmap_init_nid_late(int nid)
+{
+ hugetlb_vmemmap_init_late(nid);
+}
+#endif
diff --git a/mm/sparse.c b/mm/sparse.c
index 133b033d0cba..3c012cf83cc2 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -170,11 +170,6 @@ static void __section_mark_present(struct mem_section *ms,
ms->section_mem_map |= SECTION_MARKED_PRESENT;
}
-#define for_each_present_section_nr(start, section_nr) \
- for (section_nr = next_present_section_nr(start-1); \
- section_nr != -1; \
- section_nr = next_present_section_nr(section_nr))
-
static inline unsigned long first_present_section_nr(void)
{
return next_present_section_nr(-1);
@@ -408,13 +403,13 @@ static void __init check_usemap_section_nr(int nid,
#endif /* CONFIG_MEMORY_HOTREMOVE */
#ifdef CONFIG_SPARSEMEM_VMEMMAP
-static unsigned long __init section_map_size(void)
+unsigned long __init section_map_size(void)
{
return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
}
#else
-static unsigned long __init section_map_size(void)
+unsigned long __init section_map_size(void)
{
return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
}
@@ -495,6 +490,44 @@ void __weak __meminit vmemmap_populate_print_last(void)
{
}
+static void *sparse_usagebuf __meminitdata;
+static void *sparse_usagebuf_end __meminitdata;
+
+/*
+ * Helper function that is used for generic section initialization, and
+ * can also be used by any hooks added above.
+ */
+void __init sparse_init_early_section(int nid, struct page *map,
+ unsigned long pnum, unsigned long flags)
+{
+ BUG_ON(!sparse_usagebuf || sparse_usagebuf >= sparse_usagebuf_end);
+ check_usemap_section_nr(nid, sparse_usagebuf);
+ sparse_init_one_section(__nr_to_section(pnum), pnum, map,
+ sparse_usagebuf, SECTION_IS_EARLY | flags);
+ sparse_usagebuf = (void *)sparse_usagebuf + mem_section_usage_size();
+}
+
+static int __init sparse_usage_init(int nid, unsigned long map_count)
+{
+ unsigned long size;
+
+ size = mem_section_usage_size() * map_count;
+ sparse_usagebuf = sparse_early_usemaps_alloc_pgdat_section(
+ NODE_DATA(nid), size);
+ if (!sparse_usagebuf) {
+ sparse_usagebuf_end = NULL;
+ return -ENOMEM;
+ }
+
+ sparse_usagebuf_end = sparse_usagebuf + size;
+ return 0;
+}
+
+static void __init sparse_usage_fini(void)
+{
+ sparse_usagebuf = sparse_usagebuf_end = NULL;
+}
+
/*
* Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end)
* And number of present sections in this node is map_count.
@@ -503,47 +536,54 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
unsigned long pnum_end,
unsigned long map_count)
{
- struct mem_section_usage *usage;
unsigned long pnum;
struct page *map;
+ struct mem_section *ms;
- usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
- mem_section_usage_size() * map_count);
- if (!usage) {
+ if (sparse_usage_init(nid, map_count)) {
pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
goto failed;
}
+
sparse_buffer_init(map_count * section_map_size(), nid);
+
+ sparse_vmemmap_init_nid_early(nid);
+
for_each_present_section_nr(pnum_begin, pnum) {
unsigned long pfn = section_nr_to_pfn(pnum);
if (pnum >= pnum_end)
break;
- map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
- nid, NULL, NULL);
- if (!map) {
- pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
- __func__, nid);
- pnum_begin = pnum;
- sparse_buffer_fini();
- goto failed;
+ ms = __nr_to_section(pnum);
+ if (!preinited_vmemmap_section(ms)) {
+ map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
+ nid, NULL, NULL);
+ if (!map) {
+ pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
+ __func__, nid);
+ pnum_begin = pnum;
+ sparse_usage_fini();
+ sparse_buffer_fini();
+ goto failed;
+ }
+ sparse_init_early_section(nid, map, pnum, 0);
}
- check_usemap_section_nr(nid, usage);
- sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
- SECTION_IS_EARLY);
- usage = (void *) usage + mem_section_usage_size();
}
+ sparse_usage_fini();
sparse_buffer_fini();
return;
failed:
- /* We failed to allocate, mark all the following pnums as not present */
+ /*
+ * We failed to allocate, mark all the following pnums as not present,
+ * except the ones already initialized earlier.
+ */
for_each_present_section_nr(pnum_begin, pnum) {
- struct mem_section *ms;
-
if (pnum >= pnum_end)
break;
ms = __nr_to_section(pnum);
- ms->section_mem_map = 0;
+ if (!preinited_vmemmap_section(ms))
+ ms->section_mem_map = 0;
}
}
diff --git a/mm/swap.c b/mm/swap.c
index b81cce146eb2..77b2d5997873 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -956,8 +956,6 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
unlock_page_lruvec_irqrestore(lruvec, flags);
lruvec = NULL;
}
- if (put_devmap_managed_folio_refs(folio, nr_refs))
- continue;
if (folio_ref_sub_and_test(folio, nr_refs))
free_zone_device_folio(folio);
continue;
diff --git a/mm/swap.h b/mm/swap.h
index 274dcc6219a0..6f4a3f927edb 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -51,7 +51,6 @@ static inline pgoff_t swap_cache_index(swp_entry_t entry)
}
void show_swap_cache_info(void);
-bool add_to_swap(struct folio *folio);
void *get_shadow_from_swap_cache(swp_entry_t entry);
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
gfp_t gfp, void **shadowp);
@@ -164,11 +163,6 @@ struct folio *filemap_get_incore_folio(struct address_space *mapping,
return filemap_get_folio(mapping, index);
}
-static inline bool add_to_swap(struct folio *folio)
-{
- return false;
-}
-
static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
{
return NULL;
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index 1007c30f12e2..de779fed8c21 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -92,8 +92,7 @@ void swap_cgroup_record(struct folio *folio, unsigned short id,
*/
unsigned short swap_cgroup_clear(swp_entry_t ent, unsigned int nr_ents)
{
- pgoff_t offset = swp_offset(ent);
- pgoff_t end = offset + nr_ents;
+ pgoff_t offset, end;
struct swap_cgroup *map;
unsigned short old, iter = 0;
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
deleted file mode 100644
index 9c7c171df7ba..000000000000
--- a/mm/swap_slots.c
+++ /dev/null
@@ -1,295 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Manage cache of swap slots to be used for and returned from
- * swap.
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Author: Tim Chen <tim.c.chen@linux.intel.com>
- *
- * We allocate the swap slots from the global pool and put
- * it into local per cpu caches. This has the advantage
- * of no needing to acquire the swap_info lock every time
- * we need a new slot.
- *
- * There is also opportunity to simply return the slot
- * to local caches without needing to acquire swap_info
- * lock. We do not reuse the returned slots directly but
- * move them back to the global pool in a batch. This
- * allows the slots to coalesce and reduce fragmentation.
- *
- * The swap entry allocated is marked with SWAP_HAS_CACHE
- * flag in map_count that prevents it from being allocated
- * again from the global pool.
- *
- * The swap slots cache is protected by a mutex instead of
- * a spin lock as when we search for slots with scan_swap_map,
- * we can possibly sleep.
- */
-
-#include <linux/swap_slots.h>
-#include <linux/cpu.h>
-#include <linux/cpumask.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/mutex.h>
-#include <linux/mm.h>
-
-static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots);
-static bool swap_slot_cache_active;
-bool swap_slot_cache_enabled;
-static bool swap_slot_cache_initialized;
-static DEFINE_MUTEX(swap_slots_cache_mutex);
-/* Serialize swap slots cache enable/disable operations */
-static DEFINE_MUTEX(swap_slots_cache_enable_mutex);
-
-static void __drain_swap_slots_cache(void);
-
-#define use_swap_slot_cache (swap_slot_cache_active && swap_slot_cache_enabled)
-
-static void deactivate_swap_slots_cache(void)
-{
- mutex_lock(&swap_slots_cache_mutex);
- swap_slot_cache_active = false;
- __drain_swap_slots_cache();
- mutex_unlock(&swap_slots_cache_mutex);
-}
-
-static void reactivate_swap_slots_cache(void)
-{
- mutex_lock(&swap_slots_cache_mutex);
- swap_slot_cache_active = true;
- mutex_unlock(&swap_slots_cache_mutex);
-}
-
-/* Must not be called with cpu hot plug lock */
-void disable_swap_slots_cache_lock(void)
-{
- mutex_lock(&swap_slots_cache_enable_mutex);
- swap_slot_cache_enabled = false;
- if (swap_slot_cache_initialized) {
- /* serialize with cpu hotplug operations */
- cpus_read_lock();
- __drain_swap_slots_cache();
- cpus_read_unlock();
- }
-}
-
-static void __reenable_swap_slots_cache(void)
-{
- swap_slot_cache_enabled = has_usable_swap();
-}
-
-void reenable_swap_slots_cache_unlock(void)
-{
- __reenable_swap_slots_cache();
- mutex_unlock(&swap_slots_cache_enable_mutex);
-}
-
-static bool check_cache_active(void)
-{
- long pages;
-
- if (!swap_slot_cache_enabled)
- return false;
-
- pages = get_nr_swap_pages();
- if (!swap_slot_cache_active) {
- if (pages > num_online_cpus() *
- THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE)
- reactivate_swap_slots_cache();
- goto out;
- }
-
- /* if global pool of slot caches too low, deactivate cache */
- if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE)
- deactivate_swap_slots_cache();
-out:
- return swap_slot_cache_active;
-}
-
-static int alloc_swap_slot_cache(unsigned int cpu)
-{
- struct swap_slots_cache *cache;
- swp_entry_t *slots;
-
- /*
- * Do allocation outside swap_slots_cache_mutex
- * as kvzalloc could trigger reclaim and folio_alloc_swap,
- * which can lock swap_slots_cache_mutex.
- */
- slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
- GFP_KERNEL);
- if (!slots)
- return -ENOMEM;
-
- mutex_lock(&swap_slots_cache_mutex);
- cache = &per_cpu(swp_slots, cpu);
- if (cache->slots) {
- /* cache already allocated */
- mutex_unlock(&swap_slots_cache_mutex);
-
- kvfree(slots);
-
- return 0;
- }
-
- if (!cache->lock_initialized) {
- mutex_init(&cache->alloc_lock);
- cache->lock_initialized = true;
- }
- cache->nr = 0;
- cache->cur = 0;
- cache->n_ret = 0;
- /*
- * We initialized alloc_lock and free_lock earlier. We use
- * !cache->slots or !cache->slots_ret to know if it is safe to acquire
- * the corresponding lock and use the cache. Memory barrier below
- * ensures the assumption.
- */
- mb();
- cache->slots = slots;
- mutex_unlock(&swap_slots_cache_mutex);
- return 0;
-}
-
-static void drain_slots_cache_cpu(unsigned int cpu, bool free_slots)
-{
- struct swap_slots_cache *cache;
-
- cache = &per_cpu(swp_slots, cpu);
- if (cache->slots) {
- mutex_lock(&cache->alloc_lock);
- swapcache_free_entries(cache->slots + cache->cur, cache->nr);
- cache->cur = 0;
- cache->nr = 0;
- if (free_slots && cache->slots) {
- kvfree(cache->slots);
- cache->slots = NULL;
- }
- mutex_unlock(&cache->alloc_lock);
- }
-}
-
-static void __drain_swap_slots_cache(void)
-{
- unsigned int cpu;
-
- /*
- * This function is called during
- * 1) swapoff, when we have to make sure no
- * left over slots are in cache when we remove
- * a swap device;
- * 2) disabling of swap slot cache, when we run low
- * on swap slots when allocating memory and need
- * to return swap slots to global pool.
- *
- * We cannot acquire cpu hot plug lock here as
- * this function can be invoked in the cpu
- * hot plug path:
- * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback
- * -> memory allocation -> direct reclaim -> folio_alloc_swap
- * -> drain_swap_slots_cache
- *
- * Hence the loop over current online cpu below could miss cpu that
- * is being brought online but not yet marked as online.
- * That is okay as we do not schedule and run anything on a
- * cpu before it has been marked online. Hence, we will not
- * fill any swap slots in slots cache of such cpu.
- * There are no slots on such cpu that need to be drained.
- */
- for_each_online_cpu(cpu)
- drain_slots_cache_cpu(cpu, false);
-}
-
-static int free_slot_cache(unsigned int cpu)
-{
- mutex_lock(&swap_slots_cache_mutex);
- drain_slots_cache_cpu(cpu, true);
- mutex_unlock(&swap_slots_cache_mutex);
- return 0;
-}
-
-void enable_swap_slots_cache(void)
-{
- mutex_lock(&swap_slots_cache_enable_mutex);
- if (!swap_slot_cache_initialized) {
- int ret;
-
- ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache",
- alloc_swap_slot_cache, free_slot_cache);
- if (WARN_ONCE(ret < 0, "Cache allocation failed (%s), operating "
- "without swap slots cache.\n", __func__))
- goto out_unlock;
-
- swap_slot_cache_initialized = true;
- }
-
- __reenable_swap_slots_cache();
-out_unlock:
- mutex_unlock(&swap_slots_cache_enable_mutex);
-}
-
-/* called with swap slot cache's alloc lock held */
-static int refill_swap_slots_cache(struct swap_slots_cache *cache)
-{
- if (!use_swap_slot_cache)
- return 0;
-
- cache->cur = 0;
- if (swap_slot_cache_active)
- cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE,
- cache->slots, 0);
-
- return cache->nr;
-}
-
-swp_entry_t folio_alloc_swap(struct folio *folio)
-{
- swp_entry_t entry;
- struct swap_slots_cache *cache;
-
- entry.val = 0;
-
- if (folio_test_large(folio)) {
- if (IS_ENABLED(CONFIG_THP_SWAP))
- get_swap_pages(1, &entry, folio_order(folio));
- goto out;
- }
-
- /*
- * Preemption is allowed here, because we may sleep
- * in refill_swap_slots_cache(). But it is safe, because
- * accesses to the per-CPU data structure are protected by the
- * mutex cache->alloc_lock.
- *
- * The alloc path here does not touch cache->slots_ret
- * so cache->free_lock is not taken.
- */
- cache = raw_cpu_ptr(&swp_slots);
-
- if (likely(check_cache_active() && cache->slots)) {
- mutex_lock(&cache->alloc_lock);
- if (cache->slots) {
-repeat:
- if (cache->nr) {
- entry = cache->slots[cache->cur];
- cache->slots[cache->cur++].val = 0;
- cache->nr--;
- } else if (refill_swap_slots_cache(cache)) {
- goto repeat;
- }
- }
- mutex_unlock(&cache->alloc_lock);
- if (entry.val)
- goto out;
- }
-
- get_swap_pages(1, &entry, 0);
-out:
- if (mem_cgroup_try_charge_swap(folio, entry)) {
- put_swap_folio(folio, entry);
- entry.val = 0;
- }
- return entry;
-}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index ca42b2be64d9..68fd981b514f 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -20,7 +20,6 @@
#include <linux/blkdev.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
-#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
@@ -85,7 +84,7 @@ void *get_shadow_from_swap_cache(swp_entry_t entry)
/*
* add_to_swap_cache resembles filemap_add_folio on swapper_space,
- * but sets SwapCache flag and private instead of mapping and index.
+ * but sets SwapCache flag and 'swap' instead of mapping and index.
*/
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
gfp_t gfp, void **shadowp)
@@ -167,67 +166,6 @@ void __delete_from_swap_cache(struct folio *folio,
__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}
-/**
- * add_to_swap - allocate swap space for a folio
- * @folio: folio we want to move to swap
- *
- * Allocate swap space for the folio and add the folio to the
- * swap cache.
- *
- * Context: Caller needs to hold the folio lock.
- * Return: Whether the folio was added to the swap cache.
- */
-bool add_to_swap(struct folio *folio)
-{
- swp_entry_t entry;
- int err;
-
- VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
- VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
-
- entry = folio_alloc_swap(folio);
- if (!entry.val)
- return false;
-
- /*
- * XArray node allocations from PF_MEMALLOC contexts could
- * completely exhaust the page allocator. __GFP_NOMEMALLOC
- * stops emergency reserves from being allocated.
- *
- * TODO: this could cause a theoretical memory reclaim
- * deadlock in the swap out path.
- */
- /*
- * Add it to the swap cache.
- */
- err = add_to_swap_cache(folio, entry,
- __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
- if (err)
- /*
- * add_to_swap_cache() doesn't return -EEXIST, so we can safely
- * clear SWAP_HAS_CACHE flag.
- */
- goto fail;
- /*
- * Normally the folio will be dirtied in unmap because its
- * pte should be dirty. A special case is MADV_FREE page. The
- * page's pte could have dirty bit cleared but the folio's
- * SwapBacked flag is still set because clearing the dirty bit
- * and SwapBacked flag has no lock protected. For such folio,
- * unmap will not set dirty bit for it, so folio reclaim will
- * not write the folio out. This can cause data corruption when
- * the folio is swapped in later. Always setting the dirty flag
- * for the folio solves the problem.
- */
- folio_mark_dirty(folio);
-
- return true;
-
-fail:
- put_swap_folio(folio, entry);
- return false;
-}
-
/*
* This must be called only on folios that have
* been verified to be in the swap cache and locked.
@@ -270,9 +208,7 @@ void clear_shadow_from_swap_cache(int type, unsigned long begin,
xa_unlock_irq(&address_space->i_pages);
/* search the next swapcache until we meet end */
- curr >>= SWAP_ADDRESS_SPACE_SHIFT;
- curr++;
- curr <<= SWAP_ADDRESS_SPACE_SHIFT;
+ curr = ALIGN((curr + 1), SWAP_ADDRESS_SPACE_PAGES);
if (curr > end)
break;
}
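The ALIGN() form is arithmetically equivalent to the shift/increment/shift sequence it replaces: both advance curr to the start of the next swap address space. A quick self-contained check (the concrete shift value is illustrative; any power-of-two span behaves the same):

#include <assert.h>
#include <stdio.h>

#define SWAP_ADDRESS_SPACE_SHIFT 14
#define SWAP_ADDRESS_SPACE_PAGES (1UL << SWAP_ADDRESS_SPACE_SHIFT)
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long curr;

	for (curr = 0; curr < 3 * SWAP_ADDRESS_SPACE_PAGES; curr++) {
		unsigned long old_way = ((curr >> SWAP_ADDRESS_SPACE_SHIFT) + 1)
					<< SWAP_ADDRESS_SPACE_SHIFT;
		unsigned long new_way = ALIGN(curr + 1, SWAP_ADDRESS_SPACE_PAGES);

		assert(old_way == new_way);
	}
	printf("shift/increment and ALIGN() agree\n");
	return 0;
}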
@@ -432,17 +368,13 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
bool skip_if_exists)
{
- struct swap_info_struct *si;
+ struct swap_info_struct *si = swp_swap_info(entry);
struct folio *folio;
struct folio *new_folio = NULL;
struct folio *result = NULL;
void *shadow = NULL;
*new_page_allocated = false;
- si = get_swap_device(entry);
- if (!si)
- return NULL;
-
for (;;) {
int err;
/*
@@ -457,13 +389,8 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
/*
* Just skip read ahead for unused swap slot.
- * During swap_off when swap_slot_cache is disabled,
- * we have to handle the race between putting
- * swap entry in swap cache and marking swap slot
- * as SWAP_HAS_CACHE. That's done in later part of code or
- * else swap_off will be aborted if we return NULL.
*/
- if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
+ if (!swap_entry_swapped(si, entry))
goto put_and_return;
/*
@@ -521,7 +448,7 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
if (add_to_swap_cache(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
goto fail_unlock;
- mem_cgroup_swapin_uncharge_swap(entry, 1);
+ memcg1_swapin(entry, 1);
if (shadow)
workingset_refault(new_folio, shadow);
@@ -538,7 +465,6 @@ fail_unlock:
put_swap_folio(new_folio, entry);
folio_unlock(new_folio);
put_and_return:
- put_swap_device(si);
if (!(*new_page_allocated) && new_folio)
folio_put(new_folio);
return result;
@@ -558,11 +484,16 @@ struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr,
struct swap_iocb **plug)
{
+ struct swap_info_struct *si;
bool page_allocated;
struct mempolicy *mpol;
pgoff_t ilx;
struct folio *folio;
+ si = get_swap_device(entry);
+ if (!si)
+ return NULL;
+
mpol = get_vma_policy(vma, addr, 0, &ilx);
folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
&page_allocated, false);
@@ -570,6 +501,8 @@ struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
if (page_allocated)
swap_read_folio(folio, plug);
+
+ put_swap_device(si);
return folio;
}
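With this hunk the get_swap_device()/put_swap_device() pair moves out of __read_swap_cache_async() and into its callers, so any caller must now pin the device itself for the duration of the cache operation. A sketch of the required pattern (error handling elided):

	struct swap_info_struct *si;
	struct folio *folio;
	bool page_allocated;

	si = get_swap_device(entry);	/* pin the device across the cache op */
	if (!si)
		return NULL;
	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					&page_allocated, false);
	/* ... optionally start I/O on the folio ... */
	put_swap_device(si);		/* unpin once the folio is settled */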
diff --git a/mm/swapfile.c b/mm/swapfile.c
index df7c4e8b089c..2eff8b51a945 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -37,7 +37,6 @@
#include <linux/oom.h>
#include <linux/swapfile.h>
#include <linux/export.h>
-#include <linux/swap_slots.h>
#include <linux/sort.h>
#include <linux/completion.h>
#include <linux/suspend.h>
@@ -116,6 +115,18 @@ static atomic_t proc_poll_event = ATOMIC_INIT(0);
atomic_t nr_rotate_swap = ATOMIC_INIT(0);
+struct percpu_swap_cluster {
+ struct swap_info_struct *si[SWAP_NR_ORDERS];
+ unsigned long offset[SWAP_NR_ORDERS];
+ local_lock_t lock;
+};
+
+static DEFINE_PER_CPU(struct percpu_swap_cluster, percpu_swap_cluster) = {
+ .si = { NULL },
+ .offset = { SWAP_ENTRY_INVALID },
+ .lock = INIT_LOCAL_LOCK(),
+};
+
static struct swap_info_struct *swap_type_to_swap_info(int type)
{
if (type >= MAX_SWAPFILES)
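The percpu_swap_cluster introduced here replaces the old per-device si->percpu_cluster with a single global per-CPU cache, keyed by allocation order and protected by a local_lock. A sketch of how it is consulted (condensed from swap_alloc_fast() later in this patch):

	struct swap_info_struct *si;
	unsigned long offset;

	local_lock(&percpu_swap_cluster.lock);		/* pin to this CPU */
	si = this_cpu_read(percpu_swap_cluster.si[order]);
	offset = this_cpu_read(percpu_swap_cluster.offset[order]);
	if (si && offset && get_swap_device_info(si)) {
		/* ... try to keep allocating from (si, offset) ... */
		put_swap_device(si);
	}
	local_unlock(&percpu_swap_cluster.lock);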
@@ -158,10 +169,8 @@ static long swap_usage_in_pages(struct swap_info_struct *si)
#define TTRS_UNMAPPED 0x2
/* Reclaim the swap entry if swap is getting full */
#define TTRS_FULL 0x4
-/* Reclaim directly, bypass the slot cache and don't touch device lock */
-#define TTRS_DIRECT 0x8
-static bool swap_is_has_cache(struct swap_info_struct *si,
+static bool swap_only_has_cache(struct swap_info_struct *si,
unsigned long offset, int nr_pages)
{
unsigned char *map = si->swap_map + offset;
@@ -210,6 +219,7 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si,
int ret, nr_pages;
bool need_reclaim;
+again:
folio = filemap_get_folio(address_space, swap_cache_index(entry));
if (IS_ERR(folio))
return 0;
@@ -227,8 +237,16 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si,
if (!folio_trylock(folio))
goto out;
- /* offset could point to the middle of a large folio */
+ /*
+ * Offset could point to the middle of a large folio, or the folio
+ * may no longer cover the expected offset by the time it is locked.
+ */
entry = folio->swap;
+ if (offset < swp_offset(entry) || offset >= swp_offset(entry) + nr_pages) {
+ folio_unlock(folio);
+ folio_put(folio);
+ goto again;
+ }
offset = swp_offset(entry);
need_reclaim = ((flags & TTRS_ANYWAY) ||
@@ -243,28 +261,13 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si,
* reference or pending writeback, and can't be allocated to others.
*/
ci = lock_cluster(si, offset);
- need_reclaim = swap_is_has_cache(si, offset, nr_pages);
+ need_reclaim = swap_only_has_cache(si, offset, nr_pages);
unlock_cluster(ci);
if (!need_reclaim)
goto out_unlock;
- if (!(flags & TTRS_DIRECT)) {
- /* Free through slot cache */
- delete_from_swap_cache(folio);
- folio_set_dirty(folio);
- ret = nr_pages;
- goto out_unlock;
- }
-
- xa_lock_irq(&address_space->i_pages);
- __delete_from_swap_cache(folio, entry, NULL);
- xa_unlock_irq(&address_space->i_pages);
- folio_ref_sub(folio, nr_pages);
+ delete_from_swap_cache(folio);
folio_set_dirty(folio);
-
- ci = lock_cluster(si, offset);
- swap_entry_range_free(si, ci, entry, nr_pages);
- unlock_cluster(ci);
ret = nr_pages;
out_unlock:
folio_unlock(folio);
@@ -479,15 +482,6 @@ static void move_cluster(struct swap_info_struct *si,
static void swap_cluster_schedule_discard(struct swap_info_struct *si,
struct swap_cluster_info *ci)
{
- unsigned int idx = cluster_index(si, ci);
- /*
- * If scan_swap_map_slots() can't find a free cluster, it will check
- * si->swap_map directly. To make sure the discarding cluster isn't
- * taken by scan_swap_map_slots(), mark the swap entries bad (occupied).
- * It will be cleared after discard
- */
- memset(si->swap_map + idx * SWAPFILE_CLUSTER,
- SWAP_MAP_BAD, SWAPFILE_CLUSTER);
VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE);
move_cluster(si, ci, &si->discard_clusters, CLUSTER_FLAG_DISCARD);
schedule_work(&si->discard_work);
@@ -556,7 +550,7 @@ static bool swap_do_scheduled_discard(struct swap_info_struct *si)
ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list);
/*
* Delete the cluster from list to prepare for discard, but keep
- * the CLUSTER_FLAG_DISCARD flag, there could be percpu_cluster
+ * the CLUSTER_FLAG_DISCARD flag, percpu_swap_cluster could be
* pointing to it, or ran into by relocate_cluster.
*/
list_del(&ci->list);
@@ -571,8 +565,6 @@ static bool swap_do_scheduled_discard(struct swap_info_struct *si)
* return the cluster to allocation list.
*/
ci->flags = CLUSTER_FLAG_NONE;
- memset(si->swap_map + idx * SWAPFILE_CLUSTER,
- 0, SWAPFILE_CLUSTER);
__free_cluster(si, ci);
spin_unlock(&ci->lock);
ret = true;
@@ -699,7 +691,7 @@ static bool cluster_reclaim_range(struct swap_info_struct *si,
offset++;
break;
case SWAP_HAS_CACHE:
- nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT);
+ nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
if (nr_reclaim > 0)
offset += nr_reclaim;
else
@@ -730,6 +722,9 @@ static bool cluster_scan_range(struct swap_info_struct *si,
unsigned long offset, end = start + nr_pages;
unsigned char *map = si->swap_map;
+ if (cluster_is_empty(ci))
+ return true;
+
for (offset = start; offset < end; offset++) {
switch (READ_ONCE(map[offset])) {
case 0:
@@ -821,14 +816,15 @@ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si,
out:
relocate_cluster(si, ci);
unlock_cluster(ci);
- if (si->flags & SWP_SOLIDSTATE)
- __this_cpu_write(si->percpu_cluster->next[order], next);
- else
+ if (si->flags & SWP_SOLIDSTATE) {
+ this_cpu_write(percpu_swap_cluster.offset[order], next);
+ this_cpu_write(percpu_swap_cluster.si[order], si);
+ } else {
si->global_cluster->next[order] = next;
+ }
return found;
}
-/* Return true if reclaimed a whole cluster */
static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force)
{
long to_scan = 1;
@@ -849,7 +845,7 @@ static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force)
if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) {
spin_unlock(&ci->lock);
nr_reclaim = __try_to_reclaim_swap(si, offset,
- TTRS_ANYWAY | TTRS_DIRECT);
+ TTRS_ANYWAY);
spin_lock(&ci->lock);
if (nr_reclaim) {
offset += abs(nr_reclaim);
@@ -879,27 +875,29 @@ static void swap_reclaim_work(struct work_struct *work)
}
/*
- * Try to get swap entries with specified order from current cpu's swap entry
- * pool (a cluster). This might involve allocating a new cluster for current CPU
- * too.
+ * Try to allocate swap entries with the specified order and, if needed,
+ * set a new cluster for the current CPU too.
*/
static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int order,
unsigned char usage)
{
struct swap_cluster_info *ci;
- unsigned int offset, found = 0;
+ unsigned int offset = SWAP_ENTRY_INVALID, found = SWAP_ENTRY_INVALID;
- if (si->flags & SWP_SOLIDSTATE) {
- /* Fast path using per CPU cluster */
- local_lock(&si->percpu_cluster->lock);
- offset = __this_cpu_read(si->percpu_cluster->next[order]);
- } else {
+ /*
+ * A swapfile is not a block device, so we are unable
+ * to allocate large entries from it.
+ */
+ if (order && !(si->flags & SWP_BLKDEV))
+ return 0;
+
+ if (!(si->flags & SWP_SOLIDSTATE)) {
/* Serialize HDD SWAP allocation for each device. */
spin_lock(&si->global_cluster_lock);
offset = si->global_cluster->next[order];
- }
+ if (offset == SWAP_ENTRY_INVALID)
+ goto new_cluster;
- if (offset) {
ci = lock_cluster(si, offset);
/* Cluster could have been used by another order */
if (cluster_is_usable(ci, order)) {
@@ -990,9 +988,7 @@ new_cluster:
}
}
done:
- if (si->flags & SWP_SOLIDSTATE)
- local_unlock(&si->percpu_cluster->lock);
- else
+ if (!(si->flags & SWP_SOLIDSTATE))
spin_unlock(&si->global_cluster_lock);
return found;
}
@@ -1106,7 +1102,7 @@ static void swap_usage_sub(struct swap_info_struct *si, unsigned int nr_entries)
/*
* If device is not full, and SWAP_USAGE_OFFLIST_BIT is set,
- * remove it from the plist.
+ * add it to the plist.
*/
if (unlikely(val & SWAP_USAGE_OFFLIST_BIT))
add_to_avail_list(si, false);
@@ -1160,61 +1156,6 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
swap_usage_sub(si, nr_entries);
}
-static int cluster_alloc_swap(struct swap_info_struct *si,
- unsigned char usage, int nr,
- swp_entry_t slots[], int order)
-{
- int n_ret = 0;
-
- while (n_ret < nr) {
- unsigned long offset = cluster_alloc_swap_entry(si, order, usage);
-
- if (!offset)
- break;
- slots[n_ret++] = swp_entry(si->type, offset);
- }
-
- return n_ret;
-}
-
-static int scan_swap_map_slots(struct swap_info_struct *si,
- unsigned char usage, int nr,
- swp_entry_t slots[], int order)
-{
- unsigned int nr_pages = 1 << order;
-
- /*
- * We try to cluster swap pages by allocating them sequentially
- * in swap. Once we've allocated SWAPFILE_CLUSTER pages this
- * way, however, we resort to first-free allocation, starting
- * a new cluster. This prevents us from scattering swap pages
- * all over the entire swap partition, so that we reduce
- * overall disk seek times between swap pages. -- sct
- * But we do now try to find an empty cluster. -Andrea
- * And we let swap pages go all over an SSD partition. Hugh
- */
- if (order > 0) {
- /*
- * Should not even be attempting large allocations when huge
- * page swap is disabled. Warn and fail the allocation.
- */
- if (!IS_ENABLED(CONFIG_THP_SWAP) ||
- nr_pages > SWAPFILE_CLUSTER) {
- VM_WARN_ON_ONCE(1);
- return 0;
- }
-
- /*
- * Swapfile is not block device so unable
- * to allocate large entries.
- */
- if (!(si->flags & SWP_BLKDEV))
- return 0;
- }
-
- return cluster_alloc_swap(si, usage, nr, slots, order);
-}
-
static bool get_swap_device_info(struct swap_info_struct *si)
{
if (!percpu_ref_tryget_live(&si->users))
@@ -1231,39 +1172,65 @@ static bool get_swap_device_info(struct swap_info_struct *si)
return true;
}
-int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order)
+/*
+ * Fast path: try to get swap entries with the specified order from the
+ * current CPU's swap entry pool (a cluster).
+ */
+static bool swap_alloc_fast(swp_entry_t *entry,
+ int order)
{
- int order = swap_entry_order(entry_order);
- unsigned long size = 1 << order;
- struct swap_info_struct *si, *next;
- long avail_pgs;
- int n_ret = 0;
- int node;
+ struct swap_cluster_info *ci;
+ struct swap_info_struct *si;
+ unsigned int offset, found = SWAP_ENTRY_INVALID;
- spin_lock(&swap_avail_lock);
+ /*
+ * Once allocated, swap_info_struct will never be completely freed,
+ * so checking its liveness with get_swap_device_info() is enough.
+ */
+ si = this_cpu_read(percpu_swap_cluster.si[order]);
+ offset = this_cpu_read(percpu_swap_cluster.offset[order]);
+ if (!si || !offset || !get_swap_device_info(si))
+ return false;
- avail_pgs = atomic_long_read(&nr_swap_pages) / size;
- if (avail_pgs <= 0) {
- spin_unlock(&swap_avail_lock);
- goto noswap;
+ ci = lock_cluster(si, offset);
+ if (cluster_is_usable(ci, order)) {
+ if (cluster_is_empty(ci))
+ offset = cluster_offset(si, ci);
+ found = alloc_swap_scan_cluster(si, ci, offset, order, SWAP_HAS_CACHE);
+ if (found)
+ *entry = swp_entry(si->type, found);
+ } else {
+ unlock_cluster(ci);
}
- n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs);
+ put_swap_device(si);
+ return !!found;
+}
- atomic_long_sub(n_goal * size, &nr_swap_pages);
+/* Rotate the device and switch to a new cluster */
+static bool swap_alloc_slow(swp_entry_t *entry,
+ int order)
+{
+ int node;
+ unsigned long offset;
+ struct swap_info_struct *si, *next;
-start_over:
node = numa_node_id();
+ spin_lock(&swap_avail_lock);
+start_over:
plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
- /* requeue si to after same-priority siblings */
+ /* Rotate the device and switch to a new cluster */
plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
spin_unlock(&swap_avail_lock);
if (get_swap_device_info(si)) {
- n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
- n_goal, swp_entries, order);
+ offset = cluster_alloc_swap_entry(si, order, SWAP_HAS_CACHE);
put_swap_device(si);
- if (n_ret || size > 1)
- goto check_out;
+ if (offset) {
+ *entry = swp_entry(si->type, offset);
+ return true;
+ }
+ if (order)
+ return false;
}
spin_lock(&swap_avail_lock);
@@ -1281,15 +1248,68 @@ start_over:
if (plist_node_empty(&next->avail_lists[node]))
goto start_over;
}
-
spin_unlock(&swap_avail_lock);
+ return false;
+}
+
+/**
+ * folio_alloc_swap - allocate swap space for a folio
+ * @folio: folio we want to move to swap
+ * @gfp: gfp mask for shadow nodes
+ *
+ * Allocate swap space for the folio and add the folio to the
+ * swap cache.
+ *
+ * Context: Caller needs to hold the folio lock.
+ * Return: 0 if the folio was added to the swap cache, or a negative
+ * error code on failure.
+ */
+int folio_alloc_swap(struct folio *folio, gfp_t gfp)
+{
+ unsigned int order = folio_order(folio);
+ unsigned int size = 1 << order;
+ swp_entry_t entry = {};
+
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+ VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
+
+ /*
+ * Should not even be attempting large allocations when huge
+ * page swap is disabled. Warn and fail the allocation.
+ */
+ if (order && (!IS_ENABLED(CONFIG_THP_SWAP) || size > SWAPFILE_CLUSTER)) {
+ VM_WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ local_lock(&percpu_swap_cluster.lock);
+ if (!swap_alloc_fast(&entry, order))
+ swap_alloc_slow(&entry, order);
+ local_unlock(&percpu_swap_cluster.lock);
-check_out:
- if (n_ret < n_goal)
- atomic_long_add((long)(n_goal - n_ret) * size,
- &nr_swap_pages);
-noswap:
- return n_ret;
+ /* Need to call this even if allocation failed, for MEMCG_SWAP_FAIL. */
+ if (mem_cgroup_try_charge_swap(folio, entry))
+ goto out_free;
+
+ if (!entry.val)
+ return -ENOMEM;
+
+ /*
+ * XArray node allocations from PF_MEMALLOC contexts could
+ * completely exhaust the page allocator. __GFP_NOMEMALLOC
+ * stops emergency reserves from being allocated.
+ *
+ * TODO: this could cause a theoretical memory reclaim
+ * deadlock in the swap out path.
+ */
+ if (add_to_swap_cache(folio, entry, gfp | __GFP_NOMEMALLOC, NULL))
+ goto out_free;
+
+ atomic_long_sub(size, &nr_swap_pages);
+ return 0;
+
+out_free:
+ put_swap_folio(folio, entry);
+ return -ENOMEM;
}
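folio_alloc_swap() now subsumes what add_to_swap() did in mm/swap_state.c, so reclaim callers collapse to a single call. A hedged sketch of the caller side (assumed to mirror the vmscan update elsewhere in this series; the label is hypothetical):

	if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN))
		goto activate_locked;		/* hypothetical reclaim label */
	/*
	 * The folio is in the swap cache now; marking it dirty ensures
	 * reclaim writes it out even when the PTE dirty bit was lost
	 * (the MADV_FREE case described in the deleted add_to_swap()).
	 */
	folio_mark_dirty(folio);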
static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
@@ -1574,7 +1594,7 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry)
return;
ci = lock_cluster(si, offset);
- if (swap_is_has_cache(si, offset, size))
+ if (swap_only_has_cache(si, offset, size))
swap_entry_range_free(si, ci, entry, size);
else {
for (int i = 0; i < size; i++, entry.val++) {
@@ -1585,25 +1605,6 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry)
unlock_cluster(ci);
}
-void swapcache_free_entries(swp_entry_t *entries, int n)
-{
- int i;
- struct swap_cluster_info *ci;
- struct swap_info_struct *si = NULL;
-
- if (n <= 0)
- return;
-
- for (i = 0; i < n; ++i) {
- si = _swap_info_get(entries[i]);
- if (si) {
- ci = lock_cluster(si, swp_offset(entries[i]));
- swap_entry_range_free(si, ci, entries[i], 1);
- unlock_cluster(ci);
- }
- }
-}
-
int __swap_count(swp_entry_t entry)
{
struct swap_info_struct *si = swp_swap_info(entry);
@@ -1617,7 +1618,7 @@ int __swap_count(swp_entry_t entry)
* This does not give an exact answer when swap count is continued,
* but does include the high COUNT_CONTINUED flag to allow for that.
*/
-int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
+bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry)
{
pgoff_t offset = swp_offset(entry);
struct swap_cluster_info *ci;
@@ -1626,7 +1627,7 @@ int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
ci = lock_cluster(si, offset);
count = swap_count(si->swap_map[offset]);
unlock_cluster(ci);
- return count;
+ return !!count;
}
/*
@@ -1712,7 +1713,7 @@ static bool folio_swapped(struct folio *folio)
return false;
if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio)))
- return swap_swapcount(si, entry) != 0;
+ return swap_entry_swapped(si, entry);
return swap_page_trans_huge_swapped(si, entry, folio_order(folio));
}
@@ -1786,9 +1787,6 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr)
bool any_only_cache = false;
unsigned long offset;
- if (non_swap_entry(entry))
- return;
-
si = get_swap_device(entry);
if (!si)
return;
@@ -1847,6 +1845,7 @@ out:
swp_entry_t get_swap_page_of_type(int type)
{
struct swap_info_struct *si = swap_type_to_swap_info(type);
+ unsigned long offset;
swp_entry_t entry = {0};
if (!si)
@@ -1854,8 +1853,13 @@ swp_entry_t get_swap_page_of_type(int type)
/* This is called for allocating swap entry, not cache */
if (get_swap_device_info(si)) {
- if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry, 0))
- atomic_long_dec(&nr_swap_pages);
+ if (si->flags & SWP_WRITEOK) {
+ offset = cluster_alloc_swap_entry(si, 0, 1);
+ if (offset) {
+ entry = swp_entry(si->type, offset);
+ atomic_long_dec(&nr_swap_pages);
+ }
+ }
put_swap_device(si);
}
fail:
@@ -2616,21 +2620,6 @@ static void reinsert_swap_info(struct swap_info_struct *si)
spin_unlock(&swap_lock);
}
-static bool __has_usable_swap(void)
-{
- return !plist_head_empty(&swap_active_head);
-}
-
-bool has_usable_swap(void)
-{
- bool ret;
-
- spin_lock(&swap_lock);
- ret = __has_usable_swap();
- spin_unlock(&swap_lock);
- return ret;
-}
-
/*
* Called after clearing SWP_WRITEOK, ensures cluster_alloc_range
* see the updated flags, so there will be no more allocations.
@@ -2649,6 +2638,28 @@ static void wait_for_allocation(struct swap_info_struct *si)
}
}
+/*
+ * Called after the swap device's reference count is dead, so
+ * neither scanning nor allocation can still be using it.
+ */
+static void flush_percpu_swap_cluster(struct swap_info_struct *si)
+{
+ int cpu, i;
+ struct swap_info_struct **pcp_si;
+
+ for_each_possible_cpu(cpu) {
+ pcp_si = per_cpu_ptr(percpu_swap_cluster.si, cpu);
+ /*
+ * Invalidate the percpu swap cluster cache: si->users
+ * is dead, so no new user will point to it; just flush
+ * any existing users.
+ */
+ for (i = 0; i < SWAP_NR_ORDERS; i++)
+ cmpxchg(&pcp_si[i], si, NULL);
+ }
+}
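The cmpxchg() here is the lock-free way to clear only those per-CPU slots that still point at the dying device; it is the atomic form of:

	if (pcp_si[i] == si)		/* only if this CPU cached the dying device */
		pcp_si[i] = NULL;	/* drop the stale pointer */

A concurrent allocator that already read the old pointer is harmless: get_swap_device_info() fails once si->users is dead.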
+
SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
{
struct swap_info_struct *p = NULL;
@@ -2721,8 +2732,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
wait_for_allocation(p);
- disable_swap_slots_cache_lock();
-
set_current_oom_origin();
err = try_to_unuse(p->type);
clear_current_oom_origin();
@@ -2730,12 +2739,9 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
if (err) {
/* re-insert swap space back into swap_list */
reinsert_swap_info(p);
- reenable_swap_slots_cache_unlock();
goto out_dput;
}
- reenable_swap_slots_cache_unlock();
-
/*
* Wait for swap operations protected by get/put_swap_device()
* to complete. Because of synchronize_rcu() here, all swap
@@ -2750,6 +2756,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
flush_work(&p->discard_work);
flush_work(&p->reclaim_work);
+ flush_percpu_swap_cluster(p);
destroy_swap_extents(p);
if (p->flags & SWP_CONTINUED)
@@ -2777,8 +2784,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
arch_swap_invalidate_area(p->type);
zswap_swapoff(p->type);
mutex_unlock(&swapon_mutex);
- free_percpu(p->percpu_cluster);
- p->percpu_cluster = NULL;
kfree(p->global_cluster);
p->global_cluster = NULL;
vfree(swap_map);
@@ -3124,13 +3129,6 @@ static unsigned long read_swap_header(struct swap_info_struct *si,
return maxpages;
}
-#define SWAP_CLUSTER_INFO_COLS \
- DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info))
-#define SWAP_CLUSTER_SPACE_COLS \
- DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER)
-#define SWAP_CLUSTER_COLS \
- max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
-
static int setup_swap_map_and_extents(struct swap_info_struct *si,
union swap_header *swap_header,
unsigned char *swap_map,
@@ -3170,14 +3168,21 @@ static int setup_swap_map_and_extents(struct swap_info_struct *si,
return nr_extents;
}
+#define SWAP_CLUSTER_INFO_COLS \
+ DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info))
+#define SWAP_CLUSTER_SPACE_COLS \
+ DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER)
+#define SWAP_CLUSTER_COLS \
+ max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
+
static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
union swap_header *swap_header,
unsigned long maxpages)
{
unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
struct swap_cluster_info *cluster_info;
- unsigned long i, j, k, idx;
- int cpu, err = -ENOMEM;
+ unsigned long i, j, idx;
+ int err = -ENOMEM;
cluster_info = kvcalloc(nr_clusters, sizeof(*cluster_info), GFP_KERNEL);
if (!cluster_info)
@@ -3186,20 +3191,7 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
for (i = 0; i < nr_clusters; i++)
spin_lock_init(&cluster_info[i].lock);
- if (si->flags & SWP_SOLIDSTATE) {
- si->percpu_cluster = alloc_percpu(struct percpu_cluster);
- if (!si->percpu_cluster)
- goto err_free;
-
- for_each_possible_cpu(cpu) {
- struct percpu_cluster *cluster;
-
- cluster = per_cpu_ptr(si->percpu_cluster, cpu);
- for (i = 0; i < SWAP_NR_ORDERS; i++)
- cluster->next[i] = SWAP_ENTRY_INVALID;
- local_lock_init(&cluster->lock);
- }
- } else {
+ if (!(si->flags & SWP_SOLIDSTATE)) {
si->global_cluster = kmalloc(sizeof(*si->global_cluster),
GFP_KERNEL);
if (!si->global_cluster)
@@ -3237,8 +3229,7 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
* Reduce false cache line sharing between cluster_info and
* sharing same address space.
*/
- for (k = 0; k < SWAP_CLUSTER_COLS; k++) {
- j = k % SWAP_CLUSTER_COLS;
+ for (j = 0; j < SWAP_CLUSTER_COLS; j++) {
for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
struct swap_cluster_info *ci;
idx = i * SWAP_CLUSTER_COLS + j;
@@ -3453,8 +3444,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
mutex_lock(&swapon_mutex);
prio = -1;
if (swap_flags & SWAP_FLAG_PREFER)
- prio =
- (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
+ prio = swap_flags & SWAP_FLAG_PRIO_MASK;
enable_swap_info(si, prio, swap_map, cluster_info, zeromap);
pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s\n",
@@ -3478,8 +3468,6 @@ free_swap_address_space:
bad_swap_unlock_inode:
inode_unlock(inode);
bad_swap:
- free_percpu(si->percpu_cluster);
- si->percpu_cluster = NULL;
kfree(si->global_cluster);
si->global_cluster = NULL;
inode = NULL;
@@ -3503,8 +3491,6 @@ out:
putname(name);
if (inode)
inode_unlock(inode);
- if (!error)
- enable_swap_slots_cache();
return error;
}
@@ -3531,7 +3517,6 @@ void si_swapinfo(struct sysinfo *val)
* Returns error code in following case.
* - success -> 0
* - swp_entry is invalid -> EINVAL
- * - swp_entry is migration entry -> EINVAL
* - swap-cache reference is requested but there is already one. -> EEXIST
* - swap-cache reference is requested but the entry is not used. -> ENOENT
* - swap-mapped reference requested but needs continued swap count. -> ENOMEM
@@ -3795,8 +3780,8 @@ outer:
* into, carry if so, or else fail until a new continuation page is allocated;
* when the original swap_map count is decremented from 0 with continuation,
* borrow from the continuation and report whether it still holds more.
- * Called while __swap_duplicate() or swap_entry_free() holds swap or cluster
- * lock.
+ * Called while __swap_duplicate() or a caller of __swap_entry_free_locked()
+ * holds the cluster lock.
*/
static bool swap_count_continued(struct swap_info_struct *si,
pgoff_t offset, unsigned char count)
@@ -3901,6 +3886,11 @@ static void free_swap_count_continuations(struct swap_info_struct *si)
}
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
+static bool __has_usable_swap(void)
+{
+ return !plist_head_empty(&swap_active_head);
+}
+
void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
struct swap_info_struct *si, *next;
diff --git a/mm/truncate.c b/mm/truncate.c
index 76d8fcd89bd0..5d98054094d1 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -78,8 +78,22 @@ static void truncate_folio_batch_exceptionals(struct address_space *mapping,
if (dax_mapping(mapping)) {
for (i = j; i < nr; i++) {
- if (xa_is_value(fbatch->folios[i]))
+ if (xa_is_value(fbatch->folios[i])) {
+ /*
+ * File systems should already have called
+ * dax_break_layout_entry() to remove all DAX
+ * entries while holding a lock to prevent
+ * establishing new entries. Therefore we
+ * shouldn't find any here.
+ */
+ WARN_ON_ONCE(1);
+
+ /*
+ * Delete the mapping so truncate_pagecache()
+ * doesn't loop forever.
+ */
dax_delete_mapping_entry(mapping, indices[i]);
+ }
}
goto out;
}
@@ -178,6 +192,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
{
loff_t pos = folio_pos(folio);
unsigned int offset, length;
+ struct page *split_at, *split_at2;
if (pos < start)
offset = start - pos;
@@ -207,8 +222,42 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
folio_invalidate(folio, offset, length);
if (!folio_test_large(folio))
return true;
- if (split_folio(folio) == 0)
+
+ split_at = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE);
+ split_at2 = folio_page(folio,
+ PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE);
+
+ if (!try_folio_split(folio, split_at, NULL)) {
+ /*
+ * try to split at offset + length to make sure folios within
+ * the range can be dropped, especially to avoid memory waste
+ * for shmem truncate
+ */
+ struct folio *folio2 = page_folio(split_at2);
+
+ if (!folio_try_get(folio2))
+ goto no_split;
+
+ if (!folio_test_large(folio2))
+ goto out;
+
+ if (!folio_trylock(folio2))
+ goto out;
+
+ /*
+ * Make sure folio2 is still large and has not changed its mapping,
+ * then try to split it; the split result does not matter here.
+ */
+ if (folio_test_large(folio2) &&
+ folio2->mapping == folio->mapping)
+ try_folio_split(folio2, split_at2, NULL);
+
+ folio_unlock(folio2);
+out:
+ folio_put(folio2);
+no_split:
return true;
+ }
if (folio_test_dirty(folio))
return false;
truncate_inode_folio(folio->mapping, folio);
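To make the two split points concrete, here is a worked example with hypothetical numbers (4 KiB pages):

	/*
	 * A 16-page folio at pos 0, truncating bytes [PAGE_SIZE, 3 * PAGE_SIZE):
	 * offset = PAGE_SIZE, length = 2 * PAGE_SIZE, so
	 *
	 *   split_at  = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE)
	 *             = page 1
	 *   split_at2 = folio_page(folio, PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE)
	 *             = page 3
	 *
	 * Splitting at both boundaries lets pages 1-2 be dropped while pages 0
	 * and 3-15 stay cached, instead of pinning the whole large folio.
	 */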
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index d06453fa8aba..fbf2cf62ab9f 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -86,14 +86,10 @@ static struct vm_area_struct *uffd_lock_vma(struct mm_struct *mm,
mmap_read_lock(mm);
vma = find_vma_and_prepare_anon(mm, address);
if (!IS_ERR(vma)) {
- /*
- * We cannot use vma_start_read() as it may fail due to
- * false locked (see comment in vma_start_read()). We
- * can avoid that by directly locking vm_lock under
- * mmap_lock, which guarantees that nobody can lock the
- * vma for write (vma_start_write()) under us.
- */
- down_read(&vma->vm_lock->lock);
+ bool locked = vma_start_read_locked(vma);
+
+ if (!locked)
+ vma = ERR_PTR(-EAGAIN);
}
mmap_read_unlock(mm);
@@ -1563,16 +1559,24 @@ static int uffd_move_lock(struct mm_struct *mm,
mmap_read_lock(mm);
err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap);
- if (!err) {
- /*
- * See comment in uffd_lock_vma() as to why not using
- * vma_start_read() here.
- */
- down_read(&(*dst_vmap)->vm_lock->lock);
- if (*dst_vmap != *src_vmap)
- down_read_nested(&(*src_vmap)->vm_lock->lock,
- SINGLE_DEPTH_NESTING);
+ if (err)
+ goto out;
+
+ if (!vma_start_read_locked(*dst_vmap)) {
+ err = -EAGAIN;
+ goto out;
+ }
+
+ /* Nothing further to do if both vmas are locked. */
+ if (*dst_vmap == *src_vmap)
+ goto out;
+
+ if (!vma_start_read_locked_nested(*src_vmap, SINGLE_DEPTH_NESTING)) {
+ /* Undo dst_vmap locking if src_vmap failed to lock */
+ vma_end_read(*dst_vmap);
+ err = -EAGAIN;
}
+out:
mmap_read_unlock(mm);
return err;
}
diff --git a/mm/vma.c b/mm/vma.c
index 71ca012c616c..5cdc5612bfc1 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -52,10 +52,9 @@ struct mmap_state {
.pgoff = (map_)->pgoff, \
.file = (map_)->file, \
.prev = (map_)->prev, \
- .vma = vma_, \
+ .middle = vma_, \
.next = (vma_) ? NULL : (map_)->next, \
.state = VMA_MERGE_START, \
- .merge_flags = VMG_FLAG_DEFAULT, \
}
static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next)
@@ -107,29 +106,44 @@ static inline bool are_anon_vmas_compatible(struct vm_area_struct *vma1,
* init_multi_vma_prep() - Initializer for struct vma_prepare
* @vp: The vma_prepare struct
* @vma: The vma that will be altered once locked
- * @next: The next vma if it is to be adjusted
- * @remove: The first vma to be removed
- * @remove2: The second vma to be removed
+ * @vmg: The merge state that will be used to determine adjustment and VMA
+ * removal.
*/
static void init_multi_vma_prep(struct vma_prepare *vp,
struct vm_area_struct *vma,
- struct vm_area_struct *next,
- struct vm_area_struct *remove,
- struct vm_area_struct *remove2)
+ struct vma_merge_struct *vmg)
{
+ struct vm_area_struct *adjust;
+ struct vm_area_struct **remove = &vp->remove;
+
memset(vp, 0, sizeof(struct vma_prepare));
vp->vma = vma;
vp->anon_vma = vma->anon_vma;
- vp->remove = remove;
- vp->remove2 = remove2;
- vp->adj_next = next;
- if (!vp->anon_vma && next)
- vp->anon_vma = next->anon_vma;
+
+ if (vmg && vmg->__remove_middle) {
+ *remove = vmg->middle;
+ remove = &vp->remove2;
+ }
+ if (vmg && vmg->__remove_next)
+ *remove = vmg->next;
+
+ if (vmg && vmg->__adjust_middle_start)
+ adjust = vmg->middle;
+ else if (vmg && vmg->__adjust_next_start)
+ adjust = vmg->next;
+ else
+ adjust = NULL;
+
+ vp->adj_next = adjust;
+ if (!vp->anon_vma && adjust)
+ vp->anon_vma = adjust->anon_vma;
+
+ VM_WARN_ON(vp->anon_vma && adjust && adjust->anon_vma &&
+ vp->anon_vma != adjust->anon_vma);
vp->file = vma->vm_file;
if (vp->file)
vp->mapping = vma->vm_file->f_mapping;
-
}
/*
@@ -306,7 +320,7 @@ static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
* us to insert it before dropping the locks
* (it may either follow vma or precede it).
*/
- vma_iter_store(vmi, vp->insert);
+ vma_iter_store_new(vmi, vp->insert);
mm->map_count++;
}
@@ -327,7 +341,7 @@ static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
if (vp->remove) {
again:
- vma_mark_detached(vp->remove, true);
+ vma_mark_detached(vp->remove);
if (vp->file) {
uprobe_munmap(vp->remove, vp->remove->vm_start,
vp->remove->vm_end);
@@ -362,7 +376,7 @@ again:
*/
static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma)
{
- init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
+ init_multi_vma_prep(vp, vma, NULL);
}
/*
@@ -406,17 +420,14 @@ static bool can_vma_merge_right(struct vma_merge_struct *vmg,
/*
* Close a vm structure and free it.
*/
-void remove_vma(struct vm_area_struct *vma, bool unreachable)
+void remove_vma(struct vm_area_struct *vma)
{
might_sleep();
vma_close(vma);
if (vma->vm_file)
fput(vma->vm_file);
mpol_put(vma_policy(vma));
- if (unreachable)
- __vm_area_free(vma);
- else
- vm_area_free(vma);
+ vm_area_free(vma);
}
/*
@@ -499,7 +510,7 @@ __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
init_vma_prep(&vp, vma);
vp.insert = new;
vma_prepare(&vp);
- vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
+ vma_adjust_trans_huge(vma, vma->vm_start, addr, NULL);
if (new_below) {
vma->vm_start = addr;
@@ -629,49 +640,66 @@ void validate_mm(struct mm_struct *mm)
}
#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
-/* Actually perform the VMA merge operation. */
-static int commit_merge(struct vma_merge_struct *vmg,
- struct vm_area_struct *adjust,
- struct vm_area_struct *remove,
- struct vm_area_struct *remove2,
- long adj_start,
- bool expanded)
+/*
+ * Based on the vmg flag indicating whether we need to adjust the vm_start field
+ * for the middle or next VMA, we calculate what the range of the newly adjusted
+ * VMA ought to be, and set the VMA's range accordingly.
+ */
+static void vmg_adjust_set_range(struct vma_merge_struct *vmg)
{
- struct vma_prepare vp;
+ struct vm_area_struct *adjust;
+ pgoff_t pgoff;
- init_multi_vma_prep(&vp, vmg->vma, adjust, remove, remove2);
+ if (vmg->__adjust_middle_start) {
+ adjust = vmg->middle;
+ pgoff = adjust->vm_pgoff + PHYS_PFN(vmg->end - adjust->vm_start);
+ } else if (vmg->__adjust_next_start) {
+ adjust = vmg->next;
+ pgoff = adjust->vm_pgoff - PHYS_PFN(adjust->vm_start - vmg->end);
+ } else {
+ return;
+ }
- VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
- vp.anon_vma != adjust->anon_vma);
+ vma_set_range(adjust, vmg->end, adjust->vm_end, pgoff);
+}
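The pgoff arithmetic keeps the file offset of every surviving page stable; with hypothetical values (4 KiB pages):

	/*
	 * middle spans [0x1000, 0x9000) with vm_pgoff 0, and prev grows so
	 * that vmg->end = 0x3000. With __adjust_middle_start set:
	 *
	 *   pgoff = 0 + PHYS_PFN(0x3000 - 0x1000) = 2
	 *
	 * so middle becomes [0x3000, 0x9000) with vm_pgoff 2, and each
	 * remaining page still maps the same file offset as before.
	 */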
- if (expanded) {
- /* Note: vma iterator must be pointing to 'start'. */
- vma_iter_config(vmg->vmi, vmg->start, vmg->end);
+/*
+ * Actually perform the VMA merge operation.
+ *
+ * Returns 0 on success, or an error value on failure.
+ */
+static int commit_merge(struct vma_merge_struct *vmg)
+{
+ struct vm_area_struct *vma;
+ struct vma_prepare vp;
+
+ if (vmg->__adjust_next_start) {
+ /* We manipulate middle and adjust next, which is the target. */
+ vma = vmg->middle;
+ vma_iter_config(vmg->vmi, vmg->end, vmg->next->vm_end);
} else {
- vma_iter_config(vmg->vmi, adjust->vm_start + adj_start,
- adjust->vm_end);
+ vma = vmg->target;
+ /* Note: vma iterator must be pointing to 'start'. */
+ vma_iter_config(vmg->vmi, vmg->start, vmg->end);
}
- if (vma_iter_prealloc(vmg->vmi, vmg->vma))
+ init_multi_vma_prep(&vp, vma, vmg);
+
+ if (vma_iter_prealloc(vmg->vmi, vma))
return -ENOMEM;
vma_prepare(&vp);
- vma_adjust_trans_huge(vmg->vma, vmg->start, vmg->end, adj_start);
- vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff);
-
- if (expanded)
- vma_iter_store(vmg->vmi, vmg->vma);
-
- if (adj_start) {
- adjust->vm_start += adj_start;
- adjust->vm_pgoff += PHYS_PFN(adj_start);
- if (adj_start < 0) {
- WARN_ON(expanded);
- vma_iter_store(vmg->vmi, adjust);
- }
- }
+ /*
+ * THP pages may need to do additional splits if we increase
+ * middle->vm_start.
+ */
+ vma_adjust_trans_huge(vma, vmg->start, vmg->end,
+ vmg->__adjust_middle_start ? vmg->middle : NULL);
+ vma_set_range(vma, vmg->start, vmg->end, vmg->pgoff);
+ vmg_adjust_set_range(vmg);
+ vma_iter_store_overwrite(vmg->vmi, vmg->target);
- vma_complete(&vp, vmg->vmi, vmg->vma->vm_mm);
+ vma_complete(&vp, vmg->vmi, vma->vm_mm);
return 0;
}
@@ -694,8 +722,9 @@ static bool can_merge_remove_vma(struct vm_area_struct *vma)
* identical properties.
*
* This function checks for the existence of any such mergeable VMAs and updates
- * the maple tree describing the @vmg->vma->vm_mm address space to account for
- * this, as well as any VMAs shrunk/expanded/deleted as a result of this merge.
+ * the maple tree describing the @vmg->middle->vm_mm address space to account
+ * for this, as well as any VMAs shrunk/expanded/deleted as a result of this
+ * merge.
*
* As part of this operation, if a merge occurs, the @vmg object will have its
* vma, start, end, and pgoff fields modified to execute the merge. Subsequent
@@ -704,45 +733,43 @@ static bool can_merge_remove_vma(struct vm_area_struct *vma)
* Returns: The merged VMA if merge succeeds, or NULL otherwise.
*
* ASSUMPTIONS:
- * - The caller must assign the VMA to be modifed to @vmg->vma.
+ * - The caller must assign the VMA to be modified to @vmg->middle.
* - The caller must have set @vmg->prev to the previous VMA, if there is one.
* - The caller must not set @vmg->next, as we determine this.
* - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
- * - vmi must be positioned within [@vmg->vma->vm_start, @vmg->vma->vm_end).
+ * - vmi must be positioned within [@vmg->middle->vm_start, @vmg->middle->vm_end).
*/
static __must_check struct vm_area_struct *vma_merge_existing_range(
struct vma_merge_struct *vmg)
{
- struct vm_area_struct *vma = vmg->vma;
+ struct vm_area_struct *middle = vmg->middle;
struct vm_area_struct *prev = vmg->prev;
- struct vm_area_struct *next, *res;
+ struct vm_area_struct *next;
struct vm_area_struct *anon_dup = NULL;
- struct vm_area_struct *adjust = NULL;
unsigned long start = vmg->start;
unsigned long end = vmg->end;
- bool left_side = vma && start == vma->vm_start;
- bool right_side = vma && end == vma->vm_end;
+ bool left_side = middle && start == middle->vm_start;
+ bool right_side = middle && end == middle->vm_end;
int err = 0;
- long adj_start = 0;
- bool merge_will_delete_vma, merge_will_delete_next;
bool merge_left, merge_right, merge_both;
- bool expanded;
mmap_assert_write_locked(vmg->mm);
- VM_WARN_ON_VMG(!vma, vmg); /* We are modifying a VMA, so caller must specify. */
+ VM_WARN_ON_VMG(!middle, vmg); /* We are modifying a VMA, so caller must specify. */
VM_WARN_ON_VMG(vmg->next, vmg); /* We set this. */
VM_WARN_ON_VMG(prev && start <= prev->vm_start, vmg);
VM_WARN_ON_VMG(start >= end, vmg);
/*
- * If vma == prev, then we are offset into a VMA. Otherwise, if we are
+ * If middle == prev, then we are offset into a VMA. Otherwise, if we are
* not, we must span a portion of the VMA.
*/
- VM_WARN_ON_VMG(vma && ((vma != prev && vmg->start != vma->vm_start) ||
- vmg->end > vma->vm_end), vmg);
- /* The vmi must be positioned within vmg->vma. */
- VM_WARN_ON_VMG(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start &&
- vma_iter_addr(vmg->vmi) < vma->vm_end), vmg);
+ VM_WARN_ON_VMG(middle &&
+ ((middle != prev && vmg->start != middle->vm_start) ||
+ vmg->end > middle->vm_end), vmg);
+ /* The vmi must be positioned within vmg->middle. */
+ VM_WARN_ON_VMG(middle &&
+ !(vma_iter_addr(vmg->vmi) >= middle->vm_start &&
+ vma_iter_addr(vmg->vmi) < middle->vm_end), vmg);
vmg->state = VMA_MERGE_NOMERGE;
@@ -776,49 +803,52 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
merge_both = merge_left && merge_right;
/* If we span the entire VMA, a merge implies it will be deleted. */
- merge_will_delete_vma = left_side && right_side;
+ vmg->__remove_middle = left_side && right_side;
/*
- * If we need to remove vma in its entirety but are unable to do so,
+ * If we need to remove middle in its entirety but are unable to do so,
* we have no sensible recourse but to abort the merge.
*/
- if (merge_will_delete_vma && !can_merge_remove_vma(vma))
+ if (vmg->__remove_middle && !can_merge_remove_vma(middle))
return NULL;
/*
* If we merge both VMAs, then next is also deleted. This implies
* merge_will_delete_vma also.
*/
- merge_will_delete_next = merge_both;
+ vmg->__remove_next = merge_both;
/*
* If we cannot delete next, then we can reduce the operation to merging
- * prev and vma (thereby deleting vma).
+ * prev and middle (thereby deleting middle).
*/
- if (merge_will_delete_next && !can_merge_remove_vma(next)) {
- merge_will_delete_next = false;
+ if (vmg->__remove_next && !can_merge_remove_vma(next)) {
+ vmg->__remove_next = false;
merge_right = false;
merge_both = false;
}
- /* No matter what happens, we will be adjusting vma. */
- vma_start_write(vma);
-
- if (merge_left)
- vma_start_write(prev);
+ /* No matter what happens, we will be adjusting middle. */
+ vma_start_write(middle);
- if (merge_right)
+ if (merge_right) {
vma_start_write(next);
+ vmg->target = next;
+ }
+
+ if (merge_left) {
+ vma_start_write(prev);
+ vmg->target = prev;
+ }
if (merge_both) {
/*
- * |<----->|
- * |-------*********-------|
- * prev vma next
- * extend delete delete
+ * |<-------------------->|
+ * |-------********-------|
+ * prev middle next
+ * extend delete delete
*/
- vmg->vma = prev;
vmg->start = prev->vm_start;
vmg->end = next->vm_end;
vmg->pgoff = prev->vm_pgoff;
@@ -826,80 +856,62 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
/*
* We already ensured anon_vma compatibility above, so now it's
* simply a case of, if prev has no anon_vma object, which of
- * next or vma contains the anon_vma we must duplicate.
+ * next or middle contains the anon_vma we must duplicate.
*/
- err = dup_anon_vma(prev, next->anon_vma ? next : vma, &anon_dup);
+ err = dup_anon_vma(prev, next->anon_vma ? next : middle,
+ &anon_dup);
} else if (merge_left) {
/*
- * |<----->| OR
- * |<--------->|
+ * |<------------>| OR
+ * |<----------------->|
* |-------*************
- * prev vma
+ * prev middle
* extend shrink/delete
*/
- vmg->vma = prev;
vmg->start = prev->vm_start;
vmg->pgoff = prev->vm_pgoff;
- if (!merge_will_delete_vma) {
- adjust = vma;
- adj_start = vmg->end - vma->vm_start;
- }
+ if (!vmg->__remove_middle)
+ vmg->__adjust_middle_start = true;
- err = dup_anon_vma(prev, vma, &anon_dup);
+ err = dup_anon_vma(prev, middle, &anon_dup);
} else { /* merge_right */
/*
- * |<----->| OR
- * |<--------->|
+ * |<------------->| OR
+ * |<----------------->|
* *************-------|
- * vma next
+ * middle next
* shrink/delete extend
*/
pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
VM_WARN_ON_VMG(!merge_right, vmg);
- /* If we are offset into a VMA, then prev must be vma. */
- VM_WARN_ON_VMG(vmg->start > vma->vm_start && prev && vma != prev, vmg);
+ /* If we are offset into a VMA, then prev must be middle. */
+ VM_WARN_ON_VMG(vmg->start > middle->vm_start && prev && middle != prev, vmg);
- if (merge_will_delete_vma) {
- vmg->vma = next;
+ if (vmg->__remove_middle) {
vmg->end = next->vm_end;
vmg->pgoff = next->vm_pgoff - pglen;
} else {
- /*
- * We shrink vma and expand next.
- *
- * IMPORTANT: This is the ONLY case where the final
- * merged VMA is NOT vmg->vma, but rather vmg->next.
- */
-
- vmg->start = vma->vm_start;
+ /* We shrink middle and expand next. */
+ vmg->__adjust_next_start = true;
+ vmg->start = middle->vm_start;
vmg->end = start;
- vmg->pgoff = vma->vm_pgoff;
-
- adjust = next;
- adj_start = -(vma->vm_end - start);
+ vmg->pgoff = middle->vm_pgoff;
}
- err = dup_anon_vma(next, vma, &anon_dup);
+ err = dup_anon_vma(next, middle, &anon_dup);
}
if (err)
goto abort;
- /*
- * In nearly all cases, we expand vmg->vma. There is one exception -
- * merge_right where we partially span the VMA. In this case we shrink
- * the end of vmg->vma and adjust the start of vmg->next accordingly.
- */
- expanded = !merge_right || merge_will_delete_vma;
+ err = commit_merge(vmg);
+ if (err) {
+ VM_WARN_ON(err != -ENOMEM);
- if (commit_merge(vmg, adjust,
- merge_will_delete_vma ? vma : NULL,
- merge_will_delete_next ? next : NULL,
- adj_start, expanded)) {
if (anon_dup)
unlink_anon_vmas(anon_dup);
@@ -907,11 +919,9 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
return NULL;
}
- res = merge_left ? prev : next;
- khugepaged_enter_vma(res, vmg->flags);
-
+ khugepaged_enter_vma(vmg->target, vmg->flags);
vmg->state = VMA_MERGE_SUCCESS;
- return res;
+ return vmg->target;
abort:
vma_iter_set(vmg->vmi, start);
@@ -970,10 +980,9 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
struct vm_area_struct *next = vmg->next;
unsigned long end = vmg->end;
bool can_merge_left, can_merge_right;
- bool just_expand = vmg->merge_flags & VMG_FLAG_JUST_EXPAND;
mmap_assert_write_locked(vmg->mm);
- VM_WARN_ON_VMG(vmg->vma, vmg);
+ VM_WARN_ON_VMG(vmg->middle, vmg);
/* vmi must point at or before the gap. */
VM_WARN_ON_VMG(vma_iter_addr(vmg->vmi) > end, vmg);
@@ -984,18 +993,18 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
return NULL;
can_merge_left = can_vma_merge_left(vmg);
- can_merge_right = !just_expand && can_vma_merge_right(vmg, can_merge_left);
+ can_merge_right = !vmg->just_expand && can_vma_merge_right(vmg, can_merge_left);
/* If we can merge with the next VMA, adjust vmg accordingly. */
if (can_merge_right) {
vmg->end = next->vm_end;
- vmg->vma = next;
+ vmg->middle = next;
}
/* If we can merge with the previous VMA, adjust vmg accordingly. */
if (can_merge_left) {
vmg->start = prev->vm_start;
- vmg->vma = prev;
+ vmg->middle = prev;
vmg->pgoff = prev->vm_pgoff;
/*
@@ -1007,7 +1016,7 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
vmg->end = end;
/* In expand-only case we are already positioned at prev. */
- if (!just_expand) {
+ if (!vmg->just_expand) {
/* Equivalent to going to the previous range. */
vma_prev(vmg->vmi);
}
@@ -1017,10 +1026,10 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
* Now try to expand adjacent VMA(s). This takes care of removing the
* following VMA if we have VMAs on both sides.
*/
- if (vmg->vma && !vma_expand(vmg)) {
- khugepaged_enter_vma(vmg->vma, vmg->flags);
+ if (vmg->middle && !vma_expand(vmg)) {
+ khugepaged_enter_vma(vmg->middle, vmg->flags);
vmg->state = VMA_MERGE_SUCCESS;
- return vmg->vma;
+ return vmg->middle;
}
return NULL;
@@ -1032,45 +1041,50 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
* @vmg: Describes a VMA expansion operation.
*
* Expand @vma to vmg->start and vmg->end. Can expand off the start and end.
- * Will expand over vmg->next if it's different from vmg->vma and vmg->end ==
- * vmg->next->vm_end. Checking if the vmg->vma can expand and merge with
+ * Will expand over vmg->next if it's different from vmg->middle and vmg->end ==
+ * vmg->next->vm_end. Checking if the vmg->middle can expand and merge with
* vmg->next needs to be handled by the caller.
*
* Returns: 0 on success.
*
* ASSUMPTIONS:
- * - The caller must hold a WRITE lock on vmg->vma->mm->mmap_lock.
- * - The caller must have set @vmg->vma and @vmg->next.
+ * - The caller must hold a WRITE lock on vmg->middle->mm->mmap_lock.
+ * - The caller must have set @vmg->middle and @vmg->next.
*/
int vma_expand(struct vma_merge_struct *vmg)
{
struct vm_area_struct *anon_dup = NULL;
bool remove_next = false;
- struct vm_area_struct *vma = vmg->vma;
+ struct vm_area_struct *middle = vmg->middle;
struct vm_area_struct *next = vmg->next;
mmap_assert_write_locked(vmg->mm);
- vma_start_write(vma);
- if (next && (vma != next) && (vmg->end == next->vm_end)) {
+ vma_start_write(middle);
+ if (next && (middle != next) && (vmg->end == next->vm_end)) {
int ret;
remove_next = true;
/* This should already have been checked by this point. */
VM_WARN_ON_VMG(!can_merge_remove_vma(next), vmg);
vma_start_write(next);
- ret = dup_anon_vma(vma, next, &anon_dup);
+ ret = dup_anon_vma(middle, next, &anon_dup);
if (ret)
return ret;
}
/* Not merging but overwriting any part of next is not handled. */
VM_WARN_ON_VMG(next && !remove_next &&
- next != vma && vmg->end > next->vm_start, vmg);
+ next != middle && vmg->end > next->vm_start, vmg);
/* Only handles expanding */
- VM_WARN_ON_VMG(vma->vm_start < vmg->start || vma->vm_end > vmg->end, vmg);
+ VM_WARN_ON_VMG(middle->vm_start < vmg->start ||
+ middle->vm_end > vmg->end, vmg);
+
+ vmg->target = middle;
+ if (remove_next)
+ vmg->__remove_next = true;
- if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true))
+ if (commit_merge(vmg))
goto nomem;
return 0;
@@ -1110,7 +1124,7 @@ int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
init_vma_prep(&vp, vma);
vma_prepare(&vp);
- vma_adjust_trans_huge(vma, start, end, 0);
+ vma_adjust_trans_huge(vma, start, end, NULL);
vma_iter_clear(vmi);
vma_set_range(vma, start, end, pgoff);
@@ -1199,7 +1213,7 @@ static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
/* Remove and clean up vmas */
mas_set(mas_detach, 0);
mas_for_each(mas_detach, vma, ULONG_MAX)
- remove_vma(vma, /* unreachable = */ false);
+ remove_vma(vma);
vm_unacct_memory(vms->nr_accounted);
validate_mm(mm);
@@ -1221,7 +1235,7 @@ static void reattach_vmas(struct ma_state *mas_detach)
mas_set(mas_detach, 0);
mas_for_each(mas_detach, vma, ULONG_MAX)
- vma_mark_detached(vma, false);
+ vma_mark_attached(vma);
__mt_destroy(mas_detach->tree);
}
@@ -1296,7 +1310,7 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
if (error)
goto munmap_gather_failed;
- vma_mark_detached(next, true);
+ vma_mark_detached(next);
nrpages = vma_pages(next);
vms->nr_pages += nrpages;
@@ -1508,7 +1522,7 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
*/
static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
{
- struct vm_area_struct *vma = vmg->vma;
+ struct vm_area_struct *vma = vmg->middle;
unsigned long start = vmg->start;
unsigned long end = vmg->end;
struct vm_area_struct *merged;
@@ -1609,7 +1623,7 @@ struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta);
vmg.next = vma_iter_next_rewind(vmi, NULL);
- vmg.vma = NULL; /* We use the VMA to populate VMG fields only. */
+ vmg.middle = NULL; /* We use the VMA to populate VMG fields only. */
return vma_merge_new_range(&vmg);
}
@@ -1694,7 +1708,7 @@ int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
return -ENOMEM;
vma_start_write(vma);
- vma_iter_store(&vmi, vma);
+ vma_iter_store_new(&vmi, vma);
vma_link_file(vma);
mm->map_count++;
validate_mm(mm);
@@ -1730,7 +1744,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
if (new_vma && new_vma->vm_start < addr + len)
return NULL; /* should never get here */
- vmg.vma = NULL; /* New VMA range. */
+ vmg.middle = NULL; /* New VMA range. */
vmg.pgoff = pgoff;
vmg.next = vma_iter_next_rewind(&vmi, NULL);
new_vma = vma_merge_new_range(&vmg);
@@ -2373,7 +2387,7 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
/* Lock the VMA since it is modified after insertion into VMA tree */
vma_start_write(vma);
- vma_iter_store(vmi, vma);
+ vma_iter_store_new(vmi, vma);
map->mm->map_count++;
vma_link_file(vma);
@@ -2587,7 +2601,7 @@ int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
vmg.prev = vma;
/* vmi is positioned at prev, which this mode expects. */
- vmg.merge_flags = VMG_FLAG_JUST_EXPAND;
+ vmg.just_expand = true;
if (vma_merge_new_range(&vmg))
goto out;
@@ -2850,7 +2864,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
anon_vma_interval_tree_pre_update_vma(vma);
vma->vm_end = address;
/* Overwrite old entry in mtree. */
- vma_iter_store(&vmi, vma);
+ vma_iter_store_overwrite(&vmi, vma);
anon_vma_interval_tree_post_update_vma(vma);
perf_event_mmap(vma);
@@ -2930,7 +2944,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
vma->vm_start = address;
vma->vm_pgoff -= grow;
/* Overwrite old entry in mtree. */
- vma_iter_store(&vmi, vma);
+ vma_iter_store_overwrite(&vmi, vma);
anon_vma_interval_tree_post_update_vma(vma);
perf_event_mmap(vma);
diff --git a/mm/vma.h b/mm/vma.h
index a2e8710b8c47..7356ca5a22d3 100644
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -58,35 +58,85 @@ enum vma_merge_state {
VMA_MERGE_SUCCESS,
};
-enum vma_merge_flags {
- VMG_FLAG_DEFAULT = 0,
- /*
- * If we can expand, simply do so. We know there is nothing to merge to
- * the right. Does not reset state upon failure to merge. The VMA
- * iterator is assumed to be positioned at the previous VMA, rather than
- * at the gap.
- */
- VMG_FLAG_JUST_EXPAND = 1 << 0,
-};
-
-/* Represents a VMA merge operation. */
+/*
+ * Describes a VMA merge operation and is threaded throughout it.
+ *
+ * Any of the fields may be mutated by the merge operation, so no guarantees are
+ * made about the contents of this structure after a merge operation has completed.
+ */
struct vma_merge_struct {
struct mm_struct *mm;
struct vma_iterator *vmi;
- pgoff_t pgoff;
+ /*
+ * Adjacent VMAs, any of which may be NULL if not present:
+ *
+ * |------|--------|------|
+ * | prev | middle | next |
+ * |------|--------|------|
+ *
+ * middle may not yet exist in the case of a proposed new VMA being
+ * merged, or it may be an existing VMA.
+ *
+ * next may be assigned by the caller.
+ */
struct vm_area_struct *prev;
- struct vm_area_struct *next; /* Modified by vma_merge(). */
- struct vm_area_struct *vma; /* Either a new VMA or the one being modified. */
+ struct vm_area_struct *middle;
+ struct vm_area_struct *next;
+ /* This is the VMA we ultimately target to become the merged VMA. */
+ struct vm_area_struct *target;
+ /*
+ * Initially, the start, end, pgoff fields are provided by the caller
+ * and describe the proposed new VMA range, whether modifying an
+ * existing VMA (which will be 'middle'), or adding a new one.
+ *
+ * During the merge process these fields are updated to describe the new
+ * range _including those VMAs which will be merged_.
+ */
unsigned long start;
unsigned long end;
+ pgoff_t pgoff;
+
unsigned long flags;
struct file *file;
struct anon_vma *anon_vma;
struct mempolicy *policy;
struct vm_userfaultfd_ctx uffd_ctx;
struct anon_vma_name *anon_name;
- enum vma_merge_flags merge_flags;
enum vma_merge_state state;
+
+ /* Flags which callers can use to modify merge behaviour: */
+
+ /*
+ * If we can expand, simply do so. We know there is nothing to merge to
+ * the right. Does not reset state upon failure to merge. The VMA
+ * iterator is assumed to be positioned at the previous VMA, rather than
+ * at the gap.
+ */
+ bool just_expand :1;
+
+ /* Internal flags set during merge process: */
+
+ /*
+ * Internal flag indicating the merge increases vmg->middle->vm_start
+ * (and thereby, vmg->prev->vm_end).
+ */
+ bool __adjust_middle_start :1;
+ /*
+ * Internal flag indicating the merge decreases vmg->next->vm_start
+ * (and thereby, vmg->middle->vm_end).
+ */
+ bool __adjust_next_start :1;
+ /*
+ * Internal flag used during the merge operation to indicate we will
+ * remove vmg->middle.
+ */
+ bool __remove_middle :1;
+ /*
+ * Internal flag used during the merge operation to indicate we will
+ * remove vmg->next.
+ */
+ bool __remove_next :1;
+
};
static inline bool vmg_nomem(struct vma_merge_struct *vmg)
@@ -110,7 +160,6 @@ static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
.flags = flags_, \
.pgoff = pgoff_, \
.state = VMA_MERGE_START, \
- .merge_flags = VMG_FLAG_DEFAULT, \
}
#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_) \
@@ -118,8 +167,8 @@ static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
.mm = vma_->vm_mm, \
.vmi = vmi_, \
.prev = prev_, \
+ .middle = vma_, \
.next = NULL, \
- .vma = vma_, \
.start = start_, \
.end = end_, \
.flags = vma_->vm_flags, \
@@ -130,7 +179,6 @@ static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
.uffd_ctx = vma_->vm_userfaultfd_ctx, \
.anon_name = anon_vma_name(vma_), \
.state = VMA_MERGE_START, \
- .merge_flags = VMG_FLAG_DEFAULT, \
}
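
A rough standalone sketch of the prev/middle/next adjacency rules the
struct now documents (illustrative types and names only; the real merge
also compares file, anon_vma, policy, uffd context and anon name):

    #include <stdbool.h>
    #include <stdio.h>

    struct vma { unsigned long start, end, flags; };

    /* A proposed range can absorb prev if it abuts it and flags match. */
    static bool can_merge_left(const struct vma *prev, unsigned long start,
                               unsigned long flags)
    {
        return prev && prev->end == start && prev->flags == flags;
    }

    /* Likewise for next on the right-hand side. */
    static bool can_merge_right(const struct vma *next, unsigned long end,
                                unsigned long flags)
    {
        return next && next->start == end && next->flags == flags;
    }

    int main(void)
    {
        struct vma prev = { 0x1000, 0x2000, 0x3 };
        struct vma next = { 0x3000, 0x4000, 0x3 };

        /* A proposed new "middle" range [0x2000, 0x3000) with matching
         * flags merges both ways, collapsing all three into one VMA. */
        printf("left: %d, right: %d\n",
               can_merge_left(&prev, 0x2000, 0x3),
               can_merge_right(&next, 0x3000, 0x3));
        return 0;
    }
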
#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
@@ -157,6 +205,7 @@ static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
if (unlikely(mas_is_err(&vmi->mas)))
return -ENOMEM;
+ vma_mark_attached(vma);
return 0;
}
@@ -169,7 +218,7 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
bool unlock);
-void remove_vma(struct vm_area_struct *vma, bool unreachable);
+void remove_vma(struct vm_area_struct *vma);
void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
struct vm_area_struct *prev, struct vm_area_struct *next);
@@ -364,9 +413,10 @@ static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
}
/* Store a VMA with preallocated memory */
-static inline void vma_iter_store(struct vma_iterator *vmi,
- struct vm_area_struct *vma)
+static inline void vma_iter_store_overwrite(struct vma_iterator *vmi,
+ struct vm_area_struct *vma)
{
+ vma_assert_attached(vma);
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
@@ -391,6 +441,13 @@ static inline void vma_iter_store(struct vma_iterator *vmi,
mas_store_prealloc(&vmi->mas, vma);
}
+static inline void vma_iter_store_new(struct vma_iterator *vmi,
+ struct vm_area_struct *vma)
+{
+ vma_mark_attached(vma);
+ vma_iter_store_overwrite(vmi, vma);
+}
+
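
A minimal standalone model of the attach-state contract these helpers
encode (assumed names; the kernel versions store into the maple tree and
use vma_mark_attached()/vma_assert_attached()):

    #include <assert.h>
    #include <stdbool.h>

    struct vma { bool attached; };

    /* Overwriting an existing entry requires the VMA to be attached. */
    static void store_overwrite(struct vma *v)
    {
        assert(v->attached);
        /* ...store into the tree... */
    }

    /* A brand-new VMA is marked attached first, then stored. */
    static void store_new(struct vma *v)
    {
        v->attached = true;
        store_overwrite(v);
    }

    int main(void)
    {
        struct vma v = { false };
        store_new(&v);  /* fine; overwriting a detached VMA would assert */
        return 0;
    }
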
static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
return vmi->mas.index;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 61981ee1c9d2..3ed720a787ec 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3771,8 +3771,7 @@ void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
struct vm_struct *area;
void *ret;
kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
- unsigned long real_size = size;
- unsigned long real_align = align;
+ unsigned long original_align = align;
unsigned int shift = PAGE_SHIFT;
if (WARN_ON_ONCE(!size))
@@ -3781,7 +3780,7 @@ void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
if ((size >> PAGE_SHIFT) > totalram_pages()) {
warn_alloc(gfp_mask, NULL,
"vmalloc error: size %lu, exceeds total pages",
- real_size);
+ size);
return NULL;
}
@@ -3798,19 +3797,18 @@ void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
else
shift = arch_vmap_pte_supported_shift(size);
- align = max(real_align, 1UL << shift);
- size = ALIGN(real_size, 1UL << shift);
+ align = max(original_align, 1UL << shift);
}
again:
- area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
+ area = __get_vm_area_node(size, align, shift, VM_ALLOC |
VM_UNINITIALIZED | vm_flags, start, end, node,
gfp_mask, caller);
if (!area) {
bool nofail = gfp_mask & __GFP_NOFAIL;
warn_alloc(gfp_mask, NULL,
"vmalloc error: size %lu, vm_struct allocation failed%s",
- real_size, (nofail) ? ". Retrying." : "");
+ size, (nofail) ? ". Retrying." : "");
if (nofail) {
schedule_timeout_uninterruptible(1);
goto again;
@@ -3860,7 +3858,7 @@ again:
(gfp_mask & __GFP_SKIP_ZERO))
kasan_flags |= KASAN_VMALLOC_INIT;
/* KASAN_VMALLOC_PROT_NORMAL already set if required. */
- area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);
+ area->addr = kasan_unpoison_vmalloc(area->addr, size, kasan_flags);
/*
* In this function, newly allocated vm_struct has VM_UNINITIALIZED
@@ -3869,17 +3867,15 @@ again:
*/
clear_vm_uninitialized_flag(area);
- size = PAGE_ALIGN(size);
if (!(vm_flags & VM_DEFER_KMEMLEAK))
- kmemleak_vmalloc(area, size, gfp_mask);
+ kmemleak_vmalloc(area, PAGE_ALIGN(size), gfp_mask);
return area->addr;
fail:
if (shift > PAGE_SHIFT) {
shift = PAGE_SHIFT;
- align = real_align;
- size = real_size;
+ align = original_align;
goto again;
}
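
A standalone sketch of the retry logic after this cleanup: 'size' is no
longer mutated at all, and only 'align' is derived from the attempted
shift, then restored to the caller's value on fallback (constants here
are illustrative):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PMD_SHIFT  21

    static unsigned long pick_align(unsigned long original_align,
                                    unsigned int shift)
    {
        unsigned long a = 1UL << shift;
        return original_align > a ? original_align : a;
    }

    int main(void)
    {
        unsigned long original_align = 1UL << PAGE_SHIFT;
        unsigned int shift = PMD_SHIFT;   /* first try a huge mapping */

        printf("first try: align=%#lx\n", pick_align(original_align, shift));
        shift = PAGE_SHIFT;               /* allocation failed: fall back */
        printf("fallback:  align=%#lx\n", pick_align(original_align, shift));
        return 0;
    }
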
diff --git a/mm/vmscan.c b/mm/vmscan.c
index eb228a8cd769..b620d74b0f66 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -271,6 +271,25 @@ static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg)
}
#endif
+/* for_each_managed_zone_pgdat - helper macro to iterate over all managed zones in a pgdat up to
+ * and including the specified highidx
+ * @zone: The current zone in the iterator
+ * @pgdat: The pgdat which node_zones are being iterated
+ * @idx: The index variable
+ * @highidx: The index of the highest zone to return
+ *
+ * This macro iterates through all managed zones up to and including the specified highidx.
+ * The zone iterator enters an invalid state after the macro call and must be
+ * reinitialized before it can be used again.
+ */
+#define for_each_managed_zone_pgdat(zone, pgdat, idx, highidx) \
+ for ((idx) = 0, (zone) = (pgdat)->node_zones; \
+ (idx) <= (highidx); \
+ (idx)++, (zone)++) \
+ if (!managed_zone(zone)) \
+ continue; \
+ else
+
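
A standalone model of the dangling-else trick the macro relies on: the
trailing if/else binds the caller's loop body to the else arm, so
unmanaged zones are skipped without an extra check at every call site
(structures illustrative):

    #include <stdio.h>

    #define NZONES 4
    struct zone { int managed; };

    #define for_each_managed(z, zones, i, hi)                        \
        for ((i) = 0, (z) = (zones); (i) <= (hi); (i)++, (z)++)      \
            if (!(z)->managed)                                       \
                continue;                                            \
            else

    int main(void)
    {
        struct zone zones[NZONES] = { {1}, {0}, {1}, {1} };
        struct zone *z;
        int i;

        for_each_managed(z, zones, i, NZONES - 1)
            printf("zone %d is managed\n", i);   /* prints 0, 2, 3 */
        return 0;
    }
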
static void set_task_reclaim_state(struct task_struct *task,
struct reclaim_state *rs)
{
@@ -396,13 +415,9 @@ static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,
{
unsigned long size = 0;
int zid;
+ struct zone *zone;
- for (zid = 0; zid <= zone_idx; zid++) {
- struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
-
- if (!managed_zone(zone))
- continue;
-
+ for_each_managed_zone_pgdat(zone, lruvec_pgdat(lruvec), zid, zone_idx) {
if (!mem_cgroup_disabled())
size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
else
@@ -441,21 +456,26 @@ void drop_slab(void)
} while ((freed >> shift++) > 1);
}
-static int reclaimer_offset(void)
+#define CHECK_RECLAIMER_OFFSET(type) \
+ do { \
+ BUILD_BUG_ON(PGSTEAL_##type - PGSTEAL_KSWAPD != \
+ PGDEMOTE_##type - PGDEMOTE_KSWAPD); \
+ BUILD_BUG_ON(PGSTEAL_##type - PGSTEAL_KSWAPD != \
+ PGSCAN_##type - PGSCAN_KSWAPD); \
+ } while (0)
+
+static int reclaimer_offset(struct scan_control *sc)
{
- BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD !=
- PGDEMOTE_DIRECT - PGDEMOTE_KSWAPD);
- BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD !=
- PGDEMOTE_KHUGEPAGED - PGDEMOTE_KSWAPD);
- BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD !=
- PGSCAN_DIRECT - PGSCAN_KSWAPD);
- BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD !=
- PGSCAN_KHUGEPAGED - PGSCAN_KSWAPD);
+ CHECK_RECLAIMER_OFFSET(DIRECT);
+ CHECK_RECLAIMER_OFFSET(KHUGEPAGED);
+ CHECK_RECLAIMER_OFFSET(PROACTIVE);
if (current_is_kswapd())
return 0;
if (current_is_khugepaged())
return PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD;
+ if (sc->proactive)
+ return PGSTEAL_PROACTIVE - PGSTEAL_KSWAPD;
return PGSTEAL_DIRECT - PGSTEAL_KSWAPD;
}
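
A standalone model of the parallel-enum invariant the BUILD_BUG_ONs now
check once per reclaimer type: as long as the counter families keep the
same internal order, one offset indexes all of them (enum values
illustrative):

    #include <stdio.h>

    enum { PGSTEAL_KSWAPD, PGSTEAL_DIRECT, PGSTEAL_KHUGEPAGED,
           PGSTEAL_PROACTIVE };
    enum { PGSCAN_KSWAPD, PGSCAN_DIRECT, PGSCAN_KHUGEPAGED,
           PGSCAN_PROACTIVE };

    /* Compile-time lockstep check, like CHECK_RECLAIMER_OFFSET(). */
    _Static_assert(PGSTEAL_PROACTIVE - PGSTEAL_KSWAPD ==
                   PGSCAN_PROACTIVE - PGSCAN_KSWAPD, "families in lockstep");

    int main(void)
    {
        /* e.g. proactive reclaim */
        int offset = PGSTEAL_PROACTIVE - PGSTEAL_KSWAPD;

        printf("pgsteal idx=%d, pgscan idx=%d\n",
               PGSTEAL_KSWAPD + offset, PGSCAN_KSWAPD + offset);
        return 0;
    }
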
@@ -495,7 +515,7 @@ static bool skip_throttle_noprogress(pg_data_t *pgdat)
{
int reclaimable = 0, write_pending = 0;
int i;
-
+ struct zone *zone;
/*
* If kswapd is disabled, reschedule if necessary but do not
* throttle as the system is likely near OOM.
@@ -508,12 +528,7 @@ static bool skip_throttle_noprogress(pg_data_t *pgdat)
* throttle as throttling will occur when the folios cycle
* towards the end of the LRU if still under writeback.
*/
- for (i = 0; i < MAX_NR_ZONES; i++) {
- struct zone *zone = pgdat->node_zones + i;
-
- if (!managed_zone(zone))
- continue;
-
+ for_each_managed_zone_pgdat(zone, pgdat, i, MAX_NR_ZONES - 1) {
reclaimable += zone_reclaimable_pages(zone);
write_pending += zone_page_state_snapshot(zone,
NR_ZONE_WRITE_PENDING);
@@ -769,7 +784,7 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
if (reclaimed && !mapping_exiting(mapping))
shadow = workingset_eviction(folio, target_memcg);
__delete_from_swap_cache(folio, swap, shadow);
- mem_cgroup_swapout(folio, swap);
+ memcg1_swapout(folio, swap);
xa_unlock_irq(&mapping->i_pages);
put_swap_folio(folio, swap);
} else {
@@ -1112,6 +1127,13 @@ retry:
if (!folio_trylock(folio))
goto keep;
+ if (folio_contain_hwpoisoned_page(folio)) {
+ unmap_poisoned_folio(folio, folio_pfn(folio), false);
+ folio_unlock(folio);
+ folio_put(folio);
+ continue;
+ }
+
VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
nr_pages = folio_nr_pages(folio);
@@ -1279,7 +1301,7 @@ retry:
split_folio_to_list(folio, folio_list))
goto activate_locked;
}
- if (!add_to_swap(folio)) {
+ if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOWARN)) {
int __maybe_unused order = folio_order(folio);
if (!folio_test_large(folio))
@@ -1295,9 +1317,21 @@ retry:
}
#endif
count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
- if (!add_to_swap(folio))
+ if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOWARN))
goto activate_locked_split;
}
+ /*
+ * Normally the folio will be dirtied in unmap because its
+ * pte should be dirty. A special case is a MADV_FREE page. The
+ * page's pte could have the dirty bit cleared while the folio's
+ * SwapBacked flag is still set, because clearing the dirty bit
+ * and the SwapBacked flag is not protected by a lock. For such a
+ * folio, unmap will not set the dirty bit, so folio reclaim will
+ * not write the folio out. This can cause data corruption when
+ * the folio is swapped in later. Always setting the dirty flag
+ * for the folio solves the problem.
+ */
+ folio_mark_dirty(folio);
}
}
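
For context, a small userspace program exercising the MADV_FREE
behaviour the comment above describes (error handling trimmed for
brevity):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 1 << 20;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        memset(p, 0xaa, len);        /* dirty the pages */
        madvise(p, len, MADV_FREE);  /* kernel may now drop the contents */
        p[0] = 1;                    /* writing makes the page live again */
        printf("first byte: %d\n", p[0]);
        return munmap(p, len);
    }
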
@@ -1986,7 +2020,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
&nr_scanned, sc, lru);
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
- item = PGSCAN_KSWAPD + reclaimer_offset();
+ item = PGSCAN_KSWAPD + reclaimer_offset(sc);
if (!cgroup_reclaim(sc))
__count_vm_events(item, nr_scanned);
__count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
@@ -2002,10 +2036,10 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
spin_lock_irq(&lruvec->lru_lock);
move_folios_to_lru(lruvec, &folio_list);
- __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(),
+ __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
stat.nr_demoted);
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
- item = PGSTEAL_KSWAPD + reclaimer_offset();
+ item = PGSTEAL_KSWAPD + reclaimer_offset(sc);
if (!cgroup_reclaim(sc))
__count_vm_events(item, nr_reclaimed);
__count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
@@ -2372,17 +2406,13 @@ static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc)
unsigned long total_high_wmark = 0;
unsigned long free, anon;
int z;
+ struct zone *zone;
free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
file = node_page_state(pgdat, NR_ACTIVE_FILE) +
node_page_state(pgdat, NR_INACTIVE_FILE);
- for (z = 0; z < MAX_NR_ZONES; z++) {
- struct zone *zone = &pgdat->node_zones[z];
-
- if (!managed_zone(zone))
- continue;
-
+ for_each_managed_zone_pgdat(zone, pgdat, z, MAX_NR_ZONES - 1) {
total_high_wmark += high_wmark_pages(zone);
}
@@ -2400,6 +2430,43 @@ static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc)
}
}
+static inline void calculate_pressure_balance(struct scan_control *sc,
+ int swappiness, u64 *fraction, u64 *denominator)
+{
+ unsigned long anon_cost, file_cost, total_cost;
+ unsigned long ap, fp;
+
+ /*
+ * Calculate the pressure balance between anon and file pages.
+ *
+ * The amount of pressure we put on each LRU is inversely
+ * proportional to the cost of reclaiming each list, as
+ * determined by the share of pages that are refaulting, times
+ * the relative IO cost of bringing back a swapped out
+ * anonymous page vs reloading a filesystem page (swappiness).
+ *
+ * Although we limit that influence to ensure no list gets
+ * left behind completely: at least a third of the pressure is
+ * applied, before swappiness.
+ *
+ * With swappiness at 100, anon and file have equal IO cost.
+ */
+ total_cost = sc->anon_cost + sc->file_cost;
+ anon_cost = total_cost + sc->anon_cost;
+ file_cost = total_cost + sc->file_cost;
+ total_cost = anon_cost + file_cost;
+
+ ap = swappiness * (total_cost + 1);
+ ap /= anon_cost + 1;
+
+ fp = (MAX_SWAPPINESS - swappiness) * (total_cost + 1);
+ fp /= file_cost + 1;
+
+ fraction[WORKINGSET_ANON] = ap;
+ fraction[WORKINGSET_FILE] = fp;
+ *denominator = ap + fp;
+}
+
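
A standalone rerun of this arithmetic with sample numbers (values
illustrative; MAX_SWAPPINESS mirrors the kernel's 200), showing how
adding total_cost to each side keeps either list at no less than
roughly a third of the pressure:

    #include <stdio.h>

    #define MAX_SWAPPINESS 200

    int main(void)
    {
        unsigned long anon_cost = 100, file_cost = 300, swappiness = 60;
        unsigned long total, a, f, ap, fp;

        total = anon_cost + file_cost;   /* 400 */
        a = total + anon_cost;           /* 500: anon never drops below 1/3 */
        f = total + file_cost;           /* 700 */
        total = a + f;                   /* 1200 */

        ap = swappiness * (total + 1) / (a + 1);
        fp = (MAX_SWAPPINESS - swappiness) * (total + 1) / (f + 1);
        printf("anon gets %lu of %lu\n", ap, ap + fp);
        return 0;
    }
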
/*
* Determine how aggressively the anon and file LRU lists should be
* scanned.
@@ -2412,12 +2479,10 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
{
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
- unsigned long anon_cost, file_cost, total_cost;
int swappiness = sc_swappiness(sc, memcg);
u64 fraction[ANON_AND_FILE];
u64 denominator = 0; /* gcc */
enum scan_balance scan_balance;
- unsigned long ap, fp;
enum lru_list lru;
/* If we have no swap space, do not bother scanning anon folios. */
@@ -2466,35 +2531,8 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
}
scan_balance = SCAN_FRACT;
- /*
- * Calculate the pressure balance between anon and file pages.
- *
- * The amount of pressure we put on each LRU is inversely
- * proportional to the cost of reclaiming each list, as
- * determined by the share of pages that are refaulting, times
- * the relative IO cost of bringing back a swapped out
- * anonymous page vs reloading a filesystem page (swappiness).
- *
- * Although we limit that influence to ensure no list gets
- * left behind completely: at least a third of the pressure is
- * applied, before swappiness.
- *
- * With swappiness at 100, anon and file have equal IO cost.
- */
- total_cost = sc->anon_cost + sc->file_cost;
- anon_cost = total_cost + sc->anon_cost;
- file_cost = total_cost + sc->file_cost;
- total_cost = anon_cost + file_cost;
+ calculate_pressure_balance(sc, swappiness, fraction, &denominator);
- ap = swappiness * (total_cost + 1);
- ap /= anon_cost + 1;
-
- fp = (MAX_SWAPPINESS - swappiness) * (total_cost + 1);
- fp /= file_cost + 1;
-
- fraction[0] = ap;
- fraction[1] = fp;
- denominator = ap + fp;
out:
for_each_evictable_lru(lru) {
bool file = is_file_lru(lru);
@@ -4545,7 +4583,7 @@ static int scan_folios(struct lruvec *lruvec, struct scan_control *sc,
break;
}
- item = PGSCAN_KSWAPD + reclaimer_offset();
+ item = PGSCAN_KSWAPD + reclaimer_offset(sc);
if (!cgroup_reclaim(sc)) {
__count_vm_events(item, isolated);
__count_vm_events(PGREFILL, sorted);
@@ -4695,10 +4733,10 @@ retry:
reset_batch_size(walk);
}
- __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(),
+ __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
stat.nr_demoted);
- item = PGSTEAL_KSWAPD + reclaimer_offset();
+ item = PGSTEAL_KSWAPD + reclaimer_offset(sc);
if (!cgroup_reclaim(sc))
__count_vm_events(item, reclaimed);
__count_memcg_events(memcg, item, reclaimed);
@@ -5843,6 +5881,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
unsigned long pages_for_compaction;
unsigned long inactive_lru_pages;
int z;
+ struct zone *zone;
/* If not in reclaim/compaction mode, stop */
if (!in_reclaim_compaction(sc))
@@ -5862,17 +5901,16 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
return false;
/* If compaction would go ahead or the allocation would succeed, stop */
- for (z = 0; z <= sc->reclaim_idx; z++) {
- struct zone *zone = &pgdat->node_zones[z];
- if (!managed_zone(zone))
- continue;
+ for_each_managed_zone_pgdat(zone, pgdat, z, sc->reclaim_idx) {
+ unsigned long watermark = min_wmark_pages(zone);
/* Allocation can already succeed, nothing to do */
- if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
+ if (zone_watermark_ok(zone, sc->order, watermark,
sc->reclaim_idx, 0))
return false;
- if (compaction_suitable(zone, sc->order, sc->reclaim_idx))
+ if (compaction_suitable(zone, sc->order, watermark,
+ sc->reclaim_idx))
return false;
}
@@ -6099,22 +6137,21 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
sc->reclaim_idx, 0))
return true;
- /* Compaction cannot yet proceed. Do reclaim. */
- if (!compaction_suitable(zone, sc->order, sc->reclaim_idx))
- return false;
-
/*
- * Compaction is already possible, but it takes time to run and there
- * are potentially other callers using the pages just freed. So proceed
- * with reclaim to make a buffer of free pages available to give
- * compaction a reasonable chance of completing and allocating the page.
+ * Direct reclaim usually targets the min watermark, but compaction
+ * takes time to run and there are potentially other callers using the
+ * pages just freed. So target a higher buffer to give compaction a
+ * reasonable chance of completing and allocating the pages.
+ *
* Note that we won't actually reclaim the whole buffer in one attempt
* as the target watermark in should_continue_reclaim() is lower. But if
* we are already above the high+gap watermark, don't reclaim at all.
*/
- watermark = high_wmark_pages(zone) + compact_gap(sc->order);
+ watermark = high_wmark_pages(zone);
+ if (compaction_suitable(zone, sc->order, watermark, sc->reclaim_idx))
+ return true;
- return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
+ return false;
}
static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc)
@@ -6393,11 +6430,7 @@ static bool allow_direct_reclaim(pg_data_t *pgdat)
if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
return true;
- for (i = 0; i <= ZONE_NORMAL; i++) {
- zone = &pgdat->node_zones[i];
- if (!managed_zone(zone))
- continue;
-
+ for_each_managed_zone_pgdat(zone, pgdat, i, ZONE_NORMAL) {
if (!zone_reclaimable_pages(zone))
continue;
@@ -6702,17 +6735,25 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx)
* Check watermarks bottom-up as lower zones are more likely to
* meet watermarks.
*/
- for (i = 0; i <= highest_zoneidx; i++) {
- zone = pgdat->node_zones + i;
-
- if (!managed_zone(zone))
- continue;
+ for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) {
+ unsigned long free_pages;
if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)
mark = promo_wmark_pages(zone);
else
mark = high_wmark_pages(zone);
- if (zone_watermark_ok_safe(zone, order, mark, highest_zoneidx))
+
+ /*
+ * In defrag_mode, watermarks must be met in whole
+ * blocks to avoid polluting allocator fallbacks.
+ */
+ if (defrag_mode)
+ free_pages = zone_page_state(zone, NR_FREE_PAGES_BLOCKS);
+ else
+ free_pages = zone_page_state(zone, NR_FREE_PAGES);
+
+ if (__zone_watermark_ok(zone, order, mark, highest_zoneidx,
+ 0, free_pages))
return true;
}
@@ -6792,11 +6833,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
/* Reclaim a number of pages proportional to the number of zones */
sc->nr_to_reclaim = 0;
- for (z = 0; z <= sc->reclaim_idx; z++) {
- zone = pgdat->node_zones + z;
- if (!managed_zone(zone))
- continue;
-
+ for_each_managed_zone_pgdat(zone, pgdat, z, sc->reclaim_idx) {
sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
}
@@ -6827,12 +6864,7 @@ update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active)
int i;
struct zone *zone;
- for (i = 0; i <= highest_zoneidx; i++) {
- zone = pgdat->node_zones + i;
-
- if (!managed_zone(zone))
- continue;
-
+ for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) {
if (active)
set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
else
@@ -6893,11 +6925,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
* stall or direct reclaim until kswapd is finished.
*/
nr_boost_reclaim = 0;
- for (i = 0; i <= highest_zoneidx; i++) {
- zone = pgdat->node_zones + i;
- if (!managed_zone(zone))
- continue;
-
+ for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) {
nr_boost_reclaim += zone->watermark_boost;
zone_boosts[i] = zone->watermark_boost;
}
@@ -7599,11 +7627,11 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
return NODE_RECLAIM_NOSCAN;
- if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
+ if (test_and_set_bit_lock(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
return NODE_RECLAIM_NOSCAN;
ret = __node_reclaim(pgdat, gfp_mask, order);
- clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
+ clear_bit_unlock(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
if (ret)
count_vm_event(PGSCAN_ZONE_RECLAIM_SUCCESS);
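
A standalone C11 model of the bit-lock pattern adopted here: acquire
ordering when taking the flag, release ordering when dropping it, so the
reclaim work cannot be reordered outside the critical section (names
illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_ulong flags;
    #define RECLAIM_LOCKED 0UL

    /* Acquire semantics, like test_and_set_bit_lock(). */
    static int trylock_bit(atomic_ulong *f, unsigned long bit)
    {
        unsigned long mask = 1UL << bit;
        return !(atomic_fetch_or_explicit(f, mask,
                                          memory_order_acquire) & mask);
    }

    /* Release semantics, like clear_bit_unlock(). */
    static void unlock_bit(atomic_ulong *f, unsigned long bit)
    {
        atomic_fetch_and_explicit(f, ~(1UL << bit), memory_order_release);
    }

    int main(void)
    {
        if (trylock_bit(&flags, RECLAIM_LOCKED)) {
            puts("doing node reclaim");
            unlock_bit(&flags, RECLAIM_LOCKED);
        }
        return 0;
    }
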
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 651318765ebf..4c268ce39ff2 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1193,6 +1193,7 @@ int fragmentation_index(struct zone *zone, unsigned int order)
const char * const vmstat_text[] = {
/* enum zone_stat_item counters */
"nr_free_pages",
+ "nr_free_pages_blocks",
"nr_zone_inactive_anon",
"nr_zone_active_anon",
"nr_zone_inactive_file",
@@ -1276,9 +1277,11 @@ const char * const vmstat_text[] = {
"pgdemote_kswapd",
"pgdemote_direct",
"pgdemote_khugepaged",
+ "pgdemote_proactive",
#ifdef CONFIG_HUGETLB_PAGE
"nr_hugetlb",
#endif
+ "nr_balloon_pages",
/* system-wide enum vm_stat_item counters */
"nr_dirty_threshold",
"nr_dirty_background_threshold",
@@ -1310,9 +1313,11 @@ const char * const vmstat_text[] = {
"pgsteal_kswapd",
"pgsteal_direct",
"pgsteal_khugepaged",
+ "pgsteal_proactive",
"pgscan_kswapd",
"pgscan_direct",
"pgscan_khugepaged",
+ "pgscan_proactive",
"pgscan_direct_throttle",
"pgscan_anon",
"pgscan_file",
diff --git a/mm/z3fold.c b/mm/z3fold.c
deleted file mode 100644
index 379d24b4fef9..000000000000
--- a/mm/z3fold.c
+++ /dev/null
@@ -1,1447 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * z3fold.c
- *
- * Author: Vitaly Wool <vitaly.wool@konsulko.com>
- * Copyright (C) 2016, Sony Mobile Communications Inc.
- *
- * This implementation is based on zbud written by Seth Jennings.
- *
- * z3fold is a special purpose allocator for storing compressed pages. It
- * can store up to three compressed pages per page which improves the
- * compression ratio of zbud while retaining its main concepts (e.g. always
- * storing an integral number of objects per page) and simplicity.
- * It still has simple and deterministic reclaim properties that make it
- * preferable to a higher density approach (with no requirement on integral
- * number of objects per page) when reclaim is used.
- *
- * As in zbud, pages are divided into "chunks". The size of the chunks is
- * fixed at compile time and is determined by NCHUNKS_ORDER below.
- *
- * z3fold doesn't export any API and is meant to be used via zpool API.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/atomic.h>
-#include <linux/sched.h>
-#include <linux/cpumask.h>
-#include <linux/list.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/page-flags.h>
-#include <linux/migrate.h>
-#include <linux/node.h>
-#include <linux/compaction.h>
-#include <linux/percpu.h>
-#include <linux/preempt.h>
-#include <linux/workqueue.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/zpool.h>
-#include <linux/kmemleak.h>
-
-/*
- * NCHUNKS_ORDER determines the internal allocation granularity, effectively
- * adjusting internal fragmentation. It also determines the number of
- * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
- * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
- * in the beginning of an allocated page are occupied by z3fold header, so
- * NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
- * which shows the max number of free chunks in z3fold page, also there will
- * be 63, or 62, respectively, freelists per pool.
- */
-#define NCHUNKS_ORDER 6
-
-#define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
-#define CHUNK_SIZE (1 << CHUNK_SHIFT)
-#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
-#define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
-#define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
-#define NCHUNKS (TOTAL_CHUNKS - ZHDR_CHUNKS)
-
-#define BUDDY_MASK (0x3)
-#define BUDDY_SHIFT 2
-#define SLOTS_ALIGN (0x40)
-
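
A standalone check of the removed allocator's chunk arithmetic for a
4 KiB page and NCHUNKS_ORDER = 6 (the header size below is an assumed
one-chunk value, matching the "63 free chunks" case in the comment):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE (1UL << PAGE_SHIFT)
    #define NCHUNKS_ORDER 6
    #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
    #define CHUNK_SIZE (1UL << CHUNK_SHIFT)

    int main(void)
    {
        unsigned long zhdr = 64;   /* assumed header size in bytes */
        unsigned long zhdr_chunks = (zhdr + CHUNK_SIZE - 1) / CHUNK_SIZE;

        printf("chunk size: %lu bytes\n", CHUNK_SIZE);          /* 64 */
        printf("chunks/page: %lu, free: %lu\n",
               PAGE_SIZE / CHUNK_SIZE,
               PAGE_SIZE / CHUNK_SIZE - zhdr_chunks);           /* 64, 63 */
        return 0;
    }
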
-/*****************
- * Structures
-*****************/
-struct z3fold_pool;
-
-enum buddy {
- HEADLESS = 0,
- FIRST,
- MIDDLE,
- LAST,
- BUDDIES_MAX = LAST
-};
-
-struct z3fold_buddy_slots {
- /*
- * we are using BUDDY_MASK in handle_to_buddy etc. so there should
- * be enough slots to hold all possible variants
- */
- unsigned long slot[BUDDY_MASK + 1];
- unsigned long pool; /* back link */
- rwlock_t lock;
-};
-#define HANDLE_FLAG_MASK (0x03)
-
-/*
- * struct z3fold_header - z3fold page metadata occupying first chunks of each
- * z3fold page, except for HEADLESS pages
- * @buddy: links the z3fold page into the relevant list in the
- * pool
- * @page_lock: per-page lock
- * @refcount: reference count for the z3fold page
- * @work: work_struct for page layout optimization
- * @slots: pointer to the structure holding buddy slots
- * @pool: pointer to the containing pool
- * @cpu: CPU which this page "belongs" to
- * @first_chunks: the size of the first buddy in chunks, 0 if free
- * @middle_chunks: the size of the middle buddy in chunks, 0 if free
- * @last_chunks: the size of the last buddy in chunks, 0 if free
- * @first_num: the starting number (for the first handle)
- * @mapped_count: the number of objects currently mapped
- */
-struct z3fold_header {
- struct list_head buddy;
- spinlock_t page_lock;
- struct kref refcount;
- struct work_struct work;
- struct z3fold_buddy_slots *slots;
- struct z3fold_pool *pool;
- short cpu;
- unsigned short first_chunks;
- unsigned short middle_chunks;
- unsigned short last_chunks;
- unsigned short start_middle;
- unsigned short first_num:2;
- unsigned short mapped_count:2;
- unsigned short foreign_handles:2;
-};
-
-/**
- * struct z3fold_pool - stores metadata for each z3fold pool
- * @name: pool name
- * @lock: protects pool unbuddied lists
- * @stale_lock: protects pool stale page list
- * @unbuddied: per-cpu array of lists tracking z3fold pages that contain 2-
- * buddies; the list each z3fold page is added to depends on
- * the size of its free region.
- * @stale: list of pages marked for freeing
- * @pages_nr: number of z3fold pages in the pool.
- * @c_handle: cache for z3fold_buddy_slots allocation
- * @compact_wq: workqueue for page layout background optimization
- * @release_wq: workqueue for safe page release
- * @work: work_struct for safe page release
- *
- * This structure is allocated at pool creation time and maintains metadata
- * pertaining to a particular z3fold pool.
- */
-struct z3fold_pool {
- const char *name;
- spinlock_t lock;
- spinlock_t stale_lock;
- struct list_head __percpu *unbuddied;
- struct list_head stale;
- atomic64_t pages_nr;
- struct kmem_cache *c_handle;
- struct workqueue_struct *compact_wq;
- struct workqueue_struct *release_wq;
- struct work_struct work;
-};
-
-/*
- * Internal z3fold page flags
- */
-enum z3fold_page_flags {
- PAGE_HEADLESS = 0,
- MIDDLE_CHUNK_MAPPED,
- NEEDS_COMPACTING,
- PAGE_STALE,
- PAGE_CLAIMED, /* by either reclaim or free */
- PAGE_MIGRATED, /* page is migrated and soon to be released */
-};
-
-/*
- * handle flags, go under HANDLE_FLAG_MASK
- */
-enum z3fold_handle_flags {
- HANDLES_NOFREE = 0,
-};
-
-/*
- * Forward declarations
- */
-static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
-static void compact_page_work(struct work_struct *w);
-
-/*****************
- * Helpers
-*****************/
-
-/* Converts an allocation size in bytes to size in z3fold chunks */
-static int size_to_chunks(size_t size)
-{
- return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
-}
-
-#define for_each_unbuddied_list(_iter, _begin) \
- for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
-
-static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
- gfp_t gfp)
-{
- struct z3fold_buddy_slots *slots = kmem_cache_zalloc(pool->c_handle,
- gfp);
-
- if (slots) {
- /* It will be freed separately in free_handle(). */
- kmemleak_not_leak(slots);
- slots->pool = (unsigned long)pool;
- rwlock_init(&slots->lock);
- }
-
- return slots;
-}
-
-static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
-{
- return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
-}
-
-static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
-{
- return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
-}
-
-/* Lock a z3fold page */
-static inline void z3fold_page_lock(struct z3fold_header *zhdr)
-{
- spin_lock(&zhdr->page_lock);
-}
-
-/* Try to lock a z3fold page */
-static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
-{
- return spin_trylock(&zhdr->page_lock);
-}
-
-/* Unlock a z3fold page */
-static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
-{
- spin_unlock(&zhdr->page_lock);
-}
-
-/* return locked z3fold page if it's not headless */
-static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
-{
- struct z3fold_buddy_slots *slots;
- struct z3fold_header *zhdr;
- int locked = 0;
-
- if (!(handle & (1 << PAGE_HEADLESS))) {
- slots = handle_to_slots(handle);
- do {
- unsigned long addr;
-
- read_lock(&slots->lock);
- addr = *(unsigned long *)handle;
- zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
- locked = z3fold_page_trylock(zhdr);
- read_unlock(&slots->lock);
- if (locked) {
- struct page *page = virt_to_page(zhdr);
-
- if (!test_bit(PAGE_MIGRATED, &page->private))
- break;
- z3fold_page_unlock(zhdr);
- }
- cpu_relax();
- } while (true);
- } else {
- zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
- }
-
- return zhdr;
-}
-
-static inline void put_z3fold_header(struct z3fold_header *zhdr)
-{
- struct page *page = virt_to_page(zhdr);
-
- if (!test_bit(PAGE_HEADLESS, &page->private))
- z3fold_page_unlock(zhdr);
-}
-
-static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
-{
- struct z3fold_buddy_slots *slots;
- int i;
- bool is_free;
-
- if (WARN_ON(*(unsigned long *)handle == 0))
- return;
-
- slots = handle_to_slots(handle);
- write_lock(&slots->lock);
- *(unsigned long *)handle = 0;
-
- if (test_bit(HANDLES_NOFREE, &slots->pool)) {
- write_unlock(&slots->lock);
- return; /* simple case, nothing else to do */
- }
-
- if (zhdr->slots != slots)
- zhdr->foreign_handles--;
-
- is_free = true;
- for (i = 0; i <= BUDDY_MASK; i++) {
- if (slots->slot[i]) {
- is_free = false;
- break;
- }
- }
- write_unlock(&slots->lock);
-
- if (is_free) {
- struct z3fold_pool *pool = slots_to_pool(slots);
-
- if (zhdr->slots == slots)
- zhdr->slots = NULL;
- kmem_cache_free(pool->c_handle, slots);
- }
-}
-
-/* Initializes the z3fold header of a newly allocated z3fold page */
-static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
- struct z3fold_pool *pool, gfp_t gfp)
-{
- struct z3fold_header *zhdr = page_address(page);
- struct z3fold_buddy_slots *slots;
-
- clear_bit(PAGE_HEADLESS, &page->private);
- clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
- clear_bit(NEEDS_COMPACTING, &page->private);
- clear_bit(PAGE_STALE, &page->private);
- clear_bit(PAGE_CLAIMED, &page->private);
- clear_bit(PAGE_MIGRATED, &page->private);
- if (headless)
- return zhdr;
-
- slots = alloc_slots(pool, gfp);
- if (!slots)
- return NULL;
-
- memset(zhdr, 0, sizeof(*zhdr));
- spin_lock_init(&zhdr->page_lock);
- kref_init(&zhdr->refcount);
- zhdr->cpu = -1;
- zhdr->slots = slots;
- zhdr->pool = pool;
- INIT_LIST_HEAD(&zhdr->buddy);
- INIT_WORK(&zhdr->work, compact_page_work);
- return zhdr;
-}
-
-/* Resets the struct page fields and frees the page */
-static void free_z3fold_page(struct page *page, bool headless)
-{
- if (!headless) {
- lock_page(page);
- __ClearPageMovable(page);
- unlock_page(page);
- }
- __free_page(page);
-}
-
-/* Helper function to build the index */
-static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
-{
- return (bud + zhdr->first_num) & BUDDY_MASK;
-}
-
-/*
- * Encodes the handle of a particular buddy within a z3fold page.
- * Zhdr->page_lock should be held as this function accesses first_num
- * if bud != HEADLESS.
- */
-static unsigned long __encode_handle(struct z3fold_header *zhdr,
- struct z3fold_buddy_slots *slots,
- enum buddy bud)
-{
- unsigned long h = (unsigned long)zhdr;
- int idx = 0;
-
- /*
- * For a headless page, its handle is its pointer with the extra
- * PAGE_HEADLESS bit set
- */
- if (bud == HEADLESS)
- return h | (1 << PAGE_HEADLESS);
-
- /* otherwise, return pointer to encoded handle */
- idx = __idx(zhdr, bud);
- h += idx;
- if (bud == LAST)
- h |= (zhdr->last_chunks << BUDDY_SHIFT);
-
- write_lock(&slots->lock);
- slots->slot[idx] = h;
- write_unlock(&slots->lock);
- return (unsigned long)&slots->slot[idx];
-}
-
-static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
-{
- return __encode_handle(zhdr, zhdr->slots, bud);
-}
-
-/* only for LAST bud, returns zero otherwise */
-static unsigned short handle_to_chunks(unsigned long handle)
-{
- struct z3fold_buddy_slots *slots = handle_to_slots(handle);
- unsigned long addr;
-
- read_lock(&slots->lock);
- addr = *(unsigned long *)handle;
- read_unlock(&slots->lock);
- return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
-}
-
-/*
- * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
- * but that doesn't matter, because the masking will result in the
- * correct buddy number.
- */
-static enum buddy handle_to_buddy(unsigned long handle)
-{
- struct z3fold_header *zhdr;
- struct z3fold_buddy_slots *slots = handle_to_slots(handle);
- unsigned long addr;
-
- read_lock(&slots->lock);
- WARN_ON(handle & (1 << PAGE_HEADLESS));
- addr = *(unsigned long *)handle;
- read_unlock(&slots->lock);
- zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
- return (addr - zhdr->first_num) & BUDDY_MASK;
-}
-
-static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
-{
- return zhdr->pool;
-}
-
-static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
-{
- struct page *page = virt_to_page(zhdr);
- struct z3fold_pool *pool = zhdr_to_pool(zhdr);
-
- WARN_ON(!list_empty(&zhdr->buddy));
- set_bit(PAGE_STALE, &page->private);
- clear_bit(NEEDS_COMPACTING, &page->private);
- spin_lock(&pool->lock);
- spin_unlock(&pool->lock);
-
- if (locked)
- z3fold_page_unlock(zhdr);
-
- spin_lock(&pool->stale_lock);
- list_add(&zhdr->buddy, &pool->stale);
- queue_work(pool->release_wq, &pool->work);
- spin_unlock(&pool->stale_lock);
-
- atomic64_dec(&pool->pages_nr);
-}
-
-static void release_z3fold_page_locked(struct kref *ref)
-{
- struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
- refcount);
- WARN_ON(z3fold_page_trylock(zhdr));
- __release_z3fold_page(zhdr, true);
-}
-
-static void release_z3fold_page_locked_list(struct kref *ref)
-{
- struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
- refcount);
- struct z3fold_pool *pool = zhdr_to_pool(zhdr);
-
- spin_lock(&pool->lock);
- list_del_init(&zhdr->buddy);
- spin_unlock(&pool->lock);
-
- WARN_ON(z3fold_page_trylock(zhdr));
- __release_z3fold_page(zhdr, true);
-}
-
-static inline int put_z3fold_locked(struct z3fold_header *zhdr)
-{
- return kref_put(&zhdr->refcount, release_z3fold_page_locked);
-}
-
-static inline int put_z3fold_locked_list(struct z3fold_header *zhdr)
-{
- return kref_put(&zhdr->refcount, release_z3fold_page_locked_list);
-}
-
-static void free_pages_work(struct work_struct *w)
-{
- struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);
-
- spin_lock(&pool->stale_lock);
- while (!list_empty(&pool->stale)) {
- struct z3fold_header *zhdr = list_first_entry(&pool->stale,
- struct z3fold_header, buddy);
- struct page *page = virt_to_page(zhdr);
-
- list_del(&zhdr->buddy);
- if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
- continue;
- spin_unlock(&pool->stale_lock);
- cancel_work_sync(&zhdr->work);
- free_z3fold_page(page, false);
- cond_resched();
- spin_lock(&pool->stale_lock);
- }
- spin_unlock(&pool->stale_lock);
-}
-
-/*
- * Returns the number of free chunks in a z3fold page.
- * NB: can't be used with HEADLESS pages.
- */
-static int num_free_chunks(struct z3fold_header *zhdr)
-{
- int nfree;
- /*
- * If there is a middle object, pick up the bigger free space
- * either before or after it. Otherwise just subtract the number
- * of chunks occupied by the first and the last objects.
- */
- if (zhdr->middle_chunks != 0) {
- int nfree_before = zhdr->first_chunks ?
- 0 : zhdr->start_middle - ZHDR_CHUNKS;
- int nfree_after = zhdr->last_chunks ?
- 0 : TOTAL_CHUNKS -
- (zhdr->start_middle + zhdr->middle_chunks);
- nfree = max(nfree_before, nfree_after);
- } else
- nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
- return nfree;
-}
-
-/* Add to the appropriate unbuddied list */
-static inline void add_to_unbuddied(struct z3fold_pool *pool,
- struct z3fold_header *zhdr)
-{
- if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
- zhdr->middle_chunks == 0) {
- struct list_head *unbuddied;
- int freechunks = num_free_chunks(zhdr);
-
- migrate_disable();
- unbuddied = this_cpu_ptr(pool->unbuddied);
- spin_lock(&pool->lock);
- list_add(&zhdr->buddy, &unbuddied[freechunks]);
- spin_unlock(&pool->lock);
- zhdr->cpu = smp_processor_id();
- migrate_enable();
- }
-}
-
-static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
-{
- enum buddy bud = HEADLESS;
-
- if (zhdr->middle_chunks) {
- if (!zhdr->first_chunks &&
- chunks <= zhdr->start_middle - ZHDR_CHUNKS)
- bud = FIRST;
- else if (!zhdr->last_chunks)
- bud = LAST;
- } else {
- if (!zhdr->first_chunks)
- bud = FIRST;
- else if (!zhdr->last_chunks)
- bud = LAST;
- else
- bud = MIDDLE;
- }
-
- return bud;
-}
-
-static inline void *mchunk_memmove(struct z3fold_header *zhdr,
- unsigned short dst_chunk)
-{
- void *beg = zhdr;
- return memmove(beg + (dst_chunk << CHUNK_SHIFT),
- beg + (zhdr->start_middle << CHUNK_SHIFT),
- zhdr->middle_chunks << CHUNK_SHIFT);
-}
-
-static inline bool buddy_single(struct z3fold_header *zhdr)
-{
- return !((zhdr->first_chunks && zhdr->middle_chunks) ||
- (zhdr->first_chunks && zhdr->last_chunks) ||
- (zhdr->middle_chunks && zhdr->last_chunks));
-}
-
-static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
-{
- struct z3fold_pool *pool = zhdr_to_pool(zhdr);
- void *p = zhdr;
- unsigned long old_handle = 0;
- size_t sz = 0;
- struct z3fold_header *new_zhdr = NULL;
- int first_idx = __idx(zhdr, FIRST);
- int middle_idx = __idx(zhdr, MIDDLE);
- int last_idx = __idx(zhdr, LAST);
- unsigned short *moved_chunks = NULL;
-
- /*
- * No need to protect slots here -- all the slots are "local" and
- * the page lock is already taken
- */
- if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
- p += ZHDR_SIZE_ALIGNED;
- sz = zhdr->first_chunks << CHUNK_SHIFT;
- old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
- moved_chunks = &zhdr->first_chunks;
- } else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
- p += zhdr->start_middle << CHUNK_SHIFT;
- sz = zhdr->middle_chunks << CHUNK_SHIFT;
- old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
- moved_chunks = &zhdr->middle_chunks;
- } else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
- p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
- sz = zhdr->last_chunks << CHUNK_SHIFT;
- old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
- moved_chunks = &zhdr->last_chunks;
- }
-
- if (sz > 0) {
- enum buddy new_bud = HEADLESS;
- short chunks = size_to_chunks(sz);
- void *q;
-
- new_zhdr = __z3fold_alloc(pool, sz, false);
- if (!new_zhdr)
- return NULL;
-
- if (WARN_ON(new_zhdr == zhdr))
- goto out_fail;
-
- new_bud = get_free_buddy(new_zhdr, chunks);
- q = new_zhdr;
- switch (new_bud) {
- case FIRST:
- new_zhdr->first_chunks = chunks;
- q += ZHDR_SIZE_ALIGNED;
- break;
- case MIDDLE:
- new_zhdr->middle_chunks = chunks;
- new_zhdr->start_middle =
- new_zhdr->first_chunks + ZHDR_CHUNKS;
- q += new_zhdr->start_middle << CHUNK_SHIFT;
- break;
- case LAST:
- new_zhdr->last_chunks = chunks;
- q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
- break;
- default:
- goto out_fail;
- }
- new_zhdr->foreign_handles++;
- memcpy(q, p, sz);
- write_lock(&zhdr->slots->lock);
- *(unsigned long *)old_handle = (unsigned long)new_zhdr +
- __idx(new_zhdr, new_bud);
- if (new_bud == LAST)
- *(unsigned long *)old_handle |=
- (new_zhdr->last_chunks << BUDDY_SHIFT);
- write_unlock(&zhdr->slots->lock);
- add_to_unbuddied(pool, new_zhdr);
- z3fold_page_unlock(new_zhdr);
-
- *moved_chunks = 0;
- }
-
- return new_zhdr;
-
-out_fail:
- if (new_zhdr && !put_z3fold_locked(new_zhdr)) {
- add_to_unbuddied(pool, new_zhdr);
- z3fold_page_unlock(new_zhdr);
- }
- return NULL;
-
-}
-
-#define BIG_CHUNK_GAP 3
-/* Has to be called with lock held */
-static int z3fold_compact_page(struct z3fold_header *zhdr)
-{
- struct page *page = virt_to_page(zhdr);
-
- if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
- return 0; /* can't move middle chunk, it's used */
-
- if (unlikely(PageIsolated(page)))
- return 0;
-
- if (zhdr->middle_chunks == 0)
- return 0; /* nothing to compact */
-
- if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
- /* move to the beginning */
- mchunk_memmove(zhdr, ZHDR_CHUNKS);
- zhdr->first_chunks = zhdr->middle_chunks;
- zhdr->middle_chunks = 0;
- zhdr->start_middle = 0;
- zhdr->first_num++;
- return 1;
- }
-
- /*
- * moving data is expensive, so let's only do that if
- * there's substantial gain (at least BIG_CHUNK_GAP chunks)
- */
- if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
- zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
- BIG_CHUNK_GAP) {
- mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
- zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
- return 1;
- } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
- TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
- + zhdr->middle_chunks) >=
- BIG_CHUNK_GAP) {
- unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
- zhdr->middle_chunks;
- mchunk_memmove(zhdr, new_start);
- zhdr->start_middle = new_start;
- return 1;
- }
-
- return 0;
-}
-
-static void do_compact_page(struct z3fold_header *zhdr, bool locked)
-{
- struct z3fold_pool *pool = zhdr_to_pool(zhdr);
- struct page *page;
-
- page = virt_to_page(zhdr);
- if (locked)
- WARN_ON(z3fold_page_trylock(zhdr));
- else
- z3fold_page_lock(zhdr);
- if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
- z3fold_page_unlock(zhdr);
- return;
- }
- spin_lock(&pool->lock);
- list_del_init(&zhdr->buddy);
- spin_unlock(&pool->lock);
-
- if (put_z3fold_locked(zhdr))
- return;
-
- if (test_bit(PAGE_STALE, &page->private) ||
- test_and_set_bit(PAGE_CLAIMED, &page->private)) {
- z3fold_page_unlock(zhdr);
- return;
- }
-
- if (!zhdr->foreign_handles && buddy_single(zhdr) &&
- zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
- if (!put_z3fold_locked(zhdr)) {
- clear_bit(PAGE_CLAIMED, &page->private);
- z3fold_page_unlock(zhdr);
- }
- return;
- }
-
- z3fold_compact_page(zhdr);
- add_to_unbuddied(pool, zhdr);
- clear_bit(PAGE_CLAIMED, &page->private);
- z3fold_page_unlock(zhdr);
-}
-
-static void compact_page_work(struct work_struct *w)
-{
- struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
- work);
-
- do_compact_page(zhdr, false);
-}
-
-/* returns _locked_ z3fold page header or NULL */
-static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
- size_t size, bool can_sleep)
-{
- struct z3fold_header *zhdr = NULL;
- struct page *page;
- struct list_head *unbuddied;
- int chunks = size_to_chunks(size), i;
-
-lookup:
- migrate_disable();
- /* First, try to find an unbuddied z3fold page. */
- unbuddied = this_cpu_ptr(pool->unbuddied);
- for_each_unbuddied_list(i, chunks) {
- struct list_head *l = &unbuddied[i];
-
- zhdr = list_first_entry_or_null(READ_ONCE(l),
- struct z3fold_header, buddy);
-
- if (!zhdr)
- continue;
-
- /* Re-check under lock. */
- spin_lock(&pool->lock);
- if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
- struct z3fold_header, buddy)) ||
- !z3fold_page_trylock(zhdr)) {
- spin_unlock(&pool->lock);
- zhdr = NULL;
- migrate_enable();
- if (can_sleep)
- cond_resched();
- goto lookup;
- }
- list_del_init(&zhdr->buddy);
- zhdr->cpu = -1;
- spin_unlock(&pool->lock);
-
- page = virt_to_page(zhdr);
- if (test_bit(NEEDS_COMPACTING, &page->private) ||
- test_bit(PAGE_CLAIMED, &page->private)) {
- z3fold_page_unlock(zhdr);
- zhdr = NULL;
- migrate_enable();
- if (can_sleep)
- cond_resched();
- goto lookup;
- }
-
- /*
- * this page could not be removed from its unbuddied
- * list while pool lock was held, and then we've taken
- * page lock so kref_put could not be called before
- * we got here, so it's safe to just call kref_get()
- */
- kref_get(&zhdr->refcount);
- break;
- }
- migrate_enable();
-
- if (!zhdr) {
- int cpu;
-
- /* look for _exact_ match on other cpus' lists */
- for_each_online_cpu(cpu) {
- struct list_head *l;
-
- unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
- spin_lock(&pool->lock);
- l = &unbuddied[chunks];
-
- zhdr = list_first_entry_or_null(READ_ONCE(l),
- struct z3fold_header, buddy);
-
- if (!zhdr || !z3fold_page_trylock(zhdr)) {
- spin_unlock(&pool->lock);
- zhdr = NULL;
- continue;
- }
- list_del_init(&zhdr->buddy);
- zhdr->cpu = -1;
- spin_unlock(&pool->lock);
-
- page = virt_to_page(zhdr);
- if (test_bit(NEEDS_COMPACTING, &page->private) ||
- test_bit(PAGE_CLAIMED, &page->private)) {
- z3fold_page_unlock(zhdr);
- zhdr = NULL;
- if (can_sleep)
- cond_resched();
- continue;
- }
- kref_get(&zhdr->refcount);
- break;
- }
- }
-
- if (zhdr && !zhdr->slots) {
- zhdr->slots = alloc_slots(pool, GFP_ATOMIC);
- if (!zhdr->slots)
- goto out_fail;
- }
- return zhdr;
-
-out_fail:
- if (!put_z3fold_locked(zhdr)) {
- add_to_unbuddied(pool, zhdr);
- z3fold_page_unlock(zhdr);
- }
- return NULL;
-}
-
-/*
- * API Functions
- */
-
-/**
- * z3fold_create_pool() - create a new z3fold pool
- * @name: pool name
- * @gfp: gfp flags when allocating the z3fold pool structure
- *
- * Return: pointer to the new z3fold pool or NULL if the metadata allocation
- * failed.
- */
-static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp)
-{
- struct z3fold_pool *pool = NULL;
- int i, cpu;
-
- pool = kzalloc(sizeof(struct z3fold_pool), gfp);
- if (!pool)
- goto out;
- pool->c_handle = kmem_cache_create("z3fold_handle",
- sizeof(struct z3fold_buddy_slots),
- SLOTS_ALIGN, 0, NULL);
- if (!pool->c_handle)
- goto out_c;
- spin_lock_init(&pool->lock);
- spin_lock_init(&pool->stale_lock);
- pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS,
- __alignof__(struct list_head));
- if (!pool->unbuddied)
- goto out_pool;
- for_each_possible_cpu(cpu) {
- struct list_head *unbuddied =
- per_cpu_ptr(pool->unbuddied, cpu);
- for_each_unbuddied_list(i, 0)
- INIT_LIST_HEAD(&unbuddied[i]);
- }
- INIT_LIST_HEAD(&pool->stale);
- atomic64_set(&pool->pages_nr, 0);
- pool->name = name;
- pool->compact_wq = create_singlethread_workqueue(pool->name);
- if (!pool->compact_wq)
- goto out_unbuddied;
- pool->release_wq = create_singlethread_workqueue(pool->name);
- if (!pool->release_wq)
- goto out_wq;
- INIT_WORK(&pool->work, free_pages_work);
- return pool;
-
-out_wq:
- destroy_workqueue(pool->compact_wq);
-out_unbuddied:
- free_percpu(pool->unbuddied);
-out_pool:
- kmem_cache_destroy(pool->c_handle);
-out_c:
- kfree(pool);
-out:
- return NULL;
-}
-
-/**
- * z3fold_destroy_pool() - destroys an existing z3fold pool
- * @pool: the z3fold pool to be destroyed
- *
- * The pool should be emptied before this function is called.
- */
-static void z3fold_destroy_pool(struct z3fold_pool *pool)
-{
- kmem_cache_destroy(pool->c_handle);
-
- /*
- * We need to destroy pool->compact_wq before pool->release_wq,
- * as any pending work on pool->compact_wq will call
- * queue_work(pool->release_wq, &pool->work).
- *
- * There are still outstanding pages until both workqueues are drained,
- * so we cannot unregister migration until then.
- */
-
- destroy_workqueue(pool->compact_wq);
- destroy_workqueue(pool->release_wq);
- free_percpu(pool->unbuddied);
- kfree(pool);
-}
-
-static const struct movable_operations z3fold_mops;
-
-/**
- * z3fold_alloc() - allocates a region of a given size
- * @pool: z3fold pool from which to allocate
- * @size: size in bytes of the desired allocation
- * @gfp: gfp flags used if the pool needs to grow
- * @handle: handle of the new allocation
- *
- * This function will attempt to find a free region in the pool large enough to
- * satisfy the allocation request. A search of the unbuddied lists is
- * performed first. If no suitable free region is found, then a new page is
- * allocated and added to the pool to satisfy the request.
- *
- * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
- * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
- * a new page.
- */
-static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
- unsigned long *handle)
-{
- int chunks = size_to_chunks(size);
- struct z3fold_header *zhdr = NULL;
- struct page *page = NULL;
- enum buddy bud;
- bool can_sleep = gfpflags_allow_blocking(gfp);
-
- if (!size || (gfp & __GFP_HIGHMEM))
- return -EINVAL;
-
- if (size > PAGE_SIZE)
- return -ENOSPC;
-
- if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
- bud = HEADLESS;
- else {
-retry:
- zhdr = __z3fold_alloc(pool, size, can_sleep);
- if (zhdr) {
- bud = get_free_buddy(zhdr, chunks);
- if (bud == HEADLESS) {
- if (!put_z3fold_locked(zhdr))
- z3fold_page_unlock(zhdr);
- pr_err("No free chunks in unbuddied\n");
- WARN_ON(1);
- goto retry;
- }
- page = virt_to_page(zhdr);
- goto found;
- }
- bud = FIRST;
- }
-
- page = alloc_page(gfp);
- if (!page)
- return -ENOMEM;
-
- zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
- if (!zhdr) {
- __free_page(page);
- return -ENOMEM;
- }
- atomic64_inc(&pool->pages_nr);
-
- if (bud == HEADLESS) {
- set_bit(PAGE_HEADLESS, &page->private);
- goto headless;
- }
- if (can_sleep) {
- lock_page(page);
- __SetPageMovable(page, &z3fold_mops);
- unlock_page(page);
- } else {
- WARN_ON(!trylock_page(page));
- __SetPageMovable(page, &z3fold_mops);
- unlock_page(page);
- }
- z3fold_page_lock(zhdr);
-
-found:
- if (bud == FIRST)
- zhdr->first_chunks = chunks;
- else if (bud == LAST)
- zhdr->last_chunks = chunks;
- else {
- zhdr->middle_chunks = chunks;
- zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
- }
- add_to_unbuddied(pool, zhdr);
-
-headless:
- spin_lock(&pool->lock);
- *handle = encode_handle(zhdr, bud);
- spin_unlock(&pool->lock);
- if (bud != HEADLESS)
- z3fold_page_unlock(zhdr);
-
- return 0;
-}
-
-/**
- * z3fold_free() - frees the allocation associated with the given handle
- * @pool: pool in which the allocation resided
- * @handle: handle associated with the allocation returned by z3fold_alloc()
- *
- * In the case that the z3fold page in which the allocation resides is under
- * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
- * only sets the first|middle|last_chunks to 0. The page is actually freed
- * once all buddies are evicted (see z3fold_reclaim_page() below).
- */
-static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
-{
- struct z3fold_header *zhdr;
- struct page *page;
- enum buddy bud;
- bool page_claimed;
-
- zhdr = get_z3fold_header(handle);
- page = virt_to_page(zhdr);
- page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);
-
- if (test_bit(PAGE_HEADLESS, &page->private)) {
- /* if a headless page is under reclaim, just leave.
- * NB: we use test_and_set_bit for a reason: if the bit
- * has not been set before, we release this page
- * immediately so we don't care about its value any more.
- */
- if (!page_claimed) {
- put_z3fold_header(zhdr);
- free_z3fold_page(page, true);
- atomic64_dec(&pool->pages_nr);
- }
- return;
- }
-
- /* Non-headless case */
- bud = handle_to_buddy(handle);
-
- switch (bud) {
- case FIRST:
- zhdr->first_chunks = 0;
- break;
- case MIDDLE:
- zhdr->middle_chunks = 0;
- break;
- case LAST:
- zhdr->last_chunks = 0;
- break;
- default:
- pr_err("%s: unknown bud %d\n", __func__, bud);
- WARN_ON(1);
- put_z3fold_header(zhdr);
- return;
- }
-
- if (!page_claimed)
- free_handle(handle, zhdr);
- if (put_z3fold_locked_list(zhdr))
- return;
- if (page_claimed) {
- /* the page has not been claimed by us */
- put_z3fold_header(zhdr);
- return;
- }
- if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
- clear_bit(PAGE_CLAIMED, &page->private);
- put_z3fold_header(zhdr);
- return;
- }
- if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
- zhdr->cpu = -1;
- kref_get(&zhdr->refcount);
- clear_bit(PAGE_CLAIMED, &page->private);
- do_compact_page(zhdr, true);
- return;
- }
- kref_get(&zhdr->refcount);
- clear_bit(PAGE_CLAIMED, &page->private);
- queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
- put_z3fold_header(zhdr);
-}
-
-/**
- * z3fold_map() - maps the allocation associated with the given handle
- * @pool: pool in which the allocation resides
- * @handle: handle associated with the allocation to be mapped
- *
- * Extracts the buddy number from handle and constructs the pointer to the
- * correct starting chunk within the page.
- *
- * Returns: a pointer to the mapped allocation
- */
-static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
-{
- struct z3fold_header *zhdr;
- struct page *page;
- void *addr;
- enum buddy buddy;
-
- zhdr = get_z3fold_header(handle);
- addr = zhdr;
- page = virt_to_page(zhdr);
-
- if (test_bit(PAGE_HEADLESS, &page->private))
- goto out;
-
- buddy = handle_to_buddy(handle);
- switch (buddy) {
- case FIRST:
- addr += ZHDR_SIZE_ALIGNED;
- break;
- case MIDDLE:
- addr += zhdr->start_middle << CHUNK_SHIFT;
- set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
- break;
- case LAST:
- addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
- break;
- default:
- pr_err("unknown buddy id %d\n", buddy);
- WARN_ON(1);
- addr = NULL;
- break;
- }
-
- if (addr)
- zhdr->mapped_count++;
-out:
- put_z3fold_header(zhdr);
- return addr;
-}
-
-/**
- * z3fold_unmap() - unmaps the allocation associated with the given handle
- * @pool: pool in which the allocation resides
- * @handle: handle associated with the allocation to be unmapped
- */
-static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
-{
- struct z3fold_header *zhdr;
- struct page *page;
- enum buddy buddy;
-
- zhdr = get_z3fold_header(handle);
- page = virt_to_page(zhdr);
-
- if (test_bit(PAGE_HEADLESS, &page->private))
- return;
-
- buddy = handle_to_buddy(handle);
- if (buddy == MIDDLE)
- clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
- zhdr->mapped_count--;
- put_z3fold_header(zhdr);
-}
-
-/**
- * z3fold_get_pool_pages() - gets the z3fold pool size in pages
- * @pool: pool whose size is being queried
- *
- * Returns: size in pages of the given pool.
- */
-static u64 z3fold_get_pool_pages(struct z3fold_pool *pool)
-{
- return atomic64_read(&pool->pages_nr);
-}
-
-static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
-{
- struct z3fold_header *zhdr;
- struct z3fold_pool *pool;
-
- VM_BUG_ON_PAGE(PageIsolated(page), page);
-
- if (test_bit(PAGE_HEADLESS, &page->private))
- return false;
-
- zhdr = page_address(page);
- z3fold_page_lock(zhdr);
- if (test_bit(NEEDS_COMPACTING, &page->private) ||
- test_bit(PAGE_STALE, &page->private))
- goto out;
-
- if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
- goto out;
-
- if (test_and_set_bit(PAGE_CLAIMED, &page->private))
- goto out;
- pool = zhdr_to_pool(zhdr);
- spin_lock(&pool->lock);
- if (!list_empty(&zhdr->buddy))
- list_del_init(&zhdr->buddy);
- spin_unlock(&pool->lock);
-
- kref_get(&zhdr->refcount);
- z3fold_page_unlock(zhdr);
- return true;
-
-out:
- z3fold_page_unlock(zhdr);
- return false;
-}
-
-static int z3fold_page_migrate(struct page *newpage, struct page *page,
- enum migrate_mode mode)
-{
- struct z3fold_header *zhdr, *new_zhdr;
- struct z3fold_pool *pool;
-
- VM_BUG_ON_PAGE(!PageIsolated(page), page);
- VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
- VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
-
- zhdr = page_address(page);
- pool = zhdr_to_pool(zhdr);
-
- if (!z3fold_page_trylock(zhdr))
- return -EAGAIN;
- if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
- clear_bit(PAGE_CLAIMED, &page->private);
- z3fold_page_unlock(zhdr);
- return -EBUSY;
- }
- if (work_pending(&zhdr->work)) {
- z3fold_page_unlock(zhdr);
- return -EAGAIN;
- }
- new_zhdr = page_address(newpage);
- memcpy(new_zhdr, zhdr, PAGE_SIZE);
- newpage->private = page->private;
- set_bit(PAGE_MIGRATED, &page->private);
- z3fold_page_unlock(zhdr);
- spin_lock_init(&new_zhdr->page_lock);
- INIT_WORK(&new_zhdr->work, compact_page_work);
- /*
- * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
- * so we only have to reinitialize it.
- */
- INIT_LIST_HEAD(&new_zhdr->buddy);
- __ClearPageMovable(page);
-
- get_page(newpage);
- z3fold_page_lock(new_zhdr);
- if (new_zhdr->first_chunks)
- encode_handle(new_zhdr, FIRST);
- if (new_zhdr->last_chunks)
- encode_handle(new_zhdr, LAST);
- if (new_zhdr->middle_chunks)
- encode_handle(new_zhdr, MIDDLE);
- set_bit(NEEDS_COMPACTING, &newpage->private);
- new_zhdr->cpu = smp_processor_id();
- __SetPageMovable(newpage, &z3fold_mops);
- z3fold_page_unlock(new_zhdr);
-
- queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
-
- /* PAGE_CLAIMED and PAGE_MIGRATED are cleared now. */
- page->private = 0;
- put_page(page);
- return 0;
-}
-
-static void z3fold_page_putback(struct page *page)
-{
- struct z3fold_header *zhdr;
- struct z3fold_pool *pool;
-
- zhdr = page_address(page);
- pool = zhdr_to_pool(zhdr);
-
- z3fold_page_lock(zhdr);
- if (!list_empty(&zhdr->buddy))
- list_del_init(&zhdr->buddy);
- INIT_LIST_HEAD(&page->lru);
- if (put_z3fold_locked(zhdr))
- return;
- if (list_empty(&zhdr->buddy))
- add_to_unbuddied(pool, zhdr);
- clear_bit(PAGE_CLAIMED, &page->private);
- z3fold_page_unlock(zhdr);
-}
-
-static const struct movable_operations z3fold_mops = {
- .isolate_page = z3fold_page_isolate,
- .migrate_page = z3fold_page_migrate,
- .putback_page = z3fold_page_putback,
-};
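/*
 * Illustrative sketch (not from the original source): with these ops
 * registered through __SetPageMovable(), the core migration path invokes
 * them roughly as below; simplified, the real callers are
 * isolate_movable_page() and move_to_new_folio():
 *
 *	if (mops->isolate_page(page, mode)) {
 *		rc = mops->migrate_page(newpage, page, mode);
 *		if (rc)
 *			mops->putback_page(page);
 *	}
 */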
-
-/*****************
- * zpool
- ****************/
-
-static void *z3fold_zpool_create(const char *name, gfp_t gfp)
-{
- return z3fold_create_pool(name, gfp);
-}
-
-static void z3fold_zpool_destroy(void *pool)
-{
- z3fold_destroy_pool(pool);
-}
-
-static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
- unsigned long *handle)
-{
- return z3fold_alloc(pool, size, gfp, handle);
-}
-static void z3fold_zpool_free(void *pool, unsigned long handle)
-{
- z3fold_free(pool, handle);
-}
-
-static void *z3fold_zpool_map(void *pool, unsigned long handle,
- enum zpool_mapmode mm)
-{
- return z3fold_map(pool, handle);
-}
-static void z3fold_zpool_unmap(void *pool, unsigned long handle)
-{
- z3fold_unmap(pool, handle);
-}
-
-static u64 z3fold_zpool_total_pages(void *pool)
-{
- return z3fold_get_pool_pages(pool);
-}
-
-static struct zpool_driver z3fold_zpool_driver = {
- .type = "z3fold",
- .sleep_mapped = true,
- .owner = THIS_MODULE,
- .create = z3fold_zpool_create,
- .destroy = z3fold_zpool_destroy,
- .malloc = z3fold_zpool_malloc,
- .free = z3fold_zpool_free,
- .map = z3fold_zpool_map,
- .unmap = z3fold_zpool_unmap,
- .total_pages = z3fold_zpool_total_pages,
-};
-
-MODULE_ALIAS("zpool-z3fold");
-
-static int __init init_z3fold(void)
-{
- /*
- * Make sure the z3fold header is not larger than the page size and
-	 * that there is space remaining for its buddies.
- */
- BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE - CHUNK_SIZE);
- zpool_register_driver(&z3fold_zpool_driver);
-
- return 0;
-}
-
-static void __exit exit_z3fold(void)
-{
- zpool_unregister_driver(&z3fold_zpool_driver);
-}
-
-module_init(init_z3fold);
-module_exit(exit_z3fold);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
-MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");
diff --git a/mm/zbud.c b/mm/zbud.c
deleted file mode 100644
index e9836fff9438..000000000000
--- a/mm/zbud.c
+++ /dev/null
@@ -1,455 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * zbud.c
- *
- * Copyright (C) 2013, Seth Jennings, IBM
- *
- * Concepts based on zcache internal zbud allocator by Dan Magenheimer.
- *
- * zbud is a special-purpose allocator for storing compressed pages. Contrary
- * to what its name may suggest, zbud is not a buddy allocator, but rather an
- * allocator that "buddies" two compressed pages together in a single memory
- * page.
- *
- * While this design limits storage density, it has simple and deterministic
- * reclaim properties that make it preferable to a higher density approach when
- * reclaim will be used.
- *
- * zbud works by storing compressed pages, or "zpages", together in pairs in a
- * single memory page called a "zbud page". The first buddy is "left
- * justified" at the beginning of the zbud page, and the last buddy is "right
- * justified" at the end of the zbud page. The benefit is that if either
- * buddy is freed, the freed buddy space, coalesced with whatever slack space
- * existed between the buddies, results in the largest possible free region
- * within the zbud page.
- *
- * zbud also provides an attractive lower bound on density. The ratio of zpages
- * to zbud pages cannot be less than 1. This ensures that zbud can never "do
- * harm" by using more pages to store zpages than the uncompressed zpages would
- * have used on their own.
- *
- * zbud pages are divided into "chunks". The size of the chunks is fixed at
- * compile time and determined by NCHUNKS_ORDER below. Dividing zbud pages
- * into chunks allows organizing unbuddied zbud pages into a manageable number
- * of unbuddied lists according to the number of free chunks available in the
- * zbud page.
- *
- * The zbud API differs from that of conventional allocators in that the
- * allocation function, zbud_alloc(), returns an opaque handle to the user,
- * not a dereferenceable pointer. The user must map the handle using
- * zbud_map() in order to get a usable pointer by which to access the
- * allocation data and unmap the handle with zbud_unmap() when operations
- * on the allocation data are complete.
- */
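/*
 * Illustrative usage sketch of the handle API described above
 * (hypothetical caller; src and len are assumed, error handling elided):
 *
 *	struct zbud_pool *pool = zbud_create_pool(GFP_KERNEL);
 *	unsigned long handle;
 *
 *	if (pool && !zbud_alloc(pool, len, GFP_KERNEL, &handle)) {
 *		void *dst = zbud_map(pool, handle);
 *
 *		memcpy(dst, src, len);
 *		zbud_unmap(pool, handle);
 *		zbud_free(pool, handle);
 *	}
 */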
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/atomic.h>
-#include <linux/list.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/preempt.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/zpool.h>
-
-/*****************
- * Structures
-*****************/
-/*
- * NCHUNKS_ORDER determines the internal allocation granularity, effectively
- * adjusting internal fragmentation. It also determines the number of
- * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
- * allocation granularity will be in chunks of size PAGE_SIZE/64. As one chunk
- * in each allocated page is occupied by the zbud header, NCHUNKS evaluates
- * to 63, the maximum number of free chunks in a zbud page; there will
- * likewise be 63 freelists per pool.
- */
-#define NCHUNKS_ORDER 6
-
-#define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
-#define CHUNK_SIZE (1 << CHUNK_SHIFT)
-#define ZHDR_SIZE_ALIGNED CHUNK_SIZE
-#define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
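/*
 * Worked example (assuming PAGE_SIZE = 4096, i.e. PAGE_SHIFT = 12):
 * CHUNK_SHIFT = 12 - 6 = 6, so CHUNK_SIZE = 64 bytes,
 * ZHDR_SIZE_ALIGNED = 64, and NCHUNKS = (4096 - 64) >> 6 = 63 free
 * chunks per zbud page.
 */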
-
-struct zbud_pool;
-
-/**
- * struct zbud_pool - stores metadata for each zbud pool
- * @lock: protects all pool fields and first|last_chunk fields of any
- * zbud page in the pool
- * @unbuddied: array of lists tracking zbud pages that only contain one buddy;
- * the lists each zbud page is added to depends on the size of
- * its free region.
- * @buddied: list tracking the zbud pages that contain two buddies;
- * these zbud pages are full
- * @pages_nr: number of zbud pages in the pool.
- *
- * This structure is allocated at pool creation time and maintains metadata
- * pertaining to a particular zbud pool.
- */
-struct zbud_pool {
- spinlock_t lock;
- union {
- /*
-		 * Reuse unbuddied[0] as buddied on the grounds that
- * unbuddied[0] is unused.
- */
- struct list_head buddied;
- struct list_head unbuddied[NCHUNKS];
- };
- u64 pages_nr;
-};
-
-/*
- * struct zbud_header - zbud page metadata occupying the first chunk of each
- * zbud page.
- * @buddy: links the zbud page into the unbuddied/buddied lists in the pool
- * @first_chunks: the size of the first buddy in chunks, 0 if free
- * @last_chunks: the size of the last buddy in chunks, 0 if free
- */
-struct zbud_header {
- struct list_head buddy;
- unsigned int first_chunks;
- unsigned int last_chunks;
-};
-
-/*****************
- * Helpers
-*****************/
-/* Just to make the code easier to read */
-enum buddy {
- FIRST,
- LAST
-};
-
-/* Converts an allocation size in bytes to size in zbud chunks */
-static int size_to_chunks(size_t size)
-{
- return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
-}
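/*
 * Worked example: this is a round-up division. With CHUNK_SIZE = 64,
 * size_to_chunks(100) = (100 + 63) >> 6 = 2, i.e. a 100-byte allocation
 * occupies two 64-byte chunks.
 */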
-
-#define for_each_unbuddied_list(_iter, _begin) \
- for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
-
-/* Initializes the zbud header of a newly allocated zbud page */
-static struct zbud_header *init_zbud_page(struct page *page)
-{
- struct zbud_header *zhdr = page_address(page);
- zhdr->first_chunks = 0;
- zhdr->last_chunks = 0;
- INIT_LIST_HEAD(&zhdr->buddy);
- return zhdr;
-}
-
-/* Resets the struct page fields and frees the page */
-static void free_zbud_page(struct zbud_header *zhdr)
-{
- __free_page(virt_to_page(zhdr));
-}
-
-/*
- * Encodes the handle of a particular buddy within a zbud page
- * Pool lock should be held as this function accesses first|last_chunks
- */
-static unsigned long encode_handle(struct zbud_header *zhdr, enum buddy bud)
-{
- unsigned long handle;
-
- /*
- * For now, the encoded handle is actually just the pointer to the data
- * but this might not always be the case. A little information hiding.
-	 * For the first allocation, ZHDR_SIZE_ALIGNED is added to the handle
-	 * to jump over the zbud header in the first chunk.
- */
- handle = (unsigned long)zhdr;
- if (bud == FIRST)
- /* skip over zbud header */
- handle += ZHDR_SIZE_ALIGNED;
- else /* bud == LAST */
- handle += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
- return handle;
-}
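/*
 * Worked example (assuming a zbud page at 0x1000, PAGE_SIZE = 4096,
 * CHUNK_SIZE = 64 and last_chunks = 2): FIRST encodes to
 * 0x1000 + 64 = 0x1040 (skipping the header chunk) and LAST to
 * 0x1000 + 4096 - 128 = 0x1f80.
 */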
-
-/* Returns the zbud page where a given handle is stored */
-static struct zbud_header *handle_to_zbud_header(unsigned long handle)
-{
- return (struct zbud_header *)(handle & PAGE_MASK);
-}
-
-/* Returns the number of free chunks in a zbud page */
-static int num_free_chunks(struct zbud_header *zhdr)
-{
- /*
- * Rather than branch for different situations, just use the fact that
- * free buddies have a length of zero to simplify everything.
- */
- return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
-}
-
-/*****************
- * API Functions
-*****************/
-/**
- * zbud_create_pool() - create a new zbud pool
- * @gfp: gfp flags when allocating the zbud pool structure
- *
- * Return: pointer to the new zbud pool or NULL if the metadata allocation
- * failed.
- */
-static struct zbud_pool *zbud_create_pool(gfp_t gfp)
-{
- struct zbud_pool *pool;
- int i;
-
- pool = kzalloc(sizeof(struct zbud_pool), gfp);
- if (!pool)
- return NULL;
- spin_lock_init(&pool->lock);
- for_each_unbuddied_list(i, 0)
- INIT_LIST_HEAD(&pool->unbuddied[i]);
- INIT_LIST_HEAD(&pool->buddied);
- pool->pages_nr = 0;
- return pool;
-}
-
-/**
- * zbud_destroy_pool() - destroys an existing zbud pool
- * @pool: the zbud pool to be destroyed
- *
- * The pool should be emptied before this function is called.
- */
-static void zbud_destroy_pool(struct zbud_pool *pool)
-{
- kfree(pool);
-}
-
-/**
- * zbud_alloc() - allocates a region of a given size
- * @pool: zbud pool from which to allocate
- * @size: size in bytes of the desired allocation
- * @gfp: gfp flags used if the pool needs to grow
- * @handle: handle of the new allocation
- *
- * This function will attempt to find a free region in the pool large enough to
- * satisfy the allocation request. A search of the unbuddied lists is
- * performed first. If no suitable free region is found, then a new page is
- * allocated and added to the pool to satisfy the request.
- *
- * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
- * as zbud pool pages.
- *
- * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
- * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
- * a new page.
- */
-static int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
- unsigned long *handle)
-{
- int chunks, i, freechunks;
- struct zbud_header *zhdr = NULL;
- enum buddy bud;
- struct page *page;
-
- if (!size || (gfp & __GFP_HIGHMEM))
- return -EINVAL;
- if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
- return -ENOSPC;
- chunks = size_to_chunks(size);
- spin_lock(&pool->lock);
-
- /* First, try to find an unbuddied zbud page. */
- for_each_unbuddied_list(i, chunks) {
- if (!list_empty(&pool->unbuddied[i])) {
- zhdr = list_first_entry(&pool->unbuddied[i],
- struct zbud_header, buddy);
- list_del(&zhdr->buddy);
- if (zhdr->first_chunks == 0)
- bud = FIRST;
- else
- bud = LAST;
- goto found;
- }
- }
-
- /* Couldn't find unbuddied zbud page, create new one */
- spin_unlock(&pool->lock);
- page = alloc_page(gfp);
- if (!page)
- return -ENOMEM;
- spin_lock(&pool->lock);
- pool->pages_nr++;
- zhdr = init_zbud_page(page);
- bud = FIRST;
-
-found:
- if (bud == FIRST)
- zhdr->first_chunks = chunks;
- else
- zhdr->last_chunks = chunks;
-
- if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0) {
- /* Add to unbuddied list */
- freechunks = num_free_chunks(zhdr);
- list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
- } else {
- /* Add to buddied list */
- list_add(&zhdr->buddy, &pool->buddied);
- }
-
- *handle = encode_handle(zhdr, bud);
- spin_unlock(&pool->lock);
-
- return 0;
-}
-
-/**
- * zbud_free() - frees the allocation associated with the given handle
- * @pool: pool in which the allocation resided
- * @handle: handle associated with the allocation returned by zbud_alloc()
- */
-static void zbud_free(struct zbud_pool *pool, unsigned long handle)
-{
- struct zbud_header *zhdr;
- int freechunks;
-
- spin_lock(&pool->lock);
- zhdr = handle_to_zbud_header(handle);
-
- /* If first buddy, handle will be page aligned */
- if ((handle - ZHDR_SIZE_ALIGNED) & ~PAGE_MASK)
- zhdr->last_chunks = 0;
- else
- zhdr->first_chunks = 0;
-
- /* Remove from existing buddy list */
- list_del(&zhdr->buddy);
-
- if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
- /* zbud page is empty, free */
- free_zbud_page(zhdr);
- pool->pages_nr--;
- } else {
- /* Add to unbuddied list */
- freechunks = num_free_chunks(zhdr);
- list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
- }
-
- spin_unlock(&pool->lock);
-}
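/*
 * Note on the first/last test above: FIRST handles are always page start
 * plus ZHDR_SIZE_ALIGNED, so subtracting ZHDR_SIZE_ALIGNED yields a
 * page-aligned value only for the first buddy. Reusing the example
 * handles: 0x1040 - 0x40 = 0x1000 has a zero page offset (first), while
 * 0x1f80 - 0x40 = 0x1f40 has a nonzero page offset (last).
 */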
-
-/**
- * zbud_map() - maps the allocation associated with the given handle
- * @pool: pool in which the allocation resides
- * @handle: handle associated with the allocation to be mapped
- *
- * While trivial for zbud, the mapping functions for other allocators
- * implementing this allocation API could have more complex information encoded
- * in the handle and could create temporary mappings to make the data
- * accessible to the user.
- *
- * Returns: a pointer to the mapped allocation
- */
-static void *zbud_map(struct zbud_pool *pool, unsigned long handle)
-{
- return (void *)(handle);
-}
-
-/**
- * zbud_unmap() - unmaps the allocation associated with the given handle
- * @pool: pool in which the allocation resides
- * @handle: handle associated with the allocation to be unmapped
- */
-static void zbud_unmap(struct zbud_pool *pool, unsigned long handle)
-{
-}
-
-/**
- * zbud_get_pool_pages() - gets the zbud pool size in pages
- * @pool: pool whose size is being queried
- *
- * Returns: size in pages of the given pool. The pool lock need not be
- * taken to access pages_nr.
- */
-static u64 zbud_get_pool_pages(struct zbud_pool *pool)
-{
- return pool->pages_nr;
-}
-
-/*****************
- * zpool
- ****************/
-
-static void *zbud_zpool_create(const char *name, gfp_t gfp)
-{
- return zbud_create_pool(gfp);
-}
-
-static void zbud_zpool_destroy(void *pool)
-{
- zbud_destroy_pool(pool);
-}
-
-static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp,
- unsigned long *handle)
-{
- return zbud_alloc(pool, size, gfp, handle);
-}
-static void zbud_zpool_free(void *pool, unsigned long handle)
-{
- zbud_free(pool, handle);
-}
-
-static void *zbud_zpool_map(void *pool, unsigned long handle,
- enum zpool_mapmode mm)
-{
- return zbud_map(pool, handle);
-}
-static void zbud_zpool_unmap(void *pool, unsigned long handle)
-{
- zbud_unmap(pool, handle);
-}
-
-static u64 zbud_zpool_total_pages(void *pool)
-{
- return zbud_get_pool_pages(pool);
-}
-
-static struct zpool_driver zbud_zpool_driver = {
- .type = "zbud",
- .sleep_mapped = true,
- .owner = THIS_MODULE,
- .create = zbud_zpool_create,
- .destroy = zbud_zpool_destroy,
- .malloc = zbud_zpool_malloc,
- .free = zbud_zpool_free,
- .map = zbud_zpool_map,
- .unmap = zbud_zpool_unmap,
- .total_pages = zbud_zpool_total_pages,
-};
-
-MODULE_ALIAS("zpool-zbud");
-
-static int __init init_zbud(void)
-{
- /* Make sure the zbud header will fit in one chunk */
- BUILD_BUG_ON(sizeof(struct zbud_header) > ZHDR_SIZE_ALIGNED);
- pr_info("loaded\n");
-
- zpool_register_driver(&zbud_zpool_driver);
-
- return 0;
-}
-
-static void __exit exit_zbud(void)
-{
- zpool_unregister_driver(&zbud_zpool_driver);
- pr_info("unloaded\n");
-}
-
-module_init(init_zbud);
-module_exit(exit_zbud);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
-MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages");
diff --git a/mm/zpool.c b/mm/zpool.c
index b9fda1fa857d..6d6d88930932 100644
--- a/mm/zpool.c
+++ b/mm/zpool.c
@@ -95,7 +95,7 @@ static void zpool_put_driver(struct zpool_driver *driver)
/**
* zpool_has_pool() - Check if the pool driver is available
- * @type: The type of the zpool to check (e.g. zbud, zsmalloc)
+ * @type: The type of the zpool to check (e.g. zsmalloc)
*
* This checks if the @type pool driver is available. This will try to load
* the requested module, if needed, but there is no guarantee the module will
@@ -130,7 +130,7 @@ EXPORT_SYMBOL(zpool_has_pool);
/**
* zpool_create_pool() - Create a new zpool
- * @type: The type of the zpool to create (e.g. zbud, zsmalloc)
+ * @type: The type of the zpool to create (e.g. zsmalloc)
* @name: The name of the zpool (e.g. zram0, zswap)
* @gfp: The GFP flags to use when allocating the pool.
*
@@ -221,22 +221,6 @@ const char *zpool_get_type(struct zpool *zpool)
}
/**
- * zpool_malloc_support_movable() - Check if the zpool supports
- * allocating movable memory
- * @zpool: The zpool to check
- *
- * This returns if the zpool supports allocating movable memory.
- *
- * Implementations must guarantee this to be thread-safe.
- *
- * Returns: true if the zpool supports allocating movable memory, false if not
- */
-bool zpool_malloc_support_movable(struct zpool *zpool)
-{
- return zpool->driver->malloc_support_movable;
-}
-
-/**
* zpool_malloc() - Allocate memory
* @zpool: The zpool to allocate from.
* @size: The amount of memory to allocate.
@@ -278,46 +262,51 @@ void zpool_free(struct zpool *zpool, unsigned long handle)
}
/**
- * zpool_map_handle() - Map a previously allocated handle into memory
+ * zpool_obj_read_begin() - Start reading from a previously allocated handle.
* @zpool: The zpool that the handle was allocated from
- * @handle: The handle to map
- * @mapmode: How the memory should be mapped
+ * @handle: The handle to read from
+ * @local_copy: A local buffer to use if needed.
*
- * This maps a previously allocated handle into memory. The @mapmode
- * param indicates to the implementation how the memory will be
- * used, i.e. read-only, write-only, read-write. If the
- * implementation does not support it, the memory will be treated
- * as read-write.
+ * This starts a read operation of a previously allocated handle. The passed
+ * @local_copy buffer may be used if needed, in which case the memory is
+ * copied into it.
+ * zpool_obj_read_end() MUST be called after the read is completed to undo any
+ * actions taken (e.g. release locks).
*
- * This may hold locks, disable interrupts, and/or preemption,
- * and the zpool_unmap_handle() must be called to undo those
- * actions. The code that uses the mapped handle should complete
- * its operations on the mapped handle memory quickly and unmap
- * as soon as possible. As the implementation may use per-cpu
- * data, multiple handles should not be mapped concurrently on
- * any cpu.
+ * Returns: A pointer to the handle memory to be read. If @local_copy is
+ * used, the returned pointer is @local_copy.
+ */
+void *zpool_obj_read_begin(struct zpool *zpool, unsigned long handle,
+ void *local_copy)
+{
+ return zpool->driver->obj_read_begin(zpool->pool, handle, local_copy);
+}
+
+/**
+ * zpool_obj_read_end() - Finish reading from a previously allocated handle.
+ * @zpool: The zpool that the handle was allocated from
+ * @handle: The handle to read from
+ * @handle_mem: The pointer returned by zpool_obj_read_begin()
*
- * Returns: A pointer to the handle's mapped memory area.
+ * Finishes a read operation previously started by zpool_obj_read_begin().
*/
-void *zpool_map_handle(struct zpool *zpool, unsigned long handle,
- enum zpool_mapmode mapmode)
+void zpool_obj_read_end(struct zpool *zpool, unsigned long handle,
+ void *handle_mem)
{
- return zpool->driver->map(zpool->pool, handle, mapmode);
+ zpool->driver->obj_read_end(zpool->pool, handle, handle_mem);
}
/**
- * zpool_unmap_handle() - Unmap a previously mapped handle
+ * zpool_obj_write() - Write to a previously allocated handle.
* @zpool: The zpool that the handle was allocated from
- * @handle: The handle to unmap
+ * @handle:	The handle to write to
+ * @handle_mem: The memory to copy into the handle.
+ * @mem_len: The length of memory to be written.
*
- * This unmaps a previously mapped handle. Any locks or other
- * actions that the implementation took in zpool_map_handle()
- * will be undone here. The memory area returned from
- * zpool_map_handle() should no longer be used after this.
*/
-void zpool_unmap_handle(struct zpool *zpool, unsigned long handle)
+void zpool_obj_write(struct zpool *zpool, unsigned long handle,
+ void *handle_mem, size_t mem_len)
{
- zpool->driver->unmap(zpool->pool, handle);
+ zpool->driver->obj_write(zpool->pool, handle, handle_mem, mem_len);
}
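/*
 * Illustrative sketch of the new read API from a caller's perspective
 * (hypothetical names; the scratch buffer is assumed large enough for
 * the object):
 *
 *	u8 scratch[PAGE_SIZE];
 *	void *obj = zpool_obj_read_begin(zpool, handle, scratch);
 *
 *	consume(obj, obj_len);			// hypothetical consumer
 *	zpool_obj_read_end(zpool, handle, obj);
 */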
/**
@@ -333,23 +322,5 @@ u64 zpool_get_total_pages(struct zpool *zpool)
return zpool->driver->total_pages(zpool->pool);
}
-/**
- * zpool_can_sleep_mapped - Test if zpool can sleep when do mapped.
- * @zpool: The zpool to test
- *
- * Some allocators enter non-preemptible context in ->map() callback (e.g.
- * disable pagefaults) and exit that context in ->unmap(), which limits what
- * we can do with the mapped object. For instance, we cannot wait for
- * asynchronous crypto API to decompress such an object or take mutexes
- * since those will call into the scheduler. This function tells us whether
- * we use such an allocator.
- *
- * Returns: true if zpool can sleep; false otherwise.
- */
-bool zpool_can_sleep_mapped(struct zpool *zpool)
-{
- return zpool->driver->sleep_mapped;
-}
-
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
MODULE_DESCRIPTION("Common API for compressed memory storage");
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 6d0e47f7ae33..961b270f023c 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -18,7 +18,7 @@
/*
* lock ordering:
* page_lock
- * pool->migrate_lock
+ * pool->lock
* class->lock
* zspage->lock
*/
@@ -223,8 +223,8 @@ struct zs_pool {
#ifdef CONFIG_COMPACTION
struct work_struct free_work;
#endif
- /* protect page/zspage migration */
- rwlock_t migrate_lock;
+ /* protect zspage migration/compaction */
+ rwlock_t lock;
atomic_t compaction_in_progress;
};
@@ -257,6 +257,15 @@ static inline void free_zpdesc(struct zpdesc *zpdesc)
__free_page(page);
}
+#define ZS_PAGE_UNLOCKED 0
+#define ZS_PAGE_WRLOCKED -1
+
+struct zspage_lock {
+ spinlock_t lock;
+ int cnt;
+ struct lockdep_map dep_map;
+};
+
struct zspage {
struct {
unsigned int huge:HUGE_BITS;
@@ -269,15 +278,86 @@ struct zspage {
struct zpdesc *first_zpdesc;
struct list_head list; /* fullness list */
struct zs_pool *pool;
- rwlock_t lock;
+ struct zspage_lock zsl;
};
-struct mapping_area {
- local_lock_t lock;
- char *vm_buf; /* copy buffer for objects that span pages */
- char *vm_addr; /* address of kmap_local_page()'ed pages */
- enum zs_mapmode vm_mm; /* mapping mode */
-};
+static void zspage_lock_init(struct zspage *zspage)
+{
+ static struct lock_class_key __key;
+ struct zspage_lock *zsl = &zspage->zsl;
+
+ lockdep_init_map(&zsl->dep_map, "zspage->lock", &__key, 0);
+ spin_lock_init(&zsl->lock);
+ zsl->cnt = ZS_PAGE_UNLOCKED;
+}
+
+/*
+ * The zspage lock can be held from atomic contexts, but it needs to remain
+ * preemptible when held for reading because it remains held outside of those
+ * atomic contexts, otherwise we unnecessarily lose preemptibility.
+ *
+ * To achieve this, the following rules are enforced on readers and writers:
+ *
+ * - Writers are blocked by both writers and readers, while readers are only
+ * blocked by writers (i.e. normal rwlock semantics).
+ *
+ * - Writers are always atomic (to allow readers to spin waiting for them).
+ *
+ * - Writers always use trylock (as the lock may be held by sleeping readers).
+ *
+ * - Readers may spin on the lock (as they can only wait for atomic writers).
+ *
+ * - Readers may sleep while holding the lock (as writes only use trylock).
+ */
+static void zspage_read_lock(struct zspage *zspage)
+{
+ struct zspage_lock *zsl = &zspage->zsl;
+
+ rwsem_acquire_read(&zsl->dep_map, 0, 0, _RET_IP_);
+
+ spin_lock(&zsl->lock);
+ zsl->cnt++;
+ spin_unlock(&zsl->lock);
+
+ lock_acquired(&zsl->dep_map, _RET_IP_);
+}
+
+static void zspage_read_unlock(struct zspage *zspage)
+{
+ struct zspage_lock *zsl = &zspage->zsl;
+
+ rwsem_release(&zsl->dep_map, _RET_IP_);
+
+ spin_lock(&zsl->lock);
+ zsl->cnt--;
+ spin_unlock(&zsl->lock);
+}
+
+static __must_check bool zspage_write_trylock(struct zspage *zspage)
+{
+ struct zspage_lock *zsl = &zspage->zsl;
+
+ spin_lock(&zsl->lock);
+ if (zsl->cnt == ZS_PAGE_UNLOCKED) {
+ zsl->cnt = ZS_PAGE_WRLOCKED;
+ rwsem_acquire(&zsl->dep_map, 0, 1, _RET_IP_);
+ lock_acquired(&zsl->dep_map, _RET_IP_);
+ return true;
+ }
+
+ spin_unlock(&zsl->lock);
+ return false;
+}
+
+static void zspage_write_unlock(struct zspage *zspage)
+{
+ struct zspage_lock *zsl = &zspage->zsl;
+
+ rwsem_release(&zsl->dep_map, _RET_IP_);
+
+ zsl->cnt = ZS_PAGE_UNLOCKED;
+ spin_unlock(&zsl->lock);
+}
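/*
 * Illustrative sketch of the resulting usage pattern (simplified):
 * readers may sleep with the lock held, while writers take it with
 * trylock from atomic context and back off on failure:
 *
 *	zspage_read_lock(zspage);
 *	// copy or decompress the object, possibly sleeping
 *	zspage_read_unlock(zspage);
 *
 *	if (zspage_write_trylock(zspage)) {
 *		// move pages; no reader can be active here
 *		zspage_write_unlock(zspage);
 *	}
 */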
/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
static void SetZsHugePage(struct zspage *zspage)
@@ -290,12 +370,6 @@ static bool ZsHugePage(struct zspage *zspage)
return zspage->huge;
}
-static void migrate_lock_init(struct zspage *zspage);
-static void migrate_read_lock(struct zspage *zspage);
-static void migrate_read_unlock(struct zspage *zspage);
-static void migrate_write_lock(struct zspage *zspage);
-static void migrate_write_unlock(struct zspage *zspage);
-
#ifdef CONFIG_COMPACTION
static void kick_deferred_free(struct zs_pool *pool);
static void init_deferred_free(struct zs_pool *pool);
@@ -401,29 +475,22 @@ static void zs_zpool_free(void *pool, unsigned long handle)
zs_free(pool, handle);
}
-static void *zs_zpool_map(void *pool, unsigned long handle,
- enum zpool_mapmode mm)
+static void *zs_zpool_obj_read_begin(void *pool, unsigned long handle,
+ void *local_copy)
{
- enum zs_mapmode zs_mm;
-
- switch (mm) {
- case ZPOOL_MM_RO:
- zs_mm = ZS_MM_RO;
- break;
- case ZPOOL_MM_WO:
- zs_mm = ZS_MM_WO;
- break;
- case ZPOOL_MM_RW:
- default:
- zs_mm = ZS_MM_RW;
- break;
- }
+ return zs_obj_read_begin(pool, handle, local_copy);
+}
- return zs_map_object(pool, handle, zs_mm);
+static void zs_zpool_obj_read_end(void *pool, unsigned long handle,
+ void *handle_mem)
+{
+ zs_obj_read_end(pool, handle, handle_mem);
}
-static void zs_zpool_unmap(void *pool, unsigned long handle)
+
+static void zs_zpool_obj_write(void *pool, unsigned long handle,
+ void *handle_mem, size_t mem_len)
{
- zs_unmap_object(pool, handle);
+ zs_obj_write(pool, handle, handle_mem, mem_len);
}
static u64 zs_zpool_total_pages(void *pool)
@@ -436,22 +503,17 @@ static struct zpool_driver zs_zpool_driver = {
.owner = THIS_MODULE,
.create = zs_zpool_create,
.destroy = zs_zpool_destroy,
- .malloc_support_movable = true,
.malloc = zs_zpool_malloc,
.free = zs_zpool_free,
- .map = zs_zpool_map,
- .unmap = zs_zpool_unmap,
+ .obj_read_begin = zs_zpool_obj_read_begin,
+ .obj_read_end = zs_zpool_obj_read_end,
+ .obj_write = zs_zpool_obj_write,
.total_pages = zs_zpool_total_pages,
};
MODULE_ALIAS("zpool-zsmalloc");
#endif /* CONFIG_ZPOOL */
-/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
-static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = {
- .lock = INIT_LOCAL_LOCK(lock),
-};
-
static inline bool __maybe_unused is_first_zpdesc(struct zpdesc *zpdesc)
{
return PagePrivate(zpdesc_page(zpdesc));
@@ -992,7 +1054,9 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
return NULL;
zspage->magic = ZSPAGE_MAGIC;
- migrate_lock_init(zspage);
+ zspage->pool = pool;
+ zspage->class = class->index;
+ zspage_lock_init(zspage);
for (i = 0; i < class->pages_per_zspage; i++) {
struct zpdesc *zpdesc;
@@ -1015,8 +1079,6 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
create_page_chain(class, zspage, zpdescs);
init_zspage(class, zspage);
- zspage->pool = pool;
- zspage->class = class->index;
return zspage;
}
@@ -1036,93 +1098,6 @@ static struct zspage *find_get_zspage(struct size_class *class)
return zspage;
}
-static inline int __zs_cpu_up(struct mapping_area *area)
-{
- /*
- * Make sure we don't leak memory if a cpu UP notification
- * and zs_init() race and both call zs_cpu_up() on the same cpu
- */
- if (area->vm_buf)
- return 0;
- area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);
- if (!area->vm_buf)
- return -ENOMEM;
- return 0;
-}
-
-static inline void __zs_cpu_down(struct mapping_area *area)
-{
- kfree(area->vm_buf);
- area->vm_buf = NULL;
-}
-
-static void *__zs_map_object(struct mapping_area *area,
- struct zpdesc *zpdescs[2], int off, int size)
-{
- size_t sizes[2];
- char *buf = area->vm_buf;
-
- /* disable page faults to match kmap_local_page() return conditions */
- pagefault_disable();
-
- /* no read fastpath */
- if (area->vm_mm == ZS_MM_WO)
- goto out;
-
- sizes[0] = PAGE_SIZE - off;
- sizes[1] = size - sizes[0];
-
- /* copy object to per-cpu buffer */
- memcpy_from_page(buf, zpdesc_page(zpdescs[0]), off, sizes[0]);
- memcpy_from_page(buf + sizes[0], zpdesc_page(zpdescs[1]), 0, sizes[1]);
-out:
- return area->vm_buf;
-}
-
-static void __zs_unmap_object(struct mapping_area *area,
- struct zpdesc *zpdescs[2], int off, int size)
-{
- size_t sizes[2];
- char *buf;
-
- /* no write fastpath */
- if (area->vm_mm == ZS_MM_RO)
- goto out;
-
- buf = area->vm_buf;
- buf = buf + ZS_HANDLE_SIZE;
- size -= ZS_HANDLE_SIZE;
- off += ZS_HANDLE_SIZE;
-
- sizes[0] = PAGE_SIZE - off;
- sizes[1] = size - sizes[0];
-
- /* copy per-cpu buffer to object */
- memcpy_to_page(zpdesc_page(zpdescs[0]), off, buf, sizes[0]);
- memcpy_to_page(zpdesc_page(zpdescs[1]), 0, buf + sizes[0], sizes[1]);
-
-out:
- /* enable page faults to match kunmap_local() return conditions */
- pagefault_enable();
-}
-
-static int zs_cpu_prepare(unsigned int cpu)
-{
- struct mapping_area *area;
-
- area = &per_cpu(zs_map_area, cpu);
- return __zs_cpu_up(area);
-}
-
-static int zs_cpu_dead(unsigned int cpu)
-{
- struct mapping_area *area;
-
- area = &per_cpu(zs_map_area, cpu);
- __zs_cpu_down(area);
- return 0;
-}
-
static bool can_merge(struct size_class *prev, int pages_per_zspage,
int objs_per_zspage)
{
@@ -1170,92 +1145,64 @@ unsigned long zs_get_total_pages(struct zs_pool *pool)
}
EXPORT_SYMBOL_GPL(zs_get_total_pages);
-/**
- * zs_map_object - get address of allocated object from handle.
- * @pool: pool from which the object was allocated
- * @handle: handle returned from zs_malloc
- * @mm: mapping mode to use
- *
- * Before using an object allocated from zs_malloc, it must be mapped using
- * this function. When done with the object, it must be unmapped using
- * zs_unmap_object.
- *
- * Only one object can be mapped per cpu at a time. There is no protection
- * against nested mappings.
- *
- * This function returns with preemption and page faults disabled.
- */
-void *zs_map_object(struct zs_pool *pool, unsigned long handle,
- enum zs_mapmode mm)
+void *zs_obj_read_begin(struct zs_pool *pool, unsigned long handle,
+ void *local_copy)
{
struct zspage *zspage;
struct zpdesc *zpdesc;
unsigned long obj, off;
unsigned int obj_idx;
-
struct size_class *class;
- struct mapping_area *area;
- struct zpdesc *zpdescs[2];
- void *ret;
+ void *addr;
- /*
- * Because we use per-cpu mapping areas shared among the
- * pools/users, we can't allow mapping in interrupt context
- * because it can corrupt another users mappings.
- */
- BUG_ON(in_interrupt());
-
- /* It guarantees it can get zspage from handle safely */
- read_lock(&pool->migrate_lock);
+ /* Guarantee we can get zspage from handle safely */
+ read_lock(&pool->lock);
obj = handle_to_obj(handle);
obj_to_location(obj, &zpdesc, &obj_idx);
zspage = get_zspage(zpdesc);
- /*
- * migration cannot move any zpages in this zspage. Here, class->lock
- * is too heavy since callers would take some time until they calls
- * zs_unmap_object API so delegate the locking from class to zspage
- * which is smaller granularity.
- */
- migrate_read_lock(zspage);
- read_unlock(&pool->migrate_lock);
+ /* Make sure migration doesn't move any pages in this zspage */
+ zspage_read_lock(zspage);
+ read_unlock(&pool->lock);
class = zspage_class(pool, zspage);
off = offset_in_page(class->size * obj_idx);
- local_lock(&zs_map_area.lock);
- area = this_cpu_ptr(&zs_map_area);
- area->vm_mm = mm;
if (off + class->size <= PAGE_SIZE) {
/* this object is contained entirely within a page */
- area->vm_addr = kmap_local_zpdesc(zpdesc);
- ret = area->vm_addr + off;
- goto out;
+ addr = kmap_local_zpdesc(zpdesc);
+ addr += off;
+ } else {
+ size_t sizes[2];
+
+ /* this object spans two pages */
+ sizes[0] = PAGE_SIZE - off;
+ sizes[1] = class->size - sizes[0];
+ addr = local_copy;
+
+ memcpy_from_page(addr, zpdesc_page(zpdesc),
+ off, sizes[0]);
+ zpdesc = get_next_zpdesc(zpdesc);
+ memcpy_from_page(addr + sizes[0],
+ zpdesc_page(zpdesc),
+ 0, sizes[1]);
}
- /* this object spans two pages */
- zpdescs[0] = zpdesc;
- zpdescs[1] = get_next_zpdesc(zpdesc);
- BUG_ON(!zpdescs[1]);
-
- ret = __zs_map_object(area, zpdescs, off, class->size);
-out:
- if (likely(!ZsHugePage(zspage)))
- ret += ZS_HANDLE_SIZE;
+ if (!ZsHugePage(zspage))
+ addr += ZS_HANDLE_SIZE;
- return ret;
+ return addr;
}
-EXPORT_SYMBOL_GPL(zs_map_object);
+EXPORT_SYMBOL_GPL(zs_obj_read_begin);
-void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
+void zs_obj_read_end(struct zs_pool *pool, unsigned long handle,
+ void *handle_mem)
{
struct zspage *zspage;
struct zpdesc *zpdesc;
unsigned long obj, off;
unsigned int obj_idx;
-
struct size_class *class;
- struct mapping_area *area;
obj = handle_to_obj(handle);
obj_to_location(obj, &zpdesc, &obj_idx);
@@ -1263,23 +1210,65 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
class = zspage_class(pool, zspage);
off = offset_in_page(class->size * obj_idx);
- area = this_cpu_ptr(&zs_map_area);
- if (off + class->size <= PAGE_SIZE)
- kunmap_local(area->vm_addr);
- else {
- struct zpdesc *zpdescs[2];
+ if (off + class->size <= PAGE_SIZE) {
+ if (!ZsHugePage(zspage))
+ off += ZS_HANDLE_SIZE;
+ handle_mem -= off;
+ kunmap_local(handle_mem);
+ }
+
+ zspage_read_unlock(zspage);
+}
+EXPORT_SYMBOL_GPL(zs_obj_read_end);
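/*
 * Illustrative sketch of the zs_obj_read_begin()/zs_obj_read_end() pair,
 * assuming a caller-owned scratch buffer for objects that span pages
 * (zswap passes its per-CPU acomp_ctx->buffer for this):
 *
 *	void *mem = zs_obj_read_begin(pool, handle, scratch);
 *
 *	// read the object; mem may be a kmap address or scratch
 *	zs_obj_read_end(pool, handle, mem);
 */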
+
+void zs_obj_write(struct zs_pool *pool, unsigned long handle,
+ void *handle_mem, size_t mem_len)
+{
+ struct zspage *zspage;
+ struct zpdesc *zpdesc;
+ unsigned long obj, off;
+ unsigned int obj_idx;
+ struct size_class *class;
- zpdescs[0] = zpdesc;
- zpdescs[1] = get_next_zpdesc(zpdesc);
- BUG_ON(!zpdescs[1]);
+ /* Guarantee we can get zspage from handle safely */
+ read_lock(&pool->lock);
+ obj = handle_to_obj(handle);
+ obj_to_location(obj, &zpdesc, &obj_idx);
+ zspage = get_zspage(zpdesc);
- __zs_unmap_object(area, zpdescs, off, class->size);
+ /* Make sure migration doesn't move any pages in this zspage */
+ zspage_read_lock(zspage);
+ read_unlock(&pool->lock);
+
+ class = zspage_class(pool, zspage);
+ off = offset_in_page(class->size * obj_idx);
+
+ if (off + class->size <= PAGE_SIZE) {
+ /* this object is contained entirely within a page */
+ void *dst = kmap_local_zpdesc(zpdesc);
+
+ if (!ZsHugePage(zspage))
+ off += ZS_HANDLE_SIZE;
+ memcpy(dst + off, handle_mem, mem_len);
+ kunmap_local(dst);
+ } else {
+ /* this object spans two pages */
+ size_t sizes[2];
+
+ off += ZS_HANDLE_SIZE;
+ sizes[0] = PAGE_SIZE - off;
+ sizes[1] = mem_len - sizes[0];
+
+ memcpy_to_page(zpdesc_page(zpdesc), off,
+ handle_mem, sizes[0]);
+ zpdesc = get_next_zpdesc(zpdesc);
+ memcpy_to_page(zpdesc_page(zpdesc), 0,
+ handle_mem + sizes[0], sizes[1]);
}
- local_unlock(&zs_map_area.lock);
- migrate_read_unlock(zspage);
+ zspage_read_unlock(zspage);
}
-EXPORT_SYMBOL_GPL(zs_unmap_object);
+EXPORT_SYMBOL_GPL(zs_obj_write);
/**
* zs_huge_class_size() - Returns the size (in bytes) of the first huge
@@ -1450,16 +1439,16 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
return;
/*
- * The pool->migrate_lock protects the race with zpage's migration
+ * The pool->lock protects the race with zpage's migration
* so it's safe to get the page from handle.
*/
- read_lock(&pool->migrate_lock);
+ read_lock(&pool->lock);
obj = handle_to_obj(handle);
obj_to_zpdesc(obj, &f_zpdesc);
zspage = get_zspage(f_zpdesc);
class = zspage_class(pool, zspage);
spin_lock(&class->lock);
- read_unlock(&pool->migrate_lock);
+ read_unlock(&pool->lock);
class_stat_sub(class, ZS_OBJS_INUSE, 1);
obj_free(class->size, obj);
@@ -1671,18 +1660,18 @@ static void lock_zspage(struct zspage *zspage)
/*
* Pages we haven't locked yet can be migrated off the list while we're
* trying to lock them, so we need to be careful and only attempt to
- * lock each page under migrate_read_lock(). Otherwise, the page we lock
+ * lock each page under zspage_read_lock(). Otherwise, the page we lock
* may no longer belong to the zspage. This means that we may wait for
* the wrong page to unlock, so we must take a reference to the page
- * prior to waiting for it to unlock outside migrate_read_lock().
+ * prior to waiting for it to unlock outside zspage_read_lock().
*/
while (1) {
- migrate_read_lock(zspage);
+ zspage_read_lock(zspage);
zpdesc = get_first_zpdesc(zspage);
if (zpdesc_trylock(zpdesc))
break;
zpdesc_get(zpdesc);
- migrate_read_unlock(zspage);
+ zspage_read_unlock(zspage);
zpdesc_wait_locked(zpdesc);
zpdesc_put(zpdesc);
}
@@ -1693,41 +1682,16 @@ static void lock_zspage(struct zspage *zspage)
curr_zpdesc = zpdesc;
} else {
zpdesc_get(zpdesc);
- migrate_read_unlock(zspage);
+ zspage_read_unlock(zspage);
zpdesc_wait_locked(zpdesc);
zpdesc_put(zpdesc);
- migrate_read_lock(zspage);
+ zspage_read_lock(zspage);
}
}
- migrate_read_unlock(zspage);
+ zspage_read_unlock(zspage);
}
#endif /* CONFIG_COMPACTION */
-static void migrate_lock_init(struct zspage *zspage)
-{
- rwlock_init(&zspage->lock);
-}
-
-static void migrate_read_lock(struct zspage *zspage) __acquires(&zspage->lock)
-{
- read_lock(&zspage->lock);
-}
-
-static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock)
-{
- read_unlock(&zspage->lock);
-}
-
-static void migrate_write_lock(struct zspage *zspage)
-{
- write_lock(&zspage->lock);
-}
-
-static void migrate_write_unlock(struct zspage *zspage)
-{
- write_unlock(&zspage->lock);
-}
-
#ifdef CONFIG_COMPACTION
static const struct movable_operations zsmalloc_mops;
@@ -1785,9 +1749,6 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
VM_BUG_ON_PAGE(!zpdesc_is_isolated(zpdesc), zpdesc_page(zpdesc));
- /* We're committed, tell the world that this is a Zsmalloc page. */
- __zpdesc_set_zsmalloc(newzpdesc);
-
/* The page is locked, so this pointer must remain valid */
zspage = get_zspage(zpdesc);
pool = zspage->pool;
@@ -1796,15 +1757,22 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
* The pool migrate_lock protects the race between zpage migration
* and zs_free.
*/
- write_lock(&pool->migrate_lock);
+ write_lock(&pool->lock);
class = zspage_class(pool, zspage);
/*
* the class lock protects zpage alloc/free in the zspage.
*/
spin_lock(&class->lock);
- /* the migrate_write_lock protects zpage access via zs_map_object */
- migrate_write_lock(zspage);
+ /* the zspage write_lock protects zpage access via zs_obj_read/write() */
+ if (!zspage_write_trylock(zspage)) {
+ spin_unlock(&class->lock);
+ write_unlock(&pool->lock);
+ return -EINVAL;
+ }
+
+ /* We're committed, tell the world that this is a Zsmalloc page. */
+ __zpdesc_set_zsmalloc(newzpdesc);
offset = get_first_obj_offset(zpdesc);
s_addr = kmap_local_zpdesc(zpdesc);
@@ -1833,9 +1801,9 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
* Since we complete the data copy and set up new zspage structure,
* it's okay to release migration_lock.
*/
- write_unlock(&pool->migrate_lock);
+ write_unlock(&pool->lock);
spin_unlock(&class->lock);
- migrate_write_unlock(zspage);
+ zspage_write_unlock(zspage);
zpdesc_get(newzpdesc);
if (zpdesc_zone(newzpdesc) != zpdesc_zone(zpdesc)) {
@@ -1956,7 +1924,7 @@ static unsigned long __zs_compact(struct zs_pool *pool,
* protect the race between zpage migration and zs_free
* as well as zpage allocation/free
*/
- write_lock(&pool->migrate_lock);
+ write_lock(&pool->lock);
spin_lock(&class->lock);
while (zs_can_compact(class)) {
int fg;
@@ -1971,9 +1939,11 @@ static unsigned long __zs_compact(struct zs_pool *pool,
if (!src_zspage)
break;
- migrate_write_lock(src_zspage);
+ if (!zspage_write_trylock(src_zspage))
+ break;
+
migrate_zspage(pool, src_zspage, dst_zspage);
- migrate_write_unlock(src_zspage);
+ zspage_write_unlock(src_zspage);
fg = putback_zspage(class, src_zspage);
if (fg == ZS_INUSE_RATIO_0) {
@@ -1983,14 +1953,14 @@ static unsigned long __zs_compact(struct zs_pool *pool,
src_zspage = NULL;
if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100
- || rwlock_is_contended(&pool->migrate_lock)) {
+ || rwlock_is_contended(&pool->lock)) {
putback_zspage(class, dst_zspage);
dst_zspage = NULL;
spin_unlock(&class->lock);
- write_unlock(&pool->migrate_lock);
+ write_unlock(&pool->lock);
cond_resched();
- write_lock(&pool->migrate_lock);
+ write_lock(&pool->lock);
spin_lock(&class->lock);
}
}
@@ -2002,7 +1972,7 @@ static unsigned long __zs_compact(struct zs_pool *pool,
putback_zspage(class, dst_zspage);
spin_unlock(&class->lock);
- write_unlock(&pool->migrate_lock);
+ write_unlock(&pool->lock);
return pages_freed;
}
@@ -2014,10 +1984,10 @@ unsigned long zs_compact(struct zs_pool *pool)
unsigned long pages_freed = 0;
/*
- * Pool compaction is performed under pool->migrate_lock so it is basically
+ * Pool compaction is performed under pool->lock so it is basically
* single-threaded. Having more than one thread in __zs_compact()
- * will increase pool->migrate_lock contention, which will impact other
- * zsmalloc operations that need pool->migrate_lock.
+ * will increase pool->lock contention, which will impact other
+ * zsmalloc operations that need pool->lock.
*/
if (atomic_xchg(&pool->compaction_in_progress, 1))
return 0;
@@ -2139,7 +2109,7 @@ struct zs_pool *zs_create_pool(const char *name)
return NULL;
init_deferred_free(pool);
- rwlock_init(&pool->migrate_lock);
+ rwlock_init(&pool->lock);
atomic_set(&pool->compaction_in_progress, 0);
pool->name = kstrdup(name, GFP_KERNEL);
@@ -2278,23 +2248,11 @@ EXPORT_SYMBOL_GPL(zs_destroy_pool);
static int __init zs_init(void)
{
- int ret;
-
- ret = cpuhp_setup_state(CPUHP_MM_ZS_PREPARE, "mm/zsmalloc:prepare",
- zs_cpu_prepare, zs_cpu_dead);
- if (ret)
- goto out;
-
#ifdef CONFIG_ZPOOL
zpool_register_driver(&zs_zpool_driver);
#endif
-
zs_stat_init();
-
return 0;
-
-out:
- return ret;
}
static void __exit zs_exit(void)
@@ -2302,8 +2260,6 @@ static void __exit zs_exit(void)
#ifdef CONFIG_ZPOOL
zpool_unregister_driver(&zs_zpool_driver);
#endif
- cpuhp_remove_state(CPUHP_MM_ZS_PREPARE);
-
zs_stat_exit();
}
diff --git a/mm/zswap.c b/mm/zswap.c
index 23365e76a3ce..204fb59da33c 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -62,6 +62,8 @@ static u64 zswap_reject_reclaim_fail;
static u64 zswap_reject_compress_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
+/* Load or writeback failed due to decompression failure */
+static u64 zswap_decompress_fail;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
@@ -881,18 +883,32 @@ static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
+ struct acomp_req *req;
+ struct crypto_acomp *acomp;
+ u8 *buffer;
+
+ if (IS_ERR_OR_NULL(acomp_ctx))
+ return 0;
mutex_lock(&acomp_ctx->mutex);
- if (!IS_ERR_OR_NULL(acomp_ctx)) {
- if (!IS_ERR_OR_NULL(acomp_ctx->req))
- acomp_request_free(acomp_ctx->req);
- acomp_ctx->req = NULL;
- if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
- crypto_free_acomp(acomp_ctx->acomp);
- kfree(acomp_ctx->buffer);
- }
+ req = acomp_ctx->req;
+ acomp = acomp_ctx->acomp;
+ buffer = acomp_ctx->buffer;
+ acomp_ctx->req = NULL;
+ acomp_ctx->acomp = NULL;
+ acomp_ctx->buffer = NULL;
mutex_unlock(&acomp_ctx->mutex);
+ /*
+ * Do the actual freeing after releasing the mutex to avoid subtle
+ * locking dependencies causing deadlocks.
+ */
+ if (!IS_ERR_OR_NULL(req))
+ acomp_request_free(req);
+ if (!IS_ERR_OR_NULL(acomp))
+ crypto_free_acomp(acomp);
+ kfree(buffer);
+
return 0;
}
@@ -930,7 +946,6 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
unsigned int dlen = PAGE_SIZE;
unsigned long handle;
struct zpool *zpool;
- char *buf;
gfp_t gfp;
u8 *dst;
@@ -965,17 +980,12 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
goto unlock;
zpool = pool->zpool;
- gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
- if (zpool_malloc_support_movable(zpool))
- gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
+ gfp = GFP_NOWAIT | __GFP_NORETRY | __GFP_HIGHMEM | __GFP_MOVABLE;
alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
if (alloc_ret)
goto unlock;
- buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
- memcpy(buf, dst, dlen);
- zpool_unmap_handle(zpool, handle);
-
+ zpool_obj_write(zpool, handle, dst, dlen);
entry->handle = handle;
entry->length = dlen;
@@ -991,41 +1001,49 @@ unlock:
return comp_ret == 0 && alloc_ret == 0;
}
-static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
+static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio)
{
struct zpool *zpool = entry->pool->zpool;
struct scatterlist input, output;
struct crypto_acomp_ctx *acomp_ctx;
- u8 *src;
+ int decomp_ret, dlen;
+ u8 *src, *obj;
acomp_ctx = acomp_ctx_get_cpu_lock(entry->pool);
- src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
+ obj = zpool_obj_read_begin(zpool, entry->handle, acomp_ctx->buffer);
+
/*
- * If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer
- * to do crypto_acomp_decompress() which might sleep. In such cases, we must
- * resort to copying the buffer to a temporary one.
- * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer,
- * such as a kmap address of high memory or even ever a vmap address.
- * However, sg_init_one is only equipped to handle linearly mapped low memory.
- * In such cases, we also must copy the buffer to a temporary and lowmem one.
+ * zpool_obj_read_begin() might return a kmap address of highmem when
+ * acomp_ctx->buffer is not used. However, sg_init_one() does not
+ * handle highmem addresses, so copy the object to acomp_ctx->buffer.
*/
- if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
- !virt_addr_valid(src)) {
- memcpy(acomp_ctx->buffer, src, entry->length);
+ if (virt_addr_valid(obj)) {
+ src = obj;
+ } else {
+ WARN_ON_ONCE(obj == acomp_ctx->buffer);
+ memcpy(acomp_ctx->buffer, obj, entry->length);
src = acomp_ctx->buffer;
- zpool_unmap_handle(zpool, entry->handle);
}
sg_init_one(&input, src, entry->length);
sg_init_table(&output, 1);
sg_set_folio(&output, folio, PAGE_SIZE, 0);
acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
- BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
- BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
+ decomp_ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
+ dlen = acomp_ctx->req->dlen;
- if (src != acomp_ctx->buffer)
- zpool_unmap_handle(zpool, entry->handle);
+ zpool_obj_read_end(zpool, entry->handle, obj);
acomp_ctx_put_unlock(acomp_ctx);
+
+ if (!decomp_ret && dlen == PAGE_SIZE)
+ return true;
+
+ zswap_decompress_fail++;
+ pr_alert_ratelimited("Decompression error from zswap (%d:%lu %s %u->%d)\n",
+ swp_type(entry->swpentry),
+ swp_offset(entry->swpentry),
+ entry->pool->tfm_name, entry->length, dlen);
+ return false;
}
/*********************************
@@ -1051,14 +1069,21 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
struct folio *folio;
struct mempolicy *mpol;
bool folio_was_allocated;
+ struct swap_info_struct *si;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_NONE,
};
+ int ret = 0;
/* try to allocate swap cache folio */
+ si = get_swap_device(swpentry);
+ if (!si)
+ return -EEXIST;
+
mpol = get_task_policy(current);
folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
- NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
+ NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
+ put_swap_device(si);
if (!folio)
return -ENOMEM;
@@ -1070,8 +1095,8 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
* and freed when invalidated by the concurrent shrinker anyway.
*/
if (!folio_was_allocated) {
- folio_put(folio);
- return -EEXIST;
+ ret = -EEXIST;
+ goto out;
}
/*
@@ -1084,14 +1109,17 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
* be dereferenced.
*/
tree = swap_zswap_tree(swpentry);
- if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) {
- delete_from_swap_cache(folio);
- folio_unlock(folio);
- folio_put(folio);
- return -ENOMEM;
+ if (entry != xa_load(tree, offset)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!zswap_decompress(entry, folio)) {
+ ret = -EIO;
+ goto out;
}
- zswap_decompress(entry, folio);
+ xa_erase(tree, offset);
count_vm_event(ZSWPWB);
if (entry->objcg)
@@ -1107,9 +1135,14 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
/* start writeback */
__swap_writepage(folio, &wbc);
- folio_put(folio);
- return 0;
+out:
+ if (ret && ret != -EEXIST) {
+ delete_from_swap_cache(folio);
+ folio_unlock(folio);
+ }
+ folio_put(folio);
+ return ret;
}
/*********************************
@@ -1609,7 +1642,27 @@ check_old:
return ret;
}
-bool zswap_load(struct folio *folio)
+/**
+ * zswap_load() - load a folio from zswap
+ * @folio: folio to load
+ *
+ * Return: 0 on success, with the folio unlocked and marked up-to-date, or one
+ * of the following error codes:
+ *
+ * -EIO: if the swapped out content was in zswap, but could not be loaded
+ * into the page due to a decompression failure. The folio is unlocked, but
+ * NOT marked up-to-date, so that an IO error is emitted (e.g. do_swap_page()
+ * will SIGBUS).
+ *
+ * -EINVAL: if the swapped out content was in zswap, but the page belongs
+ * to a large folio, which is not supported by zswap. The folio is unlocked,
+ * but NOT marked up-to-date, so that an IO error is emitted (e.g.
+ * do_swap_page() will SIGBUS).
+ *
+ * -ENOENT: if the swapped out content was not in zswap. The folio remains
+ * locked on return.
+ */
+int zswap_load(struct folio *folio)
{
swp_entry_t swp = folio->swap;
pgoff_t offset = swp_offset(swp);
@@ -1620,18 +1673,32 @@ bool zswap_load(struct folio *folio)
VM_WARN_ON_ONCE(!folio_test_locked(folio));
if (zswap_never_enabled())
- return false;
+ return -ENOENT;
/*
* Large folios should not be swapped in while zswap is being used, as
* they are not properly handled. Zswap does not properly load large
* folios, and a large folio may only be partially in zswap.
- *
- * Return true without marking the folio uptodate so that an IO error is
- * emitted (e.g. do_swap_page() will sigbus).
*/
- if (WARN_ON_ONCE(folio_test_large(folio)))
- return true;
+ if (WARN_ON_ONCE(folio_test_large(folio))) {
+ folio_unlock(folio);
+ return -EINVAL;
+ }
+
+ entry = xa_load(tree, offset);
+ if (!entry)
+ return -ENOENT;
+
+ if (!zswap_decompress(entry, folio)) {
+ folio_unlock(folio);
+ return -EIO;
+ }
+
+ folio_mark_uptodate(folio);
+
+ count_vm_event(ZSWPIN);
+ if (entry->objcg)
+ count_objcg_events(entry->objcg, ZSWPIN, 1);
/*
* When reading into the swapcache, invalidate our entry. The
@@ -1645,27 +1712,14 @@ bool zswap_load(struct folio *folio)
* files, which reads into a private page and may free it if
* the fault fails. We remain the primary owner of the entry.)
*/
- if (swapcache)
- entry = xa_erase(tree, offset);
- else
- entry = xa_load(tree, offset);
-
- if (!entry)
- return false;
-
- zswap_decompress(entry, folio);
-
- count_vm_event(ZSWPIN);
- if (entry->objcg)
- count_objcg_events(entry->objcg, ZSWPIN, 1);
-
if (swapcache) {
- zswap_entry_free(entry);
folio_mark_dirty(folio);
+ xa_erase(tree, offset);
+ zswap_entry_free(entry);
}
- folio_mark_uptodate(folio);
- return true;
+ folio_unlock(folio);
+ return 0;
}
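/*
 * Illustrative sketch of how a swap-in caller might consume the new
 * return convention (names are hypothetical; the in-tree caller is
 * swap_read_folio()):
 *
 *	ret = zswap_load(folio);
 *	if (ret == -ENOENT)
 *		read_from_backing_device(folio);  // folio still locked here
 *	else if (ret)
 *		pr_debug("zswap load failed: %d\n", ret);  // unlocked, not uptodate
 */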
void zswap_invalidate(swp_entry_t swp)
@@ -1760,6 +1814,8 @@ static int zswap_debugfs_init(void)
zswap_debugfs_root, &zswap_reject_compress_fail);
debugfs_create_u64("reject_compress_poor", 0444,
zswap_debugfs_root, &zswap_reject_compress_poor);
+ debugfs_create_u64("decompress_fail", 0444,
+ zswap_debugfs_root, &zswap_decompress_fail);
debugfs_create_u64("written_back_pages", 0444,
zswap_debugfs_root, &zswap_written_back_pages);
debugfs_create_file("pool_total_size", 0444,
diff --git a/net/9p/client.c b/net/9p/client.c
index 09f8ced9f8bb..61461b9fa134 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1548,7 +1548,8 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
struct p9_client *clnt = fid->clnt;
struct p9_req_t *req;
int count = iov_iter_count(to);
- int rsize, received, non_zc = 0;
+ u32 rsize, received;
+ bool non_zc = false;
char *dataptr;
*err = 0;
@@ -1571,7 +1572,7 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
0, 11, "dqd", fid->fid,
offset, rsize);
} else {
- non_zc = 1;
+ non_zc = true;
req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset,
rsize);
}
@@ -1592,11 +1593,13 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
return 0;
}
if (rsize < received) {
- pr_err("bogus RREAD count (%d > %d)\n", received, rsize);
- received = rsize;
+ pr_err("bogus RREAD count (%u > %u)\n", received, rsize);
+ *err = -EIO;
+ p9_req_put(clnt, req);
+ return 0;
}
- p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", received);
+ p9_debug(P9_DEBUG_9P, "<<< RREAD count %u\n", received);
if (non_zc) {
int n = copy_to_iter(dataptr, received, to);
@@ -1623,9 +1626,9 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
*err = 0;
while (iov_iter_count(from)) {
- int count = iov_iter_count(from);
- int rsize = fid->iounit;
- int written;
+ size_t count = iov_iter_count(from);
+ u32 rsize = fid->iounit;
+ u32 written;
if (!rsize || rsize > clnt->msize - P9_IOHDRSZ)
rsize = clnt->msize - P9_IOHDRSZ;
@@ -1633,7 +1636,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
if (count < rsize)
rsize = count;
- p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d (/%d)\n",
+ p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %u (/%zu)\n",
fid->fid, offset, rsize, count);
/* Don't bother zerocopy for small IO (< 1024) */
@@ -1659,11 +1662,14 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
break;
}
if (rsize < written) {
- pr_err("bogus RWRITE count (%d > %d)\n", written, rsize);
- written = rsize;
+ pr_err("bogus RWRITE count (%u > %u)\n", written, rsize);
+ *err = -EIO;
+ iov_iter_revert(from, count - iov_iter_count(from));
+ p9_req_put(clnt, req);
+ break;
}
- p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", written);
+ p9_debug(P9_DEBUG_9P, "<<< RWRITE count %u\n", written);
p9_req_put(clnt, req);
iov_iter_revert(from, count - written - iov_iter_count(from));
@@ -1712,7 +1718,7 @@ p9_client_write_subreq(struct netfs_io_subrequest *subreq)
if (written > len) {
pr_err("bogus RWRITE count (%d > %u)\n", written, len);
- written = len;
+ written = -EIO;
}
p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", len);
@@ -2098,7 +2104,8 @@ EXPORT_SYMBOL_GPL(p9_client_xattrcreate);
int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
{
- int err, rsize, non_zc = 0;
+ int err, non_zc = 0;
+ u32 rsize;
struct p9_client *clnt;
struct p9_req_t *req;
char *dataptr;
@@ -2107,7 +2114,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
iov_iter_kvec(&to, ITER_DEST, &kv, 1, count);
- p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
+ p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %u\n",
fid->fid, offset, count);
clnt = fid->clnt;
@@ -2142,11 +2149,12 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
goto free_and_error;
}
if (rsize < count) {
- pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize);
- count = rsize;
+ pr_err("bogus RREADDIR count (%u > %u)\n", count, rsize);
+ err = -EIO;
+ goto free_and_error;
}
- p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
+ p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %u\n", count);
if (non_zc)
memmove(data, dataptr, count);
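Note the behavioral change running through the RREAD, RWRITE and RREADDIR paths above: a server-reported count larger than what was requested used to be silently clamped, but is now rejected as a protocol violation. The hardened pattern, reduced to its core (identifiers illustrative):

    if (reported > requested) {
        pr_err("bogus reply count (%u > %u)\n", reported, requested);
        return -EIO;    /* refuse the reply rather than clamp and continue */
    }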
diff --git a/net/9p/error.c b/net/9p/error.c
index 8da744494b68..8ba8afc91482 100644
--- a/net/9p/error.c
+++ b/net/9p/error.c
@@ -16,6 +16,7 @@
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/errno.h>
+#include <linux/hashtable.h>
#include <net/9p/9p.h>
/**
@@ -33,8 +34,8 @@ struct errormap {
struct hlist_node list;
};
-#define ERRHASHSZ 32
-static struct hlist_head hash_errmap[ERRHASHSZ];
+#define ERRHASH_BITS 5
+static DEFINE_HASHTABLE(hash_errmap, ERRHASH_BITS);
/* FixMe - reduce to a reasonable size */
static struct errormap errmap[] = {
@@ -176,18 +177,14 @@ static struct errormap errmap[] = {
int p9_error_init(void)
{
struct errormap *c;
- int bucket;
-
- /* initialize hash table */
- for (bucket = 0; bucket < ERRHASHSZ; bucket++)
- INIT_HLIST_HEAD(&hash_errmap[bucket]);
+ u32 hash;
/* load initial error map into hash table */
for (c = errmap; c->name; c++) {
c->namelen = strlen(c->name);
- bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ;
+ hash = jhash(c->name, c->namelen, 0);
INIT_HLIST_NODE(&c->list);
- hlist_add_head(&c->list, &hash_errmap[bucket]);
+ hash_add(hash_errmap, &c->list, hash);
}
return 1;
@@ -205,12 +202,12 @@ int p9_errstr2errno(char *errstr, int len)
{
int errno;
struct errormap *c;
- int bucket;
+ u32 hash;
errno = 0;
c = NULL;
- bucket = jhash(errstr, len, 0) % ERRHASHSZ;
- hlist_for_each_entry(c, &hash_errmap[bucket], list) {
+ hash = jhash(errstr, len, 0);
+ hash_for_each_possible(hash_errmap, c, list, hash) {
if (c->namelen == len && !memcmp(c->name, errstr, len)) {
errno = c->val;
break;
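The error.c conversion replaces an open-coded 32-bucket array with the generic <linux/hashtable.h> API, which sizes buckets and masks hashes internally. The same pattern in isolation, assuming a toy name-to-value map (all identifiers illustrative):

    #include <linux/hashtable.h>
    #include <linux/jhash.h>
    #include <linux/string.h>

    struct name_val {
        const char *name;
        size_t namelen;
        int val;
        struct hlist_node node;
    };

    static DEFINE_HASHTABLE(name_map, 5);    /* 2^5 = 32 buckets, as before */

    static void name_map_add(struct name_val *nv)
    {
        /* hash_add() masks the full 32-bit jhash down to the table size */
        hash_add(name_map, &nv->node, jhash(nv->name, nv->namelen, 0));
    }

    static int name_map_lookup(const char *name, size_t len)
    {
        struct name_val *nv;

        hash_for_each_possible(name_map, nv, node, jhash(name, len, 0))
            if (nv->namelen == len && !memcmp(nv->name, name, len))
                return nv->val;
        return 0;
    }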
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 196060dc6138..339ec4e54778 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
+#include <linux/in6.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
@@ -191,12 +192,13 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
spin_lock(&m->req_lock);
- if (m->err) {
+ if (READ_ONCE(m->err)) {
spin_unlock(&m->req_lock);
return;
}
- m->err = err;
+ WRITE_ONCE(m->err, err);
+ ASSERT_EXCLUSIVE_WRITER(m->err);
list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
list_move(&req->req_list, &cancel_list);
@@ -283,7 +285,7 @@ static void p9_read_work(struct work_struct *work)
m = container_of(work, struct p9_conn, rq);
- if (m->err < 0)
+ if (READ_ONCE(m->err) < 0)
return;
p9_debug(P9_DEBUG_TRANS, "start mux %p pos %zd\n", m, m->rc.offset);
@@ -450,7 +452,7 @@ static void p9_write_work(struct work_struct *work)
m = container_of(work, struct p9_conn, wq);
- if (m->err < 0) {
+ if (READ_ONCE(m->err) < 0) {
clear_bit(Wworksched, &m->wsched);
return;
}
@@ -622,7 +624,7 @@ static void p9_poll_mux(struct p9_conn *m)
__poll_t n;
int err = -ECONNRESET;
- if (m->err < 0)
+ if (READ_ONCE(m->err) < 0)
return;
n = p9_fd_poll(m->client, NULL, &err);
@@ -665,6 +667,7 @@ static void p9_poll_mux(struct p9_conn *m)
static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
{
__poll_t n;
+ int err;
struct p9_trans_fd *ts = client->trans;
struct p9_conn *m = &ts->conn;
@@ -673,9 +676,10 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
spin_lock(&m->req_lock);
- if (m->err < 0) {
+ err = READ_ONCE(m->err);
+ if (err < 0) {
spin_unlock(&m->req_lock);
- return m->err;
+ return err;
}
WRITE_ONCE(req->status, REQ_STATUS_UNSENT);
@@ -954,64 +958,55 @@ static void p9_fd_close(struct p9_client *client)
kfree(ts);
}
-/*
- * stolen from NFS - maybe should be made a generic function?
- */
-static inline int valid_ipaddr4(const char *buf)
-{
- int rc, count, in[4];
-
- rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
- if (rc != 4)
- return -EINVAL;
- for (count = 0; count < 4; count++) {
- if (in[count] > 255)
- return -EINVAL;
- }
- return 0;
-}
-
static int p9_bind_privport(struct socket *sock)
{
- struct sockaddr_in cl;
+ struct sockaddr_storage stor = { 0 };
int port, err = -EINVAL;
- memset(&cl, 0, sizeof(cl));
- cl.sin_family = AF_INET;
- cl.sin_addr.s_addr = htonl(INADDR_ANY);
+ stor.ss_family = sock->ops->family;
+ if (stor.ss_family == AF_INET)
+ ((struct sockaddr_in *)&stor)->sin_addr.s_addr = htonl(INADDR_ANY);
+ else
+ ((struct sockaddr_in6 *)&stor)->sin6_addr = in6addr_any;
for (port = p9_ipport_resv_max; port >= p9_ipport_resv_min; port--) {
- cl.sin_port = htons((ushort)port);
- err = kernel_bind(sock, (struct sockaddr *)&cl, sizeof(cl));
+ if (stor.ss_family == AF_INET)
+ ((struct sockaddr_in *)&stor)->sin_port = htons((ushort)port);
+ else
+ ((struct sockaddr_in6 *)&stor)->sin6_port = htons((ushort)port);
+ err = kernel_bind(sock, (struct sockaddr *)&stor, sizeof(stor));
if (err != -EADDRINUSE)
break;
}
return err;
}
-
static int
p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
{
int err;
+ char port_str[6];
struct socket *csocket;
- struct sockaddr_in sin_server;
+ struct sockaddr_storage stor = { 0 };
struct p9_fd_opts opts;
err = parse_opts(args, &opts);
if (err < 0)
return err;
- if (addr == NULL || valid_ipaddr4(addr) < 0)
+ if (!addr)
return -EINVAL;
+ sprintf(port_str, "%u", opts.port);
+ err = inet_pton_with_scope(current->nsproxy->net_ns, AF_UNSPEC, addr,
+ port_str, &stor);
+ if (err < 0)
+ return err;
+
csocket = NULL;
client->trans_opts.tcp.port = opts.port;
client->trans_opts.tcp.privport = opts.privport;
- sin_server.sin_family = AF_INET;
- sin_server.sin_addr.s_addr = in_aton(addr);
- sin_server.sin_port = htons(opts.port);
- err = __sock_create(current->nsproxy->net_ns, PF_INET,
+ err = __sock_create(current->nsproxy->net_ns, stor.ss_family,
SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
if (err) {
pr_err("%s (%d): problem creating socket\n",
@@ -1030,8 +1025,8 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
}
err = READ_ONCE(csocket->ops)->connect(csocket,
- (struct sockaddr *)&sin_server,
- sizeof(struct sockaddr_in), 0);
+ (struct sockaddr *)&stor,
+ sizeof(stor), 0);
if (err < 0) {
pr_err("%s (%d): problem connecting socket to %s\n",
__func__, task_pid_nr(current), addr);
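With valid_ipaddr4() gone, trans_fd resolves the address string once via inet_pton_with_scope() into a struct sockaddr_storage, so socket creation, privileged-port binding and connect all work for IPv4 and IPv6 through one code path. The resolution step in isolation (sketch; init_net is used here only for brevity):

    struct sockaddr_storage stor = { 0 };
    char port_str[6];
    int err;

    snprintf(port_str, sizeof(port_str), "%u", port);
    /* AF_UNSPEC lets the helper infer AF_INET vs AF_INET6 from the string */
    err = inet_pton_with_scope(&init_net, AF_UNSPEC, addr, port_str, &stor);
    if (err < 0)
        return err;
    /* stor.ss_family now selects the family for __sock_create() and bind */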
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index fd1cfa9707dc..3afeaa8c5dc5 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -951,12 +951,14 @@ void netdev_nl_sock_priv_destroy(struct netdev_nl_sock *priv)
{
struct net_devmem_dmabuf_binding *binding;
struct net_devmem_dmabuf_binding *temp;
+ struct net_device *dev;
mutex_lock(&priv->lock);
list_for_each_entry_safe(binding, temp, &priv->bindings, list) {
- netdev_lock(binding->dev);
+ dev = binding->dev;
+ netdev_lock(dev);
net_devmem_unbind_dmabuf(binding);
- netdev_unlock(binding->dev);
+ netdev_unlock(dev);
}
mutex_unlock(&priv->lock);
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 5a24a30dfc2d..334db17be37d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1177,6 +1177,9 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
/* IFLA_VF_STATS_TX_DROPPED */
nla_total_size_64bit(sizeof(__u64)));
}
+ if (dev->netdev_ops->ndo_get_vf_guid)
+ size += num_vfs * 2 *
+ nla_total_size(sizeof(struct ifla_vf_guid));
return size;
} else
return 0;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ea8de00f669d..6edc441b3702 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1525,25 +1525,11 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
__tcp_cleanup_rbuf(sk, copied);
}
-/* private version of sock_rfree() avoiding one atomic_sub() */
-void tcp_sock_rfree(struct sk_buff *skb)
-{
- struct sock *sk = skb->sk;
- unsigned int len = skb->truesize;
-
- sock_owned_by_me(sk);
- atomic_set(&sk->sk_rmem_alloc,
- atomic_read(&sk->sk_rmem_alloc) - len);
-
- sk_forward_alloc_add(sk, len);
- sk_mem_reclaim(sk);
-}
-
static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
{
__skb_unlink(skb, &sk->sk_receive_queue);
- if (likely(skb->destructor == tcp_sock_rfree)) {
- tcp_sock_rfree(skb);
+ if (likely(skb->destructor == sock_rfree)) {
+ sock_rfree(skb);
skb->destructor = NULL;
skb->sk = NULL;
return skb_attempt_defer_free(skb);
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index ca40665145c6..1a6b1bc54245 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -189,7 +189,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
tcp_segs_in(tp, skb);
__skb_pull(skb, tcp_hdrlen(skb));
sk_forced_mem_schedule(sk, skb->truesize);
- tcp_skb_set_owner_r(skb, sk);
+ skb_set_owner_r(skb, sk);
TCP_SKB_CB(skb)->seq++;
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e1f952fbac48..a35018e2d0ba 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5171,7 +5171,7 @@ end:
if (tcp_is_sack(tp))
tcp_grow_window(sk, skb, false);
skb_condense(skb);
- tcp_skb_set_owner_r(skb, sk);
+ skb_set_owner_r(skb, sk);
}
}
@@ -5187,7 +5187,7 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);
if (!eaten) {
tcp_add_receive_queue(sk, skb);
- tcp_skb_set_owner_r(skb, sk);
+ skb_set_owner_r(skb, sk);
}
return eaten;
}
@@ -5504,7 +5504,7 @@ skip_this:
__skb_queue_before(list, skb, nskb);
else
__skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */
- tcp_skb_set_owner_r(nskb, sk);
+ skb_set_owner_r(nskb, sk);
mptcp_skb_ext_move(nskb, skb);
/* Copy data, releasing collapsed skbs. */
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index b9f492ddf93b..83c629529b57 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -33,7 +33,7 @@ struct mpls_dev {
#define MPLS_INC_STATS_LEN(mdev, len, pkts_field, bytes_field) \
do { \
- __typeof__(*(mdev)->stats) *ptr = \
+ TYPEOF_UNQUAL(*(mdev)->stats) *ptr = \
raw_cpu_ptr((mdev)->stats); \
local_bh_disable(); \
u64_stats_update_begin(&ptr->syncp); \
@@ -45,7 +45,7 @@ struct mpls_dev {
#define MPLS_INC_STATS(mdev, field) \
do { \
- __typeof__(*(mdev)->stats) *ptr = \
+ TYPEOF_UNQUAL(*(mdev)->stats) *ptr = \
raw_cpu_ptr((mdev)->stats); \
local_bh_disable(); \
u64_stats_update_begin(&ptr->syncp); \
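TYPEOF_UNQUAL() matters here because (mdev)->stats is a __percpu pointer: plain __typeof__ would carry the address-space qualifier into the local declaration, while TYPEOF_UNQUAL yields the unqualified struct type that raw_cpu_ptr() actually returns. A minimal sketch of the same idea (struct and field names illustrative):

    struct pkt_stats {
        u64 packets;
        struct u64_stats_sync syncp;
    };

    struct pkt_stats __percpu *stats;    /* per-CPU data, illustrative */

    /* TYPEOF_UNQUAL(*stats) is plain 'struct pkt_stats', without the
     * __percpu qualifier, matching what raw_cpu_ptr() hands back. */
    TYPEOF_UNQUAL(*stats) *ptr = raw_cpu_ptr(stats);

    u64_stats_update_begin(&ptr->syncp);
    ptr->packets++;
    u64_stats_update_end(&ptr->syncp);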
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index efe8d86496db..409bd415ef1d 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -754,8 +754,6 @@ static bool subflow_hmac_valid(const struct request_sock *req,
subflow_req = mptcp_subflow_rsk(req);
msk = subflow_req->msk;
- if (!msk)
- return false;
subflow_generate_hmac(READ_ONCE(msk->remote_key),
READ_ONCE(msk->local_key),
@@ -850,12 +848,8 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
} else if (subflow_req->mp_join) {
mptcp_get_options(skb, &mp_opt);
- if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK) ||
- !subflow_hmac_valid(req, &mp_opt) ||
- !mptcp_can_accept_new_subflow(subflow_req->msk)) {
- SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
+ if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK))
fallback = true;
- }
}
create_child:
@@ -905,6 +899,13 @@ create_child:
goto dispose_child;
}
+ if (!subflow_hmac_valid(req, &mp_opt) ||
+ !mptcp_can_accept_new_subflow(subflow_req->msk)) {
+ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
+ subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
+ goto dispose_child;
+ }
+
/* move the msk reference ownership to the subflow */
subflow_req->msk = NULL;
ctx->conn = (struct sock *)owner;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index defb05c1fba4..f74a097f54ae 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1267,38 +1267,8 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
struct qdisc_size_table *stab;
ops = qdisc_lookup_ops(kind);
-#ifdef CONFIG_MODULES
- if (ops == NULL && kind != NULL) {
- char name[IFNAMSIZ];
- if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
- /* We dropped the RTNL semaphore in order to
- * perform the module load. So, even if we
- * succeeded in loading the module we have to
- * tell the caller to replay the request. We
- * indicate this using -EAGAIN.
- * We replay the request because the device may
- * go away in the mean time.
- */
- netdev_unlock_ops(dev);
- rtnl_unlock();
- request_module(NET_SCH_ALIAS_PREFIX "%s", name);
- rtnl_lock();
- netdev_lock_ops(dev);
- ops = qdisc_lookup_ops(kind);
- if (ops != NULL) {
- /* We will try again qdisc_lookup_ops,
- * so don't keep a reference.
- */
- module_put(ops->owner);
- err = -EAGAIN;
- goto err_out;
- }
- }
- }
-#endif
-
- err = -ENOENT;
if (!ops) {
+ err = -ENOENT;
NL_SET_ERR_MSG(extack, "Specified qdisc kind is unknown");
goto err_out;
}
@@ -1623,8 +1593,7 @@ static int __tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
struct netlink_ext_ack *extack,
struct net_device *dev,
struct nlattr *tca[TCA_MAX + 1],
- struct tcmsg *tcm,
- bool *replay)
+ struct tcmsg *tcm)
{
struct Qdisc *q = NULL;
struct Qdisc *p = NULL;
@@ -1789,13 +1758,8 @@ create_n_graft2:
tcm->tcm_parent, tcm->tcm_handle,
tca, &err, extack);
}
- if (q == NULL) {
- if (err == -EAGAIN) {
- *replay = true;
- return 0;
- }
+ if (!q)
return err;
- }
graft:
err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
@@ -1808,6 +1772,27 @@ graft:
return 0;
}
+static void request_qdisc_module(struct nlattr *kind)
+{
+ struct Qdisc_ops *ops;
+ char name[IFNAMSIZ];
+
+ if (!kind)
+ return;
+
+ ops = qdisc_lookup_ops(kind);
+ if (ops) {
+ module_put(ops->owner);
+ return;
+ }
+
+ if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
+ rtnl_unlock();
+ request_module(NET_SCH_ALIAS_PREFIX "%s", name);
+ rtnl_lock();
+ }
+}
+
/*
* Create/change qdisc.
*/
@@ -1818,27 +1803,23 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
struct nlattr *tca[TCA_MAX + 1];
struct net_device *dev;
struct tcmsg *tcm;
- bool replay;
int err;
-replay:
- /* Reinit, just in case something touches this. */
err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
rtm_tca_policy, extack);
if (err < 0)
return err;
+ request_qdisc_module(tca[TCA_KIND]);
+
tcm = nlmsg_data(n);
dev = __dev_get_by_index(net, tcm->tcm_ifindex);
if (!dev)
return -ENODEV;
- replay = false;
netdev_lock_ops(dev);
- err = __tc_modify_qdisc(skb, n, extack, dev, tca, tcm, &replay);
+ err = __tc_modify_qdisc(skb, n, extack, dev, tca, tcm);
netdev_unlock_ops(dev);
- if (replay)
- goto replay;
return err;
}
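The sch_api.c rework removes the -EAGAIN replay loop by doing module auto-load up front: request_qdisc_module() may still drop and re-take RTNL, but at that point nothing has been looked up yet, so there is no device state that could go stale. The resulting ordering in tc_modify_qdisc(), condensed to a sketch:

    /* 1. parse attributes - no locks depended on yet */
    err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
                                 rtm_tca_policy, extack);
    if (err < 0)
        return err;

    /* 2. ensure the qdisc module is present; may cycle rtnl_unlock()/
     *    rtnl_lock(), which is safe because nothing is resolved yet */
    request_qdisc_module(tca[TCA_KIND]);

    /* 3. only now look up the device, under a stable RTNL */
    dev = __dev_get_by_index(net, tcm->tcm_ifindex);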
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 2fe88ea79a70..6f75862d9782 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -270,9 +270,6 @@ static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt,
old = rcu_dereference_protected(clnt->cl_xprt,
lockdep_is_held(&clnt->cl_lock));
- if (!xprt_bound(xprt))
- clnt->cl_autobind = 1;
-
clnt->cl_timeout = timeout;
rcu_assign_pointer(clnt->cl_xprt, xprt);
spin_unlock(&clnt->cl_lock);
@@ -512,6 +509,8 @@ static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
clnt->cl_discrtry = 1;
if (!(args->flags & RPC_CLNT_CREATE_QUIET))
clnt->cl_chatty = 1;
+ if (args->flags & RPC_CLNT_CREATE_NETUNREACH_FATAL)
+ clnt->cl_netunreach_fatal = 1;
return clnt;
}
@@ -662,6 +661,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
new->cl_noretranstimeo = clnt->cl_noretranstimeo;
new->cl_discrtry = clnt->cl_discrtry;
new->cl_chatty = clnt->cl_chatty;
+ new->cl_netunreach_fatal = clnt->cl_netunreach_fatal;
new->cl_principal = clnt->cl_principal;
new->cl_max_connect = clnt->cl_max_connect;
return new;
@@ -1195,6 +1195,8 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
task->tk_flags |= RPC_TASK_TIMEOUT;
if (clnt->cl_noretranstimeo)
task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
+ if (clnt->cl_netunreach_fatal)
+ task->tk_flags |= RPC_TASK_NETUNREACH_FATAL;
atomic_inc(&clnt->cl_task_count);
}
@@ -2102,14 +2104,17 @@ call_bind_status(struct rpc_task *task)
case -EPROTONOSUPPORT:
trace_rpcb_bind_version_err(task);
goto retry_timeout;
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL)
+ break;
+ fallthrough;
case -ECONNREFUSED: /* connection problems */
case -ECONNRESET:
case -ECONNABORTED:
case -ENOTCONN:
case -EHOSTDOWN:
- case -ENETDOWN:
case -EHOSTUNREACH:
- case -ENETUNREACH:
case -EPIPE:
trace_rpcb_unreachable_err(task);
if (!RPC_IS_SOFTCONN(task)) {
@@ -2191,19 +2196,22 @@ call_connect_status(struct rpc_task *task)
task->tk_status = 0;
switch (status) {
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL)
+ break;
+ fallthrough;
case -ECONNREFUSED:
case -ECONNRESET:
/* A positive refusal suggests a rebind is needed. */
- if (RPC_IS_SOFTCONN(task))
- break;
if (clnt->cl_autobind) {
rpc_force_rebind(clnt);
+ if (RPC_IS_SOFTCONN(task))
+ break;
goto out_retry;
}
fallthrough;
case -ECONNABORTED:
- case -ENETDOWN:
- case -ENETUNREACH:
case -EHOSTUNREACH:
case -EPIPE:
case -EPROTO:
@@ -2455,10 +2463,13 @@ call_status(struct rpc_task *task)
trace_rpc_call_status(task);
task->tk_status = 0;
switch(status) {
- case -EHOSTDOWN:
case -ENETDOWN:
- case -EHOSTUNREACH:
case -ENETUNREACH:
+ if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL)
+ goto out_exit;
+ fallthrough;
+ case -EHOSTDOWN:
+ case -EHOSTUNREACH:
case -EPERM:
if (RPC_IS_SOFTCONN(task))
goto out_exit;
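The new RPC_CLNT_CREATE_NETUNREACH_FATAL flag propagates to tasks as RPC_TASK_NETUNREACH_FATAL, which the call_bind_status/call_connect_status/call_status hunks above use to turn ENETDOWN/ENETUNREACH into hard failures instead of retries. A sketch of opting in at client-creation time; the surrounding fields are illustrative boilerplate:

    struct rpc_create_args args = {
        .net        = net,
        .protocol   = XPRT_TRANSPORT_TCP,
        .address    = (struct sockaddr *)&addr,
        .addrsize   = sizeof(addr),
        .program    = &my_program,       /* illustrative */
        .version    = 1,
        .authflavor = RPC_AUTH_UNIX,
        /* fail fast on ENETDOWN/ENETUNREACH instead of waiting */
        .flags      = RPC_CLNT_CREATE_NETUNREACH_FATAL,
    };
    struct rpc_clnt *clnt = rpc_create(&args);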
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 102c3818bc54..53bcca365fb1 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -820,9 +820,10 @@ static void rpcb_getport_done(struct rpc_task *child, void *data)
}
trace_rpcb_setport(child, map->r_status, map->r_port);
- xprt->ops->set_port(xprt, map->r_port);
- if (map->r_port)
+ if (map->r_port) {
+ xprt->ops->set_port(xprt, map->r_port);
xprt_set_bound(xprt);
+ }
}
/*
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 9b45fbdc90ca..73bc39281ef5 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -276,6 +276,8 @@ EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
+ if (unlikely(current->flags & PF_EXITING))
+ return -EINTR;
schedule();
if (signal_pending_state(mode, current))
return -ERESTARTSYS;
diff --git a/net/sunrpc/sysfs.c b/net/sunrpc/sysfs.c
index 5c8ecdaaa985..09434e1143c5 100644
--- a/net/sunrpc/sysfs.c
+++ b/net/sunrpc/sysfs.c
@@ -59,6 +59,16 @@ static struct kobject *rpc_sysfs_object_alloc(const char *name,
return NULL;
}
+static inline struct rpc_clnt *
+rpc_sysfs_client_kobj_get_clnt(struct kobject *kobj)
+{
+ struct rpc_sysfs_client *c = container_of(kobj,
+ struct rpc_sysfs_client, kobject);
+ struct rpc_clnt *ret = c->clnt;
+
+ return refcount_inc_not_zero(&ret->cl_count) ? ret : NULL;
+}
+
static inline struct rpc_xprt *
rpc_sysfs_xprt_kobj_get_xprt(struct kobject *kobj)
{
@@ -86,6 +96,51 @@ rpc_sysfs_xprt_switch_kobj_get_xprt(struct kobject *kobj)
return xprt_switch_get(x->xprt_switch);
}
+static ssize_t rpc_sysfs_clnt_version_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct rpc_clnt *clnt = rpc_sysfs_client_kobj_get_clnt(kobj);
+ ssize_t ret;
+
+ if (!clnt)
+ return sprintf(buf, "<closed>\n");
+
+ ret = sprintf(buf, "%u", clnt->cl_vers);
+ refcount_dec(&clnt->cl_count);
+ return ret;
+}
+
+static ssize_t rpc_sysfs_clnt_program_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct rpc_clnt *clnt = rpc_sysfs_client_kobj_get_clnt(kobj);
+ ssize_t ret;
+
+ if (!clnt)
+ return sprintf(buf, "<closed>\n");
+
+ ret = sprintf(buf, "%s", clnt->cl_program->name);
+ refcount_dec(&clnt->cl_count);
+ return ret;
+}
+
+static ssize_t rpc_sysfs_clnt_max_connect_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct rpc_clnt *clnt = rpc_sysfs_client_kobj_get_clnt(kobj);
+ ssize_t ret;
+
+ if (!clnt)
+ return sprintf(buf, "<closed>\n");
+
+ ret = sprintf(buf, "%u\n", clnt->cl_max_connect);
+ refcount_dec(&clnt->cl_count);
+ return ret;
+}
+
static ssize_t rpc_sysfs_xprt_dstaddr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
@@ -129,6 +184,31 @@ static ssize_t rpc_sysfs_xprt_srcaddr_show(struct kobject *kobj,
return ret;
}
+static const char *xprtsec_strings[] = {
+ [RPC_XPRTSEC_NONE] = "none",
+ [RPC_XPRTSEC_TLS_ANON] = "tls-anon",
+ [RPC_XPRTSEC_TLS_X509] = "tls-x509",
+};
+
+static ssize_t rpc_sysfs_xprt_xprtsec_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj);
+ ssize_t ret;
+
+ if (!xprt) {
+ ret = sprintf(buf, "<closed>\n");
+ goto out;
+ }
+
+ ret = sprintf(buf, "%s\n", xprtsec_strings[xprt->xprtsec.policy]);
+ xprt_put(xprt);
+out:
+ return ret;
+}
+
static ssize_t rpc_sysfs_xprt_info_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -206,6 +286,14 @@ static ssize_t rpc_sysfs_xprt_state_show(struct kobject *kobj,
return ret;
}
+static ssize_t rpc_sysfs_xprt_del_xprt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "# delete this xprt\n");
+}
+
static ssize_t rpc_sysfs_xprt_switch_info_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
@@ -225,6 +313,55 @@ static ssize_t rpc_sysfs_xprt_switch_info_show(struct kobject *kobj,
return ret;
}
+static ssize_t rpc_sysfs_xprt_switch_add_xprt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "# add one xprt to this xprt_switch\n");
+}
+
+static ssize_t rpc_sysfs_xprt_switch_add_xprt_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rpc_xprt_switch *xprt_switch =
+ rpc_sysfs_xprt_switch_kobj_get_xprt(kobj);
+ struct xprt_create xprt_create_args;
+ struct rpc_xprt *xprt, *new;
+
+ if (!xprt_switch)
+ return 0;
+
+ xprt = rpc_xprt_switch_get_main_xprt(xprt_switch);
+ if (!xprt)
+ goto out;
+
+ xprt_create_args.ident = xprt->xprt_class->ident;
+ xprt_create_args.net = xprt->xprt_net;
+ xprt_create_args.dstaddr = (struct sockaddr *)&xprt->addr;
+ xprt_create_args.addrlen = xprt->addrlen;
+ xprt_create_args.servername = xprt->servername;
+ xprt_create_args.bc_xprt = xprt->bc_xprt;
+ xprt_create_args.xprtsec = xprt->xprtsec;
+ xprt_create_args.connect_timeout = xprt->connect_timeout;
+ xprt_create_args.reconnect_timeout = xprt->max_reconnect_timeout;
+
+ new = xprt_create_transport(&xprt_create_args);
+ if (IS_ERR_OR_NULL(new)) {
+ count = PTR_ERR(new);
+ goto out_put_xprt;
+ }
+
+ rpc_xprt_switch_add_xprt(xprt_switch, new);
+ xprt_put(new);
+
+out_put_xprt:
+ xprt_put(xprt);
+out:
+ xprt_switch_put(xprt_switch);
+ return count;
+}
+
static ssize_t rpc_sysfs_xprt_dstaddr_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
@@ -335,6 +472,40 @@ out_put:
return count;
}
+static ssize_t rpc_sysfs_xprt_del_xprt(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj);
+ struct rpc_xprt_switch *xps = rpc_sysfs_xprt_kobj_get_xprt_switch(kobj);
+
+ if (!xprt || !xps) {
+ count = 0;
+ goto out;
+ }
+
+ if (xprt->main) {
+ count = -EINVAL;
+ goto release_tasks;
+ }
+
+ if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) {
+ count = -EINTR;
+ goto out_put;
+ }
+
+ xprt_set_offline_locked(xprt, xps);
+ xprt_delete_locked(xprt, xps);
+
+release_tasks:
+ xprt_release_write(xprt, NULL);
+out_put:
+ xprt_put(xprt);
+ xprt_switch_put(xps);
+out:
+ return count;
+}
+
int rpc_sysfs_init(void)
{
rpc_sunrpc_kset = kset_create_and_add("sunrpc", NULL, kernel_kobj);
@@ -398,23 +569,48 @@ static const void *rpc_sysfs_xprt_namespace(const struct kobject *kobj)
kobject)->xprt->xprt_net;
}
+static struct kobj_attribute rpc_sysfs_clnt_version = __ATTR(rpc_version,
+ 0444, rpc_sysfs_clnt_version_show, NULL);
+
+static struct kobj_attribute rpc_sysfs_clnt_program = __ATTR(program,
+ 0444, rpc_sysfs_clnt_program_show, NULL);
+
+static struct kobj_attribute rpc_sysfs_clnt_max_connect = __ATTR(max_connect,
+ 0444, rpc_sysfs_clnt_max_connect_show, NULL);
+
+static struct attribute *rpc_sysfs_rpc_clnt_attrs[] = {
+ &rpc_sysfs_clnt_version.attr,
+ &rpc_sysfs_clnt_program.attr,
+ &rpc_sysfs_clnt_max_connect.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(rpc_sysfs_rpc_clnt);
+
static struct kobj_attribute rpc_sysfs_xprt_dstaddr = __ATTR(dstaddr,
0644, rpc_sysfs_xprt_dstaddr_show, rpc_sysfs_xprt_dstaddr_store);
static struct kobj_attribute rpc_sysfs_xprt_srcaddr = __ATTR(srcaddr,
0644, rpc_sysfs_xprt_srcaddr_show, NULL);
+static struct kobj_attribute rpc_sysfs_xprt_xprtsec = __ATTR(xprtsec,
+ 0644, rpc_sysfs_xprt_xprtsec_show, NULL);
+
static struct kobj_attribute rpc_sysfs_xprt_info = __ATTR(xprt_info,
0444, rpc_sysfs_xprt_info_show, NULL);
static struct kobj_attribute rpc_sysfs_xprt_change_state = __ATTR(xprt_state,
0644, rpc_sysfs_xprt_state_show, rpc_sysfs_xprt_state_change);
+static struct kobj_attribute rpc_sysfs_xprt_del = __ATTR(del_xprt,
+ 0644, rpc_sysfs_xprt_del_xprt_show, rpc_sysfs_xprt_del_xprt);
+
static struct attribute *rpc_sysfs_xprt_attrs[] = {
&rpc_sysfs_xprt_dstaddr.attr,
&rpc_sysfs_xprt_srcaddr.attr,
+ &rpc_sysfs_xprt_xprtsec.attr,
&rpc_sysfs_xprt_info.attr,
&rpc_sysfs_xprt_change_state.attr,
+ &rpc_sysfs_xprt_del.attr,
NULL,
};
ATTRIBUTE_GROUPS(rpc_sysfs_xprt);
@@ -422,14 +618,20 @@ ATTRIBUTE_GROUPS(rpc_sysfs_xprt);
static struct kobj_attribute rpc_sysfs_xprt_switch_info =
__ATTR(xprt_switch_info, 0444, rpc_sysfs_xprt_switch_info_show, NULL);
+static struct kobj_attribute rpc_sysfs_xprt_switch_add_xprt =
+ __ATTR(add_xprt, 0644, rpc_sysfs_xprt_switch_add_xprt_show,
+ rpc_sysfs_xprt_switch_add_xprt_store);
+
static struct attribute *rpc_sysfs_xprt_switch_attrs[] = {
&rpc_sysfs_xprt_switch_info.attr,
+ &rpc_sysfs_xprt_switch_add_xprt.attr,
NULL,
};
ATTRIBUTE_GROUPS(rpc_sysfs_xprt_switch);
static const struct kobj_type rpc_sysfs_client_type = {
.release = rpc_sysfs_client_release,
+ .default_groups = rpc_sysfs_rpc_clnt_groups,
.sysfs_ops = &kobj_sysfs_ops,
.namespace = rpc_sysfs_client_namespace,
};
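The client attributes added above use the stock kobj_attribute plus ATTRIBUTE_GROUPS() wiring, with .default_groups on the kobj_type so sysfs creates the files automatically when the kobject is registered. The same skeleton reduced to a single attribute (all names illustrative):

    static ssize_t demo_show(struct kobject *kobj,
                             struct kobj_attribute *attr, char *buf)
    {
        return sysfs_emit(buf, "demo\n");    /* bounds-checked sprintf() */
    }

    static struct kobj_attribute demo_attr = __ATTR(demo, 0444, demo_show, NULL);

    static struct attribute *demo_attrs[] = {
        &demo_attr.attr,
        NULL,
    };
    ATTRIBUTE_GROUPS(demo);    /* generates 'demo_groups' */

    static const struct kobj_type demo_ktype = {
        .sysfs_ops      = &kobj_sysfs_ops,
        .default_groups = demo_groups,
    };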
diff --git a/net/sunrpc/xprtmultipath.c b/net/sunrpc/xprtmultipath.c
index 7e98d4dd9f10..4c5e08b0aa64 100644
--- a/net/sunrpc/xprtmultipath.c
+++ b/net/sunrpc/xprtmultipath.c
@@ -92,6 +92,27 @@ void rpc_xprt_switch_remove_xprt(struct rpc_xprt_switch *xps,
xprt_put(xprt);
}
+/**
+ * rpc_xprt_switch_get_main_xprt - Get the 'main' xprt for an xprt switch.
+ * @xps: pointer to struct rpc_xprt_switch.
+ */
+struct rpc_xprt *rpc_xprt_switch_get_main_xprt(struct rpc_xprt_switch *xps)
+{
+ struct rpc_xprt_iter xpi;
+ struct rpc_xprt *xprt;
+
+ xprt_iter_init_listall(&xpi, xps);
+
+ xprt = xprt_iter_get_next(&xpi);
+ while (xprt && !xprt->main) {
+ xprt_put(xprt);
+ xprt = xprt_iter_get_next(&xpi);
+ }
+
+ xprt_iter_destroy(&xpi);
+ return xprt;
+}
+
static DEFINE_IDA(rpc_xprtswitch_ids);
void xprt_multipath_cleanup_ids(void)
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index e5d104ce7b82..5696af45bcf7 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -806,8 +806,11 @@ static int __xsk_generic_xmit(struct sock *sk)
* if there is space in it. This avoids having to implement
* any buffering in the Tx path.
*/
- if (xsk_cq_reserve_addr_locked(xs->pool, desc.addr))
+ err = xsk_cq_reserve_addr_locked(xs->pool, desc.addr);
+ if (err) {
+ err = -EAGAIN;
goto out;
+ }
skb = xsk_build_skb(xs, &desc);
if (IS_ERR(skb)) {
diff --git a/rust/Makefile b/rust/Makefile
index b9cc810764e9..99bc3eea44a6 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -260,7 +260,8 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
-mfunction-return=thunk-extern -mrecord-mcount -mabi=lp64 \
-mindirect-branch-cs-prefix -mstack-protector-guard% -mtraceback=no \
-mno-pointers-to-nested-functions -mno-string \
- -mno-strict-align -mstrict-align \
+ -mno-strict-align -mstrict-align -mdirect-extern-access \
+ -mexplicit-relocs -mno-check-zero-division \
-fconserve-stack -falign-jumps=% -falign-loops=% \
-femit-struct-debug-baseonly -fno-ipa-cp-clone -fno-ipa-sra \
-fno-partial-inlining -fplugin-arg-arm_ssp_per_task_plugin-% \
@@ -274,6 +275,8 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
# Derived from `scripts/Makefile.clang`.
BINDGEN_TARGET_x86 := x86_64-linux-gnu
BINDGEN_TARGET_arm64 := aarch64-linux-gnu
+BINDGEN_TARGET_arm := arm-linux-gnueabi
+BINDGEN_TARGET_loongarch := loongarch64-linux-gnusf
BINDGEN_TARGET_um := $(BINDGEN_TARGET_$(SUBARCH))
BINDGEN_TARGET := $(BINDGEN_TARGET_$(SRCARCH))
@@ -431,6 +434,13 @@ redirect-intrinsics = \
__muloti4 __multi3 \
__udivmodti4 __udivti3 __umodti3
+ifdef CONFIG_ARM
+ # Add eabi intrinsics for ARM 32-bit
+ redirect-intrinsics += \
+ __aeabi_fadd __aeabi_fmul __aeabi_fcmpeq __aeabi_fcmple __aeabi_fcmplt __aeabi_fcmpun \
+ __aeabi_dadd __aeabi_dmul __aeabi_dcmple __aeabi_dcmplt __aeabi_dcmpun \
+ __aeabi_uldivmod
+endif
ifneq ($(or $(CONFIG_ARM64),$(and $(CONFIG_RISCV),$(CONFIG_64BIT))),)
# These intrinsics are defined for ARM64 and RISCV64
redirect-intrinsics += \
diff --git a/rust/compiler_builtins.rs b/rust/compiler_builtins.rs
index f14b8d7caf89..dd16c1dc899c 100644
--- a/rust/compiler_builtins.rs
+++ b/rust/compiler_builtins.rs
@@ -73,5 +73,29 @@ define_panicking_intrinsics!("`u128` should not be used", {
__umodti3,
});
+#[cfg(target_arch = "arm")]
+define_panicking_intrinsics!("`f32` should not be used", {
+ __aeabi_fadd,
+ __aeabi_fmul,
+ __aeabi_fcmpeq,
+ __aeabi_fcmple,
+ __aeabi_fcmplt,
+ __aeabi_fcmpun,
+});
+
+#[cfg(target_arch = "arm")]
+define_panicking_intrinsics!("`f64` should not be used", {
+ __aeabi_dadd,
+ __aeabi_dmul,
+ __aeabi_dcmple,
+ __aeabi_dcmplt,
+ __aeabi_dcmpun,
+});
+
+#[cfg(target_arch = "arm")]
+define_panicking_intrinsics!("`u64` division/modulo should not be used", {
+ __aeabi_uldivmod,
+});
+
// NOTE: if you are adding a new intrinsic here, you should also add it to
// `redirect-intrinsics` in `rust/Makefile`.
diff --git a/rust/kernel/device.rs b/rust/kernel/device.rs
index db2d9658ba47..21b343a1dc4d 100644
--- a/rust/kernel/device.rs
+++ b/rust/kernel/device.rs
@@ -209,6 +209,32 @@ unsafe impl Send for Device {}
// synchronization in `struct device`.
unsafe impl Sync for Device {}
+/// Marker trait for the context of a bus specific device.
+///
+/// Some functions of a bus specific device should only be called from a certain context, i.e. bus
+/// callbacks, such as `probe()`.
+///
+/// This is the marker trait for structures representing the context of a bus specific device.
+pub trait DeviceContext: private::Sealed {}
+
+/// The [`Normal`] context is the context of a bus specific device when it is not an argument of
+/// any bus callback.
+pub struct Normal;
+
+/// The [`Core`] context is the context of a bus specific device when it is supplied as an argument of
+/// any of the bus callbacks, such as `probe()`.
+pub struct Core;
+
+mod private {
+ pub trait Sealed {}
+
+ impl Sealed for super::Core {}
+ impl Sealed for super::Normal {}
+}
+
+impl DeviceContext for Core {}
+impl DeviceContext for Normal {}
+
#[doc(hidden)]
#[macro_export]
macro_rules! dev_printk {
diff --git a/rust/kernel/devres.rs b/rust/kernel/devres.rs
index 942376f6f3af..ddb1ce4a78d9 100644
--- a/rust/kernel/devres.rs
+++ b/rust/kernel/devres.rs
@@ -92,7 +92,7 @@ struct DevresInner<T> {
/// let devres = Devres::new(&dev, iomem, GFP_KERNEL)?;
///
/// let res = devres.try_access().ok_or(ENXIO)?;
-/// res.writel(0x42, 0x0);
+/// res.write8(0x42, 0x0);
/// # Ok(())
/// # }
/// ```
diff --git a/rust/kernel/faux.rs b/rust/kernel/faux.rs
index 5acc0c02d451..8a50fcd4c9bb 100644
--- a/rust/kernel/faux.rs
+++ b/rust/kernel/faux.rs
@@ -19,16 +19,25 @@ use core::ptr::{addr_of_mut, null, null_mut, NonNull};
/// `self.0` always holds a valid pointer to an initialized and registered [`struct faux_device`].
///
/// [`struct faux_device`]: srctree/include/linux/device/faux.h
-#[repr(transparent)]
pub struct Registration(NonNull<bindings::faux_device>);
impl Registration {
/// Create and register a new faux device with the given name.
- pub fn new(name: &CStr) -> Result<Self> {
+ #[inline]
+ pub fn new(name: &CStr, parent: Option<&device::Device>) -> Result<Self> {
// SAFETY:
// - `name` is copied by this function into its own storage
// - `faux_ops` is safe to leave NULL according to the C API
- let dev = unsafe { bindings::faux_device_create(name.as_char_ptr(), null_mut(), null()) };
+ // - `parent` can be either NULL or a pointer to a `struct device`, and `faux_device_create`
+ // will take a reference to `parent` using `device_add` - ensuring that it remains valid
+ // for the lifetime of the faux device.
+ let dev = unsafe {
+ bindings::faux_device_create(
+ name.as_char_ptr(),
+ parent.map_or(null_mut(), |p| p.as_raw()),
+ null(),
+ )
+ };
// The above function will return either a valid device, or NULL on failure
// INVARIANT: The device will remain registered until faux_device_destroy() is called, which
@@ -50,6 +59,7 @@ impl AsRef<device::Device> for Registration {
}
impl Drop for Registration {
+ #[inline]
fn drop(&mut self) {
// SAFETY: `self.0` is a valid registered faux_device via our type invariants.
unsafe { bindings::faux_device_destroy(self.as_raw()) }
diff --git a/rust/kernel/io.rs b/rust/kernel/io.rs
index d4a73e52e3ee..72d80a6f131e 100644
--- a/rust/kernel/io.rs
+++ b/rust/kernel/io.rs
@@ -98,9 +98,9 @@ impl<const SIZE: usize> IoRaw<SIZE> {
///# fn no_run() -> Result<(), Error> {
/// // SAFETY: Invalid usage for example purposes.
/// let iomem = unsafe { IoMem::<{ core::mem::size_of::<u32>() }>::new(0xBAAAAAAD)? };
-/// iomem.writel(0x42, 0x0);
-/// assert!(iomem.try_writel(0x42, 0x0).is_ok());
-/// assert!(iomem.try_writel(0x42, 0x4).is_err());
+/// iomem.write32(0x42, 0x0);
+/// assert!(iomem.try_write32(0x42, 0x0).is_ok());
+/// assert!(iomem.try_write32(0x42, 0x4).is_err());
/// # Ok(())
/// # }
/// ```
@@ -108,7 +108,7 @@ impl<const SIZE: usize> IoRaw<SIZE> {
pub struct Io<const SIZE: usize = 0>(IoRaw<SIZE>);
macro_rules! define_read {
- ($(#[$attr:meta])* $name:ident, $try_name:ident, $type_name:ty) => {
+ ($(#[$attr:meta])* $name:ident, $try_name:ident, $c_fn:ident -> $type_name:ty) => {
/// Read IO data from a given offset known at compile time.
///
/// Bound checks are performed on compile time, hence if the offset is not known at compile
@@ -119,7 +119,7 @@ macro_rules! define_read {
let addr = self.io_addr_assert::<$type_name>(offset);
// SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
- unsafe { bindings::$name(addr as _) }
+ unsafe { bindings::$c_fn(addr as _) }
}
/// Read IO data from a given offset.
@@ -131,13 +131,13 @@ macro_rules! define_read {
let addr = self.io_addr::<$type_name>(offset)?;
// SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
- Ok(unsafe { bindings::$name(addr as _) })
+ Ok(unsafe { bindings::$c_fn(addr as _) })
}
};
}
macro_rules! define_write {
- ($(#[$attr:meta])* $name:ident, $try_name:ident, $type_name:ty) => {
+ ($(#[$attr:meta])* $name:ident, $try_name:ident, $c_fn:ident <- $type_name:ty) => {
/// Write IO data from a given offset known at compile time.
///
/// Bound checks are performed on compile time, hence if the offset is not known at compile
@@ -148,7 +148,7 @@ macro_rules! define_write {
let addr = self.io_addr_assert::<$type_name>(offset);
// SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
- unsafe { bindings::$name(value, addr as _, ) }
+ unsafe { bindings::$c_fn(value, addr as _, ) }
}
/// Write IO data from a given offset.
@@ -160,7 +160,7 @@ macro_rules! define_write {
let addr = self.io_addr::<$type_name>(offset)?;
// SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
- unsafe { bindings::$name(value, addr as _) }
+ unsafe { bindings::$c_fn(value, addr as _) }
Ok(())
}
};
@@ -218,43 +218,43 @@ impl<const SIZE: usize> Io<SIZE> {
self.addr() + offset
}
- define_read!(readb, try_readb, u8);
- define_read!(readw, try_readw, u16);
- define_read!(readl, try_readl, u32);
+ define_read!(read8, try_read8, readb -> u8);
+ define_read!(read16, try_read16, readw -> u16);
+ define_read!(read32, try_read32, readl -> u32);
define_read!(
#[cfg(CONFIG_64BIT)]
- readq,
- try_readq,
- u64
+ read64,
+ try_read64,
+ readq -> u64
);
- define_read!(readb_relaxed, try_readb_relaxed, u8);
- define_read!(readw_relaxed, try_readw_relaxed, u16);
- define_read!(readl_relaxed, try_readl_relaxed, u32);
+ define_read!(read8_relaxed, try_read8_relaxed, readb_relaxed -> u8);
+ define_read!(read16_relaxed, try_read16_relaxed, readw_relaxed -> u16);
+ define_read!(read32_relaxed, try_read32_relaxed, readl_relaxed -> u32);
define_read!(
#[cfg(CONFIG_64BIT)]
- readq_relaxed,
- try_readq_relaxed,
- u64
+ read64_relaxed,
+ try_read64_relaxed,
+ readq_relaxed -> u64
);
- define_write!(writeb, try_writeb, u8);
- define_write!(writew, try_writew, u16);
- define_write!(writel, try_writel, u32);
+ define_write!(write8, try_write8, writeb <- u8);
+ define_write!(write16, try_write16, writew <- u16);
+ define_write!(write32, try_write32, writel <- u32);
define_write!(
#[cfg(CONFIG_64BIT)]
- writeq,
- try_writeq,
- u64
+ write64,
+ try_write64,
+ writeq <- u64
);
- define_write!(writeb_relaxed, try_writeb_relaxed, u8);
- define_write!(writew_relaxed, try_writew_relaxed, u16);
- define_write!(writel_relaxed, try_writel_relaxed, u32);
+ define_write!(write8_relaxed, try_write8_relaxed, writeb_relaxed <- u8);
+ define_write!(write16_relaxed, try_write16_relaxed, writew_relaxed <- u16);
+ define_write!(write32_relaxed, try_write32_relaxed, writel_relaxed <- u32);
define_write!(
#[cfg(CONFIG_64BIT)]
- writeq_relaxed,
- try_writeq_relaxed,
- u64
+ write64_relaxed,
+ try_write64_relaxed,
+ writeq_relaxed <- u64
);
}
diff --git a/rust/kernel/miscdevice.rs b/rust/kernel/miscdevice.rs
index e14433b2ab9d..fa9ecc42602a 100644
--- a/rust/kernel/miscdevice.rs
+++ b/rust/kernel/miscdevice.rs
@@ -35,7 +35,7 @@ impl MiscDeviceOptions {
let mut result: bindings::miscdevice = unsafe { MaybeUninit::zeroed().assume_init() };
result.minor = bindings::MISC_DYNAMIC_MINOR as _;
result.name = self.name.as_char_ptr();
- result.fops = create_vtable::<T>();
+ result.fops = MiscdeviceVTable::<T>::build();
result
}
}
@@ -160,171 +160,160 @@ pub trait MiscDevice: Sized {
}
}
-const fn create_vtable<T: MiscDevice>() -> &'static bindings::file_operations {
- const fn maybe_fn<T: Copy>(check: bool, func: T) -> Option<T> {
- if check {
- Some(func)
- } else {
- None
+/// A vtable for the file operations of a Rust miscdevice.
+struct MiscdeviceVTable<T: MiscDevice>(PhantomData<T>);
+
+impl<T: MiscDevice> MiscdeviceVTable<T> {
+ /// # Safety
+ ///
+ /// `file` and `inode` must be the file and inode for a file that is undergoing initialization.
+ /// The file must be associated with a `MiscDeviceRegistration<T>`.
+ unsafe extern "C" fn open(inode: *mut bindings::inode, raw_file: *mut bindings::file) -> c_int {
+ // SAFETY: The pointers are valid and for a file being opened.
+ let ret = unsafe { bindings::generic_file_open(inode, raw_file) };
+ if ret != 0 {
+ return ret;
}
- }
- struct VtableHelper<T: MiscDevice> {
- _t: PhantomData<T>,
- }
- impl<T: MiscDevice> VtableHelper<T> {
- const VTABLE: bindings::file_operations = bindings::file_operations {
- open: Some(fops_open::<T>),
- release: Some(fops_release::<T>),
- unlocked_ioctl: maybe_fn(T::HAS_IOCTL, fops_ioctl::<T>),
- #[cfg(CONFIG_COMPAT)]
- compat_ioctl: if T::HAS_COMPAT_IOCTL {
- Some(fops_compat_ioctl::<T>)
- } else if T::HAS_IOCTL {
- Some(bindings::compat_ptr_ioctl)
- } else {
- None
- },
- show_fdinfo: maybe_fn(T::HAS_SHOW_FDINFO, fops_show_fdinfo::<T>),
- // SAFETY: All zeros is a valid value for `bindings::file_operations`.
- ..unsafe { MaybeUninit::zeroed().assume_init() }
- };
- }
+ // SAFETY: The open call of a file can access the private data.
+ let misc_ptr = unsafe { (*raw_file).private_data };
- &VtableHelper::<T>::VTABLE
-}
+ // SAFETY: This is a miscdevice, so `misc_open()` set the private data to a pointer to the
+ // associated `struct miscdevice` before calling into this method. Furthermore,
+ // `misc_open()` ensures that the miscdevice can't be unregistered and freed during this
+ // call to `fops_open`.
+ let misc = unsafe { &*misc_ptr.cast::<MiscDeviceRegistration<T>>() };
-/// # Safety
-///
-/// `file` and `inode` must be the file and inode for a file that is undergoing initialization.
-/// The file must be associated with a `MiscDeviceRegistration<T>`.
-unsafe extern "C" fn fops_open<T: MiscDevice>(
- inode: *mut bindings::inode,
- raw_file: *mut bindings::file,
-) -> c_int {
- // SAFETY: The pointers are valid and for a file being opened.
- let ret = unsafe { bindings::generic_file_open(inode, raw_file) };
- if ret != 0 {
- return ret;
- }
+ // SAFETY:
+ // * This underlying file is valid for (much longer than) the duration of `T::open`.
+ // * There is no active fdget_pos region on the file on this thread.
+ let file = unsafe { File::from_raw_file(raw_file) };
- // SAFETY: The open call of a file can access the private data.
- let misc_ptr = unsafe { (*raw_file).private_data };
-
- // SAFETY: This is a miscdevice, so `misc_open()` set the private data to a pointer to the
- // associated `struct miscdevice` before calling into this method. Furthermore, `misc_open()`
- // ensures that the miscdevice can't be unregistered and freed during this call to `fops_open`.
- let misc = unsafe { &*misc_ptr.cast::<MiscDeviceRegistration<T>>() };
+ let ptr = match T::open(file, misc) {
+ Ok(ptr) => ptr,
+ Err(err) => return err.to_errno(),
+ };
- // SAFETY:
- // * This underlying file is valid for (much longer than) the duration of `T::open`.
- // * There is no active fdget_pos region on the file on this thread.
- let file = unsafe { File::from_raw_file(raw_file) };
+ // This overwrites the private data with the value specified by the user, changing the type
+ // of this file's private data. All future accesses to the private data is performed by
+ // other fops_* methods in this file, which all correctly cast the private data to the new
+ // type.
+ //
+ // SAFETY: The open call of a file can access the private data.
+ unsafe { (*raw_file).private_data = ptr.into_foreign() };
- let ptr = match T::open(file, misc) {
- Ok(ptr) => ptr,
- Err(err) => return err.to_errno(),
- };
-
- // This overwrites the private data with the value specified by the user, changing the type of
- // this file's private data. All future accesses to the private data is performed by other
- // fops_* methods in this file, which all correctly cast the private data to the new type.
- //
- // SAFETY: The open call of a file can access the private data.
- unsafe { (*raw_file).private_data = ptr.into_foreign() };
+ 0
+ }
- 0
-}
+ /// # Safety
+ ///
+ /// `file` and `inode` must be the file and inode for a file that is being released. The file
+ /// must be associated with a `MiscDeviceRegistration<T>`.
+ unsafe extern "C" fn release(_inode: *mut bindings::inode, file: *mut bindings::file) -> c_int {
+ // SAFETY: The release call of a file owns the private data.
+ let private = unsafe { (*file).private_data };
+ // SAFETY: The release call of a file owns the private data.
+ let ptr = unsafe { <T::Ptr as ForeignOwnable>::from_foreign(private) };
+
+ // SAFETY:
+ // * The file is valid for the duration of this call.
+ // * There is no active fdget_pos region on the file on this thread.
+ T::release(ptr, unsafe { File::from_raw_file(file) });
+
+ 0
+ }
-/// # Safety
-///
-/// `file` and `inode` must be the file and inode for a file that is being released. The file must
-/// be associated with a `MiscDeviceRegistration<T>`.
-unsafe extern "C" fn fops_release<T: MiscDevice>(
- _inode: *mut bindings::inode,
- file: *mut bindings::file,
-) -> c_int {
- // SAFETY: The release call of a file owns the private data.
- let private = unsafe { (*file).private_data };
- // SAFETY: The release call of a file owns the private data.
- let ptr = unsafe { <T::Ptr as ForeignOwnable>::from_foreign(private) };
-
- // SAFETY:
- // * The file is valid for the duration of this call.
- // * There is no active fdget_pos region on the file on this thread.
- T::release(ptr, unsafe { File::from_raw_file(file) });
-
- 0
-}
+ /// # Safety
+ ///
+ /// `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
+ unsafe extern "C" fn ioctl(file: *mut bindings::file, cmd: c_uint, arg: c_ulong) -> c_long {
+ // SAFETY: The ioctl call of a file can access the private data.
+ let private = unsafe { (*file).private_data };
+ // SAFETY: Ioctl calls can borrow the private data of the file.
+ let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
+
+ // SAFETY:
+ // * The file is valid for the duration of this call.
+ // * There is no active fdget_pos region on the file on this thread.
+ let file = unsafe { File::from_raw_file(file) };
+
+ match T::ioctl(device, file, cmd, arg) {
+ Ok(ret) => ret as c_long,
+ Err(err) => err.to_errno() as c_long,
+ }
+ }
-/// # Safety
-///
-/// `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
-unsafe extern "C" fn fops_ioctl<T: MiscDevice>(
- file: *mut bindings::file,
- cmd: c_uint,
- arg: c_ulong,
-) -> c_long {
- // SAFETY: The ioctl call of a file can access the private data.
- let private = unsafe { (*file).private_data };
- // SAFETY: Ioctl calls can borrow the private data of the file.
- let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
-
- // SAFETY:
- // * The file is valid for the duration of this call.
- // * There is no active fdget_pos region on the file on this thread.
- let file = unsafe { File::from_raw_file(file) };
-
- match T::ioctl(device, file, cmd, arg) {
- Ok(ret) => ret as c_long,
- Err(err) => err.to_errno() as c_long,
+ /// # Safety
+ ///
+ /// `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
+ #[cfg(CONFIG_COMPAT)]
+ unsafe extern "C" fn compat_ioctl(
+ file: *mut bindings::file,
+ cmd: c_uint,
+ arg: c_ulong,
+ ) -> c_long {
+ // SAFETY: The compat ioctl call of a file can access the private data.
+ let private = unsafe { (*file).private_data };
+ // SAFETY: Ioctl calls can borrow the private data of the file.
+ let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
+
+ // SAFETY:
+ // * The file is valid for the duration of this call.
+ // * There is no active fdget_pos region on the file on this thread.
+ let file = unsafe { File::from_raw_file(file) };
+
+ match T::compat_ioctl(device, file, cmd, arg) {
+ Ok(ret) => ret as c_long,
+ Err(err) => err.to_errno() as c_long,
+ }
}
-}
-/// # Safety
-///
-/// `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
-#[cfg(CONFIG_COMPAT)]
-unsafe extern "C" fn fops_compat_ioctl<T: MiscDevice>(
- file: *mut bindings::file,
- cmd: c_uint,
- arg: c_ulong,
-) -> c_long {
- // SAFETY: The compat ioctl call of a file can access the private data.
- let private = unsafe { (*file).private_data };
- // SAFETY: Ioctl calls can borrow the private data of the file.
- let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
-
- // SAFETY:
- // * The file is valid for the duration of this call.
- // * There is no active fdget_pos region on the file on this thread.
- let file = unsafe { File::from_raw_file(file) };
-
- match T::compat_ioctl(device, file, cmd, arg) {
- Ok(ret) => ret as c_long,
- Err(err) => err.to_errno() as c_long,
+ /// # Safety
+ ///
+ /// - `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
+ /// - `seq_file` must be a valid `struct seq_file` that we can write to.
+ unsafe extern "C" fn show_fdinfo(seq_file: *mut bindings::seq_file, file: *mut bindings::file) {
+ // SAFETY: The release call of a file owns the private data.
+ let private = unsafe { (*file).private_data };
+ // SAFETY: Ioctl calls can borrow the private data of the file.
+ let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
+ // SAFETY:
+ // * The file is valid for the duration of this call.
+ // * There is no active fdget_pos region on the file on this thread.
+ let file = unsafe { File::from_raw_file(file) };
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in
+ // which this method is called.
+ let m = unsafe { SeqFile::from_raw(seq_file) };
+
+ T::show_fdinfo(device, m, file);
}
-}
-/// # Safety
-///
-/// - `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
-/// - `seq_file` must be a valid `struct seq_file` that we can write to.
-unsafe extern "C" fn fops_show_fdinfo<T: MiscDevice>(
- seq_file: *mut bindings::seq_file,
- file: *mut bindings::file,
-) {
- // SAFETY: The release call of a file owns the private data.
- let private = unsafe { (*file).private_data };
- // SAFETY: Ioctl calls can borrow the private data of the file.
- let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
- // SAFETY:
- // * The file is valid for the duration of this call.
- // * There is no active fdget_pos region on the file on this thread.
- let file = unsafe { File::from_raw_file(file) };
- // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
- // this method is called.
- let m = unsafe { SeqFile::from_raw(seq_file) };
-
- T::show_fdinfo(device, m, file);
+ const VTABLE: bindings::file_operations = bindings::file_operations {
+ open: Some(Self::open),
+ release: Some(Self::release),
+ unlocked_ioctl: if T::HAS_IOCTL {
+ Some(Self::ioctl)
+ } else {
+ None
+ },
+ #[cfg(CONFIG_COMPAT)]
+ compat_ioctl: if T::HAS_COMPAT_IOCTL {
+ Some(Self::compat_ioctl)
+ } else if T::HAS_IOCTL {
+ Some(bindings::compat_ptr_ioctl)
+ } else {
+ None
+ },
+ show_fdinfo: if T::HAS_SHOW_FDINFO {
+ Some(Self::show_fdinfo)
+ } else {
+ None
+ },
+ // SAFETY: All zeros is a valid value for `bindings::file_operations`.
+ ..unsafe { MaybeUninit::zeroed().assume_init() }
+ };
+
+ const fn build() -> &'static bindings::file_operations {
+ &Self::VTABLE
+ }
}
diff --git a/rust/kernel/pci.rs b/rust/kernel/pci.rs
index f7b2743828ae..c97d6d470b28 100644
--- a/rust/kernel/pci.rs
+++ b/rust/kernel/pci.rs
@@ -6,7 +6,7 @@
use crate::{
alloc::flags::*,
- bindings, container_of, device,
+ bindings, device,
device_id::RawDeviceId,
devres::Devres,
driver,
@@ -17,7 +17,11 @@ use crate::{
types::{ARef, ForeignOwnable, Opaque},
ThisModule,
};
-use core::{ops::Deref, ptr::addr_of_mut};
+use core::{
+ marker::PhantomData,
+ ops::Deref,
+ ptr::{addr_of_mut, NonNull},
+};
use kernel::prelude::*;
/// An adapter for the registration of PCI drivers.
@@ -60,17 +64,16 @@ impl<T: Driver + 'static> Adapter<T> {
) -> kernel::ffi::c_int {
// SAFETY: The PCI bus only ever calls the probe callback with a valid pointer to a
// `struct pci_dev`.
- let dev = unsafe { device::Device::get_device(addr_of_mut!((*pdev).dev)) };
- // SAFETY: `dev` is guaranteed to be embedded in a valid `struct pci_dev` by the call
- // above.
- let mut pdev = unsafe { Device::from_dev(dev) };
+ //
+ // INVARIANT: `pdev` is valid for the duration of `probe_callback()`.
+ let pdev = unsafe { &*pdev.cast::<Device<device::Core>>() };
// SAFETY: `DeviceId` is a `#[repr(transparent)` wrapper of `struct pci_device_id` and
// does not add additional invariants, so it's safe to transmute.
let id = unsafe { &*id.cast::<DeviceId>() };
let info = T::ID_TABLE.info(id.index());
- match T::probe(&mut pdev, info) {
+ match T::probe(pdev, info) {
Ok(data) => {
// Let the `struct pci_dev` own a reference of the driver's private data.
// SAFETY: By the type invariant `pdev.as_raw` returns a valid pointer to a
@@ -192,7 +195,7 @@ macro_rules! pci_device_table {
/// # Example
///
///```
-/// # use kernel::{bindings, pci};
+/// # use kernel::{bindings, device::Core, pci};
///
/// struct MyDriver;
///
@@ -210,7 +213,7 @@ macro_rules! pci_device_table {
/// const ID_TABLE: pci::IdTable<Self::IdInfo> = &PCI_TABLE;
///
/// fn probe(
-/// _pdev: &mut pci::Device,
+/// _pdev: &pci::Device<Core>,
/// _id_info: &Self::IdInfo,
/// ) -> Result<Pin<KBox<Self>>> {
/// Err(ENODEV)
@@ -219,7 +222,7 @@ macro_rules! pci_device_table {
///```
/// Drivers must implement this trait in order to get a PCI driver registered. Please refer to the
/// `Adapter` documentation for an example.
-pub trait Driver {
+pub trait Driver: Send {
/// The type holding information about each device id supported by the driver.
///
/// TODO: Use associated_type_defaults once stabilized:
@@ -234,20 +237,23 @@ pub trait Driver {
///
/// Called when a new PCI device is added or discovered.
/// Implementers should attempt to initialize the device here.
- fn probe(dev: &mut Device, id_info: &Self::IdInfo) -> Result<Pin<KBox<Self>>>;
+ fn probe(dev: &Device<device::Core>, id_info: &Self::IdInfo) -> Result<Pin<KBox<Self>>>;
}
/// The PCI device representation.
///
-/// A PCI device is based on an always reference counted `device:Device` instance. Cloning a PCI
-/// device, hence, also increments the base device' reference count.
+/// This structure represents the Rust abstraction for a C `struct pci_dev`. The implementation
+/// abstracts the usage of an already existing C `struct pci_dev` within Rust code that we get
+/// passed from the C side.
///
/// # Invariants
///
-/// `Device` hold a valid reference of `ARef<device::Device>` whose underlying `struct device` is a
-/// member of a `struct pci_dev`.
-#[derive(Clone)]
-pub struct Device(ARef<device::Device>);
+/// A [`Device`] instance represents a valid `struct pci_dev` created by the C portion of the kernel.
+#[repr(transparent)]
+pub struct Device<Ctx: device::DeviceContext = device::Normal>(
+ Opaque<bindings::pci_dev>,
+ PhantomData<Ctx>,
+);
/// A PCI BAR to perform I/O operations on.
///
@@ -256,13 +262,13 @@ pub struct Device(ARef<device::Device>);
/// `Bar` always holds an `IoRaw` instance that holds a valid pointer to the start of the I/O
/// memory mapped PCI bar and its size.
pub struct Bar<const SIZE: usize = 0> {
- pdev: Device,
+ pdev: ARef<Device>,
io: IoRaw<SIZE>,
num: i32,
}
impl<const SIZE: usize> Bar<SIZE> {
- fn new(pdev: Device, num: u32, name: &CStr) -> Result<Self> {
+ fn new(pdev: &Device, num: u32, name: &CStr) -> Result<Self> {
let len = pdev.resource_len(num)?;
if len == 0 {
return Err(ENOMEM);
@@ -300,12 +306,16 @@ impl<const SIZE: usize> Bar<SIZE> {
// `pdev` is valid by the invariants of `Device`.
// `ioptr` is guaranteed to be the start of a valid I/O mapped memory region.
// `num` is checked for validity by a previous call to `Device::resource_len`.
- unsafe { Self::do_release(&pdev, ioptr, num) };
+ unsafe { Self::do_release(pdev, ioptr, num) };
return Err(err);
}
};
- Ok(Bar { pdev, io, num })
+ Ok(Bar {
+ pdev: pdev.into(),
+ io,
+ num,
+ })
}
/// # Safety
@@ -351,20 +361,8 @@ impl<const SIZE: usize> Deref for Bar<SIZE> {
}
impl Device {
- /// Create a PCI Device instance from an existing `device::Device`.
- ///
- /// # Safety
- ///
- /// `dev` must be an `ARef<device::Device>` whose underlying `bindings::device` is a member of
- /// a `bindings::pci_dev`.
- pub unsafe fn from_dev(dev: ARef<device::Device>) -> Self {
- Self(dev)
- }
-
fn as_raw(&self) -> *mut bindings::pci_dev {
- // SAFETY: By the type invariant `self.0.as_raw` is a pointer to the `struct device`
- // embedded in `struct pci_dev`.
- unsafe { container_of!(self.0.as_raw(), bindings::pci_dev, dev) as _ }
+ self.0.get()
}
/// Returns the PCI vendor ID.
@@ -379,23 +377,6 @@ impl Device {
unsafe { (*self.as_raw()).device }
}
- /// Enable memory resources for this device.
- pub fn enable_device_mem(&self) -> Result {
- // SAFETY: `self.as_raw` is guaranteed to be a pointer to a valid `struct pci_dev`.
- let ret = unsafe { bindings::pci_enable_device_mem(self.as_raw()) };
- if ret != 0 {
- Err(Error::from_errno(ret))
- } else {
- Ok(())
- }
- }
-
- /// Enable bus-mastering for this device.
- pub fn set_master(&self) {
- // SAFETY: `self.as_raw` is guaranteed to be a pointer to a valid `struct pci_dev`.
- unsafe { bindings::pci_set_master(self.as_raw()) };
- }
-
/// Returns the size of the given PCI bar resource.
pub fn resource_len(&self, bar: u32) -> Result<bindings::resource_size_t> {
if !Bar::index_is_valid(bar) {
@@ -415,7 +396,7 @@ impl Device {
bar: u32,
name: &CStr,
) -> Result<Devres<Bar<SIZE>>> {
- let bar = Bar::<SIZE>::new(self.clone(), bar, name)?;
+ let bar = Bar::<SIZE>::new(self, bar, name)?;
let devres = Devres::new(self.as_ref(), bar, GFP_KERNEL)?;
Ok(devres)
@@ -427,8 +408,67 @@ impl Device {
}
}
+impl Device<device::Core> {
+ /// Enable memory resources for this device.
+ pub fn enable_device_mem(&self) -> Result {
+ // SAFETY: `self.as_raw` is guaranteed to be a pointer to a valid `struct pci_dev`.
+ to_result(unsafe { bindings::pci_enable_device_mem(self.as_raw()) })
+ }
+
+ /// Enable bus-mastering for this device.
+ pub fn set_master(&self) {
+ // SAFETY: `self.as_raw` is guaranteed to be a pointer to a valid `struct pci_dev`.
+ unsafe { bindings::pci_set_master(self.as_raw()) };
+ }
+}
+
+impl Deref for Device<device::Core> {
+ type Target = Device;
+
+ fn deref(&self) -> &Self::Target {
+ let ptr: *const Self = self;
+
+ // CAST: `Device<Ctx>` is a transparent wrapper of `Opaque<bindings::pci_dev>`.
+ let ptr = ptr.cast::<Device>();
+
+ // SAFETY: `ptr` was derived from `&self`.
+ unsafe { &*ptr }
+ }
+}
+
+impl From<&Device<device::Core>> for ARef<Device> {
+ fn from(dev: &Device<device::Core>) -> Self {
+ (&**dev).into()
+ }
+}
+
+// SAFETY: Instances of `Device` are always reference-counted.
+unsafe impl crate::types::AlwaysRefCounted for Device {
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
+ unsafe { bindings::pci_dev_get(self.as_raw()) };
+ }
+
+ unsafe fn dec_ref(obj: NonNull<Self>) {
+ // SAFETY: The safety requirements guarantee that the refcount is non-zero.
+ unsafe { bindings::pci_dev_put(obj.cast().as_ptr()) }
+ }
+}
+
impl AsRef<device::Device> for Device {
fn as_ref(&self) -> &device::Device {
- &self.0
+ // SAFETY: By the type invariant of `Self`, `self.as_raw()` is a pointer to a valid
+ // `struct pci_dev`.
+ let dev = unsafe { addr_of_mut!((*self.as_raw()).dev) };
+
+ // SAFETY: `dev` points to a valid `struct device`.
+ unsafe { device::Device::as_ref(dev) }
}
}
+
+// SAFETY: A `Device` is always reference-counted and can be released from any thread.
+unsafe impl Send for Device {}
+
+// SAFETY: `Device` can be shared among threads because all methods of `Device`
+// (i.e. `Device<Normal>`) are thread safe.
+unsafe impl Sync for Device {}
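Taken together, the pci.rs changes mean `probe()` now receives a `&pci::Device<Core>` whose
bus-setup methods are only reachable from bus callbacks, while a long-lived handle is taken as
`ARef<pci::Device>` via the new `From` impl. Below is a minimal sketch of that flow, assuming the
APIs shown in this patch; `MyDriver` is a placeholder and `PCI_TABLE` stands for a table defined
via `pci_device_table!` as in the doc example above.

```rust
use kernel::{device::Core, pci, prelude::*, types::ARef};

struct MyDriver {
    // Refcounted handle that stays valid after probe() returns.
    pdev: ARef<pci::Device>,
}

impl pci::Driver for MyDriver {
    type IdInfo = ();
    const ID_TABLE: pci::IdTable<Self::IdInfo> = &PCI_TABLE;

    fn probe(pdev: &pci::Device<Core>, _info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
        // Only `Device<Core>` exposes these operations; they are meant to
        // be called from bus callbacks only.
        pdev.enable_device_mem()?;
        pdev.set_master();

        // `Device<Core>` derefs to `Device`, so `pdev.into()` takes a
        // refcount through `AlwaysRefCounted`.
        let drvdata = KBox::new(Self { pdev: pdev.into() }, GFP_KERNEL)?;
        Ok(drvdata.into())
    }
}
```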
diff --git a/rust/kernel/platform.rs b/rust/kernel/platform.rs
index 1297f5292ba9..4917cb34e2fe 100644
--- a/rust/kernel/platform.rs
+++ b/rust/kernel/platform.rs
@@ -5,7 +5,7 @@
//! C header: [`include/linux/platform_device.h`](srctree/include/linux/platform_device.h)
use crate::{
- bindings, container_of, device, driver,
+ bindings, device, driver,
error::{to_result, Result},
of,
prelude::*,
@@ -14,7 +14,11 @@ use crate::{
ThisModule,
};
-use core::ptr::addr_of_mut;
+use core::{
+ marker::PhantomData,
+ ops::Deref,
+ ptr::{addr_of_mut, NonNull},
+};
/// An adapter for the registration of platform drivers.
pub struct Adapter<T: Driver>(T);
@@ -54,14 +58,14 @@ unsafe impl<T: Driver + 'static> driver::RegistrationOps for Adapter<T> {
impl<T: Driver + 'static> Adapter<T> {
extern "C" fn probe_callback(pdev: *mut bindings::platform_device) -> kernel::ffi::c_int {
- // SAFETY: The platform bus only ever calls the probe callback with a valid `pdev`.
- let dev = unsafe { device::Device::get_device(addr_of_mut!((*pdev).dev)) };
- // SAFETY: `dev` is guaranteed to be embedded in a valid `struct platform_device` by the
- // call above.
- let mut pdev = unsafe { Device::from_dev(dev) };
+ // SAFETY: The platform bus only ever calls the probe callback with a valid pointer to a
+ // `struct platform_device`.
+ //
+ // INVARIANT: `pdev` is valid for the duration of `probe_callback()`.
+ let pdev = unsafe { &*pdev.cast::<Device<device::Core>>() };
let info = <Self as driver::Adapter>::id_info(pdev.as_ref());
- match T::probe(&mut pdev, info) {
+ match T::probe(pdev, info) {
Ok(data) => {
// Let the `struct platform_device` own a reference of the driver's private data.
// SAFETY: By the type invariant `pdev.as_raw` returns a valid pointer to a
@@ -120,7 +124,7 @@ macro_rules! module_platform_driver {
/// # Example
///
///```
-/// # use kernel::{bindings, c_str, of, platform};
+/// # use kernel::{bindings, c_str, device::Core, of, platform};
///
/// struct MyDriver;
///
@@ -138,14 +142,14 @@ macro_rules! module_platform_driver {
/// const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
///
/// fn probe(
-/// _pdev: &mut platform::Device,
+/// _pdev: &platform::Device<Core>,
/// _id_info: Option<&Self::IdInfo>,
/// ) -> Result<Pin<KBox<Self>>> {
/// Err(ENODEV)
/// }
/// }
///```
-pub trait Driver {
+pub trait Driver: Send {
/// The type holding driver private data about each device id supported by the driver.
///
/// TODO: Use associated_type_defaults once stabilized:
@@ -160,41 +164,79 @@ pub trait Driver {
///
/// Called when a new platform device is added or discovered.
/// Implementers should attempt to initialize the device here.
- fn probe(dev: &mut Device, id_info: Option<&Self::IdInfo>) -> Result<Pin<KBox<Self>>>;
+ fn probe(dev: &Device<device::Core>, id_info: Option<&Self::IdInfo>)
+ -> Result<Pin<KBox<Self>>>;
}
/// The platform device representation.
///
-/// A platform device is based on an always reference counted `device:Device` instance. Cloning a
-/// platform device, hence, also increments the base device' reference count.
+/// This structure represents the Rust abstraction for a C `struct platform_device`. It wraps an
+/// already existing C `struct platform_device` that Rust code gets passed from the C side.
///
/// # Invariants
///
-/// `Device` holds a valid reference of `ARef<device::Device>` whose underlying `struct device` is a
-/// member of a `struct platform_device`.
-#[derive(Clone)]
-pub struct Device(ARef<device::Device>);
+/// A [`Device`] instance represents a valid `struct platform_device` created by the C portion of
+/// the kernel.
+#[repr(transparent)]
+pub struct Device<Ctx: device::DeviceContext = device::Normal>(
+ Opaque<bindings::platform_device>,
+ PhantomData<Ctx>,
+);
impl Device {
- /// Convert a raw kernel device into a `Device`
- ///
- /// # Safety
- ///
- /// `dev` must be an `Aref<device::Device>` whose underlying `bindings::device` is a member of a
- /// `bindings::platform_device`.
- unsafe fn from_dev(dev: ARef<device::Device>) -> Self {
- Self(dev)
+ fn as_raw(&self) -> *mut bindings::platform_device {
+ self.0.get()
}
+}
- fn as_raw(&self) -> *mut bindings::platform_device {
- // SAFETY: By the type invariant `self.0.as_raw` is a pointer to the `struct device`
- // embedded in `struct platform_device`.
- unsafe { container_of!(self.0.as_raw(), bindings::platform_device, dev) }.cast_mut()
+impl Deref for Device<device::Core> {
+ type Target = Device;
+
+ fn deref(&self) -> &Self::Target {
+ let ptr: *const Self = self;
+
+ // CAST: `Device<Ctx>` is a transparent wrapper of `Opaque<bindings::platform_device>`.
+ let ptr = ptr.cast::<Device>();
+
+ // SAFETY: `ptr` was derived from `&self`.
+ unsafe { &*ptr }
+ }
+}
+
+impl From<&Device<device::Core>> for ARef<Device> {
+ fn from(dev: &Device<device::Core>) -> Self {
+ (&**dev).into()
+ }
+}
+
+// SAFETY: Instances of `Device` are always reference-counted.
+unsafe impl crate::types::AlwaysRefCounted for Device {
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
+ unsafe { bindings::get_device(self.as_ref().as_raw()) };
+ }
+
+ unsafe fn dec_ref(obj: NonNull<Self>) {
+ // SAFETY: The safety requirements guarantee that the refcount is non-zero.
+ unsafe { bindings::platform_device_put(obj.cast().as_ptr()) }
}
}
impl AsRef<device::Device> for Device {
fn as_ref(&self) -> &device::Device {
- &self.0
+ // SAFETY: By the type invariant of `Self`, `self.as_raw()` is a pointer to a valid
+ // `struct platform_device`.
+ let dev = unsafe { addr_of_mut!((*self.as_raw()).dev) };
+
+ // SAFETY: `dev` points to a valid `struct device`.
+ unsafe { device::Device::as_ref(dev) }
}
}
+
+// SAFETY: A `Device` is always reference-counted and can be released from any thread.
+unsafe impl Send for Device {}
+
+// SAFETY: `Device` can be shared among threads because all methods of `Device`
+// (i.e. `Device<Normal>`) are thread safe.
+unsafe impl Sync for Device {}
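The platform rework mirrors the PCI one: `Deref` coerces `&platform::Device<Core>` to
`&platform::Device`, so `as_ref()`-based logging and all `Normal`-context methods keep working
unchanged. A minimal sketch under the same assumptions (`MyPlatformDriver` is a placeholder; a
real driver would populate `OF_ID_TABLE`):

```rust
use kernel::{device::Core, of, platform, prelude::*, types::ARef};

struct MyPlatformDriver {
    pdev: ARef<platform::Device>,
}

impl platform::Driver for MyPlatformDriver {
    type IdInfo = ();
    const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = None;

    fn probe(
        pdev: &platform::Device<Core>,
        _id_info: Option<&Self::IdInfo>,
    ) -> Result<Pin<KBox<Self>>> {
        // `as_ref()` yields the base `device::Device` for the dev_*!() macros.
        dev_info!(pdev.as_ref(), "probed\n");

        // Via `Deref` and `AlwaysRefCounted`, `pdev.into()` produces an
        // owned, refcounted handle usable after probe() returns.
        let drvdata = KBox::new(Self { pdev: pdev.into() }, GFP_KERNEL)?;
        Ok(drvdata.into())
    }
}
```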
diff --git a/samples/Kconfig b/samples/Kconfig
index 820e00b2ed68..09011be2391a 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -300,6 +300,15 @@ config SAMPLE_CHECK_EXEC
demonstrate how they should be used with execveat(2) +
AT_EXECVE_CHECK.
+config SAMPLE_HUNG_TASK
+ tristate "Hung task detector test code"
+ depends on DETECT_HUNG_TASK && DEBUG_FS
+ help
+ Build a module which provides a simple debugfs file. If a user reads
+ the file, it sleeps for a long time (256 seconds) while holding a
+ mutex. Thus, if two or more processes read this file, the blocked
+ reader will be detected by the hung_task watchdog.
+
source "samples/rust/Kconfig"
source "samples/damon/Kconfig"
diff --git a/samples/Makefile b/samples/Makefile
index f24cd0d72dd0..bf6e6fca5410 100644
--- a/samples/Makefile
+++ b/samples/Makefile
@@ -42,3 +42,4 @@ obj-$(CONFIG_SAMPLE_FPROBE) += fprobe/
obj-$(CONFIG_SAMPLES_RUST) += rust/
obj-$(CONFIG_SAMPLE_DAMON_WSSE) += damon/
obj-$(CONFIG_SAMPLE_DAMON_PRCL) += damon/
+obj-$(CONFIG_SAMPLE_HUNG_TASK) += hung_task/
diff --git a/samples/damon/Kconfig b/samples/damon/Kconfig
index 63f6dcd71daa..564c49ed69a2 100644
--- a/samples/damon/Kconfig
+++ b/samples/damon/Kconfig
@@ -3,7 +3,7 @@
menu "DAMON Samples"
config SAMPLE_DAMON_WSSE
- bool "DAMON sameple module for working set size estimation"
+ bool "DAMON sample module for working set size estimation"
depends on DAMON && DAMON_VADDR
help
This builds DAMON sample module for working set size estimation.
@@ -15,7 +15,7 @@ config SAMPLE_DAMON_WSSE
If unsure, say N.
config SAMPLE_DAMON_PRCL
- bool "DAMON sameple module for access-aware proactive reclamation"
+ bool "DAMON sample module for access-aware proactive reclamation"
depends on DAMON && DAMON_VADDR
help
This builds DAMON sample module for access-aware proactive
diff --git a/samples/hung_task/Makefile b/samples/hung_task/Makefile
new file mode 100644
index 000000000000..f4d6ab563488
--- /dev/null
+++ b/samples/hung_task/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_SAMPLE_HUNG_TASK) += hung_task_mutex.o
diff --git a/samples/hung_task/hung_task_mutex.c b/samples/hung_task/hung_task_mutex.c
new file mode 100644
index 000000000000..47ed38239ea3
--- /dev/null
+++ b/samples/hung_task/hung_task_mutex.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * hung_task_mutex.c - Sample code which causes hung task by mutex
+ *
+ * Usage: load this module and read `<debugfs>/hung_task/mutex`
+ * by 2 or more processes.
+ *
+ * This is for testing the kernel hung_task error message.
+ * Note that this can make your system freeze and may
+ * cause a panic, so do not use it except for testing.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+#define HUNG_TASK_DIR "hung_task"
+#define HUNG_TASK_FILE "mutex"
+#define SLEEP_SECOND 256
+
+static const char dummy_string[] = "This is a dummy string.";
+static DEFINE_MUTEX(dummy_mutex);
+static struct dentry *hung_task_dir;
+
+static ssize_t read_dummy(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ /* If the second task waits on the lock, its sleep is uninterruptible. */
+ guard(mutex)(&dummy_mutex);
+
+ /* When the first task sleeps here, it is interruptible. */
+ msleep_interruptible(SLEEP_SECOND * 1000);
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ dummy_string, sizeof(dummy_string));
+}
+
+static const struct file_operations hung_task_fops = {
+ .read = read_dummy,
+};
+
+static int __init hung_task_sample_init(void)
+{
+ hung_task_dir = debugfs_create_dir(HUNG_TASK_DIR, NULL);
+ if (IS_ERR(hung_task_dir))
+ return PTR_ERR(hung_task_dir);
+
+ debugfs_create_file(HUNG_TASK_FILE, 0400, hung_task_dir,
+ NULL, &hung_task_fops);
+
+ return 0;
+}
+
+static void __exit hung_task_sample_exit(void)
+{
+ debugfs_remove_recursive(hung_task_dir);
+}
+
+module_init(hung_task_sample_init);
+module_exit(hung_task_sample_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Masami Hiramatsu");
+MODULE_DESCRIPTION("Simple sleep under mutex file for testing hung task");
diff --git a/samples/kmemleak/kmemleak-test.c b/samples/kmemleak/kmemleak-test.c
index 544c36d51d56..8609812a37eb 100644
--- a/samples/kmemleak/kmemleak-test.c
+++ b/samples/kmemleak/kmemleak-test.c
@@ -40,25 +40,25 @@ static int kmemleak_test_init(void)
pr_info("Kmemleak testing\n");
/* make some orphan objects */
- pr_info("kmalloc(32) = %p\n", kmalloc(32, GFP_KERNEL));
- pr_info("kmalloc(32) = %p\n", kmalloc(32, GFP_KERNEL));
- pr_info("kmalloc(1024) = %p\n", kmalloc(1024, GFP_KERNEL));
- pr_info("kmalloc(1024) = %p\n", kmalloc(1024, GFP_KERNEL));
- pr_info("kmalloc(2048) = %p\n", kmalloc(2048, GFP_KERNEL));
- pr_info("kmalloc(2048) = %p\n", kmalloc(2048, GFP_KERNEL));
- pr_info("kmalloc(4096) = %p\n", kmalloc(4096, GFP_KERNEL));
- pr_info("kmalloc(4096) = %p\n", kmalloc(4096, GFP_KERNEL));
+ pr_info("kmalloc(32) = 0x%px\n", kmalloc(32, GFP_KERNEL));
+ pr_info("kmalloc(32) = 0x%px\n", kmalloc(32, GFP_KERNEL));
+ pr_info("kmalloc(1024) = 0x%px\n", kmalloc(1024, GFP_KERNEL));
+ pr_info("kmalloc(1024) = 0x%px\n", kmalloc(1024, GFP_KERNEL));
+ pr_info("kmalloc(2048) = 0x%px\n", kmalloc(2048, GFP_KERNEL));
+ pr_info("kmalloc(2048) = 0x%px\n", kmalloc(2048, GFP_KERNEL));
+ pr_info("kmalloc(4096) = 0x%px\n", kmalloc(4096, GFP_KERNEL));
+ pr_info("kmalloc(4096) = 0x%px\n", kmalloc(4096, GFP_KERNEL));
#ifndef CONFIG_MODULES
- pr_info("kmem_cache_alloc(files_cachep) = %p\n",
+ pr_info("kmem_cache_alloc(files_cachep) = 0x%px\n",
kmem_cache_alloc(files_cachep, GFP_KERNEL));
- pr_info("kmem_cache_alloc(files_cachep) = %p\n",
+ pr_info("kmem_cache_alloc(files_cachep) = 0x%px\n",
kmem_cache_alloc(files_cachep, GFP_KERNEL));
#endif
- pr_info("vmalloc(64) = %p\n", vmalloc(64));
- pr_info("vmalloc(64) = %p\n", vmalloc(64));
- pr_info("vmalloc(64) = %p\n", vmalloc(64));
- pr_info("vmalloc(64) = %p\n", vmalloc(64));
- pr_info("vmalloc(64) = %p\n", vmalloc(64));
+ pr_info("vmalloc(64) = 0x%px\n", vmalloc(64));
+ pr_info("vmalloc(64) = 0x%px\n", vmalloc(64));
+ pr_info("vmalloc(64) = 0x%px\n", vmalloc(64));
+ pr_info("vmalloc(64) = 0x%px\n", vmalloc(64));
+ pr_info("vmalloc(64) = 0x%px\n", vmalloc(64));
/*
* Add elements to a list. They should only appear as orphan
@@ -66,7 +66,7 @@ static int kmemleak_test_init(void)
*/
for (i = 0; i < 10; i++) {
elem = kzalloc(sizeof(*elem), GFP_KERNEL);
- pr_info("kzalloc(sizeof(*elem)) = %p\n", elem);
+ pr_info("kzalloc(sizeof(*elem)) = 0x%px\n", elem);
if (!elem)
return -ENOMEM;
INIT_LIST_HEAD(&elem->list);
@@ -75,11 +75,11 @@ static int kmemleak_test_init(void)
for_each_possible_cpu(i) {
per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL);
- pr_info("kmalloc(129) = %p\n",
+ pr_info("kmalloc(129) = 0x%px\n",
per_cpu(kmemleak_test_pointer, i));
}
- pr_info("__alloc_percpu(64, 4) = %p\n", __alloc_percpu(64, 4));
+ pr_info("__alloc_percpu(64, 4) = 0x%px\n", __alloc_percpu(64, 4));
return 0;
}
diff --git a/samples/rust/rust_dma.rs b/samples/rust/rust_dma.rs
index 908acd34b8db..874c2c964afa 100644
--- a/samples/rust/rust_dma.rs
+++ b/samples/rust/rust_dma.rs
@@ -4,10 +4,10 @@
//!
//! To make this driver probe, QEMU must be run with `-device pci-testdev`.
-use kernel::{bindings, dma::CoherentAllocation, pci, prelude::*};
+use kernel::{bindings, device::Core, dma::CoherentAllocation, pci, prelude::*, types::ARef};
struct DmaSampleDriver {
- pdev: pci::Device,
+ pdev: ARef<pci::Device>,
ca: CoherentAllocation<MyStruct>,
}
@@ -48,7 +48,7 @@ impl pci::Driver for DmaSampleDriver {
type IdInfo = ();
const ID_TABLE: pci::IdTable<Self::IdInfo> = &PCI_TABLE;
- fn probe(pdev: &mut pci::Device, _info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
+ fn probe(pdev: &pci::Device<Core>, _info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
dev_info!(pdev.as_ref(), "Probe DMA test driver.\n");
let ca: CoherentAllocation<MyStruct> =
@@ -64,7 +64,7 @@ impl pci::Driver for DmaSampleDriver {
let drvdata = KBox::new(
Self {
- pdev: pdev.clone(),
+ pdev: pdev.into(),
ca,
},
GFP_KERNEL,
diff --git a/samples/rust/rust_driver_faux.rs b/samples/rust/rust_driver_faux.rs
index 378bab4b587d..ecc9fd378cbd 100644
--- a/samples/rust/rust_driver_faux.rs
+++ b/samples/rust/rust_driver_faux.rs
@@ -20,7 +20,7 @@ impl Module for SampleModule {
fn init(_module: &'static ThisModule) -> Result<Self> {
pr_info!("Initialising Rust Faux Device Sample\n");
- let reg = faux::Registration::new(c_str!("rust-faux-sample-device"))?;
+ let reg = faux::Registration::new(c_str!("rust-faux-sample-device"), None)?;
dev_info!(reg.as_ref(), "Hello from faux device!\n");
diff --git a/samples/rust/rust_driver_pci.rs b/samples/rust/rust_driver_pci.rs
index 364a0660a743..2bb260aebc9e 100644
--- a/samples/rust/rust_driver_pci.rs
+++ b/samples/rust/rust_driver_pci.rs
@@ -4,7 +4,7 @@
//!
//! To make this driver probe, QEMU must be run with `-device pci-testdev`.
-use kernel::{bindings, c_str, devres::Devres, pci, prelude::*};
+use kernel::{bindings, c_str, device::Core, devres::Devres, pci, prelude::*, types::ARef};
struct Regs;
@@ -26,7 +26,7 @@ impl TestIndex {
}
struct SampleDriver {
- pdev: pci::Device,
+ pdev: ARef<pci::Device>,
bar: Devres<Bar0>,
}
@@ -43,17 +43,17 @@ kernel::pci_device_table!(
impl SampleDriver {
fn testdev(index: &TestIndex, bar: &Bar0) -> Result<u32> {
// Select the test.
- bar.writeb(index.0, Regs::TEST);
+ bar.write8(index.0, Regs::TEST);
- let offset = u32::from_le(bar.readl(Regs::OFFSET)) as usize;
- let data = bar.readb(Regs::DATA);
+ let offset = u32::from_le(bar.read32(Regs::OFFSET)) as usize;
+ let data = bar.read8(Regs::DATA);
// Write `data` to `offset` to increase `count` by one.
//
- // Note that we need `try_writeb`, since `offset` can't be checked at compile-time.
- bar.try_writeb(data, offset)?;
+ // Note that we need `try_write8`, since `offset` can't be checked at compile-time.
+ bar.try_write8(data, offset)?;
- Ok(bar.readl(Regs::COUNT))
+ Ok(bar.read32(Regs::COUNT))
}
}
@@ -62,7 +62,7 @@ impl pci::Driver for SampleDriver {
const ID_TABLE: pci::IdTable<Self::IdInfo> = &PCI_TABLE;
- fn probe(pdev: &mut pci::Device, info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
+ fn probe(pdev: &pci::Device<Core>, info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
dev_dbg!(
pdev.as_ref(),
"Probe Rust PCI driver sample (PCI ID: 0x{:x}, 0x{:x}).\n",
@@ -77,7 +77,7 @@ impl pci::Driver for SampleDriver {
let drvdata = KBox::new(
Self {
- pdev: pdev.clone(),
+ pdev: pdev.into(),
bar,
},
GFP_KERNEL,
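Besides the `Device<Core>` conversion, this sample also picks up the width-suffixed I/O accessor
names (`read8`/`read32`/`write8`/`try_write8`) replacing the old `readb`/`readl` style. A hedged
sketch of the access pattern, assuming the sample's `Bar0` alias; the register offsets and the
`poke_testdev` name are placeholders:

```rust
fn poke_testdev(bar: &Bar0, data: u8, offset: usize) -> Result<u32> {
    // Offsets known at compile time are validated against the BAR size.
    bar.write8(data, 0x0);

    // Offsets only known at runtime need the fallible variant.
    bar.try_write8(data, offset)?;

    Ok(bar.read32(0x4))
}
```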
diff --git a/samples/rust/rust_driver_platform.rs b/samples/rust/rust_driver_platform.rs
index f7a0f1b29d1d..8b42b3cfb363 100644
--- a/samples/rust/rust_driver_platform.rs
+++ b/samples/rust/rust_driver_platform.rs
@@ -2,10 +2,10 @@
//! Rust Platform driver sample.
-use kernel::{c_str, of, platform, prelude::*};
+use kernel::{c_str, device::Core, of, platform, prelude::*, types::ARef};
struct SampleDriver {
- pdev: platform::Device,
+ pdev: ARef<platform::Device>,
}
struct Info(u32);
@@ -21,14 +21,17 @@ impl platform::Driver for SampleDriver {
type IdInfo = Info;
const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
- fn probe(pdev: &mut platform::Device, info: Option<&Self::IdInfo>) -> Result<Pin<KBox<Self>>> {
+ fn probe(
+ pdev: &platform::Device<Core>,
+ info: Option<&Self::IdInfo>,
+ ) -> Result<Pin<KBox<Self>>> {
dev_dbg!(pdev.as_ref(), "Probe Rust Platform driver sample.\n");
if let Some(info) = info {
dev_info!(pdev.as_ref(), "Probed with info: '{}'.\n", info.0);
}
- let drvdata = KBox::new(Self { pdev: pdev.clone() }, GFP_KERNEL)?;
+ let drvdata = KBox::new(Self { pdev: pdev.into() }, GFP_KERNEL)?;
Ok(drvdata.into())
}
diff --git a/samples/rust/rust_misc_device.rs b/samples/rust/rust_misc_device.rs
index d3785e7c0330..c881fd6dbd08 100644
--- a/samples/rust/rust_misc_device.rs
+++ b/samples/rust/rust_misc_device.rs
@@ -3,97 +3,98 @@
// Copyright (C) 2024 Google LLC.
//! Rust misc device sample.
+//!
+//! Below is an example userspace C program that exercises this sample's functionality.
+//!
+//! ```c
+//! #include <stdio.h>
+//! #include <stdlib.h>
+//! #include <errno.h>
+//! #include <fcntl.h>
+//! #include <unistd.h>
+//! #include <sys/ioctl.h>
+//!
+//! #define RUST_MISC_DEV_FAIL _IO('|', 0)
+//! #define RUST_MISC_DEV_HELLO _IO('|', 0x80)
+//! #define RUST_MISC_DEV_GET_VALUE _IOR('|', 0x81, int)
+//! #define RUST_MISC_DEV_SET_VALUE _IOW('|', 0x82, int)
+//!
+//! int main() {
+//! int value, new_value;
+//! int fd, ret;
+//!
+//! // Open the device file
+//! printf("Opening /dev/rust-misc-device for reading and writing\n");
+//! fd = open("/dev/rust-misc-device", O_RDWR);
+//! if (fd < 0) {
+//! perror("open");
+//! return errno;
+//! }
+//!
+//! // Make call into driver to say "hello"
+//! printf("Calling Hello\n");
+//! ret = ioctl(fd, RUST_MISC_DEV_HELLO, NULL);
+//! if (ret < 0) {
+//! perror("ioctl: Failed to call into Hello");
+//! close(fd);
+//! return errno;
+//! }
+//!
+//! // Get initial value
+//! printf("Fetching initial value\n");
+//! ret = ioctl(fd, RUST_MISC_DEV_GET_VALUE, &value);
+//! if (ret < 0) {
+//! perror("ioctl: Failed to fetch the initial value");
+//! close(fd);
+//! return errno;
+//! }
+//!
+//! value++;
+//!
+//! // Set value to something different
+//! printf("Submitting new value (%d)\n", value);
+//! ret = ioctl(fd, RUST_MISC_DEV_SET_VALUE, &value);
+//! if (ret < 0) {
+//! perror("ioctl: Failed to submit new value");
+//! close(fd);
+//! return errno;
+//! }
+//!
+//! // Ensure new value was applied
+//! printf("Fetching new value\n");
+//! ret = ioctl(fd, RUST_MISC_DEV_GET_VALUE, &new_value);
+//! if (ret < 0) {
+//! perror("ioctl: Failed to fetch the new value");
+//! close(fd);
+//! return errno;
+//! }
+//!
+//! if (value != new_value) {
+//! printf("Failed: Committed and retrieved values are different (%d - %d)\n", value, new_value);
+//! close(fd);
+//! return -1;
+//! }
+//!
+//! // Call the unsuccessful ioctl
+//! printf("Attempting to call in to an non-existent IOCTL\n");
+//! ret = ioctl(fd, RUST_MISC_DEV_FAIL, NULL);
+//! if (ret < 0) {
+//! perror("ioctl: Succeeded to fail - this was expected");
+//! } else {
+//! printf("ioctl: Failed to fail\n");
+//! close(fd);
+//! return -1;
+//! }
+//!
+//! // Close the device file
+//! printf("Closing /dev/rust-misc-device\n");
+//! close(fd);
+//!
+//! printf("Success\n");
+//! return 0;
+//! }
+//! ```
-/// Below is an example userspace C program that exercises this sample's functionality.
-///
-/// ```c
-/// #include <stdio.h>
-/// #include <stdlib.h>
-/// #include <errno.h>
-/// #include <fcntl.h>
-/// #include <unistd.h>
-/// #include <sys/ioctl.h>
-///
-/// #define RUST_MISC_DEV_FAIL _IO('|', 0)
-/// #define RUST_MISC_DEV_HELLO _IO('|', 0x80)
-/// #define RUST_MISC_DEV_GET_VALUE _IOR('|', 0x81, int)
-/// #define RUST_MISC_DEV_SET_VALUE _IOW('|', 0x82, int)
-///
-/// int main() {
-/// int value, new_value;
-/// int fd, ret;
-///
-/// // Open the device file
-/// printf("Opening /dev/rust-misc-device for reading and writing\n");
-/// fd = open("/dev/rust-misc-device", O_RDWR);
-/// if (fd < 0) {
-/// perror("open");
-/// return errno;
-/// }
-///
-/// // Make call into driver to say "hello"
-/// printf("Calling Hello\n");
-/// ret = ioctl(fd, RUST_MISC_DEV_HELLO, NULL);
-/// if (ret < 0) {
-/// perror("ioctl: Failed to call into Hello");
-/// close(fd);
-/// return errno;
-/// }
-///
-/// // Get initial value
-/// printf("Fetching initial value\n");
-/// ret = ioctl(fd, RUST_MISC_DEV_GET_VALUE, &value);
-/// if (ret < 0) {
-/// perror("ioctl: Failed to fetch the initial value");
-/// close(fd);
-/// return errno;
-/// }
-///
-/// value++;
-///
-/// // Set value to something different
-/// printf("Submitting new value (%d)\n", value);
-/// ret = ioctl(fd, RUST_MISC_DEV_SET_VALUE, &value);
-/// if (ret < 0) {
-/// perror("ioctl: Failed to submit new value");
-/// close(fd);
-/// return errno;
-/// }
-///
-/// // Ensure new value was applied
-/// printf("Fetching new value\n");
-/// ret = ioctl(fd, RUST_MISC_DEV_GET_VALUE, &new_value);
-/// if (ret < 0) {
-/// perror("ioctl: Failed to fetch the new value");
-/// close(fd);
-/// return errno;
-/// }
-///
-/// if (value != new_value) {
-/// printf("Failed: Committed and retrieved values are different (%d - %d)\n", value, new_value);
-/// close(fd);
-/// return -1;
-/// }
-///
-/// // Call the unsuccessful ioctl
-/// printf("Attempting to call in to an non-existent IOCTL\n");
-/// ret = ioctl(fd, RUST_MISC_DEV_FAIL, NULL);
-/// if (ret < 0) {
-/// perror("ioctl: Succeeded to fail - this was expected");
-/// } else {
-/// printf("ioctl: Failed to fail\n");
-/// close(fd);
-/// return -1;
-/// }
-///
-/// // Close the device file
-/// printf("Closing /dev/rust-misc-device\n");
-/// close(fd);
-///
-/// printf("Success\n");
-/// return 0;
-/// }
-/// ```
use core::pin::Pin;
use kernel::{
diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h
index 999f78d380ae..1a05fc153353 100644
--- a/samples/trace_events/trace-events-sample.h
+++ b/samples/trace_events/trace-events-sample.h
@@ -319,7 +319,8 @@ TRACE_EVENT(foo_bar,
__assign_cpumask(cpum, cpumask_bits(mask));
),
- TP_printk("foo %s %d %s %s %s %s %s %s (%s) (%s) %s", __entry->foo, __entry->bar,
+ TP_printk("foo %s %d %s %s %s %s %s %s (%s) (%s) %s [%d] %*pbl",
+ __entry->foo, __entry->bar,
/*
* Notice here the use of some helper functions. This includes:
@@ -370,7 +371,10 @@ TRACE_EVENT(foo_bar,
__get_str(str), __get_str(lstr),
__get_bitmask(cpus), __get_cpumask(cpum),
- __get_str(vstr))
+ __get_str(vstr),
+ __get_dynamic_array_len(cpus),
+ __get_dynamic_array_len(cpus),
+ __get_dynamic_array(cpus))
);
/*
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 57620b439a1f..4d543054f723 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -275,9 +275,9 @@ objtool-args-$(CONFIG_MITIGATION_SLS) += --sls
objtool-args-$(CONFIG_STACK_VALIDATION) += --stackval
objtool-args-$(CONFIG_HAVE_STATIC_CALL_INLINE) += --static-call
objtool-args-$(CONFIG_HAVE_UACCESS_VALIDATION) += --uaccess
-objtool-args-$(CONFIG_GCOV_KERNEL) += --no-unreachable
+objtool-args-$(or $(CONFIG_GCOV_KERNEL),$(CONFIG_KCOV)) += --no-unreachable
objtool-args-$(CONFIG_PREFIX_SYMBOLS) += --prefix=$(CONFIG_FUNCTION_PADDING_BYTES)
-objtool-args-$(CONFIG_OBJTOOL_WERROR) += --Werror --backtrace
+objtool-args-$(CONFIG_OBJTOOL_WERROR) += --Werror
objtool-args = $(objtool-args-y) \
$(if $(delay-objtool), --link) \
diff --git a/scripts/Makefile.vmlinux_o b/scripts/Makefile.vmlinux_o
index 0b6e2ebf60dc..938c7457717e 100644
--- a/scripts/Makefile.vmlinux_o
+++ b/scripts/Makefile.vmlinux_o
@@ -30,13 +30,20 @@ endif
# objtool for vmlinux.o
# ---------------------------------------------------------------------------
#
-# For LTO and IBT, objtool doesn't run on individual translation units.
-# Run everything on vmlinux instead.
+# For delay-objtool (IBT or LTO), objtool doesn't run on individual translation
+# units. Instead it runs on vmlinux.o.
+#
+# For !delay-objtool + CONFIG_NOINSTR_VALIDATION, it runs on both translation
+# units and vmlinux.o, with the latter only used for noinstr/unret validation.
objtool-enabled := $(or $(delay-objtool),$(CONFIG_NOINSTR_VALIDATION))
-vmlinux-objtool-args-$(delay-objtool) += $(objtool-args-y)
-vmlinux-objtool-args-$(CONFIG_GCOV_KERNEL) += --no-unreachable
+ifeq ($(delay-objtool),y)
+vmlinux-objtool-args-y += $(objtool-args-y)
+else
+vmlinux-objtool-args-$(CONFIG_OBJTOOL_WERROR) += --Werror
+endif
+
vmlinux-objtool-args-$(CONFIG_NOINSTR_VALIDATION) += --noinstr \
$(if $(or $(CONFIG_MITIGATION_UNRET_ENTRY),$(CONFIG_MITIGATION_SRSO)), --unret)
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 7b28ad331742..784912f570e9 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -113,7 +113,8 @@ Options:
--max-line-length=n set the maximum line length, (default $max_line_length)
if exceeded, warn on patches
requires --strict for use with --file
- --min-conf-desc-length=n set the min description length, if shorter, warn
+ --min-conf-desc-length=n set the minimum description length for config symbols
+ in lines, if shorter, warn (default $min_conf_desc_length)
--tab-size=n set the number of spaces for tab (default $tabsize)
--root=PATH PATH to the kernel tree root
--no-summary suppress the per-file summary
@@ -3645,7 +3646,7 @@ sub process {
$help_length < $min_conf_desc_length) {
my $stat_real = get_stat_real($linenr, $ln - 1);
WARN("CONFIG_DESCRIPTION",
- "please write a help paragraph that fully describes the config symbol\n" . "$here\n$stat_real\n");
+ "please write a help paragraph that fully describes the config symbol with at least $min_conf_desc_length lines\n" . "$here\n$stat_real\n");
}
}
diff --git a/scripts/coccinelle/misc/secs_to_jiffies.cocci b/scripts/coccinelle/misc/secs_to_jiffies.cocci
index 8bbb2884ea5d..416f348174ca 100644
--- a/scripts/coccinelle/misc/secs_to_jiffies.cocci
+++ b/scripts/coccinelle/misc/secs_to_jiffies.cocci
@@ -20,3 +20,13 @@ virtual patch
- msecs_to_jiffies(C * MSEC_PER_SEC)
+ secs_to_jiffies(C)
+
+@depends on patch@ expression E; @@
+
+- msecs_to_jiffies(E * 1000)
++ secs_to_jiffies(E)
+
+@depends on patch@ expression E; @@
+
+- msecs_to_jiffies(E * MSEC_PER_SEC)
++ secs_to_jiffies(E)
diff --git a/scripts/extract-fwblobs b/scripts/extract-fwblobs
new file mode 100755
index 000000000000..53729124e5a0
--- /dev/null
+++ b/scripts/extract-fwblobs
@@ -0,0 +1,30 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# -----------------------------------------------------------------------------
+# Extracts the vmlinux built-in firmware blobs - requires a non-stripped image
+# -----------------------------------------------------------------------------
+
+if [ -z "$1" ]; then
+ echo "Must provide a non-stripped vmlinux as argument"
+ exit 1
+fi
+
+read -r RD_ADDR_HEX RD_OFF_HEX <<< "$( readelf -SW "$1" |\
+grep -w rodata | awk '{print "0x"$5" 0x"$6}' )"
+
+FW_SYMS="$(readelf -sW "$1" |\
+awk -n '/fw_end/ { end=$2 ; print name " 0x" start " 0x" end; } { start=$2; name=$8; }')"
+
+while IFS= read -r entry; do
+ read -r FW_NAME FW_ADDR_ST_HEX FW_ADDR_END_HEX <<< "$entry"
+
+ # Note that the kernel prepends _fw_ and appends _bin to the FW name
+ # in rodata; strip those affixes here.
+ FW_NAME=${FW_NAME:4:-4}
+
+ FW_OFFSET="$(printf "%d" $((FW_ADDR_ST_HEX - RD_ADDR_HEX + RD_OFF_HEX)))"
+ FW_SIZE="$(printf "%d" $((FW_ADDR_END_HEX - FW_ADDR_ST_HEX)))"
+
+ dd if="$1" of="./${FW_NAME}" bs="${FW_SIZE}" count=1 iflag=skip_bytes skip="${FW_OFFSET}"
+done <<< "${FW_SYMS}"
diff --git a/scripts/gdb/linux/cpus.py b/scripts/gdb/linux/cpus.py
index 8f7c4fb78c2c..f506965ea759 100644
--- a/scripts/gdb/linux/cpus.py
+++ b/scripts/gdb/linux/cpus.py
@@ -46,7 +46,7 @@ def per_cpu(var_ptr, cpu):
# !CONFIG_SMP case
offset = 0
pointer = var_ptr.cast(utils.get_long_type()) + offset
- return pointer.cast(var_ptr.type).dereference()
+ return pointer.cast(var_ptr.type)
cpu_mask = {}
@@ -149,11 +149,29 @@ Note that VAR has to be quoted as string."""
super(PerCpu, self).__init__("lx_per_cpu")
def invoke(self, var, cpu=-1):
- return per_cpu(var.address, cpu)
+ return per_cpu(var.address, cpu).dereference()
PerCpu()
+
+class PerCpuPtr(gdb.Function):
+ """Return per-cpu pointer.
+
+$lx_per_cpu_ptr("VAR"[, CPU]): Return the per-cpu pointer called VAR for the
+given CPU number. If CPU is omitted, the CPU of the current context is used.
+Note that VAR has to be quoted as string."""
+
+ def __init__(self):
+ super(PerCpuPtr, self).__init__("lx_per_cpu_ptr")
+
+ def invoke(self, var, cpu=-1):
+ return per_cpu(var, cpu)
+
+
+PerCpuPtr()
+
+
def get_current_task(cpu):
task_ptr_type = task_type.get_type().pointer()
diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
index f6c1b063775a..b255177301e9 100644
--- a/scripts/gdb/linux/symbols.py
+++ b/scripts/gdb/linux/symbols.py
@@ -14,7 +14,9 @@
import gdb
import os
import re
+import struct
+from itertools import count
from linux import modules, utils, constants
@@ -53,6 +55,29 @@ if hasattr(gdb, 'Breakpoint'):
return False
+def get_vmcore_s390():
+ with utils.qemu_phy_mem_mode():
+ vmcore_info = 0x0e0c
+ paddr_vmcoreinfo_note = gdb.parse_and_eval("*(unsigned long long *)" +
+ hex(vmcore_info))
+ inferior = gdb.selected_inferior()
+ elf_note = inferior.read_memory(paddr_vmcoreinfo_note, 12)
+ n_namesz, n_descsz, n_type = struct.unpack(">III", elf_note)
+ desc_paddr = paddr_vmcoreinfo_note + len(elf_note) + n_namesz + 1
+ return gdb.parse_and_eval("(char *)" + hex(desc_paddr)).string()
+
+
+def get_kerneloffset():
+ if utils.is_target_arch('s390'):
+ try:
+ vmcore_str = get_vmcore_s390()
+ except gdb.error as e:
+ gdb.write("{}\n".format(e))
+ return None
+ return utils.parse_vmcore(vmcore_str).kerneloffset
+ return None
+
+
class LxSymbols(gdb.Command):
"""(Re-)load symbols of Linux kernel and currently loaded modules.
@@ -95,10 +120,14 @@ lx-symbols command."""
except gdb.error:
return str(module_addr)
- attrs = sect_attrs['attrs']
- section_name_to_address = {
- attrs[n]['battr']['attr']['name'].string(): attrs[n]['address']
- for n in range(int(sect_attrs['nsections']))}
+ section_name_to_address = {}
+ for i in count():
+ # this is a NULL-terminated array
+ if sect_attrs['grp']['bin_attrs'][i] == 0x0:
+ break
+
+ attr = sect_attrs['grp']['bin_attrs'][i].dereference()
+ section_name_to_address[attr['attr']['name'].string()] = attr['private']
textaddr = section_name_to_address.get(".text", module_addr)
args = []
@@ -155,7 +184,12 @@ lx-symbols command."""
obj.filename.endswith('vmlinux.debug')):
orig_vmlinux = obj.filename
gdb.execute("symbol-file", to_string=True)
- gdb.execute("symbol-file {0}".format(orig_vmlinux))
+ kerneloffset = get_kerneloffset()
+ if kerneloffset is None:
+ offset_arg = ""
+ else:
+ offset_arg = " -o " + hex(kerneloffset)
+ gdb.execute("symbol-file {0}{1}".format(orig_vmlinux, offset_arg))
self.loaded_modules = []
module_list = modules.module_list()
diff --git a/scripts/gdb/linux/utils.py b/scripts/gdb/linux/utils.py
index 245ab297ea84..03ebdccf5f69 100644
--- a/scripts/gdb/linux/utils.py
+++ b/scripts/gdb/linux/utils.py
@@ -11,6 +11,11 @@
# This work is licensed under the terms of the GNU GPL version 2.
#
+import contextlib
+import dataclasses
+import re
+import typing
+
import gdb
@@ -216,3 +221,33 @@ def gdb_eval_or_none(expresssion):
return gdb.parse_and_eval(expresssion)
except gdb.error:
return None
+
+
+@contextlib.contextmanager
+def qemu_phy_mem_mode():
+ connection = gdb.selected_inferior().connection
+ orig = connection.send_packet("qqemu.PhyMemMode")
+ if orig not in b"01":
+ raise gdb.error("Unexpected qemu.PhyMemMode")
+ orig = orig.decode()
+ if connection.send_packet("Qqemu.PhyMemMode:1") != b"OK":
+ raise gdb.error("Failed to set qemu.PhyMemMode")
+ try:
+ yield
+ finally:
+ if connection.send_packet("Qqemu.PhyMemMode:" + orig) != b"OK":
+ raise gdb.error("Failed to restore qemu.PhyMemMode")
+
+
+@dataclasses.dataclass
+class VmCore:
+ kerneloffset: typing.Optional[int]
+
+
+def parse_vmcore(s):
+ match = re.search(r"KERNELOFFSET=([0-9a-f]+)", s)
+ if match is None:
+ kerneloffset = None
+ else:
+ kerneloffset = int(match.group(1), 16)
+ return VmCore(kerneloffset=kerneloffset)
diff --git a/scripts/generate_rust_target.rs b/scripts/generate_rust_target.rs
index 4fd6b6ab3e32..8667d0ae3c82 100644
--- a/scripts/generate_rust_target.rs
+++ b/scripts/generate_rust_target.rs
@@ -184,7 +184,9 @@ fn main() {
let mut ts = TargetSpec::new();
// `llvm-target`s are taken from `scripts/Makefile.clang`.
- if cfg.has("ARM64") {
+ if cfg.has("ARM") {
+ panic!("arm uses the builtin rustc target");
+ } else if cfg.has("ARM64") {
panic!("arm64 uses the builtin rustc aarch64-unknown-none target");
} else if cfg.has("RISCV") {
if cfg.has("64BIT") {
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 5ac02e198737..4414194bedcf 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -50,6 +50,7 @@ my $output_multiline = 1;
my $output_separator = ", ";
my $output_roles = 0;
my $output_rolestats = 1;
+my $output_substatus = undef;
my $output_section_maxlen = 50;
my $scm = 0;
my $tree = 1;
@@ -269,6 +270,7 @@ if (!GetOptions(
'separator=s' => \$output_separator,
'subsystem!' => \$subsystem,
'status!' => \$status,
+ 'substatus!' => \$output_substatus,
'scm!' => \$scm,
'tree!' => \$tree,
'web!' => \$web,
@@ -314,6 +316,10 @@ $output_multiline = 0 if ($output_separator ne ", ");
$output_rolestats = 1 if ($interactive);
$output_roles = 1 if ($output_rolestats);
+if (!defined $output_substatus) {
+ $output_substatus = $email && $output_roles && -t STDOUT;
+}
+
if ($sections || $letters ne "") {
$sections = 1;
$email = 0;
@@ -637,6 +643,7 @@ my @web = ();
my @bug = ();
my @subsystem = ();
my @status = ();
+my @substatus = ();
my %deduplicate_name_hash = ();
my %deduplicate_address_hash = ();
@@ -651,6 +658,11 @@ if ($scm) {
output(@scm);
}
+if ($output_substatus) {
+ @substatus = uniq(@substatus);
+ output(@substatus);
+}
+
if ($status) {
@status = uniq(@status);
output(@status);
@@ -859,6 +871,7 @@ sub get_maintainers {
@bug = ();
@subsystem = ();
@status = ();
+ @substatus = ();
%deduplicate_name_hash = ();
%deduplicate_address_hash = ();
if ($email_git_all_signature_types) {
@@ -1071,8 +1084,9 @@ MAINTAINER field selection options:
--moderated => include moderated lists(s) if any (default: true)
--s => include subscriber only list(s) if any (default: false)
--remove-duplicates => minimize duplicate email names/addresses
- --roles => show roles (status:subsystem, git-signer, list, etc...)
+ --roles => show roles (role:subsystem, git-signer, list, etc...)
--rolestats => show roles and statistics (commits/total_commits, %)
+ --substatus => show subsystem status if not Maintained (default: match --roles when output is tty)
--file-emails => add email addresses found in -f file (default: 0 (off))
--fixes => for patches, add signatures of commits with 'Fixes: <commit>' (default: 1 (on))
--scm => print SCM tree(s) if any
@@ -1284,8 +1298,9 @@ sub get_maintainer_role {
my $start = find_starting_index($index);
my $end = find_ending_index($index);
- my $role = "unknown";
+ my $role = "maintainer";
my $subsystem = get_subsystem_name($index);
+ my $status = "unknown";
for ($i = $start + 1; $i < $end; $i++) {
my $tv = $typevalue[$i];
@@ -1293,23 +1308,13 @@ sub get_maintainer_role {
my $ptype = $1;
my $pvalue = $2;
if ($ptype eq "S") {
- $role = $pvalue;
+ $status = $pvalue;
}
}
}
- $role = lc($role);
- if ($role eq "supported") {
- $role = "supporter";
- } elsif ($role eq "maintained") {
- $role = "maintainer";
- } elsif ($role eq "odd fixes") {
- $role = "odd fixer";
- } elsif ($role eq "orphan") {
- $role = "orphan minder";
- } elsif ($role eq "obsolete") {
- $role = "obsolete minder";
- } elsif ($role eq "buried alive in reporters") {
+ $status = lc($status);
+ if ($status eq "buried alive in reporters") {
$role = "chief penguin";
}
@@ -1335,7 +1340,9 @@ sub add_categories {
my $start = find_starting_index($index);
my $end = find_ending_index($index);
- push(@subsystem, $typevalue[$start]);
+ my $subsystem = $typevalue[$start];
+ push(@subsystem, $subsystem);
+ my $status = "Unknown";
for ($i = $start + 1; $i < $end; $i++) {
my $tv = $typevalue[$i];
@@ -1386,8 +1393,8 @@ sub add_categories {
}
} elsif ($ptype eq "R") {
if ($email_reviewer) {
- my $subsystem = get_subsystem_name($i);
- push_email_addresses($pvalue, "reviewer:$subsystem" . $suffix);
+ my $subs = get_subsystem_name($i);
+ push_email_addresses($pvalue, "reviewer:$subs" . $suffix);
}
} elsif ($ptype eq "T") {
push(@scm, $pvalue . $suffix);
@@ -1397,9 +1404,14 @@ sub add_categories {
push(@bug, $pvalue . $suffix);
} elsif ($ptype eq "S") {
push(@status, $pvalue . $suffix);
+ $status = $pvalue;
}
}
}
+
+ if ($subsystem ne "THE REST" and $status ne "Maintained") {
+ push(@substatus, $subsystem . " status: " . $status . $suffix)
+ }
}
sub email_inuse {
@@ -1903,6 +1915,7 @@ EOT
$done = 1;
$output_rolestats = 0;
$output_roles = 0;
+ $output_substatus = 0;
last;
} elsif ($nr =~ /^\d+$/ && $nr > 0 && $nr <= $count) {
$selected{$nr - 1} = !$selected{$nr - 1};
diff --git a/scripts/sorttable.c b/scripts/sorttable.c
index 7b4b3714b1af..deed676bfe38 100644
--- a/scripts/sorttable.c
+++ b/scripts/sorttable.c
@@ -857,7 +857,7 @@ static void *sort_mcount_loc(void *arg)
for (void *ptr = vals; ptr < vals + size; ptr += long_size) {
uint64_t key;
- key = long_size == 4 ? r((uint32_t *)ptr) : r8((uint64_t *)ptr);
+ key = long_size == 4 ? *(uint32_t *)ptr : *(uint64_t *)ptr;
if (!find_func(key)) {
if (long_size == 4)
*(uint32_t *)ptr = 0;
diff --git a/scripts/tags.sh b/scripts/tags.sh
index 45eaf35f5bff..98680e9cd7be 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -146,6 +146,7 @@ dogtags()
# a ^[^#] is prepended by setup_regex unless an anchor is already present
regex_asm=(
'/^\(ENTRY\|_GLOBAL\)([[:space:]]*\([[:alnum:]_\\]*\)).*/\2/'
+ '/^SYM_[[:alnum:]_]*START[[:alnum:]_]*([[:space:]]*\([[:alnum:]_\\]*\)).*/\1/'
)
regex_c=(
'/^SYSCALL_DEFINE[0-9]([[:space:]]*\([[:alnum:]_]*\).*/sys_\1/'
diff --git a/security/Kconfig b/security/Kconfig
index 536061cf33a9..4816fc74f81e 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -51,6 +51,27 @@ config PROC_MEM_NO_FORCE
endchoice
+config MSEAL_SYSTEM_MAPPINGS
+ bool "mseal system mappings"
+ depends on 64BIT
+ depends on ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
+ depends on !CHECKPOINT_RESTORE
+ help
+ Apply mseal on system mappings.
+ The system mappings includes vdso, vvar, vvar_vclock,
+ vectors (arm compat-mode), sigpage (arm compat-mode), uprobes.
+
+ A 64-bit kernel is required for the memory sealing feature.
+ No specific hardware features from the CPU are needed.
+
+ WARNING: This feature breaks programs which rely on relocating
+ or unmapping system mappings. Known broken software at the time
+ of writing includes CHECKPOINT_RESTORE, UML, gVisor, rr. Therefore
+ this config can't be enabled universally.
+
+ For complete descriptions of memory sealing, please see
+ Documentation/userspace-api/mseal.rst
+
config SECURITY
bool "Enable different security models"
depends on SYSFS
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index fb66ed2e97cf..e7a7dcab81db 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3587,10 +3587,13 @@ static int selinux_kernfs_init_security(struct kernfs_node *kn_dir,
newsid = tsec->create_sid;
} else {
u16 secclass = inode_mode_to_security_class(kn->mode);
+ const char *kn_name;
struct qstr q;
- q.name = kn->name;
- q.hash_len = hashlen_string(kn_dir, kn->name);
+ /* kn is fresh, can't be renamed, and its name does not go away */
+ kn_name = rcu_dereference_check(kn->name, true);
+ q.name = kn_name;
+ q.hash_len = hashlen_string(kn_dir, kn_name);
rc = security_transition_sid(tsec->sid,
parent_sid, secclass, &q,
diff --git a/sound/hda/intel-sdw-acpi.c b/sound/hda/intel-sdw-acpi.c
index 49d3e0e30073..8686adaf4531 100644
--- a/sound/hda/intel-sdw-acpi.c
+++ b/sound/hda/intel-sdw-acpi.c
@@ -11,8 +11,8 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/export.h>
-#include <linux/fwnode.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/soundwire/sdw_intel.h>
#include <linux/string.h>
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 6e710dce5c60..88ac37739b76 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -2461,8 +2461,7 @@ int snd_ac97_update_power(struct snd_ac97 *ac97, int reg, int powerup)
* (for avoiding loud click noises for many (OSS) apps
* that open/close frequently)
*/
- schedule_delayed_work(&ac97->power_work,
- msecs_to_jiffies(power_save * 1000));
+ schedule_delayed_work(&ac97->power_work, secs_to_jiffies(power_save));
else {
cancel_delayed_work(&ac97->power_work);
update_power_regs(ac97);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index b4fe681ec3cb..79004bc8107b 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4743,6 +4743,22 @@ static void alc245_fixup_hp_mute_led_coefbit(struct hda_codec *codec,
}
}
+static void alc245_fixup_hp_mute_led_v1_coefbit(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ struct alc_spec *spec = codec->spec;
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ spec->mute_led_polarity = 0;
+ spec->mute_led_coef.idx = 0x0b;
+ spec->mute_led_coef.mask = 1 << 3;
+ spec->mute_led_coef.on = 1 << 3;
+ spec->mute_led_coef.off = 0;
+ snd_hda_gen_add_mute_led_cdev(codec, coef_mute_led_set);
+ }
+}
+
/* turn on/off mic-mute LED per capture hook by coef bit */
static int coef_micmute_led_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
@@ -7574,6 +7590,24 @@ static void alc245_fixup_hp_spectre_x360_16_aa0xxx(struct hda_codec *codec,
alc245_fixup_hp_gpio_led(codec, fix, action);
}
+static void alc245_fixup_hp_zbook_firefly_g12a(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+ static const hda_nid_t conn[] = { 0x02 };
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ spec->gen.auto_mute_via_amp = 1;
+ snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
+ break;
+ }
+
+ cs35l41_fixup_i2c_two(codec, fix, action);
+ alc245_fixup_hp_mute_led_coefbit(codec, fix, action);
+ alc285_fixup_hp_coef_micmute_led(codec, fix, action);
+}
+
/*
* ALC287 PCM hooks
*/
@@ -7911,6 +7945,7 @@ enum {
ALC245_FIXUP_TAS2781_SPI_2,
ALC287_FIXUP_YOGA7_14ARB7_I2C,
ALC245_FIXUP_HP_MUTE_LED_COEFBIT,
+ ALC245_FIXUP_HP_MUTE_LED_V1_COEFBIT,
ALC245_FIXUP_HP_X360_MUTE_LEDS,
ALC287_FIXUP_THINKPAD_I2S_SPK,
ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD,
@@ -7921,6 +7956,7 @@ enum {
ALC256_FIXUP_HEADPHONE_AMP_VOL,
ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX,
ALC245_FIXUP_HP_SPECTRE_X360_16_AA0XXX,
+ ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A,
ALC285_FIXUP_ASUS_GA403U,
ALC285_FIXUP_ASUS_GA403U_HEADSET_MIC,
ALC285_FIXUP_ASUS_GA403U_I2C_SPEAKER2_TO_DAC1,
@@ -10164,6 +10200,10 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc245_fixup_hp_mute_led_coefbit,
},
+ [ALC245_FIXUP_HP_MUTE_LED_V1_COEFBIT] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc245_fixup_hp_mute_led_v1_coefbit,
+ },
[ALC245_FIXUP_HP_X360_MUTE_LEDS] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc245_fixup_hp_mute_led_coefbit,
@@ -10212,6 +10252,10 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc245_fixup_hp_spectre_x360_16_aa0xxx,
},
+ [ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc245_fixup_hp_zbook_firefly_g12a,
+ },
[ALC285_FIXUP_ASUS_GA403U] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc285_fixup_asus_ga403u,
@@ -10658,6 +10702,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8bb3, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8bb4, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x103c, 0x8bcd, "HP Omen 16-xd0xxx", ALC245_FIXUP_HP_MUTE_LED_V1_COEFBIT),
SND_PCI_QUIRK(0x103c, 0x8bdd, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8bde, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8bdf, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2),
@@ -10751,15 +10796,15 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8e11, "HP Trekker", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8e12, "HP Trekker", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8e13, "HP Trekker", ALC287_FIXUP_CS35L41_I2C_2),
- SND_PCI_QUIRK(0x103c, 0x8e14, "HP ZBook Firefly 14 G12", ALC285_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8e15, "HP ZBook Firefly 14 G12", ALC285_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8e16, "HP ZBook Firefly 14 G12", ALC285_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8e17, "HP ZBook Firefly 14 G12", ALC285_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8e18, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8e19, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8e1a, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8e1b, "HP EliteBook G12", ALC285_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8e1c, "HP EliteBook G12", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8e14, "HP ZBook Firefly 14 G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
+ SND_PCI_QUIRK(0x103c, 0x8e15, "HP ZBook Firefly 14 G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
+ SND_PCI_QUIRK(0x103c, 0x8e16, "HP ZBook Firefly 14 G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
+ SND_PCI_QUIRK(0x103c, 0x8e17, "HP ZBook Firefly 14 G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
+ SND_PCI_QUIRK(0x103c, 0x8e18, "HP ZBook Firefly 14 G12A", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
+ SND_PCI_QUIRK(0x103c, 0x8e19, "HP ZBook Firefly 14 G12A", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
+ SND_PCI_QUIRK(0x103c, 0x8e1a, "HP ZBook Firefly 14 G12A", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
+ SND_PCI_QUIRK(0x103c, 0x8e1b, "HP EliteBook G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
+ SND_PCI_QUIRK(0x103c, 0x8e1c, "HP EliteBook G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
SND_PCI_QUIRK(0x103c, 0x8e2c, "HP EliteBook 16 G12", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8e36, "HP 14 Enstrom OmniBook X", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8e37, "HP 16 Piston OmniBook X", ALC287_FIXUP_CS35L41_I2C_2),
@@ -10804,6 +10849,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1493, "ASUS GV601VV/VU/VJ/VQ/VI", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x14d3, "ASUS G614JY/JZ/JG", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x14e3, "ASUS G513PI/PU/PV", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x14f2, "ASUS VivoBook X515JA", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1503, "ASUS G733PY/PZ/PZV/PYV", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
SND_PCI_QUIRK(0x1043, 0x1533, "ASUS GV302XA/XJ/XQ/XU/XV/XI", ALC287_FIXUP_CS35L41_I2C_2),
@@ -10843,6 +10889,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1c43, "ASUS UX8406MA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1c63, "ASUS GU605M", ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1),
+ SND_PCI_QUIRK(0x1043, 0x1c80, "ASUS VivoBook TP401", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JU/JV/JI", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JY/JZ/JI/JG", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
diff --git a/sound/pci/hda/tas2781_hda_i2c.c b/sound/pci/hda/tas2781_hda_i2c.c
index 9ed49b0dbe6b..29dc4f500580 100644
--- a/sound/pci/hda/tas2781_hda_i2c.c
+++ b/sound/pci/hda/tas2781_hda_i2c.c
@@ -558,28 +558,38 @@ static int tas2563_save_calibration(struct tasdevice_priv *tas_priv)
static void tas2781_apply_calib(struct tasdevice_priv *tas_priv)
{
- static const unsigned char page_array[CALIB_MAX] = {
- 0x17, 0x18, 0x18, 0x13, 0x18,
+ struct calidata *cali_data = &tas_priv->cali_data;
+ struct cali_reg *r = &cali_data->cali_reg_array;
+ unsigned int cali_reg[CALIB_MAX] = {
+ TASDEVICE_REG(0, 0x17, 0x74),
+ TASDEVICE_REG(0, 0x18, 0x0c),
+ TASDEVICE_REG(0, 0x18, 0x14),
+ TASDEVICE_REG(0, 0x13, 0x70),
+ TASDEVICE_REG(0, 0x18, 0x7c),
};
- static const unsigned char rgno_array[CALIB_MAX] = {
- 0x74, 0x0c, 0x14, 0x70, 0x7c,
- };
- int offset = 0;
int i, j, rc;
+ int oft = 0;
__be32 data;
+ if (tas_priv->dspbin_typ != TASDEV_BASIC) {
+ cali_reg[0] = r->r0_reg;
+ cali_reg[1] = r->invr0_reg;
+ cali_reg[2] = r->r0_low_reg;
+ cali_reg[3] = r->pow_reg;
+ cali_reg[4] = r->tlimit_reg;
+ }
+
for (i = 0; i < tas_priv->ndev; i++) {
for (j = 0; j < CALIB_MAX; j++) {
data = cpu_to_be32(
- *(uint32_t *)&tas_priv->cali_data.data[offset]);
+ *(uint32_t *)&tas_priv->cali_data.data[oft]);
rc = tasdevice_dev_bulk_write(tas_priv, i,
- TASDEVICE_REG(0, page_array[j], rgno_array[j]),
- (unsigned char *)&data, 4);
+ cali_reg[j], (unsigned char *)&data, 4);
if (rc < 0)
dev_err(tas_priv->dev,
"chn %d calib %d bulk_wr err = %d\n",
i, j, rc);
- offset += 4;
+ oft += 4;
}
}
}
diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c
index e0d1991cffdb..bcb6d7c6f301 100644
--- a/sound/soc/codecs/rt5665.c
+++ b/sound/soc/codecs/rt5665.c
@@ -31,9 +31,7 @@
#include "rl6231.h"
#include "rt5665.h"
-#define RT5665_NUM_SUPPLIES 3
-
-static const char *rt5665_supply_names[RT5665_NUM_SUPPLIES] = {
+static const char * const rt5665_supply_names[] = {
"AVDD",
"MICVDD",
"VBAT",
@@ -46,7 +44,6 @@ struct rt5665_priv {
struct gpio_desc *gpiod_ldo1_en;
struct gpio_desc *gpiod_reset;
struct snd_soc_jack *hs_jack;
- struct regulator_bulk_data supplies[RT5665_NUM_SUPPLIES];
struct delayed_work jack_detect_work;
struct delayed_work calibrate_work;
struct delayed_work jd_check_work;
@@ -4471,8 +4468,6 @@ static void rt5665_remove(struct snd_soc_component *component)
struct rt5665_priv *rt5665 = snd_soc_component_get_drvdata(component);
regmap_write(rt5665->regmap, RT5665_RESET, 0);
-
- regulator_bulk_disable(ARRAY_SIZE(rt5665->supplies), rt5665->supplies);
}
#ifdef CONFIG_PM
@@ -4758,7 +4753,7 @@ static int rt5665_i2c_probe(struct i2c_client *i2c)
{
struct rt5665_platform_data *pdata = dev_get_platdata(&i2c->dev);
struct rt5665_priv *rt5665;
- int i, ret;
+ int ret;
unsigned int val;
rt5665 = devm_kzalloc(&i2c->dev, sizeof(struct rt5665_priv),
@@ -4774,24 +4769,13 @@ static int rt5665_i2c_probe(struct i2c_client *i2c)
else
rt5665_parse_dt(rt5665, &i2c->dev);
- for (i = 0; i < ARRAY_SIZE(rt5665->supplies); i++)
- rt5665->supplies[i].supply = rt5665_supply_names[i];
-
- ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(rt5665->supplies),
- rt5665->supplies);
+ ret = devm_regulator_bulk_get_enable(&i2c->dev, ARRAY_SIZE(rt5665_supply_names),
+ rt5665_supply_names);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
return ret;
}
- ret = regulator_bulk_enable(ARRAY_SIZE(rt5665->supplies),
- rt5665->supplies);
- if (ret != 0) {
- dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret);
- return ret;
- }
-
-
rt5665->gpiod_ldo1_en = devm_gpiod_get_optional(&i2c->dev,
"realtek,ldo1-en",
GPIOD_OUT_HIGH);
diff --git a/sound/soc/codecs/rt711-sdca-sdw.c b/sound/soc/codecs/rt711-sdca-sdw.c
index adf05f64259b..6eb05871db37 100644
--- a/sound/soc/codecs/rt711-sdca-sdw.c
+++ b/sound/soc/codecs/rt711-sdca-sdw.c
@@ -225,6 +225,14 @@ static int rt711_sdca_read_prop(struct sdw_slave *slave)
j++;
}
+ prop->dp0_prop = devm_kzalloc(&slave->dev, sizeof(*prop->dp0_prop),
+ GFP_KERNEL);
+ if (!prop->dp0_prop)
+ return -ENOMEM;
+
+ prop->dp0_prop->simple_ch_prep_sm = true;
+ prop->dp0_prop->ch_prep_timeout = 10;
+
/* set the timeout values */
prop->clk_stop_timeout = 700;
diff --git a/sound/soc/codecs/sma1307.c b/sound/soc/codecs/sma1307.c
index f5c303d4bb62..498189ab691c 100644
--- a/sound/soc/codecs/sma1307.c
+++ b/sound/soc/codecs/sma1307.c
@@ -1705,7 +1705,7 @@ static void sma1307_check_fault_worker(struct work_struct *work)
static void sma1307_setting_loaded(struct sma1307_priv *sma1307, const char *file)
{
const struct firmware *fw;
- int *data, size, offset, num_mode;
+ int size, offset, num_mode;
int ret;
ret = request_firmware(&fw, file, sma1307->dev);
@@ -1722,7 +1722,7 @@ static void sma1307_setting_loaded(struct sma1307_priv *sma1307, const char *fil
return;
}
- data = kzalloc(fw->size, GFP_KERNEL);
+ int *data __free(kfree) = kzalloc(fw->size, GFP_KERNEL);
if (!data) {
release_firmware(fw);
sma1307->set.status = false;
@@ -1742,7 +1742,6 @@ static void sma1307_setting_loaded(struct sma1307_priv *sma1307, const char *fil
sma1307->set.header_size,
GFP_KERNEL);
if (!sma1307->set.header) {
- kfree(data);
sma1307->set.status = false;
return;
}
@@ -1763,8 +1762,6 @@ static void sma1307_setting_loaded(struct sma1307_priv *sma1307, const char *fil
= devm_kzalloc(sma1307->dev,
sma1307->set.def_size * sizeof(int), GFP_KERNEL);
if (!sma1307->set.def) {
- kfree(data);
- kfree(sma1307->set.header);
sma1307->set.status = false;
return;
}
@@ -1782,9 +1779,6 @@ static void sma1307_setting_loaded(struct sma1307_priv *sma1307, const char *fil
sma1307->set.mode_size * 2 * sizeof(int),
GFP_KERNEL);
if (!sma1307->set.mode_set[i]) {
- kfree(data);
- kfree(sma1307->set.header);
- kfree(sma1307->set.def);
for (int j = 0; j < i; j++)
kfree(sma1307->set.mode_set[j]);
sma1307->set.status = false;
@@ -1799,7 +1793,6 @@ static void sma1307_setting_loaded(struct sma1307_priv *sma1307, const char *fil
}
}
- kfree(data);
sma1307->set.status = true;
}
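
The sma1307 cleanup leans on scope-based resource management from <linux/cleanup.h>: declaring the buffer with __free(kfree) makes kfree() run automatically when the variable leaves scope, which is why every manual kfree(data) in the error paths above could be dropped. A minimal illustration with a hypothetical parse_blob():

#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/string.h>

static int parse_blob(const u8 *src, size_t len)
{
        /* freed automatically on every return path below */
        u8 *data __free(kfree) = kzalloc(len, GFP_KERNEL);

        if (!data)
                return -ENOMEM;

        memcpy(data, src, len);
        if (data[0] != 0xAA)
                return -EINVAL; /* no explicit kfree() needed */

        return 0;
}
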
diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
index dd0cda394bf1..fa69817c97ea 100644
--- a/sound/soc/codecs/wcd934x.c
+++ b/sound/soc/codecs/wcd934x.c
@@ -2263,7 +2263,7 @@ static irqreturn_t wcd934x_slim_irq_handler(int irq, void *data)
{
struct wcd934x_codec *wcd = data;
unsigned long status = 0;
- int i, j, port_id;
+ unsigned int i, j, port_id;
unsigned int val, int_val = 0;
irqreturn_t ret = IRQ_NONE;
bool tx;
diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c
index d259e1d4d83d..1c9df7c061bd 100644
--- a/sound/soc/codecs/wsa883x.c
+++ b/sound/soc/codecs/wsa883x.c
@@ -568,7 +568,7 @@ static const struct sdw_port_config wsa883x_pconfig[WSA883X_MAX_SWR_PORTS] = {
},
[WSA883X_PORT_VISENSE] = {
.num = WSA883X_PORT_VISENSE + 1,
- .ch_mask = 0x3,
+ .ch_mask = 0x1,
},
};
diff --git a/sound/soc/codecs/wsa884x.c b/sound/soc/codecs/wsa884x.c
index 8051483aa1ac..daada1a2a34c 100644
--- a/sound/soc/codecs/wsa884x.c
+++ b/sound/soc/codecs/wsa884x.c
@@ -891,7 +891,7 @@ static const struct sdw_port_config wsa884x_pconfig[WSA884X_MAX_SWR_PORTS] = {
},
[WSA884X_PORT_VISENSE] = {
.num = WSA884X_PORT_VISENSE + 1,
- .ch_mask = 0x3,
+ .ch_mask = 0x1,
},
[WSA884X_PORT_CPS] = {
.num = WSA884X_PORT_CPS + 1,
diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c
index 905294682996..3686d468506b 100644
--- a/sound/soc/fsl/imx-card.c
+++ b/sound/soc/fsl/imx-card.c
@@ -772,6 +772,8 @@ static int imx_card_probe(struct platform_device *pdev)
data->dapm_routes[i].sink =
devm_kasprintf(&pdev->dev, GFP_KERNEL, "%d %s",
i + 1, "Playback");
+ if (!data->dapm_routes[i].sink)
+ return -ENOMEM;
data->dapm_routes[i].source = "CPU-Playback";
}
}
@@ -789,6 +791,8 @@ static int imx_card_probe(struct platform_device *pdev)
data->dapm_routes[i].source =
devm_kasprintf(&pdev->dev, GFP_KERNEL, "%d %s",
i + 1, "Capture");
+ if (!data->dapm_routes[i].source)
+ return -ENOMEM;
data->dapm_routes[i].sink = "CPU-Capture";
}
}
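
The imx-card fix adds the easy-to-miss NULL check on devm_kasprintf(), which fails like any other allocator. A tiny sketch of the checked pattern, using a hypothetical helper:

#include <linux/device.h>
#include <linux/errno.h>

/* hypothetical helper; mirrors the imx-card route setup */
static int build_route_name(struct device *dev, const char **sink, int idx)
{
        *sink = devm_kasprintf(dev, GFP_KERNEL, "%d %s", idx + 1, "Playback");
        if (!*sink)
                return -ENOMEM; /* devm_kasprintf() can fail like kmalloc() */

        return 0;
}
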
diff --git a/sound/soc/qcom/qdsp6/q6apm-dai.c b/sound/soc/qcom/qdsp6/q6apm-dai.c
index c9404b5934c7..2cd522108221 100644
--- a/sound/soc/qcom/qdsp6/q6apm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6apm-dai.c
@@ -24,8 +24,8 @@
#define PLAYBACK_MIN_PERIOD_SIZE 128
#define CAPTURE_MIN_NUM_PERIODS 2
#define CAPTURE_MAX_NUM_PERIODS 8
-#define CAPTURE_MAX_PERIOD_SIZE 4096
-#define CAPTURE_MIN_PERIOD_SIZE 320
+#define CAPTURE_MAX_PERIOD_SIZE 65536
+#define CAPTURE_MIN_PERIOD_SIZE 6144
#define BUFFER_BYTES_MAX (PLAYBACK_MAX_NUM_PERIODS * PLAYBACK_MAX_PERIOD_SIZE)
#define BUFFER_BYTES_MIN (PLAYBACK_MIN_NUM_PERIODS * PLAYBACK_MIN_PERIOD_SIZE)
#define COMPR_PLAYBACK_MAX_FRAGMENT_SIZE (128 * 1024)
@@ -64,12 +64,12 @@ struct q6apm_dai_rtd {
phys_addr_t phys;
unsigned int pcm_size;
unsigned int pcm_count;
- unsigned int pos; /* Buffer position */
unsigned int periods;
unsigned int bytes_sent;
unsigned int bytes_received;
unsigned int copied_total;
uint16_t bits_per_sample;
+ snd_pcm_uframes_t queue_ptr;
bool next_track;
enum stream_state state;
struct q6apm_graph *graph;
@@ -123,25 +123,16 @@ static void event_handler(uint32_t opcode, uint32_t token, void *payload, void *
{
struct q6apm_dai_rtd *prtd = priv;
struct snd_pcm_substream *substream = prtd->substream;
- unsigned long flags;
switch (opcode) {
case APM_CLIENT_EVENT_CMD_EOS_DONE:
prtd->state = Q6APM_STREAM_STOPPED;
break;
case APM_CLIENT_EVENT_DATA_WRITE_DONE:
- spin_lock_irqsave(&prtd->lock, flags);
- prtd->pos += prtd->pcm_count;
- spin_unlock_irqrestore(&prtd->lock, flags);
snd_pcm_period_elapsed(substream);
- if (prtd->state == Q6APM_STREAM_RUNNING)
- q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, 0);
break;
case APM_CLIENT_EVENT_DATA_READ_DONE:
- spin_lock_irqsave(&prtd->lock, flags);
- prtd->pos += prtd->pcm_count;
- spin_unlock_irqrestore(&prtd->lock, flags);
snd_pcm_period_elapsed(substream);
if (prtd->state == Q6APM_STREAM_RUNNING)
q6apm_read(prtd->graph);
@@ -248,7 +239,6 @@ static int q6apm_dai_prepare(struct snd_soc_component *component,
}
prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
- prtd->pos = 0;
/* rate and channels are sent to audio driver */
ret = q6apm_graph_media_format_shmem(prtd->graph, &cfg);
if (ret < 0) {
@@ -294,6 +284,27 @@ static int q6apm_dai_prepare(struct snd_soc_component *component,
return 0;
}
+static int q6apm_dai_ack(struct snd_soc_component *component, struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct q6apm_dai_rtd *prtd = runtime->private_data;
+ int i, ret = 0, avail_periods;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ avail_periods = (runtime->control->appl_ptr - prtd->queue_ptr)/runtime->period_size;
+ for (i = 0; i < avail_periods; i++) {
+ ret = q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, NO_TIMESTAMP);
+ if (ret < 0) {
+ dev_err(component->dev, "Error queuing playback buffer %d\n", ret);
+ return ret;
+ }
+ prtd->queue_ptr += runtime->period_size;
+ }
+ }
+
+ return ret;
+}
+
static int q6apm_dai_trigger(struct snd_soc_component *component,
struct snd_pcm_substream *substream, int cmd)
{
@@ -305,9 +316,6 @@ static int q6apm_dai_trigger(struct snd_soc_component *component,
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- /* start writing buffers for playback only as we already queued capture buffers */
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- ret = q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, 0);
break;
case SNDRV_PCM_TRIGGER_STOP:
/* TODO support be handled via SoftPause Module */
@@ -377,13 +385,14 @@ static int q6apm_dai_open(struct snd_soc_component *component,
}
}
- ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 32);
+ /* setup 10ms latency to accommodate DSP restrictions */
+ ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 480);
if (ret < 0) {
dev_err(dev, "constraint for period bytes step ret = %d\n", ret);
goto err;
}
- ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 32);
+ ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 480);
if (ret < 0) {
dev_err(dev, "constraint for buffer bytes step ret = %d\n", ret);
goto err;
@@ -428,16 +437,12 @@ static snd_pcm_uframes_t q6apm_dai_pointer(struct snd_soc_component *component,
struct snd_pcm_runtime *runtime = substream->runtime;
struct q6apm_dai_rtd *prtd = runtime->private_data;
snd_pcm_uframes_t ptr;
- unsigned long flags;
- spin_lock_irqsave(&prtd->lock, flags);
- if (prtd->pos == prtd->pcm_size)
- prtd->pos = 0;
-
- ptr = bytes_to_frames(runtime, prtd->pos);
- spin_unlock_irqrestore(&prtd->lock, flags);
+ ptr = q6apm_get_hw_pointer(prtd->graph, substream->stream) * runtime->period_size;
+ if (ptr)
+ return ptr - 1;
- return ptr;
+ return 0;
}
static int q6apm_dai_hw_params(struct snd_soc_component *component,
@@ -652,8 +657,6 @@ static int q6apm_dai_compr_set_params(struct snd_soc_component *component,
prtd->pcm_size = runtime->fragments * runtime->fragment_size;
prtd->bits_per_sample = 16;
- prtd->pos = 0;
-
if (prtd->next_track != true) {
memcpy(&prtd->codec, codec, sizeof(*codec));
@@ -836,6 +839,7 @@ static const struct snd_soc_component_driver q6apm_fe_dai_component = {
.hw_params = q6apm_dai_hw_params,
.pointer = q6apm_dai_pointer,
.trigger = q6apm_dai_trigger,
+ .ack = q6apm_dai_ack,
.compress_ops = &q6apm_dai_compress_ops,
.use_dai_pcm_id = true,
};
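
The q6apm-dai rework moves buffer queuing out of the write-done callback and into the ALSA .ack hook, which runs whenever userspace advances appl_ptr; the driver then queues one DSP write per newly committed period and tracks its own queue_ptr. A simplified sketch of that arithmetic, with write_period() standing in for q6apm_write_async():

#include <sound/pcm.h>

static snd_pcm_uframes_t queue_ptr;     /* frames handed to the DSP so far */

int write_period(void);                 /* stand-in for q6apm_write_async() */

static int acme_ack(struct snd_pcm_substream *substream)
{
        struct snd_pcm_runtime *rt = substream->runtime;
        snd_pcm_sframes_t avail_periods =
                (rt->control->appl_ptr - queue_ptr) / rt->period_size;

        while (avail_periods-- > 0) {
                int ret = write_period();       /* one period to the DSP */

                if (ret < 0)
                        return ret;
                queue_ptr += rt->period_size;
        }
        return 0;
}
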
diff --git a/sound/soc/qcom/qdsp6/q6apm.c b/sound/soc/qcom/qdsp6/q6apm.c
index 11e252a70f69..b4ffa0f0b188 100644
--- a/sound/soc/qcom/qdsp6/q6apm.c
+++ b/sound/soc/qcom/qdsp6/q6apm.c
@@ -494,6 +494,19 @@ int q6apm_read(struct q6apm_graph *graph)
}
EXPORT_SYMBOL_GPL(q6apm_read);
+int q6apm_get_hw_pointer(struct q6apm_graph *graph, int dir)
+{
+ struct audioreach_graph_data *data;
+
+ if (dir == SNDRV_PCM_STREAM_PLAYBACK)
+ data = &graph->rx_data;
+ else
+ data = &graph->tx_data;
+
+ return (int)atomic_read(&data->hw_ptr);
+}
+EXPORT_SYMBOL_GPL(q6apm_get_hw_pointer);
+
static int graph_callback(struct gpr_resp_pkt *data, void *priv, int op)
{
struct data_cmd_rsp_rd_sh_mem_ep_data_buffer_done_v2 *rd_done;
@@ -520,7 +533,8 @@ static int graph_callback(struct gpr_resp_pkt *data, void *priv, int op)
done = data->payload;
phys = graph->rx_data.buf[token].phys;
mutex_unlock(&graph->lock);
-
+ /* token numbering starts at 0 */
+ atomic_set(&graph->rx_data.hw_ptr, token + 1);
if (lower_32_bits(phys) == done->buf_addr_lsw &&
upper_32_bits(phys) == done->buf_addr_msw) {
graph->result.opcode = hdr->opcode;
@@ -553,6 +567,8 @@ static int graph_callback(struct gpr_resp_pkt *data, void *priv, int op)
rd_done = data->payload;
phys = graph->tx_data.buf[hdr->token].phys;
mutex_unlock(&graph->lock);
+ /* token numbering starts at 0 */
+ atomic_set(&graph->tx_data.hw_ptr, hdr->token + 1);
if (upper_32_bits(phys) == rd_done->buf_addr_msw &&
lower_32_bits(phys) == rd_done->buf_addr_lsw) {
diff --git a/sound/soc/qcom/qdsp6/q6apm.h b/sound/soc/qcom/qdsp6/q6apm.h
index c248c8d2b1ab..7ce08b401e31 100644
--- a/sound/soc/qcom/qdsp6/q6apm.h
+++ b/sound/soc/qcom/qdsp6/q6apm.h
@@ -2,6 +2,7 @@
#ifndef __Q6APM_H__
#define __Q6APM_H__
#include <linux/types.h>
+#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/kernel.h>
@@ -77,6 +78,7 @@ struct audioreach_graph_data {
uint32_t num_periods;
uint32_t dsp_buf;
uint32_t mem_map_handle;
+ atomic_t hw_ptr;
};
struct audioreach_graph {
@@ -150,4 +152,5 @@ int q6apm_enable_compress_module(struct device *dev, struct q6apm_graph *graph,
int q6apm_remove_initial_silence(struct device *dev, struct q6apm_graph *graph, uint32_t samples);
int q6apm_remove_trailing_silence(struct device *dev, struct q6apm_graph *graph, uint32_t samples);
int q6apm_set_real_module_id(struct device *dev, struct q6apm_graph *graph, uint32_t codec_id);
+int q6apm_get_hw_pointer(struct q6apm_graph *graph, int dir);
#endif /* __APM_GRAPH_ */
diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
index 045100c94352..a400c9a31fea 100644
--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
@@ -892,9 +892,7 @@ static int q6asm_dai_compr_set_params(struct snd_soc_component *component,
if (ret < 0) {
dev_err(dev, "q6asm_open_write failed\n");
- q6asm_audio_client_free(prtd->audio_client);
- prtd->audio_client = NULL;
- return ret;
+ goto open_err;
}
}
@@ -903,7 +901,7 @@ static int q6asm_dai_compr_set_params(struct snd_soc_component *component,
prtd->session_id, dir);
if (ret) {
dev_err(dev, "Stream reg failed ret:%d\n", ret);
- return ret;
+ goto q6_err;
}
ret = __q6asm_dai_compr_set_codec_params(component, stream,
@@ -911,7 +909,7 @@ static int q6asm_dai_compr_set_params(struct snd_soc_component *component,
prtd->stream_id);
if (ret) {
dev_err(dev, "codec param setup failed ret:%d\n", ret);
- return ret;
+ goto q6_err;
}
ret = q6asm_map_memory_regions(dir, prtd->audio_client, prtd->phys,
@@ -920,12 +918,21 @@ static int q6asm_dai_compr_set_params(struct snd_soc_component *component,
if (ret < 0) {
dev_err(dev, "Buffer Mapping failed ret:%d\n", ret);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto q6_err;
}
prtd->state = Q6ASM_STREAM_RUNNING;
return 0;
+
+q6_err:
+ q6asm_cmd(prtd->audio_client, prtd->stream_id, CMD_CLOSE);
+
+open_err:
+ q6asm_audio_client_free(prtd->audio_client);
+ prtd->audio_client = NULL;
+ return ret;
}
static int q6asm_dai_compr_set_metadata(struct snd_soc_component *component,
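
The q6asm-dai fix is the standard staged-unwind idiom: later failures must undo everything earlier stages set up, so the exits funnel through labels in reverse order of setup and fall through. A skeleton of the shape, with stand-in helpers:

int open_stream(void);
int register_stream(void);
int map_memory(void);
void close_stream(void);
void free_client(void);

static int setup(void)
{
        int ret;

        ret = open_stream();
        if (ret < 0)
                goto open_err;          /* stream never opened: skip close */

        ret = register_stream();
        if (ret < 0)
                goto q6_err;            /* must close the opened stream */

        ret = map_memory();
        if (ret < 0)
                goto q6_err;

        return 0;

q6_err:
        close_stream();                 /* undo open_stream() */
open_err:
        free_client();                  /* undo the earlier client allocation */
        return ret;
}
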
diff --git a/sound/soc/qcom/sdw.c b/sound/soc/qcom/sdw.c
index 1d01b9329e08..7d7981d4295b 100644
--- a/sound/soc/qcom/sdw.c
+++ b/sound/soc/qcom/sdw.c
@@ -29,7 +29,7 @@ int qcom_snd_sdw_startup(struct snd_pcm_substream *substream)
u32 rx_ch_cnt = 0, tx_ch_cnt = 0;
int ret, i, j;
- sruntime = sdw_alloc_stream(cpu_dai->name);
+ sruntime = sdw_alloc_stream(cpu_dai->name, SDW_STREAM_PCM);
if (!sruntime)
return -ENOMEM;
diff --git a/sound/soc/sof/intel/Kconfig b/sound/soc/sof/intel/Kconfig
index 2c43558d96b9..fae3598fd601 100644
--- a/sound/soc/sof/intel/Kconfig
+++ b/sound/soc/sof/intel/Kconfig
@@ -268,6 +268,7 @@ config SND_SOC_SOF_INTEL_LNL
tristate
select SND_SOC_SOF_HDA_GENERIC
select SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
+ select SND_SOF_SOF_HDA_SDW_BPT if SND_SOC_SOF_INTEL_SOUNDWIRE
select SND_SOC_SOF_IPC4
select SND_SOC_SOF_INTEL_MTL
@@ -342,6 +343,12 @@ config SND_SOC_SOF_HDA_AUDIO_CODEC
endif ## SND_SOC_SOF_HDA_GENERIC
+config SND_SOF_SOF_HDA_SDW_BPT
+ tristate
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level.
+
config SND_SOC_SOF_HDA_LINK_BASELINE
tristate
select SND_SOC_SOF_HDA if SND_SOC_SOF_HDA_LINK
diff --git a/sound/soc/sof/intel/Makefile b/sound/soc/sof/intel/Makefile
index 675f9fc92dde..aab803a495b1 100644
--- a/sound/soc/sof/intel/Makefile
+++ b/sound/soc/sof/intel/Makefile
@@ -12,6 +12,8 @@ snd-sof-intel-hda-generic-y := hda.o hda-common-ops.o
snd-sof-intel-hda-mlink-y := hda-mlink.o
+snd-sof-intel-hda-sdw-bpt-objs := hda-sdw-bpt.o
+
snd-sof-intel-hda-common-$(CONFIG_SND_SOC_SOF_HDA_PROBES) += hda-probes.o
snd-sof-intel-hda-y := hda-codec.o
@@ -26,6 +28,8 @@ obj-$(CONFIG_SND_SOC_SOF_HDA_GENERIC) += snd-sof-intel-hda-generic.o
obj-$(CONFIG_SND_SOC_SOF_HDA_MLINK) += snd-sof-intel-hda-mlink.o
obj-$(CONFIG_SND_SOC_SOF_HDA) += snd-sof-intel-hda.o
+obj-$(CONFIG_SND_SOF_SOF_HDA_SDW_BPT) += snd-sof-intel-hda-sdw-bpt.o
+
snd-sof-pci-intel-tng-y := pci-tng.o
snd-sof-pci-intel-skl-y := pci-skl.o skl.o hda-loader-skl.o
snd-sof-pci-intel-apl-y := pci-apl.o apl.o
diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
index ccf8eefdca70..f64e8a6a9a33 100644
--- a/sound/soc/sof/intel/hda-dsp.c
+++ b/sound/soc/sof/intel/hda-dsp.c
@@ -991,6 +991,10 @@ int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
if (!sdev->dspless_mode_selected) {
/* cancel any attempt for DSP D0I3 */
cancel_delayed_work_sync(&hda->d0i3_work);
+
+ /* Cancel the microphone privacy work if mic privacy is active */
+ if (hda->mic_privacy.active)
+ cancel_work_sync(&hda->mic_privacy.work);
}
/* stop hda controller and power dsp off */
@@ -1017,6 +1021,10 @@ int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
if (!sdev->dspless_mode_selected) {
/* cancel any attempt for DSP D0I3 */
cancel_delayed_work_sync(&hda->d0i3_work);
+
+ /* Cancel the microphone privacy work if mic privacy is active */
+ if (hda->mic_privacy.active)
+ cancel_work_sync(&hda->mic_privacy.work);
}
if (target_state == SOF_DSP_PM_D0) {
diff --git a/sound/soc/sof/intel/hda-sdw-bpt.c b/sound/soc/sof/intel/hda-sdw-bpt.c
new file mode 100644
index 000000000000..1327f1cad0bc
--- /dev/null
+++ b/sound/soc/sof/intel/hda-sdw-bpt.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// This file is provided under a dual BSD/GPLv2 license. When using or
+// redistributing this file, you may do so under either license.
+//
+// Copyright(c) 2025 Intel Corporation.
+//
+
+/*
+ * Hardware interface for SoundWire BPT support with HDA DMA
+ */
+
+#include <sound/hdaudio_ext.h>
+#include <sound/hda-mlink.h>
+#include <sound/hda-sdw-bpt.h>
+#include <sound/sof.h>
+#include <sound/sof/ipc4/header.h>
+#include "../ops.h"
+#include "../sof-priv.h"
+#include "../ipc4-priv.h"
+#include "hda.h"
+
+#define BPT_FREQUENCY 192000 /* The max rate defined in rate_bits[] hdac_device.c */
+#define BPT_MULTIPLIER ((BPT_FREQUENCY / 48000) - 1)
+#define BPT_CHAIN_DMA_FIFO_MS 10
+/*
+ * This routine is directly inspired by sof_ipc4_chain_dma_trigger(),
+ * with major simplifications since there are no pipelines defined
+ * and no dependency on ALSA hw_params
+ */
+static int chain_dma_trigger(struct snd_sof_dev *sdev, unsigned int stream_tag,
+ int direction, int state)
+{
+ struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+ bool allocate, enable, set_fifo_size;
+ struct sof_ipc4_msg msg = {{ 0 }};
+ int dma_id;
+
+ if (sdev->pdata->ipc_type != SOF_IPC_TYPE_4)
+ return -EOPNOTSUPP;
+
+ switch (state) {
+ case SOF_IPC4_PIPE_RUNNING: /* Allocate and start the chain */
+ allocate = true;
+ enable = true;
+ set_fifo_size = true;
+ break;
+ case SOF_IPC4_PIPE_PAUSED: /* Stop the chain */
+ allocate = true;
+ enable = false;
+ set_fifo_size = false;
+ break;
+ case SOF_IPC4_PIPE_RESET: /* Deallocate chain resources and remove the chain */
+ allocate = false;
+ enable = false;
+ set_fifo_size = false;
+ break;
+ default:
+ dev_err(sdev->dev, "Unexpected state %d", state);
+ return -EINVAL;
+ }
+
+ msg.primary = SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_CHAIN_DMA);
+ msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
+ msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_FW_GEN_MSG);
+
+ /* for BPT/BRA we can use the same stream tag for host and link */
+ dma_id = stream_tag - 1;
+ if (direction == SNDRV_PCM_STREAM_CAPTURE)
+ dma_id += ipc4_data->num_playback_streams;
+
+ msg.primary |= SOF_IPC4_GLB_CHAIN_DMA_HOST_ID(dma_id);
+ msg.primary |= SOF_IPC4_GLB_CHAIN_DMA_LINK_ID(dma_id);
+
+ /* For BPT/BRA we use 32 bits so SCS is not set */
+
+ /* CHAIN DMA needs at least 2ms */
+ if (set_fifo_size)
+ msg.extension |= SOF_IPC4_GLB_EXT_CHAIN_DMA_FIFO_SIZE(BPT_FREQUENCY / 1000 *
+ BPT_CHAIN_DMA_FIFO_MS *
+ sizeof(u32));
+
+ if (allocate)
+ msg.primary |= SOF_IPC4_GLB_CHAIN_DMA_ALLOCATE_MASK;
+
+ if (enable)
+ msg.primary |= SOF_IPC4_GLB_CHAIN_DMA_ENABLE_MASK;
+
+ return sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);
+}
+
+static int hda_sdw_bpt_dma_prepare(struct device *dev, struct hdac_ext_stream **sdw_bpt_stream,
+ struct snd_dma_buffer *dmab_bdl, u32 bpt_num_bytes,
+ unsigned int num_channels, int direction)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ struct hdac_ext_stream *bpt_stream;
+ unsigned int format = HDA_CL_STREAM_FORMAT;
+
+ /*
+ * the baseline format needs to be adjusted to
+ * bandwidth requirements
+ */
+ format |= (num_channels - 1);
+ format |= BPT_MULTIPLIER << AC_FMT_MULT_SHIFT;
+
+ dev_dbg(dev, "direction %d format_val %#x\n", direction, format);
+
+ bpt_stream = hda_cl_prepare(dev, format, bpt_num_bytes, dmab_bdl, false, direction, false);
+ if (IS_ERR(bpt_stream)) {
+ dev_err(sdev->dev, "%s: SDW BPT DMA prepare failed: dir %d\n",
+ __func__, direction);
+ return PTR_ERR(bpt_stream);
+ }
+ *sdw_bpt_stream = bpt_stream;
+
+ if (!sdev->dspless_mode_selected) {
+ struct hdac_stream *hstream;
+ u32 mask;
+
+ /* decouple host and link DMA if the DSP is used */
+ hstream = &bpt_stream->hstream;
+ mask = BIT(hstream->index);
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL, mask, mask);
+
+ snd_hdac_ext_stream_reset(bpt_stream);
+
+ snd_hdac_ext_stream_setup(bpt_stream, format);
+ }
+
+ if (hdac_stream(bpt_stream)->direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_ext_link *hlink;
+ int stream_tag;
+
+ stream_tag = hdac_stream(bpt_stream)->stream_tag;
+ hlink = hdac_bus_eml_sdw_get_hlink(bus);
+
+ snd_hdac_ext_bus_link_set_stream_id(hlink, stream_tag);
+ }
+ return 0;
+}
+
+static int hda_sdw_bpt_dma_deprepare(struct device *dev, struct hdac_ext_stream *sdw_bpt_stream,
+ struct snd_dma_buffer *dmab_bdl)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ struct hdac_stream *hstream;
+ u32 mask;
+ int ret;
+
+ ret = hda_cl_cleanup(sdev->dev, dmab_bdl, true, sdw_bpt_stream);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: SDW BPT DMA cleanup failed\n",
+ __func__);
+ return ret;
+ }
+
+ if (hdac_stream(sdw_bpt_stream)->direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ struct hdac_ext_link *hlink;
+ int stream_tag;
+
+ stream_tag = hdac_stream(sdw_bpt_stream)->stream_tag;
+ hlink = hdac_bus_eml_sdw_get_hlink(bus);
+
+ snd_hdac_ext_bus_link_clear_stream_id(hlink, stream_tag);
+ }
+
+ if (!sdev->dspless_mode_selected) {
+ /* Release CHAIN_DMA resources */
+ ret = chain_dma_trigger(sdev, hdac_stream(sdw_bpt_stream)->stream_tag,
+ hdac_stream(sdw_bpt_stream)->direction,
+ SOF_IPC4_PIPE_RESET);
+ if (ret < 0)
+ dev_err(sdev->dev, "%s: chain_dma_trigger PIPE_RESET failed: %d\n",
+ __func__, ret);
+
+ /* couple host and link DMA */
+ hstream = &sdw_bpt_stream->hstream;
+ mask = BIT(hstream->index);
+
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL, mask, 0);
+ }
+
+ return 0;
+}
+
+static int hda_sdw_bpt_dma_enable(struct device *dev, struct hdac_ext_stream *sdw_bpt_stream)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = hda_cl_trigger(sdev->dev, sdw_bpt_stream, SNDRV_PCM_TRIGGER_START);
+ if (ret < 0)
+ dev_err(sdev->dev, "%s: SDW BPT DMA trigger start failed\n", __func__);
+
+ if (!sdev->dspless_mode_selected) {
+ /* the chain DMA needs to be programmed before the DMAs */
+ ret = chain_dma_trigger(sdev, hdac_stream(sdw_bpt_stream)->stream_tag,
+ hdac_stream(sdw_bpt_stream)->direction,
+ SOF_IPC4_PIPE_RUNNING);
+ if (ret < 0) {
+ dev_err(sdev->dev, "%s: chain_dma_trigger failed: %d\n",
+ __func__, ret);
+ hda_cl_trigger(sdev->dev, sdw_bpt_stream, SNDRV_PCM_TRIGGER_STOP);
+ return ret;
+ }
+ snd_hdac_ext_stream_start(sdw_bpt_stream);
+ }
+
+ return ret;
+}
+
+static int hda_sdw_bpt_dma_disable(struct device *dev, struct hdac_ext_stream *sdw_bpt_stream)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ int ret;
+
+ if (!sdev->dspless_mode_selected) {
+ snd_hdac_ext_stream_clear(sdw_bpt_stream);
+
+ ret = chain_dma_trigger(sdev, hdac_stream(sdw_bpt_stream)->stream_tag,
+ hdac_stream(sdw_bpt_stream)->direction,
+ SOF_IPC4_PIPE_PAUSED);
+ if (ret < 0)
+ dev_err(sdev->dev, "%s: chain_dma_trigger PIPE_PAUSED failed: %d\n",
+ __func__, ret);
+ }
+
+ ret = hda_cl_trigger(sdev->dev, sdw_bpt_stream, SNDRV_PCM_TRIGGER_STOP);
+ if (ret < 0)
+ dev_err(sdev->dev, "%s: SDW BPT DMA trigger stop failed\n", __func__);
+
+ return ret;
+}
+
+int hda_sdw_bpt_open(struct device *dev, int link_id, struct hdac_ext_stream **bpt_tx_stream,
+ struct snd_dma_buffer *dmab_tx_bdl, u32 bpt_tx_num_bytes,
+ u32 tx_dma_bandwidth, struct hdac_ext_stream **bpt_rx_stream,
+ struct snd_dma_buffer *dmab_rx_bdl, u32 bpt_rx_num_bytes,
+ u32 rx_dma_bandwidth)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ unsigned int num_channels_tx;
+ unsigned int num_channels_rx;
+ int ret1;
+ int ret;
+
+ num_channels_tx = DIV_ROUND_UP(tx_dma_bandwidth, BPT_FREQUENCY * 32);
+
+ ret = hda_sdw_bpt_dma_prepare(dev, bpt_tx_stream, dmab_tx_bdl, bpt_tx_num_bytes,
+ num_channels_tx, SNDRV_PCM_STREAM_PLAYBACK);
+ if (ret < 0) {
+ dev_err(dev, "%s: hda_sdw_bpt_dma_prepare failed for TX: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ num_channels_rx = DIV_ROUND_UP(rx_dma_bandwidth, BPT_FREQUENCY * 32);
+
+ ret = hda_sdw_bpt_dma_prepare(dev, bpt_rx_stream, dmab_rx_bdl, bpt_rx_num_bytes,
+ num_channels_rx, SNDRV_PCM_STREAM_CAPTURE);
+ if (ret < 0) {
+ dev_err(dev, "%s: hda_sdw_bpt_dma_prepare failed for RX: %d\n",
+ __func__, ret);
+
+ ret1 = hda_sdw_bpt_dma_deprepare(dev, *bpt_tx_stream, dmab_tx_bdl);
+ if (ret1 < 0)
+ dev_err(dev, "%s: hda_sdw_bpt_dma_deprepare failed for TX: %d\n",
+ __func__, ret1);
+ return ret;
+ }
+
+ /* we need to map the channels in PCMSyCM registers */
+ ret = hdac_bus_eml_sdw_map_stream_ch(sof_to_bus(sdev), link_id,
+ 0, /* cpu_dai->id -> PDI0 */
+ GENMASK(num_channels_tx - 1, 0),
+ hdac_stream(*bpt_tx_stream)->stream_tag,
+ SNDRV_PCM_STREAM_PLAYBACK);
+ if (ret < 0) {
+ dev_err(dev, "%s: hdac_bus_eml_sdw_map_stream_ch failed for TX: %d\n",
+ __func__, ret);
+ goto close;
+ }
+
+ ret = hdac_bus_eml_sdw_map_stream_ch(sof_to_bus(sdev), link_id,
+ 1, /* cpu_dai->id -> PDI1 */
+ GENMASK(num_channels_rx - 1, 0),
+ hdac_stream(*bpt_rx_stream)->stream_tag,
+ SNDRV_PCM_STREAM_CAPTURE);
+ if (!ret)
+ return 0;
+
+ dev_err(dev, "%s: hdac_bus_eml_sdw_map_stream_ch failed for RX: %d\n",
+ __func__, ret);
+
+close:
+ ret1 = hda_sdw_bpt_close(dev, *bpt_tx_stream, dmab_tx_bdl, *bpt_rx_stream, dmab_rx_bdl);
+ if (ret1 < 0)
+ dev_err(dev, "%s: hda_sdw_bpt_close failed: %d\n",
+ __func__, ret1);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS(hda_sdw_bpt_open, "SND_SOC_SOF_INTEL_HDA_SDW_BPT");
+
+int hda_sdw_bpt_send_async(struct device *dev, struct hdac_ext_stream *bpt_tx_stream,
+ struct hdac_ext_stream *bpt_rx_stream)
+{
+ int ret1;
+ int ret;
+
+ ret = hda_sdw_bpt_dma_enable(dev, bpt_tx_stream);
+ if (ret < 0) {
+ dev_err(dev, "%s: hda_sdw_bpt_dma_enable failed for TX: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = hda_sdw_bpt_dma_enable(dev, bpt_rx_stream);
+ if (ret < 0) {
+ dev_err(dev, "%s: hda_sdw_bpt_dma_enable failed for RX: %d\n",
+ __func__, ret);
+
+ ret1 = hda_sdw_bpt_dma_disable(dev, bpt_tx_stream);
+ if (ret1 < 0)
+ dev_err(dev, "%s: hda_sdw_bpt_dma_disable failed for TX: %d\n",
+ __func__, ret1);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_NS(hda_sdw_bpt_send_async, "SND_SOC_SOF_INTEL_HDA_SDW_BPT");
+
+/*
+ * 3s is several orders of magnitude larger than what is needed for a
+ * typical firmware download.
+ */
+#define HDA_BPT_IOC_TIMEOUT_MS 3000
+
+int hda_sdw_bpt_wait(struct device *dev, struct hdac_ext_stream *bpt_tx_stream,
+ struct hdac_ext_stream *bpt_rx_stream)
+{
+ struct sof_intel_hda_stream *hda_tx_stream;
+ struct sof_intel_hda_stream *hda_rx_stream;
+ snd_pcm_uframes_t tx_position;
+ snd_pcm_uframes_t rx_position;
+ unsigned long time_tx_left;
+ unsigned long time_rx_left;
+ int ret = 0;
+ int ret1;
+ int i;
+
+ hda_tx_stream = container_of(bpt_tx_stream, struct sof_intel_hda_stream, hext_stream);
+ hda_rx_stream = container_of(bpt_rx_stream, struct sof_intel_hda_stream, hext_stream);
+
+ time_tx_left = wait_for_completion_timeout(&hda_tx_stream->ioc,
+ msecs_to_jiffies(HDA_BPT_IOC_TIMEOUT_MS));
+ if (!time_tx_left) {
+ tx_position = hda_dsp_stream_get_position(hdac_stream(bpt_tx_stream),
+ SNDRV_PCM_STREAM_PLAYBACK, false);
+ dev_err(dev, "%s: SDW BPT TX DMA did not complete: %ld\n",
+ __func__, tx_position);
+ ret = -ETIMEDOUT;
+ goto dma_disable;
+ }
+
+ /* Make sure the DMA is flushed */
+ i = 0;
+ do {
+ tx_position = hda_dsp_stream_get_position(hdac_stream(bpt_tx_stream),
+ SNDRV_PCM_STREAM_PLAYBACK, false);
+ usleep_range(1000, 1010);
+ i++;
+ } while (tx_position && i < HDA_BPT_IOC_TIMEOUT_MS);
+ if (tx_position) {
+ dev_err(dev, "%s: SDW BPT TX DMA position %ld was not cleared\n",
+ __func__, tx_position);
+ ret = -ETIMEDOUT;
+ goto dma_disable;
+ }
+
+ /* the wait should be minimal here */
+ time_rx_left = wait_for_completion_timeout(&hda_rx_stream->ioc,
+ msecs_to_jiffies(HDA_BPT_IOC_TIMEOUT_MS));
+ if (!time_rx_left) {
+ rx_position = hda_dsp_stream_get_position(hdac_stream(bpt_rx_stream),
+ SNDRV_PCM_STREAM_CAPTURE, false);
+ dev_err(dev, "%s: SDW BPT RX DMA did not complete: %ld\n",
+ __func__, rx_position);
+ ret = -ETIMEDOUT;
+ goto dma_disable;
+ }
+
+ /* Make sure the DMA is flushed */
+ i = 0;
+ do {
+ rx_position = hda_dsp_stream_get_position(hdac_stream(bpt_rx_stream),
+ SNDRV_PCM_STREAM_CAPTURE, false);
+ usleep_range(1000, 1010);
+ i++;
+ } while (rx_position && i < HDA_BPT_IOC_TIMEOUT_MS);
+ if (rx_position) {
+ dev_err(dev, "%s: SDW BPT RX DMA position %ld was not cleared\n",
+ __func__, rx_position);
+ ret = -ETIMEDOUT;
+ goto dma_disable;
+ }
+
+dma_disable:
+ ret1 = hda_sdw_bpt_dma_disable(dev, bpt_rx_stream);
+ if (!ret)
+ ret = ret1;
+
+ ret1 = hda_sdw_bpt_dma_disable(dev, bpt_tx_stream);
+ if (!ret)
+ ret = ret1;
+
+ return ret;
+}
+EXPORT_SYMBOL_NS(hda_sdw_bpt_wait, "SND_SOC_SOF_INTEL_HDA_SDW_BPT");
+
+int hda_sdw_bpt_close(struct device *dev, struct hdac_ext_stream *bpt_tx_stream,
+ struct snd_dma_buffer *dmab_tx_bdl, struct hdac_ext_stream *bpt_rx_stream,
+ struct snd_dma_buffer *dmab_rx_bdl)
+{
+ int ret;
+ int ret1;
+
+ ret = hda_sdw_bpt_dma_deprepare(dev, bpt_rx_stream, dmab_rx_bdl);
+
+ ret1 = hda_sdw_bpt_dma_deprepare(dev, bpt_tx_stream, dmab_tx_bdl);
+ if (!ret)
+ ret = ret1;
+
+ return ret;
+}
+EXPORT_SYMBOL_NS(hda_sdw_bpt_close, "SND_SOC_SOF_INTEL_HDA_SDW_BPT");
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SOF helpers for HDaudio SoundWire BPT");
+MODULE_IMPORT_NS("SND_SOC_SOF_INTEL_HDA_COMMON");
+MODULE_IMPORT_NS("SND_SOC_SOF_HDA_MLINK");
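
The new file exports a four-call API for BPT/BRA transfers over HDA DMA. A hedged sketch of the expected call order from a SoundWire firmware-download path; buffer sizes and bandwidth values are placeholders, and the caller's module must MODULE_IMPORT_NS("SND_SOC_SOF_INTEL_HDA_SDW_BPT"):

#include <linux/device.h>
#include <sound/hda-sdw-bpt.h>

static int do_bpt_transfer(struct device *dev, int link_id)
{
        struct hdac_ext_stream *tx, *rx;
        struct snd_dma_buffer tx_bdl, rx_bdl;
        int ret;

        ret = hda_sdw_bpt_open(dev, link_id,
                               &tx, &tx_bdl, 65536, 96000 * 32, /* TX bytes, bps */
                               &rx, &rx_bdl, 65536, 96000 * 32); /* RX bytes, bps */
        if (ret)
                return ret;

        /* ... fill the TX buffer with the BPT payload here ... */

        ret = hda_sdw_bpt_send_async(dev, tx, rx);
        if (!ret)
                ret = hda_sdw_bpt_wait(dev, tx, rx);    /* waits for both IOCs */

        hda_sdw_bpt_close(dev, tx, &tx_bdl, rx, &rx_bdl);
        return ret;
}
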
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index 6b1ada566476..b34e5fdf10f1 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -968,6 +968,10 @@ void hda_dsp_remove(struct snd_sof_dev *sdev)
if (sdev->dspless_mode_selected)
goto skip_disable_dsp;
+ /* Cancel the microphone privacy work if mic privacy is active */
+ if (hda->mic_privacy.active)
+ cancel_work_sync(&hda->mic_privacy.work);
+
/* no need to check for error as the DSP will be disabled anyway */
if (chip && chip->power_down_dsp)
chip->power_down_dsp(sdev);
diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h
index 76154627fc17..108cad04879e 100644
--- a/sound/soc/sof/intel/hda.h
+++ b/sound/soc/sof/intel/hda.h
@@ -487,6 +487,11 @@ enum sof_hda_D0_substate {
SOF_HDA_DSP_PM_D0I3, /* low power D0 substate */
};
+struct sof_ace3_mic_privacy {
+ bool active;
+ struct work_struct work;
+};
+
/* represents DSP HDA controller frontend - i.e. host facing control */
struct sof_intel_hda_dev {
bool imrboot_supported;
@@ -542,6 +547,9 @@ struct sof_intel_hda_dev {
/* Intel NHLT information */
struct nhlt_acpi_table *nhlt;
+ /* work queue for mic privacy state change notification sending */
+ struct sof_ace3_mic_privacy mic_privacy;
+
/*
* Pointing to the IPC message if immediate sending was not possible
* because the downlink communication channel was BUSY at the time.
diff --git a/sound/soc/sof/intel/ptl.c b/sound/soc/sof/intel/ptl.c
index 8fa4bdceedd9..aa0b772178bc 100644
--- a/sound/soc/sof/intel/ptl.c
+++ b/sound/soc/sof/intel/ptl.c
@@ -27,22 +27,44 @@ static bool sof_ptl_check_mic_privacy_irq(struct snd_sof_dev *sdev, bool alt,
return hdac_bus_eml_is_mic_privacy_changed(sof_to_bus(sdev), alt, elid);
}
+static void sof_ptl_mic_privacy_work(struct work_struct *work)
+{
+ struct sof_intel_hda_dev *hdev = container_of(work,
+ struct sof_intel_hda_dev,
+ mic_privacy.work);
+ struct hdac_bus *bus = &hdev->hbus.core;
+ struct snd_sof_dev *sdev = dev_get_drvdata(bus->dev);
+ bool state;
+
+ /*
+ * The microphone privacy state is only available via Soundwire shim
+	 * in PTL.
+ * The work is only scheduled on change.
+ */
+ state = hdac_bus_eml_get_mic_privacy_state(bus, 1,
+ AZX_REG_ML_LEPTR_ID_SDW);
+ sof_ipc4_mic_privacy_state_change(sdev, state);
+}
+
static void sof_ptl_process_mic_privacy(struct snd_sof_dev *sdev, bool alt,
int elid)
{
- bool state;
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
if (!alt || elid != AZX_REG_ML_LEPTR_ID_SDW)
return;
- state = hdac_bus_eml_get_mic_privacy_state(sof_to_bus(sdev), alt, elid);
-
- sof_ipc4_mic_privacy_state_change(sdev, state);
+ /*
+ * Schedule the work to read the microphone privacy state and send IPC
+ * message about the new state to the firmware
+ */
+ schedule_work(&hdev->mic_privacy.work);
}
static void sof_ptl_set_mic_privacy(struct snd_sof_dev *sdev,
struct sof_ipc4_intel_mic_privacy_cap *caps)
{
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
u32 micpvcp;
if (!caps || !caps->capabilities_length)
@@ -58,6 +80,9 @@ static void sof_ptl_set_mic_privacy(struct snd_sof_dev *sdev,
hdac_bus_eml_set_mic_privacy_mask(sof_to_bus(sdev), true,
AZX_REG_ML_LEPTR_ID_SDW,
PTL_MICPVCP_GET_SDW_MASK(micpvcp));
+
+ INIT_WORK(&hdev->mic_privacy.work, sof_ptl_mic_privacy_work);
+ hdev->mic_privacy.active = true;
}
int sof_ptl_set_ops(struct snd_sof_dev *sdev, struct snd_sof_dsp_ops *dsp_ops)
diff --git a/sound/virtio/virtio_pcm.c b/sound/virtio/virtio_pcm.c
index 967e4c45be9b..2f7c5e709f07 100644
--- a/sound/virtio/virtio_pcm.c
+++ b/sound/virtio/virtio_pcm.c
@@ -339,6 +339,21 @@ int virtsnd_pcm_parse_cfg(struct virtio_snd *snd)
if (!snd->substreams)
return -ENOMEM;
+ /*
+ * Initialize critical substream fields early in case we hit an
+ * error path and end up trying to clean up uninitialized structures
+ * elsewhere.
+ */
+ for (i = 0; i < snd->nsubstreams; ++i) {
+ struct virtio_pcm_substream *vss = &snd->substreams[i];
+
+ vss->snd = snd;
+ vss->sid = i;
+ INIT_WORK(&vss->elapsed_period, virtsnd_pcm_period_elapsed);
+ init_waitqueue_head(&vss->msg_empty);
+ spin_lock_init(&vss->lock);
+ }
+
info = kcalloc(snd->nsubstreams, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -352,12 +367,6 @@ int virtsnd_pcm_parse_cfg(struct virtio_snd *snd)
struct virtio_pcm_substream *vss = &snd->substreams[i];
struct virtio_pcm *vpcm;
- vss->snd = snd;
- vss->sid = i;
- INIT_WORK(&vss->elapsed_period, virtsnd_pcm_period_elapsed);
- init_waitqueue_head(&vss->msg_empty);
- spin_lock_init(&vss->lock);
-
rc = virtsnd_pcm_build_hw(vss, &info[i]);
if (rc)
goto on_exit;
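
The virtio_pcm change encodes a general ordering rule: initialize anything a teardown path might touch before the first failure point, so error unwinding never sees half-initialized state. A minimal sketch with hypothetical acme_* types:

#include <linux/spinlock.h>
#include <linux/types.h>

struct acme_substream {
        struct acme_snd *snd;
        u32 sid;
        spinlock_t lock;
};

struct acme_snd {
        u32 nsubstreams;
        struct acme_substream *substreams;
};

int acme_query_device(struct acme_snd *snd); /* can fail, may trigger teardown */

static int acme_parse_cfg(struct acme_snd *snd)
{
        /* 1: make every substream safe to tear down, unconditionally */
        for (u32 i = 0; i < snd->nsubstreams; ++i) {
                struct acme_substream *ss = &snd->substreams[i];

                ss->snd = snd;
                ss->sid = i;
                spin_lock_init(&ss->lock);
        }

        /* 2: only now do the work that can fail */
        return acme_query_device(snd);
}
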
diff --git a/tools/counter/.gitignore b/tools/counter/.gitignore
index 9fd290d4bf43..22d8727d2696 100644
--- a/tools/counter/.gitignore
+++ b/tools/counter/.gitignore
@@ -1,2 +1,3 @@
/counter_example
+/counter_watch_events
/include/linux/counter.h
diff --git a/tools/counter/counter_watch_events.c b/tools/counter/counter_watch_events.c
index 107631e0f2e3..15e21b0c5ffd 100644
--- a/tools/counter/counter_watch_events.c
+++ b/tools/counter/counter_watch_events.c
@@ -38,6 +38,7 @@ static const char * const counter_event_type_name[] = {
"COUNTER_EVENT_INDEX",
"COUNTER_EVENT_CHANGE_OF_STATE",
"COUNTER_EVENT_CAPTURE",
+ "COUNTER_EVENT_DIRECTION_CHANGE",
};
static const char * const counter_component_type_name[] = {
@@ -118,6 +119,7 @@ static void print_usage(void)
" evt_index (COUNTER_EVENT_INDEX)\n"
" evt_change_of_state (COUNTER_EVENT_CHANGE_OF_STATE)\n"
" evt_capture (COUNTER_EVENT_CAPTURE)\n"
+ " evt_direction_change (COUNTER_EVENT_DIRECTION_CHANGE)\n"
"\n"
" chan=<n> channel <n> for this watch [default: 0]\n"
" id=<n> component id <n> for this watch [default: 0]\n"
@@ -157,6 +159,7 @@ enum {
WATCH_EVENT_INDEX,
WATCH_EVENT_CHANGE_OF_STATE,
WATCH_EVENT_CAPTURE,
+ WATCH_EVENT_DIRECTION_CHANGE,
WATCH_CHANNEL,
WATCH_ID,
WATCH_PARENT,
@@ -183,6 +186,7 @@ static char * const counter_watch_subopts[WATCH_SUBOPTS_MAX + 1] = {
[WATCH_EVENT_INDEX] = "evt_index",
[WATCH_EVENT_CHANGE_OF_STATE] = "evt_change_of_state",
[WATCH_EVENT_CAPTURE] = "evt_capture",
+ [WATCH_EVENT_DIRECTION_CHANGE] = "evt_direction_change",
/* channel, id, parent */
[WATCH_CHANNEL] = "chan",
[WATCH_ID] = "id",
@@ -278,6 +282,7 @@ int main(int argc, char **argv)
case WATCH_EVENT_INDEX:
case WATCH_EVENT_CHANGE_OF_STATE:
case WATCH_EVENT_CAPTURE:
+ case WATCH_EVENT_DIRECTION_CHANGE:
/* match counter_event_type: subtract enum value */
ret -= WATCH_EVENT_OVERFLOW;
watches[i].event = ret;
diff --git a/tools/iio/iio_event_monitor.c b/tools/iio/iio_event_monitor.c
index cccf62ea2b8f..eab7b082f19d 100644
--- a/tools/iio/iio_event_monitor.c
+++ b/tools/iio/iio_event_monitor.c
@@ -75,6 +75,7 @@ static const char * const iio_ev_type_text[] = {
[IIO_EV_TYPE_CHANGE] = "change",
[IIO_EV_TYPE_MAG_REFERENCED] = "mag_referenced",
[IIO_EV_TYPE_GESTURE] = "gesture",
+ [IIO_EV_TYPE_FAULT] = "fault",
};
static const char * const iio_ev_dir_text[] = {
@@ -83,6 +84,7 @@ static const char * const iio_ev_dir_text[] = {
[IIO_EV_DIR_FALLING] = "falling",
[IIO_EV_DIR_SINGLETAP] = "singletap",
[IIO_EV_DIR_DOUBLETAP] = "doubletap",
+ [IIO_EV_DIR_FAULT_OPENWIRE] = "openwire",
};
static const char * const iio_modifier_names[] = {
@@ -249,6 +251,7 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_EV_TYPE_MAG_ADAPTIVE:
case IIO_EV_TYPE_CHANGE:
case IIO_EV_TYPE_GESTURE:
+ case IIO_EV_TYPE_FAULT:
break;
default:
return false;
@@ -260,6 +263,7 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_EV_DIR_FALLING:
case IIO_EV_DIR_SINGLETAP:
case IIO_EV_DIR_DOUBLETAP:
+ case IIO_EV_DIR_FAULT_OPENWIRE:
case IIO_EV_DIR_NONE:
break;
default:
diff --git a/tools/include/asm/timex.h b/tools/include/asm/timex.h
new file mode 100644
index 000000000000..5adfe3c6d326
--- /dev/null
+++ b/tools/include/asm/timex.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __TOOLS_LINUX_ASM_TIMEX_H
+#define __TOOLS_LINUX_ASM_TIMEX_H
+
+#include <time.h>
+
+#define cycles_t clock_t
+
+static inline cycles_t get_cycles(void)
+{
+ return clock();
+}
+#endif // __TOOLS_LINUX_ASM_TIMEX_H
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index 2a7f260ef9dc..d4d300040d01 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -19,6 +19,7 @@ bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits);
bool __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits);
+void __bitmap_set(unsigned long *map, unsigned int start, int len);
void __bitmap_clear(unsigned long *map, unsigned int start, int len);
bool __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits);
@@ -79,6 +80,11 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
__bitmap_or(dst, src1, src2, nbits);
}
+static inline unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags __maybe_unused)
+{
+ return malloc(bitmap_size(nbits));
+}
+
/**
* bitmap_zalloc - Allocate bitmap
* @nbits: Number of bits
@@ -150,6 +156,21 @@ static inline bool bitmap_intersects(const unsigned long *src1,
return __bitmap_intersects(src1, src2, nbits);
}
+static inline void bitmap_set(unsigned long *map, unsigned int start, unsigned int nbits)
+{
+ if (__builtin_constant_p(nbits) && nbits == 1)
+ __set_bit(start, map);
+ else if (small_const_nbits(start + nbits))
+ *map |= GENMASK(start + nbits - 1, start);
+ else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
+ IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
+ __builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
+ IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
+ memset((char *)map + start / 8, 0xff, nbits / 8);
+ else
+ __bitmap_set(map, start, nbits);
+}
+
static inline void bitmap_clear(unsigned long *map, unsigned int start,
unsigned int nbits)
{
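
With __bitmap_set() exported to tools/ and the bitmap_alloc()/bitmap_set() wrappers in place, userspace selftests can fill bitmaps with the same calls as kernel code. A small usage sketch; link against tools/lib/bitmap.c, and note the gfp argument is ignored in the tools build:

#include <linux/bitmap.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        /* bitmap_alloc() returns uninitialized memory, so clear it first */
        unsigned long *map = bitmap_alloc(128, 0);

        if (!map)
                return 1;
        bitmap_clear(map, 0, 128);

        bitmap_set(map, 10, 20);        /* set bits 10..29 */
        printf("bit 15: %d, bit 40: %d\n",
               !!test_bit(15, map), !!test_bit(40, map));

        free(map);
        return 0;
}
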
diff --git a/tools/include/linux/container_of.h b/tools/include/linux/container_of.h
new file mode 100644
index 000000000000..c879e14c3dd6
--- /dev/null
+++ b/tools/include/linux/container_of.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOOLS_LINUX_CONTAINER_OF_H
+#define _TOOLS_LINUX_CONTAINER_OF_H
+
+#ifndef container_of
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({ \
+ const typeof(((type *)0)->member) * __mptr = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
+#endif
+
+#endif /* _TOOLS_LINUX_CONTAINER_OF_H */
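
container_of() now lives in its own tools header, mirroring the kernel-side split; it recovers the enclosing structure's address from a pointer to one of its members. A quick standalone illustration:

#include <linux/container_of.h>
#include <stddef.h>
#include <stdio.h>

struct widget {
        int id;
        struct point { int x, y; } pos; /* embedded member */
};

int main(void)
{
        struct widget w = { .id = 7 };
        struct widget *back = container_of(&w.pos, struct widget, pos);

        printf("%d\n", back->id);       /* prints 7 */
        return 0;
}
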
diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h
index 07cfad817d53..c8c18d3908a9 100644
--- a/tools/include/linux/kernel.h
+++ b/tools/include/linux/kernel.h
@@ -11,6 +11,7 @@
#include <linux/panic.h>
#include <endian.h>
#include <byteswap.h>
+#include <linux/container_of.h>
#ifndef UINT_MAX
#define UINT_MAX (~0U)
@@ -25,19 +26,6 @@
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif
-#ifndef container_of
-/**
- * container_of - cast a member of a structure out to the containing structure
- * @ptr: the pointer to the member.
- * @type: the type of the container struct this is embedded in.
- * @member: the name of the member within the struct.
- *
- */
-#define container_of(ptr, type, member) ({ \
- const typeof(((type *)0)->member) * __mptr = (ptr); \
- (type *)((char *)__mptr - offsetof(type, member)); })
-#endif
-
#ifndef max
#define max(x, y) ({ \
typeof(x) _max1 = (x); \
diff --git a/tools/include/linux/math64.h b/tools/include/linux/math64.h
index 4ad45d5943dc..8a67d478bf19 100644
--- a/tools/include/linux/math64.h
+++ b/tools/include/linux/math64.h
@@ -72,4 +72,9 @@ static inline u64 mul_u64_u64_div64(u64 a, u64 b, u64 c)
}
#endif
+static inline u64 div_u64(u64 dividend, u32 divisor)
+{
+ return dividend / divisor;
+}
+
#endif /* _LINUX_MATH64_H */
diff --git a/tools/include/linux/moduleparam.h b/tools/include/linux/moduleparam.h
new file mode 100644
index 000000000000..4c4d05bef0cb
--- /dev/null
+++ b/tools/include/linux/moduleparam.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOOLS_LINUX_MODULE_PARAMS_H
+#define _TOOLS_LINUX_MODULE_PARAMS_H
+
+#define MODULE_PARM_DESC(parm, desc)
+
+#endif // _TOOLS_LINUX_MODULE_PARAMS_H
diff --git a/tools/include/linux/prandom.h b/tools/include/linux/prandom.h
new file mode 100644
index 000000000000..b745041ccd6a
--- /dev/null
+++ b/tools/include/linux/prandom.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __TOOLS_LINUX_PRANDOM_H
+#define __TOOLS_LINUX_PRANDOM_H
+
+#include <linux/types.h>
+
+struct rnd_state {
+ __u32 s1, s2, s3, s4;
+};
+
+/*
+ * Handle minimum values for seeds
+ */
+static inline u32 __seed(u32 x, u32 m)
+{
+ return (x < m) ? x + m : x;
+}
+
+/**
+ * prandom_seed_state - set seed for prandom_u32_state().
+ * @state: pointer to state structure to receive the seed.
+ * @seed: arbitrary 64-bit value to use as a seed.
+ */
+static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
+{
+ u32 i = ((seed >> 32) ^ (seed << 10) ^ seed) & 0xffffffffUL;
+
+ state->s1 = __seed(i, 2U);
+ state->s2 = __seed(i, 8U);
+ state->s3 = __seed(i, 16U);
+ state->s4 = __seed(i, 128U);
+}
+
+/**
+ * prandom_u32_state - seeded pseudo-random number generator.
+ * @state: pointer to state structure holding seeded state.
+ *
+ * This is used for pseudo-randomness with no outside seeding.
+ * For more random results, use get_random_u32().
+ */
+static inline u32 prandom_u32_state(struct rnd_state *state)
+{
+#define TAUSWORTHE(s, a, b, c, d) (((s & c) << d) ^ (((s << a) ^ s) >> b))
+ state->s1 = TAUSWORTHE(state->s1, 6U, 13U, 4294967294U, 18U);
+ state->s2 = TAUSWORTHE(state->s2, 2U, 27U, 4294967288U, 2U);
+ state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U, 7U);
+ state->s4 = TAUSWORTHE(state->s4, 3U, 12U, 4294967168U, 13U);
+
+ return (state->s1 ^ state->s2 ^ state->s3 ^ state->s4);
+}
+#endif // __TOOLS_LINUX_PRANDOM_H
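
The tools prandom.h clone gives tests a deterministic PRNG: the same seed always produces the same sequence, which keeps fuzz-style selftests reproducible. Example:

#include <linux/prandom.h>
#include <stdio.h>

int main(void)
{
        struct rnd_state state;

        prandom_seed_state(&state, 42); /* deterministic: same seed, same stream */
        for (int i = 0; i < 4; i++)
                printf("%u\n", prandom_u32_state(&state));
        return 0;
}
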
diff --git a/tools/include/linux/refcount.h b/tools/include/linux/refcount.h
index 36cb29bc57c2..1f30956e070d 100644
--- a/tools/include/linux/refcount.h
+++ b/tools/include/linux/refcount.h
@@ -60,6 +60,11 @@ static inline void refcount_set(refcount_t *r, unsigned int n)
atomic_set(&r->refs, n);
}
+static inline void refcount_set_release(refcount_t *r, unsigned int n)
+{
+ atomic_set(&r->refs, n);
+}
+
static inline unsigned int refcount_read(const refcount_t *r)
{
return atomic_read(&r->refs);
diff --git a/tools/include/linux/slab.h b/tools/include/linux/slab.h
index 51b25e9c4ec7..c87051e2b26f 100644
--- a/tools/include/linux/slab.h
+++ b/tools/include/linux/slab.h
@@ -12,6 +12,7 @@
void *kmalloc(size_t size, gfp_t gfp);
void kfree(void *p);
+void *kmalloc_array(size_t n, size_t size, gfp_t gfp);
bool slab_is_available(void);
diff --git a/tools/include/linux/types.h b/tools/include/linux/types.h
index 8519386acd23..4928e33d44ac 100644
--- a/tools/include/linux/types.h
+++ b/tools/include/linux/types.h
@@ -42,6 +42,8 @@ typedef __s16 s16;
typedef __u8 u8;
typedef __s8 s8;
+typedef unsigned long long ullong;
+
#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#else
diff --git a/tools/lib/bitmap.c b/tools/lib/bitmap.c
index 2178862bb114..51255c69754d 100644
--- a/tools/lib/bitmap.c
+++ b/tools/lib/bitmap.c
@@ -101,6 +101,26 @@ bool __bitmap_intersects(const unsigned long *bitmap1,
return false;
}
+void __bitmap_set(unsigned long *map, unsigned int start, int len)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const unsigned int size = start + len;
+ int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+
+ while (len - bits_to_set >= 0) {
+ *p |= mask_to_set;
+ len -= bits_to_set;
+ bits_to_set = BITS_PER_LONG;
+ mask_to_set = ~0UL;
+ p++;
+ }
+ if (len) {
+ mask_to_set &= BITMAP_LAST_WORD_MASK(size);
+ *p |= mask_to_set;
+ }
+}
+
void __bitmap_clear(unsigned long *map, unsigned int start, int len)
{
unsigned long *p = map + BIT_WORD(start);
diff --git a/tools/lib/slab.c b/tools/lib/slab.c
index 959997fb0652..981a21404f32 100644
--- a/tools/lib/slab.c
+++ b/tools/lib/slab.c
@@ -36,3 +36,19 @@ void kfree(void *p)
printf("Freeing %p to malloc\n", p);
free(p);
}
+
+void *kmalloc_array(size_t n, size_t size, gfp_t gfp)
+{
+ void *ret;
+
+ if (!(gfp & __GFP_DIRECT_RECLAIM))
+ return NULL;
+
+ ret = calloc(n, size);
+ uatomic_inc(&kmalloc_nr_allocated);
+ if (kmalloc_verbose)
+ printf("Allocating %p from calloc\n", ret);
+ if (gfp & __GFP_ZERO)
+ memset(ret, 0, n * size);
+ return ret;
+}
diff --git a/tools/objtool/Documentation/objtool.txt b/tools/objtool/Documentation/objtool.txt
index 28ac57b9e102..9e97fc25b2d8 100644
--- a/tools/objtool/Documentation/objtool.txt
+++ b/tools/objtool/Documentation/objtool.txt
@@ -34,7 +34,7 @@ Objtool has the following features:
- Return thunk annotation -- annotates all return thunk sites so kernel
can patch them inline, depending on enabled mitigations
-- Return thunk training valiation -- validate that all entry paths
+- Return thunk untraining validation -- validate that all entry paths
untrain a "safe return" before the first return (or call)
- Non-instrumentation validation -- validates non-instrumentable
@@ -281,8 +281,8 @@ the objtool maintainers.
If the error is for an asm file, and func() is indeed a callable
function, add proper frame pointer logic using the FRAME_BEGIN and
FRAME_END macros. Otherwise, if it's not a callable function, remove
- its ELF function annotation by changing ENDPROC to END, and instead
- use the manual unwind hint macros in asm/unwind_hints.h.
+ its ELF function annotation by using SYM_CODE_{START,END} and use the
+ manual unwind hint macros in asm/unwind_hints.h.
If it's a GCC-compiled .c file, the error may be because the function
uses an inline asm() statement which has a "call" instruction. An
@@ -352,7 +352,7 @@ the objtool maintainers.
This is a kernel entry/exit instruction like sysenter or iret. Such
instructions aren't allowed in a callable function, and are most
likely part of the kernel entry code. Such code should probably be
- placed in a SYM_FUNC_CODE block with unwind hints.
+ placed in a SYM_CODE_{START,END} block with unwind hints.
6. file.o: warning: objtool: func()+0x26: sibling call from callable instruction with modified stack frame
@@ -381,7 +381,7 @@ the objtool maintainers.
Another possibility is that the code has some asm or inline asm which
does some unusual things to the stack or the frame pointer. In such
- cases it's probably appropriate to use SYM_FUNC_CODE with unwind
+ cases it's probably appropriate to use SYM_CODE_{START,END} with unwind
hints.
diff --git a/tools/objtool/arch/loongarch/decode.c b/tools/objtool/arch/loongarch/decode.c
index 02e490555966..b6fdc68053cc 100644
--- a/tools/objtool/arch/loongarch/decode.c
+++ b/tools/objtool/arch/loongarch/decode.c
@@ -63,7 +63,7 @@ static bool is_loongarch(const struct elf *elf)
if (elf->ehdr.e_machine == EM_LOONGARCH)
return true;
- WARN("unexpected ELF machine type %d", elf->ehdr.e_machine);
+ ERROR("unexpected ELF machine type %d", elf->ehdr.e_machine);
return false;
}
@@ -327,8 +327,10 @@ const char *arch_nop_insn(int len)
{
static u32 nop;
- if (len != LOONGARCH_INSN_SIZE)
- WARN("invalid NOP size: %d\n", len);
+ if (len != LOONGARCH_INSN_SIZE) {
+ ERROR("invalid NOP size: %d\n", len);
+ return NULL;
+ }
nop = LOONGARCH_INSN_NOP;
@@ -339,8 +341,10 @@ const char *arch_ret_insn(int len)
{
static u32 ret;
- if (len != LOONGARCH_INSN_SIZE)
- WARN("invalid RET size: %d\n", len);
+ if (len != LOONGARCH_INSN_SIZE) {
+ ERROR("invalid RET size: %d\n", len);
+ return NULL;
+ }
emit_jirl((union loongarch_instruction *)&ret, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 0);
diff --git a/tools/objtool/arch/loongarch/orc.c b/tools/objtool/arch/loongarch/orc.c
index 873536d009d9..b58c5ff443c9 100644
--- a/tools/objtool/arch/loongarch/orc.c
+++ b/tools/objtool/arch/loongarch/orc.c
@@ -41,7 +41,7 @@ int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruct
orc->type = ORC_TYPE_REGS_PARTIAL;
break;
default:
- WARN_INSN(insn, "unknown unwind hint type %d", cfi->type);
+ ERROR_INSN(insn, "unknown unwind hint type %d", cfi->type);
return -1;
}
@@ -55,7 +55,7 @@ int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruct
orc->sp_reg = ORC_REG_FP;
break;
default:
- WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base);
+ ERROR_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base);
return -1;
}
@@ -72,7 +72,7 @@ int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruct
orc->fp_reg = ORC_REG_FP;
break;
default:
- WARN_INSN(insn, "unknown FP base reg %d", fp->base);
+ ERROR_INSN(insn, "unknown FP base reg %d", fp->base);
return -1;
}
@@ -89,7 +89,7 @@ int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruct
orc->ra_reg = ORC_REG_FP;
break;
default:
- WARN_INSN(insn, "unknown RA base reg %d", ra->base);
+ ERROR_INSN(insn, "unknown RA base reg %d", ra->base);
return -1;
}
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 7567c893f45e..33d861c04ebd 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -36,7 +36,7 @@ static int is_x86_64(const struct elf *elf)
case EM_386:
return 0;
default:
- WARN("unexpected ELF machine type %d", elf->ehdr.e_machine);
+ ERROR("unexpected ELF machine type %d", elf->ehdr.e_machine);
return -1;
}
}
@@ -173,7 +173,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
ret = insn_decode(&ins, sec->data->d_buf + offset, maxlen,
x86_64 ? INSN_MODE_64 : INSN_MODE_32);
if (ret < 0) {
- WARN("can't decode instruction at %s:0x%lx", sec->name, offset);
+ ERROR("can't decode instruction at %s:0x%lx", sec->name, offset);
return -1;
}
@@ -321,7 +321,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
break;
default:
- /* WARN ? */
+ /* ERROR ? */
break;
}
@@ -561,8 +561,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
if (ins.prefixes.nbytes == 1 &&
ins.prefixes.bytes[0] == 0xf2) {
/* ENQCMD cannot be used in the kernel. */
- WARN("ENQCMD instruction at %s:%lx", sec->name,
- offset);
+ WARN("ENQCMD instruction at %s:%lx", sec->name, offset);
}
} else if (op2 == 0xa0 || op2 == 0xa8) {
@@ -646,7 +645,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
if (disp->sym->type == STT_SECTION)
func = find_symbol_by_offset(disp->sym->sec, reloc_addend(disp));
if (!func) {
- WARN("no func for pv_ops[]");
+ ERROR("no func for pv_ops[]");
return -1;
}
@@ -776,7 +775,7 @@ const char *arch_nop_insn(int len)
};
if (len < 1 || len > 5) {
- WARN("invalid NOP size: %d\n", len);
+ ERROR("invalid NOP size: %d\n", len);
return NULL;
}
@@ -796,7 +795,7 @@ const char *arch_ret_insn(int len)
};
if (len < 1 || len > 5) {
- WARN("invalid RET size: %d\n", len);
+ ERROR("invalid RET size: %d\n", len);
return NULL;
}
diff --git a/tools/objtool/arch/x86/orc.c b/tools/objtool/arch/x86/orc.c
index b6cd943e87f9..7176b9ec5b05 100644
--- a/tools/objtool/arch/x86/orc.c
+++ b/tools/objtool/arch/x86/orc.c
@@ -40,7 +40,7 @@ int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruct
orc->type = ORC_TYPE_REGS_PARTIAL;
break;
default:
- WARN_INSN(insn, "unknown unwind hint type %d", cfi->type);
+ ERROR_INSN(insn, "unknown unwind hint type %d", cfi->type);
return -1;
}
@@ -72,7 +72,7 @@ int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruct
orc->sp_reg = ORC_REG_DX;
break;
default:
- WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base);
+ ERROR_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base);
return -1;
}
@@ -87,7 +87,7 @@ int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruct
orc->bp_reg = ORC_REG_BP;
break;
default:
- WARN_INSN(insn, "unknown BP base reg %d", bp->base);
+ ERROR_INSN(insn, "unknown BP base reg %d", bp->base);
return -1;
}
diff --git a/tools/objtool/arch/x86/special.c b/tools/objtool/arch/x86/special.c
index 9c1c9df09aaa..403e587676f1 100644
--- a/tools/objtool/arch/x86/special.c
+++ b/tools/objtool/arch/x86/special.c
@@ -3,11 +3,9 @@
#include <objtool/special.h>
#include <objtool/builtin.h>
+#include <objtool/warn.h>
-#define X86_FEATURE_POPCNT (4 * 32 + 23)
-#define X86_FEATURE_SMAP (9 * 32 + 20)
-
-void arch_handle_alternative(unsigned short feature, struct special_alt *alt)
+void arch_handle_alternative(struct special_alt *alt)
{
static struct special_alt *group, *prev;
@@ -31,34 +29,6 @@ void arch_handle_alternative(unsigned short feature, struct special_alt *alt)
} else group = alt;
prev = alt;
-
- switch (feature) {
- case X86_FEATURE_SMAP:
- /*
- * If UACCESS validation is enabled; force that alternative;
- * otherwise force it the other way.
- *
- * What we want to avoid is having both the original and the
- * alternative code flow at the same time, in that case we can
- * find paths that see the STAC but take the NOP instead of
- * CLAC and the other way around.
- */
- if (opts.uaccess)
- alt->skip_orig = true;
- else
- alt->skip_alt = true;
- break;
- case X86_FEATURE_POPCNT:
- /*
- * It has been requested that we don't validate the !POPCNT
- * feature path which is a "very very small percentage of
- * machines".
- */
- alt->skip_orig = true;
- break;
- default:
- break;
- }
}
bool arch_support_alt_relocation(struct special_alt *special_alt,
@@ -156,8 +126,10 @@ struct reloc *arch_find_switch_table(struct objtool_file *file,
* indicates a rare GCC quirk/bug which can leave dead
* code behind.
*/
- if (reloc_type(text_reloc) == R_X86_64_PC32)
+ if (reloc_type(text_reloc) == R_X86_64_PC32) {
+ WARN_INSN(insn, "ignoring unreachables due to jump table quirk");
file->ignore_unreachables = true;
+ }
*table_size = 0;
return rodata_reloc;
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 5f761f420b8c..80239843e9f0 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -8,18 +8,18 @@
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
+#include <errno.h>
#include <sys/stat.h>
#include <sys/sendfile.h>
#include <objtool/builtin.h>
#include <objtool/objtool.h>
+#include <objtool/warn.h>
-#define ERROR(format, ...) \
- fprintf(stderr, \
- "error: objtool: " format "\n", \
- ##__VA_ARGS__)
+#define ORIG_SUFFIX ".orig"
+int orig_argc;
+static char **orig_argv;
const char *objname;
-
struct opts opts;
static const char * const check_usage[] = {
@@ -194,30 +194,30 @@ static int copy_file(const char *src, const char *dst)
src_fd = open(src, O_RDONLY);
if (src_fd == -1) {
- ERROR("can't open '%s' for reading", src);
+ ERROR("can't open %s for reading: %s", src, strerror(errno));
return 1;
}
dst_fd = open(dst, O_WRONLY | O_CREAT | O_TRUNC, 0400);
if (dst_fd == -1) {
- ERROR("can't open '%s' for writing", dst);
+ ERROR("can't open %s for writing: %s", dst, strerror(errno));
return 1;
}
if (fstat(src_fd, &stat) == -1) {
- perror("fstat");
+ ERROR_GLIBC("fstat");
return 1;
}
if (fchmod(dst_fd, stat.st_mode) == -1) {
- perror("fchmod");
+ ERROR_GLIBC("fchmod");
return 1;
}
for (to_copy = stat.st_size; to_copy > 0; to_copy -= copied) {
copied = sendfile(dst_fd, src_fd, &offset, to_copy);
if (copied == -1) {
- perror("sendfile");
+ ERROR_GLIBC("sendfile");
return 1;
}
}
@@ -227,39 +227,73 @@ static int copy_file(const char *src, const char *dst)
return 0;
}
-static char **save_argv(int argc, const char **argv)
+static void save_argv(int argc, const char **argv)
{
- char **orig_argv;
-
orig_argv = calloc(argc, sizeof(char *));
if (!orig_argv) {
- perror("calloc");
- return NULL;
+ ERROR_GLIBC("calloc");
+ exit(1);
}
for (int i = 0; i < argc; i++) {
orig_argv[i] = strdup(argv[i]);
if (!orig_argv[i]) {
- perror("strdup");
- return NULL;
+ ERROR_GLIBC("strdup(%s)", argv[i]);
+ exit(1);
}
};
-
- return orig_argv;
}
-#define ORIG_SUFFIX ".orig"
+void print_args(void)
+{
+ char *backup = NULL;
+
+ if (opts.output || opts.dryrun)
+ goto print;
+
+ /*
+ * Make a backup before kbuild deletes the file so the error
+ * can be recreated without recompiling or relinking.
+ */
+ backup = malloc(strlen(objname) + strlen(ORIG_SUFFIX) + 1);
+ if (!backup) {
+ ERROR_GLIBC("malloc");
+ goto print;
+ }
+
+ strcpy(backup, objname);
+ strcat(backup, ORIG_SUFFIX);
+ if (copy_file(objname, backup)) {
+ backup = NULL;
+ goto print;
+ }
+
+print:
+ /*
+ * Print the cmdline args to make it easier to recreate. If '--output'
+ * wasn't used, add it to the printed args with the backup as input.
+ */
+ fprintf(stderr, "%s", orig_argv[0]);
+
+ for (int i = 1; i < orig_argc; i++) {
+ char *arg = orig_argv[i];
+
+ if (backup && !strcmp(arg, objname))
+ fprintf(stderr, " %s -o %s", backup, objname);
+ else
+ fprintf(stderr, " %s", arg);
+ }
+
+ fprintf(stderr, "\n");
+}
int objtool_run(int argc, const char **argv)
{
struct objtool_file *file;
- char *backup = NULL;
- char **orig_argv;
int ret = 0;
- orig_argv = save_argv(argc, argv);
- if (!orig_argv)
- return 1;
+ orig_argc = argc;
+ save_argv(argc, argv);
cmd_parse_options(argc, argv, check_usage);
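
save_argv() now stashes the command line unconditionally and exits on allocation failure, while the backup-and-reprint logic moves into print_args(), called only on failure: the input object is copied to <objname>.orig before kbuild can delete it, and the printed command substitutes the backup as input with -o restoring the original name, so the failure can be replayed without relinking. As the next hunk shows, this lets objtool_run() drop its err: unwind entirely. A self-contained demo of the argv substitution, names invented:

#include <stdio.h>
#include <string.h>

/*
 * Replay a saved command line, swapping the object file for its
 * ".orig" backup and routing the output back to the original name.
 */
static void reprint(const char **argv, int argc,
		    const char *obj, const char *backup)
{
	printf("%s", argv[0]);
	for (int i = 1; i < argc; i++) {
		if (backup && !strcmp(argv[i], obj))
			printf(" %s -o %s", backup, obj);
		else
			printf(" %s", argv[i]);
	}
	putchar('\n');
}

int main(void)
{
	const char *argv[] = { "objtool", "--stackval", "vmlinux.o" };

	/* Prints: objtool --stackval vmlinux.o.orig -o vmlinux.o */
	reprint(argv, 3, "vmlinux.o", "vmlinux.o.orig");
	return 0;
}
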
@@ -282,59 +316,19 @@ int objtool_run(int argc, const char **argv)
file = objtool_open_read(objname);
if (!file)
- goto err;
+ return 1;
if (!opts.link && has_multiple_files(file->elf)) {
ERROR("Linked object requires --link");
- goto err;
+ return 1;
}
ret = check(file);
if (ret)
- goto err;
+ return ret;
if (!opts.dryrun && file->elf->changed && elf_write(file->elf))
- goto err;
-
- return 0;
-
-err:
- if (opts.dryrun)
- goto err_msg;
-
- if (opts.output) {
- unlink(opts.output);
- goto err_msg;
- }
-
- /*
- * Make a backup before kbuild deletes the file so the error
- * can be recreated without recompiling or relinking.
- */
- backup = malloc(strlen(objname) + strlen(ORIG_SUFFIX) + 1);
- if (!backup) {
- perror("malloc");
- return 1;
- }
-
- strcpy(backup, objname);
- strcat(backup, ORIG_SUFFIX);
- if (copy_file(objname, backup))
return 1;
-err_msg:
- fprintf(stderr, "%s", orig_argv[0]);
-
- for (int i = 1; i < argc; i++) {
- char *arg = orig_argv[i];
-
- if (backup && !strcmp(arg, objname))
- fprintf(stderr, " %s -o %s", backup, objname);
- else
- fprintf(stderr, " %s", arg);
- }
-
- fprintf(stderr, "\n");
-
- return 1;
+ return 0;
}
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index ca3435acc326..4a1f6c3169b3 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -25,7 +25,6 @@
struct alternative {
struct alternative *next;
struct instruction *insn;
- bool skip_orig;
};
static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;
@@ -341,12 +340,7 @@ static void init_insn_state(struct objtool_file *file, struct insn_state *state,
memset(state, 0, sizeof(*state));
init_cfi_state(&state->cfi);
- /*
- * We need the full vmlinux for noinstr validation, otherwise we can
- * not correctly determine insn_call_dest(insn)->sec (external symbols
- * do not have a section).
- */
- if (opts.link && opts.noinstr && sec)
+ if (opts.noinstr && sec)
state->noinstr = sec->noinstr;
}
@@ -354,7 +348,7 @@ static struct cfi_state *cfi_alloc(void)
{
struct cfi_state *cfi = calloc(1, sizeof(struct cfi_state));
if (!cfi) {
- WARN("calloc failed");
+ ERROR_GLIBC("calloc");
exit(1);
}
nr_cfi++;
@@ -410,7 +404,7 @@ static void *cfi_hash_alloc(unsigned long size)
PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANON, -1, 0);
if (cfi_hash == (void *)-1L) {
- WARN("mmap fail cfi_hash");
+ ERROR_GLIBC("mmap fail cfi_hash");
cfi_hash = NULL;
} else if (opts.stats) {
printf("cfi_bits: %d\n", cfi_bits);
@@ -466,7 +460,7 @@ static int decode_instructions(struct objtool_file *file)
if (!insns || idx == INSN_CHUNK_MAX) {
insns = calloc(sizeof(*insn), INSN_CHUNK_SIZE);
if (!insns) {
- WARN("malloc failed");
+ ERROR_GLIBC("calloc");
return -1;
}
idx = 0;
@@ -501,8 +495,6 @@ static int decode_instructions(struct objtool_file *file)
nr_insns++;
}
-// printf("%s: last chunk used: %d\n", sec->name, (int)idx);
-
sec_for_each_sym(sec, func) {
if (func->type != STT_NOTYPE && func->type != STT_FUNC)
continue;
@@ -511,8 +503,7 @@ static int decode_instructions(struct objtool_file *file)
/* Heuristic: likely an "end" symbol */
if (func->type == STT_NOTYPE)
continue;
- WARN("%s(): STT_FUNC at end of section",
- func->name);
+ ERROR("%s(): STT_FUNC at end of section", func->name);
return -1;
}
@@ -520,8 +511,7 @@ static int decode_instructions(struct objtool_file *file)
continue;
if (!find_insn(file, sec, func->offset)) {
- WARN("%s(): can't find starting instruction",
- func->name);
+ ERROR("%s(): can't find starting instruction", func->name);
return -1;
}
@@ -568,14 +558,20 @@ static int add_pv_ops(struct objtool_file *file, const char *symname)
if (!reloc)
break;
+ idx = (reloc_offset(reloc) - sym->offset) / sizeof(unsigned long);
+
func = reloc->sym;
if (func->type == STT_SECTION)
func = find_symbol_by_offset(reloc->sym->sec,
reloc_addend(reloc));
+ if (!func) {
+ ERROR_FUNC(reloc->sym->sec, reloc_addend(reloc),
+ "can't find func at %s[%d]", symname, idx);
+ return -1;
+ }
- idx = (reloc_offset(reloc) - sym->offset) / sizeof(unsigned long);
-
- objtool_pv_add(file, idx, func);
+ if (objtool_pv_add(file, idx, func))
+ return -1;
off = reloc_offset(reloc) + 1;
if (off > end)
@@ -599,7 +595,7 @@ static int init_pv_ops(struct objtool_file *file)
};
const char *pv_ops;
struct symbol *sym;
- int idx, nr;
+ int idx, nr, ret;
if (!opts.noinstr)
return 0;
@@ -612,14 +608,19 @@ static int init_pv_ops(struct objtool_file *file)
nr = sym->len / sizeof(unsigned long);
file->pv_ops = calloc(sizeof(struct pv_state), nr);
- if (!file->pv_ops)
+ if (!file->pv_ops) {
+ ERROR_GLIBC("calloc");
return -1;
+ }
for (idx = 0; idx < nr; idx++)
INIT_LIST_HEAD(&file->pv_ops[idx].targets);
- for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++)
- add_pv_ops(file, pv_ops);
+ for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++) {
+ ret = add_pv_ops(file, pv_ops);
+ if (ret)
+ return ret;
+ }
return 0;
}
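
add_pv_ops() can now fail (a pv_ops[] slot whose target symbol cannot be resolved gets ERROR_FUNC), so init_pv_ops() checks both its calloc() and each table walk instead of discarding the return value. The fail-fast shape, with a fake failing entry:

#include <stdio.h>

/* Simulated per-table decode; the empty name stands in for a failure. */
static int add_ops(const char *name)
{
	if (!name[0]) {
		fprintf(stderr, "can't find func\n");
		return -1;
	}
	printf("decoded %s\n", name);
	return 0;
}

int main(void)
{
	/* "pv_ops" is the real table name; the "" entry is illustrative. */
	static const char *tables[] = { "pv_ops", "", NULL };

	/* As in init_pv_ops(): the first failure aborts the whole pass. */
	for (int i = 0; tables[i]; i++) {
		if (add_ops(tables[i]))
			return 1;
	}
	return 0;
}
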
@@ -667,13 +668,12 @@ static int create_static_call_sections(struct objtool_file *file)
/* find key symbol */
key_name = strdup(insn_call_dest(insn)->name);
if (!key_name) {
- perror("strdup");
+ ERROR_GLIBC("strdup");
return -1;
}
if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
STATIC_CALL_TRAMP_PREFIX_LEN)) {
- WARN("static_call: trampoline name malformed: %s", key_name);
- free(key_name);
+ ERROR("static_call: trampoline name malformed: %s", key_name);
return -1;
}
tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
@@ -682,8 +682,7 @@ static int create_static_call_sections(struct objtool_file *file)
key_sym = find_symbol_by_name(file->elf, tmp);
if (!key_sym) {
if (!opts.module) {
- WARN("static_call: can't find static_call_key symbol: %s", tmp);
- free(key_name);
+ ERROR("static_call: can't find static_call_key symbol: %s", tmp);
return -1;
}
@@ -698,7 +697,6 @@ static int create_static_call_sections(struct objtool_file *file)
*/
key_sym = insn_call_dest(insn);
}
- free(key_name);
/* populate reloc for 'key' */
if (!elf_init_reloc_data_sym(file->elf, sec,
@@ -829,8 +827,11 @@ static int create_ibt_endbr_seal_sections(struct objtool_file *file)
if (opts.module && sym && sym->type == STT_FUNC &&
insn->offset == sym->offset &&
(!strcmp(sym->name, "init_module") ||
- !strcmp(sym->name, "cleanup_module")))
- WARN("%s(): not an indirect call target", sym->name);
+ !strcmp(sym->name, "cleanup_module"))) {
+ ERROR("%s(): Magic init_module() function name is deprecated, use module_init(fn) instead",
+ sym->name);
+ return -1;
+ }
if (!elf_init_reloc_text_sym(file->elf, sec,
idx * sizeof(int), idx,
@@ -979,16 +980,15 @@ static int create_direct_call_sections(struct objtool_file *file)
/*
* Warnings shouldn't be reported for ignored functions.
*/
-static void add_ignores(struct objtool_file *file)
+static int add_ignores(struct objtool_file *file)
{
- struct instruction *insn;
struct section *rsec;
struct symbol *func;
struct reloc *reloc;
rsec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
if (!rsec)
- return;
+ return 0;
for_each_reloc(rsec, reloc) {
switch (reloc->sym->type) {
@@ -1003,14 +1003,17 @@ static void add_ignores(struct objtool_file *file)
break;
default:
- WARN("unexpected relocation symbol type in %s: %d",
- rsec->name, reloc->sym->type);
- continue;
+ ERROR("unexpected relocation symbol type in %s: %d",
+ rsec->name, reloc->sym->type);
+ return -1;
}
- func_for_each_insn(file, func, insn)
- insn->ignore = true;
+ func->ignore = true;
+ if (func->cfunc)
+ func->cfunc->ignore = true;
}
+
+ return 0;
}
/*
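
Rather than setting insn->ignore on every instruction of a STACK_FRAME_NON_STANDARD function, add_ignores() now flags the function symbol itself, along with its .cold child, and validate_branch() returns early on func->ignore, as a later hunk shows. A sketch of the symbol-level flag, struct fields invented:

#include <stdbool.h>
#include <stdio.h>

struct symbol {
	const char *name;
	bool ignore;
	struct symbol *cfunc;	/* ".cold" child part, if any */
};

/* What add_ignores() now does per STACK_FRAME_NON_STANDARD entry. */
static void mark_non_standard(struct symbol *func)
{
	func->ignore = true;
	if (func->cfunc)
		func->cfunc->ignore = true;
}

int main(void)
{
	struct symbol cold = { "do_thing.cold", false, NULL };
	struct symbol func = { "do_thing", false, &cold };

	mark_non_standard(&func);

	printf("%s: %s\n", func.name, func.ignore ? "skip" : "validate");
	printf("%s: %s\n", cold.name, cold.ignore ? "skip" : "validate");
	return 0;
}
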
@@ -1188,12 +1191,15 @@ static const char *uaccess_safe_builtin[] = {
"__ubsan_handle_load_invalid_value",
/* STACKLEAK */
"stackleak_track_stack",
+ /* TRACE_BRANCH_PROFILING */
+ "ftrace_likely_update",
+ /* STACKPROTECTOR */
+ "__stack_chk_fail",
/* misc */
"csum_partial_copy_generic",
"copy_mc_fragile",
"copy_mc_fragile_handle_tail",
"copy_mc_enhanced_fast_string",
- "ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
"rep_stos_alternative",
"rep_movs_alternative",
"__copy_user_nocache",
@@ -1275,7 +1281,7 @@ static void remove_insn_ops(struct instruction *insn)
insn->stack_ops = NULL;
}
-static void annotate_call_site(struct objtool_file *file,
+static int annotate_call_site(struct objtool_file *file,
struct instruction *insn, bool sibling)
{
struct reloc *reloc = insn_reloc(file, insn);
@@ -1286,12 +1292,12 @@ static void annotate_call_site(struct objtool_file *file,
if (sym->static_call_tramp) {
list_add_tail(&insn->call_node, &file->static_call_list);
- return;
+ return 0;
}
if (sym->retpoline_thunk) {
list_add_tail(&insn->call_node, &file->retpoline_call_list);
- return;
+ return 0;
}
/*
@@ -1303,10 +1309,12 @@ static void annotate_call_site(struct objtool_file *file,
if (reloc)
set_reloc_type(file->elf, reloc, R_NONE);
- elf_write_insn(file->elf, insn->sec,
- insn->offset, insn->len,
- sibling ? arch_ret_insn(insn->len)
- : arch_nop_insn(insn->len));
+ if (elf_write_insn(file->elf, insn->sec,
+ insn->offset, insn->len,
+ sibling ? arch_ret_insn(insn->len)
+ : arch_nop_insn(insn->len))) {
+ return -1;
+ }
insn->type = sibling ? INSN_RETURN : INSN_NOP;
@@ -1320,7 +1328,7 @@ static void annotate_call_site(struct objtool_file *file,
insn->retpoline_safe = true;
}
- return;
+ return 0;
}
if (opts.mcount && sym->fentry) {
@@ -1330,15 +1338,17 @@ static void annotate_call_site(struct objtool_file *file,
if (reloc)
set_reloc_type(file->elf, reloc, R_NONE);
- elf_write_insn(file->elf, insn->sec,
- insn->offset, insn->len,
- arch_nop_insn(insn->len));
+ if (elf_write_insn(file->elf, insn->sec,
+ insn->offset, insn->len,
+ arch_nop_insn(insn->len))) {
+ return -1;
+ }
insn->type = INSN_NOP;
}
list_add_tail(&insn->call_node, &file->mcount_loc_list);
- return;
+ return 0;
}
if (insn->type == INSN_CALL && !insn->sec->init &&
@@ -1347,14 +1357,16 @@ static void annotate_call_site(struct objtool_file *file,
if (!sibling && dead_end_function(file, sym))
insn->dead_end = true;
+
+ return 0;
}
-static void add_call_dest(struct objtool_file *file, struct instruction *insn,
+static int add_call_dest(struct objtool_file *file, struct instruction *insn,
struct symbol *dest, bool sibling)
{
insn->_call_dest = dest;
if (!dest)
- return;
+ return 0;
/*
* Whatever stack impact regular CALLs have, should be undone
@@ -1365,10 +1377,10 @@ static void add_call_dest(struct objtool_file *file, struct instruction *insn,
*/
remove_insn_ops(insn);
- annotate_call_site(file, insn, sibling);
+ return annotate_call_site(file, insn, sibling);
}
-static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
+static int add_retpoline_call(struct objtool_file *file, struct instruction *insn)
{
/*
* Retpoline calls/jumps are really dynamic calls/jumps in disguise,
@@ -1385,7 +1397,7 @@ static void add_retpoline_call(struct objtool_file *file, struct instruction *in
insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
break;
default:
- return;
+ return 0;
}
insn->retpoline_safe = true;
@@ -1399,7 +1411,7 @@ static void add_retpoline_call(struct objtool_file *file, struct instruction *in
*/
remove_insn_ops(insn);
- annotate_call_site(file, insn, false);
+ return annotate_call_site(file, insn, false);
}
static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
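
annotate_call_site(), add_call_dest() and add_retpoline_call() were void, so a failed elf_write_insn() while NOP-ing out a sanitizer or fentry call site vanished silently. The whole chain now returns int and every caller forwards the status, which is what the new "ret = ...; if (ret) return ret;" boilerplate in the jump/call-destination hunks below is about. The conversion, reduced to stubs:

#include <stdio.h>

/* Stand-in for elf_write_insn(); pretend the write fails. */
static int write_nop(void)
{
	return -1;
}

/* Was 'void': the failure below used to be dropped on the floor. */
static int annotate_site(void)
{
	if (write_nop())
		return -1;
	return 0;
}

static int add_dest(void)
{
	return annotate_site();	/* forward the status, don't eat it */
}

int main(void)
{
	if (add_dest()) {
		fprintf(stderr, "annotation failed, aborting pass\n");
		return 1;
	}
	return 0;
}
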
@@ -1468,8 +1480,11 @@ static int add_jump_destinations(struct objtool_file *file)
struct reloc *reloc;
struct section *dest_sec;
unsigned long dest_off;
+ int ret;
for_each_insn(file, insn) {
+ struct symbol *func = insn_func(insn);
+
if (insn->jump_dest) {
/*
* handle_group_alt() may have previously set
@@ -1488,17 +1503,21 @@ static int add_jump_destinations(struct objtool_file *file)
dest_sec = reloc->sym->sec;
dest_off = arch_dest_reloc_offset(reloc_addend(reloc));
} else if (reloc->sym->retpoline_thunk) {
- add_retpoline_call(file, insn);
+ ret = add_retpoline_call(file, insn);
+ if (ret)
+ return ret;
continue;
} else if (reloc->sym->return_thunk) {
add_return_call(file, insn, true);
continue;
- } else if (insn_func(insn)) {
+ } else if (func) {
/*
* External sibling call or internal sibling call with
* STT_FUNC reloc.
*/
- add_call_dest(file, insn, reloc->sym, true);
+ ret = add_call_dest(file, insn, reloc->sym, true);
+ if (ret)
+ return ret;
continue;
} else if (reloc->sym->sec->idx) {
dest_sec = reloc->sym->sec;
@@ -1526,8 +1545,17 @@ static int add_jump_destinations(struct objtool_file *file)
continue;
}
- WARN_INSN(insn, "can't find jump dest instruction at %s+0x%lx",
- dest_sec->name, dest_off);
+ /*
+ * GCOV/KCOV dead code can jump to the end of the
+ * function/section.
+ */
+ if (file->ignore_unreachables && func &&
+ dest_sec == insn->sec &&
+ dest_off == func->offset + func->len)
+ continue;
+
+ ERROR_INSN(insn, "can't find jump dest instruction at %s+0x%lx",
+ dest_sec->name, dest_off);
return -1;
}
@@ -1538,7 +1566,9 @@ static int add_jump_destinations(struct objtool_file *file)
*/
if (jump_dest->sym && jump_dest->offset == jump_dest->sym->offset) {
if (jump_dest->sym->retpoline_thunk) {
- add_retpoline_call(file, insn);
+ ret = add_retpoline_call(file, insn);
+ if (ret)
+ return ret;
continue;
}
if (jump_dest->sym->return_thunk) {
@@ -1550,8 +1580,7 @@ static int add_jump_destinations(struct objtool_file *file)
/*
* Cross-function jump.
*/
- if (insn_func(insn) && insn_func(jump_dest) &&
- insn_func(insn) != insn_func(jump_dest)) {
+ if (func && insn_func(jump_dest) && func != insn_func(jump_dest)) {
/*
* For GCC 8+, create parent/child links for any cold
@@ -1568,10 +1597,10 @@ static int add_jump_destinations(struct objtool_file *file)
* case where the parent function's only reference to a
* subfunction is through a jump table.
*/
- if (!strstr(insn_func(insn)->name, ".cold") &&
+ if (!strstr(func->name, ".cold") &&
strstr(insn_func(jump_dest)->name, ".cold")) {
- insn_func(insn)->cfunc = insn_func(jump_dest);
- insn_func(jump_dest)->pfunc = insn_func(insn);
+ func->cfunc = insn_func(jump_dest);
+ insn_func(jump_dest)->pfunc = func;
}
}
@@ -1580,7 +1609,9 @@ static int add_jump_destinations(struct objtool_file *file)
* Internal sibling call without reloc or with
* STT_SECTION reloc.
*/
- add_call_dest(file, insn, insn_func(jump_dest), true);
+ ret = add_call_dest(file, insn, insn_func(jump_dest), true);
+ if (ret)
+ return ret;
continue;
}
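
The lookup failure becomes a fatal ERROR_INSN, but first a new escape hatch: GCOV/KCOV-instrumented dead code may jump one byte past the last instruction of its function, and when unreachables are already being ignored (as the jump-table quirk in special.c above arranges, now with a visible WARN_INSN) such a jump is tolerated instead of treated as an error. The boundary test, sketched:

#include <stdbool.h>
#include <stdio.h>

struct func { unsigned long offset, len; };

/*
 * A jump target exactly at func->offset + func->len has no instruction
 * behind it; compiler dead code can emit such jumps, so they are
 * tolerated when unreachable code is being ignored anyway.
 */
static bool tolerable_dangling_jump(const struct func *f,
				    unsigned long dest_off,
				    bool ignore_unreachables)
{
	return ignore_unreachables && dest_off == f->offset + f->len;
}

int main(void)
{
	struct func f = { .offset = 0x100, .len = 0x40 };

	printf("%d\n", tolerable_dangling_jump(&f, 0x140, true));	/* 1 */
	printf("%d\n", tolerable_dangling_jump(&f, 0x150, true));	/* 0 */
	return 0;
}
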
@@ -1610,8 +1641,10 @@ static int add_call_destinations(struct objtool_file *file)
unsigned long dest_off;
struct symbol *dest;
struct reloc *reloc;
+ int ret;
for_each_insn(file, insn) {
+ struct symbol *func = insn_func(insn);
if (insn->type != INSN_CALL)
continue;
@@ -1620,18 +1653,20 @@ static int add_call_destinations(struct objtool_file *file)
dest_off = arch_jump_destination(insn);
dest = find_call_destination(insn->sec, dest_off);
- add_call_dest(file, insn, dest, false);
+ ret = add_call_dest(file, insn, dest, false);
+ if (ret)
+ return ret;
- if (insn->ignore)
+ if (func && func->ignore)
continue;
if (!insn_call_dest(insn)) {
- WARN_INSN(insn, "unannotated intra-function call");
+ ERROR_INSN(insn, "unannotated intra-function call");
return -1;
}
- if (insn_func(insn) && insn_call_dest(insn)->type != STT_FUNC) {
- WARN_INSN(insn, "unsupported call to non-function");
+ if (func && insn_call_dest(insn)->type != STT_FUNC) {
+ ERROR_INSN(insn, "unsupported call to non-function");
return -1;
}
@@ -1639,18 +1674,25 @@ static int add_call_destinations(struct objtool_file *file)
dest_off = arch_dest_reloc_offset(reloc_addend(reloc));
dest = find_call_destination(reloc->sym->sec, dest_off);
if (!dest) {
- WARN_INSN(insn, "can't find call dest symbol at %s+0x%lx",
- reloc->sym->sec->name, dest_off);
+ ERROR_INSN(insn, "can't find call dest symbol at %s+0x%lx",
+ reloc->sym->sec->name, dest_off);
return -1;
}
- add_call_dest(file, insn, dest, false);
+ ret = add_call_dest(file, insn, dest, false);
+ if (ret)
+ return ret;
} else if (reloc->sym->retpoline_thunk) {
- add_retpoline_call(file, insn);
+ ret = add_retpoline_call(file, insn);
+ if (ret)
+ return ret;
- } else
- add_call_dest(file, insn, reloc->sym, false);
+ } else {
+ ret = add_call_dest(file, insn, reloc->sym, false);
+ if (ret)
+ return ret;
+ }
}
return 0;
@@ -1673,15 +1715,15 @@ static int handle_group_alt(struct objtool_file *file,
if (!orig_alt_group) {
struct instruction *last_orig_insn = NULL;
- orig_alt_group = malloc(sizeof(*orig_alt_group));
+ orig_alt_group = calloc(1, sizeof(*orig_alt_group));
if (!orig_alt_group) {
- WARN("malloc failed");
+ ERROR_GLIBC("calloc");
return -1;
}
orig_alt_group->cfi = calloc(special_alt->orig_len,
sizeof(struct cfi_state *));
if (!orig_alt_group->cfi) {
- WARN("calloc failed");
+ ERROR_GLIBC("calloc");
return -1;
}
@@ -1697,21 +1739,22 @@ static int handle_group_alt(struct objtool_file *file,
orig_alt_group->first_insn = orig_insn;
orig_alt_group->last_insn = last_orig_insn;
orig_alt_group->nop = NULL;
+ orig_alt_group->ignore = orig_insn->ignore_alts;
} else {
if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len -
orig_alt_group->first_insn->offset != special_alt->orig_len) {
- WARN_INSN(orig_insn, "weirdly overlapping alternative! %ld != %d",
- orig_alt_group->last_insn->offset +
- orig_alt_group->last_insn->len -
- orig_alt_group->first_insn->offset,
- special_alt->orig_len);
+ ERROR_INSN(orig_insn, "weirdly overlapping alternative! %ld != %d",
+ orig_alt_group->last_insn->offset +
+ orig_alt_group->last_insn->len -
+ orig_alt_group->first_insn->offset,
+ special_alt->orig_len);
return -1;
}
}
- new_alt_group = malloc(sizeof(*new_alt_group));
+ new_alt_group = calloc(1, sizeof(*new_alt_group));
if (!new_alt_group) {
- WARN("malloc failed");
+ ERROR_GLIBC("calloc");
return -1;
}
@@ -1723,9 +1766,9 @@ static int handle_group_alt(struct objtool_file *file,
* instruction affects the stack, the instruction after it (the
* nop) will propagate the new state to the shared CFI array.
*/
- nop = malloc(sizeof(*nop));
+ nop = calloc(1, sizeof(*nop));
if (!nop) {
- WARN("malloc failed");
+ ERROR_GLIBC("calloc");
return -1;
}
memset(nop, 0, sizeof(*nop));
@@ -1736,7 +1779,6 @@ static int handle_group_alt(struct objtool_file *file,
nop->type = INSN_NOP;
nop->sym = orig_insn->sym;
nop->alt_group = new_alt_group;
- nop->ignore = orig_insn->ignore_alts;
}
if (!special_alt->new_len) {
@@ -1753,7 +1795,6 @@ static int handle_group_alt(struct objtool_file *file,
last_new_insn = insn;
- insn->ignore = orig_insn->ignore_alts;
insn->sym = orig_insn->sym;
insn->alt_group = new_alt_group;
@@ -1769,7 +1810,7 @@ static int handle_group_alt(struct objtool_file *file,
if (alt_reloc && arch_pc_relative_reloc(alt_reloc) &&
!arch_support_alt_relocation(special_alt, insn, alt_reloc)) {
- WARN_INSN(insn, "unsupported relocation in alternatives section");
+ ERROR_INSN(insn, "unsupported relocation in alternatives section");
return -1;
}
@@ -1783,15 +1824,15 @@ static int handle_group_alt(struct objtool_file *file,
if (dest_off == special_alt->new_off + special_alt->new_len) {
insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn);
if (!insn->jump_dest) {
- WARN_INSN(insn, "can't find alternative jump destination");
+ ERROR_INSN(insn, "can't find alternative jump destination");
return -1;
}
}
}
if (!last_new_insn) {
- WARN_FUNC("can't find last new alternative instruction",
- special_alt->new_sec, special_alt->new_off);
+ ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
+ "can't find last new alternative instruction");
return -1;
}
@@ -1800,6 +1841,7 @@ end:
new_alt_group->first_insn = *new_insn;
new_alt_group->last_insn = last_new_insn;
new_alt_group->nop = nop;
+ new_alt_group->ignore = (*new_insn)->ignore_alts;
new_alt_group->cfi = orig_alt_group->cfi;
return 0;
}
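
The malloc() to calloc(1, ...) swaps here are not cosmetic: alt_group and the synthesized nop instruction now carry flags (such as the new ignore field) that are read without an explicit initializer, so zeroed allocation matters; the surviving memset() after the nop allocation becomes redundant but harmless. The equivalence, for reference:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

struct alt_group_like {
	int ignore;
	void *cfi;
};

int main(void)
{
	/* calloc() hands back zeroed storage... */
	struct alt_group_like *a = calloc(1, sizeof(*a));

	/* ...equivalent to malloc() plus an explicit memset(). */
	struct alt_group_like *b = malloc(sizeof(*b));

	assert(a && b);
	memset(b, 0, sizeof(*b));

	assert(a->ignore == b->ignore && a->cfi == b->cfi);
	free(a);
	free(b);
	return 0;
}
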
@@ -1817,7 +1859,7 @@ static int handle_jump_alt(struct objtool_file *file,
if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
orig_insn->type != INSN_NOP) {
- WARN_INSN(orig_insn, "unsupported instruction at jump label");
+ ERROR_INSN(orig_insn, "unsupported instruction at jump label");
return -1;
}
@@ -1826,9 +1868,13 @@ static int handle_jump_alt(struct objtool_file *file,
if (reloc)
set_reloc_type(file->elf, reloc, R_NONE);
- elf_write_insn(file->elf, orig_insn->sec,
- orig_insn->offset, orig_insn->len,
- arch_nop_insn(orig_insn->len));
+
+ if (elf_write_insn(file->elf, orig_insn->sec,
+ orig_insn->offset, orig_insn->len,
+ arch_nop_insn(orig_insn->len))) {
+ return -1;
+ }
+
orig_insn->type = INSN_NOP;
}
@@ -1864,19 +1910,17 @@ static int add_special_section_alts(struct objtool_file *file)
struct alternative *alt;
int ret;
- ret = special_get_alts(file->elf, &special_alts);
- if (ret)
- return ret;
+ if (special_get_alts(file->elf, &special_alts))
+ return -1;
list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
orig_insn = find_insn(file, special_alt->orig_sec,
special_alt->orig_off);
if (!orig_insn) {
- WARN_FUNC("special: can't find orig instruction",
- special_alt->orig_sec, special_alt->orig_off);
- ret = -1;
- goto out;
+ ERROR_FUNC(special_alt->orig_sec, special_alt->orig_off,
+ "special: can't find orig instruction");
+ return -1;
}
new_insn = NULL;
@@ -1884,41 +1928,37 @@ static int add_special_section_alts(struct objtool_file *file)
new_insn = find_insn(file, special_alt->new_sec,
special_alt->new_off);
if (!new_insn) {
- WARN_FUNC("special: can't find new instruction",
- special_alt->new_sec,
- special_alt->new_off);
- ret = -1;
- goto out;
+ ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
+ "special: can't find new instruction");
+ return -1;
}
}
if (special_alt->group) {
if (!special_alt->orig_len) {
- WARN_INSN(orig_insn, "empty alternative entry");
+ ERROR_INSN(orig_insn, "empty alternative entry");
continue;
}
ret = handle_group_alt(file, special_alt, orig_insn,
&new_insn);
if (ret)
- goto out;
+ return ret;
+
} else if (special_alt->jump_or_nop) {
ret = handle_jump_alt(file, special_alt, orig_insn,
&new_insn);
if (ret)
- goto out;
+ return ret;
}
- alt = malloc(sizeof(*alt));
+ alt = calloc(1, sizeof(*alt));
if (!alt) {
- WARN("malloc failed");
- ret = -1;
- goto out;
+ ERROR_GLIBC("calloc");
+ return -1;
}
alt->insn = new_insn;
- alt->skip_orig = special_alt->skip_orig;
- orig_insn->ignore_alts |= special_alt->skip_alt;
alt->next = orig_insn->alts;
orig_insn->alts = alt;
@@ -1932,8 +1972,7 @@ static int add_special_section_alts(struct objtool_file *file)
printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
}
-out:
- return ret;
+ return 0;
}
__weak unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table)
@@ -1941,8 +1980,7 @@ __weak unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct relo
return reloc->sym->offset + reloc_addend(reloc);
}
-static int add_jump_table(struct objtool_file *file, struct instruction *insn,
- struct reloc *next_table)
+static int add_jump_table(struct objtool_file *file, struct instruction *insn)
{
unsigned long table_size = insn_jump_table_size(insn);
struct symbol *pfunc = insn_func(insn)->pfunc;
@@ -1962,7 +2000,7 @@ static int add_jump_table(struct objtool_file *file, struct instruction *insn,
/* Check for the end of the table: */
if (table_size && reloc_offset(reloc) - reloc_offset(table) >= table_size)
break;
- if (reloc != table && reloc == next_table)
+ if (reloc != table && is_jump_table(reloc))
break;
/* Make sure the table entries are consecutive: */
@@ -1991,9 +2029,9 @@ static int add_jump_table(struct objtool_file *file, struct instruction *insn,
if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
break;
- alt = malloc(sizeof(*alt));
+ alt = calloc(1, sizeof(*alt));
if (!alt) {
- WARN("malloc failed");
+ ERROR_GLIBC("calloc");
return -1;
}
@@ -2005,7 +2043,7 @@ next:
}
if (!prev_offset) {
- WARN_INSN(insn, "can't find switch jump table");
+ ERROR_INSN(insn, "can't find switch jump table");
return -1;
}
@@ -2041,7 +2079,7 @@ static void find_jump_table(struct objtool_file *file, struct symbol *func,
insn->jump_dest &&
(insn->jump_dest->offset <= insn->offset ||
insn->jump_dest->offset > orig_insn->offset))
- break;
+ break;
table_reloc = arch_find_switch_table(file, insn, &table_size);
if (!table_reloc)
@@ -2053,8 +2091,10 @@ static void find_jump_table(struct objtool_file *file, struct symbol *func,
if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
continue;
+ set_jump_table(table_reloc);
orig_insn->_jump_table = table_reloc;
orig_insn->_jump_table_size = table_size;
+
break;
}
}
@@ -2096,31 +2136,19 @@ static void mark_func_jump_tables(struct objtool_file *file,
static int add_func_jump_tables(struct objtool_file *file,
struct symbol *func)
{
- struct instruction *insn, *insn_t1 = NULL, *insn_t2;
- int ret = 0;
+ struct instruction *insn;
+ int ret;
func_for_each_insn(file, func, insn) {
if (!insn_jump_table(insn))
continue;
- if (!insn_t1) {
- insn_t1 = insn;
- continue;
- }
-
- insn_t2 = insn;
-
- ret = add_jump_table(file, insn_t1, insn_jump_table(insn_t2));
+ ret = add_jump_table(file, insn);
if (ret)
return ret;
-
- insn_t1 = insn_t2;
}
- if (insn_t1)
- ret = add_jump_table(file, insn_t1, NULL);
-
- return ret;
+ return 0;
}
/*
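
add_jump_table() loses its next_table parameter: instead of threading each table's successor through the call, find_jump_table() tags the starting reloc with set_jump_table() and the entry scan simply stops at any reloc for which is_jump_table() is true. That also lets add_func_jump_tables() shrink to a plain loop. A sketch with an invented flag field:

#include <stdbool.h>
#include <stdio.h>

struct reloc {
	unsigned long offset;
	bool jump_table_start;
};

static void set_jump_table(struct reloc *r) { r->jump_table_start = true; }
static bool is_jump_table(const struct reloc *r) { return r->jump_table_start; }

int main(void)
{
	struct reloc rodata[] = { { 0x00 }, { 0x08 }, { 0x10 } };

	set_jump_table(&rodata[0]);	/* first table starts here... */
	set_jump_table(&rodata[2]);	/* ...and a second one here */

	/* The entry scan for table 0 stops where another table begins. */
	for (int i = 1; i < 3; i++) {
		if (is_jump_table(&rodata[i])) {
			printf("table 0 ends before reloc at 0x%lx\n",
			       rodata[i].offset);
			break;
		}
	}
	return 0;
}
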
@@ -2173,12 +2201,12 @@ static int read_unwind_hints(struct objtool_file *file)
return 0;
if (!sec->rsec) {
- WARN("missing .rela.discard.unwind_hints section");
+ ERROR("missing .rela.discard.unwind_hints section");
return -1;
}
if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
- WARN("struct unwind_hint size mismatch");
+ ERROR("struct unwind_hint size mismatch");
return -1;
}
@@ -2189,7 +2217,7 @@ static int read_unwind_hints(struct objtool_file *file)
reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
if (!reloc) {
- WARN("can't find reloc for unwind_hints[%d]", i);
+ ERROR("can't find reloc for unwind_hints[%d]", i);
return -1;
}
@@ -2198,13 +2226,13 @@ static int read_unwind_hints(struct objtool_file *file)
} else if (reloc->sym->local_label) {
offset = reloc->sym->offset;
} else {
- WARN("unexpected relocation symbol type in %s", sec->rsec->name);
+ ERROR("unexpected relocation symbol type in %s", sec->rsec->name);
return -1;
}
insn = find_insn(file, reloc->sym->sec, offset);
if (!insn) {
- WARN("can't find insn for unwind_hints[%d]", i);
+ ERROR("can't find insn for unwind_hints[%d]", i);
return -1;
}
@@ -2231,7 +2259,8 @@ static int read_unwind_hints(struct objtool_file *file)
if (sym && sym->bind == STB_GLOBAL) {
if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
- WARN_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR");
+ ERROR_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR");
+ return -1;
}
}
}
@@ -2245,7 +2274,7 @@ static int read_unwind_hints(struct objtool_file *file)
cfi = *(insn->cfi);
if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
- WARN_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg);
+ ERROR_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg);
return -1;
}
@@ -2291,7 +2320,7 @@ static int read_annotate(struct objtool_file *file,
insn = find_insn(file, reloc->sym->sec, offset);
if (!insn) {
- WARN("bad .discard.annotate_insn entry: %d of type %d", reloc_idx(reloc), type);
+ ERROR("bad .discard.annotate_insn entry: %d of type %d", reloc_idx(reloc), type);
return -1;
}
@@ -2306,6 +2335,8 @@ static int read_annotate(struct objtool_file *file,
static int __annotate_early(struct objtool_file *file, int type, struct instruction *insn)
{
switch (type) {
+
+ /* Must be before add_special_section_alts() */
case ANNOTYPE_IGNORE_ALTS:
insn->ignore_alts = true;
break;
@@ -2332,7 +2363,7 @@ static int __annotate_ifc(struct objtool_file *file, int type, struct instructio
return 0;
if (insn->type != INSN_CALL) {
- WARN_INSN(insn, "intra_function_call not a direct call");
+ ERROR_INSN(insn, "intra_function_call not a direct call");
return -1;
}
@@ -2346,8 +2377,8 @@ static int __annotate_ifc(struct objtool_file *file, int type, struct instructio
dest_off = arch_jump_destination(insn);
insn->jump_dest = find_insn(file, insn->sec, dest_off);
if (!insn->jump_dest) {
- WARN_INSN(insn, "can't find call dest at %s+0x%lx",
- insn->sec->name, dest_off);
+ ERROR_INSN(insn, "can't find call dest at %s+0x%lx",
+ insn->sec->name, dest_off);
return -1;
}
@@ -2366,7 +2397,7 @@ static int __annotate_late(struct objtool_file *file, int type, struct instructi
insn->type != INSN_CALL_DYNAMIC &&
insn->type != INSN_RETURN &&
insn->type != INSN_NOP) {
- WARN_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop");
+ ERROR_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop");
return -1;
}
@@ -2398,8 +2429,8 @@ static int __annotate_late(struct objtool_file *file, int type, struct instructi
break;
default:
- WARN_INSN(insn, "Unknown annotation type: %d", type);
- break;
+ ERROR_INSN(insn, "Unknown annotation type: %d", type);
+ return -1;
}
return 0;
@@ -2512,7 +2543,10 @@ static int decode_sections(struct objtool_file *file)
if (ret)
return ret;
- add_ignores(file);
+ ret = add_ignores(file);
+ if (ret)
+ return ret;
+
add_uaccess_safe(file);
ret = read_annotate(file, __annotate_early);
@@ -2732,7 +2766,7 @@ static int update_cfi_state(struct instruction *insn,
if (cfa->base == CFI_UNDEFINED) {
if (insn_func(insn)) {
WARN_INSN(insn, "undefined stack state");
- return -1;
+ return 1;
}
return 0;
}
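
Returning 1 instead of -1 for an undefined stack state reflects a convention this series leans on (my reading, so treat the exact semantics as hedged): negative values are fatal, malformed-object errors, while positive values are survivable per-branch warnings that get summed, and handle_insn_ops() below now forwards whichever it received. Roughly:

#include <stdio.h>

/* 0 = clean, positive = survivable warning, negative = fatal error. */
static int validate_insn(int kind)
{
	switch (kind) {
	case 0:
		return 0;
	case 1:
		fprintf(stderr, "warning: undefined stack state\n");
		return 1;
	default:
		fprintf(stderr, "error: malformed input\n");
		return -1;
	}
}

int main(void)
{
	int warnings = 0;

	for (int kind = 0; kind <= 1; kind++) {
		int ret = validate_insn(kind);

		if (ret < 0)
			return 1;	/* fatal: stop immediately */
		warnings += ret;	/* warning: keep going, count it */
	}
	printf("%d warning(s)\n", warnings);
	return 0;
}
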
@@ -3175,9 +3209,8 @@ static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn
if (cficmp(alt_cfi[group_off], insn->cfi)) {
struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group;
struct instruction *orig = orig_group->first_insn;
- char *where = offstr(insn->sec, insn->offset);
- WARN_INSN(orig, "stack layout conflict in alternatives: %s", where);
- free(where);
+ WARN_INSN(orig, "stack layout conflict in alternatives: %s",
+ offstr(insn->sec, insn->offset));
return -1;
}
}
@@ -3190,13 +3223,15 @@ static int handle_insn_ops(struct instruction *insn,
struct insn_state *state)
{
struct stack_op *op;
+ int ret;
for (op = insn->stack_ops; op; op = op->next) {
- if (update_cfi_state(insn, next_insn, &state->cfi, op))
- return 1;
+ ret = update_cfi_state(insn, next_insn, &state->cfi, op);
+ if (ret)
+ return ret;
- if (!insn->alt_group)
+ if (!opts.uaccess || !insn->alt_group)
continue;
if (op->dest.type == OP_DEST_PUSHF) {
@@ -3238,36 +3273,41 @@ static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
cfi1->cfa.base, cfi1->cfa.offset,
cfi2->cfa.base, cfi2->cfa.offset);
+ return false;
- } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
+ }
+
+ if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
for (i = 0; i < CFI_NUM_REGS; i++) {
- if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
- sizeof(struct cfi_reg)))
+
+ if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], sizeof(struct cfi_reg)))
continue;
WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
i, cfi1->regs[i].base, cfi1->regs[i].offset,
i, cfi2->regs[i].base, cfi2->regs[i].offset);
- break;
}
+ return false;
+ }
- } else if (cfi1->type != cfi2->type) {
+ if (cfi1->type != cfi2->type) {
WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d",
cfi1->type, cfi2->type);
+ return false;
+ }
- } else if (cfi1->drap != cfi2->drap ||
+ if (cfi1->drap != cfi2->drap ||
(cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
(cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {
WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
+ return false;
+ }
- } else
- return true;
-
- return false;
+ return true;
}
static inline bool func_uaccess_safe(struct symbol *func)
@@ -3480,6 +3520,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
u8 visited;
int ret;
+ if (func && func->ignore)
+ return 0;
+
sec = insn->sec;
while (1) {
@@ -3491,13 +3534,13 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
!strncmp(func->name, "__pfx_", 6))
return 0;
+ if (file->ignore_unreachables)
+ return 0;
+
WARN("%s() falls through to next function %s()",
func->name, insn_func(insn)->name);
- return 1;
- }
+ func->warned = 1;
- if (func && insn->ignore) {
- WARN_INSN(insn, "BUG: why am I validating an ignored function?");
return 1;
}
@@ -3572,24 +3615,19 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
if (propagate_alt_cfi(file, insn))
return 1;
- if (!insn->ignore_alts && insn->alts) {
- bool skip_orig = false;
-
+ if (insn->alts) {
for (alt = insn->alts; alt; alt = alt->next) {
- if (alt->skip_orig)
- skip_orig = true;
-
ret = validate_branch(file, func, alt->insn, state);
if (ret) {
BT_INSN(insn, "(alt)");
return ret;
}
}
-
- if (skip_orig)
- return 0;
}
+ if (insn->alt_group && insn->alt_group->ignore)
+ return 0;
+
if (handle_insn_ops(insn, next_insn, &state))
return 1;
@@ -3610,9 +3648,6 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
return 1;
}
- if (insn->dead_end)
- return 0;
-
break;
case INSN_JUMP_CONDITIONAL:
@@ -3660,6 +3695,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
return 0;
case INSN_STAC:
+ if (!opts.uaccess)
+ break;
+
if (state.uaccess) {
WARN_INSN(insn, "recursive UACCESS enable");
return 1;
@@ -3669,6 +3707,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
break;
case INSN_CLAC:
+ if (!opts.uaccess)
+ break;
+
if (!state.uaccess && func) {
WARN_INSN(insn, "redundant UACCESS disable");
return 1;
@@ -3710,7 +3751,12 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
if (!next_insn) {
if (state.cfi.cfa.base == CFI_UNDEFINED)
return 0;
- WARN("%s: unexpected end of section", sec->name);
+ if (file->ignore_unreachables)
+ return 0;
+
+ WARN("%s%sunexpected end of section %s",
+ func ? func->name : "", func ? "(): " : "",
+ sec->name);
return 1;
}
@@ -3725,7 +3771,7 @@ static int validate_unwind_hint(struct objtool_file *file,
struct instruction *insn,
struct insn_state *state)
{
- if (insn->hint && !insn->visited && !insn->ignore) {
+ if (insn->hint && !insn->visited) {
int ret = validate_branch(file, insn_func(insn), insn, *state);
if (ret)
BT_INSN(insn, "<=== (hint)");
@@ -3776,23 +3822,15 @@ static int validate_unret(struct objtool_file *file, struct instruction *insn)
insn->visited |= VISITED_UNRET;
- if (!insn->ignore_alts && insn->alts) {
+ if (insn->alts) {
struct alternative *alt;
- bool skip_orig = false;
-
for (alt = insn->alts; alt; alt = alt->next) {
- if (alt->skip_orig)
- skip_orig = true;
-
ret = validate_unret(file, alt->insn);
if (ret) {
BT_INSN(insn, "(alt)");
return ret;
}
}
-
- if (skip_orig)
- return 0;
}
switch (insn->type) {
@@ -3808,7 +3846,7 @@ static int validate_unret(struct objtool_file *file, struct instruction *insn)
if (!is_sibling_call(insn)) {
if (!insn->jump_dest) {
WARN_INSN(insn, "unresolved jump target after linking?!?");
- return -1;
+ return 1;
}
ret = validate_unret(file, insn->jump_dest);
if (ret) {
@@ -3830,7 +3868,7 @@ static int validate_unret(struct objtool_file *file, struct instruction *insn)
if (!dest) {
WARN("Unresolved function after linking!?: %s",
insn_call_dest(insn)->name);
- return -1;
+ return 1;
}
ret = validate_unret(file, dest);
@@ -3859,7 +3897,7 @@ static int validate_unret(struct objtool_file *file, struct instruction *insn)
if (!next) {
WARN_INSN(insn, "teh end!");
- return -1;
+ return 1;
}
insn = next;
}
@@ -3874,18 +3912,13 @@ static int validate_unret(struct objtool_file *file, struct instruction *insn)
static int validate_unrets(struct objtool_file *file)
{
struct instruction *insn;
- int ret, warnings = 0;
+ int warnings = 0;
for_each_insn(file, insn) {
if (!insn->unret)
continue;
- ret = validate_unret(file, insn);
- if (ret < 0) {
- WARN_INSN(insn, "Failed UNRET validation");
- return ret;
- }
- warnings += ret;
+ warnings += validate_unret(file, insn);
}
return warnings;
@@ -3911,13 +3944,13 @@ static int validate_retpoline(struct objtool_file *file)
if (insn->type == INSN_RETURN) {
if (opts.rethunk) {
WARN_INSN(insn, "'naked' return found in MITIGATION_RETHUNK build");
- } else
- continue;
- } else {
- WARN_INSN(insn, "indirect %s found in MITIGATION_RETPOLINE build",
- insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
+ warnings++;
+ }
+ continue;
}
+ WARN_INSN(insn, "indirect %s found in MITIGATION_RETPOLINE build",
+ insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
warnings++;
}
@@ -3939,10 +3972,11 @@ static bool is_ubsan_insn(struct instruction *insn)
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
- int i;
+ struct symbol *func = insn_func(insn);
struct instruction *prev_insn;
+ int i;
- if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP)
+ if (insn->type == INSN_NOP || insn->type == INSN_TRAP || (func && func->ignore))
return true;
/*
@@ -3961,7 +3995,7 @@ static bool ignore_unreachable_insn(struct objtool_file *file, struct instructio
* In this case we'll find a piece of code (whole function) that is not
* covered by a !section symbol. Ignore them.
*/
- if (opts.link && !insn_func(insn)) {
+ if (opts.link && !func) {
int size = find_symbol_hole_containing(insn->sec, insn->offset);
unsigned long end = insn->offset + size;
@@ -3987,19 +4021,17 @@ static bool ignore_unreachable_insn(struct objtool_file *file, struct instructio
*/
if (insn->jump_dest && insn_func(insn->jump_dest) &&
strstr(insn_func(insn->jump_dest)->name, ".cold")) {
- struct instruction *dest = insn->jump_dest;
- func_for_each_insn(file, insn_func(dest), dest)
- dest->ignore = true;
+ insn_func(insn->jump_dest)->ignore = true;
}
}
return false;
}
- if (!insn_func(insn))
+ if (!func)
return false;
- if (insn_func(insn)->static_call_tramp)
+ if (func->static_call_tramp)
return true;
/*
@@ -4011,7 +4043,7 @@ static bool ignore_unreachable_insn(struct objtool_file *file, struct instructio
* It may also insert a UD2 after calling a __noreturn function.
*/
prev_insn = prev_insn_same_sec(file, insn);
- if (prev_insn->dead_end &&
+ if (prev_insn && prev_insn->dead_end &&
(insn->type == INSN_BUG ||
(insn->type == INSN_JUMP_UNCONDITIONAL &&
insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
@@ -4030,7 +4062,7 @@ static bool ignore_unreachable_insn(struct objtool_file *file, struct instructio
if (insn->type == INSN_JUMP_UNCONDITIONAL) {
if (insn->jump_dest &&
- insn_func(insn->jump_dest) == insn_func(insn)) {
+ insn_func(insn->jump_dest) == func) {
insn = insn->jump_dest;
continue;
}
@@ -4038,7 +4070,7 @@ static bool ignore_unreachable_insn(struct objtool_file *file, struct instructio
break;
}
- if (insn->offset + insn->len >= insn_func(insn)->offset + insn_func(insn)->len)
+ if (insn->offset + insn->len >= func->offset + func->len)
break;
insn = next_insn_same_sec(file, insn);
@@ -4130,10 +4162,11 @@ static int validate_symbol(struct objtool_file *file, struct section *sec,
return 0;
insn = find_insn(file, sec, sym->offset);
- if (!insn || insn->ignore || insn->visited)
+ if (!insn || insn->visited)
return 0;
- state->uaccess = sym->uaccess_safe;
+ if (opts.uaccess)
+ state->uaccess = sym->uaccess_safe;
ret = validate_branch(file, insn_func(insn), insn, *state);
if (ret)
@@ -4354,9 +4387,8 @@ static int validate_ibt_data_reloc(struct objtool_file *file,
if (dest->noendbr)
return 0;
- WARN_FUNC("data relocation to !ENDBR: %s",
- reloc->sec->base, reloc_offset(reloc),
- offstr(dest->sec, dest->offset));
+ WARN_FUNC(reloc->sec->base, reloc_offset(reloc),
+ "data relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
return 1;
}
@@ -4484,13 +4516,15 @@ static int validate_reachable_instructions(struct objtool_file *file)
}
/* 'funcs' is a space-separated list of function names */
-static int disas_funcs(const char *funcs)
+static void disas_funcs(const char *funcs)
{
const char *objdump_str, *cross_compile;
int size, ret;
char *cmd;
cross_compile = getenv("CROSS_COMPILE");
+ if (!cross_compile)
+ cross_compile = "";
objdump_str = "%sobjdump -wdr %s | gawk -M -v _funcs='%s' '"
"BEGIN { split(_funcs, funcs); }"
@@ -4517,7 +4551,7 @@ static int disas_funcs(const char *funcs)
size = snprintf(NULL, 0, objdump_str, cross_compile, objname, funcs) + 1;
if (size <= 0) {
WARN("objdump string size calculation failed");
- return -1;
+ return;
}
cmd = malloc(size);
@@ -4527,24 +4561,30 @@ static int disas_funcs(const char *funcs)
ret = system(cmd);
if (ret) {
WARN("disassembly failed: %d", ret);
- return -1;
+ return;
}
-
- return 0;
}
-static int disas_warned_funcs(struct objtool_file *file)
+static void disas_warned_funcs(struct objtool_file *file)
{
struct symbol *sym;
char *funcs = NULL, *tmp;
for_each_sym(file, sym) {
- if (sym->warnings) {
+ if (sym->warned) {
if (!funcs) {
funcs = malloc(strlen(sym->name) + 1);
+ if (!funcs) {
+ ERROR_GLIBC("malloc");
+ return;
+ }
strcpy(funcs, sym->name);
} else {
tmp = malloc(strlen(funcs) + strlen(sym->name) + 2);
+ if (!tmp) {
+ ERROR_GLIBC("malloc");
+ return;
+ }
sprintf(tmp, "%s %s", funcs, sym->name);
free(funcs);
funcs = tmp;
@@ -4554,8 +4594,6 @@ static int disas_warned_funcs(struct objtool_file *file)
if (funcs)
disas_funcs(funcs);
-
- return 0;
}
struct insn_chunk {
@@ -4588,7 +4626,7 @@ static void free_insns(struct objtool_file *file)
int check(struct objtool_file *file)
{
- int ret, warnings = 0;
+ int ret = 0, warnings = 0;
arch_initial_func_cfi_state(&initial_func_cfi);
init_cfi_state(&init_cfi);
@@ -4606,44 +4644,27 @@ int check(struct objtool_file *file)
cfi_hash_add(&func_cfi);
ret = decode_sections(file);
- if (ret < 0)
+ if (ret)
goto out;
- warnings += ret;
-
if (!nr_insns)
goto out;
- if (opts.retpoline) {
- ret = validate_retpoline(file);
- if (ret < 0)
- goto out;
- warnings += ret;
- }
+ if (opts.retpoline)
+ warnings += validate_retpoline(file);
if (opts.stackval || opts.orc || opts.uaccess) {
- ret = validate_functions(file);
- if (ret < 0)
- goto out;
- warnings += ret;
+ int w = 0;
- ret = validate_unwind_hints(file, NULL);
- if (ret < 0)
- goto out;
- warnings += ret;
+ w += validate_functions(file);
+ w += validate_unwind_hints(file, NULL);
+ if (!w)
+ w += validate_reachable_instructions(file);
- if (!warnings) {
- ret = validate_reachable_instructions(file);
- if (ret < 0)
- goto out;
- warnings += ret;
- }
+ warnings += w;
} else if (opts.noinstr) {
- ret = validate_noinstr_sections(file);
- if (ret < 0)
- goto out;
- warnings += ret;
+ warnings += validate_noinstr_sections(file);
}
if (opts.unret) {
@@ -4651,94 +4672,71 @@ int check(struct objtool_file *file)
* Must be after validate_branch() and friends, it plays
* further games with insn->visited.
*/
- ret = validate_unrets(file);
- if (ret < 0)
- goto out;
- warnings += ret;
+ warnings += validate_unrets(file);
}
- if (opts.ibt) {
- ret = validate_ibt(file);
- if (ret < 0)
- goto out;
- warnings += ret;
- }
+ if (opts.ibt)
+ warnings += validate_ibt(file);
- if (opts.sls) {
- ret = validate_sls(file);
- if (ret < 0)
- goto out;
- warnings += ret;
- }
+ if (opts.sls)
+ warnings += validate_sls(file);
if (opts.static_call) {
ret = create_static_call_sections(file);
- if (ret < 0)
+ if (ret)
goto out;
- warnings += ret;
}
if (opts.retpoline) {
ret = create_retpoline_sites_sections(file);
- if (ret < 0)
+ if (ret)
goto out;
- warnings += ret;
}
if (opts.cfi) {
ret = create_cfi_sections(file);
- if (ret < 0)
+ if (ret)
goto out;
- warnings += ret;
}
if (opts.rethunk) {
ret = create_return_sites_sections(file);
- if (ret < 0)
+ if (ret)
goto out;
- warnings += ret;
if (opts.hack_skylake) {
ret = create_direct_call_sections(file);
- if (ret < 0)
+ if (ret)
goto out;
- warnings += ret;
}
}
if (opts.mcount) {
ret = create_mcount_loc_sections(file);
- if (ret < 0)
+ if (ret)
goto out;
- warnings += ret;
}
if (opts.prefix) {
ret = add_prefix_symbols(file);
- if (ret < 0)
+ if (ret)
goto out;
- warnings += ret;
}
if (opts.ibt) {
ret = create_ibt_endbr_seal_sections(file);
- if (ret < 0)
+ if (ret)
goto out;
- warnings += ret;
}
if (opts.orc && nr_insns) {
ret = orc_create(file);
- if (ret < 0)
+ if (ret)
goto out;
- warnings += ret;
}
free_insns(file);
- if (opts.verbose)
- disas_warned_funcs(file);
-
if (opts.stats) {
printf("nr_insns_visited: %ld\n", nr_insns_visited);
printf("nr_cfi: %ld\n", nr_cfi);
@@ -4747,19 +4745,18 @@ int check(struct objtool_file *file)
}
out:
- /*
- * CONFIG_OBJTOOL_WERROR upgrades all warnings (and errors) to actual
- * errors.
- *
- * Note that even "fatal" type errors don't actually return an error
- * without CONFIG_OBJTOOL_WERROR. That probably needs improved at some
- * point.
- */
- if (opts.werror && (ret || warnings)) {
- if (warnings)
+ if (!ret && !warnings)
+ return 0;
+
+ if (opts.werror && warnings)
+ ret = 1;
+
+ if (opts.verbose) {
+ if (opts.werror && warnings)
WARN("%d warning(s) upgraded to errors", warnings);
- return 1;
+ print_args();
+ disas_warned_funcs(file);
}
- return 0;
+ return ret;
}
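
The rewritten tail of check() encodes the new policy: a clean run returns 0; a hard error returns nonzero always; warnings flip the exit status only under --werror (CONFIG_OBJTOOL_WERROR); and --verbose is what now triggers print_args() plus disassembly of the warned functions. The status logic, condensed:

#include <stdbool.h>
#include <stdio.h>

static int exit_status(int ret, int warnings, bool werror)
{
	if (!ret && !warnings)
		return 0;		/* clean run */

	if (werror && warnings)
		ret = 1;		/* warnings upgraded to errors */

	return ret;
}

int main(void)
{
	printf("%d\n", exit_status(0, 3, false));	/* 0: warnings tolerated */
	printf("%d\n", exit_status(0, 3, true));	/* 1: --werror upgrade */
	printf("%d\n", exit_status(1, 0, false));	/* 1: hard error */
	return 0;
}
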
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index be4f4b62730c..727a3a4fd9d7 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -72,17 +72,17 @@ static inline void __elf_hash_del(struct elf_hash_node *node,
obj; \
obj = elf_list_entry(obj->member.next, typeof(*(obj)), member))
-#define elf_alloc_hash(name, size) \
-({ \
- __elf_bits(name) = max(10, ilog2(size)); \
+#define elf_alloc_hash(name, size) \
+({ \
+ __elf_bits(name) = max(10, ilog2(size)); \
__elf_table(name) = mmap(NULL, sizeof(struct elf_hash_node *) << __elf_bits(name), \
- PROT_READ|PROT_WRITE, \
- MAP_PRIVATE|MAP_ANON, -1, 0); \
- if (__elf_table(name) == (void *)-1L) { \
- WARN("mmap fail " #name); \
- __elf_table(name) = NULL; \
- } \
- __elf_table(name); \
+ PROT_READ|PROT_WRITE, \
+ MAP_PRIVATE|MAP_ANON, -1, 0); \
+ if (__elf_table(name) == (void *)-1L) { \
+ ERROR_GLIBC("mmap fail " #name); \
+ __elf_table(name) = NULL; \
+ } \
+ __elf_table(name); \
})
static inline unsigned long __sym_start(struct symbol *s)
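
This hunk only re-aligns the continuation backslashes, but the macro is worth a note: elf_alloc_hash() is a GNU statement expression, ({ ... }), whose value is its last expression, the freshly mmap()ed table or NULL on failure, so callers can allocate and test in one step. A stripped-down version of the pattern with a made-up name:

#include <stdio.h>
#include <sys/mman.h>

/*
 * Statement-expression allocator: runs several statements and
 * "returns" the last one, here the table pointer or NULL.
 */
#define alloc_table(nmemb)						\
({									\
	void *__p = mmap(NULL, (nmemb) * sizeof(void *),		\
			 PROT_READ | PROT_WRITE,			\
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);		\
	if (__p == MAP_FAILED)						\
		__p = NULL;						\
	__p;								\
})

int main(void)
{
	void *table = alloc_table(1024);

	printf("%s\n", table ? "allocated" : "mmap failed");
	return !table;
}
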
@@ -316,12 +316,12 @@ static int read_sections(struct elf *elf)
int i;
if (elf_getshdrnum(elf->elf, &sections_nr)) {
- WARN_ELF("elf_getshdrnum");
+ ERROR_ELF("elf_getshdrnum");
return -1;
}
if (elf_getshdrstrndx(elf->elf, &shstrndx)) {
- WARN_ELF("elf_getshdrstrndx");
+ ERROR_ELF("elf_getshdrstrndx");
return -1;
}
@@ -331,7 +331,7 @@ static int read_sections(struct elf *elf)
elf->section_data = calloc(sections_nr, sizeof(*sec));
if (!elf->section_data) {
- perror("calloc");
+ ERROR_GLIBC("calloc");
return -1;
}
for (i = 0; i < sections_nr; i++) {
@@ -341,33 +341,32 @@ static int read_sections(struct elf *elf)
s = elf_getscn(elf->elf, i);
if (!s) {
- WARN_ELF("elf_getscn");
+ ERROR_ELF("elf_getscn");
return -1;
}
sec->idx = elf_ndxscn(s);
if (!gelf_getshdr(s, &sec->sh)) {
- WARN_ELF("gelf_getshdr");
+ ERROR_ELF("gelf_getshdr");
return -1;
}
sec->name = elf_strptr(elf->elf, shstrndx, sec->sh.sh_name);
if (!sec->name) {
- WARN_ELF("elf_strptr");
+ ERROR_ELF("elf_strptr");
return -1;
}
if (sec->sh.sh_size != 0 && !is_dwarf_section(sec)) {
sec->data = elf_getdata(s, NULL);
if (!sec->data) {
- WARN_ELF("elf_getdata");
+ ERROR_ELF("elf_getdata");
return -1;
}
if (sec->data->d_off != 0 ||
sec->data->d_size != sec->sh.sh_size) {
- WARN("unexpected data attributes for %s",
- sec->name);
+ ERROR("unexpected data attributes for %s", sec->name);
return -1;
}
}
@@ -387,7 +386,7 @@ static int read_sections(struct elf *elf)
/* sanity check, one more call to elf_nextscn() should return NULL */
if (elf_nextscn(elf->elf, s)) {
- WARN("section entry mismatch");
+ ERROR("section entry mismatch");
return -1;
}
@@ -467,7 +466,7 @@ static int read_symbols(struct elf *elf)
elf->symbol_data = calloc(symbols_nr, sizeof(*sym));
if (!elf->symbol_data) {
- perror("calloc");
+ ERROR_GLIBC("calloc");
return -1;
}
for (i = 0; i < symbols_nr; i++) {
@@ -477,14 +476,14 @@ static int read_symbols(struct elf *elf)
if (!gelf_getsymshndx(symtab->data, shndx_data, i, &sym->sym,
&shndx)) {
- WARN_ELF("gelf_getsymshndx");
+ ERROR_ELF("gelf_getsymshndx");
goto err;
}
sym->name = elf_strptr(elf->elf, symtab->sh.sh_link,
sym->sym.st_name);
if (!sym->name) {
- WARN_ELF("elf_strptr");
+ ERROR_ELF("elf_strptr");
goto err;
}
@@ -496,8 +495,7 @@ static int read_symbols(struct elf *elf)
sym->sec = find_section_by_index(elf, shndx);
if (!sym->sec) {
- WARN("couldn't find section for symbol %s",
- sym->name);
+ ERROR("couldn't find section for symbol %s", sym->name);
goto err;
}
if (GELF_ST_TYPE(sym->sym.st_info) == STT_SECTION) {
@@ -536,8 +534,7 @@ static int read_symbols(struct elf *elf)
pnamelen = coldstr - sym->name;
pname = strndup(sym->name, pnamelen);
if (!pname) {
- WARN("%s(): failed to allocate memory",
- sym->name);
+ ERROR("%s(): failed to allocate memory", sym->name);
return -1;
}
@@ -545,8 +542,7 @@ static int read_symbols(struct elf *elf)
free(pname);
if (!pfunc) {
- WARN("%s(): can't find parent function",
- sym->name);
+ ERROR("%s(): can't find parent function", sym->name);
return -1;
}
@@ -583,7 +579,7 @@ static int elf_update_sym_relocs(struct elf *elf, struct symbol *sym)
{
struct reloc *reloc;
- for (reloc = sym->relocs; reloc; reloc = reloc->sym_next_reloc)
+ for (reloc = sym->relocs; reloc; reloc = sym_next_reloc(reloc))
set_reloc_sym(elf, reloc, reloc->sym->idx);
return 0;
@@ -613,14 +609,14 @@ static int elf_update_symbol(struct elf *elf, struct section *symtab,
s = elf_getscn(elf->elf, symtab->idx);
if (!s) {
- WARN_ELF("elf_getscn");
+ ERROR_ELF("elf_getscn");
return -1;
}
if (symtab_shndx) {
t = elf_getscn(elf->elf, symtab_shndx->idx);
if (!t) {
- WARN_ELF("elf_getscn");
+ ERROR_ELF("elf_getscn");
return -1;
}
}
@@ -643,7 +639,7 @@ static int elf_update_symbol(struct elf *elf, struct section *symtab,
if (idx) {
/* we don't do holes in symbol tables */
- WARN("index out of range");
+ ERROR("index out of range");
return -1;
}
@@ -654,7 +650,7 @@ static int elf_update_symbol(struct elf *elf, struct section *symtab,
buf = calloc(num, entsize);
if (!buf) {
- WARN("malloc");
+ ERROR_GLIBC("calloc");
return -1;
}
@@ -669,7 +665,7 @@ static int elf_update_symbol(struct elf *elf, struct section *symtab,
if (t) {
buf = calloc(num, sizeof(Elf32_Word));
if (!buf) {
- WARN("malloc");
+ ERROR_GLIBC("calloc");
return -1;
}
@@ -687,7 +683,7 @@ static int elf_update_symbol(struct elf *elf, struct section *symtab,
/* empty blocks should not happen */
if (!symtab_data->d_size) {
- WARN("zero size data");
+ ERROR("zero size data");
return -1;
}
@@ -702,7 +698,7 @@ static int elf_update_symbol(struct elf *elf, struct section *symtab,
/* something went side-ways */
if (idx < 0) {
- WARN("negative index");
+ ERROR("negative index");
return -1;
}
@@ -714,13 +710,13 @@ static int elf_update_symbol(struct elf *elf, struct section *symtab,
} else {
sym->sym.st_shndx = SHN_XINDEX;
if (!shndx_data) {
- WARN("no .symtab_shndx");
+ ERROR("no .symtab_shndx");
return -1;
}
}
if (!gelf_update_symshndx(symtab_data, shndx_data, idx, &sym->sym, shndx)) {
- WARN_ELF("gelf_update_symshndx");
+ ERROR_ELF("gelf_update_symshndx");
return -1;
}
@@ -738,7 +734,7 @@ __elf_create_symbol(struct elf *elf, struct symbol *sym)
if (symtab) {
symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
} else {
- WARN("no .symtab");
+ ERROR("no .symtab");
return NULL;
}
@@ -760,7 +756,7 @@ __elf_create_symbol(struct elf *elf, struct symbol *sym)
old->idx = new_idx;
if (elf_update_symbol(elf, symtab, symtab_shndx, old)) {
- WARN("elf_update_symbol move");
+ ERROR("elf_update_symbol move");
return NULL;
}
@@ -778,7 +774,7 @@ __elf_create_symbol(struct elf *elf, struct symbol *sym)
non_local:
sym->idx = new_idx;
if (elf_update_symbol(elf, symtab, symtab_shndx, sym)) {
- WARN("elf_update_symbol");
+ ERROR("elf_update_symbol");
return NULL;
}
@@ -799,7 +795,7 @@ elf_create_section_symbol(struct elf *elf, struct section *sec)
struct symbol *sym = calloc(1, sizeof(*sym));
if (!sym) {
- perror("malloc");
+ ERROR_GLIBC("malloc");
return NULL;
}
@@ -829,7 +825,7 @@ elf_create_prefix_symbol(struct elf *elf, struct symbol *orig, long size)
char *name = malloc(namelen);
if (!sym || !name) {
- perror("malloc");
+ ERROR_GLIBC("malloc");
return NULL;
}
@@ -858,16 +854,16 @@ static struct reloc *elf_init_reloc(struct elf *elf, struct section *rsec,
struct reloc *reloc, empty = { 0 };
if (reloc_idx >= sec_num_entries(rsec)) {
- WARN("%s: bad reloc_idx %u for %s with %d relocs",
- __func__, reloc_idx, rsec->name, sec_num_entries(rsec));
+ ERROR("%s: bad reloc_idx %u for %s with %d relocs",
+ __func__, reloc_idx, rsec->name, sec_num_entries(rsec));
return NULL;
}
reloc = &rsec->relocs[reloc_idx];
if (memcmp(reloc, &empty, sizeof(empty))) {
- WARN("%s: %s: reloc %d already initialized!",
- __func__, rsec->name, reloc_idx);
+ ERROR("%s: %s: reloc %d already initialized!",
+ __func__, rsec->name, reloc_idx);
return NULL;
}
@@ -880,7 +876,7 @@ static struct reloc *elf_init_reloc(struct elf *elf, struct section *rsec,
set_reloc_addend(elf, reloc, addend);
elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc));
- reloc->sym_next_reloc = sym->relocs;
+ set_sym_next_reloc(reloc, sym->relocs);
sym->relocs = reloc;
return reloc;
@@ -896,8 +892,7 @@ struct reloc *elf_init_reloc_text_sym(struct elf *elf, struct section *sec,
int addend = insn_off;
if (!(insn_sec->sh.sh_flags & SHF_EXECINSTR)) {
- WARN("bad call to %s() for data symbol %s",
- __func__, sym->name);
+ ERROR("bad call to %s() for data symbol %s", __func__, sym->name);
return NULL;
}
@@ -926,8 +921,7 @@ struct reloc *elf_init_reloc_data_sym(struct elf *elf, struct section *sec,
s64 addend)
{
if (sym->sec && (sec->sh.sh_flags & SHF_EXECINSTR)) {
- WARN("bad call to %s() for text symbol %s",
- __func__, sym->name);
+ ERROR("bad call to %s() for text symbol %s", __func__, sym->name);
return NULL;
}
@@ -953,8 +947,7 @@ static int read_relocs(struct elf *elf)
rsec->base = find_section_by_index(elf, rsec->sh.sh_info);
if (!rsec->base) {
- WARN("can't find base section for reloc section %s",
- rsec->name);
+ ERROR("can't find base section for reloc section %s", rsec->name);
return -1;
}
@@ -963,7 +956,7 @@ static int read_relocs(struct elf *elf)
nr_reloc = 0;
rsec->relocs = calloc(sec_num_entries(rsec), sizeof(*reloc));
if (!rsec->relocs) {
- perror("calloc");
+ ERROR_GLIBC("calloc");
return -1;
}
for (i = 0; i < sec_num_entries(rsec); i++) {
@@ -973,13 +966,12 @@ static int read_relocs(struct elf *elf)
symndx = reloc_sym(reloc);
reloc->sym = sym = find_symbol_by_index(elf, symndx);
if (!reloc->sym) {
- WARN("can't find reloc entry symbol %d for %s",
- symndx, rsec->name);
+ ERROR("can't find reloc entry symbol %d for %s", symndx, rsec->name);
return -1;
}
elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc));
- reloc->sym_next_reloc = sym->relocs;
+ set_sym_next_reloc(reloc, sym->relocs);
sym->relocs = reloc;
nr_reloc++;
@@ -1005,7 +997,7 @@ struct elf *elf_open_read(const char *name, int flags)
elf = malloc(sizeof(*elf));
if (!elf) {
- perror("malloc");
+ ERROR_GLIBC("malloc");
return NULL;
}
memset(elf, 0, sizeof(*elf));
@@ -1028,12 +1020,12 @@ struct elf *elf_open_read(const char *name, int flags)
elf->elf = elf_begin(elf->fd, cmd, NULL);
if (!elf->elf) {
- WARN_ELF("elf_begin");
+ ERROR_ELF("elf_begin");
goto err;
}
if (!gelf_getehdr(elf->elf, &elf->ehdr)) {
- WARN_ELF("gelf_getehdr");
+ ERROR_ELF("gelf_getehdr");
goto err;
}
@@ -1062,19 +1054,19 @@ static int elf_add_string(struct elf *elf, struct section *strtab, char *str)
if (!strtab)
strtab = find_section_by_name(elf, ".strtab");
if (!strtab) {
- WARN("can't find .strtab section");
+ ERROR("can't find .strtab section");
return -1;
}
s = elf_getscn(elf->elf, strtab->idx);
if (!s) {
- WARN_ELF("elf_getscn");
+ ERROR_ELF("elf_getscn");
return -1;
}
data = elf_newdata(s);
if (!data) {
- WARN_ELF("elf_newdata");
+ ERROR_ELF("elf_newdata");
return -1;
}
@@ -1099,7 +1091,7 @@ struct section *elf_create_section(struct elf *elf, const char *name,
sec = malloc(sizeof(*sec));
if (!sec) {
- perror("malloc");
+ ERROR_GLIBC("malloc");
return NULL;
}
memset(sec, 0, sizeof(*sec));
@@ -1108,13 +1100,13 @@ struct section *elf_create_section(struct elf *elf, const char *name,
s = elf_newscn(elf->elf);
if (!s) {
- WARN_ELF("elf_newscn");
+ ERROR_ELF("elf_newscn");
return NULL;
}
sec->name = strdup(name);
if (!sec->name) {
- perror("strdup");
+ ERROR_GLIBC("strdup");
return NULL;
}
@@ -1122,7 +1114,7 @@ struct section *elf_create_section(struct elf *elf, const char *name,
sec->data = elf_newdata(s);
if (!sec->data) {
- WARN_ELF("elf_newdata");
+ ERROR_ELF("elf_newdata");
return NULL;
}
@@ -1132,14 +1124,14 @@ struct section *elf_create_section(struct elf *elf, const char *name,
if (size) {
sec->data->d_buf = malloc(size);
if (!sec->data->d_buf) {
- perror("malloc");
+ ERROR_GLIBC("malloc");
return NULL;
}
memset(sec->data->d_buf, 0, size);
}
if (!gelf_getshdr(s, &sec->sh)) {
- WARN_ELF("gelf_getshdr");
+ ERROR_ELF("gelf_getshdr");
return NULL;
}
@@ -1154,7 +1146,7 @@ struct section *elf_create_section(struct elf *elf, const char *name,
if (!shstrtab)
shstrtab = find_section_by_name(elf, ".strtab");
if (!shstrtab) {
- WARN("can't find .shstrtab or .strtab section");
+ ERROR("can't find .shstrtab or .strtab section");
return NULL;
}
sec->sh.sh_name = elf_add_string(elf, shstrtab, sec->name);
@@ -1179,7 +1171,7 @@ static struct section *elf_create_rela_section(struct elf *elf,
rsec_name = malloc(strlen(sec->name) + strlen(".rela") + 1);
if (!rsec_name) {
- perror("malloc");
+ ERROR_GLIBC("malloc");
return NULL;
}
strcpy(rsec_name, ".rela");
@@ -1199,7 +1191,7 @@ static struct section *elf_create_rela_section(struct elf *elf,
rsec->relocs = calloc(sec_num_entries(rsec), sizeof(struct reloc));
if (!rsec->relocs) {
- perror("calloc");
+ ERROR_GLIBC("calloc");
return NULL;
}
@@ -1232,7 +1224,7 @@ int elf_write_insn(struct elf *elf, struct section *sec,
Elf_Data *data = sec->data;
if (data->d_type != ELF_T_BYTE || data->d_off) {
- WARN("write to unexpected data for section: %s", sec->name);
+ ERROR("write to unexpected data for section: %s", sec->name);
return -1;
}
@@ -1261,7 +1253,7 @@ static int elf_truncate_section(struct elf *elf, struct section *sec)
s = elf_getscn(elf->elf, sec->idx);
if (!s) {
- WARN_ELF("elf_getscn");
+ ERROR_ELF("elf_getscn");
return -1;
}
@@ -1271,7 +1263,7 @@ static int elf_truncate_section(struct elf *elf, struct section *sec)
if (!data) {
if (size) {
- WARN("end of section data but non-zero size left\n");
+ ERROR("end of section data but non-zero size left\n");
return -1;
}
return 0;
@@ -1279,12 +1271,12 @@ static int elf_truncate_section(struct elf *elf, struct section *sec)
if (truncated) {
/* when we remove symbols */
- WARN("truncated; but more data\n");
+ ERROR("truncated; but more data\n");
return -1;
}
if (!data->d_size) {
- WARN("zero size data");
+ ERROR("zero size data");
return -1;
}
@@ -1310,13 +1302,13 @@ int elf_write(struct elf *elf)
if (sec_changed(sec)) {
s = elf_getscn(elf->elf, sec->idx);
if (!s) {
- WARN_ELF("elf_getscn");
+ ERROR_ELF("elf_getscn");
return -1;
}
/* Note this also flags the section dirty */
if (!gelf_update_shdr(s, &sec->sh)) {
- WARN_ELF("gelf_update_shdr");
+ ERROR_ELF("gelf_update_shdr");
return -1;
}
@@ -1329,7 +1321,7 @@ int elf_write(struct elf *elf)
/* Write all changes to the file. */
if (elf_update(elf->elf, ELF_C_WRITE) < 0) {
- WARN_ELF("elf_update");
+ ERROR_ELF("elf_update");
return -1;
}
diff --git a/tools/objtool/include/objtool/builtin.h b/tools/objtool/include/objtool/builtin.h
index 0fafd0f7a209..6b08666fa69d 100644
--- a/tools/objtool/include/objtool/builtin.h
+++ b/tools/objtool/include/objtool/builtin.h
@@ -43,8 +43,10 @@ struct opts {
extern struct opts opts;
-extern int cmd_parse_options(int argc, const char **argv, const char * const usage[]);
+int cmd_parse_options(int argc, const char **argv, const char * const usage[]);
-extern int objtool_run(int argc, const char **argv);
+int objtool_run(int argc, const char **argv);
+
+void print_args(void);
#endif /* _BUILTIN_H */
diff --git a/tools/objtool/include/objtool/check.h b/tools/objtool/include/objtool/check.h
index e1cd13cd28a3..00fb745e7233 100644
--- a/tools/objtool/include/objtool/check.h
+++ b/tools/objtool/include/objtool/check.h
@@ -34,6 +34,8 @@ struct alt_group {
* This is shared with the other alt_groups in the same alternative.
*/
struct cfi_state **cfi;
+
+ bool ignore;
};
#define INSN_CHUNK_BITS 8
@@ -54,7 +56,6 @@ struct instruction {
u32 idx : INSN_CHUNK_BITS,
dead_end : 1,
- ignore : 1,
ignore_alts : 1,
hint : 1,
save : 1,
diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h
index 223ac1c24b90..c7c4e87ebe88 100644
--- a/tools/objtool/include/objtool/elf.h
+++ b/tools/objtool/include/objtool/elf.h
@@ -65,10 +65,11 @@ struct symbol {
u8 return_thunk : 1;
u8 fentry : 1;
u8 profiling_func : 1;
+ u8 warned : 1;
u8 embedded_insn : 1;
u8 local_label : 1;
u8 frame_pointer : 1;
- u8 warnings : 2;
+ u8 ignore : 1;
struct list_head pv_target;
struct reloc *relocs;
};
@@ -77,7 +78,7 @@ struct reloc {
struct elf_hash_node hash;
struct section *sec;
struct symbol *sym;
- struct reloc *sym_next_reloc;
+ unsigned long _sym_next_reloc;
};
struct elf {
@@ -297,6 +298,31 @@ static inline void set_reloc_type(struct elf *elf, struct reloc *reloc, unsigned
mark_sec_changed(elf, reloc->sec, true);
}
+#define RELOC_JUMP_TABLE_BIT 1UL
+
+/* Does reloc mark the beginning of a jump table? */
+static inline bool is_jump_table(struct reloc *reloc)
+{
+ return reloc->_sym_next_reloc & RELOC_JUMP_TABLE_BIT;
+}
+
+static inline void set_jump_table(struct reloc *reloc)
+{
+ reloc->_sym_next_reloc |= RELOC_JUMP_TABLE_BIT;
+}
+
+static inline struct reloc *sym_next_reloc(struct reloc *reloc)
+{
+ return (struct reloc *)(reloc->_sym_next_reloc & ~RELOC_JUMP_TABLE_BIT);
+}
+
+static inline void set_sym_next_reloc(struct reloc *reloc, struct reloc *next)
+{
+ unsigned long bit = reloc->_sym_next_reloc & RELOC_JUMP_TABLE_BIT;
+
+ reloc->_sym_next_reloc = (unsigned long)next | bit;
+}
+
#define for_each_sec(file, sec) \
list_for_each_entry(sec, &file->elf->sections, list)
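
The accessors above fold a boolean into the low bit of the next-reloc pointer, which is safe because struct reloc objects are at least word aligned, so the low bit of a genuine pointer is always zero. A minimal standalone sketch of the same low-bit tagging, using hypothetical names (node, tagged_next) rather than objtool's:

/* Sketch only: low-bit pointer tagging as used by _sym_next_reloc.
 * Assumes the pointed-to objects are at least 2-byte aligned. */
#include <assert.h>

#define TAG_BIT 1UL

struct node {
	unsigned long tagged_next;	/* next-pointer bits | tag bit */
};

static struct node *node_next(struct node *n)
{
	return (struct node *)(n->tagged_next & ~TAG_BIT);
}

static void node_set_next(struct node *n, struct node *next)
{
	/* keep the tag while replacing the pointer, like set_sym_next_reloc() */
	n->tagged_next = (unsigned long)next | (n->tagged_next & TAG_BIT);
}

int main(void)
{
	struct node a = { 0 }, b = { 0 };

	a.tagged_next |= TAG_BIT;	/* mark, like set_jump_table() */
	node_set_next(&a, &b);		/* link without losing the mark */
	assert(node_next(&a) == &b);
	assert(a.tagged_next & TAG_BIT);
	return 0;
}
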
diff --git a/tools/objtool/include/objtool/objtool.h b/tools/objtool/include/objtool/objtool.h
index 94a33ee7b363..c0dc86a78ff6 100644
--- a/tools/objtool/include/objtool/objtool.h
+++ b/tools/objtool/include/objtool/objtool.h
@@ -41,7 +41,7 @@ struct objtool_file {
struct objtool_file *objtool_open_read(const char *_objname);
-void objtool_pv_add(struct objtool_file *file, int idx, struct symbol *func);
+int objtool_pv_add(struct objtool_file *file, int idx, struct symbol *func);
int check(struct objtool_file *file);
int orc_dump(const char *objname);
diff --git a/tools/objtool/include/objtool/special.h b/tools/objtool/include/objtool/special.h
index e049679bb17b..72d09c0adf1a 100644
--- a/tools/objtool/include/objtool/special.h
+++ b/tools/objtool/include/objtool/special.h
@@ -16,8 +16,6 @@ struct special_alt {
struct list_head list;
bool group;
- bool skip_orig;
- bool skip_alt;
bool jump_or_nop;
u8 key_addend;
@@ -32,7 +30,7 @@ struct special_alt {
int special_get_alts(struct elf *elf, struct list_head *alts);
-void arch_handle_alternative(unsigned short feature, struct special_alt *alt);
+void arch_handle_alternative(struct special_alt *alt);
bool arch_support_alt_relocation(struct special_alt *special_alt,
struct instruction *insn,
diff --git a/tools/objtool/include/objtool/warn.h b/tools/objtool/include/objtool/warn.h
index e72b9d630551..cb8fe846d9dd 100644
--- a/tools/objtool/include/objtool/warn.h
+++ b/tools/objtool/include/objtool/warn.h
@@ -11,6 +11,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
+#include <errno.h>
#include <objtool/builtin.h>
#include <objtool/elf.h>
@@ -41,36 +42,46 @@ static inline char *offstr(struct section *sec, unsigned long offset)
return str;
}
-#define WARN(format, ...) \
- fprintf(stderr, \
- "%s: %s: objtool: " format "\n", \
- objname, \
- opts.werror ? "error" : "warning", \
+#define ___WARN(severity, extra, format, ...) \
+ fprintf(stderr, \
+ "%s%s%s: objtool" extra ": " format "\n", \
+ objname ?: "", \
+ objname ? ": " : "", \
+ severity, \
##__VA_ARGS__)
-#define WARN_FUNC(format, sec, offset, ...) \
-({ \
- char *_str = offstr(sec, offset); \
- WARN("%s: " format, _str, ##__VA_ARGS__); \
- free(_str); \
+#define __WARN(severity, format, ...) \
+ ___WARN(severity, "", format, ##__VA_ARGS__)
+
+#define __WARN_LINE(severity, format, ...) \
+ ___WARN(severity, " [%s:%d]", format, __FILE__, __LINE__, ##__VA_ARGS__)
+
+#define __WARN_ELF(severity, format, ...) \
+ __WARN_LINE(severity, "%s: " format " failed: %s", __func__, ##__VA_ARGS__, elf_errmsg(-1))
+
+#define __WARN_GLIBC(severity, format, ...) \
+ __WARN_LINE(severity, "%s: " format " failed: %s", __func__, ##__VA_ARGS__, strerror(errno))
+
+#define __WARN_FUNC(severity, sec, offset, format, ...) \
+({ \
+ char *_str = offstr(sec, offset); \
+ __WARN(severity, "%s: " format, _str, ##__VA_ARGS__); \
+ free(_str); \
})
-#define WARN_LIMIT 2
+#define WARN_STR (opts.werror ? "error" : "warning")
+
+#define WARN(format, ...) __WARN(WARN_STR, format, ##__VA_ARGS__)
+#define WARN_FUNC(sec, offset, format, ...) __WARN_FUNC(WARN_STR, sec, offset, format, ##__VA_ARGS__)
#define WARN_INSN(insn, format, ...) \
({ \
struct instruction *_insn = (insn); \
- BUILD_BUG_ON(WARN_LIMIT > 2); \
- if (!_insn->sym || _insn->sym->warnings < WARN_LIMIT) { \
- WARN_FUNC(format, _insn->sec, _insn->offset, \
+ if (!_insn->sym || !_insn->sym->warned) \
+ WARN_FUNC(_insn->sec, _insn->offset, format, \
##__VA_ARGS__); \
- if (_insn->sym) \
- _insn->sym->warnings++; \
- } else if (_insn->sym && _insn->sym->warnings == WARN_LIMIT) { \
- WARN_FUNC("skipping duplicate warning(s)", \
- _insn->sec, _insn->offset); \
- _insn->sym->warnings++; \
- } \
+ if (_insn->sym) \
+ _insn->sym->warned = 1; \
})
#define BT_INSN(insn, format, ...) \
@@ -83,7 +94,12 @@ static inline char *offstr(struct section *sec, unsigned long offset)
} \
})
-#define WARN_ELF(format, ...) \
- WARN(format ": %s", ##__VA_ARGS__, elf_errmsg(-1))
+#define ERROR_STR "error"
+
+#define ERROR(format, ...) __WARN(ERROR_STR, format, ##__VA_ARGS__)
+#define ERROR_ELF(format, ...) __WARN_ELF(ERROR_STR, format, ##__VA_ARGS__)
+#define ERROR_GLIBC(format, ...) __WARN_GLIBC(ERROR_STR, format, ##__VA_ARGS__)
+#define ERROR_FUNC(sec, offset, format, ...) __WARN_FUNC(ERROR_STR, sec, offset, format, ##__VA_ARGS__)
+#define ERROR_INSN(insn, format, ...) WARN_FUNC(insn->sec, insn->offset, format, ##__VA_ARGS__)
#endif /* _WARN_H */
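
To see how the layering composes, here is a hedged, self-contained sketch outside the objtool tree (names are MY_-prefixed stand-ins, not the real header): every variant funnels into one fprintf core, and WARN versus ERROR differ only in the severity string they pass, so the --Werror decision lives in a single place.

/* Standalone sketch of the severity-parameterized macros above.
 * objname and werror stand in for objtool's globals; names are illustrative. */
#include <stdio.h>

static const char *objname = "vmlinux.o";
static int werror;			/* mirrors opts.werror */

#define MY___WARN(severity, format, ...)			\
	fprintf(stderr, "%s%s%s: objtool: " format "\n",	\
		objname ? objname : "",				\
		objname ? ": " : "",				\
		severity, ##__VA_ARGS__)

#define MY_WARN_STR (werror ? "error" : "warning")
#define MY_WARN(format, ...)  MY___WARN(MY_WARN_STR, format, ##__VA_ARGS__)
#define MY_ERROR(format, ...) MY___WARN("error", format, ##__VA_ARGS__)

int main(void)
{
	MY_WARN("unreachable instruction");	/* warning unless werror is set */
	MY_ERROR("can't find .strtab section");	/* always reported as error */
	return 0;
}
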
diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c
index 1c73fb62fd57..5c8b974ad0f9 100644
--- a/tools/objtool/objtool.c
+++ b/tools/objtool/objtool.c
@@ -23,7 +23,7 @@ static struct objtool_file file;
struct objtool_file *objtool_open_read(const char *filename)
{
if (file.elf) {
- WARN("won't handle more than one file at a time");
+ ERROR("won't handle more than one file at a time");
return NULL;
}
@@ -44,14 +44,14 @@ struct objtool_file *objtool_open_read(const char *filename)
return &file;
}
-void objtool_pv_add(struct objtool_file *f, int idx, struct symbol *func)
+int objtool_pv_add(struct objtool_file *f, int idx, struct symbol *func)
{
if (!opts.noinstr)
- return;
+ return 0;
if (!f->pv_ops) {
- WARN("paravirt confusion");
- return;
+ ERROR("paravirt confusion");
+ return -1;
}
/*
@@ -60,14 +60,15 @@ void objtool_pv_add(struct objtool_file *f, int idx, struct symbol *func)
*/
if (!strcmp(func->name, "_paravirt_nop") ||
!strcmp(func->name, "_paravirt_ident_64"))
- return;
+ return 0;
/* already added this function */
if (!list_empty(&func->pv_target))
- return;
+ return 0;
list_add(&func->pv_target, &f->pv_ops[idx].targets);
f->pv_ops[idx].clean = false;
+ return 0;
}
int main(int argc, const char **argv)
diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c
index 05ef0e297837..1dd9fc18fe62 100644
--- a/tools/objtool/orc_dump.c
+++ b/tools/objtool/orc_dump.c
@@ -36,47 +36,47 @@ int orc_dump(const char *filename)
elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
if (!elf) {
- WARN_ELF("elf_begin");
+ ERROR_ELF("elf_begin");
return -1;
}
if (!elf64_getehdr(elf)) {
- WARN_ELF("elf64_getehdr");
+ ERROR_ELF("elf64_getehdr");
return -1;
}
memcpy(&dummy_elf.ehdr, elf64_getehdr(elf), sizeof(dummy_elf.ehdr));
if (elf_getshdrnum(elf, &nr_sections)) {
- WARN_ELF("elf_getshdrnum");
+ ERROR_ELF("elf_getshdrnum");
return -1;
}
if (elf_getshdrstrndx(elf, &shstrtab_idx)) {
- WARN_ELF("elf_getshdrstrndx");
+ ERROR_ELF("elf_getshdrstrndx");
return -1;
}
for (i = 0; i < nr_sections; i++) {
scn = elf_getscn(elf, i);
if (!scn) {
- WARN_ELF("elf_getscn");
+ ERROR_ELF("elf_getscn");
return -1;
}
if (!gelf_getshdr(scn, &sh)) {
- WARN_ELF("gelf_getshdr");
+ ERROR_ELF("gelf_getshdr");
return -1;
}
name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
if (!name) {
- WARN_ELF("elf_strptr");
+ ERROR_ELF("elf_strptr");
return -1;
}
data = elf_getdata(scn, NULL);
if (!data) {
- WARN_ELF("elf_getdata");
+ ERROR_ELF("elf_getdata");
return -1;
}
@@ -99,7 +99,7 @@ int orc_dump(const char *filename)
return 0;
if (orc_size % sizeof(*orc) != 0) {
- WARN("bad .orc_unwind section size");
+ ERROR("bad .orc_unwind section size");
return -1;
}
@@ -107,36 +107,36 @@ int orc_dump(const char *filename)
for (i = 0; i < nr_entries; i++) {
if (rela_orc_ip) {
if (!gelf_getrela(rela_orc_ip, i, &rela)) {
- WARN_ELF("gelf_getrela");
+ ERROR_ELF("gelf_getrela");
return -1;
}
if (!gelf_getsym(symtab, GELF_R_SYM(rela.r_info), &sym)) {
- WARN_ELF("gelf_getsym");
+ ERROR_ELF("gelf_getsym");
return -1;
}
if (GELF_ST_TYPE(sym.st_info) == STT_SECTION) {
scn = elf_getscn(elf, sym.st_shndx);
if (!scn) {
- WARN_ELF("elf_getscn");
+ ERROR_ELF("elf_getscn");
return -1;
}
if (!gelf_getshdr(scn, &sh)) {
- WARN_ELF("gelf_getshdr");
+ ERROR_ELF("gelf_getshdr");
return -1;
}
name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
if (!name) {
- WARN_ELF("elf_strptr");
+ ERROR_ELF("elf_strptr");
return -1;
}
} else {
name = elf_strptr(elf, strtab_idx, sym.st_name);
if (!name) {
- WARN_ELF("elf_strptr");
+ ERROR_ELF("elf_strptr");
return -1;
}
}
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index 097a69db82a0..c80fed8a840e 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -54,7 +54,7 @@ static const struct special_entry entries[] = {
{},
};
-void __weak arch_handle_alternative(unsigned short feature, struct special_alt *alt)
+void __weak arch_handle_alternative(struct special_alt *alt)
{
}
@@ -86,27 +86,18 @@ static int get_alt_entry(struct elf *elf, const struct special_entry *entry,
orig_reloc = find_reloc_by_dest(elf, sec, offset + entry->orig);
if (!orig_reloc) {
- WARN_FUNC("can't find orig reloc", sec, offset + entry->orig);
+ ERROR_FUNC(sec, offset + entry->orig, "can't find orig reloc");
return -1;
}
reloc_to_sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off);
- if (entry->feature) {
- unsigned short feature;
-
- feature = bswap_if_needed(elf,
- *(unsigned short *)(sec->data->d_buf +
- offset +
- entry->feature));
- arch_handle_alternative(feature, alt);
- }
+ arch_handle_alternative(alt);
if (!entry->group || alt->new_len) {
new_reloc = find_reloc_by_dest(elf, sec, offset + entry->new);
if (!new_reloc) {
- WARN_FUNC("can't find new reloc",
- sec, offset + entry->new);
+ ERROR_FUNC(sec, offset + entry->new, "can't find new reloc");
return -1;
}
@@ -122,8 +113,7 @@ static int get_alt_entry(struct elf *elf, const struct special_entry *entry,
key_reloc = find_reloc_by_dest(elf, sec, offset + entry->key);
if (!key_reloc) {
- WARN_FUNC("can't find key reloc",
- sec, offset + entry->key);
+ ERROR_FUNC(sec, offset + entry->key, "can't find key reloc");
return -1;
}
alt->key_addend = reloc_addend(key_reloc);
@@ -153,8 +143,7 @@ int special_get_alts(struct elf *elf, struct list_head *alts)
continue;
if (sec->sh.sh_size % entry->size != 0) {
- WARN("%s size not a multiple of %d",
- sec->name, entry->size);
+ ERROR("%s size not a multiple of %d", sec->name, entry->size);
return -1;
}
@@ -163,7 +152,7 @@ int special_get_alts(struct elf *elf, struct list_head *alts)
for (idx = 0; idx < nr_entries; idx++) {
alt = malloc(sizeof(*alt));
if (!alt) {
- WARN("malloc failed");
+ ERROR_GLIBC("malloc failed");
return -1;
}
memset(alt, 0, sizeof(*alt));
diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
index dc4333d23189..8787048c6762 100644
--- a/tools/sched_ext/include/scx/common.bpf.h
+++ b/tools/sched_ext/include/scx/common.bpf.h
@@ -586,36 +586,48 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
}
}
-#define READ_ONCE(x) \
-({ \
- union { typeof(x) __val; char __c[1]; } __u = \
- { .__c = { 0 } }; \
- __read_once_size(&(x), __u.__c, sizeof(x)); \
- __u.__val; \
-})
-
-#define WRITE_ONCE(x, val) \
-({ \
- union { typeof(x) __val; char __c[1]; } __u = \
- { .__val = (val) }; \
- __write_once_size(&(x), __u.__c, sizeof(x)); \
- __u.__val; \
-})
-
-#define READ_ONCE_ARENA(type, x) \
-({ \
- union { type __val; char __c[1]; } __u = \
- { .__c = { 0 } }; \
- __read_once_size((void *)&(x), __u.__c, sizeof(x)); \
- __u.__val; \
+/*
+ * __unqual_typeof(x) - Declare an unqualified scalar type, leaving
+ * non-scalar types unchanged.
+ *
+ * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
+ * is not type-compatible with 'signed char', and we define a separate case.
+ *
+ * This is copied verbatim from kernel's include/linux/compiler_types.h, but
+ * with default expression (for pointers) changed from (x) to (typeof(x)0).
+ *
+ * This is because LLVM has a bug where, for an lvalue (x), it does not drop
+ * an extra address_space qualifier, but does for an rvalue (typeof(x)0).
+ * Hence, for pointers, we need to create an rvalue expression to get the
+ * desired type. See https://github.com/llvm/llvm-project/issues/53400.
+ */
+#define __scalar_type_to_expr_cases(type) \
+ unsigned type : (unsigned type)0, signed type : (signed type)0
+
+#define __unqual_typeof(x) \
+ typeof(_Generic((x), \
+ char: (char)0, \
+ __scalar_type_to_expr_cases(char), \
+ __scalar_type_to_expr_cases(short), \
+ __scalar_type_to_expr_cases(int), \
+ __scalar_type_to_expr_cases(long), \
+ __scalar_type_to_expr_cases(long long), \
+ default: (typeof(x))0))
+
+#define READ_ONCE(x) \
+({ \
+ union { __unqual_typeof(x) __val; char __c[1]; } __u = \
+ { .__c = { 0 } }; \
+ __read_once_size((__unqual_typeof(x) *)&(x), __u.__c, sizeof(x)); \
+ __u.__val; \
})
-#define WRITE_ONCE_ARENA(type, x, val) \
-({ \
- union { type __val; char __c[1]; } __u = \
- { .__val = (val) }; \
- __write_once_size((void *)&(x), __u.__c, sizeof(x)); \
- __u.__val; \
+#define WRITE_ONCE(x, val) \
+({ \
+ union { __unqual_typeof(x) __val; char __c[1]; } __u = \
+ { .__val = (val) }; \
+ __write_once_size((__unqual_typeof(x) *)&(x), __u.__c, sizeof(x)); \
+ __u.__val; \
})
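
The point of __unqual_typeof() is that plain typeof(x) would carry qualifiers into the union member: for a volatile or address-space-qualified x, the temporary itself would become qualified, defeating the purpose of funneling the access through the *_once_size() helpers. A hedged standalone illustration of the _Generic stripping (plain C outside the BPF build; the macro below is a cut-down stand-in, not this header's):

/* Illustration only: _Generic's controlling expression undergoes lvalue
 * conversion, which drops top-level qualifiers, so a 'volatile int' selects
 * the plain 'int' case and the resulting type is unqualified. */
#include <assert.h>

#define unqual_typeof(x)			\
	typeof(_Generic((x),			\
		char: (char)0,			\
		signed char: (signed char)0,	\
		unsigned char: (unsigned char)0,\
		int: 0,				\
		unsigned int: 0u,		\
		long: 0L,			\
		unsigned long: 0UL,		\
		default: (typeof(x))0))

int main(void)
{
	volatile int v = 41;
	unqual_typeof(v) tmp = v;	/* tmp is a plain int, not volatile */

	tmp++;
	assert(tmp == 42);
	return 0;
}
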
/*
@@ -648,6 +660,23 @@ static inline u32 log2_u64(u64 v)
return log2_u32(v) + 1;
}
+/*
+ * Return a value proportionally scaled to the task's weight.
+ */
+static inline u64 scale_by_task_weight(const struct task_struct *p, u64 value)
+{
+ return (value * p->scx.weight) / 100;
+}
+
+/*
+ * Return a value inversely proportional to the task's weight.
+ */
+static inline u64 scale_by_task_weight_inverse(const struct task_struct *p, u64 value)
+{
+ return value * 100 / p->scx.weight;
+}
+
+
#include "compat.bpf.h"
#include "enums.bpf.h"
diff --git a/tools/sched_ext/include/scx/enum_defs.autogen.h b/tools/sched_ext/include/scx/enum_defs.autogen.h
index 6e6c45f14fe1..c2c33df9292c 100644
--- a/tools/sched_ext/include/scx/enum_defs.autogen.h
+++ b/tools/sched_ext/include/scx/enum_defs.autogen.h
@@ -88,6 +88,8 @@
#define HAVE_SCX_OPS_ENQ_LAST
#define HAVE_SCX_OPS_ENQ_EXITING
#define HAVE_SCX_OPS_SWITCH_PARTIAL
+#define HAVE_SCX_OPS_ENQ_MIGRATION_DISABLED
+#define HAVE_SCX_OPS_ALLOW_QUEUED_WAKEUP
#define HAVE_SCX_OPS_HAS_CGROUP_WEIGHT
#define HAVE_SCX_OPS_ALL_FLAGS
#define HAVE_SCX_OPSS_NONE
@@ -104,6 +106,7 @@
#define HAVE_SCX_RQ_BAL_PENDING
#define HAVE_SCX_RQ_BAL_KEEP
#define HAVE_SCX_RQ_BYPASSING
+#define HAVE_SCX_RQ_CLK_VALID
#define HAVE_SCX_RQ_IN_WAKEUP
#define HAVE_SCX_RQ_IN_BALANCE
#define HAVE_SCX_TASK_NONE
diff --git a/tools/sched_ext/include/scx/enums.autogen.bpf.h b/tools/sched_ext/include/scx/enums.autogen.bpf.h
index 0e941a0d6f88..2f8002bcc19a 100644
--- a/tools/sched_ext/include/scx/enums.autogen.bpf.h
+++ b/tools/sched_ext/include/scx/enums.autogen.bpf.h
@@ -13,6 +13,30 @@ const volatile u64 __SCX_SLICE_DFL __weak;
const volatile u64 __SCX_SLICE_INF __weak;
#define SCX_SLICE_INF __SCX_SLICE_INF
+const volatile u64 __SCX_RQ_ONLINE __weak;
+#define SCX_RQ_ONLINE __SCX_RQ_ONLINE
+
+const volatile u64 __SCX_RQ_CAN_STOP_TICK __weak;
+#define SCX_RQ_CAN_STOP_TICK __SCX_RQ_CAN_STOP_TICK
+
+const volatile u64 __SCX_RQ_BAL_PENDING __weak;
+#define SCX_RQ_BAL_PENDING __SCX_RQ_BAL_PENDING
+
+const volatile u64 __SCX_RQ_BAL_KEEP __weak;
+#define SCX_RQ_BAL_KEEP __SCX_RQ_BAL_KEEP
+
+const volatile u64 __SCX_RQ_BYPASSING __weak;
+#define SCX_RQ_BYPASSING __SCX_RQ_BYPASSING
+
+const volatile u64 __SCX_RQ_CLK_VALID __weak;
+#define SCX_RQ_CLK_VALID __SCX_RQ_CLK_VALID
+
+const volatile u64 __SCX_RQ_IN_WAKEUP __weak;
+#define SCX_RQ_IN_WAKEUP __SCX_RQ_IN_WAKEUP
+
+const volatile u64 __SCX_RQ_IN_BALANCE __weak;
+#define SCX_RQ_IN_BALANCE __SCX_RQ_IN_BALANCE
+
const volatile u64 __SCX_DSQ_FLAG_BUILTIN __weak;
#define SCX_DSQ_FLAG_BUILTIN __SCX_DSQ_FLAG_BUILTIN
diff --git a/tools/sched_ext/include/scx/enums.autogen.h b/tools/sched_ext/include/scx/enums.autogen.h
index 88137a140e72..fedec938584b 100644
--- a/tools/sched_ext/include/scx/enums.autogen.h
+++ b/tools/sched_ext/include/scx/enums.autogen.h
@@ -8,6 +8,14 @@
SCX_ENUM_SET(skel, scx_public_consts, SCX_OPS_NAME_LEN); \
SCX_ENUM_SET(skel, scx_public_consts, SCX_SLICE_DFL); \
SCX_ENUM_SET(skel, scx_public_consts, SCX_SLICE_INF); \
+ SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_ONLINE); \
+ SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_CAN_STOP_TICK); \
+ SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_BAL_PENDING); \
+ SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_BAL_KEEP); \
+ SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_BYPASSING); \
+ SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_CLK_VALID); \
+ SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_IN_WAKEUP); \
+ SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_IN_BALANCE); \
SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_FLAG_BUILTIN); \
SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_FLAG_LOCAL_ON); \
SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_INVALID); \
diff --git a/tools/sched_ext/include/scx/enums.h b/tools/sched_ext/include/scx/enums.h
index 34cbebe974b7..8e7c91575f0b 100644
--- a/tools/sched_ext/include/scx/enums.h
+++ b/tools/sched_ext/include/scx/enums.h
@@ -14,7 +14,8 @@ static inline void __ENUM_set(u64 *val, char *type, char *name)
bool res;
res = __COMPAT_read_enum(type, name, val);
- SCX_BUG_ON(!res, "enum not found(%s)", name);
+ if (!res)
+ *val = 0;
}
#define SCX_ENUM_SET(skel, type, name) do { \
diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild
index 0a6572ab6f37..387f3df8b988 100644
--- a/tools/testing/cxl/Kbuild
+++ b/tools/testing/cxl/Kbuild
@@ -61,8 +61,11 @@ cxl_core-y += $(CXL_CORE_SRC)/pci.o
cxl_core-y += $(CXL_CORE_SRC)/hdm.o
cxl_core-y += $(CXL_CORE_SRC)/pmu.o
cxl_core-y += $(CXL_CORE_SRC)/cdat.o
+cxl_core-y += $(CXL_CORE_SRC)/ras.o
+cxl_core-y += $(CXL_CORE_SRC)/acpi.o
cxl_core-$(CONFIG_TRACING) += $(CXL_CORE_SRC)/trace.o
cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o
+cxl_core-$(CONFIG_CXL_MCE) += $(CXL_CORE_SRC)/mce.o
cxl_core-$(CONFIG_CXL_FEATURES) += $(CXL_CORE_SRC)/features.o
cxl_core-y += config_check.o
cxl_core-y += cxl_core_test.o
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index cc8948f49117..1c3336095923 100644
--- a/tools/testing/cxl/test/cxl.c
+++ b/tools/testing/cxl/test/cxl.c
@@ -155,7 +155,7 @@ static struct {
} cfmws7;
struct {
struct acpi_cedt_cfmws cfmws;
- u32 target[4];
+ u32 target[3];
} cfmws8;
struct {
struct acpi_cedt_cxims cxims;
@@ -331,14 +331,14 @@ static struct {
.length = sizeof(mock_cedt.cfmws8),
},
.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
- .interleave_ways = 2,
- .granularity = 0,
+ .interleave_ways = 8,
+ .granularity = 1,
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_PMEM,
.qtg_id = FAKE_QTG_ID,
- .window_size = SZ_256M * 16UL,
+ .window_size = SZ_512M * 6UL,
},
- .target = { 0, 1, 0, 1, },
+ .target = { 0, 1, 2, },
},
.cxims0 = {
.cxims = {
@@ -1000,25 +1000,21 @@ static void mock_cxl_endpoint_parse_cdat(struct cxl_port *port)
find_cxl_root(port);
struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
struct access_coordinate ep_c[ACCESS_COORDINATE_MAX];
- struct range pmem_range = {
- .start = cxlds->pmem_res.start,
- .end = cxlds->pmem_res.end,
- };
- struct range ram_range = {
- .start = cxlds->ram_res.start,
- .end = cxlds->ram_res.end,
- };
if (!cxl_root)
return;
- if (range_len(&ram_range))
- dpa_perf_setup(port, &ram_range, &mds->ram_perf);
+ for (int i = 0; i < cxlds->nr_partitions; i++) {
+ struct resource *res = &cxlds->part[i].res;
+ struct cxl_dpa_perf *perf = &cxlds->part[i].perf;
+ struct range range = {
+ .start = res->start,
+ .end = res->end,
+ };
- if (range_len(&pmem_range))
- dpa_perf_setup(port, &pmem_range, &mds->pmem_perf);
+ dpa_perf_setup(port, &range, perf);
+ }
cxl_memdev_update_perf(cxlmd);
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index 9495dbcc03a7..f2957a3e36fe 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -78,6 +78,10 @@ static struct cxl_cel_entry mock_cel[] = {
.effect = CXL_CMD_EFFECT_NONE,
},
{
+ .opcode = cpu_to_le16(CXL_MBOX_OP_SET_SHUTDOWN_STATE),
+ .effect = POLICY_CHANGE_IMMEDIATE,
+ },
+ {
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
.effect = CXL_CMD_EFFECT_NONE,
},
@@ -178,6 +182,7 @@ struct cxl_mockmem_data {
u64 timestamp;
unsigned long sanitize_timeout;
struct vendor_test_feat test_feat;
+ u8 shutdown_state;
};
static struct mock_event_log *event_find_log(struct device *dev, int log_type)
@@ -1105,6 +1110,21 @@ static int mock_health_info(struct cxl_mbox_cmd *cmd)
return 0;
}
+static int mock_set_shutdown_state(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_set_shutdown_state_in *ss = cmd->payload_in;
+
+ if (cmd->size_in != sizeof(*ss))
+ return -EINVAL;
+
+ if (cmd->size_out != 0)
+ return -EINVAL;
+
+ mdata->shutdown_state = ss->state;
+ return 0;
+}
+
static struct mock_poison {
struct cxl_dev_state *cxlds;
u64 dpa;
@@ -1583,6 +1603,9 @@ static int cxl_mock_mbox_send(struct cxl_mailbox *cxl_mbox,
case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
rc = mock_passphrase_secure_erase(mdata, cmd);
break;
+ case CXL_MBOX_OP_SET_SHUTDOWN_STATE:
+ rc = mock_set_shutdown_state(mdata, cmd);
+ break;
case CXL_MBOX_OP_GET_POISON:
rc = mock_get_poison(cxlds, cmd);
break;
@@ -1670,6 +1693,7 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
struct cxl_dev_state *cxlds;
struct cxl_mockmem_data *mdata;
struct cxl_mailbox *cxl_mbox;
+ struct cxl_dpa_info range_info = { 0 };
int rc;
mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
@@ -1709,7 +1733,7 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mockmem_sanitize_work);
- cxlds->serial = pdev->id;
+ cxlds->serial = pdev->id + 1;
if (is_rcd(pdev))
cxlds->rcd = true;
@@ -1730,7 +1754,11 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
if (rc)
return rc;
- rc = cxl_mem_create_range_info(mds);
+ rc = cxl_mem_dpa_fetch(mds, &range_info);
+ if (rc)
+ return rc;
+
+ rc = cxl_dpa_setup(cxlds, &range_info);
if (rc)
return rc;
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile
index 8b3591a51e1f..b2a6660bbd92 100644
--- a/tools/testing/radix-tree/Makefile
+++ b/tools/testing/radix-tree/Makefile
@@ -14,6 +14,7 @@ include ../shared/shared.mk
main: $(OFILES)
+xarray.o: ../../../lib/test_xarray.c
idr-test.o: ../../../lib/test_ida.c
idr-test: idr-test.o $(CORE_OFILES)
diff --git a/tools/testing/rbtree/Makefile b/tools/testing/rbtree/Makefile
new file mode 100644
index 000000000000..d7bbae2af4c7
--- /dev/null
+++ b/tools/testing/rbtree/Makefile
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+
+.PHONY: clean
+
+TARGETS = rbtree_test interval_tree_test
+OFILES = $(SHARED_OFILES) rbtree-shim.o interval_tree-shim.o maple-shim.o
+DEPS = ../../../include/linux/rbtree.h \
+ ../../../include/linux/rbtree_types.h \
+ ../../../include/linux/rbtree_augmented.h \
+ ../../../include/linux/interval_tree.h \
+ ../../../include/linux/interval_tree_generic.h \
+ ../../../lib/rbtree.c \
+ ../../../lib/interval_tree.c
+
+targets: $(TARGETS)
+
+include ../shared/shared.mk
+
+ifeq ($(DEBUG), 1)
+ CFLAGS += -g
+endif
+
+$(TARGETS): $(OFILES)
+
+rbtree-shim.o: $(DEPS)
+rbtree_test.o: ../../../lib/rbtree_test.c
+interval_tree-shim.o: $(DEPS)
+interval_tree-shim.o: CFLAGS += -DCONFIG_INTERVAL_TREE_SPAN_ITER
+interval_tree_test.o: ../../../lib/interval_tree_test.c
+interval_tree_test.o: CFLAGS += -DCONFIG_INTERVAL_TREE_SPAN_ITER
+
+clean:
+ $(RM) $(TARGETS) *.o radix-tree.c idr.c generated/*
diff --git a/tools/testing/rbtree/interval_tree_test.c b/tools/testing/rbtree/interval_tree_test.c
new file mode 100644
index 000000000000..49bc5b534330
--- /dev/null
+++ b/tools/testing/rbtree/interval_tree_test.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * interval_tree_test.c: Userspace Interval Tree test-suite
+ * Copyright (c) 2025 Wei Yang <richard.weiyang@gmail.com>
+ */
+#include <linux/math64.h>
+#include <linux/kern_levels.h>
+#include "shared.h"
+#include "maple-shared.h"
+
+#include "../../../lib/interval_tree_test.c"
+
+int usage(void)
+{
+ fprintf(stderr, "Userland interval tree test cases\n");
+ fprintf(stderr, " -n: Number of nodes in the interval tree\n");
+ fprintf(stderr, " -p: Number of iterations modifying the tree\n");
+ fprintf(stderr, " -q: Number of searches to the interval tree\n");
+ fprintf(stderr, " -s: Number of iterations searching the tree\n");
+ fprintf(stderr, " -a: Searches will iterate all nodes in the tree\n");
+ fprintf(stderr, " -m: Largest value for the interval's endpoint\n");
+ fprintf(stderr, " -r: Random seed\n");
+ exit(-1);
+}
+
+void interval_tree_tests(void)
+{
+ interval_tree_test_init();
+ interval_tree_test_exit();
+}
+
+int main(int argc, char **argv)
+{
+ int opt;
+
+ while ((opt = getopt(argc, argv, "n:p:q:s:am:r:")) != -1) {
+ if (opt == 'n')
+ nnodes = strtoul(optarg, NULL, 0);
+ else if (opt == 'p')
+ perf_loops = strtoul(optarg, NULL, 0);
+ else if (opt == 'q')
+ nsearches = strtoul(optarg, NULL, 0);
+ else if (opt == 's')
+ search_loops = strtoul(optarg, NULL, 0);
+ else if (opt == 'a')
+ search_all = true;
+ else if (opt == 'm')
+ max_endpoint = strtoul(optarg, NULL, 0);
+ else if (opt == 'r')
+ seed = strtoul(optarg, NULL, 0);
+ else
+ usage();
+ }
+
+ maple_tree_init();
+ interval_tree_tests();
+ return 0;
+}
diff --git a/tools/testing/rbtree/rbtree_test.c b/tools/testing/rbtree/rbtree_test.c
new file mode 100644
index 000000000000..585c970f679e
--- /dev/null
+++ b/tools/testing/rbtree/rbtree_test.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * rbtree_test.c: Userspace Red Black Tree test-suite
+ * Copyright (c) 2025 Wei Yang <richard.weiyang@gmail.com>
+ */
+#include <linux/init.h>
+#include <linux/math64.h>
+#include <linux/kern_levels.h>
+#include "shared.h"
+
+#include "../../../lib/rbtree_test.c"
+
+int usage(void)
+{
+ fprintf(stderr, "Userland rbtree test cases\n");
+ fprintf(stderr, " -n: Number of nodes in the rb-tree\n");
+ fprintf(stderr, " -p: Number of iterations modifying the rb-tree\n");
+ fprintf(stderr, " -c: Number of iterations modifying and verifying the rb-tree\n");
+ fprintf(stderr, " -r: Random seed\n");
+ exit(-1);
+}
+
+void rbtree_tests(void)
+{
+ rbtree_test_init();
+ rbtree_test_exit();
+}
+
+int main(int argc, char **argv)
+{
+ int opt;
+
+ while ((opt = getopt(argc, argv, "n:p:c:r:")) != -1) {
+ if (opt == 'n')
+ nnodes = strtoul(optarg, NULL, 0);
+ else if (opt == 'p')
+ perf_loops = strtoul(optarg, NULL, 0);
+ else if (opt == 'c')
+ check_loops = strtoul(optarg, NULL, 0);
+ else if (opt == 'r')
+ seed = strtoul(optarg, NULL, 0);
+ else
+ usage();
+ }
+
+ rbtree_tests();
+ return 0;
+}
diff --git a/tools/testing/rbtree/test.h b/tools/testing/rbtree/test.h
new file mode 100644
index 000000000000..f1f1b545b55a
--- /dev/null
+++ b/tools/testing/rbtree/test.h
@@ -0,0 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+void rbtree_tests(void);
+void interval_tree_tests(void);
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 2694344274bf..c77c8c8e3d9b 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -62,6 +62,7 @@ TARGETS += mount
TARGETS += mount_setattr
TARGETS += move_mount_set_group
TARGETS += mqueue
+TARGETS += mseal_system_mappings
TARGETS += nci
TARGETS += net
TARGETS += net/af_unix
diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h b/tools/testing/selftests/bpf/progs/profiler.inc.h
index 8bd1ebd7d6af..813143b4985d 100644
--- a/tools/testing/selftests/bpf/progs/profiler.inc.h
+++ b/tools/testing/selftests/bpf/progs/profiler.inc.h
@@ -223,7 +223,7 @@ static INLINE void* read_full_cgroup_path(struct kernfs_node* cgroup_node,
if (bpf_cmp_likely(filepart_length, <=, MAX_PATH)) {
payload += filepart_length;
}
- cgroup_node = BPF_CORE_READ(cgroup_node, parent);
+ cgroup_node = BPF_CORE_READ(cgroup_node, __parent);
}
return payload;
}
diff --git a/tools/testing/selftests/bpf/progs/test_module_attach.c b/tools/testing/selftests/bpf/progs/test_module_attach.c
index fb07f5773888..7f3c233943b3 100644
--- a/tools/testing/selftests/bpf/progs/test_module_attach.c
+++ b/tools/testing/selftests/bpf/progs/test_module_attach.c
@@ -117,7 +117,7 @@ int BPF_PROG(handle_fexit_ret, int arg, struct file *ret)
bpf_probe_read_kernel(&buf, 8, ret);
bpf_probe_read_kernel(&buf, 8, (char *)ret + 256);
- *(volatile long long *)ret;
+ *(volatile int *)ret;
*(volatile int *)&ret->f_mode;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_subprogs_extable.c b/tools/testing/selftests/bpf/progs/test_subprogs_extable.c
index e2a21fbd4e44..dcac69f5928a 100644
--- a/tools/testing/selftests/bpf/progs/test_subprogs_extable.c
+++ b/tools/testing/selftests/bpf/progs/test_subprogs_extable.c
@@ -21,7 +21,7 @@ static __u64 test_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *data)
SEC("fexit/bpf_testmod_return_ptr")
int BPF_PROG(handle_fexit_ret_subprogs, int arg, struct file *ret)
{
- *(volatile long *)ret;
+ *(volatile int *)ret;
*(volatile int *)&ret->f_mode;
bpf_for_each_map_elem(&test_array, test_cb, NULL, 0);
triggered++;
@@ -31,7 +31,7 @@ int BPF_PROG(handle_fexit_ret_subprogs, int arg, struct file *ret)
SEC("fexit/bpf_testmod_return_ptr")
int BPF_PROG(handle_fexit_ret_subprogs2, int arg, struct file *ret)
{
- *(volatile long *)ret;
+ *(volatile int *)ret;
*(volatile int *)&ret->f_mode;
bpf_for_each_map_elem(&test_array, test_cb, NULL, 0);
triggered++;
@@ -41,7 +41,7 @@ int BPF_PROG(handle_fexit_ret_subprogs2, int arg, struct file *ret)
SEC("fexit/bpf_testmod_return_ptr")
int BPF_PROG(handle_fexit_ret_subprogs3, int arg, struct file *ret)
{
- *(volatile long *)ret;
+ *(volatile int *)ret;
*(volatile int *)&ret->f_mode;
bpf_for_each_map_elem(&test_array, test_cb, NULL, 0);
triggered++;
diff --git a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
index a9be6ae49454..c258b0722e04 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
@@ -12,7 +12,7 @@ SEC("raw_tp")
__arch_x86_64
__log_level(4) __msg("stack depth 8")
__xlated("4: r5 = 5")
-__xlated("5: w0 = ")
+__xlated("5: r0 = ")
__xlated("6: r0 = &(void __percpu *)(r0)")
__xlated("7: r0 = *(u32 *)(r0 +0)")
__xlated("8: exit")
@@ -704,7 +704,7 @@ SEC("raw_tp")
__arch_x86_64
__log_level(4) __msg("stack depth 32+0")
__xlated("2: r1 = 1")
-__xlated("3: w0 =")
+__xlated("3: r0 =")
__xlated("4: r0 = &(void __percpu *)(r0)")
__xlated("5: r0 = *(u32 *)(r0 +0)")
/* bpf_loop params setup */
@@ -753,7 +753,7 @@ __arch_x86_64
__log_level(4) __msg("stack depth 40+0")
/* call bpf_get_smp_processor_id */
__xlated("2: r1 = 42")
-__xlated("3: w0 =")
+__xlated("3: r0 =")
__xlated("4: r0 = &(void __percpu *)(r0)")
__xlated("5: r0 = *(u32 *)(r0 +0)")
/* call bpf_get_prandom_u32 */
diff --git a/tools/testing/selftests/bpf/progs/verifier_private_stack.c b/tools/testing/selftests/bpf/progs/verifier_private_stack.c
index b1fbdf119553..fc91b414364e 100644
--- a/tools/testing/selftests/bpf/progs/verifier_private_stack.c
+++ b/tools/testing/selftests/bpf/progs/verifier_private_stack.c
@@ -27,7 +27,7 @@ __description("Private stack, single prog")
__success
__arch_x86_64
__jited(" movabsq $0x{{.*}}, %r9")
-__jited(" addq %gs:0x{{.*}}, %r9")
+__jited(" addq %gs:{{.*}}, %r9")
__jited(" movl $0x2a, %edi")
__jited(" movq %rdi, -0x100(%r9)")
__naked void private_stack_single_prog(void)
@@ -74,7 +74,7 @@ __success
__arch_x86_64
/* private stack fp for the main prog */
__jited(" movabsq $0x{{.*}}, %r9")
-__jited(" addq %gs:0x{{.*}}, %r9")
+__jited(" addq %gs:{{.*}}, %r9")
__jited(" movl $0x2a, %edi")
__jited(" movq %rdi, -0x200(%r9)")
__jited(" pushq %r9")
@@ -122,7 +122,7 @@ __jited(" pushq %rbp")
__jited(" movq %rsp, %rbp")
__jited(" endbr64")
__jited(" movabsq $0x{{.*}}, %r9")
-__jited(" addq %gs:0x{{.*}}, %r9")
+__jited(" addq %gs:{{.*}}, %r9")
__jited(" pushq %r9")
__jited(" callq")
__jited(" popq %r9")
diff --git a/tools/testing/selftests/clone3/clone3_selftests.h b/tools/testing/selftests/clone3/clone3_selftests.h
index 3d2663fe50ba..eeca8005723f 100644
--- a/tools/testing/selftests/clone3/clone3_selftests.h
+++ b/tools/testing/selftests/clone3/clone3_selftests.h
@@ -16,7 +16,7 @@
#define ptr_to_u64(ptr) ((__u64)((uintptr_t)(ptr)))
#ifndef __NR_clone3
-#define __NR_clone3 -1
+#define __NR_clone3 435
#endif
struct __clone_args {
diff --git a/tools/testing/selftests/drivers/net/hds.py b/tools/testing/selftests/drivers/net/hds.py
index 7cc74faed743..8b7f6acad15f 100755
--- a/tools/testing/selftests/drivers/net/hds.py
+++ b/tools/testing/selftests/drivers/net/hds.py
@@ -20,7 +20,7 @@ def _get_hds_mode(cfg, netnl) -> str:
def _xdp_onoff(cfg):
- prog = cfg.rpath("../../net/lib/xdp_dummy.bpf.o")
+ prog = cfg.net_lib_dir / "xdp_dummy.bpf.o"
ip("link set dev %s xdp obj %s sec xdp" %
(cfg.ifname, prog))
ip("link set dev %s xdp off" % cfg.ifname)
diff --git a/tools/testing/selftests/drivers/net/hw/csum.py b/tools/testing/selftests/drivers/net/hw/csum.py
index 701aca1361e0..cd23af875317 100755
--- a/tools/testing/selftests/drivers/net/hw/csum.py
+++ b/tools/testing/selftests/drivers/net/hw/csum.py
@@ -88,7 +88,7 @@ def main() -> None:
with NetDrvEpEnv(__file__, nsim_test=False) as cfg:
check_nic_features(cfg)
- cfg.bin_local = cfg.rpath("../../../net/lib/csum")
+ cfg.bin_local = cfg.net_lib_dir / "csum"
cfg.bin_remote = cfg.remote.deploy(cfg.bin_local)
cases = []
diff --git a/tools/testing/selftests/drivers/net/hw/irq.py b/tools/testing/selftests/drivers/net/hw/irq.py
index 42ab98370245..0699d6a8b4e2 100755
--- a/tools/testing/selftests/drivers/net/hw/irq.py
+++ b/tools/testing/selftests/drivers/net/hw/irq.py
@@ -69,7 +69,7 @@ def check_reconfig_queues(cfg) -> None:
def check_reconfig_xdp(cfg) -> None:
def reconfig(cfg) -> None:
ip(f"link set dev %s xdp obj %s sec xdp" %
- (cfg.ifname, cfg.rpath("xdp_dummy.bpf.o")))
+ (cfg.ifname, cfg.net_lib_dir / "xdp_dummy.bpf.o"))
ip(f"link set dev %s xdp off" % cfg.ifname)
_check_reconfig(cfg, reconfig)
diff --git a/tools/testing/selftests/drivers/net/hw/xdp_dummy.bpf.c b/tools/testing/selftests/drivers/net/hw/xdp_dummy.bpf.c
deleted file mode 100644
index d988b2e0cee8..000000000000
--- a/tools/testing/selftests/drivers/net/hw/xdp_dummy.bpf.c
+++ /dev/null
@@ -1,13 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#define KBUILD_MODNAME "xdp_dummy"
-#include <linux/bpf.h>
-#include <bpf/bpf_helpers.h>
-
-SEC("xdp")
-int xdp_dummy_prog(struct xdp_md *ctx)
-{
- return XDP_PASS;
-}
-
-char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/drivers/net/lib/py/env.py b/tools/testing/selftests/drivers/net/lib/py/env.py
index fd4d674e6c72..ad5ff645183a 100644
--- a/tools/testing/selftests/drivers/net/lib/py/env.py
+++ b/tools/testing/selftests/drivers/net/lib/py/env.py
@@ -13,22 +13,17 @@ from .remote import Remote
class NetDrvEnvBase:
"""
Base class for NIC / host environments
+
+ Attributes:
+ test_dir: Path to the source directory of the test
+ net_lib_dir: Path to the net/lib directory
"""
def __init__(self, src_path):
- self.src_path = src_path
- self.env = self._load_env_file()
-
- def rpath(self, path):
- """
- Get an absolute path to a file based on a path relative to the directory
- containing the test which constructed env.
+ self.src_path = Path(src_path)
+ self.test_dir = self.src_path.parent.resolve()
+ self.net_lib_dir = (Path(__file__).parent / "../../../../net/lib").resolve()
- For example, if the test.py is in the same directory as
- a binary (built from helper.c), the test can use env.rpath("helper")
- to get the absolute path to the binary
- """
- src_dir = Path(self.src_path).parent.resolve()
- return (src_dir / path).as_posix()
+ self.env = self._load_env_file()
def _load_env_file(self):
env = os.environ.copy()
diff --git a/tools/testing/selftests/drivers/net/ping.py b/tools/testing/selftests/drivers/net/ping.py
index 93120e86e102..4b6822866066 100755
--- a/tools/testing/selftests/drivers/net/ping.py
+++ b/tools/testing/selftests/drivers/net/ping.py
@@ -56,8 +56,7 @@ def _set_offload_checksum(cfg, netnl, on) -> None:
return
def _set_xdp_generic_sb_on(cfg) -> None:
- test_dir = os.path.dirname(os.path.realpath(__file__))
- prog = test_dir + "/../../net/lib/xdp_dummy.bpf.o"
+ prog = cfg.net_lib_dir / "xdp_dummy.bpf.o"
cmd(f"ip link set dev {remote_ifname} mtu 1500", shell=True, host=cfg.remote)
cmd(f"ip link set dev {cfg.ifname} mtu 1500 xdpgeneric obj {prog} sec xdp", shell=True)
defer(cmd, f"ip link set dev {cfg.ifname} xdpgeneric off")
@@ -66,8 +65,7 @@ def _set_xdp_generic_sb_on(cfg) -> None:
time.sleep(10)
def _set_xdp_generic_mb_on(cfg) -> None:
- test_dir = os.path.dirname(os.path.realpath(__file__))
- prog = test_dir + "/../../net/lib/xdp_dummy.bpf.o"
+ prog = cfg.net_lib_dir / "xdp_dummy.bpf.o"
cmd(f"ip link set dev {remote_ifname} mtu 9000", shell=True, host=cfg.remote)
defer(ip, f"link set dev {remote_ifname} mtu 1500", host=cfg.remote)
ip("link set dev %s mtu 9000 xdpgeneric obj %s sec xdp.frags" % (cfg.ifname, prog))
@@ -77,8 +75,7 @@ def _set_xdp_generic_mb_on(cfg) -> None:
time.sleep(10)
def _set_xdp_native_sb_on(cfg) -> None:
- test_dir = os.path.dirname(os.path.realpath(__file__))
- prog = test_dir + "/../../net/lib/xdp_dummy.bpf.o"
+ prog = cfg.net_lib_dir / "xdp_dummy.bpf.o"
cmd(f"ip link set dev {remote_ifname} mtu 1500", shell=True, host=cfg.remote)
cmd(f"ip -j link set dev {cfg.ifname} mtu 1500 xdp obj {prog} sec xdp", shell=True)
defer(ip, f"link set dev {cfg.ifname} mtu 1500 xdp off")
@@ -95,8 +92,7 @@ def _set_xdp_native_sb_on(cfg) -> None:
time.sleep(10)
def _set_xdp_native_mb_on(cfg) -> None:
- test_dir = os.path.dirname(os.path.realpath(__file__))
- prog = test_dir + "/../../net/lib/xdp_dummy.bpf.o"
+ prog = cfg.net_lib_dir / "xdp_dummy.bpf.o"
cmd(f"ip link set dev {remote_ifname} mtu 9000", shell=True, host=cfg.remote)
defer(ip, f"link set dev {remote_ifname} mtu 1500", host=cfg.remote)
try:
@@ -109,8 +105,7 @@ def _set_xdp_native_mb_on(cfg) -> None:
time.sleep(10)
def _set_xdp_offload_on(cfg) -> None:
- test_dir = os.path.dirname(os.path.realpath(__file__))
- prog = test_dir + "/../../net/lib/xdp_dummy.bpf.o"
+ prog = cfg.net_lib_dir / "xdp_dummy.bpf.o"
cmd(f"ip link set dev {cfg.ifname} mtu 1500", shell=True)
try:
cmd(f"ip link set dev {cfg.ifname} xdpoffload obj {prog} sec xdp", shell=True)
diff --git a/tools/testing/selftests/drivers/net/queues.py b/tools/testing/selftests/drivers/net/queues.py
index cae923f84f69..06abd3f233e1 100755
--- a/tools/testing/selftests/drivers/net/queues.py
+++ b/tools/testing/selftests/drivers/net/queues.py
@@ -26,13 +26,13 @@ def nl_get_queues(cfg, nl, qtype='rx'):
def check_xsk(cfg, nl, xdp_queue_id=0) -> None:
# Probe for support
- xdp = cmd(cfg.rpath("xdp_helper") + ' - -', fail=False)
+ xdp = cmd(f'{cfg.test_dir / "xdp_helper"} - -', fail=False)
if xdp.ret == 255:
raise KsftSkipEx('AF_XDP unsupported')
elif xdp.ret > 0:
raise KsftFailEx('unable to create AF_XDP socket')
- with bkg(f'{cfg.rpath("xdp_helper")} {cfg.ifindex} {xdp_queue_id}',
+ with bkg(f'{cfg.test_dir / "xdp_helper"} {cfg.ifindex} {xdp_queue_id}',
ksft_wait=3):
rx = tx = False
diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c
index a1b2b657999d..1a8e85afe9aa 100644
--- a/tools/testing/selftests/iommu/iommufd.c
+++ b/tools/testing/selftests/iommu/iommufd.c
@@ -342,12 +342,14 @@ FIXTURE(iommufd_ioas)
uint32_t hwpt_id;
uint32_t device_id;
uint64_t base_iova;
+ uint32_t device_pasid_id;
};
FIXTURE_VARIANT(iommufd_ioas)
{
unsigned int mock_domains;
unsigned int memory_limit;
+ bool pasid_capable;
};
FIXTURE_SETUP(iommufd_ioas)
@@ -372,6 +374,12 @@ FIXTURE_SETUP(iommufd_ioas)
IOMMU_TEST_DEV_CACHE_DEFAULT);
self->base_iova = MOCK_APERTURE_START;
}
+
+ if (variant->pasid_capable)
+ test_cmd_mock_domain_flags(self->ioas_id,
+ MOCK_FLAGS_DEVICE_PASID,
+ NULL, NULL,
+ &self->device_pasid_id);
}
FIXTURE_TEARDOWN(iommufd_ioas)
@@ -387,6 +395,7 @@ FIXTURE_VARIANT_ADD(iommufd_ioas, no_domain)
FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain)
{
.mock_domains = 1,
+ .pasid_capable = true,
};
FIXTURE_VARIANT_ADD(iommufd_ioas, two_mock_domain)
@@ -439,6 +448,10 @@ TEST_F(iommufd_ioas, alloc_hwpt_nested)
&test_hwpt_id);
test_err_hwpt_alloc(EINVAL, self->device_id, self->device_id, 0,
&test_hwpt_id);
+ test_err_hwpt_alloc(EOPNOTSUPP, self->device_id, self->ioas_id,
+ IOMMU_HWPT_ALLOC_NEST_PARENT |
+ IOMMU_HWPT_FAULT_ID_VALID,
+ &test_hwpt_id);
test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
IOMMU_HWPT_ALLOC_NEST_PARENT,
@@ -748,6 +761,8 @@ TEST_F(iommufd_ioas, get_hw_info)
} buffer_smaller;
if (self->device_id) {
+ uint8_t max_pasid = 0;
+
/* Provide a zero-size user_buffer */
test_cmd_get_hw_info(self->device_id, NULL, 0);
/* Provide a user_buffer with exact size */
@@ -762,6 +777,13 @@ TEST_F(iommufd_ioas, get_hw_info)
* the fields within the size range still gets updated.
*/
test_cmd_get_hw_info(self->device_id, &buffer_smaller, sizeof(buffer_smaller));
+ test_cmd_get_hw_info_pasid(self->device_id, &max_pasid);
+ ASSERT_EQ(0, max_pasid);
+ if (variant->pasid_capable) {
+ test_cmd_get_hw_info_pasid(self->device_pasid_id,
+ &max_pasid);
+ ASSERT_EQ(MOCK_PASID_WIDTH, max_pasid);
+ }
} else {
test_err_get_hw_info(ENOENT, self->device_id,
&buffer_exact, sizeof(buffer_exact));
@@ -2736,6 +2758,7 @@ TEST_F(iommufd_viommu, viommu_alloc_nested_iopf)
uint32_t iopf_hwpt_id;
uint32_t fault_id;
uint32_t fault_fd;
+ uint32_t vdev_id;
if (self->device_id) {
test_ioctl_fault_alloc(&fault_id, &fault_fd);
@@ -2752,6 +2775,10 @@ TEST_F(iommufd_viommu, viommu_alloc_nested_iopf)
&iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST, &data,
sizeof(data));
+ /* Must allocate vdevice before attaching to a nested hwpt */
+ test_err_mock_domain_replace(ENOENT, self->stdev_id,
+ iopf_hwpt_id);
+ test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
EXPECT_ERRNO(EBUSY,
_test_ioctl_destroy(self->fd, iopf_hwpt_id));
@@ -2769,15 +2796,46 @@ TEST_F(iommufd_viommu, vdevice_alloc)
uint32_t viommu_id = self->viommu_id;
uint32_t dev_id = self->device_id;
uint32_t vdev_id = 0;
+ uint32_t veventq_id;
+ uint32_t veventq_fd;
+ int prev_seq = -1;
if (dev_id) {
+ /* Must allocate vdevice before attaching to a nested hwpt */
+ test_err_mock_domain_replace(ENOENT, self->stdev_id,
+ self->nested_hwpt_id);
+
+ /* Allocate a vEVENTQ with veventq_depth=2 */
+ test_cmd_veventq_alloc(viommu_id, IOMMU_VEVENTQ_TYPE_SELFTEST,
+ &veventq_id, &veventq_fd);
+ test_err_veventq_alloc(EEXIST, viommu_id,
+ IOMMU_VEVENTQ_TYPE_SELFTEST, NULL, NULL);
/* Set vdev_id to 0x99, unset it, and set to 0x88 */
test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
+ test_cmd_mock_domain_replace(self->stdev_id,
+ self->nested_hwpt_id);
+ test_cmd_trigger_vevents(dev_id, 1);
+ test_cmd_read_vevents(veventq_fd, 1, 0x99, &prev_seq);
test_err_vdevice_alloc(EEXIST, viommu_id, dev_id, 0x99,
&vdev_id);
+ test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
test_ioctl_destroy(vdev_id);
+
+ /* Try again with 0x88 */
test_cmd_vdevice_alloc(viommu_id, dev_id, 0x88, &vdev_id);
+ test_cmd_mock_domain_replace(self->stdev_id,
+ self->nested_hwpt_id);
+ /* Trigger an overflow with three events */
+ test_cmd_trigger_vevents(dev_id, 3);
+ test_err_read_vevents(EOVERFLOW, veventq_fd, 3, 0x88,
+ &prev_seq);
+ /* Overflow must be gone after the previous reads */
+ test_cmd_trigger_vevents(dev_id, 1);
+ test_cmd_read_vevents(veventq_fd, 1, 0x88, &prev_seq);
+ close(veventq_fd);
+ test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
test_ioctl_destroy(vdev_id);
+ test_ioctl_destroy(veventq_id);
} else {
test_err_vdevice_alloc(ENOENT, viommu_id, dev_id, 0x99, NULL);
}
@@ -2956,4 +3014,311 @@ TEST_F(iommufd_viommu, vdevice_cache)
}
}
+FIXTURE(iommufd_device_pasid)
+{
+ int fd;
+ uint32_t ioas_id;
+ uint32_t hwpt_id;
+ uint32_t stdev_id;
+ uint32_t device_id;
+ uint32_t no_pasid_stdev_id;
+ uint32_t no_pasid_device_id;
+};
+
+FIXTURE_VARIANT(iommufd_device_pasid)
+{
+ bool pasid_capable;
+};
+
+FIXTURE_SETUP(iommufd_device_pasid)
+{
+ self->fd = open("/dev/iommu", O_RDWR);
+ ASSERT_NE(-1, self->fd);
+ test_ioctl_ioas_alloc(&self->ioas_id);
+
+ test_cmd_mock_domain_flags(self->ioas_id,
+ MOCK_FLAGS_DEVICE_PASID,
+ &self->stdev_id, &self->hwpt_id,
+ &self->device_id);
+ if (!variant->pasid_capable)
+ test_cmd_mock_domain_flags(self->ioas_id, 0,
+ &self->no_pasid_stdev_id, NULL,
+ &self->no_pasid_device_id);
+}
+
+FIXTURE_TEARDOWN(iommufd_device_pasid)
+{
+ teardown_iommufd(self->fd, _metadata);
+}
+
+FIXTURE_VARIANT_ADD(iommufd_device_pasid, no_pasid)
+{
+ .pasid_capable = false,
+};
+
+FIXTURE_VARIANT_ADD(iommufd_device_pasid, has_pasid)
+{
+ .pasid_capable = true,
+};
+
+TEST_F(iommufd_device_pasid, pasid_attach)
+{
+ struct iommu_hwpt_selftest data = {
+ .iotlb = IOMMU_TEST_IOTLB_DEFAULT,
+ };
+ uint32_t nested_hwpt_id[3] = {};
+ uint32_t parent_hwpt_id = 0;
+ uint32_t fault_id, fault_fd;
+ uint32_t s2_hwpt_id = 0;
+ uint32_t iopf_hwpt_id;
+ uint32_t pasid = 100;
+ uint32_t viommu_id;
+
+ /*
+ * Negative test: detach a pasid that was never attached. This is
+ * not expected usage, but it should not fail either.
+ */
+ test_cmd_pasid_detach(pasid);
+
+ /* Allocate two nested hwpts sharing one common parent hwpt */
+ test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
+ IOMMU_HWPT_ALLOC_NEST_PARENT,
+ &parent_hwpt_id);
+ test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id,
+ IOMMU_HWPT_ALLOC_PASID,
+ &nested_hwpt_id[0],
+ IOMMU_HWPT_DATA_SELFTEST,
+ &data, sizeof(data));
+ test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id,
+ IOMMU_HWPT_ALLOC_PASID,
+ &nested_hwpt_id[1],
+ IOMMU_HWPT_DATA_SELFTEST,
+ &data, sizeof(data));
+
+ /* Fault related preparation */
+ test_ioctl_fault_alloc(&fault_id, &fault_fd);
+ test_cmd_hwpt_alloc_iopf(self->device_id, parent_hwpt_id, fault_id,
+ IOMMU_HWPT_FAULT_ID_VALID | IOMMU_HWPT_ALLOC_PASID,
+ &iopf_hwpt_id,
+ IOMMU_HWPT_DATA_SELFTEST, &data,
+ sizeof(data));
+
+ /* Allocate a regular nested hwpt based on viommu */
+ test_cmd_viommu_alloc(self->device_id, parent_hwpt_id,
+ IOMMU_VIOMMU_TYPE_SELFTEST,
+ &viommu_id);
+ test_cmd_hwpt_alloc_nested(self->device_id, viommu_id,
+ IOMMU_HWPT_ALLOC_PASID,
+ &nested_hwpt_id[2],
+ IOMMU_HWPT_DATA_SELFTEST, &data,
+ sizeof(data));
+
+ test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
+ IOMMU_HWPT_ALLOC_PASID,
+ &s2_hwpt_id);
+
+ /* Attach RID to non-pasid compat domain, */
+ test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
+ /* then attach to pasid should fail */
+ test_err_pasid_attach(EINVAL, pasid, s2_hwpt_id);
+
+ /* Attach RID to pasid compat domain, */
+ test_cmd_mock_domain_replace(self->stdev_id, s2_hwpt_id);
+ /* then attach to pasid should succeed, */
+ test_cmd_pasid_attach(pasid, nested_hwpt_id[0]);
+ /* but attach RID to non-pasid compat domain should fail now. */
+ test_err_mock_domain_replace(EINVAL, self->stdev_id, parent_hwpt_id);
+ /*
+ * Detach hwpt from pasid 100, and check if the pasid 100
+ * has null domain.
+ */
+ test_cmd_pasid_detach(pasid);
+ ASSERT_EQ(0,
+ test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
+ pasid, 0));
+ /* RID is attached to a pasid-compat domain, pasid path is not used */
+
+ if (!variant->pasid_capable) {
+ /*
+ * PASID-compatible domain can be used by non-PASID-capable
+ * device.
+ */
+ test_cmd_mock_domain_replace(self->no_pasid_stdev_id, nested_hwpt_id[0]);
+ test_cmd_mock_domain_replace(self->no_pasid_stdev_id, self->ioas_id);
+ /*
+ * Attaching a hwpt to pasid 100 of a non-PASID-capable device
+ * should fail, no matter whether the domain is pasid-compat or not.
+ */
+ EXPECT_ERRNO(EINVAL,
+ _test_cmd_pasid_attach(self->fd, self->no_pasid_stdev_id,
+ pasid, parent_hwpt_id));
+ EXPECT_ERRNO(EINVAL,
+ _test_cmd_pasid_attach(self->fd, self->no_pasid_stdev_id,
+ pasid, s2_hwpt_id));
+ }
+
+ /*
+ * Attach non pasid compat hwpt to pasid-capable device, should
+ * fail, and have null domain.
+ */
+ test_err_pasid_attach(EINVAL, pasid, parent_hwpt_id);
+ ASSERT_EQ(0,
+ test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
+ pasid, 0));
+
+ /*
+ * Attach ioas to pasid 100, should fail, domain should
+ * be null.
+ */
+ test_err_pasid_attach(EINVAL, pasid, self->ioas_id);
+ ASSERT_EQ(0,
+ test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
+ pasid, 0));
+
+ /*
+ * Attach the s2_hwpt to pasid 100, should succeed, domain should
+ * be valid.
+ */
+ test_cmd_pasid_attach(pasid, s2_hwpt_id);
+ ASSERT_EQ(0,
+ test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
+ pasid, s2_hwpt_id));
+
+ /*
+ * Try attach pasid 100 with another hwpt, should FAIL
+ * as attach does not allow overwrite, use REPLACE instead.
+ */
+ test_err_pasid_attach(EBUSY, pasid, nested_hwpt_id[0]);
+
+ /*
+ * Detach hwpt from pasid 100 for next test, should succeed,
+ * and have null domain.
+ */
+ test_cmd_pasid_detach(pasid);
+ ASSERT_EQ(0,
+ test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
+ pasid, 0));
+
+ /*
+ * Attach nested hwpt to pasid 100, should succeed, domain
+ * should be valid.
+ */
+ test_cmd_pasid_attach(pasid, nested_hwpt_id[0]);
+ ASSERT_EQ(0,
+ test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
+ pasid, nested_hwpt_id[0]));
+
+ /* Attach to pasid 100 which has been attached, should fail. */
+ test_err_pasid_attach(EBUSY, pasid, nested_hwpt_id[0]);
+
+ /* cleanup pasid 100 */
+ test_cmd_pasid_detach(pasid);
+
+ /* Replace tests */
+
+ pasid = 200;
+ /*
+ * Replace pasid 200 without attaching it, should fail
+ * with -EINVAL.
+ */
+ test_err_pasid_replace(EINVAL, pasid, s2_hwpt_id);
+
+ /*
+ * Attach the s2 hwpt to pasid 200, should succeed, domain should
+ * be valid.
+ */
+ test_cmd_pasid_attach(pasid, s2_hwpt_id);
+ ASSERT_EQ(0,
+ test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
+ pasid, s2_hwpt_id));
+
+ /*
+ * Replace pasid 200 with self->ioas_id, should fail
+ * and domain should be the prior s2 hwpt.
+ */
+ test_err_pasid_replace(EINVAL, pasid, self->ioas_id);
+ ASSERT_EQ(0,
+ test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
+ pasid, s2_hwpt_id));
+
+ /*
+ * Replace a nested hwpt for pasid 200, should succeed,
+ * and have valid domain.
+ */
+ test_cmd_pasid_replace(pasid, nested_hwpt_id[0]);
+ ASSERT_EQ(0,
+ test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
+ pasid, nested_hwpt_id[0]));
+
+ /*
+ * Replace with another nested hwpt for pasid 200, should
+ * succeed, and have valid domain.
+ */
+ test_cmd_pasid_replace(pasid, nested_hwpt_id[1]);
+ ASSERT_EQ(0,
+ test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
+ pasid, nested_hwpt_id[1]));
+
+ /* cleanup pasid 200 */
+ test_cmd_pasid_detach(pasid);
+
+ /* Negative Tests for pasid replace, use pasid 1024 */
+
+ /*
+ * Attach the s2 hwpt to pasid 1024, should succeed, domain should
+ * be valid.
+ */
+ pasid = 1024;
+ test_cmd_pasid_attach(pasid, s2_hwpt_id);
+ ASSERT_EQ(0,
+ test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
+ pasid, s2_hwpt_id));
+
+ /*
+	 * Replacing pasid 1024 with nested_hwpt_id[0] should fail, while
+	 * the old domain remains valid. This is a deliberately injected
+	 * failure; normally, the replace would succeed.
+ */
+ test_err_pasid_replace(ENOMEM, pasid, nested_hwpt_id[0]);
+ ASSERT_EQ(0,
+ test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
+ pasid, s2_hwpt_id));
+
+ /* cleanup pasid 1024 */
+ test_cmd_pasid_detach(pasid);
+
+ /* Attach to iopf-capable hwpt */
+
+ /*
+ * Attach an iopf hwpt to pasid 2048, should succeed, domain should
+ * be valid.
+ */
+ pasid = 2048;
+ test_cmd_pasid_attach(pasid, iopf_hwpt_id);
+ ASSERT_EQ(0,
+ test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
+ pasid, iopf_hwpt_id));
+
+ test_cmd_trigger_iopf_pasid(self->device_id, pasid, fault_fd);
+
+ /*
+ * Replace with s2_hwpt_id for pasid 2048, should
+ * succeed, and have valid domain.
+ */
+ test_cmd_pasid_replace(pasid, s2_hwpt_id);
+ ASSERT_EQ(0,
+ test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
+ pasid, s2_hwpt_id));
+
+ /* cleanup pasid 2048 */
+ test_cmd_pasid_detach(pasid);
+
+ test_ioctl_destroy(iopf_hwpt_id);
+ close(fault_fd);
+ test_ioctl_destroy(fault_id);
+
+ /* Detach the s2_hwpt_id from RID */
+ test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
+}
+
TEST_HARNESS_MAIN
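
The pasid test above reduces to a few lifecycle rules: a pasid holds at most one hwpt, attach never overwrites, replace requires a prior attach, and detach leaves a null domain. A minimal sketch of that lifecycle, assuming the helpers added to iommufd_utils.h further below and the fixture's s2/nested hwpts (error paths elided):

	__u32 pasid = 100;	/* arbitrary example value */

	/* Attach once; a second attach must fail with EBUSY. */
	test_cmd_pasid_attach(pasid, s2_hwpt_id);
	test_err_pasid_attach(EBUSY, pasid, nested_hwpt_id[0]);

	/* Overwriting is only possible via replace. */
	test_cmd_pasid_replace(pasid, nested_hwpt_id[0]);

	/* Detach leaves the pasid with a null domain. */
	test_cmd_pasid_detach(pasid);
	ASSERT_EQ(0, test_cmd_pasid_check_hwpt(self->fd, self->stdev_id,
					       pasid, 0));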
diff --git a/tools/testing/selftests/iommu/iommufd_fail_nth.c b/tools/testing/selftests/iommu/iommufd_fail_nth.c
index 64b1f8e1b0cf..e11ec4b121fc 100644
--- a/tools/testing/selftests/iommu/iommufd_fail_nth.c
+++ b/tools/testing/selftests/iommu/iommufd_fail_nth.c
@@ -209,12 +209,16 @@ FIXTURE(basic_fail_nth)
{
int fd;
uint32_t access_id;
+ uint32_t stdev_id;
+ uint32_t pasid;
};
FIXTURE_SETUP(basic_fail_nth)
{
self->fd = -1;
self->access_id = 0;
+ self->stdev_id = 0;
+	self->pasid = 0; /* test should use a non-zero value */
}
FIXTURE_TEARDOWN(basic_fail_nth)
@@ -226,6 +230,8 @@ FIXTURE_TEARDOWN(basic_fail_nth)
rc = _test_cmd_destroy_access(self->access_id);
assert(rc == 0);
}
+ if (self->pasid && self->stdev_id)
+ _test_cmd_pasid_detach(self->fd, self->stdev_id, self->pasid);
teardown_iommufd(self->fd, _metadata);
}
@@ -620,10 +626,11 @@ TEST_FAIL_NTH(basic_fail_nth, device)
};
struct iommu_test_hw_info info;
uint32_t fault_id, fault_fd;
+ uint32_t veventq_id, veventq_fd;
uint32_t fault_hwpt_id;
+ uint32_t test_hwpt_id;
uint32_t ioas_id;
uint32_t ioas_id2;
- uint32_t stdev_id;
uint32_t idev_id;
uint32_t hwpt_id;
uint32_t viommu_id;
@@ -654,25 +661,30 @@ TEST_FAIL_NTH(basic_fail_nth, device)
fail_nth_enable();
- if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, NULL,
- &idev_id))
+ if (_test_cmd_mock_domain_flags(self->fd, ioas_id,
+ MOCK_FLAGS_DEVICE_PASID,
+ &self->stdev_id, NULL, &idev_id))
return -1;
- if (_test_cmd_get_hw_info(self->fd, idev_id, &info, sizeof(info), NULL))
+ if (_test_cmd_get_hw_info(self->fd, idev_id, &info,
+ sizeof(info), NULL, NULL))
return -1;
- if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0, 0, &hwpt_id,
+ if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0,
+ IOMMU_HWPT_ALLOC_PASID, &hwpt_id,
IOMMU_HWPT_DATA_NONE, 0, 0))
return -1;
- if (_test_cmd_mock_domain_replace(self->fd, stdev_id, ioas_id2, NULL))
+ if (_test_cmd_mock_domain_replace(self->fd, self->stdev_id, ioas_id2, NULL))
return -1;
- if (_test_cmd_mock_domain_replace(self->fd, stdev_id, hwpt_id, NULL))
+ if (_test_cmd_mock_domain_replace(self->fd, self->stdev_id, hwpt_id, NULL))
return -1;
if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0,
- IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id,
+ IOMMU_HWPT_ALLOC_NEST_PARENT |
+ IOMMU_HWPT_ALLOC_PASID,
+ &hwpt_id,
IOMMU_HWPT_DATA_NONE, 0, 0))
return -1;
@@ -692,6 +704,37 @@ TEST_FAIL_NTH(basic_fail_nth, device)
IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data)))
return -1;
+ if (_test_cmd_veventq_alloc(self->fd, viommu_id,
+ IOMMU_VEVENTQ_TYPE_SELFTEST, &veventq_id,
+ &veventq_fd))
+ return -1;
+ close(veventq_fd);
+
+ if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0,
+ IOMMU_HWPT_ALLOC_PASID,
+ &test_hwpt_id,
+ IOMMU_HWPT_DATA_NONE, 0, 0))
+ return -1;
+
+ /* Tests for pasid attach/replace/detach */
+
+ self->pasid = 200;
+
+ if (_test_cmd_pasid_attach(self->fd, self->stdev_id,
+ self->pasid, hwpt_id)) {
+ self->pasid = 0;
+ return -1;
+ }
+
+ if (_test_cmd_pasid_replace(self->fd, self->stdev_id,
+ self->pasid, test_hwpt_id))
+ return -1;
+
+ if (_test_cmd_pasid_detach(self->fd, self->stdev_id, self->pasid))
+ return -1;
+
+ self->pasid = 0;
+
return 0;
}
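
Worth noting in the fail_nth changes is the rollback bookkeeping: fault injection can abort the test at any syscall, so the pasid is recorded in the fixture only once the attach succeeds, letting teardown know whether a detach is owed. Condensed, the guard is (a sketch of the logic above, not additional code):

	self->pasid = 200;
	if (_test_cmd_pasid_attach(self->fd, self->stdev_id,
				   self->pasid, hwpt_id)) {
		self->pasid = 0;	/* nothing to undo in teardown */
		return -1;
	}
	/* ... teardown detaches iff self->pasid && self->stdev_id ... */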
diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h
index d979f5b0efe8..72f6636e5d90 100644
--- a/tools/testing/selftests/iommu/iommufd_utils.h
+++ b/tools/testing/selftests/iommu/iommufd_utils.h
@@ -9,6 +9,7 @@
#include <sys/ioctl.h>
#include <stdint.h>
#include <assert.h>
+#include <poll.h>
#include "../kselftest_harness.h"
#include "../../../../drivers/iommu/iommufd/iommufd_test.h"
@@ -757,7 +758,8 @@ static void teardown_iommufd(int fd, struct __test_metadata *_metadata)
/* @data can be NULL */
static int _test_cmd_get_hw_info(int fd, __u32 device_id, void *data,
- size_t data_len, uint32_t *capabilities)
+ size_t data_len, uint32_t *capabilities,
+ uint8_t *max_pasid)
{
struct iommu_test_hw_info *info = (struct iommu_test_hw_info *)data;
struct iommu_hw_info cmd = {
@@ -802,6 +804,9 @@ static int _test_cmd_get_hw_info(int fd, __u32 device_id, void *data,
assert(!info->flags);
}
+ if (max_pasid)
+ *max_pasid = cmd.out_max_pasid_log2;
+
if (capabilities)
*capabilities = cmd.out_capabilities;
@@ -810,14 +815,19 @@ static int _test_cmd_get_hw_info(int fd, __u32 device_id, void *data,
#define test_cmd_get_hw_info(device_id, data, data_len) \
ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, data, \
- data_len, NULL))
+ data_len, NULL, NULL))
#define test_err_get_hw_info(_errno, device_id, data, data_len) \
EXPECT_ERRNO(_errno, _test_cmd_get_hw_info(self->fd, device_id, data, \
- data_len, NULL))
+ data_len, NULL, NULL))
#define test_cmd_get_hw_capabilities(device_id, caps, mask) \
- ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, 0, &caps))
+ ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, \
+ 0, &caps, NULL))
+
+#define test_cmd_get_hw_info_pasid(device_id, max_pasid) \
+ ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, \
+ 0, NULL, max_pasid))
static int _test_ioctl_fault_alloc(int fd, __u32 *fault_id, __u32 *fault_fd)
{
@@ -842,14 +852,15 @@ static int _test_ioctl_fault_alloc(int fd, __u32 *fault_id, __u32 *fault_fd)
ASSERT_NE(0, *(fault_fd)); \
})
-static int _test_cmd_trigger_iopf(int fd, __u32 device_id, __u32 fault_fd)
+static int _test_cmd_trigger_iopf(int fd, __u32 device_id, __u32 pasid,
+ __u32 fault_fd)
{
struct iommu_test_cmd trigger_iopf_cmd = {
.size = sizeof(trigger_iopf_cmd),
.op = IOMMU_TEST_OP_TRIGGER_IOPF,
.trigger_iopf = {
.dev_id = device_id,
- .pasid = 0x1,
+ .pasid = pasid,
.grpid = 0x2,
.perm = IOMMU_PGFAULT_PERM_READ | IOMMU_PGFAULT_PERM_WRITE,
.addr = 0xdeadbeaf,
@@ -880,7 +891,10 @@ static int _test_cmd_trigger_iopf(int fd, __u32 device_id, __u32 fault_fd)
}
#define test_cmd_trigger_iopf(device_id, fault_fd) \
- ASSERT_EQ(0, _test_cmd_trigger_iopf(self->fd, device_id, fault_fd))
+ ASSERT_EQ(0, _test_cmd_trigger_iopf(self->fd, device_id, 0x1, fault_fd))
+#define test_cmd_trigger_iopf_pasid(device_id, pasid, fault_fd) \
+ ASSERT_EQ(0, _test_cmd_trigger_iopf(self->fd, device_id, \
+ pasid, fault_fd))
static int _test_cmd_viommu_alloc(int fd, __u32 device_id, __u32 hwpt_id,
__u32 type, __u32 flags, __u32 *viommu_id)
@@ -936,3 +950,204 @@ static int _test_cmd_vdevice_alloc(int fd, __u32 viommu_id, __u32 idev_id,
EXPECT_ERRNO(_errno, \
_test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id, \
virt_id, vdev_id))
+
+static int _test_cmd_veventq_alloc(int fd, __u32 viommu_id, __u32 type,
+ __u32 *veventq_id, __u32 *veventq_fd)
+{
+ struct iommu_veventq_alloc cmd = {
+ .size = sizeof(cmd),
+ .type = type,
+ .veventq_depth = 2,
+ .viommu_id = viommu_id,
+ };
+ int ret;
+
+ ret = ioctl(fd, IOMMU_VEVENTQ_ALLOC, &cmd);
+ if (ret)
+ return ret;
+ if (veventq_id)
+ *veventq_id = cmd.out_veventq_id;
+ if (veventq_fd)
+ *veventq_fd = cmd.out_veventq_fd;
+ return 0;
+}
+
+#define test_cmd_veventq_alloc(viommu_id, type, veventq_id, veventq_fd) \
+ ASSERT_EQ(0, _test_cmd_veventq_alloc(self->fd, viommu_id, type, \
+ veventq_id, veventq_fd))
+#define test_err_veventq_alloc(_errno, viommu_id, type, veventq_id, \
+ veventq_fd) \
+ EXPECT_ERRNO(_errno, \
+ _test_cmd_veventq_alloc(self->fd, viommu_id, type, \
+ veventq_id, veventq_fd))
+
+static int _test_cmd_trigger_vevents(int fd, __u32 dev_id, __u32 nvevents)
+{
+ struct iommu_test_cmd trigger_vevent_cmd = {
+ .size = sizeof(trigger_vevent_cmd),
+ .op = IOMMU_TEST_OP_TRIGGER_VEVENT,
+ .trigger_vevent = {
+ .dev_id = dev_id,
+ },
+ };
+	int ret = 0;
+
+ while (nvevents--) {
+ ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_VEVENT),
+ &trigger_vevent_cmd);
+ if (ret < 0)
+ return -1;
+ }
+ return ret;
+}
+
+#define test_cmd_trigger_vevents(dev_id, nvevents) \
+ ASSERT_EQ(0, _test_cmd_trigger_vevents(self->fd, dev_id, nvevents))
+
+static int _test_cmd_read_vevents(int fd, __u32 event_fd, __u32 nvevents,
+ __u32 virt_id, int *prev_seq)
+{
+ struct pollfd pollfd = { .fd = event_fd, .events = POLLIN };
+ struct iommu_viommu_event_selftest *event;
+ struct iommufd_vevent_header *hdr;
+ ssize_t bytes;
+ void *data;
+ int ret, i;
+
+ ret = poll(&pollfd, 1, 1000);
+ if (ret < 0)
+ return -1;
+
+ data = calloc(nvevents, sizeof(*hdr) + sizeof(*event));
+ if (!data) {
+ errno = ENOMEM;
+ return -1;
+ }
+
+ bytes = read(event_fd, data,
+ nvevents * (sizeof(*hdr) + sizeof(*event)));
+ if (bytes <= 0) {
+ errno = EFAULT;
+ ret = -1;
+ goto out_free;
+ }
+
+ for (i = 0; i < nvevents; i++) {
+ hdr = data + i * (sizeof(*hdr) + sizeof(*event));
+
+ if (hdr->flags & IOMMU_VEVENTQ_FLAG_LOST_EVENTS ||
+ hdr->sequence - *prev_seq > 1) {
+ *prev_seq = hdr->sequence;
+ errno = EOVERFLOW;
+ ret = -1;
+ goto out_free;
+ }
+ *prev_seq = hdr->sequence;
+ event = data + sizeof(*hdr);
+ if (event->virt_id != virt_id) {
+ errno = EINVAL;
+ ret = -1;
+ goto out_free;
+ }
+ }
+
+ ret = 0;
+out_free:
+ free(data);
+ return ret;
+}
+
+#define test_cmd_read_vevents(event_fd, nvevents, virt_id, prev_seq) \
+ ASSERT_EQ(0, _test_cmd_read_vevents(self->fd, event_fd, nvevents, \
+ virt_id, prev_seq))
+#define test_err_read_vevents(_errno, event_fd, nvevents, virt_id, prev_seq) \
+ EXPECT_ERRNO(_errno, \
+ _test_cmd_read_vevents(self->fd, event_fd, nvevents, \
+ virt_id, prev_seq))
+
+static int _test_cmd_pasid_attach(int fd, __u32 stdev_id, __u32 pasid,
+ __u32 pt_id)
+{
+ struct iommu_test_cmd test_attach = {
+ .size = sizeof(test_attach),
+ .op = IOMMU_TEST_OP_PASID_ATTACH,
+ .id = stdev_id,
+ .pasid_attach = {
+ .pasid = pasid,
+ .pt_id = pt_id,
+ },
+ };
+
+ return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_PASID_ATTACH),
+ &test_attach);
+}
+
+#define test_cmd_pasid_attach(pasid, hwpt_id) \
+ ASSERT_EQ(0, _test_cmd_pasid_attach(self->fd, self->stdev_id, \
+ pasid, hwpt_id))
+
+#define test_err_pasid_attach(_errno, pasid, hwpt_id) \
+ EXPECT_ERRNO(_errno, \
+ _test_cmd_pasid_attach(self->fd, self->stdev_id, \
+ pasid, hwpt_id))
+
+static int _test_cmd_pasid_replace(int fd, __u32 stdev_id, __u32 pasid,
+ __u32 pt_id)
+{
+ struct iommu_test_cmd test_replace = {
+ .size = sizeof(test_replace),
+ .op = IOMMU_TEST_OP_PASID_REPLACE,
+ .id = stdev_id,
+ .pasid_replace = {
+ .pasid = pasid,
+ .pt_id = pt_id,
+ },
+ };
+
+ return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_PASID_REPLACE),
+ &test_replace);
+}
+
+#define test_cmd_pasid_replace(pasid, hwpt_id) \
+ ASSERT_EQ(0, _test_cmd_pasid_replace(self->fd, self->stdev_id, \
+ pasid, hwpt_id))
+
+#define test_err_pasid_replace(_errno, pasid, hwpt_id) \
+ EXPECT_ERRNO(_errno, \
+ _test_cmd_pasid_replace(self->fd, self->stdev_id, \
+ pasid, hwpt_id))
+
+static int _test_cmd_pasid_detach(int fd, __u32 stdev_id, __u32 pasid)
+{
+ struct iommu_test_cmd test_detach = {
+ .size = sizeof(test_detach),
+ .op = IOMMU_TEST_OP_PASID_DETACH,
+ .id = stdev_id,
+ .pasid_detach = {
+ .pasid = pasid,
+ },
+ };
+
+ return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_PASID_DETACH),
+ &test_detach);
+}
+
+#define test_cmd_pasid_detach(pasid) \
+ ASSERT_EQ(0, _test_cmd_pasid_detach(self->fd, self->stdev_id, pasid))
+
+static int test_cmd_pasid_check_hwpt(int fd, __u32 stdev_id, __u32 pasid,
+ __u32 hwpt_id)
+{
+ struct iommu_test_cmd test_pasid_check = {
+ .size = sizeof(test_pasid_check),
+ .op = IOMMU_TEST_OP_PASID_CHECK_HWPT,
+ .id = stdev_id,
+ .pasid_check = {
+ .pasid = pasid,
+ .hwpt_id = hwpt_id,
+ },
+ };
+
+ return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_PASID_CHECK_HWPT),
+ &test_pasid_check);
+}
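
Together the new veventq helpers form a small produce/consume API. A minimal usage sketch, assuming a viommu (viommu_id), a device (dev_id) and a vdevice whose virt_id matches; note the queue is allocated with veventq_depth = 2 per _test_cmd_veventq_alloc() above:

	__u32 veventq_id, veventq_fd;
	int prev_seq = -1;

	test_cmd_veventq_alloc(viommu_id, IOMMU_VEVENTQ_TYPE_SELFTEST,
			       &veventq_id, &veventq_fd);

	/* Produce two events on the device, then consume and verify them. */
	test_cmd_trigger_vevents(dev_id, 2);
	test_cmd_read_vevents(veventq_fd, 2, virt_id, &prev_seq);

	/* Overflowing the depth-2 queue would set IOMMU_VEVENTQ_FLAG_LOST_EVENTS. */
	close(veventq_fd);
	test_ioctl_destroy(veventq_id);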
diff --git a/tools/testing/selftests/mm/.gitignore b/tools/testing/selftests/mm/.gitignore
index 121000c28c10..c5241b193db8 100644
--- a/tools/testing/selftests/mm/.gitignore
+++ b/tools/testing/selftests/mm/.gitignore
@@ -57,4 +57,4 @@ droppable
hugetlb_dio
pkey_sighandler_tests_32
pkey_sighandler_tests_64
-guard-pages
+guard-regions
diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile
index 63ce39d024bb..8270895039d1 100644
--- a/tools/testing/selftests/mm/Makefile
+++ b/tools/testing/selftests/mm/Makefile
@@ -97,7 +97,7 @@ TEST_GEN_FILES += hugetlb_fault_after_madv
TEST_GEN_FILES += hugetlb_madv_vs_map
TEST_GEN_FILES += hugetlb_dio
TEST_GEN_FILES += droppable
-TEST_GEN_FILES += guard-pages
+TEST_GEN_FILES += guard-regions
ifneq ($(ARCH),arm64)
TEST_GEN_FILES += soft-dirty
diff --git a/tools/testing/selftests/mm/cow.c b/tools/testing/selftests/mm/cow.c
index 9446673645eb..f0cb14ea8608 100644
--- a/tools/testing/selftests/mm/cow.c
+++ b/tools/testing/selftests/mm/cow.c
@@ -876,7 +876,7 @@ static void do_run_with_thp(test_fn fn, enum thp_run thp_run, size_t thpsize)
mremap_size = thpsize / 2;
mremap_mem = mmap(NULL, mremap_size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (mem == MAP_FAILED) {
+ if (mremap_mem == MAP_FAILED) {
ksft_test_result_fail("mmap() failed\n");
goto munmap;
}
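
The cow.c hunk fixes a classic copy-paste slip: the error check after the second mmap() tested the wrong variable, so a failed mremap_mem allocation went undetected. The corrected shape, condensed:

	mremap_mem = mmap(NULL, mremap_size, PROT_NONE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (mremap_mem == MAP_FAILED) {	/* was erroneously: mem == MAP_FAILED */
		ksft_test_result_fail("mmap() failed\n");
		goto munmap;
	}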
diff --git a/tools/testing/selftests/mm/guard-pages.c b/tools/testing/selftests/mm/guard-regions.c
index 525c50d3ec23..b3d0e2771096 100644
--- a/tools/testing/selftests/mm/guard-pages.c
+++ b/tools/testing/selftests/mm/guard-regions.c
@@ -6,6 +6,7 @@
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
+#include <linux/limits.h>
#include <linux/userfaultfd.h>
#include <setjmp.h>
#include <signal.h>
@@ -18,6 +19,7 @@
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>
+#include "vm_util.h"
#include "../pidfd/pidfd.h"
@@ -39,6 +41,79 @@ static sigjmp_buf signal_jmp_buf;
*/
#define FORCE_READ(x) (*(volatile typeof(x) *)x)
+/*
+ * How is the test backing the mapping being tested?
+ */
+enum backing_type {
+ ANON_BACKED,
+ SHMEM_BACKED,
+ LOCAL_FILE_BACKED,
+};
+
+FIXTURE(guard_regions)
+{
+ unsigned long page_size;
+ char path[PATH_MAX];
+ int fd;
+};
+
+FIXTURE_VARIANT(guard_regions)
+{
+ enum backing_type backing;
+};
+
+FIXTURE_VARIANT_ADD(guard_regions, anon)
+{
+ .backing = ANON_BACKED,
+};
+
+FIXTURE_VARIANT_ADD(guard_regions, shmem)
+{
+ .backing = SHMEM_BACKED,
+};
+
+FIXTURE_VARIANT_ADD(guard_regions, file)
+{
+ .backing = LOCAL_FILE_BACKED,
+};
+
+static bool is_anon_backed(const FIXTURE_VARIANT(guard_regions) * variant)
+{
+ switch (variant->backing) {
+ case ANON_BACKED:
+ case SHMEM_BACKED:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void *mmap_(FIXTURE_DATA(guard_regions) * self,
+ const FIXTURE_VARIANT(guard_regions) * variant,
+ void *addr, size_t length, int prot, int extra_flags,
+ off_t offset)
+{
+ int fd;
+ int flags = extra_flags;
+
+ switch (variant->backing) {
+ case ANON_BACKED:
+ flags |= MAP_PRIVATE | MAP_ANON;
+ fd = -1;
+ break;
+ case SHMEM_BACKED:
+ case LOCAL_FILE_BACKED:
+ flags |= MAP_SHARED;
+ fd = self->fd;
+ break;
+ default:
+ ksft_exit_fail();
+ break;
+ }
+
+ return mmap(addr, length, prot, flags, fd, offset);
+}
+
static int userfaultfd(int flags)
{
return syscall(SYS_userfaultfd, flags);
@@ -104,12 +179,7 @@ static bool try_read_write_buf(char *ptr)
return try_read_buf(ptr) && try_write_buf(ptr);
}
-FIXTURE(guard_pages)
-{
- unsigned long page_size;
-};
-
-FIXTURE_SETUP(guard_pages)
+static void setup_sighandler(void)
{
struct sigaction act = {
.sa_handler = &handle_fatal,
@@ -119,11 +189,9 @@ FIXTURE_SETUP(guard_pages)
sigemptyset(&act.sa_mask);
if (sigaction(SIGSEGV, &act, NULL))
ksft_exit_fail_perror("sigaction");
+}
- self->page_size = (unsigned long)sysconf(_SC_PAGESIZE);
-};
-
-FIXTURE_TEARDOWN(guard_pages)
+static void teardown_sighandler(void)
{
struct sigaction act = {
.sa_handler = SIG_DFL,
@@ -134,15 +202,109 @@ FIXTURE_TEARDOWN(guard_pages)
sigaction(SIGSEGV, &act, NULL);
}
-TEST_F(guard_pages, basic)
+static int open_file(const char *prefix, char *path)
+{
+ int fd;
+
+ snprintf(path, PATH_MAX, "%sguard_regions_test_file_XXXXXX", prefix);
+ fd = mkstemp(path);
+ if (fd < 0)
+ ksft_exit_fail_perror("mkstemp");
+
+ return fd;
+}
+
+/* Establish a varying pattern in a buffer. */
+static void set_pattern(char *ptr, size_t num_pages, size_t page_size)
+{
+ size_t i;
+
+ for (i = 0; i < num_pages; i++) {
+ char *ptr2 = &ptr[i * page_size];
+
+ memset(ptr2, 'a' + (i % 26), page_size);
+ }
+}
+
+/*
+ * Check that a buffer contains the pattern set by set_pattern(), starting at a
+ * page offset of pgoff within the buffer.
+ */
+static bool check_pattern_offset(char *ptr, size_t num_pages, size_t page_size,
+ size_t pgoff)
+{
+ size_t i;
+
+ for (i = 0; i < num_pages * page_size; i++) {
+ size_t offset = pgoff * page_size + i;
+ char actual = ptr[offset];
+ char expected = 'a' + ((offset / page_size) % 26);
+
+ if (actual != expected)
+ return false;
+ }
+
+ return true;
+}
+
+/* Check that a buffer contains the pattern set by set_pattern(). */
+static bool check_pattern(char *ptr, size_t num_pages, size_t page_size)
+{
+ return check_pattern_offset(ptr, num_pages, page_size, 0);
+}
+
+/* Determine if a buffer contains only repetitions of a specified char. */
+static bool is_buf_eq(char *buf, size_t size, char chr)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++) {
+ if (buf[i] != chr)
+ return false;
+ }
+
+ return true;
+}
+
+FIXTURE_SETUP(guard_regions)
+{
+ self->page_size = (unsigned long)sysconf(_SC_PAGESIZE);
+ setup_sighandler();
+
+ if (variant->backing == ANON_BACKED)
+ return;
+
+ self->fd = open_file(
+ variant->backing == SHMEM_BACKED ? "/tmp/" : "",
+ self->path);
+
+	/* Truncate the file to 100 pages; tests can modify this as needed. */
+ ASSERT_EQ(ftruncate(self->fd, 100 * self->page_size), 0);
+};
+
+FIXTURE_TEARDOWN_PARENT(guard_regions)
+{
+ teardown_sighandler();
+
+ if (variant->backing == ANON_BACKED)
+ return;
+
+ if (self->fd >= 0)
+ close(self->fd);
+
+ if (self->path[0] != '\0')
+ unlink(self->path);
+}
+
+TEST_F(guard_regions, basic)
{
const unsigned long NUM_PAGES = 10;
const unsigned long page_size = self->page_size;
char *ptr;
int i;
- ptr = mmap(NULL, NUM_PAGES * page_size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANON, -1, 0);
+ ptr = mmap_(self, variant, NULL, NUM_PAGES * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Trivially assert we can touch the first page. */
@@ -228,32 +390,30 @@ TEST_F(guard_pages, basic)
}
/* Assert that operations applied across multiple VMAs work as expected. */
-TEST_F(guard_pages, multi_vma)
+TEST_F(guard_regions, multi_vma)
{
const unsigned long page_size = self->page_size;
char *ptr_region, *ptr, *ptr1, *ptr2, *ptr3;
int i;
/* Reserve a 100 page region over which we can install VMAs. */
- ptr_region = mmap(NULL, 100 * page_size, PROT_NONE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr_region = mmap_(self, variant, NULL, 100 * page_size,
+ PROT_NONE, 0, 0);
ASSERT_NE(ptr_region, MAP_FAILED);
/* Place a VMA of 10 pages size at the start of the region. */
- ptr1 = mmap(ptr_region, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr1 = mmap_(self, variant, ptr_region, 10 * page_size,
+ PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr1, MAP_FAILED);
/* Place a VMA of 5 pages size 50 pages into the region. */
- ptr2 = mmap(&ptr_region[50 * page_size], 5 * page_size,
- PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr2 = mmap_(self, variant, &ptr_region[50 * page_size], 5 * page_size,
+ PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr2, MAP_FAILED);
/* Place a VMA of 20 pages size at the end of the region. */
- ptr3 = mmap(&ptr_region[80 * page_size], 20 * page_size,
- PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr3 = mmap_(self, variant, &ptr_region[80 * page_size], 20 * page_size,
+ PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr3, MAP_FAILED);
/* Unmap gaps. */
@@ -323,13 +483,11 @@ TEST_F(guard_pages, multi_vma)
}
/* Now map incompatible VMAs in the gaps. */
- ptr = mmap(&ptr_region[10 * page_size], 40 * page_size,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, &ptr_region[10 * page_size], 40 * page_size,
+ PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED, 0);
ASSERT_NE(ptr, MAP_FAILED);
- ptr = mmap(&ptr_region[55 * page_size], 25 * page_size,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, &ptr_region[55 * page_size], 25 * page_size,
+ PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED, 0);
ASSERT_NE(ptr, MAP_FAILED);
/*
@@ -364,7 +522,7 @@ TEST_F(guard_pages, multi_vma)
* Assert that batched operations performed using process_madvise() work as
* expected.
*/
-TEST_F(guard_pages, process_madvise)
+TEST_F(guard_regions, process_madvise)
{
const unsigned long page_size = self->page_size;
char *ptr_region, *ptr1, *ptr2, *ptr3;
@@ -372,8 +530,8 @@ TEST_F(guard_pages, process_madvise)
struct iovec vec[6];
/* Reserve region to map over. */
- ptr_region = mmap(NULL, 100 * page_size, PROT_NONE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr_region = mmap_(self, variant, NULL, 100 * page_size,
+ PROT_NONE, 0, 0);
ASSERT_NE(ptr_region, MAP_FAILED);
/*
@@ -381,9 +539,8 @@ TEST_F(guard_pages, process_madvise)
* overwrite existing entries and test this code path against
* overwriting existing entries.
*/
- ptr1 = mmap(&ptr_region[page_size], 10 * page_size,
- PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE | MAP_POPULATE, -1, 0);
+ ptr1 = mmap_(self, variant, &ptr_region[page_size], 10 * page_size,
+ PROT_READ | PROT_WRITE, MAP_FIXED | MAP_POPULATE, 0);
ASSERT_NE(ptr1, MAP_FAILED);
/* We want guard markers at start/end of each VMA. */
vec[0].iov_base = ptr1;
@@ -392,9 +549,8 @@ TEST_F(guard_pages, process_madvise)
vec[1].iov_len = page_size;
/* 5 pages offset 50 pages into reserve region. */
- ptr2 = mmap(&ptr_region[50 * page_size], 5 * page_size,
- PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr2 = mmap_(self, variant, &ptr_region[50 * page_size], 5 * page_size,
+ PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr2, MAP_FAILED);
vec[2].iov_base = ptr2;
vec[2].iov_len = page_size;
@@ -402,9 +558,8 @@ TEST_F(guard_pages, process_madvise)
vec[3].iov_len = page_size;
/* 20 pages offset 79 pages into reserve region. */
- ptr3 = mmap(&ptr_region[79 * page_size], 20 * page_size,
- PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr3 = mmap_(self, variant, &ptr_region[79 * page_size], 20 * page_size,
+ PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr3, MAP_FAILED);
vec[4].iov_base = ptr3;
vec[4].iov_len = page_size;
@@ -459,13 +614,13 @@ TEST_F(guard_pages, process_madvise)
}
/* Assert that unmapping ranges does not leave guard markers behind. */
-TEST_F(guard_pages, munmap)
+TEST_F(guard_regions, munmap)
{
const unsigned long page_size = self->page_size;
char *ptr, *ptr_new1, *ptr_new2;
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Guard first and last pages. */
@@ -481,11 +636,11 @@ TEST_F(guard_pages, munmap)
ASSERT_EQ(munmap(&ptr[9 * page_size], page_size), 0);
/* Map over them.*/
- ptr_new1 = mmap(ptr, page_size, PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr_new1 = mmap_(self, variant, ptr, page_size, PROT_READ | PROT_WRITE,
+ MAP_FIXED, 0);
ASSERT_NE(ptr_new1, MAP_FAILED);
- ptr_new2 = mmap(&ptr[9 * page_size], page_size, PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr_new2 = mmap_(self, variant, &ptr[9 * page_size], page_size,
+ PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr_new2, MAP_FAILED);
/* Assert that they are now not guarded. */
@@ -497,14 +652,14 @@ TEST_F(guard_pages, munmap)
}
/* Assert that mprotect() operations have no bearing on guard markers. */
-TEST_F(guard_pages, mprotect)
+TEST_F(guard_regions, mprotect)
{
const unsigned long page_size = self->page_size;
char *ptr;
int i;
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Guard the middle of the range. */
@@ -545,14 +700,14 @@ TEST_F(guard_pages, mprotect)
}
/* Split and merge VMAs and make sure guard pages still behave. */
-TEST_F(guard_pages, split_merge)
+TEST_F(guard_regions, split_merge)
{
const unsigned long page_size = self->page_size;
char *ptr, *ptr_new;
int i;
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Guard the whole range. */
@@ -593,14 +748,14 @@ TEST_F(guard_pages, split_merge)
}
/* Now map them again - the unmap will have cleared the guards. */
- ptr_new = mmap(&ptr[2 * page_size], page_size, PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr_new = mmap_(self, variant, &ptr[2 * page_size], page_size,
+ PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr_new, MAP_FAILED);
- ptr_new = mmap(&ptr[5 * page_size], page_size, PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr_new = mmap_(self, variant, &ptr[5 * page_size], page_size,
+ PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr_new, MAP_FAILED);
- ptr_new = mmap(&ptr[8 * page_size], page_size, PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr_new = mmap_(self, variant, &ptr[8 * page_size], page_size,
+ PROT_READ | PROT_WRITE, MAP_FIXED, 0);
ASSERT_NE(ptr_new, MAP_FAILED);
/* Now make sure guard pages are established. */
@@ -676,14 +831,14 @@ TEST_F(guard_pages, split_merge)
}
/* Assert that MADV_DONTNEED does not remove guard markers. */
-TEST_F(guard_pages, dontneed)
+TEST_F(guard_regions, dontneed)
{
const unsigned long page_size = self->page_size;
char *ptr;
int i;
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Back the whole range. */
@@ -713,8 +868,16 @@ TEST_F(guard_pages, dontneed)
ASSERT_FALSE(result);
} else {
ASSERT_TRUE(result);
- /* Make sure we really did get reset to zero page. */
- ASSERT_EQ(*curr, '\0');
+ switch (variant->backing) {
+ case ANON_BACKED:
+ /* If anon, then we get a zero page. */
+ ASSERT_EQ(*curr, '\0');
+ break;
+ default:
+ /* Otherwise, we get the file data. */
+ ASSERT_EQ(*curr, 'y');
+ break;
+ }
}
/* Now write... */
@@ -729,14 +892,14 @@ TEST_F(guard_pages, dontneed)
}
/* Assert that mlock()'ed pages work correctly with guard markers. */
-TEST_F(guard_pages, mlock)
+TEST_F(guard_regions, mlock)
{
const unsigned long page_size = self->page_size;
char *ptr;
int i;
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Populate. */
@@ -802,14 +965,14 @@ TEST_F(guard_pages, mlock)
*
* - Moving a mapping alone should retain markers as they are.
*/
-TEST_F(guard_pages, mremap_move)
+TEST_F(guard_regions, mremap_move)
{
const unsigned long page_size = self->page_size;
char *ptr, *ptr_new;
/* Map 5 pages. */
- ptr = mmap(NULL, 5 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 5 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Place guard markers at both ends of the 5 page span. */
@@ -823,8 +986,7 @@ TEST_F(guard_pages, mremap_move)
/* Map a new region we will move this range into. Doing this ensures
* that we have reserved a range to map into.
*/
- ptr_new = mmap(NULL, 5 * page_size, PROT_NONE, MAP_ANON | MAP_PRIVATE,
- -1, 0);
+ ptr_new = mmap_(self, variant, NULL, 5 * page_size, PROT_NONE, 0, 0);
ASSERT_NE(ptr_new, MAP_FAILED);
ASSERT_EQ(mremap(ptr, 5 * page_size, 5 * page_size,
@@ -849,14 +1011,14 @@ TEST_F(guard_pages, mremap_move)
* will have to remove guard pages manually to fix up (they'd have to do the
* same if it were a PROT_NONE mapping).
*/
-TEST_F(guard_pages, mremap_expand)
+TEST_F(guard_regions, mremap_expand)
{
const unsigned long page_size = self->page_size;
char *ptr, *ptr_new;
/* Map 10 pages... */
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* ...But unmap the last 5 so we can ensure we can expand into them. */
ASSERT_EQ(munmap(&ptr[5 * page_size], 5 * page_size), 0);
@@ -880,8 +1042,7 @@ TEST_F(guard_pages, mremap_expand)
ASSERT_FALSE(try_read_write_buf(&ptr[4 * page_size]));
/* Reserve a region which we can move to and expand into. */
- ptr_new = mmap(NULL, 20 * page_size, PROT_NONE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr_new = mmap_(self, variant, NULL, 20 * page_size, PROT_NONE, 0, 0);
ASSERT_NE(ptr_new, MAP_FAILED);
/* Now move and expand into it. */
@@ -912,15 +1073,15 @@ TEST_F(guard_pages, mremap_expand)
* if the user were using a PROT_NONE mapping they'd have to manually fix this
* up also so this is OK.
*/
-TEST_F(guard_pages, mremap_shrink)
+TEST_F(guard_regions, mremap_shrink)
{
const unsigned long page_size = self->page_size;
char *ptr;
int i;
/* Map 5 pages. */
- ptr = mmap(NULL, 5 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 5 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Place guard markers at both ends of the 5 page span. */
@@ -976,7 +1137,7 @@ TEST_F(guard_pages, mremap_shrink)
* Assert that forking a process with VMAs that do not have VM_WIPEONFORK set
* retain guard pages.
*/
-TEST_F(guard_pages, fork)
+TEST_F(guard_regions, fork)
{
const unsigned long page_size = self->page_size;
char *ptr;
@@ -984,8 +1145,8 @@ TEST_F(guard_pages, fork)
int i;
/* Map 10 pages. */
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Establish guard pages in the first 5 pages. */
@@ -1031,16 +1192,19 @@ TEST_F(guard_pages, fork)
* Assert expected behaviour after we fork populated ranges of anonymous memory
* and then guard and unguard the range.
*/
-TEST_F(guard_pages, fork_cow)
+TEST_F(guard_regions, fork_cow)
{
const unsigned long page_size = self->page_size;
char *ptr;
pid_t pid;
int i;
+ if (variant->backing != ANON_BACKED)
+ SKIP(return, "CoW only supported on anon mappings");
+
/* Map 10 pages. */
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Populate range. */
@@ -1102,16 +1266,19 @@ TEST_F(guard_pages, fork_cow)
* Assert that forking a process with VMAs that do have VM_WIPEONFORK set
* behave as expected.
*/
-TEST_F(guard_pages, fork_wipeonfork)
+TEST_F(guard_regions, fork_wipeonfork)
{
const unsigned long page_size = self->page_size;
char *ptr;
pid_t pid;
int i;
+ if (variant->backing != ANON_BACKED)
+ SKIP(return, "Wipe on fork only supported on anon mappings");
+
/* Map 10 pages. */
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Mark wipe on fork. */
@@ -1152,15 +1319,18 @@ TEST_F(guard_pages, fork_wipeonfork)
}
/* Ensure that MADV_FREE retains guard entries as expected. */
-TEST_F(guard_pages, lazyfree)
+TEST_F(guard_regions, lazyfree)
{
const unsigned long page_size = self->page_size;
char *ptr;
int i;
+ if (variant->backing != ANON_BACKED)
+ SKIP(return, "MADV_FREE only supported on anon mappings");
+
/* Map 10 pages. */
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Guard range. */
@@ -1188,14 +1358,14 @@ TEST_F(guard_pages, lazyfree)
}
/* Ensure that MADV_POPULATE_READ, MADV_POPULATE_WRITE behave as expected. */
-TEST_F(guard_pages, populate)
+TEST_F(guard_regions, populate)
{
const unsigned long page_size = self->page_size;
char *ptr;
/* Map 10 pages. */
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Guard range. */
@@ -1214,15 +1384,15 @@ TEST_F(guard_pages, populate)
}
/* Ensure that MADV_COLD, MADV_PAGEOUT do not remove guard markers. */
-TEST_F(guard_pages, cold_pageout)
+TEST_F(guard_regions, cold_pageout)
{
const unsigned long page_size = self->page_size;
char *ptr;
int i;
/* Map 10 pages. */
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Guard range. */
@@ -1260,7 +1430,7 @@ TEST_F(guard_pages, cold_pageout)
}
/* Ensure that guard pages do not break userfaultd. */
-TEST_F(guard_pages, uffd)
+TEST_F(guard_regions, uffd)
{
const unsigned long page_size = self->page_size;
int uffd;
@@ -1273,6 +1443,9 @@ TEST_F(guard_pages, uffd)
struct uffdio_register reg;
struct uffdio_range range;
+ if (!is_anon_backed(variant))
+ SKIP(return, "uffd only works on anon backing");
+
/* Set up uffd. */
uffd = userfaultfd(0);
if (uffd == -1 && errno == EPERM)
@@ -1282,8 +1455,8 @@ TEST_F(guard_pages, uffd)
ASSERT_EQ(ioctl(uffd, UFFDIO_API, &api), 0);
/* Map 10 pages. */
- ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANON | MAP_PRIVATE, -1, 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
ASSERT_NE(ptr, MAP_FAILED);
/* Register the range with uffd. */
@@ -1309,4 +1482,593 @@ TEST_F(guard_pages, uffd)
ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
}
+/*
+ * Mark a region within a file-backed mapping with MADV_SEQUENTIAL so we
+ * read ahead aggressively, then install guard regions and assert that
+ * they behave correctly.
+ *
+ * We page out using MADV_PAGEOUT before checking guard regions so we drop
+ * page cache folios, maximising the chance of exposing any broken readahead.
+ */
+TEST_F(guard_regions, madvise_sequential)
+{
+ char *ptr;
+ int i;
+ const unsigned long page_size = self->page_size;
+
+ if (variant->backing == ANON_BACKED)
+ SKIP(return, "MADV_SEQUENTIAL meaningful only for file-backed");
+
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ /* Establish a pattern of data in the file. */
+ set_pattern(ptr, 10, page_size);
+ ASSERT_TRUE(check_pattern(ptr, 10, page_size));
+
+ /* Mark it as being accessed sequentially. */
+ ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_SEQUENTIAL), 0);
+
+ /* Mark every other page a guard page. */
+ for (i = 0; i < 10; i += 2) {
+ char *ptr2 = &ptr[i * page_size];
+
+ ASSERT_EQ(madvise(ptr2, page_size, MADV_GUARD_INSTALL), 0);
+ }
+
+ /* Now page it out. */
+ ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);
+
+ /* Now make sure pages are as expected. */
+ for (i = 0; i < 10; i++) {
+ char *chrp = &ptr[i * page_size];
+
+ if (i % 2 == 0) {
+ bool result = try_read_write_buf(chrp);
+
+ ASSERT_FALSE(result);
+ } else {
+ ASSERT_EQ(*chrp, 'a' + i);
+ }
+ }
+
+ /* Now remove guard pages. */
+ ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
+
+ /* Now make sure all data is as expected. */
+ if (!check_pattern(ptr, 10, page_size))
+ ASSERT_TRUE(false);
+
+ ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
+}
+
+/*
+ * Check that file-backed mappings implement guard regions with MAP_PRIVATE
+ * correctly.
+ */
+TEST_F(guard_regions, map_private)
+{
+ const unsigned long page_size = self->page_size;
+ char *ptr_shared, *ptr_private;
+ int i;
+
+ if (variant->backing == ANON_BACKED)
+ SKIP(return, "MAP_PRIVATE test specific to file-backed");
+
+ ptr_shared = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0);
+ ASSERT_NE(ptr_shared, MAP_FAILED);
+
+	/* Manually mmap(): do not use the mmap_() wrapper so we can force MAP_PRIVATE. */
+ ptr_private = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, self->fd, 0);
+ ASSERT_NE(ptr_private, MAP_FAILED);
+
+ /* Set pattern in shared mapping. */
+ set_pattern(ptr_shared, 10, page_size);
+
+ /* Install guard regions in every other page in the shared mapping. */
+ for (i = 0; i < 10; i += 2) {
+ char *ptr = &ptr_shared[i * page_size];
+
+ ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
+ }
+
+ for (i = 0; i < 10; i++) {
+ /* Every even shared page should be guarded. */
+ ASSERT_EQ(try_read_buf(&ptr_shared[i * page_size]), i % 2 != 0);
+ /* Private mappings should always be readable. */
+ ASSERT_TRUE(try_read_buf(&ptr_private[i * page_size]));
+ }
+
+ /* Install guard regions in every other page in the private mapping. */
+ for (i = 0; i < 10; i += 2) {
+ char *ptr = &ptr_private[i * page_size];
+
+ ASSERT_EQ(madvise(ptr, page_size, MADV_GUARD_INSTALL), 0);
+ }
+
+ for (i = 0; i < 10; i++) {
+ /* Every even shared page should be guarded. */
+ ASSERT_EQ(try_read_buf(&ptr_shared[i * page_size]), i % 2 != 0);
+ /* Every odd private page should be guarded. */
+ ASSERT_EQ(try_read_buf(&ptr_private[i * page_size]), i % 2 != 0);
+ }
+
+ /* Remove guard regions from shared mapping. */
+ ASSERT_EQ(madvise(ptr_shared, 10 * page_size, MADV_GUARD_REMOVE), 0);
+
+ for (i = 0; i < 10; i++) {
+ /* Shared mappings should always be readable. */
+ ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
+ /* Every even private page should be guarded. */
+ ASSERT_EQ(try_read_buf(&ptr_private[i * page_size]), i % 2 != 0);
+ }
+
+ /* Remove guard regions from private mapping. */
+ ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0);
+
+ for (i = 0; i < 10; i++) {
+ /* Shared mappings should always be readable. */
+ ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
+ /* Private mappings should always be readable. */
+ ASSERT_TRUE(try_read_buf(&ptr_private[i * page_size]));
+ }
+
+ /* Ensure patterns are intact. */
+ ASSERT_TRUE(check_pattern(ptr_shared, 10, page_size));
+ ASSERT_TRUE(check_pattern(ptr_private, 10, page_size));
+
+ /* Now write out every other page to MAP_PRIVATE. */
+ for (i = 0; i < 10; i += 2) {
+ char *ptr = &ptr_private[i * page_size];
+
+ memset(ptr, 'a' + i, page_size);
+ }
+
+ /*
+ * At this point the mapping is:
+ *
+ * 0123456789
+ * SPSPSPSPSP
+ *
+ * Where S = shared, P = private mappings.
+ */
+
+ /* Now mark the beginning of the mapping guarded. */
+ ASSERT_EQ(madvise(ptr_private, 5 * page_size, MADV_GUARD_INSTALL), 0);
+
+ /*
+ * This renders the mapping:
+ *
+ * 0123456789
+ * xxxxxPSPSP
+ */
+
+ for (i = 0; i < 10; i++) {
+ char *ptr = &ptr_private[i * page_size];
+
+ /* Ensure guard regions as expected. */
+ ASSERT_EQ(try_read_buf(ptr), i >= 5);
+ /* The shared mapping should always succeed. */
+ ASSERT_TRUE(try_read_buf(&ptr_shared[i * page_size]));
+ }
+
+ /* Remove the guard regions altogether. */
+ ASSERT_EQ(madvise(ptr_private, 10 * page_size, MADV_GUARD_REMOVE), 0);
+
+	/*
+	 * We now expect the mapping to be:
+	 *
+	 * 0123456789
+	 * SSSSSPSPSP
+	 *
+	 * As we removed guard regions, the private pages from the first 5 will
+	 * have been zapped, so a fault will reestablish the shared mapping.
+	 */
+
+ for (i = 0; i < 10; i++) {
+ char *ptr = &ptr_private[i * page_size];
+
+ /*
+		 * Assert that pages still shared within the MAP_PRIVATE
+		 * mapping match the shared mapping.
+ */
+ if (i < 5 || i % 2 == 0) {
+ char *ptr_s = &ptr_shared[i * page_size];
+
+ ASSERT_EQ(memcmp(ptr, ptr_s, page_size), 0);
+ continue;
+ }
+
+ /* Everything else is a private mapping. */
+ ASSERT_TRUE(is_buf_eq(ptr, page_size, 'a' + i));
+ }
+
+ ASSERT_EQ(munmap(ptr_shared, 10 * page_size), 0);
+ ASSERT_EQ(munmap(ptr_private, 10 * page_size), 0);
+}
+
+/* Test that guard regions established over a read-only mapping function correctly. */
+TEST_F(guard_regions, readonly_file)
+{
+ const unsigned long page_size = self->page_size;
+ char *ptr;
+ int i;
+
+ if (variant->backing == ANON_BACKED)
+ SKIP(return, "Read-only test specific to file-backed");
+
+ /* Map shared so we can populate with pattern, populate it, unmap. */
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+ set_pattern(ptr, 10, page_size);
+ ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
+ /* Close the fd so we can re-open read-only. */
+ ASSERT_EQ(close(self->fd), 0);
+
+ /* Re-open read-only. */
+ self->fd = open(self->path, O_RDONLY);
+ ASSERT_NE(self->fd, -1);
+ /* Re-map read-only. */
+ ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ /* Mark every other page guarded. */
+ for (i = 0; i < 10; i += 2) {
+ char *ptr_pg = &ptr[i * page_size];
+
+ ASSERT_EQ(madvise(ptr_pg, page_size, MADV_GUARD_INSTALL), 0);
+ }
+
+	/* Assert that the guard regions are in place. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_pg = &ptr[i * page_size];
+
+ ASSERT_EQ(try_read_buf(ptr_pg), i % 2 != 0);
+ }
+
+ /* Remove guard regions. */
+ ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
+
+ /* Ensure the data is as expected. */
+ ASSERT_TRUE(check_pattern(ptr, 10, page_size));
+
+ ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
+}
+
+TEST_F(guard_regions, fault_around)
+{
+ const unsigned long page_size = self->page_size;
+ char *ptr;
+ int i;
+
+ if (variant->backing == ANON_BACKED)
+ SKIP(return, "Fault-around test specific to file-backed");
+
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ /* Establish a pattern in the backing file. */
+ set_pattern(ptr, 10, page_size);
+
+ /*
+ * Now drop it from the page cache so we get major faults when next we
+ * map it.
+ */
+ ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_PAGEOUT), 0);
+
+ /* Unmap and remap 'to be sure'. */
+ ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ /* Now make every even page guarded. */
+ for (i = 0; i < 10; i += 2) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
+ }
+
+ /* Now fault in every odd page. This should trigger fault-around. */
+ for (i = 1; i < 10; i += 2) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_TRUE(try_read_buf(ptr_p));
+ }
+
+ /* Finally, ensure that guard regions are intact as expected. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0);
+ }
+
+ ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
+}
+
+TEST_F(guard_regions, truncation)
+{
+ const unsigned long page_size = self->page_size;
+ char *ptr;
+ int i;
+
+ if (variant->backing == ANON_BACKED)
+ SKIP(return, "Truncation test specific to file-backed");
+
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ /*
+ * Establish a pattern in the backing file, just so there is data
+ * there.
+ */
+ set_pattern(ptr, 10, page_size);
+
+ /* Now make every even page guarded. */
+ for (i = 0; i < 10; i += 2) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
+ }
+
+ /* Now assert things are as expected. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
+ }
+
+	/* Now truncate to the actually used size (file was initialised to 100 pages). */
+ ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
+
+ /* Here the guard regions will remain intact. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
+ }
+
+ /* Now truncate to half the size, then truncate again to the full size. */
+ ASSERT_EQ(ftruncate(self->fd, 5 * page_size), 0);
+ ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
+
+ /* Again, guard pages will remain intact. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(try_read_write_buf(ptr_p), i % 2 != 0);
+ }
+
+ ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
+}
+
+TEST_F(guard_regions, hole_punch)
+{
+ const unsigned long page_size = self->page_size;
+ char *ptr;
+ int i;
+
+ if (variant->backing == ANON_BACKED)
+		SKIP(return, "Hole punch test specific to file-backed");
+
+ /* Establish pattern in mapping. */
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+ set_pattern(ptr, 10, page_size);
+
+ /* Install a guard region in the middle of the mapping. */
+ ASSERT_EQ(madvise(&ptr[3 * page_size], 4 * page_size,
+ MADV_GUARD_INSTALL), 0);
+
+ /*
+ * The buffer will now be:
+ *
+ * 0123456789
+ * ***xxxx***
+ *
+ * Where * is data and x is the guard region.
+ */
+
+ /* Ensure established. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(try_read_buf(ptr_p), i < 3 || i >= 7);
+ }
+
+ /* Now hole punch the guarded region. */
+ ASSERT_EQ(madvise(&ptr[3 * page_size], 4 * page_size,
+ MADV_REMOVE), 0);
+
+ /* Ensure guard regions remain. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(try_read_buf(ptr_p), i < 3 || i >= 7);
+ }
+
+ /* Now remove guard region throughout. */
+ ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
+
+ /* Check that the pattern exists in non-hole punched region. */
+ ASSERT_TRUE(check_pattern(ptr, 3, page_size));
+ /* Check that hole punched region is zeroed. */
+ ASSERT_TRUE(is_buf_eq(&ptr[3 * page_size], 4 * page_size, '\0'));
+ /* Check that the pattern exists in the remainder of the file. */
+ ASSERT_TRUE(check_pattern_offset(ptr, 3, page_size, 7));
+
+ ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
+}
+
+/*
+ * Ensure that a memfd works correctly with guard regions: we can
+ * write-seal it, map it read-only, still establish guard regions within
+ * the mapping, then remove those guard regions and have everything work
+ * correctly.
+ */
+TEST_F(guard_regions, memfd_write_seal)
+{
+ const unsigned long page_size = self->page_size;
+ char *ptr;
+ int i;
+
+ if (variant->backing != SHMEM_BACKED)
+ SKIP(return, "memfd write seal test specific to shmem");
+
+	/* OK, we need a memfd, so close the existing one. */
+ ASSERT_EQ(close(self->fd), 0);
+
+ /* Create and truncate memfd. */
+ self->fd = memfd_create("guard_regions_memfd_seals_test",
+ MFD_ALLOW_SEALING);
+ ASSERT_NE(self->fd, -1);
+ ASSERT_EQ(ftruncate(self->fd, 10 * page_size), 0);
+
+ /* Map, set pattern, unmap. */
+ ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ | PROT_WRITE, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+ set_pattern(ptr, 10, page_size);
+ ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
+
+ /* Write-seal the memfd. */
+ ASSERT_EQ(fcntl(self->fd, F_ADD_SEALS, F_SEAL_WRITE), 0);
+
+ /* Now map the memfd readonly. */
+ ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ /* Ensure pattern is as expected. */
+ ASSERT_TRUE(check_pattern(ptr, 10, page_size));
+
+ /* Now make every even page guarded. */
+ for (i = 0; i < 10; i += 2) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
+ }
+
+ /* Now assert things are as expected. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0);
+ }
+
+ /* Now remove guard regions. */
+ ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
+
+ /* Ensure pattern is as expected. */
+ ASSERT_TRUE(check_pattern(ptr, 10, page_size));
+
+ /* Ensure write seal intact. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_FALSE(try_write_buf(ptr_p));
+ }
+
+ ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
+}
+
+/*
+ * Since we are now permitted to establish guard regions in read-only
+ * anonymous mappings, test that guard regions function with a mapping to
+ * the anonymous zero page. This is done for thoroughness, though it
+ * probably has no practical use.
+ */
+TEST_F(guard_regions, anon_zeropage)
+{
+ const unsigned long page_size = self->page_size;
+ char *ptr;
+ int i;
+
+ if (!is_anon_backed(variant))
+ SKIP(return, "anon zero page test specific to anon/shmem");
+
+ /* Obtain a read-only i.e. anon zero page mapping. */
+ ptr = mmap_(self, variant, NULL, 10 * page_size, PROT_READ, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ /* Now make every even page guarded. */
+ for (i = 0; i < 10; i += 2) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
+ }
+
+ /* Now assert things are as expected. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(try_read_buf(ptr_p), i % 2 != 0);
+ }
+
+ /* Now remove all guard regions. */
+ ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0);
+
+ /* Now assert things are as expected. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_TRUE(try_read_buf(ptr_p));
+ }
+
+	/* Ensure all reads still return the zero page. */
+ ASSERT_TRUE(is_buf_eq(ptr, 10 * page_size, '\0'));
+
+ ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
+}
+
+/*
+ * Assert that /proc/$pid/pagemap correctly identifies guard region ranges.
+ */
+TEST_F(guard_regions, pagemap)
+{
+ const unsigned long page_size = self->page_size;
+ int proc_fd;
+ char *ptr;
+ int i;
+
+ proc_fd = open("/proc/self/pagemap", O_RDONLY);
+ ASSERT_NE(proc_fd, -1);
+
+ ptr = mmap_(self, variant, NULL, 10 * page_size,
+ PROT_READ | PROT_WRITE, 0, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ /* Read from pagemap, and assert no guard regions are detected. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+ unsigned long entry = pagemap_get_entry(proc_fd, ptr_p);
+ unsigned long masked = entry & PM_GUARD_REGION;
+
+ ASSERT_EQ(masked, 0);
+ }
+
+ /* Install a guard region in every other page. */
+ for (i = 0; i < 10; i += 2) {
+ char *ptr_p = &ptr[i * page_size];
+
+ ASSERT_EQ(madvise(ptr_p, page_size, MADV_GUARD_INSTALL), 0);
+ }
+
+ /* Re-read from pagemap, and assert guard regions are detected. */
+ for (i = 0; i < 10; i++) {
+ char *ptr_p = &ptr[i * page_size];
+ unsigned long entry = pagemap_get_entry(proc_fd, ptr_p);
+ unsigned long masked = entry & PM_GUARD_REGION;
+
+ ASSERT_EQ(masked, i % 2 == 0 ? PM_GUARD_REGION : 0);
+ }
+
+ ASSERT_EQ(close(proc_fd), 0);
+ ASSERT_EQ(munmap(ptr, 10 * page_size), 0);
+}
+
TEST_HARNESS_MAIN
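
Stripped of the harness, the mechanism all of these guard-region tests exercise is a pair of madvise() operations. A minimal standalone sketch, assuming a kernel with guard-region support; the fallback MADV_* values are an assumption in case your uapi headers predate the feature:

	#include <sys/mman.h>
	#include <unistd.h>

	#ifndef MADV_GUARD_INSTALL
	#define MADV_GUARD_INSTALL 102	/* assumed uapi value */
	#define MADV_GUARD_REMOVE  103	/* assumed uapi value */
	#endif

	int main(void)
	{
		size_t psz = (size_t)sysconf(_SC_PAGESIZE);
		char *ptr = mmap(NULL, 3 * psz, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (ptr == MAP_FAILED)
			return 1;

		/* Guard the middle page: touching it now raises SIGSEGV. */
		if (madvise(&ptr[psz], psz, MADV_GUARD_INSTALL))
			return 1;

		ptr[0] = 'x';		/* fine */
		ptr[2 * psz] = 'x';	/* fine */

		/* Removing the guard zaps it back to a usable, zero-filled page. */
		if (madvise(&ptr[psz], psz, MADV_GUARD_REMOVE))
			return 1;
		ptr[psz] = 'x';		/* fine again */

		return munmap(ptr, 3 * psz);
	}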
diff --git a/tools/testing/selftests/mm/gup_longterm.c b/tools/testing/selftests/mm/gup_longterm.c
index 9423ad439a61..21595b20bbc3 100644
--- a/tools/testing/selftests/mm/gup_longterm.c
+++ b/tools/testing/selftests/mm/gup_longterm.c
@@ -96,13 +96,17 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared)
int ret;
if (ftruncate(fd, size)) {
- ksft_test_result_fail("ftruncate() failed\n");
+ if (errno == ENOENT) {
+ skip_test_dodgy_fs("ftruncate()");
+ } else {
+ ksft_test_result_fail("ftruncate() failed (%s)\n", strerror(errno));
+ }
return;
}
if (fallocate(fd, 0, 0, size)) {
if (size == pagesize)
- ksft_test_result_fail("fallocate() failed\n");
+ ksft_test_result_fail("fallocate() failed (%s)\n", strerror(errno));
else
ksft_test_result_skip("need more free huge pages\n");
return;
@@ -112,7 +116,7 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared)
shared ? MAP_SHARED : MAP_PRIVATE, fd, 0);
if (mem == MAP_FAILED) {
if (size == pagesize || shared)
- ksft_test_result_fail("mmap() failed\n");
+ ksft_test_result_fail("mmap() failed (%s)\n", strerror(errno));
else
ksft_test_result_skip("need more free huge pages\n");
return;
@@ -130,7 +134,7 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared)
*/
ret = mprotect(mem, size, PROT_READ);
if (ret) {
- ksft_test_result_fail("mprotect() failed\n");
+ ksft_test_result_fail("mprotect() failed (%s)\n", strerror(errno));
goto munmap;
}
/* FALLTHROUGH */
@@ -165,18 +169,20 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared)
args.flags |= rw ? PIN_LONGTERM_TEST_FLAG_USE_WRITE : 0;
ret = ioctl(gup_fd, PIN_LONGTERM_TEST_START, &args);
if (ret && errno == EINVAL) {
- ksft_test_result_skip("PIN_LONGTERM_TEST_START failed\n");
+			ksft_test_result_skip("PIN_LONGTERM_TEST_START failed (EINVAL)\n");
break;
} else if (ret && errno == EFAULT) {
ksft_test_result(!should_work, "Should have failed\n");
break;
} else if (ret) {
- ksft_test_result_fail("PIN_LONGTERM_TEST_START failed\n");
+ ksft_test_result_fail("PIN_LONGTERM_TEST_START failed (%s)\n",
+ strerror(errno));
break;
}
if (ioctl(gup_fd, PIN_LONGTERM_TEST_STOP))
- ksft_print_msg("[INFO] PIN_LONGTERM_TEST_STOP failed\n");
+ ksft_print_msg("[INFO] PIN_LONGTERM_TEST_STOP failed (%s)\n",
+ strerror(errno));
/*
* TODO: if the kernel ever supports long-term R/W pinning on
@@ -202,7 +208,8 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared)
/* Skip on errors, as we might just lack kernel support. */
ret = io_uring_queue_init(1, &ring, 0);
if (ret < 0) {
- ksft_test_result_skip("io_uring_queue_init() failed\n");
+ ksft_test_result_skip("io_uring_queue_init() failed (%s)\n",
+ strerror(-ret));
break;
}
/*
@@ -215,13 +222,15 @@ static void do_test(int fd, size_t size, enum test_type type, bool shared)
/* Only new kernels return EFAULT. */
if (ret && (errno == ENOSPC || errno == EOPNOTSUPP ||
errno == EFAULT)) {
- ksft_test_result(!should_work, "Should have failed\n");
+ ksft_test_result(!should_work, "Should have failed (%s)\n",
+ strerror(errno));
} else if (ret) {
/*
* We might just lack support or have insufficient
* MEMLOCK limits.
*/
- ksft_test_result_skip("io_uring_register_buffers() failed\n");
+ ksft_test_result_skip("io_uring_register_buffers() failed (%s)\n",
+ strerror(-ret));
} else {
ksft_test_result(should_work, "Should have worked\n");
io_uring_unregister_buffers(&ring);
@@ -249,7 +258,7 @@ static void run_with_memfd(test_fn fn, const char *desc)
fd = memfd_create("test", 0);
if (fd < 0) {
- ksft_test_result_fail("memfd_create() failed\n");
+ ksft_test_result_fail("memfd_create() failed (%s)\n", strerror(errno));
return;
}
@@ -266,13 +275,13 @@ static void run_with_tmpfile(test_fn fn, const char *desc)
file = tmpfile();
if (!file) {
- ksft_test_result_fail("tmpfile() failed\n");
+ ksft_test_result_fail("tmpfile() failed (%s)\n", strerror(errno));
return;
}
fd = fileno(file);
if (fd < 0) {
- ksft_test_result_fail("fileno() failed\n");
+ ksft_test_result_fail("fileno() failed (%s)\n", strerror(errno));
goto close;
}
@@ -290,12 +299,12 @@ static void run_with_local_tmpfile(test_fn fn, const char *desc)
fd = mkstemp(filename);
if (fd < 0) {
- ksft_test_result_fail("mkstemp() failed\n");
+ ksft_test_result_fail("mkstemp() failed (%s)\n", strerror(errno));
return;
}
if (unlink(filename)) {
- ksft_test_result_fail("unlink() failed\n");
+ ksft_test_result_fail("unlink() failed (%s)\n", strerror(errno));
goto close;
}
@@ -317,7 +326,7 @@ static void run_with_memfd_hugetlb(test_fn fn, const char *desc,
fd = memfd_create("test", flags);
if (fd < 0) {
- ksft_test_result_skip("memfd_create() failed\n");
+ ksft_test_result_skip("memfd_create() failed (%s)\n", strerror(errno));
return;
}
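One subtlety behind the hunks above: liburing calls such as io_uring_queue_init() and io_uring_register_buffers() return a negative errno value directly instead of setting errno, which is why the new messages pass strerror(-ret) there but strerror(errno) after ordinary syscalls like mmap() and ioctl(). A minimal sketch of the pattern, reusing the kselftest helpers already used in this file:

	ret = io_uring_queue_init(1, &ring, 0);
	if (ret < 0)	/* ret is -errno (e.g. -ENOMEM); errno itself is unreliable here */
		ksft_test_result_skip("io_uring_queue_init() failed (%s)\n",
				      strerror(-ret));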
diff --git a/tools/testing/selftests/mm/map_populate.c b/tools/testing/selftests/mm/map_populate.c
index 5c8a53869b1b..9df2636c829b 100644
--- a/tools/testing/selftests/mm/map_populate.c
+++ b/tools/testing/selftests/mm/map_populate.c
@@ -18,6 +18,8 @@
#include <unistd.h>
#include "../kselftest.h"
+#include "vm_util.h"
+
#define MMAP_SZ 4096
#define BUG_ON(condition, description) \
@@ -87,6 +89,9 @@ int main(int argc, char **argv)
BUG_ON(!ftmp, "tmpfile()");
ret = ftruncate(fileno(ftmp), MMAP_SZ);
+ if (ret < 0 && errno == ENOENT) {
+ skip_test_dodgy_fs("ftruncate()");
+ }
BUG_ON(ret, "ftruncate()");
smap = mmap(0, MMAP_SZ, PROT_READ | PROT_WRITE,
diff --git a/tools/testing/selftests/mm/mlock-random-test.c b/tools/testing/selftests/mm/mlock-random-test.c
index 1cd80b0f76c3..b8d7e966f44c 100644
--- a/tools/testing/selftests/mm/mlock-random-test.c
+++ b/tools/testing/selftests/mm/mlock-random-test.c
@@ -161,9 +161,9 @@ static void test_mlock_within_limit(char *p, int alloc_size)
MLOCK_ONFAULT);
if (ret)
- ksft_exit_fail_msg("%s() failure at |%p(%d)| mlock:|%p(%d)|\n",
+ ksft_exit_fail_msg("%s() failure (%s) at |%p(%d)| mlock:|%p(%d)|\n",
is_mlock ? "mlock" : "mlock2",
- p, alloc_size,
+ strerror(errno), p, alloc_size,
p + start_offset, lock_size);
}
diff --git a/tools/testing/selftests/mm/mlock2.h b/tools/testing/selftests/mm/mlock2.h
index 4417eaa5cfb7..81e77fa41901 100644
--- a/tools/testing/selftests/mm/mlock2.h
+++ b/tools/testing/selftests/mm/mlock2.h
@@ -6,7 +6,13 @@
static int mlock2_(void *start, size_t len, int flags)
{
- return syscall(__NR_mlock2, start, len, flags);
+ int ret = syscall(__NR_mlock2, start, len, flags);
+
+ if (ret) {
+ errno = ret;
+ return -1;
+ }
+ return 0;
}
static FILE *seek_to_smaps_entry(unsigned long addr)
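The reworked wrapper evidently gives mlock2_() libc-style semantics (-1 plus errno on failure), which is what lets callers such as the mlock-random-test hunk above print strerror(errno). A hedged usage sketch, using only names from this series:

	if (mlock2_(p, alloc_size, MLOCK_ONFAULT))
		ksft_exit_fail_msg("mlock2() failure (%s) at |%p(%d)|\n",
				   strerror(errno), p, alloc_size);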
diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh
index 7cc71d942f83..9aff33b10999 100755
--- a/tools/testing/selftests/mm/run_vmtests.sh
+++ b/tools/testing/selftests/mm/run_vmtests.sh
@@ -187,9 +187,10 @@ if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
printf "Not enough huge pages available (%d < %d)\n" \
"$freepgs" "$needpgs"
fi
+ HAVE_HUGEPAGES=1
else
echo "no hugetlbfs support in kernel?"
- exit 1
+ HAVE_HUGEPAGES=0
fi
# filter 64bit architectures
@@ -218,13 +219,20 @@ pretty_name() {
# Usage: run_test [test binary] [arbitrary test arguments...]
run_test() {
if test_selected ${CATEGORY}; then
+ local skip=0
+
# On memory constrained systems some tests can fail to allocate hugepages.
# Perform some cleanup before the test for a higher success rate.
if [ ${CATEGORY} == "thp" -o ${CATEGORY} == "hugetlb" ]; then
- echo 3 > /proc/sys/vm/drop_caches
- sleep 2
- echo 1 > /proc/sys/vm/compact_memory
- sleep 2
+ if [ "${HAVE_HUGEPAGES}" = "1" ]; then
+ echo 3 > /proc/sys/vm/drop_caches
+ sleep 2
+ echo 1 > /proc/sys/vm/compact_memory
+ sleep 2
+ else
+ echo "hugepages not supported" | tap_prefix
+ skip=1
+ fi
fi
local test=$(pretty_name "$*")
@@ -232,8 +240,12 @@ run_test() {
local sep=$(echo -n "$title" | tr "[:graph:][:space:]" -)
printf "%s\n%s\n%s\n" "$sep" "$title" "$sep" | tap_prefix
- ("$@" 2>&1) | tap_prefix
- local ret=${PIPESTATUS[0]}
+ if [ "${skip}" != "1" ]; then
+ ("$@" 2>&1) | tap_prefix
+ local ret=${PIPESTATUS[0]}
+ else
+ local ret=$ksft_skip
+ fi
count_total=$(( count_total + 1 ))
if [ $ret -eq 0 ]; then
count_pass=$(( count_pass + 1 ))
@@ -271,13 +283,15 @@ CATEGORY="hugetlb" run_test ./hugepage-vmemmap
CATEGORY="hugetlb" run_test ./hugetlb-madvise
CATEGORY="hugetlb" run_test ./hugetlb_dio
-nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages)
-# For this test, we need one and just one huge page
-echo 1 > /proc/sys/vm/nr_hugepages
-CATEGORY="hugetlb" run_test ./hugetlb_fault_after_madv
-CATEGORY="hugetlb" run_test ./hugetlb_madv_vs_map
-# Restore the previous number of huge pages, since further tests rely on it
-echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages
+if [ "${HAVE_HUGEPAGES}" = "1" ]; then
+ nr_hugepages_tmp=$(cat /proc/sys/vm/nr_hugepages)
+ # For this test, we need one and just one huge page
+ echo 1 > /proc/sys/vm/nr_hugepages
+ CATEGORY="hugetlb" run_test ./hugetlb_fault_after_madv
+ CATEGORY="hugetlb" run_test ./hugetlb_madv_vs_map
+ # Restore the previous number of huge pages, since further tests rely on it
+ echo "$nr_hugepages_tmp" > /proc/sys/vm/nr_hugepages
+fi
if test_selected "hugetlb"; then
echo "NOTE: These hugetlb tests provide minimal coverage. Use" | tap_prefix
@@ -311,14 +325,35 @@ CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb "$half_ufd_size_MB" 3
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb-private "$half_ufd_size_MB" 32
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem 20 16
CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem-private 20 16
-CATEGORY="userfaultfd" run_test ./uffd-wp-mremap
+# uffd-wp-mremap requires at least one page of each size.
+have_all_size_hugepgs=true
+declare -A nr_size_hugepgs
+for f in /sys/kernel/mm/hugepages/**/nr_hugepages; do
+ old=$(cat $f)
+ nr_size_hugepgs["$f"]="$old"
+ if [ "$old" == 0 ]; then
+ echo 1 > "$f"
+ fi
+ if [ $(cat "$f") == 0 ]; then
+ have_all_size_hugepgs=false
+ break
+ fi
+done
+if $have_all_size_hugepgs; then
+ CATEGORY="userfaultfd" run_test ./uffd-wp-mremap
+else
+ echo "# SKIP ./uffd-wp-mremap"
+fi
#cleanup
+for f in "${!nr_size_hugepgs[@]}"; do
+ echo "${nr_size_hugepgs["$f"]}" > "$f"
+done
echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages
CATEGORY="compaction" run_test ./compaction_test
-if command -v sudo &> /dev/null;
+if command -v sudo &> /dev/null && sudo -u nobody ls ./on-fault-limit >/dev/null;
then
CATEGORY="mlock" run_test sudo -u nobody ./on-fault-limit
else
@@ -381,19 +416,21 @@ CATEGORY="mremap" run_test ./mremap_dontunmap
CATEGORY="hmm" run_test bash ./test_hmm.sh smoke
# MADV_GUARD_INSTALL and MADV_GUARD_REMOVE tests
-CATEGORY="madv_guard" run_test ./guard-pages
+CATEGORY="madv_guard" run_test ./guard-regions
# MADV_POPULATE_READ and MADV_POPULATE_WRITE tests
CATEGORY="madv_populate" run_test ./madv_populate
if [ -x ./memfd_secret ]
then
-(echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope 2>&1) | tap_prefix
+(echo 0 > /proc/sys/kernel/yama/ptrace_scope 2>&1) | tap_prefix
CATEGORY="memfd_secret" run_test ./memfd_secret
fi
# KSM KSM_MERGE_TIME_HUGE_PAGES test with size of 100
-CATEGORY="ksm" run_test ./ksm_tests -H -s 100
+if [ "${HAVE_HUGEPAGES}" = "1" ]; then
+ CATEGORY="ksm" run_test ./ksm_tests -H -s 100
+fi
# KSM KSM_MERGE_TIME test with size of 100
CATEGORY="ksm" run_test ./ksm_tests -P -s 100
# KSM MADV_MERGEABLE test with 10 identical pages
@@ -442,15 +479,17 @@ CATEGORY="thp" run_test ./transhuge-stress -d 20
# Try to create XFS if not provided
if [ -z "${SPLIT_HUGE_PAGE_TEST_XFS_PATH}" ]; then
- if test_selected "thp"; then
- if grep xfs /proc/filesystems &>/dev/null; then
- XFS_IMG=$(mktemp /tmp/xfs_img_XXXXXX)
- SPLIT_HUGE_PAGE_TEST_XFS_PATH=$(mktemp -d /tmp/xfs_dir_XXXXXX)
- truncate -s 314572800 ${XFS_IMG}
- mkfs.xfs -q ${XFS_IMG}
- mount -o loop ${XFS_IMG} ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
- MOUNTED_XFS=1
- fi
+ if [ "${HAVE_HUGEPAGES}" = "1" ]; then
+ if test_selected "thp"; then
+ if grep xfs /proc/filesystems &>/dev/null; then
+ XFS_IMG=$(mktemp /tmp/xfs_img_XXXXXX)
+ SPLIT_HUGE_PAGE_TEST_XFS_PATH=$(mktemp -d /tmp/xfs_dir_XXXXXX)
+ truncate -s 314572800 ${XFS_IMG}
+ mkfs.xfs -q ${XFS_IMG}
+ mount -o loop ${XFS_IMG} ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
+ MOUNTED_XFS=1
+ fi
+ fi
fi
fi
diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c
index 3f353f3d070f..aa7400ed0e99 100644
--- a/tools/testing/selftests/mm/split_huge_page_test.c
+++ b/tools/testing/selftests/mm/split_huge_page_test.c
@@ -5,6 +5,7 @@
*/
#define _GNU_SOURCE
+#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
@@ -14,6 +15,7 @@
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/mount.h>
+#include <sys/param.h>
#include <malloc.h>
#include <stdbool.h>
#include <time.h>
@@ -261,18 +263,32 @@ void split_pte_mapped_thp(void)
close(kpageflags_fd);
}
-void split_file_backed_thp(void)
+void split_file_backed_thp(int order)
{
int status;
int fd;
- ssize_t num_written;
char tmpfs_template[] = "/tmp/thp_split_XXXXXX";
const char *tmpfs_loc = mkdtemp(tmpfs_template);
char testfile[INPUT_MAX];
+ ssize_t num_written, num_read;
+ char *file_buf1, *file_buf2;
uint64_t pgoff_start = 0, pgoff_end = 1024;
+ int i;
ksft_print_msg("Please enable pr_debug in split_huge_pages_in_file() for more info.\n");
+ file_buf1 = (char *)malloc(pmd_pagesize);
+ file_buf2 = (char *)malloc(pmd_pagesize);
+
+ if (!file_buf1 || !file_buf2) {
+ ksft_print_msg("cannot allocate file buffers\n");
+ goto out;
+ }
+
+ for (i = 0; i < pmd_pagesize; i++)
+ file_buf1[i] = (char)i;
+ memset(file_buf2, 0, pmd_pagesize);
+
status = mount("tmpfs", tmpfs_loc, "tmpfs", 0, "huge=always,size=4m");
if (status)
@@ -281,26 +297,45 @@ void split_file_backed_thp(void)
status = snprintf(testfile, INPUT_MAX, "%s/thp_file", tmpfs_loc);
if (status >= INPUT_MAX) {
ksft_exit_fail_msg("Fail to create file-backed THP split testing file\n");
+ goto cleanup;
}
- fd = open(testfile, O_CREAT|O_WRONLY, 0664);
+ fd = open(testfile, O_CREAT|O_RDWR, 0664);
if (fd == -1) {
ksft_perror("Cannot open testing file");
goto cleanup;
}
- /* write something to the file, so a file-backed THP can be allocated */
- num_written = write(fd, tmpfs_loc, strlen(tmpfs_loc) + 1);
- close(fd);
+ /* write pmd size data to the file, so a file-backed THP can be allocated */
+ num_written = write(fd, file_buf1, pmd_pagesize);
- if (num_written < 1) {
- ksft_perror("Fail to write data to testing file");
- goto cleanup;
+ if (num_written == -1 || num_written != pmd_pagesize) {
+ ksft_perror("Failed to write data to testing file");
+ goto close_file;
}
/* split the file-backed THP */
- write_debugfs(PATH_FMT, testfile, pgoff_start, pgoff_end, 0);
+ write_debugfs(PATH_FMT, testfile, pgoff_start, pgoff_end, order);
+
+ /* check file content after split */
+ status = lseek(fd, 0, SEEK_SET);
+ if (status == -1) {
+ ksft_perror("Cannot lseek file");
+ goto close_file;
+ }
+
+ num_read = read(fd, file_buf2, num_written);
+ if (num_read == -1 || num_read != num_written) {
+ ksft_perror("Cannot read file content back");
+ goto close_file;
+ }
+
+ if (strncmp(file_buf1, file_buf2, pmd_pagesize) != 0) {
+ ksft_print_msg("File content changed\n");
+ goto close_file;
+ }
+ close(fd);
status = unlink(testfile);
if (status) {
ksft_perror("Cannot remove testing file");
@@ -318,12 +353,15 @@ void split_file_backed_thp(void)
ksft_exit_fail_msg("cannot remove tmp dir: %s\n", strerror(errno));
ksft_print_msg("Please check dmesg for more information\n");
- ksft_test_result_pass("File-backed THP split test done\n");
+ ksft_test_result_pass("File-backed THP split to order %d test done\n", order);
return;
+close_file:
+ close(fd);
cleanup:
umount(tmpfs_loc);
rmdir(tmpfs_loc);
+out:
ksft_exit_fail_msg("Error occurred\n");
}
@@ -361,6 +399,7 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd,
{
size_t i;
int dummy = 0;
+ unsigned char buf[1024];
srand(time(NULL));
@@ -368,11 +407,12 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd,
if (*fd == -1)
ksft_exit_fail_msg("Failed to create a file at %s\n", testfile);
- for (i = 0; i < fd_size; i++) {
- unsigned char byte = (unsigned char)i;
+ assert(fd_size % sizeof(buf) == 0);
+ for (i = 0; i < sizeof(buf); i++)
+ buf[i] = (unsigned char)i;
+ for (i = 0; i < fd_size; i += sizeof(buf))
+ write(*fd, buf, sizeof(buf));
- write(*fd, &byte, sizeof(byte));
- }
close(*fd);
sync();
*fd = open("/proc/sys/vm/drop_caches", O_WRONLY);
@@ -420,7 +460,8 @@ err_out_unlink:
return -1;
}
-void split_thp_in_pagecache_to_order(size_t fd_size, int order, const char *fs_loc)
+void split_thp_in_pagecache_to_order_at(size_t fd_size, const char *fs_loc,
+ int order, int offset)
{
int fd;
char *addr;
@@ -438,7 +479,12 @@ void split_thp_in_pagecache_to_order(size_t fd_size, int order, const char *fs_l
return;
err = 0;
- write_debugfs(PID_FMT, getpid(), (uint64_t)addr, (uint64_t)addr + fd_size, order);
+ if (offset == -1)
+ write_debugfs(PID_FMT, getpid(), (uint64_t)addr,
+ (uint64_t)addr + fd_size, order);
+ else
+ write_debugfs(PID_FMT, getpid(), (uint64_t)addr,
+ (uint64_t)addr + fd_size, order, offset);
for (i = 0; i < fd_size; i++)
if (*(addr + i) != (char)i) {
@@ -457,9 +503,15 @@ out:
munmap(addr, fd_size);
close(fd);
unlink(testfile);
- if (err)
- ksft_exit_fail_msg("Split PMD-mapped pagecache folio to order %d failed\n", order);
- ksft_test_result_pass("Split PMD-mapped pagecache folio to order %d passed\n", order);
+ if (offset == -1) {
+ if (err)
+ ksft_exit_fail_msg("Split PMD-mapped pagecache folio to order %d failed\n", order);
+ ksft_test_result_pass("Split PMD-mapped pagecache folio to order %d passed\n", order);
+ } else {
+ if (err)
+ ksft_exit_fail_msg("Split PMD-mapped pagecache folio to order %d at in-folio offset %d failed\n", order, offset);
+ ksft_test_result_pass("Split PMD-mapped pagecache folio to order %d at in-folio offset %d passed\n", order, offset);
+ }
}
int main(int argc, char **argv)
@@ -470,6 +522,7 @@ int main(int argc, char **argv)
char fs_loc_template[] = "/tmp/thp_fs_XXXXXX";
const char *fs_loc;
bool created_tmp;
+ int offset;
ksft_print_header();
@@ -481,7 +534,7 @@ int main(int argc, char **argv)
if (argc > 1)
optional_xfs_path = argv[1];
- ksft_set_plan(1+8+2+9);
+ ksft_set_plan(1+8+1+9+9+8*4+2);
pagesize = getpagesize();
pageshift = ffs(pagesize) - 1;
@@ -498,12 +551,19 @@ int main(int argc, char **argv)
split_pmd_thp_to_order(i);
split_pte_mapped_thp();
- split_file_backed_thp();
+ for (i = 0; i < 9; i++)
+ split_file_backed_thp(i);
created_tmp = prepare_thp_fs(optional_xfs_path, fs_loc_template,
&fs_loc);
for (i = 8; i >= 0; i--)
- split_thp_in_pagecache_to_order(fd_size, i, fs_loc);
+ split_thp_in_pagecache_to_order_at(fd_size, fs_loc, i, -1);
+
+ for (i = 0; i < 9; i++)
+ for (offset = 0;
+ offset < pmd_pagesize / pagesize;
+ offset += MAX(pmd_pagesize / pagesize / 4, 1 << i))
+ split_thp_in_pagecache_to_order_at(fd_size, fs_loc, i, offset);
cleanup_thp_fs(fs_loc, created_tmp);
ksft_finished();
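The new ksft_set_plan() arithmetic can be checked by hand. Assuming 4 KiB base pages and 2 MiB PMD THPs (other geometries scale the same way), pmd_pagesize / pagesize is 512 and the offset loop advances by MAX(128, 1 << i): orders 0-7 each visit 4 offsets and order 8 visits 2, giving the 8*4+2 term; the two 9s correspond to the nine-order file-backed loop and the nine-order pagecache loop visible above. A throwaway program reproducing the count:

	#include <stdio.h>
	#include <sys/param.h>	/* MAX() on glibc */

	int main(void)
	{
		int pages_per_pmd = 512;	/* assumed: 2 MiB PMD / 4 KiB pages */
		int i, offset, n = 0;

		for (i = 0; i < 9; i++)
			for (offset = 0; offset < pages_per_pmd;
			     offset += MAX(pages_per_pmd / 4, 1 << i))
				n++;
		printf("%d\n", n);	/* prints 34 == 8*4 + 2 */
		return 0;
	}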
diff --git a/tools/testing/selftests/mm/thuge-gen.c b/tools/testing/selftests/mm/thuge-gen.c
index e4370b79b62f..cd5174d735be 100644
--- a/tools/testing/selftests/mm/thuge-gen.c
+++ b/tools/testing/selftests/mm/thuge-gen.c
@@ -127,7 +127,7 @@ void test_mmap(unsigned long size, unsigned flags)
show(size);
ksft_test_result(size == getpagesize() || (before - after) == NUM_PAGES,
- "%s mmap\n", __func__);
+ "%s mmap %lu\n", __func__, size);
if (munmap(map, size * NUM_PAGES))
ksft_exit_fail_msg("%s: unmap %s\n", __func__, strerror(errno));
@@ -165,7 +165,7 @@ void test_shmget(unsigned long size, unsigned flags)
show(size);
ksft_test_result(size == getpagesize() || (before - after) == NUM_PAGES,
- "%s: mmap\n", __func__);
+ "%s: mmap %lu\n", __func__, size);
if (shmdt(map))
ksft_exit_fail_msg("%s: shmdt: %s\n", __func__, strerror(errno));
}
diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c
index 7ad6ba660c7d..a37088a23ffe 100644
--- a/tools/testing/selftests/mm/uffd-common.c
+++ b/tools/testing/selftests/mm/uffd-common.c
@@ -10,7 +10,7 @@
#define BASE_PMD_ADDR ((void *)(1UL << 30))
volatile bool test_uffdio_copy_eexist = true;
-unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;
+unsigned long nr_parallel, nr_pages, nr_pages_per_cpu, page_size;
char *area_src, *area_src_alias, *area_dst, *area_dst_alias, *area_remap;
int uffd = -1, uffd_flags, finished, *pipefd, test_type;
bool map_shared;
@@ -269,7 +269,7 @@ void uffd_test_ctx_clear(void)
size_t i;
if (pipefd) {
- for (i = 0; i < nr_cpus * 2; ++i) {
+ for (i = 0; i < nr_parallel * 2; ++i) {
if (close(pipefd[i]))
err("close pipefd");
}
@@ -323,7 +323,7 @@ int uffd_test_ctx_init(uint64_t features, const char **errmsg)
ret = userfaultfd_open(&features);
if (ret) {
if (errmsg)
- *errmsg = "possible lack of priviledge";
+ *errmsg = "possible lack of privilege";
return ret;
}
@@ -348,7 +348,7 @@ int uffd_test_ctx_init(uint64_t features, const char **errmsg)
/*
* After initialization of area_src, we must explicitly release pages
* for area_dst to make sure it's fully empty. Otherwise we could have
- * some area_dst pages be errornously initialized with zero pages,
+ * some area_dst pages be erroneously initialized with zero pages,
* hence we could hit memory corruption later in the test.
*
* One example is when THP is globally enabled, above allocate_area()
@@ -365,10 +365,10 @@ int uffd_test_ctx_init(uint64_t features, const char **errmsg)
*/
uffd_test_ops->release_pages(area_dst);
- pipefd = malloc(sizeof(int) * nr_cpus * 2);
+ pipefd = malloc(sizeof(int) * nr_parallel * 2);
if (!pipefd)
err("pipefd");
- for (cpu = 0; cpu < nr_cpus; cpu++)
+ for (cpu = 0; cpu < nr_parallel; cpu++)
if (pipe2(&pipefd[cpu * 2], O_CLOEXEC | O_NONBLOCK))
err("pipe");
diff --git a/tools/testing/selftests/mm/uffd-common.h b/tools/testing/selftests/mm/uffd-common.h
index a70ae10b5f62..7700cbfa3975 100644
--- a/tools/testing/selftests/mm/uffd-common.h
+++ b/tools/testing/selftests/mm/uffd-common.h
@@ -98,7 +98,7 @@ struct uffd_test_case_ops {
};
typedef struct uffd_test_case_ops uffd_test_case_ops_t;
-extern unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;
+extern unsigned long nr_parallel, nr_pages, nr_pages_per_cpu, page_size;
extern char *area_src, *area_src_alias, *area_dst, *area_dst_alias, *area_remap;
extern int uffd, uffd_flags, finished, *pipefd, test_type;
extern bool map_shared;
diff --git a/tools/testing/selftests/mm/uffd-stress.c b/tools/testing/selftests/mm/uffd-stress.c
index 944d559ade21..40af7f67c407 100644
--- a/tools/testing/selftests/mm/uffd-stress.c
+++ b/tools/testing/selftests/mm/uffd-stress.c
@@ -180,12 +180,12 @@ static void *background_thread(void *arg)
static int stress(struct uffd_args *args)
{
unsigned long cpu;
- pthread_t locking_threads[nr_cpus];
- pthread_t uffd_threads[nr_cpus];
- pthread_t background_threads[nr_cpus];
+ pthread_t locking_threads[nr_parallel];
+ pthread_t uffd_threads[nr_parallel];
+ pthread_t background_threads[nr_parallel];
finished = 0;
- for (cpu = 0; cpu < nr_cpus; cpu++) {
+ for (cpu = 0; cpu < nr_parallel; cpu++) {
if (pthread_create(&locking_threads[cpu], &attr,
locking_thread, (void *)cpu))
return 1;
@@ -203,7 +203,7 @@ static int stress(struct uffd_args *args)
background_thread, (void *)cpu))
return 1;
}
- for (cpu = 0; cpu < nr_cpus; cpu++)
+ for (cpu = 0; cpu < nr_parallel; cpu++)
if (pthread_join(background_threads[cpu], NULL))
return 1;
@@ -219,11 +219,11 @@ static int stress(struct uffd_args *args)
uffd_test_ops->release_pages(area_src);
finished = 1;
- for (cpu = 0; cpu < nr_cpus; cpu++)
+ for (cpu = 0; cpu < nr_parallel; cpu++)
if (pthread_join(locking_threads[cpu], NULL))
return 1;
- for (cpu = 0; cpu < nr_cpus; cpu++) {
+ for (cpu = 0; cpu < nr_parallel; cpu++) {
char c;
if (bounces & BOUNCE_POLL) {
if (write(pipefd[cpu*2+1], &c, 1) != 1)
@@ -246,11 +246,11 @@ static int userfaultfd_stress(void)
{
void *area;
unsigned long nr;
- struct uffd_args args[nr_cpus];
+ struct uffd_args args[nr_parallel];
uint64_t mem_size = nr_pages * page_size;
int flags = 0;
- memset(args, 0, sizeof(struct uffd_args) * nr_cpus);
+ memset(args, 0, sizeof(struct uffd_args) * nr_parallel);
if (features & UFFD_FEATURE_WP_UNPOPULATED && test_type == TEST_ANON)
flags = UFFD_FEATURE_WP_UNPOPULATED;
@@ -325,7 +325,7 @@ static int userfaultfd_stress(void)
*/
uffd_test_ops->release_pages(area_dst);
- uffd_stats_reset(args, nr_cpus);
+ uffd_stats_reset(args, nr_parallel);
/* bounce pass */
if (stress(args)) {
@@ -359,7 +359,7 @@ static int userfaultfd_stress(void)
swap(area_src_alias, area_dst_alias);
- uffd_stats_report(args, nr_cpus);
+ uffd_stats_report(args, nr_parallel);
}
uffd_test_ctx_clear();
@@ -412,8 +412,8 @@ static void parse_test_type_arg(const char *raw_type)
* feature.
*/
- if (uffd_get_features(&features))
- err("failed to get available features");
+ if (uffd_get_features(&features) && errno == ENOENT)
+ ksft_exit_skip("failed to get available features (%d)\n", errno);
test_uffdio_wp = test_uffdio_wp &&
(features & UFFD_FEATURE_PAGEFAULT_FLAG_WP);
@@ -435,6 +435,7 @@ static void sigalrm(int sig)
int main(int argc, char **argv)
{
+ unsigned long nr_cpus;
size_t bytes;
if (argc < 4)
@@ -454,10 +455,19 @@ int main(int argc, char **argv)
}
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ if (nr_cpus > 32) {
+ /* Don't let calculation below go to zero. */
+ ksft_print_msg("_SC_NPROCESSORS_ONLN (%lu) too large, capping nr_threads to 32\n",
+ nr_cpus);
+ nr_parallel = 32;
+ } else {
+ nr_parallel = nr_cpus;
+ }
- nr_pages_per_cpu = bytes / page_size / nr_cpus;
+ nr_pages_per_cpu = bytes / page_size / nr_parallel;
if (!nr_pages_per_cpu) {
- _err("invalid MiB");
+ _err("pages_per_cpu = 0, cannot test (%lu / %lu / %lu)",
+ bytes, page_size, nr_parallel);
usage();
}
@@ -466,7 +476,7 @@ int main(int argc, char **argv)
_err("invalid bounces");
usage();
}
- nr_pages = nr_pages_per_cpu * nr_cpus;
+ nr_pages = nr_pages_per_cpu * nr_parallel;
printf("nr_pages: %lu, nr_pages_per_cpu: %lu\n",
nr_pages, nr_pages_per_cpu);
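The cap exists to protect the division that follows it; a worked example with assumed numbers: a 16 MiB area with 4 KiB pages holds 4096 pages, so a hypothetical machine with more than 4096 online CPUs would previously compute nr_pages_per_cpu == 0 and bail, while the 32-way cap yields a healthy 128 pages per worker:

	/* assumed sizes, for illustration only */
	unsigned long pages = (16UL << 20) / 4096;	/* 4096 pages */
	unsigned long uncapped = pages / 8192;		/* 0 on an 8192-CPU box */
	unsigned long capped = pages / 32;		/* 128 with nr_parallel capped */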
diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c
index 74c8bc02b506..e8fd9011c2a3 100644
--- a/tools/testing/selftests/mm/uffd-unit-tests.c
+++ b/tools/testing/selftests/mm/uffd-unit-tests.c
@@ -26,6 +26,8 @@
#define ALIGN_UP(x, align_to) \
((__typeof__(x))((((unsigned long)(x)) + ((align_to)-1)) & ~((align_to)-1)))
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+
struct mem_type {
const char *name;
unsigned int mem_flag;
@@ -196,9 +198,10 @@ uffd_setup_environment(uffd_test_args_t *args, uffd_test_case_t *test,
else
page_size = psize();
- nr_pages = UFFD_TEST_MEM_SIZE / page_size;
+ /* Ensure we have at least 2 pages */
+ nr_pages = MAX(UFFD_TEST_MEM_SIZE, page_size * 2) / page_size;
/* TODO: remove this global var.. it's so ugly */
- nr_cpus = 1;
+ nr_parallel = 1;
/* Initialize test arguments */
args->mem_type = mem_type;
diff --git a/tools/testing/selftests/mm/uffd-wp-mremap.c b/tools/testing/selftests/mm/uffd-wp-mremap.c
index 2c4f984bd73c..c2ba7d46c7b4 100644
--- a/tools/testing/selftests/mm/uffd-wp-mremap.c
+++ b/tools/testing/selftests/mm/uffd-wp-mremap.c
@@ -182,7 +182,10 @@ static void test_one_folio(size_t size, bool private, bool swapout, bool hugetlb
/* Register range for uffd-wp. */
if (userfaultfd_open(&features)) {
- ksft_test_result_fail("userfaultfd_open() failed\n");
+ if (errno == ENOENT)
+ ksft_test_result_skip("userfaultfd not available\n");
+ else
+ ksft_test_result_fail("userfaultfd_open() failed\n");
goto out;
}
if (uffd_register(uffd, mem, size, false, true, false)) {
diff --git a/tools/testing/selftests/mm/va_high_addr_switch.sh b/tools/testing/selftests/mm/va_high_addr_switch.sh
index 2c725773cd79..1f92e8caceac 100755
--- a/tools/testing/selftests/mm/va_high_addr_switch.sh
+++ b/tools/testing/selftests/mm/va_high_addr_switch.sh
@@ -41,6 +41,31 @@ check_supported_x86_64()
fi
}
+check_supported_ppc64()
+{
+ local config="/proc/config.gz"
+ [[ -f "${config}" ]] || config="/boot/config-$(uname -r)"
+ [[ -f "${config}" ]] || fail "Cannot find kernel config in /proc or /boot"
+
+ local pg_table_levels=$(gzip -dcfq "${config}" | grep PGTABLE_LEVELS | cut -d'=' -f 2)
+ if [[ "${pg_table_levels}" -lt 5 ]]; then
+ echo "$0: PGTABLE_LEVELS=${pg_table_levels}, must be >= 5 to run this test"
+ exit $ksft_skip
+ fi
+
+ local mmu_support=$(grep -m1 "mmu" /proc/cpuinfo | awk '{print $3}')
+ if [[ "$mmu_support" != "radix" ]]; then
+ echo "$0: System does not use Radix MMU, required for 5-level paging"
+ exit $ksft_skip
+ fi
+
+ local hugepages_total=$(awk '/HugePages_Total/ {print $2}' /proc/meminfo)
+ if [[ "${hugepages_total}" -eq 0 ]]; then
+ echo "$0: HugePages are not enabled, required for some tests"
+ exit $ksft_skip
+ fi
+}
+
check_test_requirements()
{
# The test supports x86_64 and powerpc64. We currently have no useful
@@ -50,6 +75,9 @@ check_test_requirements()
"x86_64")
check_supported_x86_64
;;
+ "ppc64le"|"ppc64")
+ check_supported_ppc64
+ ;;
*)
return 0
;;
diff --git a/tools/testing/selftests/mm/vm_util.h b/tools/testing/selftests/mm/vm_util.h
index b60ac68a9dc8..6effafdc4d8a 100644
--- a/tools/testing/selftests/mm/vm_util.h
+++ b/tools/testing/selftests/mm/vm_util.h
@@ -5,11 +5,13 @@
#include <err.h>
#include <strings.h> /* ffsl() */
#include <unistd.h> /* _SC_PAGESIZE */
+#include "../kselftest.h"
#define BIT_ULL(nr) (1ULL << (nr))
#define PM_SOFT_DIRTY BIT_ULL(55)
#define PM_MMAP_EXCLUSIVE BIT_ULL(56)
#define PM_UFFD_WP BIT_ULL(57)
+#define PM_GUARD_REGION BIT_ULL(58)
#define PM_FILE BIT_ULL(61)
#define PM_SWAP BIT_ULL(62)
#define PM_PRESENT BIT_ULL(63)
@@ -31,6 +33,23 @@ static inline unsigned int pshift(void)
return __page_shift;
}
+/*
+ * Plan 9 FS has bugs (at least on QEMU) where certain operations fail with
+ * ENOENT on unlinked files. See
+ * https://gitlab.com/qemu-project/qemu/-/issues/103 for some info about such
+ * bugs. There are rumours of NFS implementations with similar bugs.
+ *
+ * Ideally, tests should just detect filesystems known to have such issues and
+ * bail early. But 9pfs has the additional "feature" that it causes fstatfs to
+ * pass through the f_type field from the host filesystem. To avoid having to
+ * scrape /proc/mounts or some other hackery, tests can call this function when
+ * it seems such a bug might have been encountered.
+ */
+static inline void skip_test_dodgy_fs(const char *op_name)
+{
+ ksft_test_result_skip("%s failed with ENOENT. Filesystem might be buggy (9pfs?)\n", op_name);
+}
+
uint64_t pagemap_get_entry(int fd, char *start);
bool pagemap_is_softdirty(int fd, char *start);
bool pagemap_is_swapped(int fd, char *start);
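A sketch of the intended call pattern, mirroring the map_populate.c hunk earlier in this series; the explicit early return is an assumption about how a caller avoids falling through to its failure check, since skip_test_dodgy_fs() itself only records the skip:

	ret = ftruncate(fileno(ftmp), MMAP_SZ);
	if (ret < 0 && errno == ENOENT) {
		skip_test_dodgy_fs("ftruncate()");
		return KSFT_SKIP;	/* hypothetical early-out */
	}
	BUG_ON(ret, "ftruncate()");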
diff --git a/tools/testing/selftests/mseal_system_mappings/.gitignore b/tools/testing/selftests/mseal_system_mappings/.gitignore
new file mode 100644
index 000000000000..319c497a595e
--- /dev/null
+++ b/tools/testing/selftests/mseal_system_mappings/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+sysmap_is_sealed
diff --git a/tools/testing/selftests/mseal_system_mappings/Makefile b/tools/testing/selftests/mseal_system_mappings/Makefile
new file mode 100644
index 000000000000..2b4504e2f52f
--- /dev/null
+++ b/tools/testing/selftests/mseal_system_mappings/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+CFLAGS += -std=c99 -pthread -Wall $(KHDR_INCLUDES)
+
+TEST_GEN_PROGS := sysmap_is_sealed
+
+include ../lib.mk
diff --git a/tools/testing/selftests/mseal_system_mappings/config b/tools/testing/selftests/mseal_system_mappings/config
new file mode 100644
index 000000000000..675cb9f37b86
--- /dev/null
+++ b/tools/testing/selftests/mseal_system_mappings/config
@@ -0,0 +1 @@
+CONFIG_MSEAL_SYSTEM_MAPPINGS=y
diff --git a/tools/testing/selftests/mseal_system_mappings/sysmap_is_sealed.c b/tools/testing/selftests/mseal_system_mappings/sysmap_is_sealed.c
new file mode 100644
index 000000000000..0d2af30c3bf5
--- /dev/null
+++ b/tools/testing/selftests/mseal_system_mappings/sysmap_is_sealed.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * test system mappings are sealed when
+ * KCONFIG_MSEAL_SYSTEM_MAPPINGS=y
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include "../kselftest.h"
+#include "../kselftest_harness.h"
+
+#define VMFLAGS "VmFlags:"
+#define MSEAL_FLAGS "sl"
+#define MAX_LINE_LEN 512
+
+bool has_mapping(char *name, FILE *maps)
+{
+ char line[MAX_LINE_LEN];
+
+ while (fgets(line, sizeof(line), maps)) {
+ if (strstr(line, name))
+ return true;
+ }
+
+ return false;
+}
+
+bool mapping_is_sealed(char *name, FILE *maps)
+{
+ char line[MAX_LINE_LEN];
+
+ while (fgets(line, sizeof(line), maps)) {
+ if (!strncmp(line, VMFLAGS, strlen(VMFLAGS))) {
+ if (strstr(line, MSEAL_FLAGS))
+ return true;
+
+ return false;
+ }
+ }
+
+ return false;
+}
+
+FIXTURE(basic) {
+ FILE *maps;
+};
+
+FIXTURE_SETUP(basic)
+{
+ self->maps = fopen("/proc/self/smaps", "r");
+ if (!self->maps)
+ SKIP(return, "Could not open /proc/self/smap, errno=%d",
+ errno);
+};
+
+FIXTURE_TEARDOWN(basic)
+{
+ if (self->maps)
+ fclose(self->maps);
+};
+
+FIXTURE_VARIANT(basic)
+{
+ char *name;
+ bool sealed;
+};
+
+FIXTURE_VARIANT_ADD(basic, vdso) {
+ .name = "[vdso]",
+ .sealed = true,
+};
+
+FIXTURE_VARIANT_ADD(basic, vvar) {
+ .name = "[vvar]",
+ .sealed = true,
+};
+
+FIXTURE_VARIANT_ADD(basic, vvar_vclock) {
+ .name = "[vvar_vclock]",
+ .sealed = true,
+};
+
+FIXTURE_VARIANT_ADD(basic, sigpage) {
+ .name = "[sigpage]",
+ .sealed = true,
+};
+
+FIXTURE_VARIANT_ADD(basic, vectors) {
+ .name = "[vectors]",
+ .sealed = true,
+};
+
+FIXTURE_VARIANT_ADD(basic, uprobes) {
+ .name = "[uprobes]",
+ .sealed = true,
+};
+
+FIXTURE_VARIANT_ADD(basic, stack) {
+ .name = "[stack]",
+ .sealed = false,
+};
+
+TEST_F(basic, check_sealed)
+{
+ if (!has_mapping(variant->name, self->maps)) {
+ SKIP(return, "could not find the mapping, %s",
+ variant->name);
+ }
+
+ EXPECT_EQ(variant->sealed,
+ mapping_is_sealed(variant->name, self->maps));
+};
+
+TEST_HARNESS_MAIN
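For orientation, the two parsers above walk a shared FILE * cursor, so mapping_is_sealed() relies on the VmFlags: line appearing after the matching mapping header in the same smaps stream, and on the kernel emitting the two-letter sl flag for sealed VMAs. An illustrative (not captured) excerpt of what a sealed [vdso] entry looks like:

	7ffce99e6000-7ffce99e8000 r-xp 00000000 00:00 0          [vdso]
	...
	VmFlags: rd ex mr mw me de sd sl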
diff --git a/tools/testing/selftests/net/mptcp/.gitignore b/tools/testing/selftests/net/mptcp/.gitignore
index 49daae73c41e..833279fb34e2 100644
--- a/tools/testing/selftests/net/mptcp/.gitignore
+++ b/tools/testing/selftests/net/mptcp/.gitignore
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
mptcp_connect
+mptcp_diag
mptcp_inq
mptcp_sockopt
pm_nl_ctl
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
index d240d02fa443..c83a8b47bbdf 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
@@ -1270,7 +1270,7 @@ int main_loop(void)
if (cfg_input && cfg_sockopt_types.mptfo) {
fd_in = open(cfg_input, O_RDONLY);
- if (fd < 0)
+ if (fd_in < 0)
xerror("can't open %s:%d", cfg_input, errno);
}
@@ -1293,13 +1293,13 @@ again:
if (cfg_input && !cfg_sockopt_types.mptfo) {
fd_in = open(cfg_input, O_RDONLY);
- if (fd < 0)
+ if (fd_in < 0)
xerror("can't open %s:%d", cfg_input, errno);
}
ret = copyfd_io(fd_in, fd, 1, 0, &winfo);
if (ret)
- return ret;
+ goto out;
if (cfg_truncate > 0) {
shutdown(fd, SHUT_WR);
@@ -1320,7 +1320,10 @@ again:
close(fd);
}
- return 0;
+out:
+ if (cfg_input)
+ close(fd_in);
+ return ret;
}
int parse_proto(const char *proto)
diff --git a/tools/testing/selftests/net/udpgro_bench.sh b/tools/testing/selftests/net/udpgro_bench.sh
index c51ea90a1395..815fad8c53a8 100755
--- a/tools/testing/selftests/net/udpgro_bench.sh
+++ b/tools/testing/selftests/net/udpgro_bench.sh
@@ -7,7 +7,7 @@ source net_helper.sh
readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
-BPF_FILE="xdp_dummy.bpf.o"
+BPF_FILE="lib/xdp_dummy.bpf.o"
cleanup() {
local -r jobs="$(jobs -p)"
diff --git a/tools/testing/selftests/net/udpgro_frglist.sh b/tools/testing/selftests/net/udpgro_frglist.sh
index 17404f49cdb6..5f3d1a110d11 100755
--- a/tools/testing/selftests/net/udpgro_frglist.sh
+++ b/tools/testing/selftests/net/udpgro_frglist.sh
@@ -7,7 +7,7 @@ source net_helper.sh
readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
-BPF_FILE="xdp_dummy.bpf.o"
+BPF_FILE="lib/xdp_dummy.bpf.o"
cleanup() {
local -r jobs="$(jobs -p)"
diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh
index 550d8eb3e224..f22f6c66997e 100755
--- a/tools/testing/selftests/net/udpgro_fwd.sh
+++ b/tools/testing/selftests/net/udpgro_fwd.sh
@@ -3,7 +3,7 @@
source net_helper.sh
-BPF_FILE="xdp_dummy.bpf.o"
+BPF_FILE="lib/xdp_dummy.bpf.o"
readonly BASE="ns-$(mktemp -u XXXXXX)"
readonly SRC=2
readonly DST=1
diff --git a/tools/testing/selftests/net/veth.sh b/tools/testing/selftests/net/veth.sh
index 6bb7dfaa30b6..9709dd067c72 100755
--- a/tools/testing/selftests/net/veth.sh
+++ b/tools/testing/selftests/net/veth.sh
@@ -1,7 +1,7 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
-BPF_FILE="xdp_dummy.bpf.o"
+BPF_FILE="lib/xdp_dummy.bpf.o"
readonly STATS="$(mktemp -p /tmp ns-XXXXXX)"
readonly BASE=`basename $STATS`
readonly SRC=2
diff --git a/tools/testing/selftests/net/xdp_dummy.bpf.c b/tools/testing/selftests/net/xdp_dummy.bpf.c
deleted file mode 100644
index d988b2e0cee8..000000000000
--- a/tools/testing/selftests/net/xdp_dummy.bpf.c
+++ /dev/null
@@ -1,13 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#define KBUILD_MODNAME "xdp_dummy"
-#include <linux/bpf.h>
-#include <bpf/bpf_helpers.h>
-
-SEC("xdp")
-int xdp_dummy_prog(struct xdp_md *ctx)
-{
- return XDP_PASS;
-}
-
-char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/pidfd/pidfd.h b/tools/testing/selftests/pidfd/pidfd.h
index cec22aa11cdf..55bcf81a2b9a 100644
--- a/tools/testing/selftests/pidfd/pidfd.h
+++ b/tools/testing/selftests/pidfd/pidfd.h
@@ -32,19 +32,19 @@
#endif
#ifndef __NR_pidfd_open
-#define __NR_pidfd_open -1
+#define __NR_pidfd_open 434
#endif
#ifndef __NR_pidfd_send_signal
-#define __NR_pidfd_send_signal -1
+#define __NR_pidfd_send_signal 424
#endif
#ifndef __NR_clone3
-#define __NR_clone3 -1
+#define __NR_clone3 435
#endif
#ifndef __NR_pidfd_getfd
-#define __NR_pidfd_getfd -1
+#define __NR_pidfd_getfd 438
#endif
#ifndef PIDFD_NONBLOCK
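With real syscall numbers in place, the fallback defines are usable even where libc lacks wrappers. A minimal, hedged sketch of driving pidfd_open()/pidfd_send_signal() through raw syscalls, assuming modern kernel headers or fallbacks like the ones above (signal 0 probes for existence without delivering anything):

	#define _GNU_SOURCE
	#include <sys/syscall.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int pidfd = syscall(__NR_pidfd_open, getpid(), 0);

		if (pidfd < 0) {
			perror("pidfd_open");
			return 1;
		}
		if (syscall(__NR_pidfd_send_signal, pidfd, 0, NULL, 0))
			perror("pidfd_send_signal");
		close(pidfd);
		return 0;
	}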
diff --git a/tools/testing/selftests/rtc/.gitignore b/tools/testing/selftests/rtc/.gitignore
index fb2d533aa575..a2afe7994e85 100644
--- a/tools/testing/selftests/rtc/.gitignore
+++ b/tools/testing/selftests/rtc/.gitignore
@@ -1,3 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
rtctest
-setdate
diff --git a/tools/testing/selftests/rtc/Makefile b/tools/testing/selftests/rtc/Makefile
index 9dbb395c5c79..547c244a2ca5 100644
--- a/tools/testing/selftests/rtc/Makefile
+++ b/tools/testing/selftests/rtc/Makefile
@@ -4,8 +4,6 @@ LDLIBS += -lrt -lpthread -lm
TEST_GEN_PROGS = rtctest
-TEST_GEN_PROGS_EXTENDED = setdate
-
TEST_FILES := settings
include ../lib.mk
diff --git a/tools/testing/selftests/rtc/rtctest.c b/tools/testing/selftests/rtc/rtctest.c
index e103097d0b5b..be175c0e6ae3 100644
--- a/tools/testing/selftests/rtc/rtctest.c
+++ b/tools/testing/selftests/rtc/rtctest.c
@@ -29,6 +29,7 @@ enum rtc_alarm_state {
RTC_ALARM_UNKNOWN,
RTC_ALARM_ENABLED,
RTC_ALARM_DISABLED,
+ RTC_ALARM_RES_MINUTE,
};
FIXTURE(rtc) {
@@ -88,7 +89,7 @@ static void nanosleep_with_retries(long ns)
}
}
-static enum rtc_alarm_state get_rtc_alarm_state(int fd)
+static enum rtc_alarm_state get_rtc_alarm_state(int fd, int need_seconds)
{
struct rtc_param param = { 0 };
int rc;
@@ -103,6 +104,10 @@ static enum rtc_alarm_state get_rtc_alarm_state(int fd)
if ((param.uvalue & _BITUL(RTC_FEATURE_ALARM)) == 0)
return RTC_ALARM_DISABLED;
+ /* Check if alarm has desired granularity */
+ if (need_seconds && (param.uvalue & _BITUL(RTC_FEATURE_ALARM_RES_MINUTE)))
+ return RTC_ALARM_RES_MINUTE;
+
return RTC_ALARM_ENABLED;
}
@@ -227,9 +232,11 @@ TEST_F(rtc, alarm_alm_set) {
SKIP(return, "Skipping test since %s does not exist", rtc_file);
ASSERT_NE(-1, self->fd);
- alarm_state = get_rtc_alarm_state(self->fd);
+ alarm_state = get_rtc_alarm_state(self->fd, 1);
if (alarm_state == RTC_ALARM_DISABLED)
SKIP(return, "Skipping test since alarms are not supported.");
+ if (alarm_state == RTC_ALARM_RES_MINUTE)
+ SKIP(return, "Skipping test since alarms has only minute granularity.");
rc = ioctl(self->fd, RTC_RD_TIME, &tm);
ASSERT_NE(-1, rc);
@@ -295,9 +302,11 @@ TEST_F(rtc, alarm_wkalm_set) {
SKIP(return, "Skipping test since %s does not exist", rtc_file);
ASSERT_NE(-1, self->fd);
- alarm_state = get_rtc_alarm_state(self->fd);
+ alarm_state = get_rtc_alarm_state(self->fd, 1);
if (alarm_state == RTC_ALARM_DISABLED)
SKIP(return, "Skipping test since alarms are not supported.");
+ if (alarm_state == RTC_ALARM_RES_MINUTE)
+ SKIP(return, "Skipping test since alarms has only minute granularity.");
rc = ioctl(self->fd, RTC_RD_TIME, &alarm.time);
ASSERT_NE(-1, rc);
@@ -357,7 +366,7 @@ TEST_F_TIMEOUT(rtc, alarm_alm_set_minute, 65) {
SKIP(return, "Skipping test since %s does not exist", rtc_file);
ASSERT_NE(-1, self->fd);
- alarm_state = get_rtc_alarm_state(self->fd);
+ alarm_state = get_rtc_alarm_state(self->fd, 0);
if (alarm_state == RTC_ALARM_DISABLED)
SKIP(return, "Skipping test since alarms are not supported.");
@@ -425,7 +434,7 @@ TEST_F_TIMEOUT(rtc, alarm_wkalm_set_minute, 65) {
SKIP(return, "Skipping test since %s does not exist", rtc_file);
ASSERT_NE(-1, self->fd);
- alarm_state = get_rtc_alarm_state(self->fd);
+ alarm_state = get_rtc_alarm_state(self->fd, 0);
if (alarm_state == RTC_ALARM_DISABLED)
SKIP(return, "Skipping test since alarms are not supported.");
diff --git a/tools/testing/selftests/rtc/setdate.c b/tools/testing/selftests/rtc/setdate.c
deleted file mode 100644
index b303890b3de2..000000000000
--- a/tools/testing/selftests/rtc/setdate.c
+++ /dev/null
@@ -1,77 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Real Time Clock Driver Test
- * by: Benjamin Gaignard (benjamin.gaignard@linaro.org)
- *
- * To build
- * gcc rtctest_setdate.c -o rtctest_setdate
- */
-
-#include <stdio.h>
-#include <linux/rtc.h>
-#include <sys/ioctl.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <errno.h>
-
-static const char default_time[] = "00:00:00";
-
-int main(int argc, char **argv)
-{
- int fd, retval;
- struct rtc_time new, current;
- const char *rtc, *date;
- const char *time = default_time;
-
- switch (argc) {
- case 4:
- time = argv[3];
- /* FALLTHROUGH */
- case 3:
- date = argv[2];
- rtc = argv[1];
- break;
- default:
- fprintf(stderr, "usage: rtctest_setdate <rtcdev> <DD-MM-YYYY> [HH:MM:SS]\n");
- return 1;
- }
-
- fd = open(rtc, O_RDONLY);
- if (fd == -1) {
- perror(rtc);
- exit(errno);
- }
-
- sscanf(date, "%d-%d-%d", &new.tm_mday, &new.tm_mon, &new.tm_year);
- new.tm_mon -= 1;
- new.tm_year -= 1900;
- sscanf(time, "%d:%d:%d", &new.tm_hour, &new.tm_min, &new.tm_sec);
-
- fprintf(stderr, "Test will set RTC date/time to %d-%d-%d, %02d:%02d:%02d.\n",
- new.tm_mday, new.tm_mon + 1, new.tm_year + 1900,
- new.tm_hour, new.tm_min, new.tm_sec);
-
- /* Write the new date in RTC */
- retval = ioctl(fd, RTC_SET_TIME, &new);
- if (retval == -1) {
- perror("RTC_SET_TIME ioctl");
- close(fd);
- exit(errno);
- }
-
- /* Read back */
- retval = ioctl(fd, RTC_RD_TIME, &current);
- if (retval == -1) {
- perror("RTC_RD_TIME ioctl");
- exit(errno);
- }
-
- fprintf(stderr, "\n\nCurrent RTC date/time is %d-%d-%d, %02d:%02d:%02d.\n",
- current.tm_mday, current.tm_mon + 1, current.tm_year + 1900,
- current.tm_hour, current.tm_min, current.tm_sec);
-
- close(fd);
- return 0;
-}
diff --git a/tools/testing/selftests/ublk/Makefile b/tools/testing/selftests/ublk/Makefile
index 7817afe29005..c7781efea0f3 100644
--- a/tools/testing/selftests/ublk/Makefile
+++ b/tools/testing/selftests/ublk/Makefile
@@ -4,6 +4,8 @@ CFLAGS += -O3 -Wl,-no-as-needed -Wall -I $(top_srcdir)
LDLIBS += -lpthread -lm -luring
TEST_PROGS := test_generic_01.sh
+TEST_PROGS += test_generic_02.sh
+TEST_PROGS += test_generic_03.sh
TEST_PROGS += test_null_01.sh
TEST_PROGS += test_null_02.sh
@@ -11,8 +13,11 @@ TEST_PROGS += test_loop_01.sh
TEST_PROGS += test_loop_02.sh
TEST_PROGS += test_loop_03.sh
TEST_PROGS += test_loop_04.sh
+TEST_PROGS += test_loop_05.sh
TEST_PROGS += test_stripe_01.sh
TEST_PROGS += test_stripe_02.sh
+TEST_PROGS += test_stripe_03.sh
+TEST_PROGS += test_stripe_04.sh
TEST_PROGS += test_stress_01.sh
TEST_PROGS += test_stress_02.sh
diff --git a/tools/testing/selftests/ublk/kublk.c b/tools/testing/selftests/ublk/kublk.c
index 05147b53c361..91c282bc7674 100644
--- a/tools/testing/selftests/ublk/kublk.c
+++ b/tools/testing/selftests/ublk/kublk.c
@@ -99,7 +99,7 @@ static int __ublk_ctrl_cmd(struct ublk_dev *dev,
static int ublk_ctrl_stop_dev(struct ublk_dev *dev)
{
struct ublk_ctrl_cmd_data data = {
- .cmd_op = UBLK_CMD_STOP_DEV,
+ .cmd_op = UBLK_U_CMD_STOP_DEV,
};
return __ublk_ctrl_cmd(dev, &data);
@@ -169,7 +169,7 @@ static int ublk_ctrl_get_params(struct ublk_dev *dev,
struct ublk_params *params)
{
struct ublk_ctrl_cmd_data data = {
- .cmd_op = UBLK_CMD_GET_PARAMS,
+ .cmd_op = UBLK_U_CMD_GET_PARAMS,
.flags = CTRL_CMD_HAS_BUF,
.addr = (__u64)params,
.len = sizeof(*params),
@@ -215,7 +215,7 @@ static void ublk_ctrl_dump(struct ublk_dev *dev)
ret = ublk_ctrl_get_params(dev, &p);
if (ret < 0) {
- ublk_err("failed to get params %m\n");
+ ublk_err("failed to get params %d %s\n", ret, strerror(-ret));
return;
}
@@ -322,7 +322,7 @@ static int ublk_queue_init(struct ublk_queue *q)
cmd_buf_size = ublk_queue_cmd_buf_sz(q);
off = UBLKSRV_CMD_BUF_OFFSET + q->q_id * ublk_queue_max_cmd_buf_sz();
- q->io_cmd_buf = (char *)mmap(0, cmd_buf_size, PROT_READ,
+ q->io_cmd_buf = mmap(0, cmd_buf_size, PROT_READ,
MAP_SHARED | MAP_POPULATE, dev->fds[0], off);
if (q->io_cmd_buf == MAP_FAILED) {
ublk_err("ublk dev %d queue %d map io_cmd_buf failed %m\n",
diff --git a/tools/testing/selftests/ublk/kublk.h b/tools/testing/selftests/ublk/kublk.h
index f31a5c4d4143..760ff8ffb810 100644
--- a/tools/testing/selftests/ublk/kublk.h
+++ b/tools/testing/selftests/ublk/kublk.h
@@ -128,7 +128,7 @@ struct ublk_queue {
unsigned int io_inflight;
struct ublk_dev *dev;
const struct ublk_tgt_ops *tgt_ops;
- char *io_cmd_buf;
+ struct ublksrv_io_desc *io_cmd_buf;
struct io_uring ring;
struct ublk_io ios[UBLK_QUEUE_DEPTH];
#define UBLKSRV_QUEUE_STOPPING (1U << 0)
@@ -302,7 +302,7 @@ static inline void ublk_mark_io_done(struct ublk_io *io, int res)
static inline const struct ublksrv_io_desc *ublk_get_iod(const struct ublk_queue *q, int tag)
{
- return (struct ublksrv_io_desc *)&(q->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]);
+ return &q->io_cmd_buf[tag];
}
static inline void ublk_set_sqe_cmd_op(struct io_uring_sqe *sqe, __u32 cmd_op)
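The type change is behavior-preserving: with io_cmd_buf declared as struct ublksrv_io_desc *, plain array indexing lands on the same bytes the old cast-and-scale expression did, and the mmap() in kublk.c needs no cast since mmap() already returns void *. Spelled out, with old_buf standing in for the former char * field:

	/* old and new addressing are byte-for-byte identical */
	struct ublksrv_io_desc *old_iod =
		(struct ublksrv_io_desc *)&old_buf[tag * sizeof(struct ublksrv_io_desc)];
	struct ublksrv_io_desc *new_iod = &q->io_cmd_buf[tag];	/* same address */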
diff --git a/tools/testing/selftests/ublk/null.c b/tools/testing/selftests/ublk/null.c
index 899875ff50fe..91fec3690d4b 100644
--- a/tools/testing/selftests/ublk/null.c
+++ b/tools/testing/selftests/ublk/null.c
@@ -17,7 +17,8 @@ static int ublk_null_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
dev->tgt.dev_size = dev_size;
dev->tgt.params = (struct ublk_params) {
- .types = UBLK_PARAM_TYPE_BASIC,
+ .types = UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DMA_ALIGN |
+ UBLK_PARAM_TYPE_SEGMENT,
.basic = {
.logical_bs_shift = 9,
.physical_bs_shift = 12,
@@ -26,6 +27,14 @@ static int ublk_null_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
.max_sectors = info->max_io_buf_bytes >> 9,
.dev_sectors = dev_size >> 9,
},
+ .dma = {
+ .alignment = 4095,
+ },
+ .seg = {
+ .seg_boundary_mask = 4095,
+ .max_segment_size = 32 << 10,
+ .max_segments = 32,
+ },
};
if (info->flags & UBLK_F_SUPPORT_ZERO_COPY)
diff --git a/tools/testing/selftests/ublk/stripe.c b/tools/testing/selftests/ublk/stripe.c
index 98c564b12f3c..179731c3dd6f 100644
--- a/tools/testing/selftests/ublk/stripe.c
+++ b/tools/testing/selftests/ublk/stripe.c
@@ -111,43 +111,67 @@ static void calculate_stripe_array(const struct stripe_conf *conf,
}
}
-static inline enum io_uring_op stripe_to_uring_op(const struct ublksrv_io_desc *iod)
+static inline enum io_uring_op stripe_to_uring_op(
+ const struct ublksrv_io_desc *iod, int zc)
{
unsigned ublk_op = ublksrv_get_op(iod);
if (ublk_op == UBLK_IO_OP_READ)
- return IORING_OP_READV;
+ return zc ? IORING_OP_READV_FIXED : IORING_OP_READV;
else if (ublk_op == UBLK_IO_OP_WRITE)
- return IORING_OP_WRITEV;
+ return zc ? IORING_OP_WRITEV_FIXED : IORING_OP_WRITEV;
assert(0);
}
static int stripe_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
{
const struct stripe_conf *conf = get_chunk_shift(q);
- enum io_uring_op op = stripe_to_uring_op(iod);
+ int zc = !!(ublk_queue_use_zc(q) != 0);
+ enum io_uring_op op = stripe_to_uring_op(iod, zc);
struct io_uring_sqe *sqe[NR_STRIPE];
struct stripe_array *s = alloc_stripe_array(conf, iod);
struct ublk_io *io = ublk_get_io(q, tag);
- int i;
+ int i, extra = zc ? 2 : 0;
io->private_data = s;
calculate_stripe_array(conf, iod, s);
- ublk_queue_alloc_sqes(q, sqe, s->nr);
- for (i = 0; i < s->nr; i++) {
- struct stripe *t = &s->s[i];
+ ublk_queue_alloc_sqes(q, sqe, s->nr + extra);
+
+ if (zc) {
+ io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag);
+ sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
+ sqe[0]->user_data = build_user_data(tag,
+ ublk_cmd_op_nr(sqe[0]->cmd_op), 0, 1);
+ }
+
+ for (i = zc; i < s->nr + extra - zc; i++) {
+ struct stripe *t = &s->s[i - zc];
io_uring_prep_rw(op, sqe[i],
t->seq + 1,
(void *)t->vec,
t->nr_vec,
t->start << 9);
- io_uring_sqe_set_flags(sqe[i], IOSQE_FIXED_FILE);
+ if (zc) {
+ sqe[i]->buf_index = tag;
+ io_uring_sqe_set_flags(sqe[i],
+ IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK);
+ } else {
+ io_uring_sqe_set_flags(sqe[i], IOSQE_FIXED_FILE);
+ }
/* bit63 marks us as tgt io */
- sqe[i]->user_data = build_user_data(tag, ublksrv_get_op(iod), i, 1);
+ sqe[i]->user_data = build_user_data(tag, ublksrv_get_op(iod), i - zc, 1);
+ }
+ if (zc) {
+ struct io_uring_sqe *unreg = sqe[s->nr + 1];
+
+ io_uring_prep_buf_unregister(unreg, 0, tag, q->q_id, tag);
+ unreg->user_data = build_user_data(tag, ublk_cmd_op_nr(unreg->cmd_op), 0, 1);
}
- return s->nr;
+
+ /* register buffer is skip_success */
+ return s->nr + zc;
}
static int handle_flush(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
@@ -208,19 +232,27 @@ static void ublk_stripe_io_done(struct ublk_queue *q, int tag,
struct ublk_io *io = ublk_get_io(q, tag);
int res = cqe->res;
- if (res < 0) {
+ if (res < 0 || op != ublk_cmd_op_nr(UBLK_U_IO_UNREGISTER_IO_BUF)) {
if (!io->result)
io->result = res;
- ublk_err("%s: io failure %d tag %u\n", __func__, res, tag);
+ if (res < 0)
+ ublk_err("%s: io failure %d tag %u\n", __func__, res, tag);
}
+ /* buffer register op is IOSQE_CQE_SKIP_SUCCESS */
+ if (op == ublk_cmd_op_nr(UBLK_U_IO_REGISTER_IO_BUF))
+ io->tgt_ios += 1;
+
/* fail short READ/WRITE simply */
if (op == UBLK_IO_OP_READ || op == UBLK_IO_OP_WRITE) {
unsigned seq = user_data_to_tgt_data(cqe->user_data);
struct stripe_array *s = io->private_data;
- if (res < s->s[seq].vec->iov_len)
+ if (res < s->s[seq].nr_sects << 9) {
io->result = -EIO;
+ ublk_err("%s: short rw op %u res %d exp %u tag %u\n",
+ __func__, op, res, s->s[seq].vec->iov_len, tag);
+ }
}
if (ublk_completed_tgt_io(q, tag)) {
@@ -253,7 +285,7 @@ static int ublk_stripe_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
struct stripe_conf *conf;
unsigned chunk_shift;
loff_t bytes = 0;
- int ret, i;
+ int ret, i, mul = 1;
if ((chunk_size & (chunk_size - 1)) || !chunk_size) {
ublk_err("invalid chunk size %u\n", chunk_size);
@@ -295,8 +327,11 @@ static int ublk_stripe_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
dev->tgt.dev_size = bytes;
p.basic.dev_sectors = bytes >> 9;
dev->tgt.params = p;
- dev->tgt.sq_depth = dev->dev_info.queue_depth * conf->nr_files;
- dev->tgt.cq_depth = dev->dev_info.queue_depth * conf->nr_files;
+
+ if (dev->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY)
+ mul = 2;
+ dev->tgt.sq_depth = mul * dev->dev_info.queue_depth * conf->nr_files;
+ dev->tgt.cq_depth = mul * dev->dev_info.queue_depth * conf->nr_files;
printf("%s: shift %u files %u\n", __func__, conf->shift, conf->nr_files);
diff --git a/tools/testing/selftests/ublk/test_common.sh b/tools/testing/selftests/ublk/test_common.sh
index 75f54ac6b1c4..a88b35943227 100755
--- a/tools/testing/selftests/ublk/test_common.sh
+++ b/tools/testing/selftests/ublk/test_common.sh
@@ -23,6 +23,12 @@ _get_disk_dev_t() {
echo $(( (major & 0xfff) << 20 | (minor & 0xfffff) ))
}
+_run_fio_verify_io() {
+ fio --name=verify --rw=randwrite --direct=1 --ioengine=libaio \
+ --bs=8k --iodepth=32 --verify=crc32c --do_verify=1 \
+ --verify_state_save=0 "$@" > /dev/null
+}
+
_create_backfile() {
local my_size=$1
local my_file
diff --git a/tools/testing/selftests/ublk/test_generic_02.sh b/tools/testing/selftests/ublk/test_generic_02.sh
new file mode 100755
index 000000000000..3e80121e3bf5
--- /dev/null
+++ b/tools/testing/selftests/ublk/test_generic_02.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
+
+TID="generic_02"
+ERR_CODE=0
+
+if ! _have_program bpftrace; then
+ exit "$UBLK_SKIP_CODE"
+fi
+
+_prep_test "null" "sequential io order for MQ"
+
+dev_id=$(_add_ublk_dev -t null -q 2)
+_check_add_dev $TID $?
+
+dev_t=$(_get_disk_dev_t "$dev_id")
+bpftrace trace/seq_io.bt "$dev_t" "W" 1 > "$UBLK_TMP" 2>&1 &
+btrace_pid=$!
+sleep 2
+
+if ! kill -0 "$btrace_pid" > /dev/null 2>&1; then
+ _cleanup_test "null"
+ exit "$UBLK_SKIP_CODE"
+fi
+
+# run fio over this ublk disk
+fio --name=write_seq \
+ --filename=/dev/ublkb"${dev_id}" \
+ --ioengine=libaio --iodepth=16 \
+ --rw=write \
+ --size=512M \
+ --direct=1 \
+ --bs=4k > /dev/null 2>&1
+ERR_CODE=$?
+kill "$btrace_pid"
+wait
+if grep -q "io_out_of_order" "$UBLK_TMP"; then
+ cat "$UBLK_TMP"
+ ERR_CODE=255
+fi
+_cleanup_test "null"
+_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_generic_03.sh b/tools/testing/selftests/ublk/test_generic_03.sh
new file mode 100755
index 000000000000..b551aa76cb0d
--- /dev/null
+++ b/tools/testing/selftests/ublk/test_generic_03.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
+
+TID="generic_03"
+ERR_CODE=0
+
+_prep_test "null" "check dma & segment limits for zero copy"
+
+dev_id=$(_add_ublk_dev -t null -z)
+_check_add_dev $TID $?
+
+sysfs_path=/sys/block/ublkb"${dev_id}"
+dma_align=$(cat "$sysfs_path"/queue/dma_alignment)
+max_segments=$(cat "$sysfs_path"/queue/max_segments)
+max_segment_size=$(cat "$sysfs_path"/queue/max_segment_size)
+if [ "$dma_align" != "4095" ]; then
+ ERR_CODE=255
+fi
+if [ "$max_segments" != "32" ]; then
+ ERR_CODE=255
+fi
+if [ "$max_segment_size" != "32768" ]; then
+ ERR_CODE=255
+fi
+_cleanup_test "null"
+_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_loop_01.sh b/tools/testing/selftests/ublk/test_loop_01.sh
index c882d2a08e13..1ef8b6044777 100755
--- a/tools/testing/selftests/ublk/test_loop_01.sh
+++ b/tools/testing/selftests/ublk/test_loop_01.sh
@@ -6,6 +6,10 @@
TID="loop_01"
ERR_CODE=0
+if ! _have_program fio; then
+ exit "$UBLK_SKIP_CODE"
+fi
+
_prep_test "loop" "write and verify test"
backfile_0=$(_create_backfile 256M)
@@ -14,15 +18,7 @@ dev_id=$(_add_ublk_dev -t loop "$backfile_0")
_check_add_dev $TID $? "${backfile_0}"
# run fio over the ublk disk
-fio --name=write_and_verify \
- --filename=/dev/ublkb"${dev_id}" \
- --ioengine=libaio --iodepth=16 \
- --rw=write \
- --size=256M \
- --direct=1 \
- --verify=crc32c \
- --do_verify=1 \
- --bs=4k > /dev/null 2>&1
+_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=256M
ERR_CODE=$?
_cleanup_test "loop"
diff --git a/tools/testing/selftests/ublk/test_loop_03.sh b/tools/testing/selftests/ublk/test_loop_03.sh
index 269c96787d7d..e9ca744de8b1 100755
--- a/tools/testing/selftests/ublk/test_loop_03.sh
+++ b/tools/testing/selftests/ublk/test_loop_03.sh
@@ -6,6 +6,10 @@
TID="loop_03"
ERR_CODE=0
+if ! _have_program fio; then
+ exit "$UBLK_SKIP_CODE"
+fi
+
_prep_test "loop" "write and verify over zero copy"
backfile_0=$(_create_backfile 256M)
@@ -13,15 +17,7 @@ dev_id=$(_add_ublk_dev -t loop -z "$backfile_0")
_check_add_dev $TID $? "$backfile_0"
# run fio over the ublk disk
-fio --name=write_and_verify \
- --filename=/dev/ublkb"${dev_id}" \
- --ioengine=libaio --iodepth=64 \
- --rw=write \
- --size=256M \
- --direct=1 \
- --verify=crc32c \
- --do_verify=1 \
- --bs=4k > /dev/null 2>&1
+_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=256M
ERR_CODE=$?
_cleanup_test "loop"
diff --git a/tools/testing/selftests/ublk/test_loop_05.sh b/tools/testing/selftests/ublk/test_loop_05.sh
new file mode 100755
index 000000000000..2e6e2e6978fc
--- /dev/null
+++ b/tools/testing/selftests/ublk/test_loop_05.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
+
+TID="loop_05"
+ERR_CODE=0
+
+if ! _have_program fio; then
+ exit "$UBLK_SKIP_CODE"
+fi
+
+_prep_test "loop" "write and verify test"
+
+backfile_0=$(_create_backfile 256M)
+
+dev_id=$(_add_ublk_dev -q 2 -t loop "$backfile_0")
+_check_add_dev $TID $? "${backfile_0}"
+
+# run fio over the ublk disk
+_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=256M
+ERR_CODE=$?
+
+_cleanup_test "loop"
+
+_remove_backfile "$backfile_0"
+
+_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stress_01.sh b/tools/testing/selftests/ublk/test_stress_01.sh
index 7177f6c57bc5..a8be24532b24 100755
--- a/tools/testing/selftests/ublk/test_stress_01.sh
+++ b/tools/testing/selftests/ublk/test_stress_01.sh
@@ -27,20 +27,20 @@ ublk_io_and_remove()
_prep_test "stress" "run IO and remove device"
-ublk_io_and_remove 8G -t null
+ublk_io_and_remove 8G -t null -q 4
ERR_CODE=$?
if [ ${ERR_CODE} -ne 0 ]; then
_show_result $TID $ERR_CODE
fi
BACK_FILE=$(_create_backfile 256M)
-ublk_io_and_remove 256M -t loop "${BACK_FILE}"
+ublk_io_and_remove 256M -t loop -q 4 "${BACK_FILE}"
ERR_CODE=$?
if [ ${ERR_CODE} -ne 0 ]; then
_show_result $TID $ERR_CODE
fi
-ublk_io_and_remove 256M -t loop -z "${BACK_FILE}"
+ublk_io_and_remove 256M -t loop -q 4 -z "${BACK_FILE}"
ERR_CODE=$?
_cleanup_test "stress"
_remove_backfile "${BACK_FILE}"
diff --git a/tools/testing/selftests/ublk/test_stress_02.sh b/tools/testing/selftests/ublk/test_stress_02.sh
index 2a8e60579a06..2159e4cc8140 100755
--- a/tools/testing/selftests/ublk/test_stress_02.sh
+++ b/tools/testing/selftests/ublk/test_stress_02.sh
@@ -27,20 +27,20 @@ ublk_io_and_kill_daemon()
_prep_test "stress" "run IO and kill ublk server"
-ublk_io_and_kill_daemon 8G -t null
+ublk_io_and_kill_daemon 8G -t null -q 4
ERR_CODE=$?
if [ ${ERR_CODE} -ne 0 ]; then
_show_result $TID $ERR_CODE
fi
BACK_FILE=$(_create_backfile 256M)
-ublk_io_and_kill_daemon 256M -t loop "${BACK_FILE}"
+ublk_io_and_kill_daemon 256M -t loop -q 4 "${BACK_FILE}"
ERR_CODE=$?
if [ ${ERR_CODE} -ne 0 ]; then
_show_result $TID $ERR_CODE
fi
-ublk_io_and_kill_daemon 256M -t loop -z "${BACK_FILE}"
+ublk_io_and_kill_daemon 256M -t loop -q 4 -z "${BACK_FILE}"
ERR_CODE=$?
_cleanup_test "stress"
_remove_backfile "${BACK_FILE}"
diff --git a/tools/testing/selftests/ublk/test_stripe_01.sh b/tools/testing/selftests/ublk/test_stripe_01.sh
index c01f3dc325ab..7e387ef656ea 100755
--- a/tools/testing/selftests/ublk/test_stripe_01.sh
+++ b/tools/testing/selftests/ublk/test_stripe_01.sh
@@ -6,6 +6,10 @@
TID="stripe_01"
ERR_CODE=0
+if ! _have_program fio; then
+ exit "$UBLK_SKIP_CODE"
+fi
+
_prep_test "stripe" "write and verify test"
backfile_0=$(_create_backfile 256M)
@@ -15,15 +19,7 @@ dev_id=$(_add_ublk_dev -t stripe "$backfile_0" "$backfile_1")
_check_add_dev $TID $? "${backfile_0}"
# run fio over the ublk disk
-fio --name=write_and_verify \
- --filename=/dev/ublkb"${dev_id}" \
- --ioengine=libaio --iodepth=32 \
- --rw=write \
- --size=512M \
- --direct=1 \
- --verify=crc32c \
- --do_verify=1 \
- --bs=4k > /dev/null 2>&1
+_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=512M
ERR_CODE=$?
_cleanup_test "stripe"
diff --git a/tools/testing/selftests/ublk/test_stripe_03.sh b/tools/testing/selftests/ublk/test_stripe_03.sh
new file mode 100755
index 000000000000..c1b34af36145
--- /dev/null
+++ b/tools/testing/selftests/ublk/test_stripe_03.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
+
+TID="stripe_03"
+ERR_CODE=0
+
+if ! _have_program fio; then
+ exit "$UBLK_SKIP_CODE"
+fi
+
+_prep_test "stripe" "write and verify test"
+
+backfile_0=$(_create_backfile 256M)
+backfile_1=$(_create_backfile 256M)
+
+dev_id=$(_add_ublk_dev -q 2 -t stripe "$backfile_0" "$backfile_1")
+_check_add_dev $TID $? "${backfile_0}"
+
+# run fio over the ublk disk
+_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=512M
+ERR_CODE=$?
+
+_cleanup_test "stripe"
+
+_remove_backfile "$backfile_0"
+_remove_backfile "$backfile_1"
+
+_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/x86/test_mremap_vdso.c b/tools/testing/selftests/x86/test_mremap_vdso.c
index d53959e03593..94bee6e0c813 100644
--- a/tools/testing/selftests/x86/test_mremap_vdso.c
+++ b/tools/testing/selftests/x86/test_mremap_vdso.c
@@ -14,6 +14,7 @@
#include <errno.h>
#include <unistd.h>
#include <string.h>
+#include <stdbool.h>
#include <sys/mman.h>
#include <sys/auxv.h>
@@ -55,13 +56,55 @@ static int try_to_remap(void *vdso_addr, unsigned long size)
}
+#define VDSO_NAME "[vdso]"
+#define VMFLAGS "VmFlags:"
+#define MSEAL_FLAGS "sl"
+#define MAX_LINE_LEN 512
+
+static bool vdso_sealed(FILE *maps)
+{
+ char line[MAX_LINE_LEN];
+ bool has_vdso = false;
+
+ while (fgets(line, sizeof(line), maps)) {
+ if (strstr(line, VDSO_NAME))
+ has_vdso = true;
+
+ if (has_vdso && !strncmp(line, VMFLAGS, strlen(VMFLAGS))) {
+ if (strstr(line, MSEAL_FLAGS))
+ return true;
+
+ return false;
+ }
+ }
+
+ return false;
+}
+
int main(int argc, char **argv, char **envp)
{
pid_t child;
+ FILE *maps;
ksft_print_header();
ksft_set_plan(1);
+ maps = fopen("/proc/self/smaps", "r");
+ if (!maps) {
+ ksft_test_result_skip(
+ "Could not open /proc/self/smaps, errno=%d\n",
+ errno);
+
+ return 0;
+ }
+
+	if (vdso_sealed(maps)) {
+		fclose(maps);
+		ksft_test_result_skip("vdso is sealed\n");
+		return 0;
+	}
+
+	fclose(maps);
child = fork();
if (child == -1)
ksft_exit_fail_msg("failed to fork (%d): %m\n", errno);
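The vdso_sealed() helper above keys off the "sl" token in the VmFlags line that /proc/PID/smaps emits for the [vdso] mapping. A standalone sketch of the same probe, generalised to any named mapping (illustrative only: mapping_sealed() is a hypothetical name, <stdbool.h>/<stdio.h>/<string.h> are assumed, and the only other assumptions are the documented smaps layout and its two-letter flag tokens):

	static bool mapping_sealed(const char *name)
	{
		char line[512];
		bool found = false;
		FILE *maps = fopen("/proc/self/smaps", "r");

		if (!maps)
			return false;

		while (fgets(line, sizeof(line), maps)) {
			/* Header line of the mapping we care about. */
			if (strstr(line, name))
				found = true;

			/* First VmFlags: line after that header. */
			if (found && !strncmp(line, "VmFlags:", 8)) {
				bool sealed = strstr(line, "sl") != NULL;

				fclose(maps);
				return sealed;
			}
		}

		fclose(maps);
		return false;
	}

Called as mapping_sealed("[vdso]"), this mirrors the check the test performs before deciding whether remapping the vDSO can be attempted at all.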
diff --git a/tools/testing/shared/interval_tree-shim.c b/tools/testing/shared/interval_tree-shim.c
new file mode 100644
index 000000000000..122e74756571
--- /dev/null
+++ b/tools/testing/shared/interval_tree-shim.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Very simple shim around the interval tree. */
+
+#include "../../../lib/interval_tree.c"
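This shim, together with the headers added alongside it, lets the userspace test build compile lib/interval_tree.c completely unchanged. A minimal sketch of what a test linked against it can then do, using only the API declared in include/linux/interval_tree.h (the demo() wrapper and the constants are illustrative):

	#include <linux/interval_tree.h>
	#include <stdio.h>

	static struct rb_root_cached root = RB_ROOT_CACHED;

	static void demo(void)
	{
		static struct interval_tree_node node = {
			.start = 0x1000,
			.last = 0x1fff,		/* interval ends are inclusive */
		};
		struct interval_tree_node *hit;

		interval_tree_insert(&node, &root);

		/* Stabbing query: anything overlapping [0x1800, 0x1800]? */
		hit = interval_tree_iter_first(&root, 0x1800, 0x1800);
		if (hit)
			printf("overlap: [0x%lx, 0x%lx]\n", hit->start, hit->last);

		interval_tree_remove(&node, &root);
	}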
diff --git a/tools/testing/shared/linux/interval_tree.h b/tools/testing/shared/linux/interval_tree.h
new file mode 100644
index 000000000000..129faf9f1d0a
--- /dev/null
+++ b/tools/testing/shared/linux/interval_tree.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TEST_INTERVAL_TREE_H
+#define _TEST_INTERVAL_TREE_H
+
+#include "../../../../include/linux/interval_tree.h"
+
+#endif /* _TEST_INTERVAL_TREE_H */
diff --git a/tools/testing/shared/linux/interval_tree_generic.h b/tools/testing/shared/linux/interval_tree_generic.h
new file mode 100644
index 000000000000..34cd654bee61
--- /dev/null
+++ b/tools/testing/shared/linux/interval_tree_generic.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include "../../../../include/linux/interval_tree_generic.h"
diff --git a/tools/testing/shared/linux/rbtree.h b/tools/testing/shared/linux/rbtree.h
new file mode 100644
index 000000000000..d644bb7360bf
--- /dev/null
+++ b/tools/testing/shared/linux/rbtree.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TEST_RBTREE_H
+#define _TEST_RBTREE_H
+
+#include <linux/kernel.h>
+#include "../../../../include/linux/rbtree.h"
+
+#endif /* _TEST_RBTREE_H */
diff --git a/tools/testing/shared/linux/rbtree_augmented.h b/tools/testing/shared/linux/rbtree_augmented.h
new file mode 100644
index 000000000000..ad138fcf6652
--- /dev/null
+++ b/tools/testing/shared/linux/rbtree_augmented.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TEST_RBTREE_AUGMENTED_H
+#define _TEST_RBTREE_AUGMENTED_H
+
+#include "../../../../include/linux/rbtree_augmented.h"
+
+#endif /* _TEST_RBTREE_AUGMENTED_H */
diff --git a/tools/testing/shared/linux/rbtree_types.h b/tools/testing/shared/linux/rbtree_types.h
new file mode 100644
index 000000000000..194194a5bf92
--- /dev/null
+++ b/tools/testing/shared/linux/rbtree_types.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TEST_RBTREE_TYPES_H
+#define _TEST_RBTREE_TYPES_H
+
+#include "../../../../include/linux/rbtree_types.h"
+
+#endif /* _TEST_RBTREE_TYPES_H */
diff --git a/tools/testing/shared/rbtree-shim.c b/tools/testing/shared/rbtree-shim.c
new file mode 100644
index 000000000000..7692a993e5f1
--- /dev/null
+++ b/tools/testing/shared/rbtree-shim.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Very simple shim around the rbtree. */
+
+#include "../../../lib/rbtree.c"
diff --git a/tools/testing/vma/linux/atomic.h b/tools/testing/vma/linux/atomic.h
index 3e1b6adc027b..788c597c4fde 100644
--- a/tools/testing/vma/linux/atomic.h
+++ b/tools/testing/vma/linux/atomic.h
@@ -9,4 +9,9 @@
#define atomic_set(x, y) uatomic_set(x, y)
#define U8_MAX UCHAR_MAX
+#ifndef atomic_cmpxchg_relaxed
+#define atomic_cmpxchg_relaxed uatomic_cmpxchg
+#define atomic_cmpxchg_release uatomic_cmpxchg
+#endif /* atomic_cmpxchg_relaxed */
+
#endif /* _LINUX_ATOMIC_H */
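Both kernel orderings are collapsed onto uatomic_cmpxchg(), which is a full barrier in liburcu; the relaxed/release distinction is deliberately lost, which is safe for a test harness because stronger ordering can only mask reorderings, never introduce them. For context, the shape of caller this macro serves is the usual compare-and-swap retry loop (sketch only: refcnt_add_not_zero() is an illustrative stand-in, and it assumes the harness also maps atomic_read()):

	static bool refcnt_add_not_zero(int i, atomic_t *r)
	{
		int old;

		do {
			old = atomic_read(r);
			if (!old)
				return false;
			/* cmpxchg returns the value it saw; mismatch == lost race */
		} while (atomic_cmpxchg_relaxed(r, old, old + i) != old);

		return true;
	}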
diff --git a/tools/testing/vma/vma.c b/tools/testing/vma/vma.c
index 04ab45e27fb8..11f761769b5b 100644
--- a/tools/testing/vma/vma.c
+++ b/tools/testing/vma/vma.c
@@ -74,11 +74,23 @@ static struct vm_area_struct *alloc_vma(struct mm_struct *mm,
ret->vm_end = end;
ret->vm_pgoff = pgoff;
ret->__vm_flags = flags;
+ vma_assert_detached(ret);
return ret;
}
/* Helper function to allocate a VMA and link it to the tree. */
+static int attach_vma(struct mm_struct *mm, struct vm_area_struct *vma)
+{
+ int res;
+
+ res = vma_link(mm, vma);
+ if (!res)
+ vma_assert_attached(vma);
+ return res;
+}
+
+/* Helper function to allocate a VMA and link it to the tree. */
static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
unsigned long start,
unsigned long end,
@@ -90,7 +102,7 @@ static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
if (vma == NULL)
return NULL;
- if (vma_link(mm, vma)) {
+ if (attach_vma(mm, vma)) {
vm_area_free(vma);
return NULL;
}
@@ -108,6 +120,7 @@ static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
/* Helper function which provides a wrapper around a merge new VMA operation. */
static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
{
+ struct vm_area_struct *vma;
/*
* For convenience, get prev and next VMAs. Which the new VMA operation
* requires.
@@ -116,7 +129,11 @@ static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
vmg->prev = vma_prev(vmg->vmi);
vma_iter_next_range(vmg->vmi);
- return vma_merge_new_range(vmg);
+ vma = vma_merge_new_range(vmg);
+ if (vma)
+ vma_assert_attached(vma);
+
+ return vma;
}
/*
@@ -125,7 +142,12 @@ static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
*/
static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
{
- return vma_merge_existing_range(vmg);
+ struct vm_area_struct *vma;
+
+ vma = vma_merge_existing_range(vmg);
+ if (vma)
+ vma_assert_attached(vma);
+ return vma;
}
/*
@@ -147,13 +169,20 @@ static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
vma_iter_set(vmg->vmi, start);
vmg->prev = NULL;
+ vmg->middle = NULL;
vmg->next = NULL;
- vmg->vma = NULL;
+ vmg->target = NULL;
vmg->start = start;
vmg->end = end;
vmg->pgoff = pgoff;
vmg->flags = flags;
+
+ vmg->just_expand = false;
+ vmg->__remove_middle = false;
+ vmg->__remove_next = false;
+ vmg->__adjust_middle_start = false;
+ vmg->__adjust_next_start = false;
}
/*
@@ -253,8 +282,8 @@ static bool test_simple_merge(void)
.pgoff = 1,
};
- ASSERT_FALSE(vma_link(&mm, vma_left));
- ASSERT_FALSE(vma_link(&mm, vma_right));
+ ASSERT_FALSE(attach_vma(&mm, vma_left));
+ ASSERT_FALSE(attach_vma(&mm, vma_right));
vma = merge_new(&vmg);
ASSERT_NE(vma, NULL);
@@ -278,7 +307,7 @@ static bool test_simple_modify(void)
struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
VMA_ITERATOR(vmi, &mm, 0x1000);
- ASSERT_FALSE(vma_link(&mm, init_vma));
+ ASSERT_FALSE(attach_vma(&mm, init_vma));
/*
* The flags will not be changed, the vma_modify_flags() function
@@ -338,13 +367,13 @@ static bool test_simple_expand(void)
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
.vmi = &vmi,
- .vma = vma,
+ .middle = vma,
.start = 0,
.end = 0x3000,
.pgoff = 0,
};
- ASSERT_FALSE(vma_link(&mm, vma));
+ ASSERT_FALSE(attach_vma(&mm, vma));
ASSERT_FALSE(expand_existing(&vmg));
@@ -365,7 +394,7 @@ static bool test_simple_shrink(void)
struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
VMA_ITERATOR(vmi, &mm, 0);
- ASSERT_FALSE(vma_link(&mm, vma));
+ ASSERT_FALSE(attach_vma(&mm, vma));
ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));
@@ -631,7 +660,7 @@ static bool test_vma_merge_special_flags(void)
*/
vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
ASSERT_NE(vma, NULL);
- vmg.vma = vma;
+ vmg.middle = vma;
for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
vm_flags_t special_flag = special_flags[i];
@@ -760,7 +789,7 @@ static bool test_vma_merge_with_close(void)
vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
vmg.prev = vma_prev;
- vmg.vma = vma;
+ vmg.middle = vma;
/*
* The VMA being modified in a way that would otherwise merge should
@@ -787,7 +816,7 @@ static bool test_vma_merge_with_close(void)
vma->vm_ops = &vm_ops;
vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
/*
* Initially this is misapprehended as an out of memory report, as the
@@ -817,7 +846,7 @@ static bool test_vma_merge_with_close(void)
vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
vmg.prev = vma_prev;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
@@ -843,7 +872,7 @@ static bool test_vma_merge_with_close(void)
vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
vmg.prev = vma_prev;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
@@ -940,7 +969,7 @@ static bool test_merge_existing(void)
vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
vma_next->vm_ops = &vm_ops; /* This should have no impact. */
vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
- vmg.vma = vma;
+ vmg.middle = vma;
vmg.prev = vma;
vma->anon_vma = &dummy_anon_vma;
ASSERT_EQ(merge_existing(&vmg), vma_next);
@@ -973,7 +1002,7 @@ static bool test_merge_existing(void)
vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
vma_next->vm_ops = &vm_ops; /* This should have no impact. */
vmg_set_range(&vmg, 0x2000, 0x6000, 2, flags);
- vmg.vma = vma;
+ vmg.middle = vma;
vma->anon_vma = &dummy_anon_vma;
ASSERT_EQ(merge_existing(&vmg), vma_next);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
@@ -1003,7 +1032,7 @@ static bool test_merge_existing(void)
vma->vm_ops = &vm_ops; /* This should have no impact. */
vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
vmg.prev = vma_prev;
- vmg.vma = vma;
+ vmg.middle = vma;
vma->anon_vma = &dummy_anon_vma;
ASSERT_EQ(merge_existing(&vmg), vma_prev);
@@ -1037,7 +1066,7 @@ static bool test_merge_existing(void)
vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
vmg.prev = vma_prev;
- vmg.vma = vma;
+ vmg.middle = vma;
vma->anon_vma = &dummy_anon_vma;
ASSERT_EQ(merge_existing(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
@@ -1067,7 +1096,7 @@ static bool test_merge_existing(void)
vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
vmg.prev = vma_prev;
- vmg.vma = vma;
+ vmg.middle = vma;
vma->anon_vma = &dummy_anon_vma;
ASSERT_EQ(merge_existing(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
@@ -1102,37 +1131,37 @@ static bool test_merge_existing(void)
vmg_set_range(&vmg, 0x4000, 0x5000, 4, flags);
vmg.prev = vma;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
vmg.prev = vma;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
vmg_set_range(&vmg, 0x6000, 0x7000, 6, flags);
vmg.prev = vma;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
vmg_set_range(&vmg, 0x4000, 0x7000, 4, flags);
vmg.prev = vma;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
vmg_set_range(&vmg, 0x4000, 0x6000, 4, flags);
vmg.prev = vma;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
vmg.prev = vma;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
@@ -1197,7 +1226,7 @@ static bool test_anon_vma_non_mergeable(void)
vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
vmg.prev = vma_prev;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
@@ -1277,7 +1306,7 @@ static bool test_dup_anon_vma(void)
vma_next->anon_vma = &dummy_anon_vma;
vmg_set_range(&vmg, 0, 0x5000, 0, flags);
- vmg.vma = vma_prev;
+ vmg.middle = vma_prev;
vmg.next = vma_next;
ASSERT_EQ(expand_existing(&vmg), 0);
@@ -1309,7 +1338,7 @@ static bool test_dup_anon_vma(void)
vma_next->anon_vma = &dummy_anon_vma;
vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
vmg.prev = vma_prev;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
@@ -1338,7 +1367,7 @@ static bool test_dup_anon_vma(void)
vma->anon_vma = &dummy_anon_vma;
vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
vmg.prev = vma_prev;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
@@ -1366,7 +1395,7 @@ static bool test_dup_anon_vma(void)
vma->anon_vma = &dummy_anon_vma;
vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
vmg.prev = vma_prev;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
@@ -1394,7 +1423,7 @@ static bool test_dup_anon_vma(void)
vma->anon_vma = &dummy_anon_vma;
vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
vmg.prev = vma;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), vma_next);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
@@ -1432,7 +1461,7 @@ static bool test_vmi_prealloc_fail(void)
vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
vmg.prev = vma_prev;
- vmg.vma = vma;
+ vmg.middle = vma;
fail_prealloc = true;
@@ -1458,7 +1487,7 @@ static bool test_vmi_prealloc_fail(void)
vma->anon_vma = &dummy_anon_vma;
vmg_set_range(&vmg, 0, 0x5000, 3, flags);
- vmg.vma = vma_prev;
+ vmg.middle = vma_prev;
vmg.next = vma;
fail_prealloc = true;
@@ -1515,11 +1544,11 @@ static bool test_copy_vma(void)
vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks);
-
ASSERT_NE(vma_new, vma);
ASSERT_EQ(vma_new->vm_start, 0);
ASSERT_EQ(vma_new->vm_end, 0x2000);
ASSERT_EQ(vma_new->vm_pgoff, 0);
+ vma_assert_attached(vma_new);
cleanup_mm(&mm, &vmi);
@@ -1528,6 +1557,7 @@ static bool test_copy_vma(void)
vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, flags);
vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks);
+ vma_assert_attached(vma_new);
ASSERT_EQ(vma_new, vma_next);
@@ -1546,7 +1576,7 @@ static bool test_expand_only_mode(void)
/*
* Place a VMA prior to the one we're expanding so we assert that we do
* not erroneously try to traverse to the previous VMA even though we
- * have, through the use of VMG_FLAG_JUST_EXPAND, indicated we do not
+ * have, through the use of the just_expand flag, indicated we do not
* need to do so.
*/
alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
@@ -1558,7 +1588,7 @@ static bool test_expand_only_mode(void)
vma_iter_set(&vmi, 0x3000);
vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
vmg.prev = vma_prev;
- vmg.merge_flags = VMG_FLAG_JUST_EXPAND;
+ vmg.just_expand = true;
vma = vma_merge_new_range(&vmg);
ASSERT_NE(vma, NULL);
@@ -1569,6 +1599,7 @@ static bool test_expand_only_mode(void)
ASSERT_EQ(vma->vm_pgoff, 3);
ASSERT_TRUE(vma_write_started(vma));
ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);
+ vma_assert_attached(vma);
cleanup_mm(&mm, &vmi);
return true;
diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h
index 1eae23039854..572ab2cea763 100644
--- a/tools/testing/vma/vma_internal.h
+++ b/tools/testing/vma/vma_internal.h
@@ -25,7 +25,7 @@
#include <linux/maple_tree.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
-#include <linux/rwsem.h>
+#include <linux/refcount.h>
extern unsigned long stack_guard_gap;
#ifdef CONFIG_MMU
@@ -135,10 +135,6 @@ typedef __bitwise unsigned int vm_fault_t;
*/
#define pr_warn_once pr_err
-typedef struct refcount_struct {
- atomic_t refs;
-} refcount_t;
-
struct kref {
refcount_t refcount;
};
@@ -233,15 +229,12 @@ struct mm_struct {
unsigned long flags; /* Must use atomic bitops to access */
};
-struct vma_lock {
- struct rw_semaphore lock;
-};
-
-
struct file {
struct address_space *f_mapping;
};
+#define VMA_LOCK_OFFSET 0x40000000
+
struct vm_area_struct {
/* The first cache line has the info for VMA tree walking. */
@@ -269,16 +262,13 @@ struct vm_area_struct {
};
#ifdef CONFIG_PER_VMA_LOCK
- /* Flag to indicate areas detached from the mm->mm_mt tree */
- bool detached;
-
/*
* Can only be written (using WRITE_ONCE()) while holding both:
* - mmap_lock (in write mode)
- * - vm_lock->lock (in write mode)
+ * - vm_refcnt bit at VMA_LOCK_OFFSET is set
* Can be read reliably while holding one of:
* - mmap_lock (in read or write mode)
- * - vm_lock->lock (in read or write mode)
+ * - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1
* Can be read unreliably (using READ_ONCE()) for pessimistic bailout
* while holding nothing (except RCU to keep the VMA struct allocated).
*
@@ -287,20 +277,9 @@ struct vm_area_struct {
* slowpath.
*/
unsigned int vm_lock_seq;
- struct vma_lock *vm_lock;
#endif
/*
- * For areas with an address space and backing store,
- * linkage into the address_space->i_mmap interval tree.
- *
- */
- struct {
- struct rb_node rb;
- unsigned long rb_subtree_last;
- } shared;
-
- /*
* A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
* list, after a COW of one of the file pages. A MAP_SHARED vma
* can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
@@ -319,14 +298,6 @@ struct vm_area_struct {
struct file * vm_file; /* File we map to (can be NULL). */
void * vm_private_data; /* was vm_pte (shared mem) */
-#ifdef CONFIG_ANON_VMA_NAME
- /*
- * For private and shared anonymous mappings, a pointer to a null
- * terminated string containing the name given to the vma, or NULL if
- * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
- */
- struct anon_vma_name *anon_name;
-#endif
#ifdef CONFIG_SWAP
atomic_long_t swap_readahead_info;
#endif
@@ -339,6 +310,27 @@ struct vm_area_struct {
#ifdef CONFIG_NUMA_BALANCING
struct vma_numab_state *numab_state; /* NUMA Balancing state */
#endif
+#ifdef CONFIG_PER_VMA_LOCK
+ /* Unstable RCU readers are allowed to read this. */
+ refcount_t vm_refcnt;
+#endif
+ /*
+ * For areas with an address space and backing store,
+ * linkage into the address_space->i_mmap interval tree.
+ *
+ */
+ struct {
+ struct rb_node rb;
+ unsigned long rb_subtree_last;
+ } shared;
+#ifdef CONFIG_ANON_VMA_NAME
+ /*
+ * For private and shared anonymous mappings, a pointer to a null
+ * terminated string containing the name given to the vma, or NULL if
+ * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
+ */
+ struct anon_vma_name *anon_name;
+#endif
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
@@ -464,26 +456,40 @@ static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
return mas_find(&vmi->mas, ULONG_MAX);
}
-static inline bool vma_lock_alloc(struct vm_area_struct *vma)
+/*
+ * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these
+ * assertions should be made either under mmap_write_lock or when the object
+ * has been isolated under mmap_write_lock, ensuring no competing writers.
+ */
+static inline void vma_assert_attached(struct vm_area_struct *vma)
{
- vma->vm_lock = calloc(1, sizeof(struct vma_lock));
-
- if (!vma->vm_lock)
- return false;
-
- init_rwsem(&vma->vm_lock->lock);
- vma->vm_lock_seq = UINT_MAX;
+ WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
+}
- return true;
+static inline void vma_assert_detached(struct vm_area_struct *vma)
+{
+ WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
}
static inline void vma_assert_write_locked(struct vm_area_struct *);
-static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
+static inline void vma_mark_attached(struct vm_area_struct *vma)
{
- /* When detaching vma should be write-locked */
- if (detached)
- vma_assert_write_locked(vma);
- vma->detached = detached;
+ vma_assert_write_locked(vma);
+ vma_assert_detached(vma);
+ refcount_set_release(&vma->vm_refcnt, 1);
+}
+
+static inline void vma_mark_detached(struct vm_area_struct *vma)
+{
+ vma_assert_write_locked(vma);
+ vma_assert_attached(vma);
+ /* We are the only writer, so no need to use vma_refcount_put(). */
+ if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) {
+ /*
+ * Reader must have temporarily raised vm_refcnt but it will
+ * drop it without using the vma since vma is write-locked.
+ */
+ }
}
extern const struct vm_operations_struct vma_dummy_vm_ops;
@@ -496,7 +502,7 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
vma->vm_mm = mm;
vma->vm_ops = &vma_dummy_vm_ops;
INIT_LIST_HEAD(&vma->anon_vma_chain);
- vma_mark_detached(vma, false);
+ vma->vm_lock_seq = UINT_MAX;
}
static inline struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
@@ -507,10 +513,6 @@ static inline struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
return NULL;
vma_init(vma, mm);
- if (!vma_lock_alloc(vma)) {
- free(vma);
- return NULL;
- }
return vma;
}
@@ -523,10 +525,8 @@ static inline struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
return NULL;
memcpy(new, orig, sizeof(*new));
- if (!vma_lock_alloc(new)) {
- free(new);
- return NULL;
- }
+ refcount_set(&new->vm_refcnt, 0);
+ new->vm_lock_seq = UINT_MAX;
INIT_LIST_HEAD(&new->anon_vma_chain);
return new;
@@ -696,20 +696,9 @@ static inline void mpol_put(struct mempolicy *)
{
}
-static inline void vma_lock_free(struct vm_area_struct *vma)
-{
- free(vma->vm_lock);
-}
-
-static inline void __vm_area_free(struct vm_area_struct *vma)
-{
- vma_lock_free(vma);
- free(vma);
-}
-
static inline void vm_area_free(struct vm_area_struct *vma)
{
- __vm_area_free(vma);
+ free(vma);
}
static inline void lru_add_drain(void)
@@ -796,12 +785,12 @@ static inline void vma_start_write(struct vm_area_struct *vma)
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
- long adjust_next)
+ struct vm_area_struct *next)
{
(void)vma;
(void)start;
(void)end;
- (void)adjust_next;
+ (void)next;
}
static inline void vma_iter_free(struct vma_iterator *vmi)
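With vm_refcnt replacing both the old detached flag and the separately allocated vma_lock, "attached" now simply means vm_refcnt >= 1, the write lock is the VMA_LOCK_OFFSET bit, and readers pin the VMA by raising the count. A reader-side sketch of what this representation permits (vma_tryget() is a hypothetical name, not part of this patch):

	/*
	 * A lockless reader can pin a VMA only while it is attached:
	 * refcount_inc_not_zero() fails exactly when vm_refcnt is 0,
	 * i.e. when the VMA has been detached from the tree.
	 */
	static inline bool vma_tryget(struct vm_area_struct *vma)
	{
		return refcount_inc_not_zero(&vma->vm_refcnt);
	}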
diff --git a/tools/virtio/linux/compiler.h b/tools/virtio/linux/compiler.h
index 1f3a15b954b9..204ef0e9f542 100644
--- a/tools/virtio/linux/compiler.h
+++ b/tools/virtio/linux/compiler.h
@@ -10,4 +10,29 @@
#define READ_ONCE(var) (*((volatile typeof(var) *)(&(var))))
#define __aligned(x) __attribute((__aligned__(x)))
+
+/**
+ * data_race - mark an expression as containing intentional data races
+ *
+ * This data_race() macro is useful for situations in which data races
+ * should be forgiven. One example is diagnostic code that accesses
+ * shared variables but is not a part of the core synchronization design.
+ * For example, if accesses to a given variable are protected by a lock,
+ * except for diagnostic code, then the accesses under the lock should
+ * be plain C-language accesses and those in the diagnostic code should
+ * use data_race(). This way, KCSAN will complain if buggy lockless
+ * accesses to that variable are introduced, even if the buggy accesses
+ * are protected by READ_ONCE() or WRITE_ONCE().
+ *
+ * This macro *does not* affect normal code generation, but is a hint
+ * to tooling that data races here are to be ignored. If the access must
+ * be atomic *and* KCSAN should ignore the access, use both data_race()
+ * and READ_ONCE(), for example, data_race(READ_ONCE(x)).
+ */
+#define data_race(expr) \
+({ \
+ __auto_type __v = (expr); \
+ __v; \
+})
+
#endif
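A typical use, following the comment above: the variable is written under a lock on every path that matters, while a diagnostic path reads it locklessly (stats_len and the surrounding code are hypothetical):

	/*
	 * Writers update stats_len under stats_lock elsewhere; this
	 * diagnostic read tolerates a stale or torn value, and
	 * data_race() tells KCSAN not to flag it.
	 */
	printf("approx queue length: %lu\n", data_race(stats_len));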
diff --git a/tools/virtio/linux/dma-mapping.h b/tools/virtio/linux/dma-mapping.h
index 822ecaa8e4df..095958461788 100644
--- a/tools/virtio/linux/dma-mapping.h
+++ b/tools/virtio/linux/dma-mapping.h
@@ -31,6 +31,7 @@ enum dma_data_direction {
#define dma_unmap_page(d, a, s, r) do { (void)(d); (void)(a); (void)(s); (void)(r); } while (0)
#define sg_dma_address(sg) (0)
+#define sg_dma_len(sg) (0)
#define dma_need_sync(v, a) (0)
#define dma_unmap_single_attrs(d, a, s, r, t) do { \
(void)(d); (void)(a); (void)(s); (void)(r); (void)(t); \
@@ -43,4 +44,16 @@ enum dma_data_direction {
} while (0)
#define dma_max_mapping_size(...) SIZE_MAX
+/*
+ * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
+ * be given to a device to use as a DMA source or target. It is specific to a
+ * given device and there may be a translation between the CPU physical address
+ * space and the bus address space.
+ *
+ * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
+ * be used directly in drivers, but checked for using dma_mapping_error()
+ * instead.
+ */
+#define DMA_MAPPING_ERROR (~(dma_addr_t)0)
+
#endif
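Driver code never compares against the constant directly; the canonical kernel idiom goes through dma_mapping_error(), e.g. (a sketch of the usual pattern, not code in this tree):

	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* dma_mapping_error() hides the DMA_MAPPING_ERROR comparison. */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;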
diff --git a/tools/virtio/linux/module.h b/tools/virtio/linux/module.h
index 9dfa96fea2b2..b91681fc1571 100644
--- a/tools/virtio/linux/module.h
+++ b/tools/virtio/linux/module.h
@@ -5,3 +5,10 @@
static __attribute__((unused)) const char *__MODULE_LICENSE_name = \
__MODULE_LICENSE_value
+#ifndef MODULE_AUTHOR
+#define MODULE_AUTHOR(x)
+#endif
+
+#ifndef MODULE_DESCRIPTION
+#define MODULE_DESCRIPTION(x)
+#endif
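With these no-op fallbacks, virtio test sources can keep the same metadata block they would carry when built as a kernel module; under the harness the lines simply compile away (values hypothetical):

	MODULE_AUTHOR("Example Author <author@example.com>");
	MODULE_DESCRIPTION("virtio ring test harness");
	MODULE_LICENSE("GPL");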